[cig-commits] r5841 - in vendor: . buildbot buildbot/current buildbot/current/bin buildbot/current/buildbot buildbot/current/buildbot/changes buildbot/current/buildbot/clients buildbot/current/buildbot/process buildbot/current/buildbot/scripts buildbot/current/buildbot/slave buildbot/current/buildbot/status buildbot/current/buildbot/steps buildbot/current/buildbot/test buildbot/current/buildbot/test/mail buildbot/current/buildbot/test/subdir buildbot/current/contrib buildbot/current/contrib/windows buildbot/current/docs buildbot/current/docs/examples buildbot/current/docs/images

leif at geodynamics.org
Fri Jan 19 14:06:12 PST 2007


Author: leif
Date: 2007-01-19 14:06:08 -0800 (Fri, 19 Jan 2007)
New Revision: 5841

Added:
   vendor/buildbot/
   vendor/buildbot/current/
   vendor/buildbot/current/CREDITS
   vendor/buildbot/current/ChangeLog
   vendor/buildbot/current/MANIFEST.in
   vendor/buildbot/current/NEWS
   vendor/buildbot/current/PKG-INFO
   vendor/buildbot/current/README
   vendor/buildbot/current/README.w32
   vendor/buildbot/current/bin/
   vendor/buildbot/current/bin/buildbot
   vendor/buildbot/current/buildbot/
   vendor/buildbot/current/buildbot/__init__.py
   vendor/buildbot/current/buildbot/buildbot.png
   vendor/buildbot/current/buildbot/buildset.py
   vendor/buildbot/current/buildbot/changes/
   vendor/buildbot/current/buildbot/changes/__init__.py
   vendor/buildbot/current/buildbot/changes/base.py
   vendor/buildbot/current/buildbot/changes/bonsaipoller.py
   vendor/buildbot/current/buildbot/changes/changes.py
   vendor/buildbot/current/buildbot/changes/dnotify.py
   vendor/buildbot/current/buildbot/changes/freshcvs.py
   vendor/buildbot/current/buildbot/changes/freshcvsmail.py
   vendor/buildbot/current/buildbot/changes/mail.py
   vendor/buildbot/current/buildbot/changes/maildir.py
   vendor/buildbot/current/buildbot/changes/maildirgtk.py
   vendor/buildbot/current/buildbot/changes/maildirtwisted.py
   vendor/buildbot/current/buildbot/changes/monotone.py
   vendor/buildbot/current/buildbot/changes/p4poller.py
   vendor/buildbot/current/buildbot/changes/pb.py
   vendor/buildbot/current/buildbot/changes/svnpoller.py
   vendor/buildbot/current/buildbot/clients/
   vendor/buildbot/current/buildbot/clients/__init__.py
   vendor/buildbot/current/buildbot/clients/base.py
   vendor/buildbot/current/buildbot/clients/debug.glade
   vendor/buildbot/current/buildbot/clients/debug.py
   vendor/buildbot/current/buildbot/clients/gtkPanes.py
   vendor/buildbot/current/buildbot/clients/sendchange.py
   vendor/buildbot/current/buildbot/dnotify.py
   vendor/buildbot/current/buildbot/interfaces.py
   vendor/buildbot/current/buildbot/locks.py
   vendor/buildbot/current/buildbot/manhole.py
   vendor/buildbot/current/buildbot/master.py
   vendor/buildbot/current/buildbot/pbutil.py
   vendor/buildbot/current/buildbot/process/
   vendor/buildbot/current/buildbot/process/__init__.py
   vendor/buildbot/current/buildbot/process/base.py
   vendor/buildbot/current/buildbot/process/builder.py
   vendor/buildbot/current/buildbot/process/buildstep.py
   vendor/buildbot/current/buildbot/process/factory.py
   vendor/buildbot/current/buildbot/process/maxq.py
   vendor/buildbot/current/buildbot/process/process_twisted.py
   vendor/buildbot/current/buildbot/process/step.py
   vendor/buildbot/current/buildbot/process/step_twisted.py
   vendor/buildbot/current/buildbot/process/step_twisted2.py
   vendor/buildbot/current/buildbot/scheduler.py
   vendor/buildbot/current/buildbot/scripts/
   vendor/buildbot/current/buildbot/scripts/__init__.py
   vendor/buildbot/current/buildbot/scripts/logwatcher.py
   vendor/buildbot/current/buildbot/scripts/reconfig.py
   vendor/buildbot/current/buildbot/scripts/runner.py
   vendor/buildbot/current/buildbot/scripts/sample.cfg
   vendor/buildbot/current/buildbot/scripts/startup.py
   vendor/buildbot/current/buildbot/scripts/tryclient.py
   vendor/buildbot/current/buildbot/slave/
   vendor/buildbot/current/buildbot/slave/__init__.py
   vendor/buildbot/current/buildbot/slave/bot.py
   vendor/buildbot/current/buildbot/slave/commands.py
   vendor/buildbot/current/buildbot/slave/interfaces.py
   vendor/buildbot/current/buildbot/slave/registry.py
   vendor/buildbot/current/buildbot/slave/trial.py
   vendor/buildbot/current/buildbot/sourcestamp.py
   vendor/buildbot/current/buildbot/status/
   vendor/buildbot/current/buildbot/status/__init__.py
   vendor/buildbot/current/buildbot/status/base.py
   vendor/buildbot/current/buildbot/status/builder.py
   vendor/buildbot/current/buildbot/status/classic.css
   vendor/buildbot/current/buildbot/status/client.py
   vendor/buildbot/current/buildbot/status/html.py
   vendor/buildbot/current/buildbot/status/mail.py
   vendor/buildbot/current/buildbot/status/progress.py
   vendor/buildbot/current/buildbot/status/tests.py
   vendor/buildbot/current/buildbot/status/tinderbox.py
   vendor/buildbot/current/buildbot/status/words.py
   vendor/buildbot/current/buildbot/steps/
   vendor/buildbot/current/buildbot/steps/__init__.py
   vendor/buildbot/current/buildbot/steps/dummy.py
   vendor/buildbot/current/buildbot/steps/maxq.py
   vendor/buildbot/current/buildbot/steps/python.py
   vendor/buildbot/current/buildbot/steps/python_twisted.py
   vendor/buildbot/current/buildbot/steps/shell.py
   vendor/buildbot/current/buildbot/steps/source.py
   vendor/buildbot/current/buildbot/steps/transfer.py
   vendor/buildbot/current/buildbot/test/
   vendor/buildbot/current/buildbot/test/__init__.py
   vendor/buildbot/current/buildbot/test/emit.py
   vendor/buildbot/current/buildbot/test/emitlogs.py
   vendor/buildbot/current/buildbot/test/mail/
   vendor/buildbot/current/buildbot/test/mail/msg1
   vendor/buildbot/current/buildbot/test/mail/msg2
   vendor/buildbot/current/buildbot/test/mail/msg3
   vendor/buildbot/current/buildbot/test/mail/msg4
   vendor/buildbot/current/buildbot/test/mail/msg5
   vendor/buildbot/current/buildbot/test/mail/msg6
   vendor/buildbot/current/buildbot/test/mail/msg7
   vendor/buildbot/current/buildbot/test/mail/msg8
   vendor/buildbot/current/buildbot/test/mail/msg9
   vendor/buildbot/current/buildbot/test/mail/syncmail.1
   vendor/buildbot/current/buildbot/test/mail/syncmail.2
   vendor/buildbot/current/buildbot/test/mail/syncmail.3
   vendor/buildbot/current/buildbot/test/mail/syncmail.4
   vendor/buildbot/current/buildbot/test/mail/syncmail.5
   vendor/buildbot/current/buildbot/test/runutils.py
   vendor/buildbot/current/buildbot/test/sleep.py
   vendor/buildbot/current/buildbot/test/subdir/
   vendor/buildbot/current/buildbot/test/subdir/emit.py
   vendor/buildbot/current/buildbot/test/test__versions.py
   vendor/buildbot/current/buildbot/test/test_bonsaipoller.py
   vendor/buildbot/current/buildbot/test/test_buildreq.py
   vendor/buildbot/current/buildbot/test/test_changes.py
   vendor/buildbot/current/buildbot/test/test_config.py
   vendor/buildbot/current/buildbot/test/test_control.py
   vendor/buildbot/current/buildbot/test/test_dependencies.py
   vendor/buildbot/current/buildbot/test/test_locks.py
   vendor/buildbot/current/buildbot/test/test_maildir.py
   vendor/buildbot/current/buildbot/test/test_mailparse.py
   vendor/buildbot/current/buildbot/test/test_p4poller.py
   vendor/buildbot/current/buildbot/test/test_properties.py
   vendor/buildbot/current/buildbot/test/test_run.py
   vendor/buildbot/current/buildbot/test/test_runner.py
   vendor/buildbot/current/buildbot/test/test_scheduler.py
   vendor/buildbot/current/buildbot/test/test_shell.py
   vendor/buildbot/current/buildbot/test/test_slavecommand.py
   vendor/buildbot/current/buildbot/test/test_slaves.py
   vendor/buildbot/current/buildbot/test/test_status.py
   vendor/buildbot/current/buildbot/test/test_steps.py
   vendor/buildbot/current/buildbot/test/test_svnpoller.py
   vendor/buildbot/current/buildbot/test/test_transfer.py
   vendor/buildbot/current/buildbot/test/test_twisted.py
   vendor/buildbot/current/buildbot/test/test_util.py
   vendor/buildbot/current/buildbot/test/test_vc.py
   vendor/buildbot/current/buildbot/test/test_web.py
   vendor/buildbot/current/buildbot/twcompat.py
   vendor/buildbot/current/buildbot/util.py
   vendor/buildbot/current/contrib/
   vendor/buildbot/current/contrib/README.txt
   vendor/buildbot/current/contrib/arch_buildbot.py
   vendor/buildbot/current/contrib/darcs_buildbot.py
   vendor/buildbot/current/contrib/fakechange.py
   vendor/buildbot/current/contrib/hg_buildbot.py
   vendor/buildbot/current/contrib/run_maxq.py
   vendor/buildbot/current/contrib/svn_buildbot.py
   vendor/buildbot/current/contrib/svn_watcher.py
   vendor/buildbot/current/contrib/svnpoller.py
   vendor/buildbot/current/contrib/viewcvspoll.py
   vendor/buildbot/current/contrib/windows/
   vendor/buildbot/current/contrib/windows/buildbot.bat
   vendor/buildbot/current/contrib/windows/buildbot2.bat
   vendor/buildbot/current/contrib/windows/buildbot_service.py
   vendor/buildbot/current/contrib/windows/setup.py
   vendor/buildbot/current/docs/
   vendor/buildbot/current/docs/buildbot.html
   vendor/buildbot/current/docs/buildbot.info
   vendor/buildbot/current/docs/buildbot.texinfo
   vendor/buildbot/current/docs/epyrun
   vendor/buildbot/current/docs/examples/
   vendor/buildbot/current/docs/examples/glib_master.cfg
   vendor/buildbot/current/docs/examples/hello.cfg
   vendor/buildbot/current/docs/examples/twisted_master.cfg
   vendor/buildbot/current/docs/gen-reference
   vendor/buildbot/current/docs/images/
   vendor/buildbot/current/docs/images/master.png
   vendor/buildbot/current/docs/images/overview.png
   vendor/buildbot/current/docs/images/slavebuilder.png
   vendor/buildbot/current/docs/images/slaves.png
   vendor/buildbot/current/docs/images/status.png
   vendor/buildbot/current/setup.py
Log:
Imported buildbot.

Added: vendor/buildbot/current/CREDITS
===================================================================
--- vendor/buildbot/current/CREDITS	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/CREDITS	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,51 @@
+This is a list of everybody who has contributed to Buildbot in some way, in
+no particular order. Thanks everybody!
+
+Scott Lamb
+Olivier Bonnet
+Mark Hammond
+Gary Granger
+Marius Gedminas
+Paul Winkler
+John O'Duinn
+JP Calderone
+Zandr Milewski
+Niklaus Giger
+Tobi Vollebregt
+John Pye
+Neal Norwitz
+Anthony Baxter
+AllMyData.com
+Clement Stenac
+Kevin Turner
+Steven Walter
+Dobes Vandermeer
+Brad Hards
+Nathaniel Smith
+Mark Dillavou
+Thomas Vander Stichele
+Bear
+Brandon Philips
+Nick Trout
+Paul Warren
+Rene Rivera
+Baptiste Lepilleur
+Gerald Combs
+Yoz Grahame
+Alexander Staubo
+Elliot Murphy
+Stephen Davis
+Kirill Lapshin
+Dave Peticolas
+Andrew Bennetts
+Olly Betts
+Philipp Frauenfelder
+James Knight
+Albert Hofkamp
+Brett Neely
+Wade Brainerd
+Nick Mathewson
+Roy Rapoport
+Mark Rowe
+Ben Hearsum
+Dave Liebreich

Added: vendor/buildbot/current/ChangeLog
===================================================================
--- vendor/buildbot/current/ChangeLog	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/ChangeLog	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,7331 @@
+2006-12-10  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.7.5
+	* docs/buildbot.texinfo: set version to match
+
+2006-12-10  Brian Warner  <warner at lothar.com>
+
+	* README (REQUIREMENTS): update for release
+	* NEWS: update for release
+	* buildbot/slave/commands.py (command_version): mention that this
+	version (2.2) was released with buildbot-0.7.5
+
+	* buildbot/test/test_config.py (StartService.testStartService):
+	inhibit the usual read-config-on-startup behavior, since otherwise
+	the log.err that gets recorded causes the test to fail
+
+	* buildbot/status/builder.py (LogFile.finish): forget about all
+	subscribers once the log has finished, since after that point
+	we're never going to use them again. This might help free up some
+	memory somewhere.
+
+	* buildbot/clients/debug.py: update to use gtk.main_quit() rather
+	than the old/deprecated gtk.mainquit()
+
+2006-12-09  Brian Warner  <warner at lothar.com>
+
+	* buildbot/steps/transfer.py (_FileWriter.__del__): handle errors
+	better when we can't open the masterdst file
+
+	* buildbot/scripts/startup.py (Follower._failure): add missing
+	import statement for BuildSlaveDetectedError
+
+	* buildbot/steps/transfer.py (FileUpload): cleanup
+	(FileDownload): same. Add tests for slave version, add mode=.
+	* buildbot/slave/commands.py (SlaveFileUploadCommand): same
+	(SlaveFileDownloadCommand): same
+	* buildbot/test/test_transfer.py: enhance tests
+	* buildbot/test/runutils.py (makeBuildStep): create a fake form of
+	step.slaveVersion
+
+2006-12-08  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py (sendchange): halt the reactor on
+	both success *and* failure. Without this, the 'buildbot
+	sendchange' command would hang if it could not contact the
+	buildmaster or deliver the Change, which would generally cause the
+	user's commit/record/checkin command to hang too. Thanks to
+	Christian Unger for the catch.
+
+2006-12-06  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update with items for the next release
+
+	* docs/buildbot.texinfo (Adding LogObservers): add a somewhat
+	whimsical example pulled from a punch-drunk email I sent out late
+	one night.
+	(Transferring Files): document some of the other parameters
+	(Adding LogObservers): update to 0.7.5 buildbot.steps.*
+	(SVNPoller): rename svnpoller.SvnSource to SVNPoller
+	* buildbot/test/test_svnpoller.py: same
+	* buildbot/changes/svnpoller.py (SVNPoller): same
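The SVNPoller named here is the Subversion-polling ChangeSource added in the
2006-10-01 entry below (as SvnSource). A minimal master.cfg sketch of its use;
the svnurl= and pollinterval= argument names are assumptions, as this entry
does not spell them out:

    from buildbot.changes.svnpoller import SVNPoller  # formerly SvnSource

    # Poll the repository every five minutes for new revisions.
    c['sources'].append(SVNPoller(svnurl="https://svn.example.org/repos/trunk",
                                  pollinterval=5 * 60))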
+
+2006-11-26  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (Build Properties): remind users that
+	WithProperties must appear in a command= list, not as a top-level
+	instance.
+	* buildbot/steps/shell.py (ShellCommand.start): and assert that
+	we're sending a list or a single string to the RemoteShellCommand
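A sketch of what that reminder means in a master.cfg; the import location of
WithProperties and the positional %s substitution form are assumptions based
on the 0.7.x layout:

    from buildbot.steps.shell import ShellCommand, WithProperties

    # Right: WithProperties is an element *inside* the command= list ...
    f.addStep(ShellCommand(command=["tar", "czf",
                                    WithProperties("snapshot-%s.tar.gz", "buildnumber"),
                                    "source"]))
    # ... not a wrapper around the whole step or a top-level instance.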
+
+	* buildbot/scheduler.py (Nightly): Improve docs slightly.
+
+	* buildbot/scripts/startup.py (start): skip the whole
+	watch-the-logfile thing under windows, since it needs os.fork()
+
+	* buildbot/scripts/runner.py (restart): remove the old message
+	that got printed after the buildbot was restarted.. in most cases
+	it didn't get printed at the right time anyways
+
+2006-11-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py: enhance 'start' and 'restart' to
+	follow twistd.log and print lines until the process has started
+	properly. For the buildmaster, this means until the config
+	file has been parsed and accepted. For the buildslave, this means
+	until we've connected to the master. We give up after 5 seconds in
+	any case. Helpful error messages and troubleshooting suggestions
+	are printed when we don't see a successful startup. This closes the
+	remainder of SF#1517975.
+	* buildbot/scripts/startup.py: moved app startup code to here
+	* buildbot/scripts/logwatcher.py: utility class to follow log
+	* buildbot/scripts/reconfig.py: rewrite to use LogWatcher
+	* buildbot/slave/bot.py: announce our BuildSlaveness to the log
+	so the LogWatcher can tell the difference between a buildmaster
+	and a buildslave
+
+2006-11-24  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: update to match the version
+	in use on twistedmatrix.com
+	(IRC): re-enable IRC bot. The 'irc.us.freenode.net' hostname I
+	was using before stopped working, but 'irc.freenode.net' works
+	just fine.
+
+	* buildbot/scripts/runner.py (run): oops, forgot to enable the new
+	'reconfig' command
+
+	* buildbot/clients/base.py (TextClient.not_connected): upon
+	UnauthorizedLogin failures, remind the user to connect to the
+	PBListener port instead of the slaveport.
+	(TextClient.disconnected): shut down more quietly
+	* docs/buildbot.texinfo (statuslog): add another reminder
+
+	* buildbot/scripts/runner.py (Options.subCommands): rename
+	'buildbot sighup DIR' to 'buildbot reconfig DIR', but keep
+	'sighup' as an alias.
+	* buildbot/scripts/reconfig.py (Reconfigurator): enhance the
+	reconfig command to follow twistd.log and print all of the lines
+	from the start of the config-file reload to its completion. This
+	should make it a lot easier to discover bugs in the config file.
+	Use --quiet to disable this behavior. This addresses half of
+	SF#1517975, the other half will be to add this same utility to
+	'buildbot start' and 'buildbot restart'.
+	* docs/buildbot.texinfo (Loading the Config File): same
+	(Shutdown): same
+
+	* buildbot/interfaces.py (IBuilderControl.forceBuild): remove this
+	method, it has been deprecated for a long time. Use
+	IBuilderControl.requestBuild instead.
+	* buildbot/process/builder.py (BuilderControl.forceBuild): remove
+	* buildbot/master.py (BotPerspective.perspective_forceBuild): same
+	* buildbot/slave/bot.py (Bot.debug_forceBuild): same
+	* buildbot/test/test_control.py (Force.testForce): same
+	* buildbot/test/test_run.py: use requestBuild instead
+
+	* buildbot/clients/debug.py: replace 'Force Build' button with
+	'Request Build' (which just adds one to the queue), add Ping
+	Builder, add branch/revision fields to Request Build.
+	* buildbot/clients/debug.glade: same
+	* buildbot/master.py: update interface to match. This creates an
+	incompatibility between new debugclients and old buildmasters.
+
+	* buildbot/process/builder.py (Builder._attached): delay the call
+	to maybeStartBuild for a reactor turn, to avoid starting a build
+	in the middle of a reconfig (say, if the new Builder uses a new
+	slave which is already connected).
+
+2006-11-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_transfer.py: appease pyflakes
+	* buildbot/test/test_steps.py: same
+
+	* buildbot/test/test_bonsaipoller.py: remove the 'import *' that
+	keeps pyflakes from finding undefined names
+
+	* buildbot/master.py (BuildMaster.loadConfig_Builders): changing a
+	Builder no longer induces a disconnect/reconnect cycle. This means
+	that any builds currently in progress will not be interrupted, and
+	any builds which are queued in the Builder will not be lost. This
+	is implemented by having the new Builder extract the state (i.e.
+	all pending Builds and any desired SlaveBuilders) from the old
+	Builder.
+	(BotPerspective): refactor. The BotPerspective no longer keeps
+	track of all the Builders that want to use this slave; instead, it
+	asks the BotMaster each time it needs this list. This removes
+	addBuilder and removeBuilder. Clean up attached() to acquire all
+	the slave's information in a more atomic fashion. updateSlave() is
+	now the way to make sure the slave is using the right set of
+	Builders: just call it after everything else has been
+	reconfigured.
+	(BotMaster): refactor, removing addBuilder/removeBuilder and
+	replacing them with an all-at-once setBuilders() call.
+
+	* buildbot/test/test_slaves.py (Reconfig): new test case to
+	exercise this functionality
+	* buildbot/steps/dummy.py (Wait): new dummy BuildStep for the test
+	* buildbot/slave/commands.py (WaitCommand): same
+
+	* docs/buildbot.texinfo (Loading the Config File): document the
+	changes
+
+	* buildbot/process/builder.py (SlaveBuilder): refactor. Allow the
+	SlaveBuilder to have its parent Builder changed.
+	(SlaveBuilder.isAvailable): new method to give access to state,
+	which is now a private attribute
+	(SlaveBuilder.buildStarted,buildFinished): new methods to inform
+	the SlaveBuilder about how it is being used. These methods update
+	its internal state. buildFinished() is now the place that invokes
+	maybeStartBuild() on its parent Builder.
+	(Builder.consumeTheSoulOfYourPredecessor): new method to allow a
+	new Builder to take over for an old one, transferring state from
+	the old one.
+	(Builder): refactor the way that SlaveBuilders are used to match,
+	giving them a bit more autonomy.
+	(Builder.buildFinished): this no longer calls maybeStartBuild():
+	instead the SlaveBuilder calls it on whoever its parent Builder is
+	at the time. This way, when an old Builder is replaced by a new
+	one, and there was a build in progress during the transition, when
+	that build finishes, it will be the new Builder that is told about
+	the newly available slave so it can start a new build.
+
+	* buildbot/process/base.py (Build.startBuild._release_slave): when
+	the Build finishes, tell the SlaveBuilder that they've been
+	released.
+
+	* buildbot/status/builder.py (SlaveStatus): add some new setter
+	methods for use by BotPerspective, to keep some attributes more
+	private
+
+	* buildbot/slave/bot.py (Bot.remote_getDirs): this is no longer
+	called by the buildmaster.
+	(Bot.setBuilderList): instead, we locally announce any leftover
+	directories based upon which Builders we were told about. The
+	master doesn't really care; it's the local admin who may or may not
+	wish to delete them.
+
+
+	* contrib/svn_buildbot.py: use /usr/bin/python, not /usr/bin/env,
+	to allow use of python2.4 or whatever. This tool still requires
+	python2.3 or newer.
+
+2006-11-19  Brian Warner  <warner at lothar.com>
+
+	* NEWS (IStatusLog.readlines): more news items
+
+2006-11-18  Brian Warner  <warner at lothar.com>
+
+	* NEWS: start collecting items for the next release.
+
+2006-11-04  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/bonsaipoller.py: apply updates from Ben
+	Hearsum. Closes SF#1590310.
+	* buildbot/test/test_bonsaipoller.py: and tests
+
+	* buildbot/status/tinderbox.py
+	(TinderboxMailNotifier.buildMessage): send out a "testfailed"
+	status when the build results in WARNINGS. Patch from Dave
+	Liebreich. Closes SF#1587352.
+
+	* buildbot/slave/commands.py (LogFileWatcher.poll): overcome a
+	linux-vs-osx behavior difference w.r.t. reading from files that
+	have reached EOF. This should fix LogFileWatcher on OS-X. Thanks
+	to Mark Rowe for the patch.
+
+2006-10-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/interfaces.py (IStatus.getURLForThing): oops, the
+	method name was misspelled in the interface definition. Thanks to
+	Roy Rapoport for the catch.
+
+2006-10-13  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (Adding LogObservers): update sample code
+	to match the great Steps renaming
+
+	* buildbot/steps/transfer.py (FileUpload.start): Fix stupid error.
+	Maybe I should run my own unit tests before recording a big
+	change. Good thing I've got a buildbot to remind me.
+
+2006-10-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/steps/transfer.py: rework __init__ and args setup
+	* buildbot/slave/commands.py (SlaveFileDownloadCommand): minor
+	docs improvements
+	* buildbot/slave/commands.py (SlaveFileDownloadCommand.setup):
+	when opening the target file, only catch IOError (to report via
+	stderr/rc!=0), let the others be reported as normal exceptions
+
+2006-10-08  Brian Warner  <warner at lothar.com>
+
+	* contrib/svn_watcher.py: fix security holes by using proper argv
+	arrays and subprocess.Popen() rather than commands.getoutput().
+	Thanks to Nick Mathewson for the patch. Note that svn_watcher.py
+	is deprecated in favor of buildbot/changes/svnpoller.py, and will
+	probably be removed by the next release.
+	* CREDITS: add Nick
+
+2006-10-04  Brian Warner  <warner at lothar.com>
+
+	* buildbot/steps/python.py (PyFlakes.createSummary): skip any
+	initial lines that weren't emitted by pyflakes. When the pyflakes
+	command is run under a Makefile, 'make' will echo the command it
+	runs to stdout, and that was getting logged as a "misc" warning.
+	* buildbot/test/test_steps.py (Python.testPyFlakes2): test it
+	* buildbot/test/test_steps.py (testPyFlakes3): another test
+
+2006-10-01  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/html.py (HtmlResource.render): if we get a
+	unicode object from our content() method, encode it into utf-8
+	like we've been claiming to all along. This allows the comments
+	and author names from svnpoller.py to be delivered properly.
+
+	* buildbot/changes/svnpoller.py (SvnSource.create_changes):
+	de-unicodify filenames before creating the Change, because the
+	rest of buildbot is unlikely to handle them well. Leave the 'who'
+	field as a unicode object.. I don't think there's anything that
+	will break very soon, and it will probably nudge us towards
+	accepting unicode everywhere sooner or later. Stop using the
+	"date" field that comes out of SVN, since it is using the
+	repository's clock (and timezone) and what we care about is the
+	buildmaster's (otherwise Changes from the future show up later
+	than the builds they triggered).
+	* buildbot/test/test_svnpoller.py (Everything.test1): match the
+	change to .when
+
+	* buildbot/changes/svnpoller.py (SvnSource): added Niklaus Giger's
+	Subversion repository polling ChangeSource. I've hacked it up
+	considerably: any bugs are entirely my own fault. Thank you
+	Niklaus!
+	* buildbot/test/test_svnpoller.py: tests for it
+	* docs/buildbot.texinfo (SvnSource): document it
+
+2006-09-30  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scheduler.py (Periodic): submit a reason= to the
+	BuildSet to indicate which Scheduler triggered the build. Thanks
+	to Mateusz Loskot for the suggestion.
+	(Nightly): same
+	* buildbot/test/test_scheduler.py (Scheduling.testPeriodic1): test it
+
+	* buildbot/changes/p4poller.py (P4Source): some minor stylistic
+	changes: set self.loop in __init__, remove unused volatile=
+
+	* docs/buildbot.texinfo (.buildbot config directory): add more
+	docs on the .buildbot/options keys used by "buildbot try"
+	* buildbot/scripts/tryclient.py (Try.createJob): remove dead code
+	(Try.deliverJob): same
+
+	* buildbot/changes/bonsaipoller.py (BonsaiParser): more updates
+	from Robert Helmer
+	(BonsaiPoller): same
+
+	* buildbot/slave/commands.py (LogFileWatcher.stop): explicitly
+	close the filehandle when we stop watching the file. Before, the
+	filehandle was only closed when the LogFileWatcher was
+	garbage-collected, which could be quite a while in the future. If
+	it was still open by the time the next build started, windows will
+	refuse to let the new build delete the old build/ directory. Fixes
+	SF#1568415, thanks to <scmikes>, <FireMoth>, and <radix> on
+	#twisted for the catch.
+
+2006-09-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/tinderbox.py (TinderboxMailNotifier): updates
+	from Robert Helmer
+
+2006-09-25  Brian Warner  <warner at lothar.com>
+
+	* setup.py: the new buildbot.steps module wasn't being installed.
+	Thanks to Jose Dapena Paz for the catch, fixes SF#1560631.
+	(testmsgs): add the extra stuff from buildbot/test/* so you can
+	run unit tests on an installed copy of buildbot, not just from
+	the source tree.
+
+	* contrib/svn_buildbot.py (ChangeSender.getChanges): the first *4*
+	columns of 'svnlook changed' output contain status information, so
+	strip [:4] instead of [:6]. Depending upon what the status flags
+	were, this would sometimes lead to mangled filenames. Thanks to
+	Riccardo Magliocchetti for the patch. Closes SF#1545146.
+
+	* buildbot/steps/source.py (Monotone): initial Monotone support,
+	contributed by Nathaniel Smith. Still needs docs and tests, but
+	this code has been in use on the Monotone buildbot for a long
+	time now.
+	* buildbot/slave/commands.py (Monotone): slave-side support
+	* buildbot/changes/monotone.py (MonotoneSource): polling change
+	source
+
+	* buildbot/changes/bonsaipoller.py (BonsaiPoller): Ben also
+	contributed a Change Source that polls a Bonsai server (a
+	kind of web-based viewcvs CGI script).
+
+	* buildbot/status/tinderbox.py (TinderboxMailNotifier): Ben
+	Hearsum contributed a status plugin which sends email in the same
+	format that Tinderbox does: this allows a number of tinderbox
+	tools to be driven by Buildbot instead. Thanks Ben!
+
+2006-09-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/mail.py (parseBonsaiMail): fix the parser.
+	Thanks to Robert Helmer for the patch.
+
+	* buildbot/process/base.py (Build.setupSlaveBuilder): tell our
+	BuildStatus about the buildslave name at the *beginning* of the
+	build, rather than at the end. Thanks to Alexander Lorenz for the
+	patch.
+	* buildbot/status/html.py (StatusResourceBuild.body): always
+	include the slavename in the build page, not just when the build
+	has finished.
+	* buildbot/status/mail.py (MailNotifier.buildMessage): include the
+	slavename in the email message
+
+2006-09-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/sample.cfg: update to use new BuildStep classes
+	from buildbot.steps
+	* docs/examples/glib_master.cfg: same
+	* docs/examples/hello.cfg: same
+	* docs/examples/twisted_master.cfg: same, update to current usage
+
+2006-09-19  Brian Warner  <warner at lothar.com>
+
+	* buildbot/steps/python.py (PyFlakes): refactor, add summary logs
+	(PyFlakes.createSummary): make it compatible with python-2.2
+
+	* buildbot/test/test_steps.py (Python.testPyFlakes): add a test
+	for at least the output-parsing parts of PyFlakes
+
+2006-09-18  Brian Warner  <warner at lothar.com>
+
+	* buildbot/steps/python.py: oops, fix import of StringIO
+
+2006-09-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (VCBase._do_vctest_update_retry_1): it
+	turns out that SVN-1.4.0 doesn't fail to update once you've
+	replaced a file with a directory, unlike older versions of SVN and
+	pretty much every other VC tool we support. Since what we really
+	care about is that the update succeeds anyway, stop checking that
+	the tree got clobbered and just assert that the build succeeded.
+	(VCBase.printLogs): add a utility function for debugging
+
+	* buildbot/process/step.py: oops, added extra imports by mistake
+
+	* buildbot/changes/p4poller.py (P4Source._process_describe): do an
+	rstrip() on the first line coming out of the 'p4 describe'
+	process, to remove the stray ^M that Wade Brainerd reports seeing
+	in the 'when' field. Fixes SF#1555985.
+
+	* buildbot/master.py (BuildMaster.loadConfig): improve the error
+	message logged when c['schedulers'] is not right
+	* buildbot/scheduler.py (Scheduler.__init__): improve error
+	message when a Scheduler() is created with the wrong arguments
+	* buildbot/test/test_config.py (ConfigTest.testSchedulerErrors):
+	verify that these error messages are emitted
+
+	* buildbot/process/buildstep.py: rename step.py to buildstep.py .
+	The idea is that all the base classes (like BuildStep and
+	RemoteCommand and LogObserver) live in b.p.buildstep, and b.p.step
+	will be a leftover backwards-compatibility file that only contains
+	aliases for the steps that were moved out to buildbot.steps.*
+	* lots: change imports to match
+	* buildbot/process/step.py: add a DeprecationWarning if it ever
+	gets imported
+
+2006-09-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scheduler.py (Scheduler.__init__): make sure that
+	builderNames= is actually a sequence, since if you happen to give
+	it a single builder-specification dictionary instead, it won't get
+	caught by the existing assert. Thanks to Brett Neely for the
+	catch.
+
+	* buildbot/steps/python.py (BuildEPYDoc, PyFlakes): add new steps. No
+	tests yet, alas.
+	* docs/buildbot.texinfo (Python BuildSteps): document them
+	(sendchange): include a link to PBChangeSource, since you need one
+
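A hedged example of adding the new PyFlakes step to a BuildFactory; the
command= value shown is illustrative:

    from buildbot.steps.python import PyFlakes

    # Run pyflakes over the tree; its warnings are parsed into a summary.
    f.addStep(PyFlakes(command=["pyflakes", "buildbot"]))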
+	* buildbot/steps/shell.py: clean up test-case-name line, remove some
+	unnecessary imports
+	* buildbot/steps/dummy.py: same
+
+2006-09-08  Brian Warner  <warner at lothar.com>
+
+	* buildbot/steps/transfer.py (FileUpload,FileDownload): new
+	BuildStep which lets you transfer files from the master to the
+	slave or vice versa. Thanks to Albert Hofkamp for the original
+	patch. Fixes SF#1504631.
+	* buildbot/slave/commands.py (SlaveFileUploadCommand): slave-side
+	support for it
+	(SlaveFileDownloadCommand): same
+	* docs/buildbot.texinfo (Transferring Files): document it
+	* buildbot/test/test_transfer.py: test it
+	* buildbot/test/runutils.py (StepTester): new utility class for
+	testing BuildSteps and RemoteCommands without Builds or Bots or PB
+	* buildbot/test/test_steps.py (CheckStepTester): validate that the
+	utility class works
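A rough master.cfg sketch of the new transfer steps; the argument names
(slavesrc=/masterdest= and mastersrc=/slavedest=) and paths are assumptions
based on this entry and the mode= mentioned above:

    from buildbot.steps.transfer import FileUpload, FileDownload

    # Pull a build product up to the master ...
    f.addStep(FileUpload(slavesrc="dist/output.tar.gz",
                         masterdest="/home/buildmaster/uploads/output.tar.gz"))
    # ... and push a config file down to the slave, setting its mode.
    f.addStep(FileDownload(mastersrc="/home/buildmaster/configs/site.cfg",
                           slavedest="site.cfg", mode=0644))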
+
+	* buildbot/interfaces.py (IStatusLog.readlines): make it easier to
+	walk through StatusLogs one line at a time, mostly for the benefit
+	of ShellCommand.createSummary methods. You can either walk through
+	STDOUT or STDERR, but the default is STDOUT.
+
+	* buildbot/status/builder.py (LogFile.readlines): implement it.
+	Note that this is not yet memory-efficient, it just pulls the
+	whole file into RAM and then splits it up with a StringIO.
+	Eventually this should be a generator that only pulls chunks from
+	disk as necessary.
+	* buildbot/test/test_status.py (Log.testReadlines): test it
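A sketch of the intended use from a createSummary method, assuming
addCompleteLog() as the way to publish the collected lines:

    from buildbot.steps.shell import ShellCommand

    class GrepWarnings(ShellCommand):
        # Walk the stdio log one line at a time (STDOUT by default) and
        # collect compiler warnings into a separate summary log.
        def createSummary(self, log):
            warnings = [line for line in log.readlines() if "warning:" in line]
            if warnings:
                self.addCompleteLog("warnings", "".join(warnings))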
+
+	* docs/buildbot.texinfo: update to match changes
+	* buildbot/process/factory.py: stop using old definitions
+	* buildbot/process/process_twisted.py: same
+	* buildbot/test/test_*.py: same
+
+	* buildbot/process/step_twisted.py: move definition to..
+	* buildbot/steps/python_twisted.py: .. here, unfortunately python's
+	relative-import mechanisms prevent this from being named 'twisted'
+	or 'python/twisted' as I would have preferred.
+
+	* buildbot/process/maxq.py: move definition to..
+	* buildbot/steps/maxq.py: .. here, leave a compatibility import
+
+	* buildbot/process/step.py: split the user-visible BuildSteps into
+	separate files, all under buildbot/steps/
+	* buildbot/steps/source.py: this holds VC-checkout steps like SVN
+	* buildbot/steps/shell.py: this holds ShellCommand and friends
+	* buildbot/steps/dummy.py: this holds the testing steps like Dummy
+
+2006-09-05  Brian Warner  <warner at lothar.com>
+
+	* lots: run pyflakes, removed a lot of unused imports, changed the
+	form of some conditional imports to remove false pyflakes
+	warnings. There are still a number of warnings left, mostly from
+	imports that are done for their side-effects.
+	* buildbot/test/test_vc.py: import twisted.python.failure, since it
+	was missing
+
+2006-08-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_locks.py (Unit.testLater): make the tests
+	compatible with twisted-1.3.0, for some reason I just can't seem
+	to let go of the past.
+
+2006-08-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/mail.py (MailNotifier.__init__): fix typo in docs
+
+	* buildbot/process/step.py (LoggingBuildStep.startCommand): set up
+	all logfiles= in startCommand(), rather than in start() . This
+	makes it easier to have the 'stdio' log come before any secondary
+	logfiles, which I feel makes the waterfall display more
+	understandable.
+	(LoggingBuildStep.setupLogfiles): move the addLog/cmd.useLog code
+	from ShellCommand up into LoggingBuildStep
+	(LoggingBuildStep.__init__): move the handling of logfiles= from
+	ShellCommand up to LoggingBuildStep, because startCommand is
+	provided by LoggingBuildStep, whereas start() was specific to
+	subclasses like ShellCommand and Source. This removes code
+	duplication in those subclasses.
+	(ShellCommand.__init__): same
+	(ShellCommand.checkForOldSlaveAndLogfiles): split out the check
+	for a slave that's too old to understand logfiles= into a separate
+	method, so it can live in ShellCommand. The rest of
+	setupLogfiles() can live in LoggingBuildStep.
+
+2006-08-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/locks.py (BaseLock): you can now configure Locks to
+	allow multiple simultaneous owners. They still default to
+	maxCount=1. Fixes SF#1434997. Thanks to James Knight (foom) for
+	the patch.
+	* docs/buildbot.texinfo (Interlocks): document the new options
+	* buildbot/test/test_locks.py: add a bunch of new unit tests
+	* buildbot/process/base.py (Build.acquireLocks): locks now offer
+	waitUntilMaybeAvailable, not waitUntilAvailable
+	* buildbot/process/step.py (BuildStep.acquireLocks): same
+	* buildbot/master.py (BotMaster.getLockByID): real locks now use
+	the whole lockid in their constructor, not just the name. Also,
+	keep track of which real locks we've handed out by the full
+	lockid, not just class+name, otherwise changing just the maxCount=
+	in the master.cfg file would not actually cause a behavioral
+	change
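A master.cfg sketch of the new multiple-owner behavior, assuming the
user-visible MasterLock class and a locks= argument on steps (this entry only
names BaseLock and the acquireLocks plumbing):

    from buildbot import locks
    from buildbot.process.step import ShellCommand  # later: buildbot.steps.shell

    # Up to two builds may hold this lock at once; the default is still
    # maxCount=1 (exclusive).
    compile_lock = locks.MasterLock("compile", maxCount=2)

    f.addStep(ShellCommand(command=["make", "all"], locks=[compile_lock]))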
+
+2006-08-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): bump to 0.7.4+ while between
+	releases
+	* docs/buildbot.texinfo: same
+
+2006-08-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.7.4
+	* docs/buildbot.texinfo: set version to match
+	* NEWS: update for 0.7.4
+	* buildbot/slave/commands.py (command_version): mention that this
+	version (2.1) was released with buildbot-0.7.4
+
+2006-08-22  Brian Warner  <warner at lothar.com>
+
+	* README: update
+
+	* CREDITS: new file, list of people who have helped. Thanks!
+	* MANIFEST.in: ship it
+
+	* MANIFEST.in: stop shipping the old PyCon-2003 paper.. with the
+	new diagrams, the user's manual is more informative than it was.
+	Start shipping the .html user's manual (and generated .png
+	images).
+	* Makefile: update 'release' target to match
+
+	* buildbot/test/test_web.py (GetURL.testBrokenStuff): delete this
+	test.. I think the web-parts effort will render it pointless well
+	before it ever actually starts to work.
+
+2006-08-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/pb.py (PBChangeSource): fix and simplify
+	meaning of the prefix= argument. It is now just a string which is
+	stripped from the beginning of the filename. If prefix= is set but
+	not found on any given filename, that filename is ignored. If all
+	filenames in a Change are ignored, the Change is dropped. This is
+	much simpler than the previous sep= nonsense, and I should have
+	implemented it this way from the beginning. Effectively resolves
+	SF#1217699 and SF#1381867. Thanks to Gary Granger and Marius
+	Gedminas for the catch and suggested fixes.
+	(ChangePerspective.perspective_addChange): implement the actual
+	prefix comparison
+	* buildbot/test/test_changes.py (TestChangePerspective): test it
+	* docs/buildbot.texinfo (PBChangeSource): document it, explain
+	how to properly use prefix=
+	* docs/examples/twisted_master.cfg (source): update prefix= by
+	adding the trailing slash
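The stripping rule described here, as a small illustrative function (not the
actual ChangePerspective code; the filenames are made up):

    def filter_prefix(prefix, filenames):
        # Keep only files that start with prefix, with the prefix removed;
        # a file without the prefix is ignored. If nothing survives, the
        # caller drops the whole Change.
        kept = []
        for name in filenames:
            if name.startswith(prefix):
                kept.append(name[len(prefix):])
        return kept

    filter_prefix("trunk/", ["trunk/setup.py", "branches/exp/setup.py"])
    # -> ["setup.py"]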
+
+
+	* docs/examples/twisted_master.cfg: update to actual practice
+
+	* buildbot/test/test_web.py (WaterfallSteps.test_urls): oops,
+	update test case to match new link text.. the HREF has both a
+	class= setting and an enclosing [] pair that I didn't match in the
+	test.
+
+	* docs/buildbot.texinfo (ShellCommand.command=): explain why a
+	list of strings is preferred over a single string with embedded
+	spaces
+	(ShellCommand.description=): explain that either single strings or
+	a list of strings is acceptable, and why you might prefer one over
+	the other. Add an example. Fixes SF#1524659, thanks to Paul
+	Winkler for the catch.
+	(Build Steps): update to use f.addStep() rather than using s()
+	and the constructor list
+
+	* buildbot/process/step.py (ShellCommand): accept either a single
+	string or a list of strings in both description= and
+	descriptionDone=
+	* buildbot/test/test_steps.py (Steps.test_description): test it
+	* buildbot/test/runutils.py (makeBuildStep): support for that test
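In master.cfg terms (module path as of this entry; these classes later moved
under buildbot.steps):

    from buildbot.process.step import ShellCommand

    # command= as a list avoids shell-quoting surprises; description= and
    # descriptionDone= accept either a single string or a list of strings.
    f.addStep(ShellCommand(command=["make", "check"],
                           description=["running", "tests"],
                           descriptionDone=["tests"]))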
+
+	* contrib/CSS/*.css: add some contributed CSS stylesheets, to make
+	the Waterfall display a bit less ugly. Thanks to John O'Duinn for
+	collecting the files and creating the patch.
+
+	* docs/buildbot.texinfo (BuildStep URLs): document new feature:
+	per-step URLs that will be displayed on the waterfall display,
+	for things like the HTML output of code-coverage tools, when
+	the results are hosted elsewhere.
+	* buildbot/interfaces.py (IBuildStepStatus.getURLs): document the
+	way to retrieve these URLs
+	* buildbot/status/builder.py (BuildStepStatus.getURLs): implement
+	the method to retrieve these URLs. Also provide backwards
+	compatibility for saved BuildStepStatus instances that didn't have
+	the .urls attribute
+	* buildbot/process/step.py (BuildStep.addURL): method to set these
+	URLs from within a BuildStep
+	* buildbot/status/html.py (StepBox.getBox): emit links to the URLs
+	(StepBox.getBox): give these external links a distinct CSS class
+	named "BuildStep external" so a .css file can display them
+	differently
+
+	* buildbot/test/test_web.py (WaterfallSteps): test that we really
+	do emit those links
+	* buildbot/test/test_steps.py (Steps): test that we can add all the
+	URLs. Also add a bunch of other tests on methods that can be
+	called from within BuildSteps
+	* buildbot/test/runutils.py (makeBuildStep): add utility function
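A sketch of a step publishing such a link, assuming addURL(name, url) as the
signature and a made-up coverage URL:

    from buildbot.process.step import ShellCommand  # later: buildbot.steps.shell

    class Coverage(ShellCommand):
        def createSummary(self, log):
            # Attach an external link to this step's waterfall box; it is
            # rendered with the "BuildStep external" CSS class.
            self.addURL("coverage", "http://example.org/coverage/latest/")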
+
+2006-08-13  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (BuildStep LogFiles): document them
+
+2006-08-10  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (Index of master.cfg keys): add another
+	index, this one of things like c['sources'] and c['schedulers']
+	(indices): it looks like my clever idea of putting the @fooindex
+	commands in between the @node and the @subsection (to make the
+	HREF anchor jump to slightly above the section title, which works
+	much better in html) confused texinfo horribly, so I'm moving the
+	index tags back to be just after the @subsection marker. I also
+	added extra lines between the @node/@section paragraph and the
+	index tags, since I think maybe texinfo wants to see these be
+	separate paragraphs.
+
+	* docs/Makefile (images): make sure images get built when
+	rendering the manual
+	* docs/images/Makefile: same
+
+	* buildbot/scripts/runner.py: rename 'buildbot master' to
+	'buildbot create-master', and 'buildbot slave' to 'buildbot
+	create-slave'
+	* docs/buildbot.texinfo: same
+	* README: same
+
+	* docs/buildbot.texinfo: reimplement the "useful classes" index
+	with actual texinfo indices. The .info rendering is a bit
+	weird-looking but it works well, and the HTML rendering is quite
+	nice. This also puts the index targets in the regular flow of the
+	text, which is easier to maintain.
+
+2006-08-06  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (ShellCommand.__init__): patch from
+	Kevin Turner to prefer the environ= argument be a list rather than
+	a string. If it is a list, it will be joined with a platform-local
+	os.pathsep delimiter, and then prepended to any existing
+	$PYTHONPATH value. This works better in cross-platform (i.e.
+	windows buildslaves) environments when you need to push multiple
+	directories onto the front of the path.
+	(SlaveShellCommand): documented the new magic
+	* docs/buildbot.texinfo (ShellCommand): documented the new magic
+	in a user-visible form
+
+	* buildbot/test/test_vc.py (BaseHelper.dovc): patch from Kevin
+	Turner to prefer lists over strings when creating/running VC
+	commands during unit tests. This is clearly necessary to survive
+	vcexe containing spaces, like "C:\Program Files\darcs.exe". I
+	renamed the wq() function to qw() though, since that's how it's
+	spelled in perl. Eventually I'd prefer all commands to be
+	specified with lists.
+
+	* buildbot/slave/commands.py (LogFileWatcher): handle logfiles
+	which are deleted (or not yet created) correctly. Also add
+	failsafe code to not explode if the file-watching poller doesn't
+	get started. Thanks to JP Calderone for the catch and the poller
+	patch.
+	* buildbot/test/test_shell.py (SlaveSide._testLogFiles): add test
+	for that case
+	* buildbot/test/emitlogs.py: same
+
+	* NEWS: summarize recent changes
+
+	* docs/buildbot.texinfo (Debug options): suggest an .ssh/options
+	clause to avoid the "host key mismatch" warning
+
+	* buildbot/process/step_twisted.py (Trial.start): if the
+	buildslave is too old to understand logfiles=, fall back to
+	running 'cat _trial_temp/test.log' like before.
+	(Trial.commandComplete): same. this takes advantage of the
+	LoggingBuildStep refactoring to stall commandComplete long enough
+	to run a second RemoteShellCommand.
+
+	* buildbot/process/step.py (LoggingBuildStep.startCommand):
+	refactor command-completion handling, to allow methods like
+	commandComplete/createSummary/evaluateCommand to return Deferreds.
+	(LoggingBuildStep._commandComplete): delete the refactored method
+	(ShellCommand.setupLogfiles): if the buildslave is too old to
+	understand logfiles=, put a warning message both into twistd.log
+	and into the otherwise empty user-visible LogFiles.
+
+	* buildbot/process/step.py (LoggedRemoteCommand.useLog): allow
+	callers to provide the slave-side logfile name, rather than
+	forcing it to come from the local name of the LogFile.
+	(BuildStep.getSlaveName): new method
+
+	* buildbot/process/base.py (Build.getSlaveName): new method, so
+	steps can find out which buildslave they're running on
+
+	* buildbot/test/test_steps.py (Version.checkCompare): oops, update
+	to match the s/cvs_ver/command_version/ change
+
+2006-08-05  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (command_version): replace the CVS
+	auto-updated cvs_ver keyword with a manually-updated variable,
+	since CVS is no longer the master repository. Add a description of
+	the remote API change starting in this version (2.1), specifically
+	the fact that SlaveShellCommand now accepts 'initial_stdin',
+	'keep_stdin_open', and 'logfiles'.
+
+2006-07-31  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (System Architecture): Finally add lots of
+	diagrams to describe how the whole system fits together. The
+	images themselves are kept in SVG files, with ascii-art versions
+	in corresponding .txt files. Texinfo knows how to interpolate the
+	text version into .info files, reference the .png versions from
+	.html files, and include .eps versions in the .ps format.
+	* docs/images/Makefile: tools to create .png and .eps
+	* docs/images/*.svg: created pictures with Inkscape.
+	* .darcs-boring: ignore the generated .eps and .png files
+
+2006-07-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (BuildMaster.loadConfig): check for duplicate
+	Scheduler names, since they cause setServiceParent to explode
+	later.
+	* buildbot/test/test_config.py (ConfigTest._testSchedulers_7): test it
+
+2006-07-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/sample.cfg: simplify the sample BuildFactory,
+	which runs the buildbot unit tests
+
+	* docs/buildbot.texinfo (Index of Useful Classes): add a table of
+	classes that are useful in master.cfg
+
+2006-07-15  Brian Warner  <warner at lothar.com>
+
+	* Makefile (some-apidocs): new target to build only some epydocs
+
+	* setup.py: minor comment.. does the classifiers= argument prevent
+	the setup.py script from working on python2.2/2.3?
+
+	* buildbot/scripts/sample.cfg: update manhole example, arrange into
+	major sections
+
+	* buildbot/twcompat.py: fix minor typo in comments
+
+	* buildbot/manhole.py: move all Manhole-related code out to this
+	module. Implement SSH-based manholes (with TwistedConch), and move
+	to conch's nifty line-editing syntax-coloring REPL shell instead
+	of the boring non-editing monochromatic (and deprecated) old
+	'telnet' protocol.
+	* buildbot/master.py: remove all Manhole-related code
+	(BuildMaster.loadConfig._add): make sure the old manhole is
+	removed before we add the new one
+	* docs/buildbot.texinfo (Debug options): document new Manhole options
+
+	* buildbot/twcompat.py (_which): fix some epydoc issues
+	* buildbot/status/html.py (Waterfall.__init__): same
+
+2006-06-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/interfaces.py: get Interface from b.twcompat to hush
+	deprecation warnings under newer Twisteds (by using
+	zope.interface.Interface instead of old twisted.python.components
+	stuff)
+	* buildbot/slave/interfaces.py: same
+
+2006-06-28  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (SVN): add --non-interactive to all
+	svn commands, so it will fail immediately instead of hanging while
+	it waits for a username/password to be typed in.
+
+	* buildbot/slave/bot.py (SlaveBuilder.commandComplete): add minor
+	log message if the step was shut down
+
+	* buildbot/scripts/runner.py (SlaveOptions.longdesc): remove
+	obsolete reference to mktap.
+
+2006-06-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_steps.py (BuildStep.testShellCommand1): update
+	test to include new 'logfiles' argument sent from master to slave
+
+2006-06-19  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (Trial): track Progress from
+	_trial_temp/test.log too
+
+	* buildbot/process/step.py (OutputProgressObserver): generalize
+	the earlier StdioProgressObserver into an OutputProgressObserver
+	that can track LogFiles other than stdio.
+	(LoggingBuildStep.__init__): same
+
+	* buildbot/process/step_twisted.py (Trial): use logfiles= to track
+	_trial_temp/test.log, not a separate 'cat' command. TODO: this
+	will fail under windows because of os.sep issues. It might have
+	worked before if 'cat' was doing cygwin path conversion.
+
+	* buildbot/slave/commands.py (LogFileWatcher.__init__): note the
+	creation of LogFileWatchers
+	(ShellCommand._startCommand): and record the files that were
+	watched in the 'headers' section of the ShellCommand output
+
+	* buildbot/process/step.py (RemoteShellCommand.__init__): duh, you
+	need to actually pass it to the slave if you want it to work.
+	(ShellCommand): document it a bit
+
+	* buildbot/test/test_shell.py: new test to validate LogFiles
+	* buildbot/test/runutils.py (SlaveCommandTestBase): updates to
+	test LogFiles
+	* buildbot/test/emitlogs.py: enhance to wait for a line on stdin
+	before printing the last batch of lines, to test that the polling
+	logic is working properly
+
+	* buildbot/process/step.py (LoggedRemoteCommand): improve LogFile
+	handling, making it possible to track multiple logs for a single
+	RemoteCommand. The previous single logfile is now known as the
+	'stdio' log.
+	(LoggedRemoteCommand.remoteUpdate): accept key='log' updates
+	(RemoteShellCommand.__init__): accept logfiles=
+	(LoggingBuildStep.startCommand): stdio_log is now one of many
+	(ShellCommand): added logfiles= argument, as well as a class-level
+	.logfiles attribute, which will be merged together to figure out
+	which logfiles should be tracked. The latter may be useful for
+	subclasses of ShellCommand which know they will always produce
+	secondary logfiles in the same location.
+
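A hedged example of the new argument, assuming the dict maps a log name (as
shown on the waterfall) to a path on the slave, per the Trial change in this
same entry:

    from buildbot.process.step import ShellCommand  # later: buildbot.steps.shell

    # Track a secondary logfile alongside stdio while the command runs.
    f.addStep(ShellCommand(command=["trial", "buildbot.test"],
                           logfiles={"test.log": "_trial_temp/test.log"}))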
+	* buildbot/slave/commands.py (ShellCommandPP): add writeStdin()
+	and closeStdin() methods, preparing to make it possible to write
+	to a ShellCommand's stdin at any time, not just at startup. These
+	writes are buffered if the child process hasn't started yet.
+	(LogFileWatcher): new helper class to watch arbitrary logfiles
+	while a ShellCommand runs. This class polls the file every two
+	seconds, and sends back 10k chunks to the buildmaster.
+	(ShellCommand): rename stdin= to initialStdin=, and add
+	keepStdinOpen= and logfiles= to arguments. Set up LogFileWatchers
+	at startup.
+	(ShellCommand.addLogfile): LogFile text is sent in updates with a
+	key of "log" and a value of (logname, data).
+	(SlaveShellCommand): add 'initial_stdin', 'keep_stdin_open', and
+	'logfiles' to the master-visible args dictionary.
+	(SourceBase.doPatch): match s/stdin/initialStdin/ change
+	(CVS.start): same
+	(P4.doVCFull): same
+	* buildbot/test/test_vc.py (Patch.testPatch): same
+
+
+	* buildbot/test/emit.py: write to a logfile in the current
+	directory. We use this to figure out what was used as a basedir
+	rather than looking to see which copy of emit.py gets run, so that
+	we can run the commands from inside _trial_temp rather than inside
+	buildbot/test
+	* buildbot/test/subdir/emit.py: same
+	* buildbot/test/runutils.py (FakeSlaveBuilder): take a 'basedir'
+	argument rather than running from buildbot/test/
+	(SlaveCommandTestBase.setUpBuilder): explicitly set up the Builder
+	rather than using an implicit setUp()
+	* buildbot/test/test_slavecommand.py (ShellBase.setUp): same
+	(ShellBase.testShell1, etc): use explicit path to emit.py instead
+	of assuming that we're running in buildbot/test/ (and that '.' is
+	on our $PATH)
+
+	* buildbot/slave/commands.py (Command.doStart): refactor Command
+	startup/completion a bit: now the SlaveBuilder calls doStart(),
+	which is not meant for overriding by subclasses, and doStart()
+	calls start(), which is. Likewise the SlaveBuilder calls
+	doInterrupt(), and subclasses override interrupt(). This also puts
+	responsibility for maintaining .running in Command rather than in
+	SlaveBuilder.
+	(Command.doInterrupt): same
+	(Command.commandComplete): same, this is called when the deferred
+	returned by start() completes.
+	* buildbot/slave/bot.py (SlaveBuilder.remote_startCommand): same
+	(SlaveBuilder.remote_interruptCommand): same
+	(SlaveBuilder.stopCommand): same
+
+2006-06-16  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_shell.py: new test file to contain everything
+	relating to ShellCommand
+	(SlaveSide.testLogFiles): (todo) test for the upcoming "watch
+	multiple logfiles in realtime" feature, not yet implemented
+	* buildbot/test/emitlogs.py: support file for testLogFiles
+	* docs/buildbot.texinfo (ShellCommand): document the feature
+
+	* buildbot/test/test_steps.py (BuildStep.setUp): rmtree refactoring
+
+	* buildbot/test/runutils.py (SlaveCommandTestBase): utility class
+	for tests which exercise SlaveCommands in isolation.
+
+	* buildbot/test/test_slavecommand.py: Move some utilities like
+	SignalMixin and FakeSlaveBuilder from here ..
+	* buildbot/test/runutils.py: .. to here, so they can be used by
+	other test classes too
+	* buildbot/test/test_vc.py: more SignalMixin refactoring
+	* buildbot/test/test_control.py: same
+	* buildbot/test/test_run.py: and some rmtree refactoring
+
+2006-06-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (P4.testCheckoutBranch): rename from
+	'testBranch' to match other VC tests and have the tests run in
+	roughly increasing order of dependency
+
+	* buildbot/test/test_steps.py (LogObserver): new test to verify
+	LogObservers can be created at various times and still get
+	connected up properly
+
+	* buildbot/test/runutils.py (setupBuildStepStatus): utility method
+	to create BuildStepStatus instances that actually work.
+
+	* buildbot/process/step.py (LogObserver): add outReceived and
+	errReceived base methods, to be overridden
+
+	* buildbot/status/builder.py (BuildStatus.addStepWithName): change
+	API to take a name instead of a step, reducing the coupling
+	somewhat. This returns the BuildStepStatus object so it can be
+	passed to the new Step, instead of jamming it directly into the
+	Step.
+	* buildbot/process/step.py (BuildStep.setStepStatus): add a setter
+	method
+	* buildbot/process/base.py (Build.setupBuild): use both methods
+	* buildbot/test/test_web.py (Logfile.setUp): rearrange the setup
+	process a bit to match
+
+2006-06-14  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (Adding LogObservers): add some limited
+	docs on writing new LogObserver classes
+	(Writing New Status Plugins): brief docs on how Status Plugins fit
+	together
+
+	* buildbot/process/step_twisted.py (TrialTestCaseCounter):
+	implement a LogObserver that counts how many unit tests have been
+	run so far
+	(Trial.__init__): wire it in
+	* buildbot/test/test_twisted.py (Counter): unit test for it
+
+	* buildbot/process/step_twisted.py (HLint.commandComplete): update
+	to new cmd.logs['stdio'] scheme
+	(Trial.commandComplete): same
+	(BuildDebs.commandComplete): same
+
+	* buildbot/process/step.py (LoggedRemoteCommand): use a dict of
+	LogFiles, instead of just a single one. The old single logfile is
+	now called "stdio". LoggedRemoteCommand no longer creates a
+	LogFile for you (the code to do that was broken anyway). If you
+	don't create a "stdio" LogFile, then stdout/stderr will be
+	discarded.
+	(LogObserver): implement "LogObservers", which a BuildStep can add
+	to parse the output of a command in real-time. The primary use is
+	to provide more useful information to the Progress code, allowing
+	better ETA estimates.
+	(LogLineObserver): utility subclass which feeds complete lines to
+	the parser instead of bytes.
+	(BuildStep.progressMetrics): this is safer as a tuple
+	(BuildStep.setProgress): utility method, meant to be called by
+	LogObservers
+	(BuildStep.addLogObserver): new method, to be called at any time
+	during the BuildStep (even before any LogFiles have been created),
+	to attach (or schedule for eventual attachment) a LogObserver to a
+	LogFile.
+	(StdioProgressObserver): new LogObserver which replaces the old
+	"output" progress gatherer
+	(LoggingBuildStep.__init__): same
+	(LoggingBuildStep.startCommand): set up the "stdio" LogFile
+	(LoggingBuildStep._commandComplete): must use logs['stdio']
+	instead of the old single ".log" attribute.
+	* buildbot/status/builder.py (LogFile): remove old logProgressTo
+	functionality, now subsumed into StdioProgressObserver
+	* buildbot/test/test_status.py (Subscription._testSlave_2): the
+	log name changed from "output" to "stdio".
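+
+	(illustrative sketch, not part of this change: a minimal
+	LogLineObserver in the style described above; the 'lines'
+	progress metric, the outLineReceived method name, and the
+	observer's .step back-reference are assumptions)
+
+	  from buildbot.process.step import LogLineObserver
+
+	  class LineCounter(LogLineObserver):
+	      # counts complete stdout lines and feeds the count to the
+	      # step's Progress tracking via setProgress()
+	      def __init__(self):
+	          LogLineObserver.__init__(self)
+	          self.numLines = 0
+	      def outLineReceived(self, line):
+	          self.numLines += 1
+	          self.step.setProgress('lines', self.numLines)
+
+	  # inside a BuildStep, attach it to the 'stdio' log at any time:
+	  #   self.addLogObserver('stdio', LineCounter())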
+
+
+	* buildbot/interfaces.py (ILogFile): add the Interface used from
+	the BuildStep towards the LogFile
+	(ILogObserver): and the one provided by a LogObserver
+	* buildbot/status/builder.py (LogFile): implement it
+
+	* buildbot/interfaces.py (LOG_CHANNEL_*): move STDOUT / STDERR /
+	HEADER constants here ..
+	* buildbot/status/builder.py (STDOUT): .. from here
+
+2006-06-13  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_p4poller.py (TestP4Poller.failUnlessIn): fix
+	compatibility with python2.2, which doesn't have the 'substr in
+	str' feature.
+	(TestP4Poller.makeTime): utility function to construct the
+	timestamp using the same strptime() approach as p4poller does. It
+	turns out that time.mktime() behaves slightly differently under
+	python2.2, probably something to do with the DST flag, and that
+	causes the test to fail under python2.2. (changing the mktime()
+	arguments to have dst=0 instead of -1 caused it to fail under
+	python2.3. Go figure.)
+	(TestP4Poller._testCheck3): use our makeTime() instead of mktime()
+
+2006-06-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (P4): merge in patch SF#1473939, adding
+	proper Perforce (P4) support. Many many thanks to Scott Lamb for
+	contributing such an excellent patch, including docs and unit
+	tests! This makes it *so* much easier to apply. I had to update
+	test_vc.py to handle some recent refactorings, but everything else
+	applied smoothly. The only remaining thing I'd like to fix would
+	be to remove the hard-wired port 1666 used by p4d, and allow it to
+	claim any unused port. This would allow two copies of the test
+	suite to run on the same host at the same time, as well as
+	allowing the test suite to run while a real (production) p4d was
+	running on the same host. Oh, and maybe we should add a warning to
+	step.P4 that gets emitted if the slave is too old to provide the
+	'p4' SlaveCommand. Otherwise it looks great. (closes: SF#1473939).
+	* buildbot/slave/commands.py (P4): same
+	(P4Sync): same, some minor updates
+	* buildbot/changes/p4poller.py: same
+	* docs/buildbot.texinfo: same
+	* buildbot/test/test_p4poller.py: same
+	* buildbot/test/test_vc.py (P4): same
+
+	* setup.py: add Trove classifiers for PyPI
+
+2006-06-08  Brian Warner  <warner at allmydata.com>
+
+	* buildbot/status/client.py
+	(RemoteBuilder.remote_getCurrentBuilds): oops, I screwed up when
+	changing this from getCurrentBuild() to getCurrentBuilds(). Each
+	build needs to be IRemote'd separately, rather than IRemote'ing
+	the whole list at once. I can't wait until newpb's serialization
+	adapters make this unnecessary.
+
+2006-06-06  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (WithProperties): make this inherit
+	from ComparableMixin, so that reloading an unchanged config file
+	doesn't cause us to spuriously reload any Builders which use them.
+	* buildbot/test/test_config.py (ConfigTest.testWithProperties):
+	add a test for it
+
+2006-06-03  Brian Warner  <warner at lothar.com>
+
+	* contrib/windows/{setup.py, buildbot_service.py}: add support for
+	running py2exe on windows, contributed by Mark Hammond. Addresses
+	SF#1401121, but I think we still need to include
+	buildbot/scripts/sample.cfg
+	* setup.py: include buildbot_service.py as a script under windows
+	* buildbot/status/html.py: when sys.frozen (i.e. we're running in
+	a py2exe application), get the icon/css datafiles from a different
+	place than usual.
+
+	* buildbot/status/mail.py (MailNotifier.buildMessage): don't
+	double-escape the build URL. Thanks to Olivier Bonnet for the
+	patch. Fixes SF#1452801.
+
+2006-06-02  Brian Warner  <warner at lothar.com>
+
+	* contrib/svn_buildbot.py (ChangeSender.getChanges): ignore the
+	first six columns of 'svnlook' output, not just the first column,
+	since property changes appear in the other five. Thanks to Olivier
+	Bonnet for the patch. Fixes SF#1398174.
+
+2006-06-01  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_web.py (Logfile.setUp): set the .reason on
+	the fake build, so that title= has something to be set to
+
+	* buildbot/status/html.py (BuildBox.getBox): set the 'title='
+	attribute of the "Build #NN" link in the yellow start-the-build
+	box to the build's reason. This means that you get a little
+	tooltip explaining why the build was done when you hover over the
+	yellow box. Thanks to Zandr Milewski for the suggestion.
+
+	* buildbot/clients/gtkPanes.py (Box.setColor): ignore color=None
+	(Box.setETA): handle ETA=None (by stopping the timer)
+	(Box.update): make the [soon] text less different than the usual
+	text, so the rest of the text doesn't flop around so much. It
+	would be awfully nice to figure out how to center this stuff.
+	(ThreeRowBuilder.stepETAUpdate): more debugging printouts
+
+	* buildbot/process/step.py (ShellCommand): set flunkOnFailure=True
+	by default, so that any ShellCommand which fails marks the overall
+	build as a failure. I should have done this from the beginning.
+	Add flunkOnFailure=False to the arguments if you want to turn off
+	this behavior.
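+
+	(sketch of the opt-out, using an assumed command and factory;
+	not part of the change itself)
+
+	  from buildbot.process.factory import BuildFactory, s
+	  from buildbot.process.step import ShellCommand
+
+	  f = BuildFactory()
+	  # this step's failure will no longer flunk the whole build
+	  f.steps.append(s(ShellCommand, command=["make", "lint"],
+	                   flunkOnFailure=False))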
+
+2006-05-30  Brian Warner  <warner at lothar.com>
+
+	* buildbot/clients/gtkPanes.py: add a third row: now it shows
+	last-build/current-build/current-step. Show what step is currently
+	running. Show ETA for both the overall build and the current step.
+	Update GTK calls to modern non-deprecated forms. There's still a
+	lot of dead code and debug noise to remove.
+
+	* buildbot/process/step_twisted.py (Trial): set the step name, so it
+	shows up properly in status displays
+
+2006-05-28  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_properties.py (Run.testInterpolate): on the
+	build we use to verify that WithProperties works:
+
+	** set flunkOnFailure=True so that build failures get noticed
+	** set workdir='.' so that the build succeeds, otherwise it is trying
+	    to touch 'build/something', and 'build/' doesn't exist because
+	    usually that's created by a Source step
+	** set timeout=10, because Twisted-1.3.0 has a race condition that
+	    this test somehow triggers, in which the 'touch' process becomes
+	    a zombie and we wait for the timeout before giving up on it.
+
+	* buildbot/test/runutils.py (RunMixin.logBuildResults): utility method
+	to log the Build results and step logs to the twisted log.
+	(RunMixin.failUnlessBuildSucceeded): use logBuildResults to record
+	what went wrong if a build was expected to succeed but didn't.
+
+	* buildbot/process/step_twisted.py (Trial): set the default
+	trialMode to '--reporter=bwverbose', which specifies verbose
+	black-and-white text. Back in twisted-1.3/2.0 days we had to use
+	'-to', but those are completely missing in modern Twisteds.
+
+	* buildbot/scripts/sample.cfg: make the sample Manhole config use
+	a localhost-only port, to encourage better security
+
+	* docs/buildbot.texinfo (Change Sources): mention
+	darcs_buildbot.py
+
+	* .darcs-boring: add a Darcs boringfile
+
+	* README (REQUIREMENTS): stop claiming compatibility with
+	Twisted-1.3.0
+
+	* contrib/darcs_buildbot.py: write a darcs-commit-hook change
+	sender
+
+2006-05-27  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py: bump to 0.7.3+ while between releases
+	* docs/buildbot.texinfo: same
+
+2006-05-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.7.3
+	* docs/buildbot.texinfo: set version to match
+	* NEWS: update for 0.7.3
+
+	* docs/buildbot.texinfo (Change Sources): mention hg_buildbot.py,
+	give a quick mapping from VC system to possible ChangeSources
+	(Build Properties): add 'buildername'
+
+	* buildbot/process/base.py (Build.setupStatus): oops, set
+	'buildername' and 'buildnumber' properties
+	* buildbot/test/test_properties.py (Interpolate.testBuildNumber):
+	test them
+
+2006-05-22  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (Build Properties): explain the syntax of
+	property interpolation better
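+
+	(a minimal sketch of the interpolation syntax, with assumed file
+	names; not part of the change itself)
+
+	  from buildbot.process.factory import s
+	  from buildbot.process.step import ShellCommand, WithProperties
+
+	  # interpolate the 'revision' build property into the command
+	  step = s(ShellCommand,
+	           command=["tar", "czf",
+	                    WithProperties("build-%s.tar.gz", "revision"),
+	                    "source"])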
+
+	* README (INSTALLATION): remove old '-v' argument from recommended
+	trial command line
+
+	* docs/buildbot.texinfo (ShellCommand): add docs for description=
+	and descriptionDone= arguments. Thanks to Niklaus Giger for the
+	patch. SF#1475494.
+
+	* buildbot/slave/commands.py (SVN.parseGotRevision._parse): use
+	'svnversion' instead of grepping the output of 'svn info', much
+	simpler and avoids CR/LF problems on windows. Thanks to Olivier
+	Bonnet for the suggestion.
+	(SVN.parseGotRevision): oops, older versions of 'svnversion'
+	require the WC_PATH argument, so run 'svnversion .' instead.
+
+	* buildbot/interfaces.py (IChangeSource): methods in Interfaces
+	aren't supposed to have 'self' in their argument list
+
+2006-05-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (ShellCommand.start): make
+	testInterpolate pass. I was passing the uninterpolated command to
+	the RemoteShellCommand constructor
+	(ShellCommand._interpolateProperties): oops, handle non-list
+	commands (i.e. strings with multiple words separated by spaces in
+	them) properly, instead of forgetting about them.
+
+	* buildbot/test/test_properties.py (Run.testInterpolate): new test
+	to actually try to use build properties in a real build. This test
+	fails.
+	* buildbot/test/runutils.py (RunMixin.requestBuild): utility methods
+	to start and evaluate builds
+
+	* buildbot/test/test__versions.py: add a pseudo-test to record
+	what version of Twisted/Python/Buildbot are running. This should
+	show up at the beginning of _trial_tmp/test.log, and exists to help
+	debug other problems.
+
+	* buildbot/status/html.py (Waterfall): add 'robots_txt=' argument,
+	a filename to be served as 'robots.txt' to discourage web spiders.
+	Adapted from a patch by Tobi Vollebregt, thanks!
+	* buildbot/test/test_web.py (Waterfall._test_waterfall_5): test it
+	(Waterfall.test_waterfall): tweak the way that filenames are put
+	into the config file, to accommodate windows pathnames better.
+
+	* docs/buildbot.texinfo (HTML Waterfall): document it
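+
+	(sketch of the new argument, with an assumed path; not part of
+	the change itself)
+
+	  from buildbot.status.html import Waterfall
+	  c['status'].append(Waterfall(http_port=8010,
+	                               robots_txt="~/master/robots.txt"))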
+
+	* buildbot/process/process_twisted.py
+	(QuickTwistedBuildFactory.__init__): recent versions of Twisted
+	changed the build process. The new setup.py no longer takes the
+	'all' argument.
+	(FullTwistedBuildFactory.__init__): same
+	(TwistedReactorsBuildFactory.__init__): same
+
+	* contrib/hg_buildbot.py: wrote a commit script for mercurial, to
+	be placed in the [hooks] section of the central repository (the
+	one that everybody pushes changes to).
+
+2006-05-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (Darcs.doVCFull): when writing the
+	.darcs-context file, use binary mode. I think this was causing a
+	Darcs failure under windows.
+
+2006-05-19  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/tryclient.py (CVSExtractor.getBaseRevision):
+	use a timezone string of +0000 and gmtime, since this timestamp is
+	sent to a buildmaster and %z is broken.
+
+	* buildbot/test/test_vc.py (CVSHelper.getdate): use no timezone
+	string and localtime, since this timestamp will only be consumed
+	locally, and %z is broken.
+
+	* buildbot/slave/commands.py (CVS.parseGotRevision): use +0000 and
+	gmtime, since this timestamp is returned to the buildmaster, and
+	%z is broken.
+
+2006-05-18  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update in preparation for next release
+
+	* buildbot/test/test_vc.py (VCS_Helper): factor out all the
+	setup-repository and do-we-have-the-vc-tools code into a separate
+	"helper" class, which sticks around in a single module-level
+	object. This seems more likely to continue to work in the future
+	than having it hide in the TestCase and hope that TestCases stick
+	around for a long time.
+
+	* buildbot/test/test_vc.py (MercurialSupport.vc_create): 'hg
+	addremove' has been deprecated in recent versions of mercurial, so
+	use 'hg add' instead
+
+2006-05-07  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scheduler.py (Try_Jobdir.messageReceived): when
+	operating under windows, move the file before opening it, since
+	you can't rename a file that somebody has open.
+
+	* buildbot/process/base.py (Build.setupBuild): if something goes
+	wrong while creating a Step, log the name and arguments, since the
+	error message when you get the number of arguments wrong is really
+	opaque.
+
+2006-05-06  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (Trial.setupEnvironment): more
+	bugs in twisted-specific code not covered by my unit tests, this
+	time use 'cmd' argument instead of self.cmd
+
+	* buildbot/process/process_twisted.py (TwistedBuild.isFileImportant):
+	fix stupid braino: either use startswith or find()==0, not both.
+	(TwistedReactorsBuildFactory.__init__): another dumb typo
+
+	* buildbot/test/test_slavecommand.py (ShellBase.testInterrupt1): 
+	mark this test as TODO under windows, since process-killing seems
+	dodgy there. We'll come back to this later and try to fix it
+	properly.
+
+	* buildbot/test/test_vc.py (CVSSupport.getdate): use localtime,
+	and don't include a timezone
+	(CVSSupport.vc_try_checkout): stop trying to strip the timezone.
+	This should avoid the windows-with-verbose-timezone-name problem
+	altogether.
+	(Patch.testPatch): add a test which runs 'patch' with less
+	overhead than the full VCBase.do_patch sequence, to try to isolate
+	a windows test failure. This one uses slave.commands.ShellCommand
+	and 'patch', but none of the VC code.
+
+	* buildbot/slave/commands.py (getCommand): use which() to find the
+	executables for 'cvs', 'svn', etc. This ought to help under
+	windows.
+
+	* buildbot/test/test_vc.py (VCBase.do_getpatch): Delete the
+	working directory before starting. If an earlier test failed, the
+	leftover directory would mistakenly flunk a later test.
+	(ArchCommon.registerRepository): fix some tla-vs-baz problems.
+	Make sure that we use the right commandlines if which("tla") picks
+	up "tla.exe" (as it does under windows).
+	(TlaSupport.do_get): factor out this tla-vs-baz difference
+	(TlaSupport.vc_create): more tla-vs-baz differences
+
+	* buildbot/test/test_slavecommand.py
+	(ShellBase.testShellMissingCommand): stop trying to assert
+	anything about the error message: different shells on different
+	OSes with different languages makes it hard, and it really isn't
+	that interesting of a thing to test anyway.
+
+	* buildbot/test/test_vc.py (CVSSupport.capable): skip CVS tests if
+	we detect cvs-1.10 (which is the version shipped with OS-X 10.3
+	"Panther"), because it has a bug which flunks a couple tests in
+	weird ways. I've checked that cvs-1.12.9 (as shipped with debian)
+	is ok. OS-X 10.4 "Tiger" ships with cvs-1.11, but I haven't been
+	able to test that yet.
+
+2006-04-30  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (VCBase.runCommand): set $LC_ALL="C" to
+	make sure child commands emit messages in english, so our regexps
+	will match. Thanks to Niklaus Giger for identifying the problems.
+	(VCBase._do_vctest_export_1): mode="export" is not responsible
+	for setting the "got_revision" property, since in many cases it is
+	not convenient to determine.
+	(SVNSupport.capable): when running 'svn --version' to check for
+	ra_local, we want error messages in english
+	* buildbot/test/test_slavecommand.py 
+	(ShellBase.testShellMissingCommand): set $LC_ALL="C" to get bash
+	to emit the error message in english
+
+	* buildbot/slave/commands.py (SourceBase.setup): stash a copy of
+	the environment with $LC_ALL="C" so that Commands which need to
+	parse the output of their child processes can obtain it in
+	english.
+	(SVN.parseGotRevision): call "svn info" afterwards instead of
+	watching the output of the "svn update" or "svn checkout".
+	(Darcs.parseGotRevision): use $LC_ALL="C" when running the command
+	(Arch.parseGotRevision): same
+	(Bazaar.parseGotRevision): same
+	(Mercurial.parseGotRevision): same
+
+	* buildbot/scripts/tryclient.py (SourceStampExtractor.dovc): set
+	$LC_ALL="C" when running commands under 'buildbot try', too
+
+	* buildbot/test/__init__.py: remove the global os.environ()
+	setting, instead we do it just for the tests that run commands and
+	need to parse their output.
+
+	* buildbot/test/test_scheduler.py (Scheduling.testTryJobdir):
+	remove the overly-short .timeout on this test, because non-DNotify
+	platforms must fall back to polling which happens at 10 second
+	intervals, so a 5 second timeout would never succeed.
+
+2006-04-24  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (Installing the code): update trial
+	invocation, SF#1469116 by Niklaus Giger.
+	(Attributes of Changes): updated branch-name examples to be
+	a bit more realistic, SF#1475240 by Stephen Davis.
+
+	* contrib/windows/buildbot2.bat: utility wrapper for windows
+	developers, contributed by Nick Trout (after a year of neglect..
+	sorry!). SF#1194231.
+
+	* buildbot/test/test_vc.py (*.capable): store the actual VC
+	binary's pathname in VCS[vcname], so it can be retrieved later
+	(CVSSupport.vc_try_checkout): incorporate Niklaus Giger's patch to
+	strip out non-numeric timezone information, specifically the funky
+	German string that his system produced that confuses CVS.
+	(DarcsSupport.vc_create): use dovc() instead of vc(), this should
+	allow Darcs tests to work on windows
+	* buildbot/scripts/tryclient.py (SourceStampExtractor): use
+	procutils.which() everywhere, to allow tryclient to work under
+	windows. Also from Niklaus Giger, SF#1463394.
+
+	* buildbot/twcompat.py (which): move the replacement for a missing
+	twisted.python.procutils.which from test_vc.py to here, so it can
+	be used in other places too (specifically tryclient.py)
+
+2006-04-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/html.py (StatusResourceBuild.body): replace the
+	bare buildbotURL/projectName line with a proper DIV, along with a
+	CSS class of "title", from Stefan Seefeld (SF#1461675).
+	(WaterfallStatusResource.body0): remove the redundant 'table'
+	class from the table
+	(WaterfallStatusResource.body): same. Also add class="LastBuild"
+	to the top-row TR, and class="Activity" to the second-row TR,
+	rather than putting them in the individual TD nodes.
+
+	* buildbot/test/test_vc.py (VCBase.checkGotRevision): test
+	'got_revision' build property for all VC systems that implement
+	accurate ones: SVN, Darcs, Arch, Bazaar, Mercurial.
+
+	* buildbot/slave/commands.py (SourceBase._handleGotRevision): try
+	to determine which revision we actually obtained
+	(CVS.parseGotRevision): implement this for CVS, which just means
+	to grab a timestamp. Not ideal, and it depends upon the buildslave
+	having a clock that is reasonably well synchronized with the server,
+	but it's better than nothing.
+	(SVN.parseGotRevision): implement it for SVN, which is accurate
+	(Darcs.parseGotRevision): same
+	(Arch.parseGotRevision): same
+	(Bazaar.parseGotRevision): same
+	(Mercurial.parseGotRevision): same
+
+	* buildbot/process/step.py (LoggedRemoteCommand.remoteUpdate):
+	keep a record of all non-stdout/stderr/header/rc status updates,
+	for the benefit of RemoteCommands that send other useful things,
+	like got_revision
+	(Source.commandComplete): put any 'got_revision' status values
+	into a build property of the same name
+
+
+	* buildbot/process/step_twisted.py (Trial): update to deal with
+	new ShellCommand refactoring
+
+	* docs/buildbot.texinfo (Build Properties): document new feature
+	that allows BuildSteps to get/set Build-wide properties like which
+	revision was requested and/or checked out.
+
+	* buildbot/interfaces.py (IBuildStatus.getProperty): new method
+	* buildbot/status/builder.py (BuildStatus.getProperty): implement
+	it. Note that this bumps the persistenceVersion of the saved Build
+	object, so add the necessary upgrade-old-version logic to include
+	an empty properties dict.
+
+	* buildbot/process/base.py (Build.setProperty): implement it
+	(Build.getProperty): same
+	(Build.startBuild): change build startup to set 'branch',
+	'revision', and 'slavename' properties at the right time
+
+	* buildbot/process/step.py (BuildStep.__init__): change setup to
+	require 'build' argument in a better way
+	(LoggingBuildStep): split ShellCommand into two pieces, for better
+	subclassing elsewhere. LoggingBuildStep is a BuildStep which runs
+	a single RemoteCommand that sends stdout/stderr status text. It
+	also provides the usual commandComplete / createSummary /
+	evaluateCommand / getText methods to be overridden...
+	(ShellCommand): .. whereas ShellCommand is specifically for
+	running RemoteShellCommands. Other shell-like BuildSteps (like
+	Source) can inherit from LoggingBuildStep instead of ShellCommand
+	(WithProperties): marker class to do build-property interpolation
+	(Source): inherit from LoggingBuildStep instead of ShellCommand
+	(RemoteDummy): same
+
+	* buildbot/test/test_properties.py: test new functionality
+
+2006-04-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py: rename testBranch to
+	testCheckoutBranch to keep the tests in about the right
+	alphabetical order
+
+2006-04-18  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (PBListener): improve cross-references
+	between PBListener and 'buildbot statusgui', thanks to John Pye
+	for the suggestion.
+
+2006-04-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/twcompat.py (maybeWait): handle SkipTest properly when
+	running under Twisted-1.3.0, otherwise skipped tests are reported
+	as errors.
+
+	* all: use isinstance() instead of 'type(x) is foo', suggested by
+	Neal Norwitz
+
+	* buildbot/process/process_twisted.py (QuickTwistedBuildFactory):
+	oops, fix a brain-fade from the other week, when making the
+	addStep changes. I changed all the __init__ upcalls to use the
+	wrong superclass name.
+	(FullTwistedBuildFactory.__init__): same
+	(TwistedDebsBuildFactory.__init__): same
+	(TwistedReactorsBuildFactory.__init__): same
+	(TwistedBuild.isFileImportant): use .startswith for clarity,
+	thanks to Neal Norwitz for the suggestions.
+
+	* contrib/viewcvspoll.py: script to poll a viewcvs database for
+	changes, then deliver them over PB to a remote buildmaster.
+
+	* contrib/svnpoller.py: added script by John Pye to poll a remote
+	SVN repository (by running 'svn log') from a cronjob, and run
+	'buildbot sendchange' to deliver the changes to a remote
+	buildmaster.
+	* contrib/svn_watcher.py: added script by Niklaus Giger (a
+	modification of svnpoller.py), same purpose, but this one loops
+	internally (rather than expecting to run from a cronjob) and works
+	under windows.
+	* contrib/README.txt: same
+
+2006-04-11  Brian Warner  <warner at lothar.com>
+
+	* all: fix a number of incorrect names and missing imports, thanks
+	to Anthony Baxter for the patch.
+	* buildbot/status/html.py (WaterfallStatusResource.statusToHTML): 
+	remove unused buggy method.
+	* buildbot/status/builder.py (BuildStatus.saveYourself): rmtree
+	comes from shutil, not "shutils"
+	* buildbot/process/step.py (TreeSize.evaluateCommand): fix bad name
+	(Arch.checkSlaveVersion): same
+	* buildbot/process/step_twisted.py (Trial.commandComplete): same, in
+	some disabled code
+	* buildbot/process/step_twisted2.py: add some missing imports
+	* buildbot/twcompat.py (_deferGenerator): fix cut-and-paste error,
+	this code used to live in twisted.internet.defer
+
+2006-04-10  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (Mercurial): add Mercurial support
+	* buildbot/slave/commands.py (Mercurial): same
+	* buildbot/scripts/tryclient.py (MercurialExtractor): same
+	* buildbot/test/test_vc.py (Mercurial): same, checkout over HTTP is
+	not yet tested, but 'try' support *is* covered
+	* docs/buildbot.texinfo (Mercurial): document it
+
+	* buildbot/process/step.py (LoggedRemoteCommand.remoteUpdate): add
+	some debugging messages (turned off)
+	* buildbot/test/test_vc.py: improve debug messages
+
+2006-04-07  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (which): define our own which() in case
+	we can't import twisted.python.procutils, because procutils doesn't
+	exist in Twisted-1.3
+
+	* docs/buildbot.texinfo (Interlocks): fix some typos, mention use
+	of SlaveLocks for performance tests
+
+	* docs/examples/twisted_master.cfg: update to match current usage
+
+	* buildbot/changes/p4poller.py (P4Source): add new arguments:
+	password, p4 binary, pollinterval, maximum history to check.
+	Patch from an anonymous sf.net contributor, SF#1219384.
+	* buildbot/process/step.py (P4Sync.__init__): add username,
+	password, and client arguments.
+	* buildbot/slave/commands.py (P4Sync): same
+
+2006-04-05  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/factory.py (BuildFactory.addStep): new method
+	to add steps to a BuildFactory. Use it instead of f.steps.append,
+	and you can probably avoid using the s() convenience function.
+	Patch from Neal Norwitz, sf.net #1412605.
+	(other): update all factories to use addStep
+	* buildbot/process/process_twisted.py: update all factories to use
+	addStep.
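+
+	(before/after sketch, with an assumed command; not part of the
+	change itself)
+
+	  from buildbot.process.factory import BuildFactory, s
+	  from buildbot.process.step import ShellCommand
+
+	  f = BuildFactory()
+	  # old style:
+	  #   f.steps.append(s(ShellCommand, command=["make", "test"]))
+	  # new style:
+	  f.addStep(ShellCommand, command=["make", "test"])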
+
+2006-04-03  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py: modified find-the-VC-command logic to
+	work under windows too. Adapted from a patch by Niklaus Giger,
+	addresses SF#1463399.
+
+	* buildbot/test/__init__.py: set $LANG to 'C', to ensure that
+	spawned commands emit parseable results in english and not some
+	other language. Patch from Niklaus Giger, SF#1463395.
+
+	* README (INSTALLATION): discourage users from running unit tests on
+	a "network drive", patch from Niklaus Giger, SF#1463394.
+
+2006-03-22  Brian Warner  <warner at lothar.com>
+
+	* contrib/svn_buildbot.py: rearrange, add an easy-to-change
+	function to turn a repository-relative pathname into a (branch,
+	branch-relative-filename) tuple. Change this function to handle
+	the branch naming policy used by your Subversion repository.
+	Thanks to AllMyData.com for sponsoring this work.
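+
+	(sketch of such a function, assuming the common trunk/branches
+	repository layout; names and policy are assumptions, not part of
+	the change itself)
+
+	  def split_file_branches(path):
+	      # map 'trunk/foo.c' -> (None, 'foo.c') and
+	      # 'branches/1.2/foo.c' -> ('branches/1.2', 'foo.c')
+	      pieces = path.split('/')
+	      if pieces[0] == 'trunk':
+	          return (None, '/'.join(pieces[1:]))
+	      if pieces[0] == 'branches' and len(pieces) > 1:
+	          return ('/'.join(pieces[:2]), '/'.join(pieces[2:]))
+	      return None  # ignore files outside trunk/ and branches/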
+
+2006-03-16  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/sample.cfg: add python-mode declaration for
+	vim. Thanks to John Pye for the patch.
+
+	* docs/buildbot.texinfo (Launching the daemons): fix @reboot job
+	command line, mention the importance of running 'crontab' as the
+	buildmaster/buildslave user. Thanks to John Pye for the catch.
+
+2006-03-13  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py (IRC): add an optional password=
+	argument, which will be sent to Nickserv in an IDENTIFY message at
+	login, to claim the nickname. freenode requires this before the
+	bot can send (or reply to) private messages. Thanks to Clement
+	Stenac for the patch.
+	* docs/buildbot.texinfo (IRC Bot): document it
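+
+	(sketch with assumed host/nick/channel values; not part of the
+	change itself)
+
+	  from buildbot.status.words import IRC
+	  c['status'].append(IRC(host="irc.freenode.net", nick="bbot",
+	                         channels=["#myproject"],
+	                         password="nickserv-secret"))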
+
+	* buildbot/status/builder.py (LogFile.merge): don't write chunks
+	larger than chunkSize. Fixes SF#1349253.
+	* buildbot/test/test_status.py (Log.testLargeSummary): test it
+	(Log.testConsumer): update to match new internal chunking behavior
+
+2006-03-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py: remove the last use of waitForDeferred
+
+	* buildbot/test/test_maildir.py (MaildirTest): rename the
+	'timeout' method, as it collides with trial's internals
+
+	* buildbot/scripts/runner.py: add 'buildbot restart' command
+	(stop): don't sys.exit() out of here, otherwise restart can't work
+	* docs/buildbot.texinfo (Shutdown): document it
+
+	* buildbot/buildset.py (BuildSet.__init__): clean up docstring
+	* buildbot/status/html.py (Waterfall.__init__): same
+	* buildbot/process/builder.py (Builder.startBuild): same
+	* buildbot/process/base.py (BuildRequest): same
+	* buildbot/sourcestamp.py (SourceStamp): same
+	* buildbot/scheduler.py (Nightly): same
+
+	* buildbot/__init__.py (version): bump to 0.7.2+ while between
+	releases
+	* docs/buildbot.texinfo: same
+
+2006-02-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.7.2
+	* docs/buildbot.texinfo: set version number to match
+	* NEWS: update for 0.7.2
+
+2006-02-16  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (Build Dependencies): add cindex tag
+
+2006-02-09  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (How Different VC Systems Specify Sources):
+	add text to explain per-build branch parameters
+	* NEWS: mention --umask
+
+2006-02-08  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py (Maker.makeSlaveTAC): remove unused
+	method
+	(SlaveOptions.optParameters): add --umask, to make it possible to
+	make buildslave-generated files (including build products) be
+	world-readable
+	(slaveTAC): same
+	* buildbot/slave/bot.py (BuildSlave.startService): same
+
+2006-01-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py: urllib.quote() all URLs that include
+	Builder names, so that builders can include characters like '/'
+	and ' ' without completely breaking the resulting HTML. Thanks to
+	Kevin Turner for the patch.
+	* buildbot/status/html.py: same
+	* buildbot/test/test_web.py (GetURL.testBuild): match changes
+
+	* NEWS: update in preparation for upcoming release
+
+2006-01-18  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: update to match the Twisted
+	buildbot: remove python2.2, switch to exarkun's buildslaves,
+	disable the .deb builder until we figure out how to build twisted
+	.debs from SVN, add some ktrace debugging to the OS-X build
+	process and remove the qt build, remove threadless builders,
+	change freebsd builder to use landonf's buildslave.
+
+2006-01-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (Manhole.__init__): let port= be a strports
+	specification string, but handle a regular int for backwards
+	compatibility. This allows "tcp:12345:interface=127.0.0.1" to be
+	used in master.cfg to limit connections to just the local host.
+	(BuildMaster.loadConfig): same for c['slavePortnum']
+	* buildbot/scheduler.py (Try_Userpass.__init__): same
+	* buildbot/status/client.py (PBListener.__init__): same
+	* buildbot/status/html.py (Waterfall.__init__): same, for both
+	http_port and distrib_port. Include backwards-compatibility checks
+	so distrib_port can be a filename string and still mean unix:/foo
+	* docs/buildbot.texinfo (Setting the slaveport): document it
+	(Debug options): same
+	(HTML Waterfall): same
+	(PBListener): same
+	(try): same
+	* buildbot/test/test_config.py (ConfigTest): test it
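+
+	(master.cfg sketch showing both forms; port numbers, credentials,
+	and the Manhole constructor arguments are assumptions, not part
+	of the change itself)
+
+	  from buildbot.master import Manhole
+	  c['slavePortnum'] = "tcp:9989"         # equivalent to plain 9989
+	  c['manhole'] = Manhole("tcp:12345:interface=127.0.0.1",
+	                         "admin", "password")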
+
+	* buildbot/master.py (BuildMaster.loadConfig): wait for the
+	slaveport's disownServiceParent deferred to fire before opening
+	the new one. Fixes an annoying bug in the unit tests.
+
+2006-01-03  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (BuildMaster): remove the .schedulers
+	attribute, replacing it with an allSchedulers() method that looks
+	for all IService children that implement IScheduler. Having only
+	one parent/child relationship means fewer opportunities for bugs.
+	(BuildMaster.allSchedulers): new method
+	(BuildMaster.loadConfig_Schedulers): update to use allSchedulers,
+	also fix ugly bug that caused any config-file reload to
+	half-forget about the earlier Schedulers, causing an exception
+	when a Change arrived and was handed to a half-connected
+	Scheduler. The exception was in scheduler.py line 54ish:
+	  self.parent.submitBuildSet(bs)
+	  exceptions.AttributeError: 'NoneType' object has no attribute
+	  'submitBuildSet'
+	(BuildMaster.addChange): update to use allSchedulers()
+
+	* buildbot/scheduler.py (BaseScheduler.__implements__): fix this
+	to work properly with twisted-1.3.0, where you must explicitly
+	include the __implements__ from parent classes
+	(BaseScheduler.__repr__): make it easier to distinguish distinct
+	instances
+	(BaseUpstreamScheduler.__implements__): same
+
+	* buildbot/status/builder.py (Status.getSchedulers): update to
+	use allSchedulers()
+	* buildbot/test/test_run.py (Run.testMaster): same
+	* buildbot/test/test_dependencies.py (Dependencies.findScheduler): same
+	* buildbot/test/test_config.py (ConfigTest.testSchedulers): same,
+	make sure Scheduler instances are left alone when an identical
+	config file is reloaded
+	(ConfigElements.testSchedulers): make sure Schedulers are properly
+	comparable
+
+	* Makefile (TRIALARGS): my local default Twisted version is now
+	2.1.0, update the trial arguments accordingly
+
+2005-12-22  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: merge changes from pyr: add
+	new win32 builders
+
+	* buildbot/scheduler.py (BaseScheduler.addChange): include a dummy
+	addChange in the parent class, although I suspect this should be
+	fixed better in the future.
+
+2005-11-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scheduler.py (AnyBranchScheduler.addChange): don't
+	explode when branch==None, thanks to Kevin Turner for the catch
+	* buildbot/test/test_scheduler.py (Scheduling.testAnyBranch): test
+	it
+
+	* buildbot/__init__.py (version): bump to 0.7.1+ while between
+	releases
+	* docs/buildbot.texinfo: same
+
+2005-11-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.7.1
+	* docs/buildbot.texinfo: set version number to match
+
+2005-11-26  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update for 0.7.1
+
+	* buildbot/status/builder.py (BuildStepStatus.unsubscribe): make
+	sure that unsubscribe works even if we never sent an ETA update.
+	Also, don't explode on duplicate unsubscribe.
+	(BuildStepStatus.addLog): make the convenience "return self"-added
+	watcher automatically unsubscribe when the Step finishes.
+	(BuildStatus.unsubscribe): same handle-duplicate-unsubscribe
+	(BuildStatus.stepStarted): same auto-unsubscribe
+	(BuilderStatus.buildStarted): same auto-unsubscribe
+
+	* buildbot/interfaces.py (IStatusReceiver.buildStarted): document
+	auto-unsubscribe
+	(IStatusReceiver.stepStarted): same
+	(IStatusReceiver.logStarted): same
+
+	* buildbot/test/test_run.py (Status): move the Status test..
+	* buildbot/test/test_status.py (Subscription): .. to here
+
+2005-11-25  Brian Warner  <warner at lothar.com>
+
+	* NEWS: more updates
+
+	* buildbot/locks.py: fix the problem in which loading a master.cfg
+	file that changes some Builders (but not all of them) can result
+	in having multiple copies of the same Lock. Now, the real Locks
+	are kept in a table inside the BotMaster, and the Builders/Steps
+	use "LockIDs", which are still instances of MasterLock and
+	SlaveLock. The real Locks are instances of the new RealMasterLock
+	and RealSlaveLock classes.
+	* buildbot/master.py (BotMaster.getLockByID): new method to
+	convert LockIDs into real Locks.
+	* buildbot/process/base.py (Build.startBuild): convert LockIDs
+	into real Locks before building
+	* buildbot/process/step.py (BuildStep.startStep): same
+	* buildbot/test/test_locks.py (Locks.testLock1a): add a test which
+	exercises the problem
+
+
+	* docs/buildbot.texinfo (Scheduler Types): give a few hints about
+	what Schedulers are available
+
+	* buildbot/scheduler.py (Nightly): add new Scheduler based upon
+	work by Dobes Vandermeer and hacked mercilessly by me. This offers
+	'cron'-style build scheduling at certain times of day, week,
+	month, or year.
+	* buildbot/test/test_scheduler.py (Scheduling.testNightly): test it
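+
+	(sketch with assumed scheduler/builder names; not part of the
+	change itself)
+
+	  from buildbot.scheduler import Nightly
+	  # start the 'full' builder every day at 03:00
+	  c['schedulers'].append(Nightly(name="nightly",
+	                                 builderNames=["full"],
+	                                 hour=3, minute=0))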
+
+	* buildbot/scheduler.py (Scheduler): change fileIsImportant
+	handling: treat self.fileIsImportant more as an attribute that
+	contains a callable than as a method. If the attribute is None,
+	don't call it and assume all filenames are important. It is still
+	possible to provide a fileIsImportant method in a subclass,
+	however.
+	(AnyBranchScheduler): handle fileIsImportant=None, previously it
+	was broken
+	* buildbot/test/test_scheduler.py (Scheduling.testAnyBranch2):
+	test using AnyBranchScheduler with fileIsImportant=None
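+
+	(sketch of a fileIsImportant callable, with an assumed policy of
+	ignoring documentation-only changes; not part of the change
+	itself)
+
+	  from buildbot.scheduler import Scheduler
+
+	  def important(change):
+	      # trigger a build only if something outside docs/ changed
+	      for name in change.files:
+	          if not name.startswith("docs/"):
+	              return True
+	      return False
+
+	  c['schedulers'].append(Scheduler(name="main", branch=None,
+	                                   treeStableTimer=60,
+	                                   builderNames=["full"],
+	                                   fileIsImportant=important))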
+
+2005-11-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_config.py (StartService): don't claim a fixed
+	port number, instead set slavePort=0 on the first pass, figure out
+	what port was allocated, then switch to a config file that uses
+	the allocated port.
+
+	* buildbot/master.py (BuildMaster.loadConfig): close the old
+	slaveport before opening the new one, because unit tests might
+	replace slavePort=0 with the same allocated portnumber, and if we
+	don't wait for the old port to close first, we get a "port already
+	in use" error. There is a tiny race condition here, but the only
+	threat is from other programs that bind (statically) to the same
+	port number we happened to be allocated, and only if those
+	programs use SO_REUSEADDR, and only if they get control in between
+	reactor turns.
+
+	* Makefile (TRIALARGS): update to handle Twisted > 2.1.0
+
+	* buildbot/master.py (BuildMaster.loadConfig_Sources): remove all
+	deleted ChangeSources before adding any new ones
+	* buildbot/changes/freshcvs.py (FreshCVSSourceNewcred): fix
+	compare_attrs, to make sure that a config-file reload does not
+	unnecessarily replace an unmodified ChangeSource instance
+	* buildbot/test/test_config.py (ConfigTest.testSources): update
+
+	* buildbot/scheduler.py (AnyBranchScheduler): fix branches=[] to
+	mean "don't build anything", and add a warning if it gets used
+	because it isn't actually useful.
+
+	* contrib/svn_buildbot.py: update example usage to match the port
+	number that gets used by the PBChangeSource
+	* buildbot/scripts/sample.cfg: add example of PBChangeSource
+
+2005-11-22  Brian Warner  <warner at lothar.com>
+
+	* NEWS: start collecting items for next release
+
+	* buildbot/process/step.py (SVN.computeSourceRevision): assume
+	revisions are strings
+	(P4Sync.computeSourceRevision): same
+
+	* buildbot/status/html.py (StatusResourceBuild.body): add a link
+	to the Buildbot's overall status page
+	(StatusResourceBuilder.body): same
+
+2005-11-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (BuildMaster.loadConfig): serialize the
+	config-file loading, specifically to make sure old StatusTargets
+	are finished shutting down before new ones start up (thus
+	resolving a bug in which changing the Waterfall object would fail
+	because both new and old instances were claiming the same
+	listening port). Also load new Schedulers after all the new
+	Builders are set up, in case they fire off a new build right away.
+	* buildbot/test/test_config.py (StartService): test it
+
+	* buildbot/status/mail.py (MailNotifier.buildMessage): oops, add
+	the branch name to the mail body
+
+	* buildbot/changes/pb.py (PBChangeSource.compare_attrs): add this.
+	Without it, a config-file reload fails to update an existing
+	PBChangeSource.
+	* buildbot/changes/freshcvs.py (FreshCVSSourceNewcred): add
+	username/passwd to compare_attrs, for the same reason
+	* buildbot/status/html.py (Waterfall): add favicon to
+	compare_attrs, same reason
+
+2005-11-05  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/tryclient.py (createJobfile): stringify the
+	baserev before stuffing it in the jobfile. This resolves problems
+	under SVN (and probably Arch) where revisions are expressed as
+	numbers. I'm inclined to use string-based revisions everywhere in
+	the future, but this fix should be safe for now. Thanks to Steven
+	Walter for the patch.
+
+	* buildbot/changes/changes.py (ChangeMaster.saveYourself): use
+	binary mode when opening pickle files, to make windows work
+	better. Thanks to Dobes Vandermeer for the catch.
+	* buildbot/status/builder.py (BuildStatus.saveYourself): same
+	(BuilderStatus.getBuildByNumber): same
+	(Status.builderAdded): same
+	* buildbot/master.py (BuildMaster.loadChanges): same
+
+	* buildbot/util.py (Swappable): delete unused leftover code
+
+	* buildbot/process/step.py (SVN): when building on a non-default
+	branch, add the word "[branch]" to the VC step's description, so
+	it is obvious that we're not building the usual stuff. Likewise,
+	when we are building a specific revision, add the text "rNNN" to
+	indicate what that revision number is. Thanks to Brad Hards and
+	Nathaniel Smith for the suggestion.
+	(Darcs.startVC): same
+	(Arch.startVC): same
+	(Bazaar.startVC): same
+
+	* buildbot/process/factory.py (GNUAutoconf.__init__): fix a silly
+	typo, caught by Mark Dillavou, closes SF#1216636.
+
+	* buildbot/test/test_status.py (Log.TODO_testDuplicate): add notes
+	about a test to add some day
+
+	* docs/examples/twisted_master.cfg: update: bot1 can now handle
+	the 'full-2.3' build, and the 'reactors' build is now run under
+	python-2.4 because the buildslave no longer has gtk/etc bindings
+	for earlier versions.
+
+2005-11-03  Brian Warner  <warner at lothar.com>
+
+	* buildbot/interfaces.py (IBuilderControl.resubmitBuild): new
+	method, takes an IBuildStatus and rebuilds it. It might make more
+	sense to add this to IBuildControl instead, but that instance goes
+	away completely once the build has finished, and resubmitting
+	builds can take place weeks later.
+	* buildbot/process/builder.py (BuilderControl.resubmitBuild): same
+	* buildbot/status/html.py (StatusResourceBuild): also stash an
+	IBuilderControl so we can use resubmitBuild.
+	(StatusResourceBuild.body): render "resubmit" button if we can.
+	Also add hrefs for each BuildStep
+	(StatusResourceBuild.rebuild): add action for "resubmit" button
+	(StatusResourceBuilder.getChild): give it an IBuilderControl
+
+	* buildbot/status/builder.py (Status.getURLForThing): change the
+	URL for BuildSteps to have a "step-" prefix, so the magic URLs
+	that live as targets of buttons like "stop" and "rebuild" can't
+	collide with them.
+	* buildbot/status/builder.py (Status.getURLForThing): same
+	* buildbot/status/html.py (StatusResourceBuild.getChild): same
+	(StepBox.getBox): same
+	* buildbot/test/test_web.py (GetURL): same
+	(Logfile): same
+
+	* buildbot/process/step.py (SVN.__init__): put svnurl/baseURL
+	exclusivity checks after Source.__init__ upcall, so misspelled
+	arguments will be reported more usefully
+	(Darcs.__init__): same
+
+2005-10-29  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: don't double-fire the 'quick'
+	builder. Move the Try scheduler off to a separate port.
+
+2005-10-27  Brian Warner  <warner at lothar.com>
+
+	* buildbot/clients/gtkPanes.py
+	(TwoRowClient.remote_builderRemoved): disappearing Builders used
+	to cause the app to crash, now they don't.
+
+	* buildbot/clients/debug.py: display the buildmaster's location
+	in the window's title bar
+
+2005-10-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/mail.py (MailNotifier): urllib.escape the URLs
+	in case they have spaces or whatnot. Patch from Dobes Vandermeer.
+	* buildbot/test/test_status.py (MyStatus.getURLForThing): fix it
+
+	* buildbot/status/html.py (td): put a single non-breaking space
+	inside otherwise empty <td> elements, as a workaround for buggy
+	browsers which would optimize them away (along with any associated
+	styles, like the kind that create the waterfall grid borders).
+	Patch from Frerich Raabe.
+
+	* buildbot/process/step_twisted.py (Trial): expose the trialMode=
+	argv-list as an argument, defaulting to ["-to"], which is
+	appropriate for the Trial that comes with Twisted-2.1.0 and
+	earlier. The Trial in current Twisted SVN wants
+	["--reporter=bwverbose"] instead. Also expose trialArgs=, which
+	defaults to an empty list.
+	* buildbot/process/process_twisted.py (TwistedTrial.trialMode):
+	match it, now that trialMode= is a list instead of a single string
+
+	* buildbot/__init__.py (version): bump to 0.7.0+ while between
+	releases
+	* docs/buildbot.texinfo: same
+
+2005-10-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.7.0
+	* docs/buildbot.texinfo: set version number to match
+
+2005-10-24  Brian Warner  <warner at lothar.com>
+
+	* README: update for 0.7.0
+	* NEWS: same
+	* docs/buildbot.texinfo: move the freshcvs stuff out of the README
+
+	* buildbot/clients/debug.glade: add 'branch' box to fake-commit
+	* buildbot/clients/debug.py (DebugWidget.do_commit): same. Don't
+	send the branch= argument unless the user really provided one, to
+	retain compatibility with older buildmasters that don't accept
+	that argument.
+	* buildbot/master.py (DebugPerspective.perspective_fakeChange):
+	same
+
+	* docs/buildbot.texinfo: update lots of stuff
+
+	* buildbot/scripts/runner.py (sendchange): add a --branch argument
+	to the 'buildbot sendchange' command
+	* buildbot/clients/sendchange.py (Sender.send): same
+	* buildbot/changes/pb.py (ChangePerspective): same
+	* buildbot/test/test_changes.py (Sender.testSender): test it
+
+	* buildbot/process/step.py (SVN.__init__): change 'base_url' and
+	'default_branch' argument names to 'baseURL' and 'defaultBranch',
+	for consistency with other BuildStep arguments that use camelCase.
+	Well, at least more of them use camelCase (like flunkOnWarnings)
+	than don't.. I wish I'd picked one style and stuck with it
+	earlier. Annoying, but it's best done before the release, since
+	these arguments didn't exist at all in 0.6.6 .
+	(Darcs): same
+	* buildbot/test/test_vc.py (SVN.testCheckout): same
+	(Darcs.testPatch): same
+	* docs/buildbot.texinfo (SVN): document the change
+	(Darcs): same, add some build-on-branch docs
+	* docs/examples/twisted_master.cfg: match change
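+
+	(sketch with an assumed repository URL; not part of the change
+	itself)
+
+	  from buildbot.process.factory import s
+	  from buildbot.process.step import SVN
+	  source = s(SVN, mode="update",
+	             baseURL="svn://svn.example.org/repo/",
+	             defaultBranch="trunk")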
+
+	* buildbot/process/step.py (BuildStep): rename
+	slaveVersionNewEnough to slaveVersionIsOlderThan, because that's
+	how it is normally used.
+	* buildbot/test/test_steps.py (Version.checkCompare): same
+
+	* buildbot/process/step.py (CVS.startVC): refuse to build
+	update/copy -style builds on a non-default branch with an old
+	buildslave (<=0.6.6) that doesn't know how to do it properly. The
+	concern is that it will do a VC 'update' in an existing tree when
+	it is supposed to be switching branches (and therefore clobbering
+	the tree to do a full checkout), thus building the wrong source.
+	This used to be a warning, but I think the confusion it is likely
+	to cause warrants making it an error.
+	(SVN.startVC): same, also make mode=export on old slaves an error
+	(Darcs.startVC): same
+	(Git.startVC): improve error message for non-Git-enabled slaves
+	(Arch.checkSlaveVersion): same. continue to emit a warning when a
+	specific revision is built on a slave that doesn't pay attention
+	to args['revision'], because for slowly-changing trees it will
+	probably do the right thing, and because we have no way to tell
+	whether we're asking it to build the most recent version or not.
+	* buildbot/interfaces.py (BuildSlaveTooOldError): new exception
+
+	* buildbot/scripts/runner.py (SlaveOptions.postOptions): assert
+	that 'master' is in host:portnum format, to catch errors sooner
+
+2005-10-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (ProcessDocs.createSummary):
+	when creating the list of warning messages, include the line
+	immediately after each WARNING: line, since that's usually where
+	the file and line number wind up.
+
+	* docs/examples/twisted_master.cfg: OS-X slave now does QT, add a
+	TryScheduler
+
+	* NEWS: update
+
+2005-10-22  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/html.py (HtmlResource): incorporate valid-HTML
+	patch from Brad Hards
+	* buildbot/status/classic.css: same
+	* buildbot/test/test_web.py (Waterfall): match changes
+
+	* buildbot/test/test_steps.py (BuildStep.setUp): set
+	nextBuildNumber so the test passes
+	* buildbot/test/test_status.py (MyBuilder): same
+
+	* buildbot/status/html.py (StatusResourceBuild.body): revision
+	might be numeric, so stringify it before html-escapifying it
+	(CurrentBox.getBox): add a "waiting" state, and show a countdown
+	timer for the upcoming build
+	* buildbot/status/classic.css: add background-color attributes for
+	offline/waiting/building classes
+
+	* buildbot/status/builder.py (BuildStatus): derive from
+	styles.Versioned, fix upgrade of .sourceStamp attribute. Also set
+	the default (i.e. unknown) .slavename to "???" instead of None,
+	since even unknown slavenames need to be printed eventually.
+	(BuilderStatus): also derive from styles.Versioned . More
+	importantly, determine .nextBuildNumber at creation/unpickling
+	time by scanning the directory of saved BuildStatus instances and
+	choosing one larger than the highest-numbered one found. This
+	should fix the problem where random errors during upgrades cause
+	the buildbot to forget about earlier builds. .nextBuildNumber is
+	no longer stored in the pickle.
+	(Status.builderAdded): if we can't unpickle the BuilderStatus,
+	at least log the error. Also call Builder.determineNextBuildNumber
+	once the basedir is set.
+
+	* buildbot/master.py (BuildMaster.loadChanges): do
+	styles.doUpgrade afterwards, in case I decide to make Changes
+	derived from styles.Versioned some day and forget to make this
+	change later.
+
+
+	* buildbot/test/test_runner.py (Options.testForceOptions): skip
+	when running under older pythons (<2.3) in which the shlex module
+	doesn't have a 'split' function.
+
+	* buildbot/process/step.py (ShellCommand.start): make
+	errorMessages= be a list of strings to stuff in the log before the
+	command actually starts. This makes it easier to flag multiple
+	warning messages, e.g. when the Source steps have to deal with an
+	old buildslave.
+	(CVS.startVC): handle slaves that don't handle multiple branches
+	by switching into 'clobber' mode
+	(SVN.startVC): same. Also reject branches without base_url
+	(Darcs.startVC): same. Also reject revision= in older slaves
+	(Arch.checkSlaveVersion): same (just the multiple-branches stuff)
+	(Bazaar.startVC): same, and test for baz separately than for arch
+
+	* buildbot/slave/commands.py (cvs_ver): document new features
+
+	* buildbot/process/step.py (BuildStep.slaveVersion): document it
+	(BuildStep.slaveVersionNewEnough): more useful utility method
+	* buildbot/test/test_steps.py (Version): start testing it
+
+	* buildbot/status/words.py (IrcStatusBot.command_FORCE): note that
+	the 'force' command requires python2.3, for the shlex.split method
+
+	* docs/examples/twisted_master.cfg: remove old freshcvs stuff,
+	since we don't use it anymore. The Twisted buildbot uses a
+	PBChangeSource now.
+
+2005-10-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py: rework all BuildFactory
+	classes to take a 'source' step as an argument, instead of
+	building up the SVN instance in the factory.
+	* docs/examples/twisted_master.cfg: enable build-on-branch by
+	providing a base_url and default_branch
+
+	* buildbot/status/words.py (IrcStatusBot.command_FORCE): add
+	control over --branch and --revision, though they are not always
+	legal to provide
+	* buildbot/status/html.py (StatusResourceBuilder.force): same
+	(StatusResourceBuild.body): display SourceStamp components
+
+	* buildbot/scripts/runner.py (ForceOptions): option parser for the
+	IRC 'force' command, so it can be shared with an eventual
+	command-line-tool 'buildbot force' mode.
+	* buildbot/test/test_runner.py (Options.testForceOptions): test it
+
+2005-10-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/mail.py (MailNotifier.buildMessage): reformat
+
+	* docs/examples/twisted_master.cfg: update to use Schedulers
+
+	* buildbot/scripts/sample.cfg: update with Schedulers
+
+	* buildbot/interfaces.py (IBuilderControl.requestBuildSoon): new
+	method specifically for use by HTML "force build" button and the
+	IRC "force" command. Raises an immediate error if there are no
+	slaves available.
+	(IBuilderControl.requestBuild): make this just submit a build, not
+	try to check for existing slaves or set up any when-finished
+	Deferreds or anything.
+	* buildbot/process/builder.py (BuilderControl): same
+	* buildbot/status/html.py (StatusResourceBuilder.force): same
+	* buildbot/status/words.py (IrcStatusBot.command_FORCE): same
+	* buildbot/test/test_slaves.py: same
+	* buildbot/test/test_web.py: same
+
+2005-10-19  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: re-sync with reality: bring
+	back python2.2 tests, turn off OS-X threadedselect-reactor tests
+
+2005-10-18  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/html.py: provide 'status' argument to most
+	StatusResourceFOO objects
+	(StatusResourceBuild.body): href-ify the Builder name, add "Steps
+	and Logfiles" section to make the Build page into a more-or-less
+	comprehensive source of status information about the build
+
+	* buildbot/status/mail.py (MailNotifier): include the Build's URL
+	* buildbot/status/words.py (IrcStatusBot.buildFinished): same
+
+2005-10-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py (TwistedTrial): update Trial
+	arguments to accommodate Twisted >= 2.1.0. I will have to figure
+	out what to do about other projects: the correct options for
+	recent Twisteds will not work for older ones.
+
+2005-10-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py (Status.getURLForThing): add method
+	to provide a URL for arbitrary IStatusFoo objects. The idea is to
+	use this in email/IRC status clients to make them more useful, by
+	providing the end user with hints on where to learn more about the
+	object being reported on.
+	* buildbot/test/test_web.py (GetURL): tests for it
+
+2005-10-14  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_config.py (ConfigTest._testSources_1): oops,
+	fix bug resulting from deferredResult changes
+
+2005-10-13  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_changes.py: remove use of deferredResult
+	* buildbot/test/test_config.py: same
+	* buildbot/test/test_control.py: same
+	* buildbot/test/test_status.py: same
+	* buildbot/test/test_vc.py: this is the only remaining use, since
+	it gets used at module level. This needs to be replaced by some
+	sort of class-level run-once routine.
+
+	* buildbot/status/words.py (IrcStatusBot.command_WATCH): fix typo
+
+	* lots: implement multiple slaves per Builder, which means multiple
+	current builds per Builder. Some highlights:
+	* buildbot/interfaces.py (IBuilderStatus.getState): return a tuple
+	of (state,currentBuilds) instead of (state,currentBuild)
+	(IBuilderStatus.getCurrentBuilds): replace getCurrentBuild()
+	(IBuildStatus.getSlavename): new method, so you can tell which
+	slave got used. This only gets set when the build completes.
+	(IBuildRequestStatus.getBuilds): new method
+
+	* buildbot/process/builder.py (SlaveBuilder): add a .state
+	attribute to track things like ATTACHING and IDLE and BUILDING,
+	instead of..
+	(Builder): .. the .slaves attribute here, which has been turned
+	into a simple list of available slaves. Added a separate
+	attaching_slaves list to track ones that are not yet ready for
+	builds.
+	(Builder.fireTestEvent): put off the test-event callback for a
+	reactor turn, to make tests a bit more consistent.
+	(Ping): cleaned up the slaveping a bit, now it disconnects if the
+	ping fails due to an exception. This needs work, I'm worried that
+	a code error could lead to a constantly re-connecting slave.
+	Especially since I'm trying to move to a distinct remote_ping
+	method, separate from the remote_print that we currently use.
+	(BuilderControl.requestBuild): return a convenience Deferred that
+	provides an IBuildStatus when the build finishes.
+	(BuilderControl.ping): ping all connected slaves, only return True
+	if they all respond.
+
+	* buildbot/slave/bot.py (BuildSlave.stopService): stop trying to
+	reconnect when we shut down.
+
+	* buildbot/status/builder.py: implement new methods, convert
+	one-build-at-a-time methods to handle multiple builds
+	* buildbot/status/*.py: do the same in all default status targets
+	* buildbot/status/html.py: report the build's slavename in the
+	per-Build page, report all buildslaves on the per-Builder page
+
+	* buildbot/test/test_run.py: update/create tests
+	* buildbot/test/test_slaves.py: same
+	* buildbot/test/test_scheduler.py: remove stale test
+
+	* docs/buildbot.texinfo: document the new builder-specification
+	'slavenames' parameter
+
+2005-10-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/buildset.py (BuildSet): fix bug where BuildSet did not
+	report failure correctly, causing Dependent builds to run when
+	they shouldn't have.
+	* buildbot/status/builder.py (BuildSetStatus): same
+	* buildbot/test/test_buildreq.py (Set.testBuildSet): verify it
+	(Set.testSuccess): test the both-pass case too
+	* buildbot/test/test_dependencies.py (Dependencies.testRun_Fail):
+	fix this test: it was ending too early, masking the failure before
+	(Logger): specialized StatusReceiver to make sure the dependent
+	builds aren't even started, much less completed.
+
+2005-10-07  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/bot.py (SlaveBuilder.activity): survive
+	bot.SlaveBuilder being disowned in the middle of a build
+
+	* buildbot/status/base.py (StatusReceiverMultiService): oops, make
+	this inherit from StatusReceiver. Also upcall in __init__. This
+	fixes the embarrassing crash when the new buildSetSubmitted method
+	is invoked and Waterfall/etc don't implement their own.
+	* buildbot/test/test_run.py: add a TODO note about a test to catch
+	just this sort of thing.
+
+	* buildbot/process/builder.py (Builder.attached): remove the
+	already-attached warning, this situation is normal. Add some
+	comments explaining it.
+
+2005-10-02  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/maildir.py (Maildir.start): Tolerate
+	OverflowError when setting up dnotify, because some 64-bit systems
+	have problems with signed-vs-unsigned constants and trip up on the
+	DN_MULTISHOT flag. Patch from Brad Hards.
+
+2005-09-06  Fred Drake  <fdrake at users.sourceforge.net>
+
+	* buildbot/process/step.py (BuildStep, ShellCommand): Add
+	progressMetrics, description, descriptionDone to the 'parms' list,
+	and make use of the 'parms' list from the implementation class
+	instead of only BuildStep to initialize the parameters.  This
+	allows buildbot.process.factory.s() to initialize all the parms,
+	not just those defined directly by BuildStep.
+
+2005-09-03  Brian Warner  <warner at lothar.com>
+
+	* NEWS: start adding items for the next release
+
+	* docs/examples/twisted_master.cfg: (sync with reality) turn off
+	python2.2 tests, change 'Quick' builder to only use python2.3
+
+2005-09-02  Fred Drake  <fdrake at users.sourceforge.net>
+
+	* buildbot/status/html.py (StatusResourceBuilder.body): only show
+	the "Ping Builder" button if the build control is available; the
+	user sees an exception otherwise
+
+	* docs/buildbot.texinfo (PBChangeSource): fix a typo
+
+2005-09-01  Brian Warner  <warner at lothar.com>
+
+	* buildbot/interfaces.py (IBuilderStatus.getState): update
+	signature, point out that 'build' can be None
+	(IBuildStatus.getETA): point out ETA can be None
+
+	* buildbot/status/html.py (CurrentBox.getBox): tolerate build/ETA
+	being None
+	* buildbot/status/words.py (IrcStatusBot.emit_status): same
+
+2005-08-31  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/base.py (StatusReceiver.builderChangedState):
+	update to match correct signature: removed 'eta' argument
+	* buildbot/status/mail.py (MailNotifier.builderChangedState): same
+
+2005-08-30  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py (LogFile): remove the assertion that
+	blows up when you try to overwrite an existing logfile, instead
+	just emit a warning. This case gets hit when the buildmaster is
+	killed and doesn't get a chance to write out the serialized
+	BuilderStatus object, so the .nextBuildNumber attribute gets out
+	of date.
+
+	* buildbot/scripts/runner.py (sendchange): add --revision_file to
+	the 'buildbot sendchange' arguments, for the Darcs context file
+	* docs/buildbot.texinfo (sendchange): document it
+
+	* buildbot/status/html.py: add pending/upcoming builds to CurrentBox
+	* buildbot/interfaces.py (IScheduler.getPendingBuildTimes): new method
+	(IStatus.getSchedulers): new method
+	* buildbot/status/builder.py (BuilderStatus): track pendingBuilds
+	(Status.getSchedulers): implement
+	* buildbot/process/builder.py (Builder): maintain
+	BuilderStatus.pendingBuilds
+	* buildbot/scheduler.py (Scheduler.getPendingBuildTimes): new method
+	(TryBase.addChange): Try schedulers should ignore Changes
+
+	* buildbot/scripts/tryclient.py (getTopdir): implement getTopdir
+	for 'try' on CVS/SVN
+	* buildbot/test/test_runner.py (Try.testGetTopdir): test case
+
+	* buildbot/scripts/tryclient.py (Try): make jobdir-style 'try'
+	report status properly.
+	(Try.createJob): implement unique buildset IDs
+
+	* buildbot/status/client.py (StatusClientPerspective): add a
+	perspective_getBuildSets method for the benefit of jobdir-style
+	'try'.
+	* docs/buildbot.texinfo (try): more docs
+	* buildbot/test/test_scheduler.py (Scheduling.testGetBuildSets):
+	new test case
+
+2005-08-18  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/tryclient.py (Try): make 'try' status reporting
+	actually work. It's functional but still kind of clunky. Also, it
+	only works with the pb-style.. needs to be made to work with the
+	jobdir-style too.
+
+	* buildbot/status/client.py (RemoteBuildSet): new class
+	(RemoteBuildRequest): same
+	(RemoteBuild.remote_waitUntilFinished): return the RemoteBuild
+	object, not the internal BuildStatus object.
+	(RemoteBuild.remote_subscribe): new method to subscribe to builds
+	outside of the usual buildStarted() return value.
+	(BuildSubscriber): support class for RemoteBuild.remote_subscribe
+
+	* buildbot/scheduler.py (Try_Jobdir): convey buildsetID properly
+	(Try_Userpass_Perspective.perspective_try): return a remotely
+	usable BuildSetStatus object
+
+	* buildbot/interfaces.py (IBuildStatus): remove obsolete
+	isStarted()/waitUntilStarted()
+
+2005-08-16  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py: implement IBuildSetStatus and
+	IBuildRequestStatus, wire them into place.
+	* buildbot/buildset.py: same. Add ID, move wait-until-finished
+	methods into the BuildSetStatus object.
+	* buildbot/interfaces.py: same
+	(IStatus.getBuildSets): new method to get pending BuildSets
+	(IStatusReceiver.buildsetSubmitted): new method which hears about
+	new BuildSets
+	* buildbot/master.py (BuildMaster.submitBuildSet): same
+	* buildbot/process/base.py (BuildRequest): same, replace
+	waitUntilStarted with subscribe/unsubscribe
+	* buildbot/process/builder.py (BuilderControl.forceBuild): use
+	subscribe instead of waitUntilStarted
+	* buildbot/status/base.py (StatusReceiver.buildsetSubmitted): stub
+	for new method
+	* buildbot/status/client.py (StatusClientPerspective.builderRemoved): 
+	same
+	* buildbot/test/test_buildreq.py: update for new code
+	* buildbot/test/test_control.py (Force.testRequest): same
+
+
+	* buildbot/slave/commands.py (Darcs.doVCFull): fix get-revision
+	for Darcs to not use the tempfile module, so it works under
+	python-2.2 too. We really didn't need the full cleverness of that
+	module, since the slave has exclusive control of its own builddir.
+
+2005-08-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/tryclient.py (CVSExtractor): implement 'try'
+	for CVS trees. It doesn't work for non-trunk branches,
+	unfortunately.
+	* buildbot/test/test_vc.py (CVS.testTry): test it, but skip the
+	branch test
+
+	* Makefile: make it easier to test against python2.2
+
+	* buildbot/test/test_vc.py (VCBase.tearDown): provide for
+	tearDown2, so things like Arch can unregister archives as they're
+	shutting down. The previous subclass-override-tearDown technique
+	resulted in a nested maybeWait() and test failures under
+	Twisted-1.3.0
+
+	* buildbot/scripts/tryclient.py (getSourceStamp): extract branches
+	where we can (Arch), add a branch= argument to set the branch used
+	when we can't
+	(BazExtractor): extract the branch too
+	(TlaExtractor): same
+	* buildbot/scripts/runner.py (TryOptions): add --branch
+	* docs/buildbot.texinfo (try): document --branch/try_branch
+
+	* buildbot/slave/commands.py (Darcs): implement get-revision for
+	Darcs, so that 'try' will work. This requires the tempfile module
+	from python-2.3 .
+
+	* buildbot/test/test_vc.py: rewrite tests, getting better coverage
+	of revisions, branches, and 'try' in the process.
+
+2005-08-11  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (DebugPerspective.perspective_pokeIRC): fix
+	this, it got broken at some point in the last few releases
+	* buildbot/status/words.py (IrcBuildRequest): reply was broken
+	(IrcStatusBot.emit_status): handle new IBuilderStatus.getState,
+	specifically the removal of ETA information from the tuple
+
+	* buildbot/locks.py: use %d for id() instead of %x, avoid a silly
+	warning message
+
+	* docs/buildbot.texinfo (try): document both --builder and
+	'try_builders' in .buildbot/options
+	* buildbot/scripts/runner.py (TryOptions): add --builder,
+	accumulate the values into opts['builders']
+	* buildbot/scripts/tryclient.py (Try.__init__): set builders
+	* buildbot/test/test_runner.py (Try): add some quick tests to make
+	sure 'buildbot try --options' and .buildbot/options get parsed
+	* buildbot/test/test_scheduler.py (Scheduling.testTryUserpass):
+	use --builder control
+
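+	Since the options file is plain Python, pre-selecting builders for
+	'buildbot try' could look roughly like this (builder names are
+	hypothetical):
+
+	  # ~/.buildbot/options
+	  try_builders = ["full-linux", "quick"]
+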
+	* docs/buildbot.texinfo (try): add --port argument to PB style
+
+	* buildbot/scripts/tryclient.py (SourceStampExtractor): return an
+	actual SourceStamp. Still need to extract a branch name, somehow.
+	(Try): finish implementing the try client side, still need a UI
+	for specifying which builders to use
+	(Try.getopt): factor out options/config-file reading
+	* buildbot/test/test_scheduler.py (Scheduling.testTryUserpass):
+	test it
+	* buildbot/test/test_vc.py: match SourceStampExtractor change
+
+	* buildbot/scripts/runner.py (Options.opt_verbose): --verbose
+	causes the twisted log to be sent to stderr
+
+	* buildbot/scheduler.py (Try_Userpass): implement the PB style
+
+2005-08-10  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py: Add 'buildbot try' command, jobdir
+	style is 90% done, still missing status reporting or waiting for
+	the buildsets to finish, and it is completely untested.
+
+	* buildbot/trybuild.py: delete file, move contents to ..
+	* buildbot/scripts/tryclient.py (getSourceStamp): .. here
+	* buildbot/test/test_vc.py: match the move
+
+	* buildbot/scheduler.py (Try_Jobdir): implement the jobdir style
+	of the TryScheduler, no buildsetID or status-tracking support yet
+	* buildbot/test/test_scheduler.py (Scheduling.testTryJobdir): test it
+
+	* buildbot/changes/maildir.py (Maildir.setBasedir): make it
+	possible to set the basedir after __init__ time, so it is easier
+	to use as a Service-child of the BuildMaster instance
+
+	* buildbot/changes/maildirtwisted.py (MaildirService): make a form
+	that delivers messages to its Service parent instead of requiring
+	a subclass to be useful. This turns out to be much easier to build
+	unit tests around.
+
+	* buildbot/scripts/tryclient.py (createJob): utility code to
+	create jobfiles, will eventually be used by 'buildbot try'
+
+2005-08-08  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (try): add docs on the
+	as-yet-unimplemented Try scheduler
+
+	* buildbot/test/test_buildreq.py: move Scheduling tests out to ..
+	* buildbot/test/test_scheduler.py: .. here
+	(Scheduling.testTryJobdir): add placeholder test for 'try'
+
+	* buildbot/test/test_status.py (Log.testMerge3): update to match new
+	addEntry merging (>=chunkSize) behavior
+	(Log.testConsumer): update to handle new callLater(0) behavior
+
+	* buildbot/test/test_web.py: rearrange tests a bit, add test for
+	both the MAX_LENGTH bugfix and the resumeProducing hang.
+
+	* buildbot/status/builder.py (LogFileProducer.resumeProducing):
+	put off the actual resumeProducing for a moment with
+	reactor.callLater(0). This works around a twisted-1.3.0 bug which
+	causes large logfiles to hang midway through.
+
+	* buildbot/process/step.py (BuildStep.addCompleteLog): break the
+	logfile up into chunks, both to avoid NetstringReceiver.MAX_LENGTH
+	and to improve memory usage when streaming the file out to a web
+	browser.
+	* buildbot/status/builder.py (LogFile.addEntry): change > to >= to
+	make this work cleanly
+
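+	The chunking idea, sketched with an assumed chunk size and method
+	name ('logfile' stands for the log being filled in):
+
+	  chunk_size = 10000   # assumed figure; must stay under MAX_LENGTH
+	  for i in range(0, len(logdata), chunk_size):
+	      logfile.addStdout(logdata[i:i+chunk_size])
+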
+2005-08-03  Brian Warner  <warner at lothar.com>
+
+	* buildbot/trybuild.py: new file for 'try' utilities
+	(getSourceStamp): run in a tree, find out the baserev+patch
+	* buildbot/test/test_vc.py (VCBase.do_getpatch): test it,
+	implemented for SVN and Darcs, still working on Arch. I don't know
+	how to make CVS work yet.
+
+	* docs/buildbot.texinfo: document the 'buildbot' command-line
+	tool, including the not-yet-implemented 'try' feature, and the
+	in-flux .buildbot/ options directory.
+
+2005-07-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/locks.py: added temporary id() numbers to Lock
+	descriptions, to track down a not-really-sharing-the-Lock bug
+
+	* buildbot/test/runutils.py: must import errno, cut-and-paste bug
+
+	* buildbot/test/test_slavecommand.py (ShellBase.failUnlessIn):
+	needed for python2.2 compatibility
+	* buildbot/test/test_vc.py: python2.2 compatibility: generators
+	are from the __future__
+
+2005-07-19  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (BuildMaster.loadConfig): give a better error
+	message when schedulers use unknown builders
+
+	* buildbot/process/builder.py (Builder.compareToSetup): make sure
+	SlaveLock('name') and MasterLock('name') are distinct
+
+	* buildbot/master.py (BuildMaster.loadConfig): oops, sanity-check
+	c['schedulers'] in such a way that we can actually accept
+	Dependent instances
+	* buildbot/test/test_config.py: check it
+
+	* buildbot/scheduler.py (Dependent.listBuilderNames): oops, add
+	utility method to *all* the Schedulers
+	(Periodic.listBuilderNames): same
+
+	* docs/buildbot.texinfo (Interlocks): update chapter to match
+	reality
+
+	* buildbot/master.py (BuildMaster.loadConfig): Add sanity checks
+	to make sure that c['sources'], c['schedulers'], and c['status']
+	are all lists of the appropriate objects, and that the Schedulers
+	all point to real Builders
+	* buildbot/interfaces.py (IScheduler, IUpstreamScheduler): add
+	'listBuilderNames' utility method to support this
+	* buildbot/scheduler.py: implement the utility method
+	* buildbot/test/test_config.py (ConfigTest.testSchedulers): test it
+
+	* docs/buildbot.texinfo: add some @cindex entries
+
+	* buildbot/test/test_vc.py (Arch.createRepository): set the tla ID
+	if it wasn't already set: most tla commands will fail unless one
+	has been set.
+	(Arch.createRepository): and disable bazaar's revision cache, since
+	it causes test failures (the multiple repositories we create all
+	interfere with each other through the cache)
+
+	* buildbot/test/test_web.py (WebTest): remove use of deferredResult,
+	bring it properly up to date with twisted-2.0 test guidelines
+
+	* buildbot/master.py (BuildMaster): remove references to old
+	'interlock' module, this caused a bunch of post-merge test
+	failures
+	* buildbot/test/test_config.py: same
+	* buildbot/process/base.py (Build): same
+
+	* buildbot/test/test_slaves.py: stubs for new test case
+
+	* buildbot/scheduler.py: add test-case-name tag
+	* buildbot/test/test_buildreq.py: same
+
+	* buildbot/slave/bot.py (SlaveBuilder.__init__): remove some
+	unnecessary init code
+	(Bot.remote_setBuilderList): match it
+
+	* docs/buildbot.texinfo (@settitle): don't claim version 1.0
+
+	* buildbot/changes/mail.py (parseSyncmail): update comment
+
+	* buildbot/test/test_slavecommand.py: disable Shell tests on
+	platforms that don't support IReactorProcess
+
+	* buildbot/status/builder.py (LogFile): remove the 't' mode from
+	all places where we open logfiles. It causes OS-X to open the file
+	in some weird mode that prevents us from mixing reads and
+	writes to the same filehandle, which we depend upon to implement
+	_generateChunks properly. This change doesn't appear to break
+	win32, on which "b" and "t" are treated differently but a missing
+	flag seems to be interpreted as "t".
+
+2005-07-18  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (ShellCommand): overhaul
+	error-handling code, to try and make timeout/interrupt work
+	properly, and make win32 happier
+	* buildbot/test/test_slavecommand.py: clean up, stop using
+	reactor.iterate, add tests for timeout and interrupt
+	* buildbot/test/sleep.py: utility for a new timeout test
+
+	* buildbot/twcompat.py: copy over twisted 1.3/2.0 compatibility
+	code from the local-usebranches branch
+
+2005-07-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py
+	(TwistedReactorsBuildFactory): change the treeStableTimer to 5
+	minutes, to match the other twisted BuildFactories, and don't
+	excuse failures in c/qt/win32 reactors any more.
+
+	* docs/examples/twisted_master.cfg: turn off the 'threadless' and
+	'freebsd' builders, since the buildslaves have been unavailable
+	for quite a while
+
+2005-07-13  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (VCBase.do_branch): test the new
+	build-on-branch feature
+
+	* buildbot/process/step.py (Darcs.__init__): add base_url and
+	default_branch arguments, just like SVN
+	(Arch.__init__): note that the version= argument is really the
+	default branch name
+
+	* buildbot/slave/commands.py (SourceBase): keep track of the
+	repository+branch that was used for the last checkout in
+	SRCDIR/.buildbot-sourcedata . If the contents of this file do not
+	match, we clobber the directory and perform a fresh checkout
+	rather than trying to do an in-place update. This should protect
+	us against trying to get to branch B by doing an update in a tree
+	obtained from branch A.
+	(CVS.setup): add CVS-specific sourcedata: root, module, and branch
+	(SVN.setup): same, just the svnurl
+	(Darcs.setup): same, just the repourl
+	(Arch.setup): same, arch coordinates (url), version, and
+	buildconfig. Also pull the buildconfig from the args dictionary,
+	which we weren't doing before, so the build-config was effectively
+	disabled.
+	(Arch.sourcedirIsUpdateable): don't try to update when we're
+	moving to a specific revision: arch can't go backwards, so it is
+	safer to just clobber the tree and checkout a new one at the
+	desired revision.
+	(Bazaar.setup): same sourcedata as Arch
+
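+	The comparison itself is simple; a standalone sketch (names and
+	file handling simplified from the real code):
+
+	  import os
+
+	  def sourcedata_mismatch(workdir, sourcedata):
+	      """True if SRCDIR/.buildbot-sourcedata disagrees with the
+	      repository+branch we are about to use, i.e. clobber needed."""
+	      fn = os.path.join(workdir, ".buildbot-sourcedata")
+	      try:
+	          old = open(fn, "r").read()
+	      except IOError:
+	          return True      # no record yet: play it safe, clobber
+	      return old != sourcedata
+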
+	* buildbot/test/test_dependencies.py (Dependencies.testRun_Fail):
+	use maybeWait, to work with twisted-1.3.0 and twcompat
+	(Dependencies.testRun_Pass): same
+
+	* buildbot/test/test_vc.py: rearrange, cleanup
+
+	* buildbot/twcompat.py: add defer.waitForDeferred and
+	utils.getProcessOutputAndValue, so test_vc.py (which uses them)
+	can work under twisted-1.3.0 .
+
+	* buildbot/test/test_vc.py: rewrite. The sample repositories are
+	now created at setUp time. This increases the runtime of the test
+	suite considerably (from 91 seconds to 151), but it removes the
+	need for an offline tarball, which should solve a problem I've
+	seen where the test host has a different version of svn than the
+	tarball build host. The new code also validates that mode=update
+	really picks up recent commits. This approach will also make it
+	easier to test out branches, because the code which creates the VC
+	branches is next to the code which uses them. It will also make it
+	possible to test some change-notification hooks, by actually
+	performing a VC commit and watching to see the ChangeSource get
+	notified.
+
+2005-07-12  Brian Warner  <warner at lothar.com>
+
+	* docs/buildbot.texinfo (SVN): add branches example
+	* docs/Makefile (buildbot.ps): add target for postscript manual
+
+	* buildbot/test/test_dependencies.py: s/test_interlocks/test_locks/ 
+	* buildbot/test/test_locks.py: same
+
+	* buildbot/process/step.py (Darcs): comment about default branches
+
+	* buildbot/master.py (BuildMaster.loadConfig): don't look for
+	c['interlocks'] in the config file, complain if it is present.
+	Scan all locks in c['builders'] to make sure the Locks they use
+	are uniquely named.
+	* buildbot/test/test_config.py: remove old c['interlocks'] test,
+	add some tests to check for non-uniquely-named Locks
+	* buildbot/test/test_vc.py (Patch.doPatch): fix factory.steps,
+	since the unique-Lock validation code requires it now
+
+	* buildbot/locks.py: fix test-case-name
+
+	* buildbot/interlock.py: remove old file
+
+2005-07-11  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_interlock.py: rename to..
+	* buildbot/test/test_locks.py: .. something shorter
+
+	* buildbot/slave/bot.py (BuildSlave.stopService): newer Twisted
+	versions (after 2.0.1) changed internet.TCPClient to shut down the
+	connection in stopService. Change the code to handle this
+	gracefully.
+
+	* buildbot/process/base.py (Build): handle whole-Build locks
+	* buildbot/process/builder.py (Builder.compareToSetup): same
+	* buildbot/test/test_interlock.py: make tests work
+
+	* buildbot/process/step.py (BuildStep.startStep): complain if a
+	Step tries to claim a lock that's owned by its own Build
+	(BuildStep.releaseLocks): typo
+
+	* buildbot/locks.py (MasterLock): use ComparableMixin so config
+	file reloads don't replace unchanged Builders
+	(SlaveLock): same
+	* buildbot/test/test_config.py (ConfigTest.testInterlocks):
+	rewrite to cover new Locks instead of old c['interlocks']
+	* buildbot/test/runutils.py (RunMixin.connectSlaves): remember
+	slave2 too
+
+
+	* buildbot/test/test_dependencies.py (Dependencies.setUp): always
+	start the master and connect the buildslave
+
+	* buildbot/process/step.py (FailingDummy.done): finish with a
+	FAILURE status rather than raising an exception
+
+	* buildbot/process/base.py (BuildRequest.mergeReasons): don't try to
+	stringify a BuildRequest.reason that is None
+
+	* buildbot/scheduler.py (BaseUpstreamScheduler.buildSetFinished):
+	minor fix
+	* buildbot/status/builder.py (BuildSetStatus): implement enough to
+	allow scheduler.Dependent to work
+	* buildbot/buildset.py (BuildSet): set .reason and .results
+
+	* buildbot/test/test_interlock.py (Locks.setUp): connect both
+	slaves, to make the test stop hanging. It still fails, of course,
+	because I haven't even started to implement Locks.
+
+	* buildbot/test/runutils.py (RunMixin.connectSlaves): new utility
+
+	* docs/buildbot.texinfo (Build-Dependencies): redesign the feature
+	* buildbot/interfaces.py (IUpstreamScheduler): new Interface
+	* buildbot/scheduler.py (BaseScheduler): factor out common stuff
+	(Dependent): new class for downstream build dependencies
+	* buildbot/test/test_dependencies.py: tests (still failing)
+
+	* buildbot/buildset.py (BuildSet.waitUntilSuccess): minor notes
+
+2005-07-07  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/runutils.py (RunMixin): factored this class out..
+	* buildbot/test/test_run.py: .. from here
+	* buildbot/test/test_interlock.py: removed old c['interlock'] tests,
+	added new buildbot.locks tests (which all hang right now)
+	* buildbot/locks.py (SlaveLock, MasterLock): implement Locks
+	* buildbot/process/step.py: claim/release per-BuildStep locks
+
+	* docs/Makefile: add 'buildbot.html' target
+
+	* buildbot/process/step.py (CVS.__init__): allow branch=None to be
+	interpreted as "HEAD", so that all VC steps can accept branch=None
+	and have it mean the "default branch".
+
+	* docs/buildbot.texinfo: add Schedulers, Dependencies, and Locks
+
+2005-07-07  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: update to match current usage
+
+	* docs/buildbot.texinfo (System Architecture): comment out the
+	image, it doesn't exist yet and just screws up the HTML manual.
+
+2005-07-05  Brian Warner  <warner at lothar.com>
+
+	* debian/.cvsignore: oops, missed one. Removing leftover file.
+
+2005-06-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (VCSupport.__init__): svn --version
+	changed its output in 1.2.0, don't mistakenly think that the
+	subversion we find isn't capable of supporting our tests.
+
+	* debian/*: remove the debian/ directory and its contents, to make
+	life easier for the proper Debian maintainer
+	* MANIFEST.in: same
+	* Makefile (release): same
+
+2005-06-07  Brian Warner  <warner at lothar.com>
+
+	* everything: create a distinct SourceStamp class to replace the
+	ungainly 4-tuple, let it handle merging instead of BuildRequest.
+	Changed the signature of Source.startVC to include the revision
+	information (instead of passing it through self.args). Implement
+	branches for SVN (now only Darcs/Git is missing support). Add more
+	Scheduler tests.
+
+2005-06-06  Brian Warner  <warner at lothar.com>
+
+	* everything: rearrange build scheduling. Create a new Scheduler
+	object (configured in c['schedulers']) which submits BuildSets to a
+	set of Builders. Builders can now use multiple slaves. Builds can
+	be run on alternate branches, either requested manually or driven
+	by changes. This changed some of the Status classes. Interlocks
+	are out of service until they've been properly split into Locks
+	and Dependencies. treeStableTimer, isFileImportant, and
+	periodicBuild have all been moved from the Builder to the
+	Scheduler.
+	(BuilderStatus.currentBigState): removed the 'waiting' and
+	'interlocked' states, removed the 'ETA' argument.
+
+2005-05-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/pbutil.py (ReconnectingPBClientFactory): Twisted-1.3
+	erroneously abandons the connection (in clientConnectionFailed)
+	for non-UserErrors, which means that if we lose the connection due
+	to a network problem or a timeout, we'll never try to reconnect.
+	Fix this by not upcalling to the buggy parent method. Note:
+	twisted-2.0 fixes this, but the function only has 3 lines so it
+	makes more sense to copy it than to try and detect the bugginess
+	of the parent class. Fixes SF#1207588.
+
+	* buildbot/changes/changes.py (Change.branch): doh! Add a
+	class-level attribute to accommodate old Change instances that were
+	pickled before 0.6.5 (where .branch was added for new Changes).
+	This fixes the exception that occurs when you try to look at an
+	old Change (through asHTML).
+
+	* buildbot/__init__.py (version): bump to 0.6.6+ while between
+	releases
+
+2005-05-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): release 0.6.6
+
+2005-05-23  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update for 0.6.6 release
+	* debian/changelog: same
+
+	* buildbot/scripts/runner.py (start): put the basedir in sys.path
+	before starting: this was done by twistd back when we spawned it;
+	now that we're importing the pieces and running them in the
+	current process, we have to do it ourselves. This allows
+	master.cfg to import files from the same directory without
+	explicitly manipulating PYTHONPATH. Thanks to Thomas Vander
+	Stichele for the catch.
+	(Options.opt_version): Add a --version command (actually, just make
+	the existing --version command emit Buildbot's version too)
+
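+	For example, with the basedir on sys.path a master.cfg can import a
+	helper module sitting next to it (the module and names here are
+	made up):
+
+	  # master.cfg
+	  from local_settings import slave_password
+	  c['bots'] = [("bot1", slave_password)]
+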
+	* buildbot/status/builder.py (HTMLLogFile.upgrade): oops! second
+	fix to make this behave like other LogFiles, this time to handle
+	existing LogFiles on disk. (add the missing .upgrade method)
+	* buildbot/test/test_status.py (Log.testHTMLUpgrade): test it
+
+2005-05-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_runner.py (Create.testMaster): match the
+	rawstring change in runner.py:masterTAC
+
+	* buildbot/test/test_config.py (ConfigTest.testIRC): skip unless
+	TwistedWords is installed
+	* buildbot/test/test_status.py: same, with TwistedMail
+
+	* buildbot/master.py: remove old IRC/Waterfall imports (used by
+	some old, deprecated, and removed config keys). This should enable
+	you to use the base buildbot functionality with Twisted-2.0.0 when
+	you don't also have TwistedWeb and TwistedWords installed
+
+2005-05-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py (run): call sendchange(), not
+	do_sendchange(): thus 'buildbot sendchange' was broken in 0.6.5
+	(run): call stop("HUP"), not "-HUP", 'buildbot stop' was broken.
+	(stop): don't wait for process to die when sending SIGHUP
+	(masterTAC): use a rawstring for basedir=, otherwise '\' in the
+	directory name gets interpreted, which you don't want
+	(slaveTAC): same
+
+	* buildbot/__init__.py (version): bump to 0.6.5+ while between
+	releases
+
+2005-05-18  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.6.5
+
+2005-05-18  Brian Warner  <warner at lothar.com>
+
+	* README: update for 0.6.5
+	* debian/changelog: same
+
+	* buildbot/changes/changes.py: rename tag= to branch=, since
+	that's how we're using it, and my design for the upcoming "build a
+	specific branch" feature wants it. also, tag= was too CVS-centric
+	* buildbot/changes/mail.py (parseSyncmail): same
+	* buildbot/process/base.py (Build.isBranchImportant): same
+	* buildbot/test/test_mailparse.py (Test3.testMsgS4): same
+	* docs/buildbot.texinfo (Attributes of Changes): same
+
+	* NEWS: update tag=, update for upcoming release
+
+2005-05-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py (stop): actually poll once per
+	second, instead of re-killing the poor daemon once per second.
+	Sleep briefly (0.1s) before the first poll, since there's a good
+	chance we can avoid waiting the full second if the daemon shuts
+	down quickly. Also remove the sys.exit() at the end.
+	(start): remove the unneighborly sys.exit()
+
+	* Makefile: improve permission-setting to not kick Arch so badly
+
+	* buildbot/scripts/runner.py (SlaveOptions.optParameters): set a
+	default --keepalive=600, since it doesn't hurt very much, and it's
+	a hassle to discover that you need it.
+	* buildbot/test/test_runner.py (Create.testSlave): test it
+
+	* buildbot/status/words.py (IrcStatusBot.buildFinished): Teach the
+	IRC bot about EXCEPTION
+
+	* buildbot/status/client.py (PBListener): upcall more correctly
+
+	* buildbot/process/base.py (Build.allStepsDone): if a step caused
+	an exception, mark the overall build with EXCEPTION, not SUCCESS
+
+	* buildbot/scripts/runner.py (makefile_sample): remove the leading
+	newline
+	* buildbot/status/mail.py (MailNotifier): oops, forgot to upcall
+	* Makefile: update some release-related stuff
+
+	* buildbot/slave/commands.py (ShellCommand.kill): if somehow this
+	gets called when there isn't actually an active process, just end
+	the Command instead of blowing up. I don't know how it gets into
+	this state, but the twisted win32 buildslave will sometimes hang,
+	and when it shakes its head and comes back, it thinks it's still
+	running a Command. The next build causes this command to be
+	interrupted, but the lack of self.process.pid breaks the interrupt
+	attempt.
+
+	* NEWS: document changes since the last release
+
+	* buildbot/scripts/runner.py (start): change 'buildbot start' to
+	look for Makefile.buildbot instead of a bare Makefile . The
+	'buildbot start' does not install this file, so you have to
+	manually copy it if you want to customize startup behavior.
+	(createMaster): change 'buildbot master' command to create
+	Makefile.sample instead of Makefile, to create master.cfg.sample
+	instead of master.cfg (requiring you to copy it before the
+	buildmaster can be started). Both sample files are kept up to
+	date, i.e. they are overwritten if they have been changed. The
+	'buildbot.tac' file is *not* overwritten, but if the new contents
+	don't match the old, a 'buildbot.tac.new' file is created and the
+	user is warned. This seems to be a much more sane way to handle
+	startup files. Also, don't sys.exit(0) when done, so we can run
+	unit tests against it.
+	(createSlave): same. Don't overwrite the sample info/ files.
+	* buildbot/scripts/sample.mk: remove. the contents were pulled
+	into runner.py, since they need to match the behavior of start()
+	* setup.py: same
+	* MANIFEST.in: same
+
+	* docs/buildbot.texinfo (Launching the daemons): document it
+	* buildbot/test/test_runner.py (Create): test it
+
+	* buildbot/test/test_vc.py (SetupMixin.failUnlessIn): Add a
+	version that can handle string-in-string tests, because otherwise
+	python-2.2 fails the tests. It'd be tremendous if Trial's test
+	took two strings under 2.2 too.
+
+	* everything: fixed all deprecation warnings when running against
+	Twisted-2.0 . (at least all the ones in buildbot code, there are a
+	few that come from Twisted itself). This involved putting most of
+	the Twisted-version specific code in the new buildbot.twcompat
+	module, and creating some abstract base classes in
+	buildbot.changes.base and buildbot.status.base (which might be
+	useful anyway). __implements__ is a nuisance and requires an ugly
+	'if' clause everywhere.
+
+	* buildbot/test/test_status.py (Mail.testMail): add a 0.1 second
+	delay before finishing the test: it seems that smtp.sendmail
+	doesn't hang up on the server, so we must wait a moment so it can
+	hang up on us. This removes the trial warning about an unclean
+	reactor.
+
+2005-05-16  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (Source): add 'retry' argument. It is a
+	tuple of (delay, repeats).
+	* buildbot/test/test_vc.py (Retry): test it
+	* docs/buildbot.texinfo (Source Checkout): document it
+	* buildbot/slave/commands.py (SourceBase): add 'retry' parameter.
+	(SourceBase.maybeDoVCRetry): If 'retry' is set, failures in
+	doVCFull() are handled by re-trying the checkout (after a delay)
+	some number of times.
+	(ShellCommand._startCommand): make header lines easier to read
+
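+	A sketch of the new argument in a build factory, using the
+	factory.s() helper and placeholder values (the svnurl and the
+	(10, 3) delay/repeats figures are made up):
+
+	  from buildbot.process import factory, step
+	  s = factory.s
+	  f = factory.BuildFactory([
+	      s(step.SVN, svnurl="http://svn.example.org/trunk",
+	        mode="update", retry=(10, 3)),
+	      ])
+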
+	* buildbot/test/test_web.py (WebTest.tearDown): factor out master
+	shutdown
+	(WebTest.test_logfile): make sure master gets shut down, silences
+	some "unclean reactor" test errors
+
+	* buildbot/test/test_changes.py (Sender.tearDown): spin the
+	reactor once after shutdown; otherwise something in certain
+	versions of Twisted triggers a test failure. 1.3.0 is ok, 2.0.0
+	fails, 2.0.1pre fails, svn-trunk is ok.
+
+	* buildbot/test/test_slavecommand.py (Shell.testShellZ): add a
+	second win32 error message
+
+	* buildbot/test/test_run.py (Status.testSlave): be smarter about
+	validating the ETA, so the tests don't fail on slow systems
+
+2005-05-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py (HTMLLogFile): make this behave like
+	the new LogFile class, so upgrading works properly
+	(LogFileProducer.resumeProducing): survive resumeProducing after
+	we've exhausted the chunkGenerator
+
+	* buildbot/test/test_web.py (WebTest.test_logfile): validate HTML
+	logs too
+	* buildbot/test/test_status.py (Log.testAdd): validate hasContents
+	(Log.testUpgrade): same
+
+	* docs/buildbot.texinfo (Maintenance): describe how to delete old
+	Builds and logs with a cron job.
+
+	* buildbot/status/builder.py (LogFile): revamp LogFiles. Got rid
+	of the old non-offline LogFile, added code to upgrade these to
+	new-style contents-live-on-disk instances at load time (in a way
+	that doesn't invalidate the old Build pickles, so upgrading to
+	0.6.5 is not a one-way operation). Got rid of everything related
+	to 'stub' builds.
+	(LogFile.__init__): create LogFiles with the parent step status,
+	the log's name, and a builder-relative filename where it can keep
+	the contents on disk.
+	(LogFile.hasContents): new method, clients are advised to call it
+	before getText or getChunks and friends. If it returns False, the
+	log's contents have been deleted and getText() will raise an
+	error.
+	(LogFile.getChunks): made it a generator
+	(LogFile.subscribeConsumer): new method, takes a Twisted-style
+	Consumer (except one that takes chunks instead of strings). This
+	enables streaming of very large logfiles without storing the whole
+	thing in memory.
+	(BuildStatus.generateLogfileName): create names like
+	12-log-compile-output, with a _0 suffix if required to be unique
+	(BuildStatus.upgradeLogfiles): transform any old-style (from 0.6.4
+	or earlier) logfiles into new-style ones
+	(BuilderStatus): remove everything related to 'stub' builds. There
+	is now only one build cache, and we don't strip logs from old
+	builds anymore.
+	(BuilderStatus.getBuildByNumber): check self.currentBuild too,
+	since we no longer fight to keep it in the cache
+
+	* buildbot/status/html.py (TextLog.render_GET): use a
+	ChunkConsumer to stream the log entries efficiently.
+	(ChunkConsumer): wrapper which consumes chunks and writes
+	formatted HTML.
+
+	* buildbot/test/test_twisted.py (Parse.testParse): use a
+	LogFile-like object instead of a real one
+
+	* buildbot/test/test_status.py (MyLog): handle new LogFile code
+	(Log.testMerge3): validate more merge behavior
+	(Log.testChunks): validate LogFile.getChunks
+	(Log.testUpgrade): validate old-style LogFile upgrading
+	(Log.testSubscribe): validate LogFile.subscribe
+	(Log.testConsumer): validate LogFile.subscribeConsumer
+
+	* buildbot/interfaces.py (IStatusLogStub): remove
+	(IStatusLog.subscribeConsumer): new method
+	(IStatusLog.hasContents): new method
+	(IStatusLogConsumer): describes things passed to subscribeConsumer
+
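+	A sketch of such a consumer (the method names are assumptions
+	modeled on Twisted's producer/consumer convention; treat the chunk
+	format as illustrative):
+
+	  class CountingConsumer:
+	      "Counts how many bytes of log text stream past."
+	      def __init__(self):
+	          self.bytes = 0
+	      def registerProducer(self, producer, streaming):
+	          self.producer = producer
+	      def unregisterProducer(self):
+	          self.producer = None
+	      def writeChunk(self, chunk):
+	          channel, text = chunk
+	          self.bytes += len(text)
+
+	  if logfile.hasContents():
+	      logfile.subscribeConsumer(CountingConsumer())
+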
+	* buildbot/status/html.py (StepBox.getBox): Don't offer an href to
+	the log contents if it does not have any contents.
+	(StatusResourceBuildStep.body): same
+	(StatusResourceBuildStep.getChild): give a 404 for empty logs
+
+2005-05-14  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_web.py (WebTest.test_logfile): add 5-second
+	timeouts to try and make the windows metabuildslave not hang
+
+2005-05-13  Mike Taylor  <bear at code-bear.com>
+
+	* buildbot/slave/commands.py (rmdirRecursive): added a check
+	to ensure the path passed into rmdirRecursive actually exists.
+	On win32 a non-existent path would generate an exception.
+
+2005-05-13  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (rmdirRecursive): replacement for
+	shutil.rmtree which behaves correctly on windows in the face of
+	files that you have to chmod before deleting. Thanks to Bear at
+	the OSAF for the routine.
+	(SourceBase.doClobber): use rmdirRecursive
+
+2005-05-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py (OfflineLogFile.getChunks): have this
+	method generate chunks instead of returning a big list. This
+	allows the same method to be used for both old LogFile and new
+	OfflineLogFile.
+	(OfflineLogFile.getText): use the generator
+	(OfflineLogFile.subscribe): same
+	* buildbot/status/html.py (TextLog.resumeProducing): same
+	* buildbot/interfaces.py (IStatusLog.getChunks): document it
+
+	* buildbot/test/test_web.py (WebTest.test_logfile): Add a test to
+	point out that OfflineLogFile does not currently work with
+	html.Waterfall . Fixing this is high-priority.
+
+	* buildbot/scripts/runner.py (start): add --logfile=twistd.log, since
+	apparently windows defaults to using stdout
+
+	* buildbot/test/test_slavecommand.py (Shell.testShellZ): log a
+	better message on failure so I can figure out the win32 problem
+
+	* buildbot/slave/commands.py (ShellCommand._startCommand): update
+	log messages to include more useful copies of the command being
+	run, the argv array, and the child command's environment.
+	(Git.doVCFull): update cg-close usage, patch from Brandon Philips.
+
+2005-05-11  Brian Warner  <warner at lothar.com>
+
+	* setup.py: oops, install debug.glade so 'buildbot debugclient'
+	will actually work
+	* Makefile: update the deb-snapshot version
+
+	* docs/buildbot.texinfo: move all .xhtml docs into a new
+	.texinfo-format document, adding a lot of material in the process.
+	This is starting to look like a real user's manual. Removed all
+	the Lore-related files: *.xhtml, *.css, template.tpl .
+	* docs/Makefile: simple makefile to run 'makeinfo'
+	* buildbot/scripts/sample.cfg: rearrange slightly
+	* MANIFEST.in: include .info and .texinfo, don't include *.xhtml
+
+2005-05-10  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py (start): Twisted-1.3.0 used a
+	different name for the internal twistw module, handle it.
+
+	* MANIFEST.in: we deleted plugins.tml, so stop shipping it
+	* setup.py: .. and stop trying to install it
+
+	* buildbot/process/step.py (Git): added support for 'cogito' (aka
+	'git'), the new linux kernel VC system (http://kernel.org/git/).
+	Thanks to Brandon Philips for the patch.
+	* buildbot/slave/commands.py (Git): same
+
+2005-05-06  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py (OfflineLogFile): replace the default
+	LogFile with a form that appends its new contents to a disk file
+	as they arrive. The complete log data is never kept in RAM. This
+	is the first step towards handling very large (100MB+) logfiles
+	without choking quite so badly. (The other half is
+	producer/consumer on the HTML pages).
+	(BuildStepStatus.addLog): use OfflineLogFile by default
+	(BuildStatus.getLogfileName): helper code to give the
+	OfflineLogFile a filename to work with
+
+	* buildbot/test/test_status.py (Results.testAddResults): update
+	tests to handle new asserts
+	* buildbot/test/test_vc.py (Patch.doPatch): same
+	* buildbot/test/test_steps.py (BuildStep.setUp): same
+
+2005-05-05  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py (start): if there is no Makefile,
+	launch the app by importing twistd's internals and calling run(),
+	rather than spawning a new twistd process. This stands a much
+	better chance of working under windows.
+	(stop): kill the process with os.kill instead of spawning
+	/bin/kill, again to reduce the number of external programs which
+	windows might not have in the PATH. Also wait up to 5 seconds for
+	the process to go away, allowing things like 'buildbot stop;
+	buildbot start' to be reliable in the face of slow shutdowns.
+
+	* buildbot/master.py (Dispatcher.__getstate__): remove old
+	.tap-related methods
+	(BuildMaster.__getstate__): same
+	(makeService): same
+	* buildbot/slave/bot.py (makeService): same
+	(Options.longdesc): same
+	* buildbot/scripts/runner.py: copy over some old mktap option text
+
+	* buildbot/scripts/runner.py (masterTAC): stop using mktap.
+	'buildbot master' now creates a buildbot.tac file, so there is no
+	longer a create-instance/save/reload sequence. mktap is dead, long
+	live twistd -y.
+	* buildbot/scripts/sample.mk: use twistd -y, not -f
+	* buildbot/test/test_config.py: remove mktap-based test
+	* buildbot/bb_tap.py, buildbot/plugins.tml: delete old files
+	* README: don't reference mktap
+
+	* docs/source.xhtml: document some of the attributes that Changes
+	might have
+
+	* docs/steps.xhtml (Bazaar): document the Bazaar checkout step
+
+	* general: merge in Change(tag=) patch from Thomas Vander Stichele.
+	[org.apestaart at thomas--buildbot/buildbot--cvstag--0-dev--patch-2]
+	* buildbot/changes/changes.py (Change)
+	* buildbot/changes/mail.py (parseSyncmail)
+	* buildbot/test/test_mailparse.py (Test3.getNoPrefix)
+	(Test3.testMsgS5)
+	* buildbot/process/base.py (Build.isTagImportant)
+	(Build.addChange)
+
+
+2005-05-04  Brian Warner  <warner at lothar.com>
+
+	* buildbot/clients/sendchange.py (Sender.send): tear down the PB
+	connection after sending the change, so that unit tests don't
+	complain about sockets being left around
+
+	* buildbot/status/html.py (WaterfallStatusResource.body): fix
+	exception in phase=0 rendering
+	* buildbot/test/test_web.py (WebTest.test_waterfall): test it
+
+	* buildbot/changes/dnotify.py (DNotify.__init__): remove debug msg
+
+	* buildbot/master.py (BuildMaster.loadConfig): finally remove
+	deprecated config keys: webPortnum, webPathname, irc, manholePort,
+	and configuring builders with tuples.
+	* buildbot/test/test_config.py: stop testing compatibility with
+	deprecated config keys
+	* buildbot/test/test_run.py: same
+
+2005-05-03  Brian Warner  <warner at lothar.com>
+
+	* contrib/arch_buildbot.py: survive if there are no logfiles
+	(username): just use a string, os.getlogin isn't reliable
+
+	* buildbot/scripts/runner.py (sendchange): oops, fix the command
+	so 'buildbot sendchange' actually works. The earlier test only
+	covered the internal (non-reactor-running) form.
+
+	* contrib/arch_buildbot.py: utility that can run as an Arch hook
+	script to notify the buildmaster about changes
+
+	* buildbot/scripts/runner.py (sendchange): new command to send a
+	change to a buildbot.changes.pb.PBChangeSource receiver.
+	* buildbot/test/test_changes.py (Sender): test it
+
+	* buildbot/master.py (BuildMaster.startService): mark .readConfig
+	after any reading of the config file, not just when we do it in
+	startService. This makes some tests a bit cleaner.
+
+	* buildbot/changes/pb.py: add some log messages
+
+	* buildbot/process/base.py (Build.startBuild): fix a bug that
+	caused an exception when the build terminated in the very first
+	step.
+	(Build.stepDone): let steps return a status of EXCEPTION. This
+	terminates the build right away, and sets the build's overall
+	status to EXCEPTION too.
+	* buildbot/process/step.py (BuildStep.failed): return a status of
+	EXCEPTION when that is what has happened.
+
+	* buildbot/process/step.py (Arch.computeSourceRevision): finally
+	implement this, allowing Arch-based projects to get precise
+	checkouts instead of always using the latest code
+	(Bazaar): create variant of Arch to let folks use baz instead of
+	tla. Requires a new buildslave too.
+	* buildbot/slave/commands.py (Arch): add 'revision' argument
+	(Bazaar): create variant of Arch that uses baz instead of tla.
+	Remove the code that extracts the archive name from the
+	register-archive output, since baz doesn't provide it, and require
+	the user provide both the archive name and its location.
+	* buildbot/test/test_vc.py (VC.testBazaar): added tests
+
+2005-05-02  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/sample.cfg: improve docs for c['buildbotURL'],
+	thanks to Nick Trout.
+
+	* buildbot/scripts/runner.py (Maker.makefile): chmod before edit,
+	deals better with source Makefile coming from a read-only CVS
+	checkout. Thanks to Nick Trout for the catch.
+
+	* buildbot/__init__.py (version): bump to 0.6.4+ while between
+	releases
+
+2005-04-28  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.6.4
+
+	* debian/changelog: update for 0.6.4
+
+2005-04-28  Brian Warner  <warner at lothar.com>
+
+	* README.w32: add a checklist of steps for getting buildbot
+	running on windows.
+	* MANIFEST.in: include it in the tarball
+
+	* NEWS: update
+
+	* buildbot/master.py (BuildMaster.upgradeToVersion3): deal with
+	broken .tap files from 0.6.3 by getting rid of .services,
+	.namedServices, and .change_svc at load time.
+
+2005-04-27  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update in preparation for new release
+
+	* buildbot/test/test_config.py (Save.testSave): don't pull in
+	twisted.scripts.twistd, we don't need it and it isn't for windows
+	anyway.
+
+	* buildbot/changes/changes.py (ChangeMaster.saveYourself):
+	accommodate win32, which can't do atomic-rename
+
+2005-04-27  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_run.py (Disconnect.testBuild2): crank up some
+	timeouts to help the slow metabuildbot not flunk them so much
+	(Disconnect.testBuild3): same
+	(Disconnect.testBuild4): same
+	(Disconnect.testInterrupt): same
+
+	* buildbot/master.py (BuildMaster.loadChanges): fix change_svc
+	setup, it was completely broken for new buildmasters (those which
+	did not have a 'change.pck' already saved). Thanks to Paul Warren
+	for catching this (embarrassing!) bug.
+	(Dispatcher.__getstate__): don't save our registered avatar
+	factories, since they'll be re-populated when the config file is
+	re-read.
+	(BuildMaster.__init__): add a dummy ChangeMaster, used only by
+	tests (since the real mktap-generated BuildMaster doesn't save
+	this attribute).
+	(BuildMaster.__getstate__): don't save any service children,
+	they'll all be re-populated when the config file is re-read.
+	* buildbot/test/test_config.py (Save.testSave): test for this
+
+2005-04-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/buildbot.png: use a new, smaller (16x16) icon image,
+	rendered with Blender.. looks a bit nicer.
+	* buildbot/docs/images/icon.blend: add the Blender file for it
+
+	* buildbot/slave/commands.py (ShellCommand._startCommand): prepend
+	'cmd.exe' (or rather os.environ['COMSPEC']) to the argv list when
+	running under windows. This appears to be the best way to allow
+	BuildSteps to do something normal like 'trial -v buildbot.test' or
+	'make foo' and still expect it to work. The idea is to make the
+	BuildSteps look as much like what a developer would type when
+	compiling or testing the tree by hand. This approach probably has
+	problems when there are spaces in the arguments, so if you've got
+	windows buildslaves, you'll need to pay close attention to your
+	commands.
+
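+	Roughly the idea (the '/c' switch is an assumption; the entry above
+	only mentions prepending COMSPEC):
+
+	  import os
+	  if os.name == 'nt':
+	      argv = [os.environ['COMSPEC'], '/c'] + list(command)
+	  else:
+	      argv = list(command)
+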
+	* buildbot/status/html.py (WaterfallStatusResource.body): add the
+	timezone to the timestamp column.
+	* buildbot/test/test_web.py (WebTest.test_waterfall): test it
+
+	* buildbot/scripts/runner.py (loadOptions): do something sane for
+	windows, I think. We use %APPDATA%/buildbot instead of
+	~/.buildbot, but we still search everywhere from the current
+	directory up to the root for a .buildbot/ subdir. The "is it under
+	$HOME" security test was replaced with "is it owned by the current
+	user", which is only performed under posix.
+	* buildbot/test/test_runner.py (Options.testFindOptions): update
+	tests to match. The "is it owned by the current user" check is
+	untested. The test has been re-enabled for windows.
+
+	* buildbot/test/test_slavecommand.py (Shell.checkOutput): replace
+	any "\n" in the expected output with the platform-specific line
+	separator. Make this separator "\r\n" on PTYs under unix, they
+	seem to do that and I don't know why
+
+	* buildbot/test/test_runner.py (Options.optionsFile): disable on
+	windows for now, I don't know what ~/.buildbot/ should mean there.
+
+	* buildbot/test/test_run.py (BuilderNames.testGetBuilderNames):
+	win32 compatibility, don't use "/tmp"
+	(Basedir.testChangeBuilddir): remove more unixisms
+
+2005-04-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_control.py (Force.rmtree): python2.2
+	compatibility, apparently its shutil.rmtree ignore_errors=
+	argument is ignored.
+	* buildbot/test/test_run.py (Run.rmtree): same
+	(RunMixin.setUp): same
+
+	* buildbot/test/test_runner.py (make): python2.2 has os.sep but
+	not os.path.sep
+
+	* buildbot/test/test_twisted.py (Parse.failUnlessIn): 2.2 has no
+	'substring in string' operator, must use string.find(substr)!=-1
+	* buildbot/test/test_vc.py (Patch.failUnlessIn): same
+	* buildbot/test/test_web.py (WebTest.failUnlessIn): same
+
+	* buildbot/scripts/runner.py (loadOptions): add code to search for
+	~/.buildbot/, a directory with things like 'options', containing
+	defaults for various 'buildbot' subcommands. .buildbot/ can be in
+	the current directory, your $HOME directory, or anywhere
+	in between, as long as you're somewhere inside your home directory.
+	(debugclient): look in ~/.buildbot/options for master and passwd
+	(statuslog): look in ~/.buildbot/options for 'masterstatus'
+	* buildbot/test/test_runner.py (Options.testFindOptions): test it
+
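+	The options file is plain Python; a sketch using the keys mentioned
+	above (hostnames, ports, and passwords are placeholders):
+
+	  # ~/.buildbot/options
+	  master = "buildmaster.example.org:8007"      # 'buildbot debugclient'
+	  passwd = "debugpassword"
+	  masterstatus = "buildmaster.example.org:9988"  # 'buildbot statuslog'
+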
+	* buildbot/status/client.py (makeRemote): new approach to making
+	IRemote(None) be None, which works under Twisted-2.0
+	* buildbot/test/test_status.py (Client.testAdaptation): test it
+
+	* buildbot/status/builder.py (Status.builderAdded): when loading a
+	pickled BuilderStatus in from disk, set its name after loading.
+	The config file might have changed its name (but not its
+	directory) while it wasn't looking.
+	
+	* buildbot/process/builder.py (Builder.attached): always return a
+	Deferred, even if the builder was already attached
+	* buildbot/test/test_run.py (Basedir.testChangeBuilddir): test it
+
+2005-04-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py (IrcStatusBot.buildFinished): fix a
+	category-related exception when announcing a build has finished
+
+	* buildbot/status/html.py (StatusResourceChanges.body): oops, don't
+	reference no-longer-existent changemaster.sources
+	* buildbot/test/test_web.py (WebTest.test_waterfall): test for it
+
+	* buildbot/__init__.py (version): bump to 0.6.3+ while between
+	releases
+
+2005-04-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.6.3
+
+	* debian/changelog: update for 0.6.3
+
+2005-04-25  Brian Warner  <warner at lothar.com>
+
+	* MANIFEST.in: make sure debug.glade is in the tarball
+
+	* README (REQUIREMENTS): list necessary Twisted-2.0 packages
+
+	* NEWS: update for the imminent 0.6.3 release
+
+	* buildbot/status/html.py (HtmlResource.content): make the
+	stylesheet <link> always point at "buildbot.css".
+	(StatusResource.getChild): map "buildbot.css" to a static.File
+	containing whatever css= argument was provided to Waterfall()
+	(Waterfall): provide the "classic" css as the default.
+	* docs/waterfall.classic.css: move default CSS from here ..
+	* buildbot/status/classic.css: .. to here
+
+	* MANIFEST.in: make sure classic.css is included in the tarball
+	* setup.py: and that it is installed too, under buildbot/status/
+
+	* buildbot/master.py (BuildMaster): oops, set .change_svc=None at
+	the module level, because buildbot.tap files from 0.6.2 don't have
+	it in their attribute dictionary.
+
+	* buildbot/slave/bot.py (Bot.startService): make sure the basedir
+	really exists at startup, might save some confusion somewhere.
+
+2005-04-24  Thomas Vander Stichele  <thomas at apestaart dot org>
+
+	* docs/waterfall.classic.css:
+	  add a stylesheet that's almost the same as the "classic"
+	  buildbot style
+
+	* buildbot/status/builder.py:
+	  add EXCEPTION as a result - this is a problem for the bot
+	  maintainer, not a build problem for the changers
+	* buildbot/process/step.py:
+	  use EXCEPTION instead of FAILURE for exceptions
+	* buildbot/status/html.py:
+	  add build_get_class to get a class out of a build/buildstep
+	  finish naming the classes
+	  split out sourceNames to changeNames and builderNames so we
+	  can style them separately
+	* docs/config.xhtml:
+	  finish documenting classes as they are right now
+
+	* buildbot/status/html.py:
+	  name the classes as we agreed on IRC
+	* docs/config.xhtml:
+	  and document them
+
+	* buildbot/status/html.py:
+	  same for cssclass->class_
+
+	* buildbot/status/html.py:
+	  as decided on IRC, use class_ for the "class" attribute to not
+	  conflict with the class keyword, and clean up the messy **{} stuff.
+
+	* buildbot/status/mail.py:
+	  put back "builders" argument, and fix docstring, because the
+	  code *ignores* builders listed in this argument
+
+	* buildbot/process/builder.py:
+	  remove FIXME notes - category is now indeed a cvar of BuilderStatus
+
+	* docs/config.xhtml:
+	  describe the category argument for builders
+
+	* buildbot/status/builder.py:
+	  Fix a silly bug due to merging
+
+	* buildbot/process/builder.py:
+	  remove category from the process Builder ...
+	* buildbot/status/builder.py:
+	  ... and add it to BuilderStatus instead.
+	  Set category on unpickled builder statuses, they might not have it.
+	* buildbot/master.py:
+	  include category when doing builderAdded
+	* buildbot/status/mail.py:
+	  return None instead of self for builders we are not interested in.
+	* buildbot/test/test_run.py:
+	  fix a bug due to only doing deferredResult on "dummy" waiting
+	* buildbot/test/test_status.py:
+	  add checks for the Mail IStatusReceiver returning None or self
+
+	* buildbot/status/html.py:
+	  fix testsuite by prefixing page title with BuildBot
+
+	* buildbot/status/builder.py:
+	  have .category in builder status ...
+	* buildbot/process/builder.py:
+	  ... and set it from Builder
+	* buildbot/status/html.py:
+	  make .css a class variable 
+	* buildbot/test/test_status.py:
+	  write more tests to cover our categories stuff ...
+	* buildbot/status/mail.py:
+	  ... and fix the bug that this uncovered
+
+	* buildbot/changes/mail.py:
+	* buildbot/changes/pb.py:
+	* buildbot/master.py:
+	* buildbot/process/base.py:
+	* buildbot/process/factory.py:
+	* buildbot/process/interlock.py:
+	* buildbot/process/step.py:
+	* buildbot/process/step_twisted.py:
+	* buildbot/slave/commands.py:
+	* buildbot/status/builder.py:
+	* buildbot/status/client.py:
+	* buildbot/status/html.py:
+	* buildbot/status/mail.py:
+	* buildbot/status/progress.py:
+	* buildbot/test/test_changes.py:
+	* buildbot/test/test_config.py:
+	* buildbot/test/test_control.py:
+	* buildbot/test/test_interlock.py:
+	* buildbot/test/test_maildir.py:
+	* buildbot/test/test_mailparse.py:
+	* buildbot/test/test_run.py:
+	* buildbot/test/test_slavecommand.py:
+	* buildbot/test/test_status.py:
+	* buildbot/test/test_steps.py:
+	* buildbot/test/test_twisted.py:
+	* buildbot/test/test_util.py:
+	* buildbot/test/test_vc.py:
+	* buildbot/test/test_web.py:
+	* buildbot/util.py:
+	  add test-case-name at the top of a whole set of files
+
+	* buildbot/status/builder.py:
+	  keep order of addition when getting builder names
+	* buildbot/status/words.py:
+	* buildbot/test/test_run.py:
+	  add test for getBuilderNames
+
+	* buildbot/process/base.py:
+	* buildbot/process/step.py:
+	* buildbot/status/builder.py:
+	* buildbot/status/html.py:
+	  make buildbot css-able
+	  replace the color code for purple with "purple"; don't understand
+	  why it wasn't purple to start with
+
+	* buildbot/status/words.py:
+	  ok, so it doesn't look like BuilderStatus.remote is still valid.
+	  Use what waterfall uses instead.
+
+	* buildbot/interfaces.py:
+	* buildbot/status/builder.py:
+	* buildbot/status/html.py:
+	* buildbot/status/mail.py:
+	* buildbot/status/words.py:
+	* buildbot/test/test_run.py:
+	  use categories everywhere and make it be a list.  More sensible
+	  for the future.  Also make words actually respect this in
+	  buildFinished.
+
+	* buildbot/interfaces.py:
+	  add category argument to getBuilderNames
+	* buildbot/process/builder.py:
+	* buildbot/status/builder.py:
+	* buildbot/status/html.py:
+	* buildbot/status/mail.py:
+	* buildbot/status/words.py:
+	* buildbot/test/test_run.py:
+	  move from specifying builders by name to specifying the category
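+
+	  (Illustrative sketch only -- the class and method names follow the
+	  entries above, but the bodies are made up: builders carry a
+	  category, and status clients ask for names filtered by it.)
+
+	    class BuilderStatus:
+	        def __init__(self, name, category=None):
+	            self.name = name
+	            self.category = category
+
+	    class Status:
+	        def __init__(self):
+	            # name -> BuilderStatus; relies on dict insertion order
+	            self.builders = {}
+
+	        def getBuilderNames(self, categories=None):
+	            # categories=None returns everything, in order of addition
+	            if categories is None:
+	                return list(self.builders.keys())
+	            return [b.name for b in self.builders.values()
+	                    if b.category in categories]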
+
+	* buildbot/status/html.py:
+	* buildbot/status/words.py:
+	  add "builders=" to __init__ of status clients so they can
+	  limit themselves to the given list of builders to report on
+
+	* buildbot/status/html.py: set the title to the product name
+
+2005-04-23  Thomas Vander Stichele  <thomas at apestaart dot org>
+
+	* buildbot/interfaces.py:
+	* buildbot/status/builder.py:
+	  more documentation.  Hm, not sure if ChangeLog entries make sense
+	  here...
+
+2005-04-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (SetupMixin.do_vc): increase timeouts
+
+	* buildbot/test/test_slavecommand.py (Shell): increase timeouts
+
+	* buildbot/scripts/runner.py: make 'statuslog' and 'statusgui' be
+	the sub-commands that log buildmaster status to stdout and to a
+	GUI window, respectively.
+
+	* buildbot/clients/gtkPanes.py: overhaul. basic two-row
+	functionality is working again, but all the step-status and ETA
+	stuff is missing. Commented out a lot of code pending more
+	overhaul work.
+
+	* buildbot/status/client.py: make sure that IRemote(None) is None
+
+	* buildbot/changes/changes.py: import defer, oops
+	(ChangeMaster): remove the .sources list, rely upon the fact that
+	MultiServices can be treated as sequences of their children. This
+	cleans up the add/remove ChangeSource routines a lot, as we keep
+	exactly one list of the current sources instead of three.
+
+	* buildbot/master.py (BuildMaster.__init__): remove .sources, set
+	up an empty ChangeMaster at init time.
+	(BuildMaster.loadChanges): if there are changes to be had from
+	disk, replace self.change_svc with the new ones. If not, keep
+	using the empty ChangeMaster set up in __init__.
+	(BuildMaster.loadConfig_Sources): use list(self.change_svc)
+	instead of a separate list, makes the code a bit cleaner.
+	* buildbot/test/test_config.py (ConfigTest.testSimple): match it
+	(ConfigTest.testSources): same, also wait for loadConfig to finish.
+	Extend the test to make sure we can get rid of the sources when
+	we're done.
+
+2005-04-22  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py (Maker.mkinfo): create the info/admin
+	and info/host files when making the slave directory
+
+	* buildbot/test/test_run.py (RunMixin.shutdownSlave): remove the
+	whendone= argument, just return the Deferred and let the caller do
+	what they want with it.
+	(Disconnect.testBuild1): wait for shutdownSlave
+	(Basedir.testChangeBuilddir): new test to make sure changes to the
+	builddir actually get propagated to the slave
+
+	* buildbot/slave/bot.py (SlaveBuilder.setBuilddir): use an
+	explicit method, rather than passing the builddir in __init__ .
+	Make sure to update self.basedir too, this was broken before.
+	(Bot.remote_setBuilderList): use b.setBuilddir for both new
+	builders and for ones that have just had their builddir changed.
+	(BotFactory): add a class-level .perspective attribute, so
+	BuildSlave.waitUntilDisconnected won't get upset when the
+	connection hasn't yet been established
+	(BuildSlave.__init__): keep track of the bot.Bot instance, so
+	tests can reach through it to inspect the SlaveBuilders
+
+	* buildbot/process/base.py (Build.buildException): explain the
+	log.err with a log.msg
+	* buildbot/process/builder.py (Builder.startBuild): same
+	(Builder._startBuildFailed): improve error message
+
+	* buildbot/pbutil.py (RBCP.failedToGetPerspective): if the failure
+	occurred because we lost the brand-new connection, retry instead
+	of giving up. If not, it's probably an authorization failure, and
+	it makes sense to stop trying. Make sure we log.msg the reason
+	that we're log.err'ing the failure, otherwise test failures are
+	really hard to figure out.
+
+	* buildbot/master.py: change loadConfig() to return a Deferred
+	that doesn't fire until the change has been fully implemented.
+	This means any connected slaves have been updated with the new
+	builddir. This change makes it easier to test the code which
+	actually implements this builddir-updating.
+	(BotPerspective.addBuilder): return Deferred
+	(BotPerspective.removeBuilder): same
+	(BotPerspective.attached): same
+	(BotPerspective._attached): same. finish with remote_print before
+	starting the getSlaveInfo, instead of doing them in parallel
+	(BotPerspective.list_done): same
+	(BotMaster.removeSlave): same. Fix the typo that meant we weren't
+	actually calling slave.disconnect()
+	(BotMaster.addBuilder): same
+	(BotMaster.removeBuilder): same
+	(BuildMaster.loadConfig): same
+	(BuildMaster.loadConfig_Slaves): same
+	(BuildMaster.loadConfig_Sources): same
+	(BuildMaster.loadConfig_Builders): same
+	(BuildMaster.loadConfig_status): same
+
+	* buildbot/changes/changes.py (ChangeMaster.removeSource): return
+	a Deferred that fires when the source is finally removed
+
+	* buildbot/slave/commands.py (SourceBase.doClobber): when removing
+	the previous tree on win32, where we have to do it synchronously,
+	make sure we return a Deferred anyway.
+	(SourceBase.doCopy): same
+
+	* buildbot/scripts/runner.py (statusgui): use the text client for
+	now, while I rewrite the Gtk one
+	* buildbot/clients/base.py: strip out old code, leaving just the
+	basic print-message-on-event functionality. I also remove the
+	ReconnectingPBClientFactory, but it does at least quit when it
+	loses the connection instead of going silent
+
+2005-04-21  Brian Warner  <warner at lothar.com>
+
+	* Makefile: minor tweaks
+
+	* NEWS: point out deprecation warnings, new features for
+	/usr/bin/buildbot
+
+	* buildbot/master.py (BuildMaster.loadConfig): emit
+	DeprecationWarnings for Builders defined with tuples. Rearrange
+	code to facilitate removal of deprecated configuration keys in the
+	next release.
+
+	* buildbot/scripts/runner.py (createMaster,createSlave): rewrite
+	'buildbot' command to put a little Makefile in the target that
+	helps you re-create the buildbot.tap file, start or stop the
+	master/slave, and reconfigure (i.e. SIGHUP) the master. Also chmod
+	all the files 0600, since they contain passwords.
+	(start): if there is a Makefile, and /usr/bin/make exists, use
+	'make start' in preference to a raw twistd command. This lets
+	slave admins put things like PYTHONPATH variables in their
+	Makefiles and have them still work when the slave is started with
+	'buildbot start ~/slave/foo'. The test is a bit clunky, it would
+	be nice to first try the 'make' command and only fall back to
+	twistd if it fails. TODO: the Makefile's "start" command does not
+	add the --reactor=win32 argument when running under windows.
+	(Options.debugclient, Options.statusgui): add sub-commands to launch
+	the debug client (formerly in contrib/debugclient.py) and the
+	Gtk status application (currently broken)
+	* buildbot/clients/debug.py: move from contrib/debugclient.py
+	* buildbot/clients/debug.glade: same
+
+	* buildbot/test/test_trial.py: remove it. This requires some
+	functionality out of Twisted that isn't there yet, and until then
+	having it around just confuses things.
+
+	* buildbot/test/test_slavecommand.py (Shell): test both with and
+	without PTYs, and make sure that command output is properly
+	interleaved in the with-PTY case. I think the without-PTY test
+	should pass on windows, where we never use PTYs anyway.
+
+2005-04-20  Brian Warner  <warner at lothar.com>
+
+	* README (REQUIREMENTS): mention Twisted-2.0.0 compatibility
+
+	* MANIFEST.in: add epyrun, gen-reference, buildbot.png
+
+	* NEWS: start creating entries for the next release
+
+	* buildbot/slave/commands.py (ShellCommand.__init__): use os.pathsep
+
+	* buildbot/test/test_web.py (WebTest.test_webPortnum): add timeout
+	(WebTest.test_webPathname): same
+	(WebTest.test_webPathname_port): same
+	(WebTest.test_waterfall): use the default favicon rather than
+	rooting around the filesystem for it. Open the expected-icon file
+	in binary mode, to make win32 tests happier (thanks to Nick Trout
+	for the catch)
+	* buildbot/status/html.py (buildbot_icon): win32 portability
+
+	* buildbot/test/test_slavecommand.py (SlaveCommandTestCase.testShellZ):
+	win32-compatibility fixes from Nick Trout, the "file not found" message
+	is different under windows
+	(FakeSlaveBuilder.__init__): clean up setup a bit
+	* buildbot/test/test_vc.py (VCSupport.__init__): win32: use os.pathsep
+
+2005-04-19  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (SetupMixin.setUpClass): fix the
+	skip-if-repositories-are-unavailable test to not kill the trial
+	that comes with Twisted-1.3.0
+
+	* setup.py: install buildbot.png icon file when installing code
+
+	* buildbot/slave/commands.py (ShellCommand._startCommand): log the
+	environment used by the command, at least on the child side.
+
+	* buildbot/status/html.py (TextLog.pauseProducing): add a note,
+	this method needs to be added and implemented because it gets
+	called under heavy load. I don't quite understand the
+	producer/consumer API enough to write it.
+	(StatusResource.getChild): add a resource for /favicon.ico
+	(Waterfall.__init__): add favicon= argument
+	* buildbot/test/test_web.py (WebTest.test_waterfall): test it
+	(WebTest.test_webPortnum): stop using deprecated 'webPortnum'
+	(WebTest.test_webPathname): same
+	(WebTest.test_webPathname_port): same
+	* docs/config.xhtml: mention favicon=
+	* buildbot/buildbot.png: add a default icon, dorky as it is
+
+2005-04-18  Thomas Vander Stichele  <thomas at apestaart dot org>
+
+	* buildbot/master.py:
+	* buildbot/process/base.py:
+	* buildbot/process/builder.py:
+	* buildbot/process/interlock.py:
+	* buildbot/status/builder.py:
+	* buildbot/status/html.py:
+	* buildbot/status/mail.py:
+	* buildbot/status/words.py:
+	  new documentation while digging through the code
+
+2005-04-17  Brian Warner  <warner at lothar.com>
+
+	* general: try to fix file modes on all .py files: a+r, a-x,
+	but let buildbot/clients/*.py be +x since they're tools
+
+	* docs/epyrun (addMod): when an import fails, say why
+
+	* Makefile: Add a 'docs' target, hack on the PYTHONPATH stuff
+
+2005-04-17  Thomas Vander Stichele  <thomas at apestaart dot org>
+
+	* buildbot/process/base.py:
+	* buildbot/process/builder.py:
+	* buildbot/status/builder.py:
+	  new documentation while digging through the code
+
+2005-04-17  Thomas Vander Stichele  <thomas at apestaart dot org>
+
+	* buildbot/changes/changes.py:
+	* buildbot/changes/p4poller.py:
+	* buildbot/interfaces.py:
+	* buildbot/process/base.py:
+	* buildbot/process/builder.py:
+	* buildbot/process/step.py:
+	* buildbot/process/step_twisted.py:
+	* buildbot/slave/bot.py:
+	* buildbot/slave/commands.py:
+	* buildbot/status/builder.py:
+	  fix all docstrings to make epydoc happy.  In the process of fixing
+	  some, I also moved pieces of docs, and removed some deprecated
+	  documentation
+
+2005-04-17  Thomas Vander Stichele  <thomas at apestaart dot org>
+
+	* buildbot/process/builder.py:
+	* buildbot/process/interlock.py:
+	* buildbot/process/process_twisted.py:
+	* buildbot/process/step.py:
+	  BuildProcess -> Build, as it looks like that's what happened
+	* buildbot/process/base.py:
+	* buildbot/process/factory.py:
+	  update epydoc stuff
+
+2005-04-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py (QuickTwistedBuildFactory):
+	update compile command to accommodate the Twisted split.. now
+	instead of './setup.py build_ext -i', you do './setup.py all
+	build_ext -i', to run build_ext over all sub-projects.
+	(FullTwistedBuildFactory): same
+	(TwistedReactorsBuildFactory): same
+
+	* buildbot/status/html.py (TextLog.finished): null out self.req
+	when we're done, otherwise the reference cycle of TextLog to .req
+	to .notifications to a Deferred to TextLog.stop keeps them from
+	being collected, and consumes a huge (610MB on pyramid at last
+	check) amount of memory.
+
+2005-04-11  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (VCSupport.__init__): use abspath() to
+	normalize the VC-repository location.. makes SVN happier with
+	certain test environments.
+
+	* buildbot/process/step.py (RemoteShellCommand.__init__): let each
+	RemoteShellCommand get its own .env dictionary, so that code in
+	start() doesn't mutate the original. I think this should fix the
+	step_twisted.Trial problem where multiple identical components
+	kept getting added to PYTHONPATH= over and over again.
+
+	* general: merge org.apestaart at thomas/buildbot--doc--0--patch-3,
+	adding epydoc-format docstrings to many classes. Thanks to Thomas
+	Vander Stichele for the patches.
+	* docs/epyrun, docs/gen-reference: add epydoc-generating tools
+	* buildbot/status/mail.py, buildbot/process/step_twisted.py: same
+	* buildbot/slave/bot.py, commands.py, registry.py: same
+
+2005-04-05  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (SourceBase.doCopy): use cp -p to
+	preserve timestamps, helps incremental builds of large trees.
+	Patch from Rene Rivera.
+
+	* buildbot/slave/bot.py (SlaveBuilder.commandComplete): oops, log
+	'failure' and not the non-existent 'why'. Thanks to Rene Rivera
+	for the catch.
+
+2005-04-03  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (BuildMaster.loadConfig): only call exec()
+	with one dict, apparently exec has some scoping bugs when used
+	with both global/local dicts. Thanks to Nathaniel Smith for the
+	catch.
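+
+	(A tiny illustration of the single-namespace pattern; the file and
+	variable names here are just examples.  Passing exec() one dict for
+	both globals and locals keeps top-level names visible inside any
+	functions the config file defines.)
+
+	    def load_config(path):
+	        namespace = {}
+	        with open(path) as f:
+	            # one shared namespace: avoids the global/local scoping
+	            # surprises seen when two separate dicts are supplied
+	            exec(f.read(), namespace)
+	        return namespace["BuildmasterConfig"]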
+
+2005-04-02  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (countFailedTests): the new
+	trial in Twisted-2.0 emits a slightly different status line than
+	old trial ("PASSED.." instead of "OK.."). Handle it so we don't
+	mistakenly think the test count is unparseable.
+	(Trial.start): note that for some reason each build causes another
+	copy of self.testpath to be prepended to PYTHONPATH. This needs to
+	be fixed but I'm not sure quite where the problem is.
+
+2005-04-01  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_run.py (Run.testMaster): change some uses of
+	deferredResult to avoid hangs/warnings under twisted-2.0
+	(RunMixin.tearDown): same
+	(RunMixin.shutdownSlave): same
+	(Disconnect.testIdle1): same
+	(Disconnect.testBuild2): same: wait one second after the build
+	finishes for test to really be done.. this should be cleaned up to
+	avoid wasting that second. Builder.detach uses a callLater(0),
+	either that should be done in-line (something else needed that
+	behavior), or it should return a Deferred that fires when the
+	builder is really offline.
+	(Disconnect.testBuild3): same
+	(Disconnect.testDisappear): same
+
+	* buildbot/test/test_web.py: rearrange server-setup and teardown
+	code to remove unclean-reactor warnings from twisted-2.0
+
+	* buildbot/test/test_vc.py: rearrange probe-for-VC-program routine
+	so the tests don't hang under twisted-2.0
+
+2005-03-31  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/bot.py (Bot.remote_setBuilderList): fix typo that
+	caused a warning each time the master changed our set of builders
+
+	* buildbot/status/builder.py (BuildStatus.saveYourself): under
+	w32, don't unlink the file unless it already exists. Thanks to
+	Baptiste Lepilleur for the catch.
+	(BuilderStatus.saveYourself): same
+
+2005-02-01  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/html.py (TextLog.getChild): use a /text child
+	URL, such as http://foo.com/svn-hello/builds/1/test/0/text instead
+	of http://foo.com/svn-hello/builds/1/test/0 , to retrieve the
+	logfile as text/plain (no markup, no headers). This replaces the
+	previous scheme (which used an ?text=1 argument), and gets us back
+	to a relative link (which works better when the buildbot lives
+	behind another web server, such as Apache configured as a reverse
+	proxy). Thanks to Gerald Combs for spotting the problem.
+
+	* buildbot/__init__.py (version): bump to 0.6.2+ while between
+	releases
+
+2004-12-13  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.6.2
+
+	* debian/changelog: update for 0.6.2
+	* NEWS: finalize for 0.6.2
+
+2004-12-11  Brian Warner  <warner at lothar.com>
+
+	* NEWS: bring it up to date
+
+	* buildbot/slave/bot.py (BotFactory): revamp keepalive/lost-master
+	detection code. Require some sign of life from the buildmaster
+	every BotFactory.keepaliveInterval seconds. Provoke this
+	indication at BotFactory.keepaliveTimeout seconds before the
+	deadline by sending a keepalive request. We don't actually care if
+	that request is answered in a timely fashion, what we care about
+	is that .activity() is called before the deadline. .activity() is
+	triggered by any PB message from the master (including an ack to
+	one of the slave's status-update messages). With this new scheme,
+	large status messages over slow pipes are OK, as long as any given
+	message can be sent (and thus acked) within .keepaliveTimeout
+	seconds (which defaults to 30).
+	(SlaveBuilder.remote_startCommand): record activity
+	(SlaveBuilder.ackUpdate): same
+	(SlaveBuilder.ackComplete): same
+	(BotFactory.gotPerspective): same
+	* buildbot/test/test_run.py (Disconnect.testSlaveTimeout): test it
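+
+	(A stripped-down, hypothetical sketch of the keepalive bookkeeping
+	described above, using twisted's timers; the real bot.py re-arms
+	these timers continuously and actually reconnects, this only shows
+	the shape of the scheme.)
+
+	    from twisted.internet import reactor
+
+	    class KeepaliveMixin:
+	        keepaliveInterval = 600  # must see signs of life this often (s)
+	        keepaliveTimeout = 30    # probe this long before the deadline
+
+	        def startTimers(self):
+	            self._deadline = reactor.callLater(self.keepaliveInterval,
+	                                               self.masterIsLost)
+	            reactor.callLater(
+	                self.keepaliveInterval - self.keepaliveTimeout,
+	                self.sendKeepalive)
+
+	        def activity(self):
+	            # any PB message from the master (including an ack of one
+	            # of our own status updates) counts as a sign of life
+	            if self._deadline.active():
+	                self._deadline.reset(self.keepaliveInterval)
+
+	        def sendKeepalive(self):
+	            # the reply is not important; what matters is that *some*
+	            # traffic arrives and calls activity() before the deadline
+	            pass
+
+	        def masterIsLost(self):
+	            pass  # drop the connection and try to re-establish it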
+
+2004-12-09  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/html.py (StatusResourceBuilder.getChild): remove
+	debug message
+
+	* buildbot/process/step_twisted.py (Trial._commandComplete):
+	update self.cmd when we start the 'cat test.log' transfer. Without
+	this, we cannot interrupt the correct RemoteCommand when we lose
+	the connection.
+
+	* buildbot/process/step.py (RemoteCommand.interrupt): don't bother
+	trying to tell the slave to stop the command if we're already
+	inactive, or if we no longer have a .remote
+
+	* buildbot/process/builder.py (Builder._detached): don't let an
+	exception in currentBuild.stopBuild() prevent the builder from
+	being marked offline
+
+2004-12-07  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py (IrcStatusBot.getBuilder): catch the
+	KeyError that happens when you ask for a non-existent Builder, and
+	translate it into a UsageError.
+
+	* buildbot/test/test_run.py (Disconnect.testBuild4): validate that
+	losing the slave in the middle of a remote step is handled too
+
+	* buildbot/process/step.py (ShellCommand.interrupt): 'reason' can
+	be a Failure, so be sure to stringify it before using it as the
+	contents of the 'interrupt' logfile
+	(RemoteCommand.interrupt): use stringified 'why' in
+	remote_interruptCommand too, just in case
+
+2004-12-06  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (Arch.doVCUpdate): use 'tla replay'
+	instead of 'tla update', which is more efficient in case we've
+	missed a couple of patches since the last update.
+
+	* debian/changelog: update for previous (0.6.1) release. Obviously
+	this needs to be handled better.
+
+2004-12-05  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update for stuff since last release
+
+	* buildbot/master.py (DebugPerspective.attached): return 'self', to
+	match the maybeDeferred change in Dispatcher.requestAvatar
+	* buildbot/changes/pb.py (ChangePerspective.attached): same
+	* buildbot/status/client.py (StatusClientPerspective.attached): same
+	* buildbot/process/builder.py (Builder._attached3): same
+	* buildbot/pbutil.py (NewCredPerspective.attached): same
+
+	* buildbot/status/html.py (WaterfallStatusResource.phase2): Add
+	the date to the top-most box, if it is not the same as today's
+	date.
+
+	* docs/slave.xhtml: provide a buildslave setup checklist
+
+	* docs/source.xhtml (Arch): correct terminology
+
+2004-12-04  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_slavecommand.py: use sys.executable instead
+	of hard-coding 'python' for child commands, might help portability
+
+	* docs/examples/twisted_master.cfg: update to current usage
+
+	* buildbot/status/words.py (IrcStatusBot.command_STOP): add a
+	'stop build' command to the IRC bot
+
+	* buildbot/master.py (Dispatcher.requestAvatar): remove debug
+	message that broke PBChangeSource
+
+	* buildbot/slave/bot.py: clean up shutdown/lose-master code
+	(SlaveBuilder): make some attributes class-level, remove the old
+	"update queue" which existed to support resuming a build after the
+	master connection was lost. Try to reimplement that feature later.
+	(SlaveBuilder.stopCommand): clear self.command when the
+	SlaveCommand finishes, so that we don't try to kill a leftover one
+	at shutdown time.
+	(SlaveBuilder.commandComplete): same, merge with commandFailed and
+	.finishCommand
+
+	* buildbot/slave/commands.py (SourceBase): set self.command for
+	all VC commands, so they can be interrupted.
+
+2004-12-03  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py: clean up slave-handling code, to handle
+	slave-disconnect and multiple-connect better
+	(BotPerspective): make these long-lasting, exactly one per bot
+	listed in the config file.
+	(BotPerspective.attached): if a slave connects while an existing
+	one appears to still be connected, disconnect the old one first.
+	(BotPerspective.disconnect): new method to forcibly disconnect a
+	buildslave. Use some hacks to empty the transmit buffer quickly to
+	avoid the long (20-min?) TCP timeout that could occur if the old
+	slave has dropped off the net.
+	(BotMaster): Keep persistent BotPerspectives in .slaves, let them
+	own their own SlaveStatus objects. Remove .attached/.detached, add
+	.addSlave/.removeSlave, treat slaves like Builders (config file
+	parsing sends deltas to the BotMaster). Inform the slave
+	instances, i.e. the BotPerspective, about addBuilder and
+	removeBuilder.
+	(BotMaster.getPerspective): turns into a single dict lookup
+	(Dispatcher.requestAvatar): allow .attached to return a Deferred,
+	which gives BotPerspective.attached a chance to disconnect the old
+	slave first.
+	(BuildMaster.loadConfig): add code (disabled) to validate that all
+	builders use known slaves (listed in c['bots']). The check won't
+	work with tuple-specified builders, which are deprecated but not
+	yet invalid, so the check is disabled for now.
+	(BuildMaster.loadConfig_Slaves): move slave-config into a separate
+	routine, do the add/changed/removed dance with them like we do
+	with builders.
+	(BuildMaster.loadConfig_Sources): move source-config into a
+	separate routine too
+
+	* buildbot/status/builder.py (Status.getSlave): get the
+	SlaveStatus object from the BotPerspective, not the BotMaster.
+
+	* buildbot/test/test_run.py: bunch of new tests for losing the
+	buildslave at various points in the build, handling a slave that
+	connects multiple times, and making sure we can interrupt a
+	running build
+
+	* buildbot/slave/bot.py (BuildSlave): make it possible to use
+	something other than 'Bot' for the Bot object, to make certain
+	test cases easier to write.
+	(BuildSlave.waitUntilDisconnected): utility method for testing
+
+2004-11-30  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_run.py (RunMixin): refactor, remove debug msg
+
+	* buildbot/interfaces.py (IBuilderControl.ping): add timeout=
+	argument, return a Deferred that always fires with True or False.
+	I don't use an errback to indicate 'ping failed' so that callers
+	are free to ignore the deferred without causing spurious errors in
+	the logs.
+	* buildbot/process/builder.py (BuilderControl.ping): implement it
+
+	* buildbot/test/test_run.py (Status.testDisappear): test ping
+	(Status.disappearSlave): fix it
+
+2004-11-30  Brian Warner  <warner at lothar.com>
+
+	* buildbot/interfaces.py (IBuildControl): add .stopBuild
+	(IBuilderControl): add .getBuild(num), only works for the current
+	build, of course, although it might be interesting to offer
+	something for builds in the .waiting or .interlocked state.
+
+	* buildbot/process/base.py (Build): have .stopBuild just do the
+	interrupt, then let the build die by itself.
+	(BuildControl): add .stopBuild, and add a point-event named
+	'interrupt' just after the build so status viewers can tell that
+	someone killed it.
+	(BuilderControl): add .getBuild
+
+	* buildbot/process/step.py (Dummy): use haltOnFailure so it really
+	stops when you kill it, good for testing
+	(ShellCommand.interrupt): add a logfile named 'interrupt' which
+	contains the 'reason' text.
+
+	* buildbot/status/html.py: Add Stop Build button, if the build can
+	still be stopped. Send a Redirect (to the top page) one second
+	later, hopefully long enough for the interrupt to have an effect.
+	Move make_row() up to top-level to share it between Stop Build and
+	Force Build.
+
+	* buildbot/slave/commands.py: only kill the child process once
+
+	* buildbot/test/test_run.py: add testInterrupt
+
+2004-11-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/base.py: Refactor command interruption. The
+	Build is now responsible for noticing that the slave has gone
+	away: Build.lostRemote() interrupts the current step and makes
+	sure that no further ones will be started.
+	
+	* buildbot/process/builder.py: When the initial remote_startBuild
+	message fails, log it: this usually indicates that the slave has
+	gone away, but we don't really start paying attention until they
+	fail to respond to the first step's command.
+
+	* buildbot/process/step.py (RemoteCommand): Does *not* watch for
+	slave disconnect. Now sports a new interrupt() method. Error
+	handling was simplified a lot by chaining deferreds, so
+	remoteFailed/remoteComplete were merged into a single
+	remoteComplete method (which can now get a Failure object).
+	Likewise failed/finished were merged into just _finished.
+	(BuildStep): Add interrupt(why) method, and if why is a
+	ConnectionLost Failure then the step is failed with some useful
+	error text.
+
+	* buildbot/slave/bot.py: stop the current command when the remote
+	Step reference is lost, and when the slave is shut down.
+	(Bot): make it a MultiService, so it can have children. Use
+	stopService to tell when the slave is shutting down.
+	(SlaveBuilder): make it a Service, and a child of the Bot. Add
+	remote_interruptCommand (which asks the current SlaveCommand to
+	stop but allows it to keep emitting status messages), and
+	stopCommand (which tells it to shut up and die).
+
+	* buildbot/slave/commands.py: make commands interruptible
+	(ShellCommand.kill): factor out os.kill logic
+	(Command): factor out setup()
+	(Command.sendStatus): don't send status if .running is false, this
+	happens when the command has been halted.
+	(Command.interrupt): new method, used to tell the command to die
+	(SlaveShellCommand): implement .interrupt
+	(DummyCommand): implement .interrupt
+	(SourceBase, etc): factor out setup(), don't continue substeps if
+	.interrupted is set
+
+	* buildbot/status/builder.py: fix all waitUntilFinished() methods
+	so they can be called after finishing
+
+	* buildbot/test/test_run.py: new tests for disconnect behavior,
+	refactor slave-shutdown routines, add different kinds of
+	slave-shutdown
+
+2004-11-27  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py (IrcStatusBot.convertTime): utility
+	method to express ETA time like "2m45s" instead of "165 seconds"
+
+2004-11-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (VC.testArch): unregister the test
+	archive after the test completes, to avoid cluttering the user's
+	'tla archives' listing with a bogus entry. Arch doesn't happen to
+	provide any way to override the use of ~/.arch-params/, so there
+	isn't a convenient way to avoid touching the setup of the user who
+	runs the test.
+	(VC_HTTP.testArchHTTP): same
+
+2004-11-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/html.py (TextLog): split render() up into
+	render_HEAD and render_GET. Use a Producer when sending log
+	chunks, to reduce memory requirements and avoid sending huge
+	non-Banana-able strings over web.distrib connections. Requires
+	peeking under the covers of IStatusLog.
+	(TextLog.resumeProducing): fix the "as text" link, handle client
+	disconnects that occur while we're still sending old chunks.
+
+	* buildbot/status/builder.py (HTMLLogFile.waitUntilFinished): oops,
+	use defer.succeed, not the non-existent defer.success
+	(LogFile.waitUntilFinished): same
+	(LogFile.subscribe): don't add watchers to a finished logfile
+
+	* buildbot/__init__.py (version): bump to 0.6.1+ while between
+	releases
+
+2004-11-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): Releasing buildbot-0.6.1
+
+2004-11-23  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update for the 0.6.1 release
+	* MANIFEST.in: add new files
+
+	* README (INSTALLATION): explain how to enable the extra VC tests
+
+	* buildbot/status/builder.py (LogFile): add .runEntries at the class
+	level too, so old pickled builds can be displayed ok
+
+2004-11-22  Brian Warner  <warner at lothar.com>
+
+	* NEWS: summarize updates since last release
+
+	* README (SLAVE): fix usage of 'buildbot slave' command. Thanks to
+	Yoz Grahame. Closes SF#1050138.
+
+	* docs/changes.xhtml (FreshCVSSourceNewcred): fix typo. Closes
+	SF#1042563.
+
+	* buildbot/process/step_twisted.py (Trial): update docs a bit
+
+	* docs/factories.xhtml: fix Trial factory docs to match reality.
+	Closes: SF#1049758.
+
+	* buildbot/process/factory.py (Trial.__init__): add args for
+	randomly= and recurse=, making them available to instantiators
+	instead of only to subclassers. Closes: SF#1049759.
+
+2004-11-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py (QuickTwistedBuildFactory):
+	try to teach the Quick factory to use multiple versions of python
+
+2004-11-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py (BuilderStatus.saveYourself): use a
+	safer w32-compatible approach, and only use it on windows
+	(BuildStatus.saveYourself): same
+
+2004-11-11  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py (LogFile.addEntry): smarter way to do
+	it: one string merge per chunk. There are now separate .entries
+	and .runEntries lists: when enumerating over all chunks, make sure
+	to look at both.
+	* buildbot/test/test_status.py (Log): more tests
+
+	* buildbot/status/builder.py (LogFile.addEntry): Merge string
+	chunks together, up to 10kb per chunk. This ought to cut down on
+	the CPU-burning overhead of large log files. Thanks to Alexander
+	Staubo for spotting the problem.
+	* buildbot/test/test_status.py (Log): tests for same
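+
+	(The chunk-merging idea in miniature -- the limit and method bodies
+	are illustrative, and the real LogFile also tracks header/stdout/
+	stderr channels: small writes accumulate in .runEntries and are
+	folded into .entries once they reach the threshold, so readers must
+	walk both lists.)
+
+	    class LogFile:
+	        chunkSize = 10 * 1000   # merge runs of text up to ~10kb
+
+	        def __init__(self):
+	            self.entries = []     # finished, merged chunks
+	            self.runEntries = []  # small chunks still accumulating
+
+	        def addEntry(self, text):
+	            self.runEntries.append(text)
+	            if sum(len(t) for t in self.runEntries) >= self.chunkSize:
+	                # one string join per flushed chunk, not one per write
+	                self.entries.append("".join(self.runEntries))
+	                self.runEntries = []
+
+	        def getText(self):
+	            return "".join(self.entries) + "".join(self.runEntries)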
+
+2004-11-10  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/mail.py (MailNotifier.buildMessage): add a Date
+	header to outbound mail
+	* buildbot/test/test_status.py (Mail.testBuild1): test for same
+
+2004-11-08  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/builder.py (BuilderStatus.saveYourself): w32
+	can't do os.rename() onto an existing file, so catch the exception
+	and unlink the target file first. This introduces a slight window
+	where the existing file could be lost, but the main failure case
+	(disk full) should still be handled safely.
+	(BuildStatus.saveYourself): same
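+
+	(A sketch of that rename dance -- the helper name is made up.  The
+	brief window where the old file is gone but the new one is not yet
+	in place is accepted; the case being guarded against is a
+	half-written pickle on a full disk.)
+
+	    import os
+
+	    def replace_file(tmpfilename, filename):
+	        # the new state has already been written to tmpfilename
+	        try:
+	            os.rename(tmpfilename, filename)
+	        except OSError:
+	            # win32 refuses to rename onto an existing file
+	            if os.path.exists(filename):
+	                os.unlink(filename)
+	            os.rename(tmpfilename, filename)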
+
+	* buildbot/changes/pb.py (ChangePerspective): use a configurable
+	separator character instead of os.sep, because the filenames being
+	split here are coming from the VC system, which can have a
+	different pathname convention than the local host. This should
+	help a buildmaster running on windows that uses a CVS repository
+	which runs under unix.
+	* buildbot/changes/mail.py (MaildirSource): same, for all parsers
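+
+	(Sketch of the configurable separator -- the constructor signature is
+	illustrative, not the real ChangePerspective API: the separator
+	follows the VC system's path convention rather than that of the host
+	running the buildmaster.)
+
+	    class ChangePerspective:
+	        def __init__(self, changemaster, prefix=None, sep="/"):
+	            # sep matches the repository's convention (e.g. "/" for a
+	            # unix CVS server) even if the buildmaster runs on windows
+	            self.changemaster = changemaster
+	            self.prefix = prefix
+	            self.sep = sep
+
+	        def splitFile(self, path):
+	            return path.split(self.sep)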
+
+	* buildbot/process/step_twisted.py (Trial.createSummary): survive
+	when there are no test failures to be parsed
+
+	* buildbot/scripts/runner.py (createMaster): use shutil.copy()
+	instead of the unix-specific os.system("cp"), thanks to Elliot
+	Murphy for this and the other buildbot-vs-windows catches.
+	* buildbot/test/test_maildir.py (MaildirTest.deliverMail): same
+
+	* contrib/windows/buildbot.bat: prefix a '@', apparently to not
+	echo the command as it is run
+
+	* setup.py: install sample.mk too, not just sample.cfg
+	(scripts): install contrib/windows/buildbot.bat on windows
+
+2004-11-07  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/builder.py (Builder._detached): clear the
+	self.currentBuild reference, otherwise the next build will be
+	skipped because we think the Builder is already in use.
+
+	* docs/examples/twisted_master.cfg: update to match current usage
+	on the Twisted buildbot
+
+2004-10-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/mail.py (MailNotifier): fix typo in docs
+
+2004-10-28  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slave/commands.py (SourceBase): refactor subclasses to
+	have separate doVCUpdate/doVCFull methods. Catch an update failure
+	and respond by clobbering the source directory and re-trying. This
+	will handle local changes (like replacing a file with a directory)
+	that will cause CVS and SVN updates to fail.
+	* buildbot/test/test_vc.py (SetupMixin.do_vc): test the same
+
+	* buildbot/process/step.py (LoggedRemoteCommand.__repr__): avoid a
+	python-2.4 warning
+
+2004-10-19  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (Trial.createSummary): bugfixes
+
+	* buildbot/status/html.py (StatusResourceTestResults): display any
+	TestResults that the Build might have
+	(StatusResourceTestResult): and the logs for each TestResult
+	(StatusResourceBuild): add link from the per-build page
+
+2004-10-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (Trial.createSummary): parse
+	the 'problems' portion of stdout, add TestResults to our build
+	* buildbot/test/test_twisted.py (Parse.testParse): test it
+
+	* buildbot/interfaces.py (IBuildStatus.getTestResults): new method
+	to retrieve a dict of accumulated test results
+	(ITestResult): define what a single test result can do
+	* buildbot/status/builder.py (TestResult): implement ITestResult
+	(BuildStatus.getTestResults): retrieve dict of TestResults
+	(BuildStatus.addTestResult): add TestResults
+	* buildbot/test/test_status.py (Results.testAddResults): test it
+
+2004-10-14  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_maildir.py (MaildirTest): use shutil.rmtree
+	instead of os.system("rm -rf") for win32 portability
+
+	* buildbot/test/test_slavecommand.py (SlaveCommandTestCase): use
+	SignalMixin instead of starting/stopping the reactor, which is
+	likely to cause problems with other tests
+
+	* buildbot/slave/commands.py (SourceBase.doCopy): remove leftover
+	self.copyComplete() call. Yoz Grahame makes the catch.
+
+	* contrib/windows/buildbot.bat: helper script to deal with path
+	issues. Thanks to Yoz Grahame.
+
+	* buildbot/master.py (BuildMaster.startService): don't register a
+	SIGHUP handler if the signal module has no SIGHUP attribute.
+	Apparently win32 does this.
+
+	* buildbot/scripts/runner.py (start): add --reactor=win32 on win32
+
+	* buildbot/test/test_web.py (WebTest.test_webPathname): skip the
+	test if the reactor can't offer UNIX sockets
+
+	* buildbot/status/html.py (StatusResourceBuild.body): fix syntax
+	error introduced in the last commit. We really need that
+	metabuildbot :).
+
+2004-10-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/mail.py (MaildirSource.describe): fix exception
+	when describing a maildir source. Thanks to Stephen Davis.
+
+	* buildbot/status/words.py (IrcStatusBot.command_WATCH): round off
+	ETA seconds
+
+	* buildbot/scripts/runner.py (createMaster): install Makefile too
+	(start): add --no_save to 'start' command
+	* buildbot/scripts/sample.mk: simple convenience Makefile with 
+	start/stop/reload targets
+
+	* buildbot/__init__.py (version): bump to 0.6.0+ while between
+	releases
+
+2004-09-30  Brian Warner  <warner at lothar.com>
+
+	* setup.py: Releasing buildbot-0.6.0
+
+2004-09-30  Brian Warner  <warner at lothar.com>
+
+	* MANIFEST.in: add debian/*, sample.cfg, more docs files. Remove
+	test_trial.py from the source tarball until support is complete.
+
+	* NEWS: update for 0.6.0 release
+	* buildbot/__init__.py (version): same
+	* README: same
+
+	* buildbot/status/words.py (IrcStatusBot.command_SOURCE): add
+	'source' command to tell users where to get the Buildbot source
+
+	* docs/examples/*.cfg: update to modern standards
+
+	* NEWS: update for release
+
+	* buildbot/scripts/runner.py (createMaster): remove the
+	-shutdown.tap stuff now that it isn't necessary
+	(createSlave): same
+	(start): launch buildbot.tap, not buildbot-shutdown.tap
+
+
+	* buildbot/status/mail.py (Domain): shorten class name
+	(MailNotifier): if lookup= is a string, pass it to Domain()
+	* buildbot/test/test_status.py (Mail.testBuild1): new class name
+	(Mail.testBuild2): test the string-to-Domain shortcut
+	(Mail.testMail): fix test
+
+
+	* buildbot/scripts/sample.cfg: improve the build-the-buildbot
+	example config file
+
+	* buildbot/status/builder.py (BuildStatus.__setstate__): re-set
+	more attributes on load
+	(BuilderStatus.stubBuildCacheSize): bump to 30, this was too low
+	to accommodate the whole waterfall page at once, and the thrashing
+	results in a lot of unnecessary loads
+	(BuildStatus.saveYourself): use binary pickles, not fluffy text
+	(BuilderStatus.saveYourself): same
+	(BuilderStatus.eventGenerator): stop generating on the first missing
+	build. We assume that saved builds are deleted oldest-first.
+	(BuildStepStatus.__getstate__): .progress might not exist
+
+	* buildbot/changes/changes.py (ChangeMaster): make it
+	serializable, in $masterdir/changes.pck
+	(ChangeMaster.stopService): save on shutdown
+	* buildbot/master.py (BuildMaster.loadChanges): load at startup
+	* buildbot/test/test_config.py: load Changes before config file
+
+
+	* buildbot/slave/commands.py (ShellCommand.doTimeout): put the
+	"Oh my god, you killed the command" header on a separate line
+
+	* buildbot/status/builder.py (BuilderStatus.getStubBuildByNumber):
+	skip over corrupted build pickles
+	(BuilderStatus.getFullBuildByNumber): same
+	(BuilderStatus.eventGenerator): skip over unavailable builds
+	(BuildStatus.saveYourself): save builds to a .tmp file first, then
+	do an atomic rename. This prevents a corrupted pickle when some
+	internal serialization error occurs.
+	(BuilderStatus.saveYourself): same
+
+	* buildbot/slave/commands.py (SlaveShellCommand): oops, restore
+	the timeout for shell commands, it got lost somehow
+
+	* buildbot/status/builder.py (BuilderStatus.eventGenerator): if we
+	run out of build steps, return the rest of the builder events
+
+	* buildbot/interfaces.py (IBuilderControl.ping): add method
+
+	* buildbot/process/builder.py (BuilderControl.ping): move
+	slave-ping to BuilderControl, and fix the failure case in the
+	process (Event.finish() is the verb, Event.finished is the noun).
+
+	* buildbot/status/html.py (StatusResourceBuilder.ping): ping
+	through the BuilderControl instead of the BuilderStatus
+	(EventBox): add adapter for builder.Event, allowing builder events to
+	be displayed in the waterfall display
+
+	* buildbot/master.py (BotMaster.stopService): add a 'master
+	shutdown' event to the builder's log
+	(BuildMaster.startService): and a 'master started' on startup
+
+	* buildbot/status/builder.py (BuilderStatus.eventGenerator): merge
+	builder events into the BuildStep event stream
+	(Status.builderAdded): add a 'builder created' event
+
+
+	* buildbot/status/words.py (IrcStatusBot.command_WATCH): new
+	command to announce the completion of a running build
+	(IrcStatusBot.command_FORCE): announce when the build finishes
+
+	* buildbot/status/builder.py (BuilderStatus.addFullBuildToCache):
+	don't evict unfinished builds from the cache: they must stay in
+	the full-cache until their logfiles have stopped changing. Make
+	sure the eviction loop terminates if an unfinished build was hit.
+	(HTMLLogFile.getTextWithHeaders): return HTML as if it were text.
+	This lets exceptions be dumped in an email status message. Really
+	we need LogFiles which contain both text and HTML, instead of two
+	separate classes.
+	(BuildStatus.__getstate__): handle self.finished=False
+	(Status.builderAdded): if the pickle is corrupted, abandon the
+	history and create a new BuilderStatus object.
+
+	* buildbot/process/base.py (Build.stopBuild): tolerate lack of a
+	self.progress attribute, helped one test which doesn't fully set
+	up the Build object.
+
+	* buildbot/interfaces.py (IStatusLogStub): split out some of the
+	IStatusLog methods into an Interface that is implemented by "stub"
+	logs, for which all the actual text chunks are on disk (in the
+	pickled Build instance). To show the log contents, you must first
+	adapt the stub log to a full IStatusLog object.
+
+	* buildbot/status/builder.py (LogFileStub): create separate stub
+	log objects, which can be upgraded to a real one if necessary.
+	(LogFile): make them persistable, and let them stubify themselves
+	(HTMLLogFile): same
+	(BuildStepStatus): same
+	(BuildStatus): same
+	(BuildStatus.saveYourself): save the whole build out to disk
+	(BuilderStatus): make it persistable
+	(BuilderStatus.saveYourself): save the builder to disk
+	(BuilderStatus.addFullBuildToCache): implement two caches which
+	hold Build objects: a small one which holds full Builds, and a
+	larger one which holds "stubbed" Builds (ones with their LogFiles
+	turned into LogFileStubs). This reduces memory usage by the
+	buildmaster by not keeping more than a few (default is 2) whole
+	build logs in RAM all the time.
+	(BuilderStatus.getBuild): rewrite to pull from disk (through the
+	cache)
+	(BuilderStatus.eventGenerator): rewrite since .builds went away
+	(BuilderStatus.buildStarted): remove the .builds array. Add the
+	build to the "full" cache when it starts.
+	(BuilderStatus._buildFinished): save the build to disk when it
+	finishes
+	(Status): give it a basedir (same as the BuildMaster's basedir)
+	where the builder pickles can be saved
+	(Status.builderAdded): create the BuilderStatus ourselves, by
+	loading a pickle from disk (or creating a new instance if there
+	was none on disk). Return the BuilderStatus so the master can glue
+	it into the new Builder object.
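+
+	(A compressed sketch of that two-level cache; the sizes, names, and
+	the isFinished()/stubify() helpers are illustrative.  A handful of
+	full Builds keep their LogFiles in RAM, a larger ring keeps stubbed
+	Builds, and everything else is re-loaded from its pickle on demand.)
+
+	    class BuildCache:
+	        fullCacheSize = 2    # builds with complete LogFiles in memory
+	        stubCacheSize = 30   # builds whose logs have been stubified
+
+	        def __init__(self, load_from_disk):
+	            self.full = []           # oldest first
+	            self.stubs = []
+	            self.load_from_disk = load_from_disk
+
+	        def addFullBuild(self, build):
+	            self.full.append(build)
+	            while len(self.full) > self.fullCacheSize:
+	                evicted = self.full.pop(0)
+	                if not evicted.isFinished():
+	                    # unfinished builds stay fully loaded: their logs
+	                    # are still changing.  put it back and stop, so
+	                    # the eviction loop always terminates.
+	                    self.full.append(evicted)
+	                    break
+	                evicted.stubify()    # drop log text, keep metadata
+	                self.addStubBuild(evicted)
+
+	        def addStubBuild(self, build):
+	            self.stubs.append(build)
+	            if len(self.stubs) > self.stubCacheSize:
+	                self.stubs.pop(0)    # fall back to the on-disk pickle
+
+	        def getBuild(self, number):
+	            for b in self.full + self.stubs:
+	                if b.number == number:
+	                    return b
+	            return self.load_from_disk(number)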
+
+	* buildbot/master.py (BotMaster.stopService): on shutdown, tell
+	all BuilderStatuses to save themselves out to disk. This is in
+	lieu of saving anything important in the main Application pickle
+	 (the -shutdown.tap file).
+	(BuildMaster.__init__): give Status() a basedir for its files
+	(BuildMaster.loadConfig_Builders): do status.builderAdded first,
+	to get the BuilderStatus, then give it to the Builder (instead of
+	doing it the other way around). It's ok if the status announces
+	the new Builder before it's really ready, as the outside world can
+	only see the BuilderStatus object anyway (and it is ready before
+	builderAdded returns). Use the builder's "builddir" (which
+	normally specifies where the slave will run the builder) as the
+	master's basedir (for saving serialized builds).
+
+	* buildbot/status/html.py (StatusResourceBuildStep.getChild):
+	coerce the logfile to IStatusLog before trying to get the text
+	chunks out of it. This will pull the full (non-stubified) Build in
+	from disk if necessary.
+	(TextLog): fix the adapter registration
+
+	* buildbot/test/test_control.py (Force.setUp): create the basedir
+	* buildbot/test/test_web.py: same
+	* buildbot/test/test_vc.py (SetupMixin.setUp): same
+	* buildbot/test/test_status.py (Mail.makeBuild): match new setup
+	* buildbot/test/test_run.py (Run.testMaster): same
+	(Status.setUp): same
+
+2004-09-29  Fred L. Drake, Jr.  <fdrake at acm.org>
+
+	* buildbot/status/html.py (Waterfall.__init__): store actual
+	allowForce flag passed in rather than using True for everyone;
+	make sure setting it to False doesn't cause a NameError
+	(Waterfall.setup).
+	(StatusResourceBuilder.__init__) add the builder name to the page
+	title.
+	(StatusResourceBuilder.body) move HTML generation for a name/value
+	row into a helper method (StatusResourceBuilder.make_row); only
+	generate the "Force Build" form if allowForce was True and the
+	slave is connected.  Use class attributes in the generated HTML to
+	spread a little CSS-joy.
+
+2004-09-28  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (Trial.createSummary): fix
+	warning-scanner to not ignore things like
+	'ComponentsDeprecationWarning' and 'exceptions.RuntimeWarning'
+ 
+	* buildbot/status/html.py (StatusResource.control): add some
+	class-level values for .control in an attempt to make upgrading
+	smoother
+
+	* buildbot/util.py (ComparableMixin): survive missing attributes,
+	such as when a class is modified and we're comparing old instances
+	against new ones
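+
+	(The "survive missing attributes" idea, sketched: comparison walks a
+	class-level list of attribute names -- buildbot calls it
+	compare_attrs -- and getattr() with a default tolerates old instances
+	created before a newer attribute was added to the class.)
+
+	    class ComparableMixin:
+	        compare_attrs = []   # subclasses list the attributes to compare
+
+	        def _key(self):
+	            return tuple(getattr(self, name, None)
+	                         for name in self.compare_attrs)
+
+	        def __eq__(self, other):
+	            if not isinstance(other, self.__class__):
+	                return NotImplemented
+	            return self._key() == other._key()
+
+	        def __hash__(self):
+	            return hash((self.__class__, self._key()))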
+
+	* buildbot/status/words.py (IrcStatusBot.privmsg): clean up
+	failure handling, remove a redundant try/except block. Don't
+	return the full traceback to the IRC channel.
+	(IrcStatusBot.command_FORCE): catch new exceptions, return useful
+	error messages. Get ETA properly.
+
+	* buildbot/status/html.py (StatusResourceBuild.body): html.escape
+	the reason, since (at least) IRC message will have <> in them.
+	(StatusResourceBuilder.__init__): take an IBuilderControl
+	(StatusResourceBuilder.force): use the IBuilderControl we get in
+	the constructor instead of trying to make our own. Catch the
+	new exceptions and ignore them for now (until we make an
+	intermediate web page where we could show the error message)
+	(StatusResource): create with an IControl, use it to give an
+	IBuilderControl to all children
+	(Waterfall): take an allowForce= option, pass an IControl object
+	to StatusResource if it is True
+
+	* buildbot/test/test_web.py (ConfiguredMaster): handle IControl
+
+	* buildbot/master.py (BotPerspective.perspective_forceBuild):
+	catch new exceptions and return string forms
+
+	* buildbot/interfaces.py: add NoSlaveError, BuilderInUseError
+	* buildbot/process/builder.py (Builder.forceBuild): raise them
+	* buildbot/test/test_control.py (Force.testNoSlave): new test
+	(Force.testBuilderInUse): same
+
+
+	* buildbot/status/words.py (IrcStatusBot): enable build-forcing
+
+	* buildbot/test/test_run.py: use IControl
+	* buildbot/test/test_vc.py: same
+
+	* buildbot/status/html.py (StatusResourceBuilder.force): rewrite
+	to use IControl. Still offline.
+	* buildbot/status/words.py (IrcStatusBot.command_FORCE): same
+
+	* buildbot/process/builder.py (Builder.doPeriodicBuild): set
+	who=None so periodic builds don't send out status mail
+	(Builder.forceBuild): include reason in the log message
+	(BuilderControl.forceBuild): rename 'name' to 'who'
+
+	* buildbot/master.py (BotPerspective.perspective_forceBuild): add
+	'who' parameter, but make it None by default so builds forced by
+	slave admins don't cause status mail to be sent to anybody
+	(BotMaster.forceBuild): same. this method is deprecated.
+	(DebugPerspective.perspective_forceBuild): same, use IControl.
+	(DebugPerspective.perspective_fakeChange): use IControl..
+	(Dispatcher.requestAvatar): .. so don't set .changemaster
+
+	* buildbot/interfaces.py (IBuilderControl.forceBuild): rename 'who'
+	parameter to avoid confusion with the name of the builder
+
+
+	* buildbot/status/mail.py: refine comment about needing 2.3
+
+	* buildbot/status/html.py: move all imports to the top
+
+	* buildbot/test/test_control.py: test new interfaces
+	* buildbot/test/test_run.py (Status): handle new interfaces
+	* buildbot/test/test_vc.py (SetupMixin.doBuild): same
+
+	* buildbot/process/base.py (BuildControl): implement IBuildControl
+	and its lonely getStatus() method
+
+	* buildbot/process/builder.py (BuilderControl): implement
+	IBuilderControl, obtained by adapting the Builder instance
+	(Builder.startBuild): return a BuilderControl instead of a
+	Deferred. The caller can use bc.getStatus().waitUntilFinished() to
+	accomplish the same thing.
+
+	* buildbot/master.py: move all import statements to the top
+	(Control): implement IControl, obtained by adapting the
+	BuildMaster instance.
+
+	* buildbot/interfaces.py: add IControl, IBuilderControl, and
+	IBuildControl. These are used to force builds. Eventually they
+	will provide ways to reconfigure the Builders, pause or abandon a
+	Build, and perhaps control the BuildMaster itself.
+
+2004-09-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/util.py (ComparableMixin): survive twisted>1.3.0 which
+	ends up comparing us against something without a .__class__
+
+2004-09-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/scripts/runner.py: rearrange option parsing a lot, to get
+	usage text right.
+
+	* Makefile: add 'deb-snapshot' target, to create a timestamped
+	.deb package
+
+	* debian/rules (binary-indep): skip CVS/ files in dh_installexamples
+
+2004-09-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/__init__.py (version): move version string here
+	* setup.py: get version string from buildbot.version
+	* buildbot/status/html.py (WaterfallStatusResource.body): add
+	buildbot version to the page footer
+	* buildbot/status/words.py (IrcStatusBot.command_VERSION): provide
+	version when asked
+
+	* buildbot/master.py (BotMaster.getPerspective): detect duplicate
+	slaves, let the second know where the first one is coming from
+	(BuildMaster.__init__): turn on .unsafeTracebacks so the slave can
+	see our exceptions. It would be nice if there were a way to just
+	send them the exception type and value, not the full traceback.
+
+
+	* buildbot/status/mail.py (MailNotifier): add a new argument
+	sendToInterestedUsers=, which can be set to False to disable the
+	usual send-to-blamelist behavior.
+	(top): handle python-2.2 which has no email.MIMEMultipart
+	(MailNotifier.buildMessage): don't send logs without MIMEMultipart
+	(MailNotifier.disownServiceParent): unsubscribe on removal
+
+	* buildbot/test/test_status.py (Mail.testBuild2): test it
+
+
+	* buildbot/status/progress.py (Expectations.wavg): tolerate
+	current=None, which happens when steps start failing badly
+	* buildbot/test/test_status.py (Progress.testWavg): test for it
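+
+	(The tolerance described here, as a standalone sketch -- the real
+	method lives on Expectations and uses its own decay factor: when
+	either figure is missing, fall back to the other instead of raising.)
+
+	    def wavg(old, current, decay=0.5):
+	        # a badly-failing step reports current=None, and a brand-new
+	        # step has no history (old=None)
+	        if old is None:
+	            return current
+	        if current is None:
+	            return old
+	        return (old * (1 - decay)) + (current * decay)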
+
+	* buildbot/process/step.py (SVN.startVC): when the (old) slave
+	doesn't understand args['revision'], emit a warning instead of
+	bailing completely. Updating to -rHEAD is probably close enough.
+
+	* buildbot/process/step_twisted.py (Trial.start): fix sanity-check
+
+	* buildbot/test/test_status.py: at least import bb.status.client
+	even if we don't have any test coverage for it yet
+
+	* contrib/svn_buildbot.py: don't require python2.3
+	(main): wait, do require it (for sets.py), but explain how to
+	make it work under python2.2
+
+2004-09-23  Brian Warner  <warner at lothar.com>
+
+	* contrib/svn_buildbot.py: include the revision number in the Change
+
+	* buildbot/changes/freshcvs.py (FreshCVSSourceNewcred): use
+	when=util.now(), because FreshCVS is a realtime service
+
+	* buildbot/status/event.py: delete dead code
+	* buildbot/process/step.py: don't import dead Event class
+	* buildbot/process/step_twisted.py: same
+	* buildbot/status/builder.py: same
+	* buildbot/status/client.py: same
+
+	* buildbot/test/test_process.py: kill buggy out-of-date disabled test
+
+	* buildbot/changes/changes.py (Change): set .when from an __init__
+	argument (which defaults to now()), rather than having
+	ChangeMaster.addChange set it later.
+	(ChangeMaster.addChange): same
+
+	* buildbot/changes/mail.py (parseFreshCVSMail): pass in when=
+	(parseSyncmail): same. Just use util.now() for now.
+	(parseBonsaiMail): parse the timestamp field for when=
+
+	* buildbot/test/test_vc.py (SourceStamp.addChange): pass in when=
+	instead of setting .when after the fact
+
+2004-09-22  slyphon
+
+	* buildbot/slave/trial.py: new SlaveCommand to machine-parse test
+	results when the target project uses retrial. Still under
+	development.
+	* buildbot/test/test_trial.py: same
+
+2004-09-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/mail.py (MailNotifier.__init__): include
+	success/warnings/failure in the Subject line
+	(MailNotifier.buildMessage): add the buildbot's URL to the body,
+	use step.logname for the addLogs=True attachment filenames
+	* buildbot/test/test_status.py (Mail): test Subject lines
+	(Mail.testLogs): test attachment filenames
+
+	* buildbot/master.py (DebugPerspective.perspective_fakeChange):
+	accept a 'who' argument from the debug tool
+	* contrib/debugclient.py (DebugWidget.do_commit): send 'who'
+	* contrib/debug.glade: add text box to set 'who'
+
+	* buildbot/interfaces.py (IBuildStatus.getBuilder): replace
+	.getBuilderName with .getBuilder().getName(), more flexible
+	(IStatusLog.getName): logs have short names, but you can prefix
+	them with log.getStep().getName() to make them more useful
+	* buildbot/status/builder.py: same
+	* buildbot/status/client.py: same
+	* buildbot/status/html.py: same
+	* buildbot/test/test_run.py (Status.testSlave): same
+	* buildbot/process/step.py: tweak logfile names
+
+	* buildbot/status/mail.py (MailNotifier): add lookup, change
+	argument to extraRecipients. The notifier is now aimed at sending
+	mail to the people involved in a particular build, with additional
+	constant recipients as a secondary function.
+
+	* buildbot/test/test_status.py: add coverage for IEmailLookup,
+	including slow-lookup and failing-lookup. Make sure the blamelist
+	members are included.
+
+	* buildbot/interfaces.py: new interfaces IEmailSender+IEmailLookup
+	(IBuildStatus.getResponsibleUsers): rename from getBlamelist
+	(IBuildStatus.getInterestedUsers): new method
+	* buildbot/status/builder.py (BuildStatus.getResponsibleUsers): same
+	* buildbot/status/client.py (remote_getResponsibleUsers): same
+	* buildbot/status/html.py (StatusResourceBuild.body): same
+	* buildbot/test/test_run.py (Status.testSlave): same
+
+2004-09-20  Brian Warner  <warner at lothar.com>
+
+	* docs/users.xhtml: update concepts
+
+	* Makefile: add a convenience makefile, for things like 'make
+	test'. It is not included in the source tarball.
+
+2004-09-16  Brian Warner  <warner at lothar.com>
+
+	* NEWS: mention /usr/bin/buildbot, debian/*
+
+	* debian/*: add preliminary debian packaging. Many thanks to
+	Kirill Lapshin (and Kevin Turner) for the hard work. I've mangled
+	it considerably since it left their hands, I am responsible for
+	all breakage that's resulted.
+
+	* bin/buildbot: create a top-level 'buildbot' command, to be
+	installed in /usr/bin/buildbot . For now it's just a simple
+	frontend to mktap/twistd/kill, but eventually it will be the entry
+	point to the 'try' command and also a status client. It is also
+	intended to support the upcoming debian-packaging init.d scripts.
+	* buildbot/scripts/runner.py: the real work is done here
+	* buildbot/scripts/__init__.py: need this too
+	* buildbot/scripts/sample.cfg: this is installed in new
+	buildmaster directories
+	* setup.py: install new stuff
+
+2004-09-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py: skip SVN tests if svn can't handle the
+	'file:' scheme (the version shipped with OS-X was built without the
+	ra_local plugin).
+	(SetupMixin.tearDown): stop the goofy twisted.web timer which
+	updates the log-timestamp, to make sure it isn't still running after
+	the test finishes
+
+	* docs/config.xhtml: Add projectName, projectURL, buildbotURL
+	values to the config file.
+	* docs/examples/hello.cfg: add examples
+	* buildbot/interfaces.py (IStatus.getBuildbotURL): define accessors
+	* buildbot/status/builder.py (Status.getProjectURL): implement them
+	* buildbot/master.py (BuildMaster.loadConfig): set them from config
+	* buildbot/test/test_config.py (ConfigTest.testSimple): test them
+	* buildbot/status/html.py (WaterfallStatusResource): display them
+
+
+	* buildbot/test/test_vc.py (FakeBuilder.name): add attribute so
+	certain error cases don't suffer a secondary exception.
+	(top): Skip tests if the corresponding VC tool is not installed.
+
+	* buildbot/process/factory.py (Trial): introduce separate
+	'buildpython' and 'trialpython' lists, since trialpython=[] is
+	what you want to invoke /usr/bin/python, whereas ./setup.py is
+	less likely to be executable. Add env= parameter to pass options
+	to test cases (which is how I usually write tests, I don't know if
+	anyone else does it this way).
+
+	* buildbot/process/step_twisted.py (Trial): handle python=None.
+	Require 'testpath' be a string, not a list. Fix tests= typo.
+	(Trial.start): sanity-check any PYTHONPATH value for stringness.
+
+	* buildbot/process/step.py (RemoteCommand._remoteFailed): goofy
+	way to deal with the possibility of removing the disconnect notify
+	twice.
+	(CVS): add a 'login' parameter to give a password to 'cvs login',
+	commonly used with pserver methods (where pw="" or pw="guest")
+
+	* buildbot/slave/commands.py (SourceBase): move common args
+	extraction and setup() to __init__, so everything is ready by the
+	time setup() is called
+	(CVS.start): call 'cvs login' if a password was supplied
+	(ShellCommand): special-case PYTHONPATH: prepend the master's
+	value to any existing slave-local value.
+
+	* buildbot/process/builder.py (Builder.updateBigStatus): if we
+	don't have a remote, mark the builder as Offline. This whole
+	function should probably go away and be replaced by individual
+	deltas.
+	(Builder.buildFinished): return the results to the build-finished
+	deferred callback, helps with testing
+
+2004-09-14  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py: put all the repositories needed to run
+	the complete tests into a single small (1.3MB) tarball, so I can
+	make that tarball available on the buildbot web site. Test HTTP
+	access (for Arch and Darcs) by spawning a temporary web server
+	while the test runs.
+
+	* docs/users.xhtml: new document, describe Buildbot's limited
+	understanding of different human users
+
+	* buildbot/test/test_vc.py: rearrange test cases a bit
+
+	* buildbot/process/step_twisted.py (Trial): handle testpath=
+	* buildbot/process/factory.py (Trial): update to use step.Trial
+
+	* buildbot/slave/commands.py (ShellCommandPP): fix fatal typo
+
+	* buildbot/status/builder.py (BuildStatus.getText): add text2 to
+	the overall build text (which gives you 'failed 2 tests' rather
+	than just 'failed')
+	(BuildStepStatus.text2): default to [], not None
+
+	* buildbot/process/step_twisted.py (Trial.commandComplete): text2
+	must be a list
+
+2004-09-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (BotPerspective._commandsUnavailable): don't
+	log the whole exception if it's just an AttributeError (old slave)
+
+	* buildbot/process/step.py (ShellCommand.__init__): stash .workdir
+	so (e.g.) sub-commands can be run in the right directory.
+	(ShellCommand.start): accept an optional errorMessage= argument
+	to make life easier for SVN.start
+	(SVN.startVC): put the "can't do mode=export" warning in the LogFile
+	headers
+	(ShellCommand.start): move ['dir'] compatibility hack..
+	(RemoteShellCommand.start): .. to here so everyone can use it
+
+	* buildbot/process/step_twisted.py (Trial): use .workdir
+
+	* buildbot/process/step_twisted.py (BuildDebs.getText): fix the
+	text displayed when debuild fails completely
+	(Trial): snarf _trial_temp/test.log from the slave and display it
+
+2004-09-11  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (ProcessDocs.getText): typo
+
+	* buildbot/process/process_twisted.py (TwistedTrial.tests): oops,
+	set to 'twisted', so --recurse can find twisted/web/test/*, etc
+
+	* buildbot/process/step.py (ShellCommand): call .createSummary
+	before .evaluateCommand instead of the other way around. This
+	makes it slightly easier to count warnings and then use that to
+	set results=WARNINGS
+	* buildbot/process/step_twisted.py: cosmetic, swap the methods
+
+	* buildbot/process/base.py (Build.buildFinished): update status
+	before doing progress. It's embarrassing for the build to be stuck
+	in the "building" state when an exception occurs elsewhere.
+
+	* buildbot/status/progress.py (Expectations.expectedBuildTime):
+	python2.2 doesn't have 'sum'
+
+	* buildbot/status/builder.py (Status.getBuilderNames): return a copy,
+	to prevent clients from accidentally sorting it
+
+	* buildbot/master.py (Manhole): add username/password
+	(BuildMaster.loadConfig): use c['manhole']=Manhole() rather than
+	c['manholePort'], deprecate old usage
+	* docs/config.xhtml: document c['manhole']
+	* docs/examples/hello.cfg: show example of using a Manhole
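+
+	A sketch of the new-style configuration (the port number and
+	credentials are invented, and the argument order is an assumption):
+
+	    from buildbot.master import Manhole
+	    c['manhole'] = Manhole(9999, "admin", "secret")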
+
+
+	* buildbot/test/test_steps.py (FakeBuilder.getSlaveCommandVersion):
+	pretend the slave is up to date
+
+	* buildbot/status/builder.py (BuildStepStatus.stepFinished): 'log',
+	the module, overlaps with 'log', the local variable
+
+	* buildbot/status/html.py: oops, 2.2 needs __future__ for generators
+
+	* buildbot/process/builder.py (Builder.getSlaveCommandVersion):
+	new method to let Steps find out the version of their
+	corresponding SlaveCommand.
+	* buildbot/process/step.py (BuildStep.slaveVersion): utility method
+	(ShellCommand.start): add 'dir' argument for <=0.5.0 slaves
+	(CVS.startVC): backwards compatibility for <=0.5.0 slaves
+	(SVN.startVC): same
+	(Darcs.startVC): detect old slaves (missing the 'darcs' command)
+	(Arch.startVC): same
+	(P4Sync.startVC): same
+
+	* buildbot/process/step.py (LoggedRemoteCommand.start): return the
+	Deferred so we can catch errors in remote_startCommand
+	(RemoteShellCommand.start): same
+
+	* docs/examples/twisted_master.cfg: update sample config file
+
+	* buildbot/slave/commands.py (ShellCommandPP): write to stdin
+	after connectionMade() is called, not before. Close stdin at that
+	point too.
+
+	* buildbot/process/process_twisted.py: update to use Trial, clean
+	up argument passing (move to argv arrays instead of string
+	commands)
+
+	* buildbot/process/step_twisted.py (Trial): new step to replace
+	RunUnitTests, usable by any trial-using project (not just
+	Twisted). Arguments have changed, see the docstring for details.
+
+	* buildbot/process/base.py (Build.startBuild): this now returns a
+	Deferred. Exceptions that occur during setupBuild are now
+	caught better and lead to fewer build_status weirdnesses, like
+	finishing a build that was never started.
+	(Build.buildFinished): fire the Deferred instead of calling
+	builder.buildFinished directly. The callback argument is this
+	Build, everything else can be extracted from it, including the
+	new build.results attribute.
+	* buildbot/process/builder.py (Builder.startBuild): same
+	(Builder.buildFinished): same, extract results from build
+
+	* buildbot/process/step.py (ShellCommands): remove dead code
+
+2004-09-08  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_vc.py (VC.doPatch): verify that a new build
+	doesn't try to use the leftover patched workdir
+	(SourceStamp): test source-stamp computation for CVS and SVN
+
+	* buildbot/slave/commands.py (SourceBase.doPatch): mark the
+	patched workdir ('touch .buildbot-patched') so we don't try to
+	update it later
+	(SourceBase.start): add ['revision'] for all Source steps
+	(CVS): change args: use ['branch'] for -r, remove ['files']
+	(CVS.buildVC): fix revision/branch stuff
+	(SVN): add revision stuff
+
+	* buildbot/process/step.py (BuildStep.__init__): reject unknown
+	kwargs (except 'workdir') to avoid silent spelling errors
+	(ShellCommand.__init__): same
+	(Source): new base class for CVS/SVN/etc. Factor out everything
+	common, add revision computation (perform the checkout with a -D
+	DATE or -r REVISION that gets exactly the sources described by the
+	last Change), overridable with step.alwaysUseLatest. Add patch
+	handling (build.getSourceStamp can trigger the use of a base
+	revision and a patch).
+	(CVS, SVN, Darcs, Arch, P4Sync): refactor, remove leftover arguments
+	* docs/steps.xhtml: update docs
+	* docs/source.xhtml: mention .checkoutDelay
+	* docs/examples/hello.cfg: show use of checkoutDelay, alwaysUseLatest
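+
+	As a rough illustration, a checkout step in a BuildFactory's step
+	list might now be specified like this (the repository values are
+	invented; the (class, kwargs) tuple style follows the factory code):
+
+	    from buildbot.process import step
+	    checkout = (step.CVS,
+	                {"cvsroot": ":pserver:anonymous@cvs.example.org:/cvsroot",
+	                 "cvsmodule": "hello",
+	                 "mode": "update",         # or "clobber", "export", "copy"
+	                 "alwaysUseLatest": False,
+	                 "checkoutDelay": 10})     # see docs/source.xhtml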
+
+	* buildbot/process/base.py (Build.setSourceStamp): add a
+	.sourceStamp attribute to each Build. If set, this indicates that
+	the build should be done with something other than the most
+	recent source tree. This will be used to implement "try" builds.
+	(Build.allChanges): new support method
+	(Build.lastChangeTime): remove, functionality moved to Source steps
+	(Build.setupBuild): copy the Step args before adding ['workdir'],
+	to avoid modifying the BuildFactory (and thus triggering spurious
+	config changes)
+
+
+	* buildbot/status/html.py: rename s/commits/changes/
+	(StatusResourceChanges): same
+	(CommitBox.getBox): same, update URL
+	(WaterfallStatusResource): same
+	(StatusResource.getChild): same
+
+	* contrib/debugclient.py (DebugWidget.do_commit): send .revision
+	* contrib/debug.glade: add optional 'revision' to the fakeChange
+
+	* buildbot/changes/changes.py (html_tmpl): display .revision
+	(ChangeMaster.addChange): note .revision in log
+	* buildbot/changes/pb.py (ChangePerspective.perspective_addChange):
+	accept a ['revision'] attribute
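+
+	For reference, the dictionary handed to perspective_addChange now
+	looks roughly like this (the values are made up, and keys other
+	than 'revision' are assumptions about the existing format):
+
+	    change = {"who": "alice",
+	              "files": ["hello/main.c"],
+	              "comments": "fix off-by-one in greeting",
+	              "revision": "1.4"}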
+
+	* buildbot/process/factory.py (BuildFactory): use ComparableMixin
+
+	* buildbot/master.py (BotMaster.getPerspective): update the
+	.connected flag in SlaveStatus when it connects
+	(BotMaster.detach): and when it disconnects
+	(DebugPerspective.perspective_fakeChange): take a 'revision' attr
+	(BuildMaster.loadConfig_Builders): walk old list correctly
+
+	* buildbot/test/test_config.py: fix prefix= usage
+
+2004-09-06  Brian Warner  <warner at lothar.com>
+
+	* NEWS: mention P4
+
+	* buildbot/changes/p4poller.py (P4Source): New ChangeSource to
+	poll a P4 depot looking for recent changes. Thanks to Dave
+	Peticolas for the contribution. Probably needs some testing after
+	I mangled it.
+
+	* buildbot/process/step.py (P4Sync): simple P4 source-updater,
+	requires manual client setup for each buildslave. Rather
+	experimental. Thanks again to Dave Peticolas.
+	* buildbot/slave/commands.py (P4Sync): slave-side source-updater
+
+	* buildbot/changes/changes.py (Change): add a .revision attribute,
+	which will eventually be used to generate source-stamp values.
+
+	* buildbot/process/step.py (RemoteCommand.start): use
+	notifyOnDisconnect to notice when we lose the slave, then treat it
+	like an exception. This allows LogFiles to be closed and the build
+	to be wrapped up normally. Be sure to remove the disconnect
+	notification when the step completes so we don't accumulate a
+	bazillion such notifications which will fire weeks later (when the
+	slave finally disconnects normally). Fixes SF#915807, thanks to
+	spiv (Andrew Bennetts) for the report.
+	(LoggedRemoteCommand): move __init__ code to RemoteCommand, since it
+	really isn't Logged-specific
+	(LoggedRemoteCommand.remoteFailed): Add an extra newline to the
+	header, since it's almost always going to be appended to an
+	incomplete line
+	* buildbot/test/test_steps.py (BuildStep.testShellCommand1):
+	update test to handle use of notifyOnDisconnect
+
+	* buildbot/status/builder.py (BuilderStatus.currentlyOffline):
+	don't clear .ETA and .currentBuild when going offline, let the
+	current build clean up after itself
+
+	* buildbot/process/builder.py (Builder.detached): wait a moment
+	before doing things like stopping the current build, because the
+	current step will probably notice the disconnect and clean up the
+	build by itself
+	* buildbot/test/test_run.py (Status.tearDown): update test to
+	handle asynchronous build-detachment
+
+	* buildbot/process/base.py (Build.stopBuild): minor shuffles
+
+	* buildbot/status/html.py (WaterfallStatusResource.buildGrid):
+	hush a debug message
+
+2004-09-05  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/maildir.py (Maildir.start): catch an IOError
+	when the dnotify fcntl() fails and fall back to polling. Linux 2.2
+	kernels do this: the fcntl module has the F_NOTIFY constant, but
+	the kernel itself doesn't support the operation. Thanks to Olly
+	Betts for spotting the problem.
+
+	* buildbot/process/step.py (Darcs): new source-checkout command
+	(Arch): new source-checkout command
+	(todo_P4): fix constructor syntax, still just a placeholder
+	* buildbot/test/test_vc.py (VC.testDarcs): test it
+	(VC.testDarcsHTTP): same, via localhost HTTP
+	(VC.testArch): same
+	(VC.testArchHTTP): same
+	* NEWS: mention new features
+
+	* buildbot/slave/commands.py (ShellCommand): add .keepStdout,
+	which tells the step to stash stdout text locally (in .stdout).
+	Slave-side Commands can use this to make decisions based upon the
+	output of the ShellCommand (not just the exit code).
+	(Darcs): New source-checkout command
+	(Arch): New source-checkout command, uses .keepStdout in one place
+	where it needs to discover the archive's default name.
+
+	* docs/steps.xhtml: Document options taken by Darcs and Arch.
+	* docs/source.xhtml: add brief descriptions of Darcs and Arch
+	* docs/examples/hello.cfg: add examples of Darcs and Arch checkout
+
+	* buildbot/process/step.py (ShellCommand.describe): add an
+	alternate .descriptionDone attribute which provides descriptive
+	text when the step is complete. .description can be ["compiling"],
+	for use while the step is running, then .descriptionDone can be
+	["compile"], used alone when the step succeeds or with "failed" when
+	it does not. Updated other steps to use the new text.
+	* buildbot/process/step_twisted.py: same
+	* buildbot/test/test_run.py: update tests to match
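+
+	A small sketch of a hypothetical step subclass using the new
+	attribute (the class, command, and strings are invented; only the
+	attribute names come from this entry):
+
+	    from buildbot.process.step import ShellCommand
+
+	    class MyCompile(ShellCommand):
+	        name = "compile"
+	        command = ["make", "all"]
+	        description = ["compiling"]      # shown while the step runs
+	        descriptionDone = ["compile"]    # shown once it has finished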
+
+2004-08-30  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (ShellCommand.createSummary): fix docs
+	(CVS.__init__): send 'patch' argument to slave
+	(CVS.start): don't create the LoggedRemoteCommand until start(),
+	so we can catch a .patch added after __init__
+	(SVN.__init__): add 'patch' to SVN too
+	(SVN.start): same
+
+	* buildbot/slave/commands.py (ShellCommand): add a 'stdin'
+	argument, to let commands push data into the process' stdin pipe.
+	Move usePTY to a per-instance attribute, and clear it if 'stdin'
+	is in use, since closing a PTY doesn't really affect the process
+	in the right way (in particular, I couldn't run /usr/bin/patch
+	under a pty).
+	(SourceBase.doPatch): handle 'patch' argument
+
+	* buildbot/test/test_vc.py (VC.doPatch): test 'patch' argument for
+	both CVS and SVN
+
+	* buildbot/slave/commands.py (cvs_ver): fix version-parsing goo
+	* buildbot/slave/bot.py (Bot.remote_getCommands): send command
+	versions to master
+	* buildbot/master.py (BotPerspective.got_commands): get command
+	versions from slave, give to each builder
+	* buildbot/process/builder.py (Builder.attached): stash slave
+	command versions in .remoteCommands
+
+	* docs/steps.xhtml: bring docs in-line with reality
+
+	* buildbot/process/step.py (CVS.__init__): more brutal
+	compatibility code removal
+	(SVN.__init__): same
+
+	* buildbot/slave/commands.py (SlaveShellCommand): update docs
+	(SlaveShellCommand.start): require ['workdir'] argument, remove
+	the ['dir'] fallback (compatibility will come later)
+	(SourceBase): update docs
+	(SourceBase.start): remove ['directory'] fallback
+	(CVS): update docs
+	(SVN): update docs
+	* buildbot/test/test_config.py (ConfigTest.testBuilders): update test
+	* buildbot/test/test_steps.py (BuildStep.testShellCommand1): same
+	* buildbot/test/test_slavecommand.py (SlaveCommandTestCase): same
+
+	* buildbot/process/step.py (RemoteShellCommand.__init__): add
+	want_stdout/want_stderr. remove old 'dir' keyword (to simplify the
+	code.. I will figure out 0.5.0-compatibility hooks later)
+
+2004-08-30  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py: rewrite in terms of new
+	BuildFactory base class. It got significantly shorter. Yay
+	negative code days.
+
+	* buildbot/process/step_twisted.py (HLint.start): fix to make it
+	work with the new "self.build isn't nailed down until we call
+	step.start()" scheme: specifically, __init__ is called before the
+	build has decided on which Changes are going in, so we don't scan
+	build.allFiles() for .xhtml files until start()
+	(HLint.commandComplete): use getText(), not getStdout()
+	(RunUnitTests.start): same: don't use .build until start()
+	(RunUnitTests.describe): oops, don't report (None) when using
+	the default reactor
+	(RunUnitTests.commandComplete): use getText()
+	(RunUnitTests.createSummary): same
+	(BuildDebs.commandComplete): same
+
+	* buildbot/process/step.py (RemoteShellCommand.__init__): don't
+	set args['command'] until start(), since our BuildStep is allowed
+	to change their mind up until that point
+	(TreeSize.commandComplete): use getText(), not getStdout()
+
+	* docs/examples/twisted_master.cfg: update to current standards
+
+	* docs/factories.xhtml: update
+	* buildbot/process/factory.py: implement all the common factories
+	described in the docs. The Trial factory doesn't work yet, and
+	I've probably broken all the process_twisted.py factories in the
+	process. There are compatibility classes left in for things like
+	the old BasicBuildFactory, but subclasses of them are unlikely to
+	work.
+	* docs/examples/glib_master.cfg: use new BuildFactories
+	* docs/examples/hello.cfg: same
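+
+	For example, the hello.cfg style of factory setup now looks
+	roughly like this (the repository values are invented):
+
+	    from buildbot.process.factory import BasicBuildFactory
+	    f = BasicBuildFactory(":pserver:anonymous@cvs.example.org:/cvsroot",
+	                          "hello")   # cvsroot, cvsmodule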
+
+	* buildbot/test/test_config.py (ConfigTest.testBuilders): remove
+	explicit 'workdir' args
+
+	* buildbot/process/base.py (BuildFactory): move factories to ..
+	* buildbot/process/factory.py (BuildFactory): .. here
+	* buildbot/process/process_twisted.py: handle move
+	* buildbot/test/test_config.py: same
+	* buildbot/test/test_run.py: same
+	* buildbot/test/test_steps.py: same
+	* buildbot/test/test_vc.py: same
+	* docs/factories.xhtml: same
+
+	* NEWS: mention config changes that require updating master.cfg
+
+	* buildbot/process/base.py (Build.setupBuild): add a 'workdir'
+	argument to all steps that weren't given one already, pointing at
+	the "build/" directory.
+
+	* docs/examples/hello.cfg: remove explicit 'workdir' args
+
+	* docs/factories.xhtml: document standard BuildFactory classes,
+	including a bunch which have not yet been written
+
+2004-08-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/interfaces.py (IBuildStepStatus.getResults): move
+	result constants (SUCCESS, WARNINGS, FAILURE, SKIPPED) to
+	buildbot.status.builder so they aren't quite so internal
+	* buildbot/process/base.py, buildbot/process/builder.py: same
+	* buildbot/process/maxq.py, buildbot/process/step.py: same
+	* buildbot/process/step_twisted.py, buildbot/status/builder.py: same
+	* buildbot/status/mail.py, buildbot/test/test_run.py: same
+	* buildbot/test/test_status.py, buildbot/test/test_vc.py: same
+
+	* buildbot/status/html.py (StatusResourceBuildStep): oops, update
+	to handle new getLogs()-returns-list behavior
+	(StatusResourceBuildStep.getChild): same
+	(StepBox.getBox): same
+	(WaterfallStatusResource.phase0): same
+
+	* docs/source.xhtml: document how Buildbot uses version-control
+	systems (output side: how we get source trees)
+	* docs/changes.xhtml: rename from sources.xhtml, documents VC
+	systems (input side: how we learn about Changes)
+
+	* buildbot/master.py (Manhole): use ComparableMixin
+	* buildbot/changes/freshcvs.py (FreshCVSSourceNewcred): same
+	* buildbot/changes/mail.py (MaildirSource): same
+	* buildbot/status/client.py (PBListener): same
+	* buildbot/status/html.py (Waterfall): same
+	* buildbot/status/words.py (IRC): same
+
+	* NEWS: start describing new features
+
+	* buildbot/status/mail.py (MailNotifier): finish implementation.
+	The message body is still a bit sparse.
+	* buildbot/test/test_status.py (Mail): test it
+
+	* buildbot/util.py (ComparableMixin): class to provide the __cmp__
+	and __hash__ methods I wind up adding everywhere. Specifically
+	intended to support the buildbot config-file update scheme where
+	we compare, say, the old list of IStatusTargets against the new
+	one and don't touch something which shows up on both lists.
+	* buildbot/test/test_util.py (Compare): test case for it
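+
+	Roughly, the intended usage looks like this (the class and its
+	attributes are invented; the compare_attrs name is an assumption):
+
+	    from buildbot.util import ComparableMixin
+
+	    class ExampleTarget(ComparableMixin):
+	        compare_attrs = ["port", "categories"]  # drive __cmp__/__hash__
+
+	        def __init__(self, port, categories=None):
+	            self.port = port
+	            self.categories = categories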
+
+	* buildbot/interfaces.py (IBuildStatus): change .getLogs() to
+	return a list instead of a dict
+	(IBuildStepStatus.getLogs): same. The idea is that steps create
+	logs with vaguely unique names (although their uniqueness is not
+	guaranteed). Thus a compilation step should create its sole
+	logfile with the name 'compile', and contribute it to the
+	BuildStatus. If a step has two logfiles, try to create them with
+	different names (like 'test.log' and 'test.summary'), and only
+	contribute the important ones to the overall BuildStatus.
+	* buildbot/status/builder.py (Event.getLogs): same
+	(BuildStepStatus): fix default .text and .results
+	(BuildStepStatus.addLog): switch to list-like .getLogs()
+	(BuildStepStatus.stepFinished): same
+	(BuildStatus.text): fix default .text
+	(BuildStatus.getLogs): temporary hack to return all logs (from all
+	child BuildStepStatus objects). Needs to be fixed to only report
+	the significant ones (as contributed by the steps themselves)
+	* buildbot/test/test_run.py: handle list-like .getLogs()
+	* buildbot/test/test_steps.py (BuildStep.testShellCommand1): same
+
+2004-08-28  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/builder.py (Builder.attached): serialize the
+	attachment process, so the attach-watcher isn't called until the
+	slave is really available. Add detached watchers too, which makes
+	testing easier.
+
+	* buildbot/test/test_vc.py: test VC modes (clobber/update/etc)
+
+	* buildbot/test/test_swap.py: remove dead code
+
+	* buildbot/slave/commands.py (ShellCommandPP): add debug messages
+	(ShellCommand.start): treat errors in _startCommand/spawnProcess
+	sort of as if the command being run exited with a -1. There may
+	still be some holes in this scheme.
+	(CVSCommand): add 'revision' tag to the VC commands, make sure the
+	-r option appears before the module list
+	* buildbot/process/step.py (CVS): add 'revision' argument
+
+	* buildbot/slave/bot.py (SlaveBuilder._ackFailed): catch failures
+	when sending updates or stepComplete messages to the master, since
+	we don't currently care whether they arrive or not. When we revamp
+	the master/slave protocol to really resume interrupted builds,
+	this will need revisiting.
+	(lostRemote): remove spurious print
+
+	* buildbot/master.py (BotPerspective.attached): serialize the
+	new-builder interrogation process, to make testing easier
+	(BotMaster.waitUntilBuilderDetached): convenience function
+
+	* buildbot/status/builder.py (BuilderStatus): prune old builds
+	(BuildStatus.pruneSteps): .. and steps
+	(BuildStepStatus.pruneLogs): .. and logs
+	(BuilderStatus.getBuild): handle missing builds
+	* buildbot/status/html.py (StatusResourceBuild.body): display build
+	status in the per-build page
+	(BuildBox.getBox): color finished builds in the per-build box
+
+2004-08-27  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/mail.py (MailNotifier): new notification class,
+	not yet finished
+
+	* buildbot/slave/commands.py (SourceBase): refactor SVN and CVS into
+	variants of a common base class which handles all the mode= logic
+
+	* buildbot/interfaces.py (IBuildStatus.getPreviousBuild): add
+	convenience method
+	* buildbot/status/builder.py (BuildStatus.getPreviousBuild): same
+
+2004-08-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_slavecommand.py: accommodate new slavecommand
+	interfaces
+
+	* buildbot/test/test_run.py: update to new Logfile interface, new
+	buildbot.slave modules
+	* buildbot/test/test_steps.py: same, remove Swappable, add timeouts
+
+	* MANIFEST.in: new sample config file
+	* docs/examples/hello.cfg: same
+
+	* buildbot/process/step_twisted.py: remove dead import
+
+	* buildbot/process/step.py (RemoteCommand.run): catch errors
+	during .start
+	(RemoteCommand.remote_update): ignore updates that arrive after
+	we've shut down
+	(RemoteCommand.remote_complete): ignore duplicate complete msgs
+	(RemoteCommand._remoteComplete): cleanup failure handling, reduce
+	the responsibilities of the subclass's methods
+	(BuildStep.failed): catch errors during failure processing
+	(BuildStep.addHTMLLog): provide all-HTML logfiles (from Failures)
+	(CVS): move to a mode= argument (described in docstring), rather
+	than the ungainly clobber=/export=/copydir= combination.
+	(SVN): add mode= functionality to SVN too
+	(todo_Darcs, todo_Arch, todo_P4): placeholders for future work
+
+	* buildbot/process/base.py (Build.startNextStep): catch errors
+	during s.startStep()
+
+	* buildbot/clients/base.py: update to new PB client interface.
+	gtkPanes is still broken
+
+	* buildbot/bot.py, buildbot/slavecommand.py: move to..
+	* buildbot/slave/bot.py, buildbot/slave/commands.py: .. new directory
+	* setup.py: add buildbot.slave module
+	* buildbot/bb_tap.py: handle move
+	* buildbot/slave/registry.py: place to register commands, w/versions
+	* buildbot/slave/bot.py: major simplifications
+	(SlaveBuilder.remote_startCommand): use registry for slave commands,
+	instead of a fixed table. Eventually this will make the slave more
+	extensible. Use 'start' method on the command, not .startCommand.
+	Fix unsafeTracebacks handling (I think).
+	* buildbot/slave/commands.py: major cleanup. ShellCommand is now a
+	helper class with a .start method that returns a Deferred.
+	SlaveShellCommand is the form reached by the buildmaster. Commands
+	which use multiple ShellCommands can just chain them as Deferreds,
+	with some helper methods in Command (_abandonOnFailure and
+	_checkAbandoned) to bail on rc!=0.
+	(CVSCommand): prefer new mode= argument
+	(SVNFetch): add mode= argument
+
+	* buildbot/master.py (DebugPerspective.perspective_forceBuild):
+	put a useful reason string on the build
+
+	* buildbot/status/builder.py (LogFile): do LogFile right: move the
+	core functionality into an IStatusLog object
+	(BuildStatus.sendETAUpdate): don't send empty build-eta messages
+	* buildbot/status/html.py (TextLog): HTML-rendering goes here
+	(StatusResourceBuild.body): use proper accessor methods
+	* buildbot/status/client.py (RemoteLog): PB-access goes here
+	(StatusClientPerspective.perspective_subscribe): add "full" mode,
+	which delivers log contents too
+	(PBListener.__cmp__): make PBListeners comparable, thus removeable
+	* buildbot/status/event.py: remove old Logfile completely
+
+	* buildbot/interfaces.py (IStatusLog.subscribe): make the
+	subscription interface for IStatusLog subscriptions just like all
+	the other status subscriptions
+	(IStatusReceiver.logChunk): method called on subscribers
+
+2004-08-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/builder.py (Builder._pong): oops, ping response
+	includes a result (the implicit None returned by remote_print).
+	Accept it so the _pong method handles the response correctly.
+
+2004-08-06  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_config.py: update IRC, PBListener tests
+
+	* buildbot/status/client.py (StatusClientPerspective): total
+	rewrite to match new IStatus interfaces. New subscription scheme.
+	There are still a few optimizations to make (sending down extra
+	information with event messages so the client doesn't have to do a
+	round trip). The logfile-retrieval code is probably still broken.
+	Moved the PB service into its own port, you can no longer share a
+	TCP socket between a PBListener and, say, the slaveport (this
+	should be fixed eventually).
+	* buildbot/clients/base.py (Client): revamp to match. still needs
+	a lot of work, but basic event reporting works fine. gtkPanes is
+	completely broken.
+
+	* buildbot/status/words.py (IRC): move to c['status']. Each IRC
+	instance talks to a single irc server. Threw out all the old
+	multi-server handling code. Still need to add back in
+	builder-control (i.e. "force build")
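+
+	Roughly, the per-server configuration now looks like this (the
+	host/nick/channel values are invented and the argument names are
+	assumptions):
+
+	    from buildbot.status import words
+	    c['status'].append(words.IRC(host="irc.example.org",
+	                                 nick="hellobot",
+	                                 channels=["#hello-builds"]))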
+
+	* buildbot/status/html.py (StatusResourceBuildStep.body): add some
+	more random text to the as-yet-unreachable per-step page
+
+	* buildbot/status/builder.py (BuildStepStatus.sendETAUpdate):
+	rename to stepETAUpdate
+	(BuildStatus.subscribe): add build-wide ETA updates
+	(BuilderStatus.getState): remove more cruft
+	(BuilderStatus.getCurrentBuild): remove more cruft
+	(BuilderStatus.buildStarted): really handle tuple-subscription
+	* buildbot/test/test_run.py (Status.testSlave): handle the
+	stepETAUpdate rename
+
+	* buildbot/master.py (BuildMaster): don't add a default
+	StatusClientService. Don't add a default IrcStatusFactory. Both
+	are now added through c['status'] in the config file. c['irc'] is
+	accepted for backwards compatibility, the only quirk is you cannot
+	use c['irc'] to specify IRC servers on ports other than 6667.
+
+	* buildbot/interfaces.py (IBuildStatus.getCurrentStep): add method
+	(IStatusReceiver.buildStarted): allow update-interval on subscribe
+	(IStatusReceiver.buildETAUpdate): send build-wide ETA updates
+	(IStatusReceiver.stepETAUpdate): rename since it's step-specific
+
+
+	* buildbot/master.py (BuildMaster.startService): SIGHUP now causes
+	the buildmaster to re-read its config file
+
+
+	* buildbot/test/test_web.py (test_webPortnum): need a new hack to
+	find out the port our server is running on
+	(WebTest.test_webPathname_port): same
+
+	* buildbot/test/test_config.py (testWebPortnum): test it
+	(testWebPathname): ditto
+
+	* docs/config.xhtml: document new c['status'] configuration option
+
+	* buildbot/status/html.py (Waterfall): new top-level class which
+	can be added to c['status']. This creates the Site as well as the
+	necessary TCPServer/UNIXServer. It goes through the BuildMaster,
+	reachable as .parent, for everything.
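+
+	For illustration, a master.cfg fragment using this (the http_port
+	argument name is an assumption; the port number is arbitrary):
+
+	    from buildbot.status import html
+	    c['status'] = [html.Waterfall(http_port=8080)]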
+
+	* buildbot/master.py (Manhole): make it a normal service Child
+	(BuildMaster.loadConfig_status): c['status'] replaces webPortnum and
+	webPathname. It will eventually replace c['irc'] and the implicit
+	PB listener as well. c['webPortnum'] and c['webPathname'] are left
+	in as (deprecated) backward compatibility hooks for now.
+
+
+	* buildbot/process/builder.py (Builder.buildFinished): don't
+	inform our builder_status about a finished build, as it finds out
+	through its child BuildStatus object
+
+	* buildbot/status/html.py: extensive revamp. Use adapters to make
+	Boxes out of BuildStepStatus and friends. Acknowledge that Steps
+	have both starting and finishing times and adjust the waterfall
+	display accordingly, using spacers if necessary. Use SlaveStatus
+	to get buildslave info.
+	(StatusResourceBuildStep): new just-one-step resource, used to get
+	logfiles. No actual href to it yet.
+
+	* buildbot/status/event.py (Logfile.doSwap): disable Swappable for
+	the time being, until I get the file-naming scheme right
+
+	* buildbot/status/builder.py (Event): clean started/finished names
+	(BuildStatus.isFinished): .finished is not None is the right test
+	(BuildStatus.buildStarted): track started/finished times ourselves
+	(BuilderStatus.getSlave): provide access to SlaveStatus object
+	(BuilderStatus.getLastFinishedBuild): all builds are now in
+	.builds, even the currently-running one. Accommodate this change.
+	(BuilderStatus.eventGenerator): new per-builder event generator.
+	Returns BuildStepStatus and BuildStatus objects, since they can
+	both be adapted as necessary.
+	(BuilderStatus.addEvent): clean up started/finished attributes
+	(BuilderStatus.startBuild,finishBuild): remove dead code
+	(SlaveStatus): new object to provide ISlaveStatus
+
+	* buildbot/process/step.py (ShellCommand.getColor): actually
+	return the color instead of setting it ourselves
+	(CVS.__init__): pull .timeout and .workdir options out of
+	**kwargs, since BuildStep will ignore them. Without this neither
+	will be sent to the slave correctly.
+	(SVN.__init__): same
+
+	* buildbot/process/builder.py (Builder): move flags to class-level
+	attributes
+	(Builder.attached): remove .remoteInfo, let the BotPerspective and
+	SlaveStatus handle that
+
+	* buildbot/process/base.py (Build.firstEvent): remove dead code
+	(Build.stopBuild): bugfix
+
+	* buildbot/changes/pb.py (PBChangeSource.describe): add method
+
+	* buildbot/changes/changes.py (Change): add IStatusEvent methods
+	(ChangeMaster.eventGenerator): yield Changes, since there are now
+	Adapters to turn them into HTML boxes
+
+	* buildbot/master.py (BotMaster): track SlaveStatus from BotMaster
+	(BotPerspective.attached): feed a SlaveStatus object
+	(BuildMaster.loadConfig): add a manhole port (debug over telnet)
+	(BuildMaster.loadConfig_Builders): give BuilderStatus a parent
+
+	* buildbot/interfaces.py: API additions
+	(ISlaveStatus): place to get slave status
+
+2004-08-04  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slavecommand.py (DummyCommand.finished): send rc=0 when
+	the delay finishes, so the step is marked as SUCCESS
+
+	* buildbot/test/test_run.py (Status.testSlave): cover more of
+	IBuildStatus and IBuildStepStatus
+
+	* buildbot/status/progress.py (StepProgress): move some flags to
+	class-level attributes
+	(StepProgress.remaining): if there are no other progress metrics
+	to go by, fall back to elapsed time
+	(StepProgress.setExpectations): take a dict of metrics instead of
+	a list
+	(BuildProgress.setExpectationsFrom): pull expectations from the
+	Expectations, instead of having it push them to the BuildProgress
+	(Expectations): move some flags to class-level attributes
+	(Expectations.__init__): copy per-step times from the
+	BuildProgress too
+	(Expectations.expectedBuildTime): new method for per-build ETA
+
+	* buildbot/status/event.py (Logfile): move some flags to
+	class-level attributes
+	(Logfile.logProgressTo): better method name, let step set the
+	progress axis name (instead of always being "output")
+
+	* buildbot/status/builder.py (BuildStepStatus.getTimes): track the
+	times directly, rather than depending upon the (possibly missing)
+	.progress object. Use 'None' to indicate "not started/finished
+	yet"
+	(BuildStepStatus.getExpectations): oops, return the full list of
+	expectations
+	(BuilderStatus._buildFinished): append finished builds to .builds
+
+	* buildbot/process/step.py (BuildStep): add separate .useProgress
+	flag, since empty .progressMetrics[] still implies that time is a
+	useful predictor
+	(CVS): set up the cmd in __init__, instead of waiting for start()
+
+	* buildbot/process/base.py (Build.startBuild): disable the 'when'
+	calculation, this will eventually turn into a proper sourceStamp
+	(Build.setupBuild): tell the Progress to load from the Expectations,
+	instead of having the Expectations stuff things into the Progress
+	(Build.buildException): add a build-level errback to make sure the
+	build's Deferred fires even in case of exceptions
+
+	* buildbot/master.py (BotMaster.forceBuild): convey the reason into
+	the forced build
+	* buildbot/process/builder.py (Builder.forceBuild): convey the
+	reason instead of creating a fake Change
+
+	* docs/examples/twisted_master.cfg: update to match reality
+
+	* buildbot/test/test_config.py, buildbot/test/test_process.py:
+	* buildbot/test/test_run.py, buildbot/test/test_steps.py:
+	fix or remove broken/breaking tests
+
+	* buildbot/status/event.py (Logfile.__len__): remove evil method
+
+	* buildbot/status/builder.py (BuildStepStatus.stepStarted): tolerate
+	missing .build, for test convenience
+
+	* buildbot/process/step_twisted.py: import fixes
+
+	* buildbot/process/step.py (BuildStep.failed): exception is FAILURE
+
+	* buildbot/master.py (BuildMaster.loadConfig_Builders): leftover
+	.statusbag reference
+
+	* buildbot/bot.py (BuildSlave.stopService): tear down the TCP
+	connection at shutdown, and stop it from reconnecting
+
+	* buildbot/test/test_run.py (Run.testSlave): use a RemoteDummy to
+	chase down remote-execution bugs
+
+	* buildbot/process/step.py: more fixes, remove
+	BuildStep.setStatus()
+	* buildbot/status/builder.py: move setStatus() functionality into
+	BuildStatus.addStep
+	* buildbot/status/event.py: minor fixes
+
+2004-08-03  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/base.py, buildbot/process/builder.py
+	* buildbot/process/step.py, buildbot/status/builder.py
+	* buildbot/status/event.py, buildbot/test/test_run.py:
+	fix status delivery, get a basic test case working
+	* buildbot/master.py: finish implementing basic status delivery,
+	temporarily disable HTML/IRC/PB status sources
+
+	* buildbot/bot.py (Bot.remote_setBuilderList): remove debug noise
+
+	* buildbot/status/progress.py (BuildProgress): remove dead code
+
+	* buildbot/interfaces.py
+	* buildbot/process/base.py, buildbot/process/builder.py
+	* buildbot/process/step.py, buildbot/process/step_twisted.py
+	* buildbot/status/builder.py: Complete overhaul of the all
+	status-delivery code, unifying all types of status clients (HTML,
+	IRC, PB). See interfaces.IBuildStatus for an idea of what it will
+	look like. This commit is a checkpointing of the work-in-progress:
+	the input side is mostly done (Builders/Builds sending status
+	to the BuilderStatus/BuildStatus objects), but the output side has
+	not yet been started (HTML resources querying BuilderStatus
+	objects). Things are probably very broken right now and may remain
+	so for several weeks; I apologize for the disruption.
+
+	* buildbot/status/event.py: add a setHTML method to use pre-rendered
+	HTML as the log's contents. Currently used for exception tracebacks.
+	* buildbot/status/progress.py: minor spelling changes
+
+2004-08-02  Brian Warner  <warner at lothar.com>
+
+	* docs/config.xhtml: XHTML fixes, makes raw .xhtml files viewable
+	in mozilla. Also added stylesheets copied from Twisted's docs.
+	Remember that these files are meant to be run through Lore first.
+	Thanks to Philipp Frauenfelder for the fixes.
+	* docs/factories.xhtml, docs/sources.xhtml, docs/steps.xhtml: same
+	* docs/stylesheet-unprocessed.css, docs/stylesheet.css: same
+	* docs/template.tpl: added a Lore template
+
+2004-07-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/interfaces.py: revamp status delivery. This is the
+	preview: these are the Interfaces that will be provided by new
+	Builder code, and to which the current HTML/IRC/PB status
+	displayers will be adapted.
+
+	* buildbot/slavecommand.py (ShellCommand.start): look for .usePTY
+	on the SlaveBuilder, not the Bot.
+	* buildbot/bot.py (Bot.remote_setBuilderList): copy Bot.usePTY to
+	SlaveBuilder.usePTY
+	* buildbot/test/test_slavecommand.py (FakeSlaveBuilder.usePTY):
+	set .usePTY on the FakeSlaveBuilder
+
+2004-07-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/freshcvs.py: add some debug log messages
+	(FreshCVSConnectionFactory.gotPerspective): pre-emptively fix the
+	disabled 'setFilter' syntax
+	(FreshCVSSourceNewcred.__init__): warn about prefix= values that
+	don't end with a slash
+
+	* buildbot/process/base.py (Builder._pong_failed): add TODO note
+
+	* setup.py: bump to 0.5.0+ while between releases
+
+2004-07-23  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): Releasing buildbot-0.5.0
+
+2004-07-23  Brian Warner  <warner at lothar.com>
+
+	* README: update for 0.5.0 release
+
+	* NEWS: update for 0.5.0 release
+
+2004-07-22  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slavecommand.py (ShellCommand): make usePTY a
+	mktap-time configuration flag (--usepty=1, --usepty=0)
+	* buildbot/bot.py: same
+
+	* buildbot/master.py (BotPerspective.got_dirs): don't complain about
+	an 'info' directory being unwanted
+
+	* buildbot/changes/freshcvs.py (FreshCVSSource): flip the
+	newcred/oldcred switch. Newcred (for CVSToys-1.0.10 and later) is now
+	the default. To communicate with an oldcred daemon (CVSToys-1.0.9
+	and earlier), use a FreshCVSSourceOldcred instead.
+	(test): simple test routine: connect to server, print changes
+
+	* buildbot/changes/changes.py (Change.getTime): make it possible
+	to print un-timestamped changes
+
+	* buildbot/master.py (makeApp): delete ancient dead code
+	(BuildMaster.loadTheConfigFile): make "master.cfg" name configurable
+	* buildbot/test/test_config.py (testFindConfigFile): test it
+
+	* docs/examples/twisted_master.cfg (b22w32): use iocp reactor
+	instead of the win32 one
+
+
+	* buildbot/master.py (BuildMaster.loadConfig_Builders): config file
+	now takes a dictionary instead of a tuple. See docs/config.xhtml for
+	details.
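+
+	An illustrative fragment of the new builder specification (the
+	names and the factory variable are invented; the key names are
+	assumptions based on docs/config.xhtml):
+
+	    c['builders'] = [
+	        {"name": "full-linux",      # builder name
+	         "slavename": "bot1",       # which buildslave runs it
+	         "builddir": "full-linux",  # working directory for the build
+	         "factory": f},             # a BuildFactory defined earlier
+	    ]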
+
+	* buildbot/process/base.py (Builder.__init__): change constructor
+	to accept a dictionary of config data, rather than discrete
+	name/slave/builddir/factory arguments
+
+	* docs/examples/twisted_master.cfg: update to new syntax
+	* docs/examples/glib_master.cfg: same
+	* buildbot/test/test_config.py (ConfigTest.testBuilders): some
+	rough tests of the new syntax
+
+	
+	* buildbot/master.py (BuildMaster.loadConfig): allow webPathname
+	to be an int, which means "run a web.distrib sub-server on a TCP
+	port". This lets you publish the buildbot status page to a remote
+	twisted.web server (using distrib.ResourceSubscription). Also
+	rename the local attributes used to hold these web things so
+	they're more in touch with reality.
+	* buildbot/test/test_web.py: test webPortnum and webPathname
+	* docs/config.xhtml: document this new use of webPathname
+
+	* docs/config.xhtml: new document, slightly ahead of reality
+	
+	* buildbot/changes/freshcvs.py (FreshCVSSourceNewcred.notify): fix
+	'prefix' handling: treat it as a simple string to check with
+	.startswith, instead of treating it as a directory. This allows
+	sub-directories to be used. If you use prefix=, you should give it
+	a string that starts just below the CVSROOT and ends with a slash.
+	This prefix will be stripped from all filenames, and filenames
+	which do not start with it will be ignored.
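+
+	A sketch of the intended usage (the connection arguments are
+	invented and their order is an assumption; only the trailing-slash
+	prefix= convention comes from this entry):
+
+	    from buildbot.changes.freshcvs import FreshCVSSourceNewcred
+	    source = FreshCVSSourceNewcred("cvs.example.org", 4519,
+	                                   "foo", "bar",
+	                                   prefix="Hello/src/")
+	    c['sources'].append(source)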
+
+2004-07-20  Cory Dodt  <corydodt at twistedmatrix.com>
+
+	* contrib/svn_buildbot.py: Add --include (synonym for --filter)
+	and --exclude (inverse of --include).  SVN post-commit hooks
+	now have total control over which changes get sent to buildbot and which
+	do not.
+
+2004-07-10  Brian Warner  <warner at lothar.com>
+
+	* buildbot/test/test_twisted.py (Case1.testCountFailedTests): fix
+	test case to match new API
+
+	* buildbot/status/event.py (Logfile.getEntries): fix silly bug
+	which crashed HTML display when self.entries=[] (needed to
+	distinguish between [], which means "no entries yet", and None,
+	which means "the entries have been swapped out to disk, go fetch
+	them").
+
+2004-07-04  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (countFailedTests): Count
+	skips, expectedFailures, and unexpectedSuccesses. Start scanning
+	10kb from the end because any import errors are wedged there and
+	they would make us think the test log was unparseable.
+	(RunUnitTests.finishStatus): add skip/todo counts to the event box
+
+2004-06-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (RemovePYCs): turn the
+	delete-*.pyc command into an actual BuildStep, so we can label it
+	nicely
+	* buildbot/process/process_twisted.py (QuickTwistedBuildFactory):
+	(FullTwistedBuildFactory): same
+
+2004-06-25  Cory Dodt  <corydodt at twistedmatrix.com>
+
+	* contrib/fakechange.py: Add an errback when sending the fake 
+	change, so we know if it didn't work.
+
+2004-06-25  Christopher Armstrong  <radix at twistedmatrix.com>
+
+	* buildbot/process/step_twisted.py: Delete *.pyc files before
+	calling trial, so it doesn't catch any old .pyc files whose .py
+	files have been moved or deleted.
+
+	* buildbot/process/step_twisted.py (RunUnitTests): 1) Add a new
+	parameter, 'recurse', that passes -R to trial. 2) have 'runAll'
+	imply 'recurse'. 3) Make the default 'allTests' be ["twisted"]
+	instead of ["twisted.test"], so that the end result is "trial -R
+	twisted".
+
+	* contrib/svn_buildbot.py: Add a --filter parameter that accepts a
+	regular expression to match filenames that should be ignored when
+	changed. Also add a --revision parameter that specifies the
+	revision to examine, which is useful for debugging.
+
+2004-06-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (trialTextSummarizer): create a
+	summary of warnings (like DeprecationWarnings), next to the
+	"summary" file
+
+2004-05-13  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: enable the win32 builder, as
+	we now have a w32 build slave courtesy of Mike Taylor.
+
+	* buildbot/process/base.py (Build.checkInterlocks): OMG this was
+	so broken. Fixed a race condition that tripped up interlocked
+	builds and caused the status to be stuck at "Interlocked" forever.
+	The twisted buildbot's one interlocked build just so happened to
+	never hit this case until recently (the feeding builds both pass
+	before the interlocked build is attempted.. usually it has to wait
+	a while).
+	(Builder._pong_failed): fix method signature
+
+	* setup.py: bump to 0.4.3+ while between releases
+
+2004-04-30  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): Releasing buildbot-0.4.3
+
+2004-04-30  Brian Warner  <warner at lothar.com>
+
+	* MANIFEST.in: add the doc fragments in docs/*.xhtml
+
+	* README: update for 0.4.3 release
+
+	* NEWS: update for 0.4.3 release
+
+	* buildbot/master.py (BuildMaster.__getstate__): make sure
+	Versioned.__getstate__ is invoked, for upgrade from 0.4.2
+
+	* buildbot/process/step_twisted.py (RunUnitTests.trial): add
+	.trial as a class attribute, for upgrade from 0.4.2
+
+	* buildbot/changes/changes.py (Change.links): add .links for
+	upgrade from 0.4.2
+
+	* buildbot/status/event.py (Logfile.__getstate__): get rid of both
+	.textWatchers and .htmlWatchers at save time, since they are both
+	volatile, should allow smooth 0.4.2 upgrade
+
+	* buildbot/process/step.py (CVS.finishStatus): catch failed
+	CVS/SVN commands so we can make the status box red
+
+2004-04-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/freshcvs.py
+	(FreshCVSConnectionFactory.gotPerspective): add (commented-out)
+	code to do setFilter(), which tells the freshcvs daemon to not
+	send us stuff that we're not interested in. I will uncomment it
+	when a new version of CVSToys is available in which setFilter()
+	actually works, and I get a chance to test it better.
+
+	* docs/examples/twisted_master.cfg: start using a PBChangeSource
+
+	* buildbot/master.py (Dispatcher): use a registration scheme
+	instead of hardwired service names
+	(BuildMaster): keep track of the Dispatcher to support
+	registration
+
+	* buildbot/changes/changes.py (ChangeMaster): create a distinct
+	PBChangeSource class instead of having it be an undocumented
+	internal feature of the ChangeMaster. Split out the code into a
+	new file.
+	* buildbot/changes/pb.py (PBChangeSource): same
+	* buildbot/test/test_changes.py: a few tests for PBChangeSource
+
+	* docs/{factories|sources|steps}.xhtml: document some pieces
+
+	* docs/examples/twisted_master.cfg: use SVN instead of CVS, stop
+	using FCMaildirSource
+	(f23osx): update OS-X builder to use python2.3, since the slave
+	was updated to Panther (10.3.3)
+
+2004-03-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py: factor out doCheckout, change
+	to use SVN instead of CVS
+
+	* buildbot/process/base.py (BasicBuildFactory): refactor to make
+	an SVN subclass easier
+	(BasicSVN): subclass which uses Subversion instead of CVS
+
+2004-03-15  Christopher Armstrong  <radix at twistedmatrix.com>
+
+	* buildbot/slavecommand.py (ShellCommand.start): use COMSPEC instead
+	of /bin/sh on win32
+	(CVSCommand.cvsComplete): don't assume chdir worked on win32
+
+2004-02-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slavecommand.py (ShellCommand): ['commands'] argument
+	is now either a list (which is passed to spawnProcess directly) or
+	a string (which gets passed to /bin/sh -c). This removes the useSH
+	flag and the ArgslistCommand class. Also send status header at the
+	start and end of each command, instead of having the master-side
+	code do that.
+	(CVSCommand): fix the doUpdate command, it failed to do the 'cp
+	-r'. Update to use list-based arguments.
+	(SVNFetch): use list-based arguments, use ['dir'] argument to
+	simplify code.
+	* buildbot/test/test_steps.py (Commands): match changes
+
+	* buildbot/process/step.py (InternalShellCommand.words): handle
+	command lists
+	(SVN): inherit from CVS, cleanup
+
+	* buildbot/status/event.py (Logfile.content): render in HTML, with
+	stderr in red and headers (like the name of the command we're
+	about to run) in blue. Add link to a second URL (url + "?text=1")
+	to get just stdout/stderr in text/plain without markup. There is
+	still a problem with .entries=None causing a crash; it seems to occur
+	when the logfile is read before it is finished.
+
+	* buildbot/bot.py (BotFactory.doKeepalive): add a 30-second
+	timeout to the keepalives, and use it to explicitly do a
+	loseConnection instead of waiting for TCP to notice the loss. This
+	ought to clear up the silent-lossage problem.
+	(unsafeTracebacks): pass exception tracebacks back to the master,
+	makes it much easier to debug problems
+
+2004-02-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slavecommand.py (ShellCommand): add useSH flag to pass
+	the whole command to /bin/sh instead of execve [Johan Dahlin]
+	(CVSCommand): drop '-r BRANCH' if BRANCH==None instead of using
+	'-r HEAD' [Johan Dahlin]
+	(CVSCommand.start2): fix cvsdir calculation [Johan Dahlin]
+
+	* buildbot/changes/changes.py (Change): add links= argument, add
+	asHTML method [Johan Dahlin]. Modified to make it a bit more
+	XHTMLish. Still not sure how best to use links=.
+
+	* buildbot/status/html.py (StatusResourceCommits.getChild): use 
+	Change.asHTML to display the change, not asText
+
+	* buildbot/status/html.py (StatusResourceBuilder): web button to
+	ping slave
+
+	* buildbot/test/test_run.py: test to actually start a buildmaster
+	and poke at it
+
+	* MANIFEST.in: bring back accidentally-dropped test helper files
+
+	* buildbot/test/test_config.py (ConfigTest.testSources): skip tests
+	that require cvstoys if it is not installed
+
+	* buildbot/process/step_twisted.py (RunUnitTests): allow other
+	values of "bin/trial" [Dave Peticolas]
+	(RunUnitTests.finishStatus): say "no tests run" instead of "0
+	tests passed" when we didn't happen to run any tests
+
+	* buildbot/process/step.py (Compile): use haltOnFailure instead of
+	flunkOnFailure [Johan Dahlin]
+
+	* buildbot/process/base.py (ConfigurableBuild.setSteps): allow
+	multiple instances of the same Step class by suffixing "_2", etc,
+	to the name until it is unique. This name needs to be unique
+	because it is used as a key in the dictionary that tracks build
+	progress.
+	* buildbot/test/test_steps.py (Steps.testMultipleStepInstances):
+	add test for it
+
+	* buildbot/process/base.py (Builder.ping): add "ping slave" command
+
+2004-01-14  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py (IrcStatusBot): when we leave or get
+	kicked from a channel, log it
+
+	* buildbot/master.py (Dispatcher): add "poke IRC" command to say
+	something over whatever IRC channels the buildmaster is currently
+	connected to. Added to try and track down a problem in which the
+	master thinks it is still connected but the IRCd doesn't see it. I
+	used a styles.Versioned this time, so hopefully users won't have
+	to rebuild their .tap files.
+	* contrib/debug.glade: add a "Poke IRC" button
+	* contrib/debugclient.py: same
+
+	* setup.py: bump to 0.4.2+ while between releases
+
+2004-01-08  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): Releasing buildbot-0.4.2
+
+2004-01-08  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update for 0.4.2 release
+
+	* README: document how to run the tests, now that they all pass
+
+	* buildbot/changes/maildir.py (Maildir.poll): minor comment
+
+	* buildbot/process/step.py (CVS): add a global_options= argument,
+	which lets you set CVS global options for the command like "-r"
+	for read-only checkout, or "-R" to avoid writing in the
+	repository.
+	* buildbot/slavecommand.py (CVSCommand): same
+
+	* buildbot/status/event.py (Logfile): add a .doSwap switch to make
+	testing easier (it is turned off when testing, to avoid the
+	leftover timer)
+
+	* buildbot/process/step.py (InternalBuildStep): shuffle code a bit
+	to make it easier to test: break generateStepID() out to a
+	separate function, only update statusbag if it exists.
+	(ShellCommands): create useful text for dict-based commands too.
+
+	* test/*, buildbot/test/*: move unit tests under the buildbot/
+	directory
+	* setup.py (packages): install buildbot.test too
+
+	* buildbot/test/test_slavecommand.py: fix it, tests pass now
+	* buildbot/test/test_steps.py: fix it, tests pass now
+
+2004-01-06  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/mail.py (parseFreshCVSMail): looks like new
+	freshcvs mail uses a slightly different syntax for new
+	directories. Update parser to handle either.
+	* test/test_mailparse.py (Test1.testMsg9): test for same
+
+2003-12-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py (TwistedDebsBuildFactory): set
+	'warnOnWarnings' so that lintian errors mark the build orange
+
+2003-12-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/changes/mail.py (parseBonsaiMail): parser for commit
+	messages emitted by Bonsai, contributed by Stephen Davis.
+
+	* test/*: moved all tests to use trial instead of unittest. Some
+	still fail (test_steps, test_slavecommand, and test_process).
+
+	* setup.py (version): bump to 0.4.1+ while between releases
+
+2003-12-09  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): Releasing buildbot-0.4.1
+
+2003-12-09  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update for 0.4.1 release
+
+	* docs/examples/twisted_master.cfg: add netbsd builder, shuffle
+	freebsd builder code a little bit
+
+	* buildbot/changes/freshcvs.py (FreshCVSSourceNewcred.__cmp__):
+	don't try to compare attributes of different classes
+	* buildbot/changes/mail.py (MaildirSource.__cmp__): same
+	(MaildirSource.messageReceived): fix Change delivery
+
+	* buildbot/master.py (BuildMaster.loadConfig): insert 'basedir'
+	into the config file's namespace before loading it, like the
+	documentation claims it does
+	* docs/examples/twisted_master.cfg: remove explicit 'basedir'
+	(useFreshCVS): switch to using a maildir until Twisted's freshcvs
+	daemon comes back online
+
+2003-12-08  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: provide an explicit 'basedir'
+	so the example will work with online=0 as well
+
+	* buildbot/changes/mail.py (FCMaildirSource, SyncmailMaildirSource):
+	fix the __implements__ line
+
+	* buildbot/changes/maildirtwisted.py (MaildirTwisted): make this
+	class a twisted.application.service.Service, use startService to
+	get it moving.
+
+	* buildbot/changes/dnotify.py (DNotify): use os.open to get the
+	directory fd instead of simple open(). I'm sure this used to work,
+	but the current version of python refuses to open directories with
+	open().
+
+2003-12-05  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): bump to 0.4.0+ while between releases
+
+2003-12-05  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): Releasing buildbot-0.4.0
+
+2003-12-05  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/glib_master.cfg: replace old sample scripts with
+	new-style config files
+	* MANIFEST.in: include .cfg files in distribution tarball
+
+	* buildbot/changes/freshcvs.py (FreshCVSListener.remote_goodbye):
+	implement a dummy method to avoid the exception that occurs when
+	freshcvs sends this to us.
+
+	* buildbot/pbutil.py (ReconnectingPBClientFactory.stopFactory):
+	removed the method, as it broke reconnection. Apparently
+	stopFactory is called each time the connection attempt fails. Must
+	rethink this.
+	(ReconnectingPBClientFactory.__getstate__): squash the _callID
+	attribute before serialization, since without stopFactory the
+	reconnect timer may still be active and they aren't serializable.
+
+	* test/test_mailparse.py (ParseTest): test with 'self' argument
+
+	* buildbot/changes/mail.py (parseFreshCVSMail): add (silly) 'self'
+	argument, as these "functions" are invoked like methods from class
+	attributes and therefore always get an instance as the first
+	argument.
+
+	* buildbot/changes/maildir.py (Maildir.start): fix error in error
+	message: thanks to Stephen Davis for the catch
+
+2003-12-04  Brian Warner  <warner at lothar.com>
+
+	* buildbot/pbutil.py: complete rewrite using PBClientFactory and
+	twisted's standard ReconnectingClientFactory. Handles both oldcred
+	and newcred connections. Also has a bug-workaround for
+	ReconnectingClientFactory serializing its connector when it
+	shouldn't.
+
+	* buildbot/bot.py (BotFactory): rewrite connection layer with new
+	pbutil. Replace makeApp stuff with proper newcred/mktap
+	makeService(). Don't serialize Ephemerals on shutdown.
+
+	* buildbot/changes/changes.py (ChangeMaster): make it a
+	MultiService and add the sources as children, to get startService
+	and stopService for free. This also gets rid of the .running flag.
+
+	* buildbot/changes/freshcvs.py (FreshCVSSource): rewrite to use
+	new pbutil, turn into a TCPClient at the same time (to get
+	startService for free). Two variants exist: FreshCVSSourceOldcred
+	and FreshCVSSourceNewcred (CVSToys doesn't actually support newcred
+	yet, but when it does, we'll be ready).
+	(FreshCVSSource.notify): handle paths which are empty after the
+	prefix is stripped. This only happens when the top-level (prefix)
+	directory is added, at the very beginning of a Repository's life.
+
+	* buildbot/clients/base.py: use new pbutil, clean up startup code.
+	Now the only reconnecting code is in the factory where it belongs.
+	(Builder.unsubscribe): unregister the disconnect callback when we
+	delete the builder on command from the master (i.e. when the
+	buildmaster is reconfigured and that builder goes away). This
+	fixes a multiple-delete exception when the status client is shut
+	down afterwards.
+	* buildbot/clients/gtkPanes.py (GtkClient): cleanup, match the
+	base Client. 
+
+	* buildbot/status/words.py (IrcStatusBot): add some more sillyness
+	(IrcStatusBot.getBuilderStatus): fix minor exception in error message
+
+2003-10-20  Christopher Armstrong  <radix at twistedmatrix.com>
+
+	* contrib/run_maxq.py: Accept a testdir as an argument rather than
+	a list of globs (ugh). The testdir will be searched for files
+	named *.tests, and the tests will be run in the order specified in
+	each of those files. This allows "dependencies" between tests to be
+	codified.
+
+	* buildbot/process/maxq.py (MaxQ.__init__): Accept a testdir
+	argument to pass to run_maxq.py, instead of a glob.
+
+2003-10-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (HLint.start): ignore .xhtml
+	files that live in the sandbox
+
+2003-10-15  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (ProcessDocs.finished): fix
+	spelling error in "docs" count-warnings output
+	(HLint.start): stupid thinko meant .xhtml files were ignored
+
+	* docs/examples/twisted_master.cfg (reactors): disable cReactor
+	tests now that cReactor is banished to the sandbox
+
+2003-10-10  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (ProcessDocs, HLint): new Twisted
+	scheme: now .xhtml are sources and .html are generated
+
+2003-10-08  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (RunUnitTests.__init__): oops,
+	we were ignoring the 'randomly' parameter.
+
+2003-10-01  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slavecommand.py (ShellCommand.start): set usePTY=1 on
+	posix, to kill sub-children of aborted slavecommands.
+
+	* buildbot/status/builder.py: rename Builder to BuilderStatus.
+	Clean up initialization: lastBuildStatus remains None until the
+	first build has been completed.
+
+	* buildbot/status/html.py (WaterfallStatusResource.body): handle
+	None as a lastBuildStatus
+	* buildbot/clients/gtkPanes.py: same
+
+	* buildbot/status/client.py (StatusClientService): keep
+	BuilderStatus objects in self.statusbags . These objects now live
+	here in the StatusClientService and are referenced by the Builder
+	object, rather than the other way around.
+	* buildbot/status/words.py (IrcStatusBot.getBuilderStatus): same
+	* buildbot/process/base.py (Builder): same
+	* test/test_config.py (ConfigTest.testBuilders): same
+
+	* buildbot/master.py (BuildMaster.loadConfig_Builders): when modifying
+	an existing builder, leave the statusbag alone. This will preserve the
+	event history.
+
+	* buildbot/pbutil.py (ReconnectingPB.connect): add initial newcred
+	hook. This will probably go away in favor of a class in upcoming
+	Twisted versions.
+
+	* buildbot/changes/freshcvs.py (FreshCVSSource.start): Remove old
+	serviceName from newcred FreshCVSNotifiee setup
+
+2003-09-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py: switch to new reactor
+	abbreviations
+	* docs/examples/twisted_master.cfg: same
+
+	* README (REQUIREMENTS): mention twisted-1.0.8a3 requirement
+
+	* buildbot/status/words.py (IrcStatusBot.getBuilder): use the
+	botmaster reference instead of the oldapp service lookup
+
+	* buildbot/master.py (BuildMaster.__init__): give the
+	StatusClientService a reference to the botmaster to make it easier to
+	force builds
+
+2003-09-24  Christopher Armstrong  <radix at twistedmatrix.com>
+
+	* buildbot/status/html.py (Box.td): escape hreffy things so you
+	can have spaces in things like builder names
+	(StatusResourceBuilder.body)
+	(WaterfallStatusResource.body)
+	(WaterfallStatusResource.body0): same
+
+2003-09-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (BuildMaster.loadConfig_Builders): don't
+	rearrange the builder list when adding or removing builders: keep
+	them in the order the user requested.
+	* test/test_config.py (ConfigTest.testBuilders): verify it
+
+	* contrib/debug.glade: give the debug window a name
+
+	* buildbot/process/base.py (Builder.buildTimerFired): builders can
+	now wait on multiple interlocks. Fix code relating to that.
+	(Builder.checkInterlocks): same
+	* buildbot/status/builder.py (Builder.currentlyInterlocked): same
+
+	* buildbot/master.py (BuildMaster.loadConfig): move from
+	deprecated pb.BrokerFactory to new pb.PBServerFactory
+	* test/test_config.py (ConfigTest.testWebPathname): same
+
+	* docs/examples/twisted_master.cfg: fix interlock declaration
+
+	* buildbot/master.py (BotMaster.addInterlock): move code to attach
+	Interlocks to their Builders into interlock.py .
+	(BuildMaster.loadConfig_Interlocks): fix interlock handling
+
+	* test/test_config.py (ConfigTest.testInterlocks): validate
+	interlock handling
+
+	* buildbot/process/base.py (Builder.__init__): better comments
+	* buildbot/process/interlock.py (Interlock.__repr__): same
+	(Interlock.deactivate): add .active flag, move the code that
+	attaches/detaches builders into the Interlock
+
+2003-09-24  Christopher Armstrong  <radix at twistedmatrix.com>
+
+	* buildbot/process/maxq.py (MaxQ): support for running a set of MaxQ
+	tests using the new run_maxq.py script, and reporting failures by
+	parsing its output.
+
+	* contrib/run_maxq.py: Hacky little script for running a set of maxq
+	tests, reporting their success or failure in a buildbot-friendly 
+	manner.
+
+2003-09-24  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.cfg: example of a new-style config
+	file. This lives in the buildmaster base directory as
+	"master.cfg".
+
+	* contrib/debugclient.py (DebugWidget.do_rebuild): add 'reload'
+	button to make the master re-read its config file
+
+	* buildbot/master.py (BuildMaster.loadConfig): new code to load
+	buildmaster configuration from a file. This file can be re-read
+	later, and the buildmaster will update itself to match the new
+	desired configuration. Also use new Twisted Application class.
+	* test/Makefile, test/test_config.py: unit tests for same
+
+	* buildbot/changes/freshcvs.py (FreshCVSSource.__cmp__): make
+	FreshCVSSources comparable, to support reload.
+	* buildbot/changes/mail.py (MaildirSource.__cmp__): same
+
+	* buildbot/process/base.py (Builder): make them comparable, make
+	Interlocks easier to attach, to support reload. Handle
+	re-attachment of remote slaves.
+	* buildbot/process/interlock.py (Interlock): same
+
+	* buildbot/bot.py, bb_tap.py, changes/changes.py: move to
+	Twisted's new Application class. Requires Twisted >= 1.0.8 .
+	buildmaster taps are now constructed with mktap.
+	* buildbot/status/client.py (StatusClientService): same
+
+	* buildbot/status/words.py: move to new Services, add support to
+	connect to multiple networks, add reload support, allow nickname
+	to be configured on a per-network basis
+
+2003-09-20  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.py (twisted_app): use python2.3 for
+	the freebsd builder, now that the machine has been upgraded and no
+	longer has python2.2
+
+	* setup.py (version): bump to 0.3.5+ while between releases
+
+2003-09-19  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): Releasing buildbot-0.3.5
+
+2003-09-19  Brian Warner  <warner at lothar.com>
+
+	* NEWS: add post-0.3.4 notes
+
+	* README (REQUIREMENTS): note twisted-1.0.7 requirement
+
+	* MANIFEST.in: add contrib/*
+
+	* docs/examples/twisted_master.py (twisted_app): all build slaves must
+	use a remote root now: cvs.twistedmatrix.com
+
+	* buildbot/changes/freshcvs.py (FreshCVSNotifiee.connect): update
+	to newcred
+	(FreshCVSNotifieeOldcred): but retain a class that uses oldcred for
+	compatibility with old servers
+	(FreshCVSSource.start): and provide a way to use it
+	(FreshCVSNotifiee.disconnect): handle unconnected notifiee
+
+	* docs/examples/twisted_master.py (twisted_app): update to new
+	makeApp interface.
+	(twisted_app): listen on new ~buildbot socket
+	(twisted_app): Twisted CVS has moved to cvs.twistedmatrix.com
+
+	* buildbot/process/process_twisted.py: Use 'copydir' on CVS steps
+	to reduce cvs bandwidth (update instead of full checkout)
+
+2003-09-11  Brian Warner  <warner at lothar.com>
+
+	* contrib/fakechange.py: demo how to connect to the changemaster
+	port. You can use this technique to submit changes to the
+	buildmaster from source control systems that offer a hook to run a
+	script when changes are committed.
+
+	* contrib/debugclient.py: tool to connect to the debug port. You
+	can use it to force builds, submit fake changes, and wiggle the
+	builder state
+
+	* buildbot/master.py: the Big NewCred Reorganization. Use a single
+	'Dispatcher' realm to handle all the different kinds of
+	connections and Perspectives: buildslaves, the changemaster port,
+	the debug port, and the status client port. NewCredPerspectives
+	now have .attached/.detached methods called with the remote 'mind'
+	reference, much like old perspectives did. All the pb.Services
+	turned into ordinary app.ApplicationServices .
+	(DebugService): went away, DebugPerspectives are now created
+	directly by the Dispatcher.
+	(makeApp): changed interface a little bit
+
+	* buildbot/changes/changes.py: newcred
+	* buildbot/status/client.py: newcred
+
+	* buildbot/clients/base.py: newcred client side changes
+	* buildbot/bot.py: ditto
+
+	* docs/examples/glib_master.py: handle new makeApp() interface
+	* docs/examples/twisted_master.py: ditto
+
+	* buildbot/pbutil.py (NewCredPerspective): add a helper class to
+	base newcred Perspectives on. This should go away once Twisted
+	itself provides something sensible.
+
+
+2003-09-11  Christopher Armstrong  <radix at twistedmatrix.com>
+
+	* contrib/svn_buildbot.py: A program that you can call from your
+	SVNREPO/hooks/post-commit file that will notify a BuildBot master
+	when a change in an SVN repository has happened. See the top of
+	the file for some minimal usage info.
+
+2003-09-10  Christopher Armstrong  <radix at twistedmatrix.com>
+
+	* buildbot/slavecommand.py (ArglistCommand): Add new
+	ArglistCommand that takes an argument list rather than a string as
+	a parameter. Using a str.split() for argv is very bad.
+
+	* buildbot/slavecommand.py (SVNFetch): Now has the ability to
+	update to a particular revision rather than always checking out
+	(still not very smart about it, there may be cases where the
+	checkout becomes inconsistent).
+
+2003-09-10  Christopher Armstrong  <radix at twistedmatrix.com>
+
+	* buildbot/{bot.py,slavecommand.py,process/step.py}: Rudimentary
+	SVN fetch support. It can checkout (not update!) a specified
+	revision from a specified repository to a specified directory.
+
+	* buildbot/status/progress.py (Expectations.update): Fix an
+	obvious bug (apparently created by the change described in the
+	previous ChangeLog message) by moving a check to *after* the
+	variable it checks is defined.
+
+
+2003-09-08  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/progress.py (Expectations.update): hack to catch
+	an exception TTimo sees: sometimes the update() method seems to
+	get called before the step has actually finished, so the .stopTime
+	is not set, so no totalTime() is available and we average None
+	with the previous value. Catch this and just don't update the
+	metrics, and emit a log message.
+
+2003-08-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/base.py (BasicBuildFactory): accept 'cvsCopy'
+	parameter to set copydir='original' in CVS commands.
+
+	* buildbot/process/step.py (CVS): accept 'copydir' parameter.
+
+	* buildbot/slavecommand.py (CVSCommand): add 'copydir' parameter,
+	which tells the command to maintain a separate original-source CVS
+	workspace. For each build, this workspace will be updated, then
+	the tree copied into a new workdir. This reduces CVS bandwidth
+	(from a full checkout to a mere update) while doubling the local
+	disk usage (to keep two copies of the tree).
+
+2003-08-21  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/event.py (Logfile.addEntry): if the master web
+	server dies while we're serving a page, request.write raises
+	pb.DeadReferenceError . Catch this and treat it like a
+	notifyFinish event by dropping the request.
+
+2003-08-18  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py (IrcStatusBot.command_FORCE): complain
+	(instead of blowing up) if a force-build command is given without
+	a reason field
+
+	* buildbot/changes/changes.py (ChangeMaster.getChangeNumbered):
+	don't blow up if there aren't yet any Changes in the list
+
+2003-08-02  Brian Warner  <warner at lothar.com>
+
+	* buildbot/bot.py (updateApplication): don't set the .tap name,
+	since we shouldn't assume we own the whole .tap file
+
+	* buildbot/bb_tap.py (updateApplication): clean up code, detect
+	'mktap buildbot' (without a subcommand) better
+
+2003-07-29  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py
+	(IrcStatusFactory.clientConnectionLost): when we lose the
+	connection to the IRC server, schedule a reconnection attempt.
+
+	* buildbot/slavecommand.py (CVSCommand.doClobber): on non-posix,
+	use shutil.rmtree instead of forking off an "rm -rf" command.
+	rmtree may take a while and will block until it finishes, so we
+	use "rm -rf" if available.
+
+	* docs/examples/twisted_master.py: turn off kqreactor, it hangs
+	freebsd buildslave badly
+
+	* setup.py (version): bump to 0.3.4+ while between releases
+
+2003-07-28  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): Releasing buildbot-0.3.4
+
+2003-07-28  Brian Warner  <warner at lothar.com>
+
+	* NEWS: update in preparation for release
+
+	* buildbot/slavecommand.py (ShellCommand.doTimeout): use
+	process.signalProcess instead of os.kill, to improve w32
+	portability
+
+	* docs/examples/twisted_master.py (twisted_app): turn off
+	win32eventreactor: the tests hang the buildslave badly
+
+	* buildbot/process/base.py (Build.buildFinished): update ETA even on
+	failed builds, since usually the failures are consistent
+
+	* buildbot/process/process_twisted.py (TwistedReactorsBuildFactory):
+	add compileOpts/compileOpts2 to reactors build
+
+	* docs/examples/twisted_master.py (twisted_app): add "-c mingw32"
+	(twisted_app): use both default and win32eventreactor on w32 build.
+	Use both default and kqreactor on freebsd build.
+
+	* buildbot/process/process_twisted.py (FullTwistedBuildFactory):
+	add compileOpts2, which is put after the build_ext argument. w32
+	needs "-c mingw32" here.
+
+	* buildbot/status/html.py (StatusResourceBuilder.getChild): don't
+	touch .acqpath, it goes away in recent Twisted releases
+
+	* docs/examples/twisted_master.py (twisted_app): use "python" for
+	the w32 buildslave, not "python2.2"
+
+	* buildbot/bot.py (Bot.remote_getSlaveInfo): only look in info/ if
+	the directory exists.. should hush an exception under w32
+
+	* buildbot/slavecommand.py (ShellCommandPP.processEnded): use
+	ProcessTerminated -provided values for signal and exitCode rather
+	than parsing the unix status code directly. This should remove one
+	more roadblock for a w32-hosted buildslave.
+
+	* test/test_mailparse.py: add test cases for Syncmail parser
+
+	* buildbot/changes/freshcvsmail.py: remove leftover code, leave a
+	temporary compatibility import. Note! Start importing
+	FCMaildirSource from changes.mail instead of changes.freshcvsmail
+
+	* buildbot/changes/mail.py (parseSyncmail): finish Syncmail parser
+
+2003-07-27  Brian Warner  <warner at lothar.com>
+
+	* NEWS: started adding new features
+
+	* buildbot/changes/mail.py: start work on Syncmail parser, move
+	mail sources into their own file
+
+	* buildbot/changes/freshcvs.py (FreshCVSNotifiee): mark the class
+	as implementing IChangeSource
+	* buildbot/changes/freshcvsmail.py (FCMaildirSource): ditto
+
+	* buildbot/interfaces.py: define the IChangeSource interface
+
+2003-07-26  Brian Warner  <warner at lothar.com>
+
+	* buildbot/master.py (makeApp): docstring (thanks to Kevin Turner)
+
+2003-06-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py (IrcStatusBot.emit_last): round off
+	seconds display
+
+2003-06-17  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py: clean up method usage to avoid error
+	in silly IRC command
+	(IrcStatusBot.emit_status): round off seconds display
+
+	* buildbot/process/base.py (Build): delete the timer when saving
+	to the .tap file, and restore it (if it should still be running)
+	upon restore. This should fix the "next build in -34 seconds"
+	messages that result when the master is restarted while builds are
+	sitting in the .waiting slot. If the time for the build has
+	already passed, start it very soon (in 1 second).
+
+	* buildbot/status/words.py: more silly commands
+
+	* README (REQUIREMENTS): add URLs to all required software
+
+	* buildbot/status/words.py ('last'): mention results of, and time
+	since last build
+
+2003-05-28  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/words.py: add 'last' command
+	(IrcStatusBot.emit_status): add current-small text to 'status' output
+
+	* docs/examples/twisted_master.py (twisted_app): turn on IRC bot
+	(twisted_app): remove spaces from OS-X builder name
+
+	* buildbot/master.py (makeApp): add knob to turn on IRC bot
+	* buildbot/status/words.py: IRC bot should actually be useful now
+
+2003-05-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/bot.py (Bot.remote_getSlaveInfo): add routines to get
+	"slave information" from $(slavedir)/info/* . These files are
+	maintained by the slave administrator, and describe the
+	machine/environment that is hosting the slave. Information from
+	them is put into the "Builder" HTML page. Still need to establish
+	a set of well-known filenames and meanings for this data: at the
+	moment, *all* info/* files are sent to the master, but only
+	'admin' and 'host' are used on that end.
+	* buildbot/status/html.py (StatusResourceBuilder.body): ditto
+	* buildbot/process/base.py (Builder.setRemoteInfo):  ditto
+	* buildbot/master.py (BotPerspective.got_info):  ditto
+
+2003-05-22  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): bump version to 0.3.3+ while between releases
+
+2003-05-21  Brian Warner  <warner at lothar.com>
+
+	* setup.py: Releasing buildbot-0.3.3
+
+2003-05-21  Brian Warner  <warner at lothar.com>
+
+	* NEWS: 0.3.3 news items
+
+	* README: describe --keepalive and life behind a NAT box
+
+	* buildbot/bot.py (Bot.connected): implement application-level
+	keepalives to deal with NAT timeouts, turn them on with
+	--keepalive option or when SO_KEEPALIVE doesn't work.
+
+	* buildbot/master.py (BotPerspective): accept keepalives silently
+
+	* buildbot/process/base.py (Build.buildException): CopiedFailures
+	don't carry as much information as local ones, so don't try to
+	create a big HTMLized version of them.
+
+	* buildbot/process/step.py (InternalShellCommand.stepFailed): close
+	log file when step fails due to an exception, such as when the slave
+	becomes unreachable
+
+	* buildbot/process/step_twisted.py (RunUnitTests): use trial's new
+	--testmodule argument instead of grepping for test-case-name tags
+	ourselves. Remove FindUnitTests code.
+	* buildbot/slavecommand.py, buildbot/bot.py: remove old code
+
+	* MANIFEST.in: Add docs/examples, files under test/ . Oops!
+
+2003-05-16  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/base.py (BasicBuildFactory): add 'configureEnv'
+	argument to allow things like CFLAGS=-O0 to be passed without relying
+	upon /bin/sh processing on the slave.
+
+	* buildbot/process/step.py (InternalShellCommand.start): send
+	'env' dict to slave
+	* buildbot/slavecommand.py (ShellCommand.start): create argv with
+	'split' instead of letting /bin/sh do it. This should also remove
+	the need for /bin/sh on the buildslave, making it more likely to
+	work with win32.
+
+	* buildbot/status/html.py: html-escape text in blamelist.
+	Add "force build" button to the Builder page.
+
+	* buildbot/process/step_twisted.py (countFailedTests): look at
+	last 1000 characters for status line, as import errors can put it
+	before the -200 point.
+
+2003-05-15  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.py: use clobber=0 for remote builds
+
+	* buildbot/process/process_twisted.py (FullTwistedBuildFactory):
+	make 'clobber' a parameter, so it is possible to have builds which
+	do full tests but do a cvs update instead of hammering the CVS
+	server with a full checkout each build
+
+	* buildbot/process/step.py (InternalShellCommand): bump default
+	timeout to 20 minutes
+
+	* buildbot/bot.py (Bot.debug_forceBuild): utility method to ask
+	the master to trigger a build. Run it via manhole.
+
+	* buildbot/master.py (BotPerspective.perspective_forceBuild):
+	allow slaves to trigger any build that they host, to make life
+	easier for slave admins who are testing out new build processes
+
+	* buildbot/process/process_twisted.py (TwistedReactorsBuildFactory):
+	don't flunk cReactor or qtreactor on failure, since they fail a lot
+	these days. Do warnOnFailure instead.
+
+	* buildbot/process/base.py: change Builder.buildable from a list
+	into a single slot. When we don't have a slave, new builds (once
+	they make it past the timeout) are now merged into an existing
+	buildable one instead of being queued. With this change, a slave
+	which has been away for a while doesn't get pounded with all the
+	builds it missed, but instead just does a single build.
+
+2003-05-07  Brian Warner  <warner at lothar.com>
+
+	* setup.py (version): bump version to 0.3.2+ while between releases
+
+2003-05-07  Brian Warner  <warner at lothar.com>
+
+	* setup.py: Releasing buildbot-0.3.2
+
+2003-05-07  Brian Warner  <warner at lothar.com>
+
+	* setup.py: fix major packaging error: include subdirectories!
+	
+	* NEWS: add changes since last release
+
+	* README (REQUIREMENTS): update twisted/python dependencies
+
+	* buildbot/status/builder.py (Builder.startBuild): change
+	BuildProcess API: now they should call startBuild/finishBuild
+	instead of pushing firstEvent / setLastBuildStatus. Moving towards
+	keeping a list of builds in the statusbag, to support other kinds of
+	status delivery.
+	(Builder.addClient): send current-activity-small to new clients
+	* buildbot/process/base.py (Build.startBuild, .buildFinished): use
+	new API
+
+	* buildbot/status/client.py: drop RemoteReferences at shutdown
+
+	* buildbot/status/event.py (Event.stoppedObserving): oops, add it
+
+	* buildbot/status/progress.py (BuildProgress.remote_subscribe):
+	more debug messages for remote status client
+
+	* buildbot/process/step.py (InternalBuildStep.stepComplete)
+	(.stepFailed): only fire the Deferred once, even if both
+	stepComplete and stepFailed are called. I think this can happen if
+	an exception occurs at a weird time.
+
+	* buildbot/status/words.py: work-in-progress: IRC status delivery
+
+2003-05-05  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.py (twisted_app): hush internal
+	python2.3 distutils deprecation warnings
+	* buildbot/process/process_twisted.py (FullTwistedBuildFactory):
+	add compileOpts= argument which inserts extra args before the
+	"setup.py build_ext" command. This can be used to give -Wignore
+	warnings, to hush some internal python-2.3 deprecation messages.
+ 
+	* buildbot/process/step_twisted.py (RunUnitTests): parameterize
+	the ['twisted.test'] default test case to make it easier to change
+	in subclasses
+
+	* buildbot/clients/base.py: switch to pb.Cacheable-style Events
+	* buildbot/clients/gtkPanes.py: ditto
+
+	* buildbot/process/step_twisted.py (RunUnitTests): use randomly=
+	arg to collapse RunUnitTestsRandomly into RunUnitTests
+	* buildbot/process/process_twisted.py (FullTwistedBuildFactory):
+	use RunUnitTests(randomly=1) instead of RunUnitTestsRandomly
+
+	* buildbot/status/html.py (StatusResource): shuffle Resources
+	around to fix a bug: both 'http://foo:8080' and 'http://foo:8080/'
+	would serve the waterfall display, but the internal links were
+	only valid on the trailing-slash version. The correct behavior is
+	for the non-slashed one to serve a Redirect to the slashed one.
+	This only shows up when the buildbot page is hanging off another
+	server, like a Twisted-Web distributed server.
+
+	* buildbot/status/event.py (Event, RemoteEvent): make Events
+	pb.Cacheable, with RemoteEvent as the cached version. This removes
+	a lot of explicit send-an-update code.
+	* buildbot/status/builder.py (Builder): remove send-update code
+	* buildbot/status/client.py (ClientBuilder): remove send-update
+	code, and log errors that occur during callRemote (mostly to catch
+	InsecureJelly exceptions)
+
+	* buildbot/process/process_twisted.py (QuickTwistedBuildFactory):
+	run Lore with the same python used in the rest of the build
+
+	* buildbot/process/step_twisted2.py (RunUnitTestsJelly): moved
+
+	* buildbot/process/step_twisted.py (HLint): accept 'python'
+	argument. Catch rc!=0 and mark the step as failed. This marks the
+	build orange ("has warnings").
+	(RunUnitTestsJelly): move out to step_twisted2.py
+
+	* buildbot/util.py (ignoreStaleRefs): add utility function
+
+	* buildbot/master.py (DebugPerspective.perspective_setCurrentState):
+	don't fake ETA object, it's too hard to get right
+
+2003-05-02  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.py (twisted_app): add FreeBSD builder
+
+2003-05-01  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/html.py (StatusResource.body): oops, I was
+	missing a <tr>, causing the waterfall page to be misrendered in
+	everything except Galeon.
+
+2003-04-29  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/twisted_master.py: make debuild use python-2.2
+	explicitly, now that Twisted stopped supporting 2.1
+
+	* buildbot/process/step_twisted.py (BuildDebs.finishStatus): oops,
+	handle tuple results too. I keep forgetting this, which suggests
+	it needs to be rethought.
+
+	* setup.py (setup): bump version to 0.3.1+ while between releases
+	
+2003-04-29  Brian Warner  <warner at lothar.com>
+
+	* setup.py: Releasing buildbot-0.3.1
+
+2003-04-29  Brian Warner  <warner at lothar.com>
+
+	* README (SUPPORT): add plea to send questions to the mailing list
+
+	* NEWS, MANIFEST.in: add description of recent changes
+
+	* docs/examples/twisted_master.py: add the code used to create the
+	Twisted buildmaster, with passwords and such removed out to a
+	separate file.
+
+	* buildbot/changes/changes.py, freshcvs.py, freshcvsmail.py: split
+	out cvstoys-using bits from generic changes.py, to allow non-cvstoys
+	buildmasters to not require CVSToys be installed.
+	* README, docs/examples/glib_master: update to match the change
+
+	* buildbot/clients/base.py, buildbot/bot.py,
+	buildbot/changes/changes.py, buildbot/pbutil.py: copy
+	ReconnectingPB from CVSToys distribution to remove CVSToys
+	dependency for build slaves and status clients. Buildmasters which
+	use FreshCVSSources still require cvstoys be installed, of course.
+
+2003-04-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py (FullTwistedBuildFactory): add
+	runTestsRandomly arg to turn on trial -z
+
+	* buildbot/process/step_twisted.py (TwistedJellyTestResults):
+	experimental code to use trial's machine-parseable output to get
+	more detailed test results. Still has some major issues.
+	(RunUnitTestsRandomly): subclass to add "-z 0" option, runs tests
+	in random sequence
+
+	* buildbot/status/builder.py (Builder.setCurrentBuild):
+	anticipating moving build history into statusbag, not used yet
+
+	* buildbot/status/tests.py: code to centralize test results,
+	doesn't work quite yet
+
+	* buildbot/status/event.py (Event): use hasattr("setName") instead
+	of isinstance for now.. need better long-term solution
+
+	* buildbot/status/html.py: Remove old imports
+
+2003-04-24  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py (TwistedBuild.isFileImportant):
+	ignore changes under doc/fun/ and sandbox/
+
+	* buildbot/process/step_twisted.py: update pushEvent and friends.
+
+	* buildbot/status/html.py (Box.td): replace event.buildername with
+	event.parent.getSwappableName(). Needs more thought.
+
+	* buildbot/status/builder.py (Builder): Replace pushEvent and
+	getLastEvent with {set|update|addFileTo|finish}CurrentActivity.
+	Tell events they are being pruned with event.delete().
+
+	* buildbot/process/base.py (Build): Remove Builder status-handling
+	methods. s/pushEvent/setCurrentActivity/.
+
+	* buildbot/process/step.py (BuildStep): clean up status delivery.
+	Use builder.statusbag methods instead of intermediate builder
+	methods. s/updateLastEvent/updateCurrentActivity/.
+	s/finalizeLastEvent/finishCurrentActivity/. Use
+	addFileToCurrentActivity for summaryFunction.
+
+	* buildbot/status/event.py (Logfile): put data in a Swappable when
+	.finish is called.
+	(Event): add more setter methods. Remove .buildername, use .parent
+	and getSwappableName instead (needs more thought).
+
+	* buildbot/util.py (Swappable):
+	* test/test_swap.py: don't bother setting filename at __init__
+	time, do it later. Change setFilename args to take parent first,
+	since it provides the most significant part of the filename.
+
+2003-04-23  Brian Warner  <warner at lothar.com>
+
+	* buildbot/status/event.py (Logfile.addEntry): append to previous
+	entry, if possible
+
+	* buildbot/process/step.py (BuildStep.finalizeLastEvent):
+	anticipating Swappable
+	(InternalShellCommand.remoteUpdate): split out various log-adding
+	methods so subclasses can snarf stdout separately
+
+	* buildbot/process/base.py (Builder.finalizeLastEvent): more code
+	in anticipation of Swappable build logs
+	(Builder.testsFinished): anticipating TestResults, still disabled
+
+	* buildbot/status/builder.py (Builder.pruneEvents): only keep the
+	last 100 events
+
+	* buildbot/status/event.py (Logfile): add (disabled) support for
+	Swappable, not ready for use yet
+
+	* buildbot/util.py (Swappable): object which is swapped out to
+	disk after some period of no use.
+	* test/test_swap.py: test buildbot.utils.Swappable
+
+2003-04-14  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/base.py (Builder.doPeriodicBuild): add simple
+	periodic-build timer. Set the .periodicBuildTime on a builder
+	instance to some number of seconds to activate it.
+
+	* buildbot/master.py (BotMaster.forceBuild): change forceBuild API
+
+	* buildbot/process/step.py (ShellCommand.finishStatus): use log.msg in
+	a way that survives result tuples
+
+2003-04-12  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (ShellCommand.finishStatusSummary):
+	return a dict instead of a tuple: allow summarizers to provide
+	multiple summaries if they want
+	* buildbot/process/step_twisted.py (trialTextSummarizer): return dict
+	(debuildSummarizer): summarize lintian warnings/errors
+
+2003-04-10  Brian Warner  <warner at lothar.com>
+
+	* README (REQUIREMENTS): slave requires twisted-1.0.4a2
+
+2003-04-09  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (trialTextSummarizer): Don't create
+	empty summaries: happens when the tests fail so hard they don't emit
+	a parseable summary line.
+
+	* buildbot/process/step.py (ShellCommand.finishStatusSummary):
+	Allow summaryFunction to return None to indicate no summary should
+	be added.
+ 
+	* buildbot/status/event.py (Logfile.removeHtmlWatcher): avoid
+	writing to stale HTTP requests: notice when they disconnect and
+	remove the request from the list. Also add CacheToFile from
+	moshez, will be used later.
+
+2003-04-08  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (ProcessDocs.finished): warnings
+	should be an int, not a list of strings
+
+	* buildbot/changes/changes.py (FreshCVSSource.stop): don't disconnect
+	if we weren't actually connected
+
+	* buildbot/process/step_twisted.py (trialTextSummarizer): function
+	to show the tail end of the trial text output
+
+	* buildbot/process/step.py (ShellCommand.finishStatusSummary): add
+	hook to summarize the results of a ShellCommand
+
+2003-04-07  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (RunUnitTests): consolidate all
+	twisted test suite code into a single class.
+	* buildbot/process/process_twisted.py: same
+
+2003-04-04  Brian Warner  <warner at lothar.com>
+
+	* setup.py, MANIFEST.in: hack to make sure plugins.tml gets installed
+
+	* README (SLAVE): document use of mktap to create slave .tap file
+	(REQUIREMENTS): describe dependencies
+
+	* buildbot/bb_tap.py, buildbot/plugins.tml:
+	* buildbot/bot.py (updateApplication): Add mktap support for creating
+	buildslave .tap files
+
+2003-03-28  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (InternalShellCommand.finished): handle
+	new tuple result values (fix embarrassing bug that appeared during
+	PyCon demo)
+
+2003-03-27  Brian Warner  <warner at lothar.com>
+
+	* docs/examples/glib_master.py, README: add sample buildmaster.tap
+	-making program
+
+2003-03-25  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step.py (CVS, ShellCommand): add reason for failure
+	to overall build status
+	* buildbot/clients/base.py (Builder): improve event printing
+	* buildbot/process/base.py (BasicBuildFactory): use specific steps
+	instead of generic ShellCommand
+	(Build): Add .stopBuild, use it when slave is detached
+
+	* buildbot/process/step.py (Configure,Test): give the steps their
+	own names and status strings
+
+	* buildbot/status/html.py (StatusResource): add "show" argument,
+	lets you limit the set of Builders being displayed.
+
+2003-03-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/basic.py: removed
+
+2003-03-19  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/process_twisted.py (FullTwistedBuildFactory):
+	turn off process-docs by default
+
+	* buildbot/process/base.py (Builder.getBuildNumbered): Don't blow up
+	when displaying build information without anything in allBuilds[]
+
+	* buildbot/bot.py (makeApp): really take password from sys.argv
+
+2003-03-18  Brian Warner  <warner at lothar.com>
+
+	* buildbot/bot.py (buildApp): take password from sys.argv
+
+	* README: replace with more useful text
+
+	* setup.py: add a real one
+	* MANIFEST.in, .cvsignore: more distutils packaging stuff
+	
+	* docs/PyCon-2003/: added sources for PyCon paper.
+
+	* buildbot/process/base.py, step.py: revamp. BuildProcess is gone,
+	now Build objects control the process and Builder only handles
+	slave stuff and distribution of changes/status. A new BuildFactory
+	class creates Build objects on demand.
+
+	Created ConfigurableBuild which takes a list of steps to run. This
+	makes it a lot easier to set up a new kind of build and moves us
+	closer to being able to configure a build from a web page.
+
+	* buildbot/process/step_twisted.py, process_twisted.py: move to
+	new model. A lot of code went away.
+	
+	* buildbot/status/progress.py (BuildProgress.newProgress): Don't
+	send lots of empty progress messages to the client.
+
+	* buildbot/master.py (makeApp): enforce builder-name uniqueness
+
+2003-02-20  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py (BuildDebs): count lintian hits
+
+	* buildbot/slavecommand.py (ShellCommand): back to usePTY=0. The
+	Twisted bug that prevented non-pty processes from working just got
+	fixed, and the bug that leaks ptys is still being investigated.
+
+	* buildbot/process/step.py (CVS): send timeout arg to slave
+
+	* buildbot/clients/gtkPanes.py: add connection-status row, handle
+	builders coming and going
+	* buildbot/clients/base.py: clean up protocol, move to ReconnectingPB
+	from CVSToys, handle lost-buildmaster
+
+	* buildbot/status/client.py (StatusClientService.removeBuilder):
+	Clean up status client protocol: send builders (with references)
+	as they are created, rather than sending a list and requiring the
+	client to figure out which ones are new.
+	* buildbot/master.py (BotMaster.forceBuild): Log debugclient
+	attempts to force a build on an unknown builder
+
+2003-02-19  Brian Warner  <warner at lothar.com>
+
+	* buildbot/slavecommand.py (CVSCommand): add timeout to sub-commands
+	* buildbot/slavecommand.py (ShellCommand.start): stop using PTYs until
+	Twisted stops leaking them.
+	* buildbot/clients/gtkPanes.py (CompactBuilder): forget ETA when the
+	builder goes to an idle state.
+
+	* buildbot/slavecommand.py (ShellCommand.start): bring back PTYs until
+	I figure out why CVS commands hang without them, and/or I fix the
+	hung-command timeout
+
+2003-02-16  Brian Warner  <warner at lothar.com>
+
+	* buildbot/process/step_twisted.py: bin/hlint went away, replace
+	with 'bin/lore --output lint'. Use 'bin/trial -o' to remove
+	ansi-color markup. Remove GenerateLore step. Count hlint warnings in
+	GenerateDocs now that they are prefixed with WARNING:.
+
+	* buildbot/status/html.py (StatusResource.body): Fix Builder link,
+	use manual href target instead of request.childLink
+
+	* buildbot/clients/gtkPanes.py: Fix progress countdown: update the
+	display every second, but update the ETA every 5 seconds (or
+	whenever) as remote_progress messages arrive.
+
+
+2003-02-12  Brian Warner  <warner at lothar.com>
+
+	* *: import current sources from home CVS repository
+	
+
+# Local Variables:
+# add-log-time-format: add-log-iso8601-time-string
+# End:

Added: vendor/buildbot/current/MANIFEST.in
===================================================================
--- vendor/buildbot/current/MANIFEST.in	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/MANIFEST.in	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,14 @@
+
+include ChangeLog MANIFEST.in README README.w32 NEWS CREDITS
+include docs/examples/*.cfg
+include docs/buildbot.texinfo
+include docs/buildbot.info
+include docs/buildbot.html docs/images/*.png
+include docs/epyrun docs/gen-reference
+include buildbot/test/mail/* buildbot/test/subdir/*
+include buildbot/scripts/sample.cfg
+include buildbot/status/classic.css
+include buildbot/clients/debug.glade
+include buildbot/buildbot.png
+
+include contrib/* contrib/windows/*

Added: vendor/buildbot/current/NEWS
===================================================================
--- vendor/buildbot/current/NEWS	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/NEWS	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,1917 @@
+User visible changes in Buildbot.             -*- outline -*-
+
+* Release 0.7.5 (10 Dec 2006)
+
+** Things You Need To Know
+
+*** The Great BuildStep Renaming
+
+All BuildSteps have moved! They used to be classes in buildbot.process.step,
+but now they all have separate modules in buildbot.steps.* . They have been
+split out into separate categories: for example, the source checkout steps
+are now buildbot.steps.source.CVS, buildbot.steps.source.Darcs, etc. The most
+commonly used one is probably buildbot.steps.shell.ShellCommand . The
+python-specific steps are in buildbot.steps.python, and the Twisted-specific
+steps are in buildbot.steps.python_twisted .
+
+You will need to update your master.cfg files to use the new names. The old
+names are deprecated and will be removed altogether in the next release.
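+
+In master.cfg terms this is mostly a change of imports. A minimal sketch
+(just the import lines; the surrounding builder and factory setup is assumed
+to stay as it was):
+
+    # old, now-deprecated locations
+    from buildbot.process.step import CVS, ShellCommand
+
+    # new locations after the renaming
+    from buildbot.steps.source import CVS
+    from buildbot.steps.shell import ShellCommand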
+
+*** Compatibility
+
+Buildbot now requires python-2.3 or later. Buildbot now requires
+Twisted-2.0.0 or later. Support for earlier versions of both has finally been
+removed. If you discover it works with unsupported versions, please return
+your Buildbot to the factory for repairs :-).
+
+Buildbot has *not* yet been tested against the recent python-2.5 release. It
+has been tested against the latest SVN version of Twisted, but only in
+conjunction with python-2.4 .
+
+** new features
+
+*** reconfiguring a Builder no longer causes a disconnect/reconnect cycle
+
+This means that sending SIGHUP to the master or running 'buildbot reconfig
+MASTERDIR' command no longer interrupts any current builds, nor does it lose
+pending builds like it did before. This involved a fairly substantial
+refactoring of the various internal BotPerspective/BotMaster/Builder classes.
+Note that reconfiguring Schedulers still loses any Changes that were waiting
+for the tree to become stable: hopefully this will be fixed in the next
+release.
+
+*** 'buildbot start/restart/reconfig' now show logs until startup is complete
+
+These commands now have additional code to follow twistd.log and display all
+the lines that are emitted from the beginning of the start/reconfig action
+until it has completed. This gives you a chance to see any problems detected
+in the config file without needing to manually look in twistd.log or use
+another shell to 'tail -f' it. This also makes it clear which config file is
+being used. This functionality is not available under windows.
+
+In addition, if any problems are detected during 'start' or 'restart' (but
+not reconfig), the buildbot command will terminate with a non-zero exit
+status, making it easier to use in scripts. Closes SF#1517975.
+
+*** Locks now take maxCount=N to allow multiple simultaneous owners
+
+This allows Locks to be non-exclusive but still limit maximum concurrency.
+Thanks to James Knight for the patch. Closes SF#1434997.
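+
+A minimal sketch of the new argument (the lock name and count are invented
+for illustration):
+
+    from buildbot import locks
+
+    # at most two builds on a given slave may hold this lock at once
+    compile_lock = locks.SlaveLock("compile", maxCount=2)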
+
+*** filetransfer steps
+
+buildbot.steps.transfer.FileUpload is a buildstep that will move files from
+the slave to the master. Likewise, FileDownload will move files from the
+master down to the buildslave. Many thanks to Albert Hofkamp for contributing
+these classes. Closes SF#1504631.
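+
+A rough usage sketch, assuming an existing BuildFactory 'f' and the
+addStep(stepclass, **kwargs) form; the slavesrc=/masterdest= argument names
+and the paths are illustrative, so check the class docstrings:
+
+    from buildbot.steps.transfer import FileUpload
+
+    # copy a build product from the buildslave up to the buildmaster
+    f.addStep(FileUpload,
+              slavesrc="dist/package.tar.gz",
+              masterdest="/home/buildmaster/uploads/package.tar.gz")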
+
+*** pyflakes step
+
+buildbot.steps.python.PyFlakes will run the simple 'pyflakes' static analysis
+tool and parse the results to tell you about undefined names, unused imports,
+etc. You'll need to tell it how to run pyflakes, usually with something like
+command=["pyflakes", "src/packagedir"] or the like. The default command is
+"make pyflakes", which assumes that you have a suitable target in your
+top-level Makefile.
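+
+A minimal sketch, again assuming an existing BuildFactory 'f' (the source
+directory name is made up):
+
+    from buildbot.steps.python import PyFlakes
+
+    f.addStep(PyFlakes, command=["pyflakes", "src/packagedir"])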
+
+*** Monotone support
+
+Nathaniel Smith has contributed initial support for the Monotone version
+control system. The code still needs docs and tests, but on the other hand it
+has been in use by the Monotone buildbot for a long time now, so it is
+probably fairly stable.
+
+*** Tinderbox support
+
+Ben Hearsum and the Mozilla crew have contributed some classes to allow
+Buildbot to work with Tinderbox clients. One piece is
+buildbot.changes.bonsaipoller.BonsaiPoller, which is a ChangeSource that
+polls a Bonsai server (which is a kind of web-based viewcvs CGI script) to
+discover source code changes. The other piece is
+buildbot.status.tinderbox.TinderboxMailNotifier, which is a status plugin
+that sends email in the same format as Tinderbox does, which allows a number
+of Tinderbox tools to be driven by Buildbot instead.
+
+*** SVN Poller
+
+Niklaus Giger contributed a ChangeSource (buildbot.changes.svnpoller) which
+polls a remote SVN repository on a periodic basis. This is useful when, for
+whatever reason, you cannot add a post-commit hook script to the repository.
+This obsoletes the external contrib/svn_watcher.py script.
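+
+A configuration sketch (the SVNPoller class name, its svnurl=/pollinterval=
+arguments, and the c['sources'] key are written from memory here, so check
+the module's docstrings before relying on them):
+
+    from buildbot.changes.svnpoller import SVNPoller
+
+    c['sources'] = [SVNPoller(svnurl="https://svn.example.org/repos/trunk",
+                              pollinterval=5*60)]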
+
+** notes for plugin developers
+
+*** IStatusLog.readlines()
+
+This new method makes it easier for a status plugin (or a
+BuildStep.createSummary method) to walk through a StatusLog one line at a
+time. For example, if you wanted to create an extra logfile that just
+contained all the GCC warnings from the main log, you could use the
+following:
+
+    def createSummary(self, log):
+        warnings = []
+        for line in log.readlines():
+            if "warning:" in line:
+                warnings.append(line)
+        self.addCompleteLog('warnings', "".join(warnings))
+
+The "BuildStep LogFiles" section of the user's manual contains more
+information. This method is not particularly memory-efficient yet (it reads
+the whole logfile into memory first, then splits it into lines); this will be
+improved in a future release.
+
+** bug fixes
+
+*** Update source.SVN to work with the new SVN-1.4.0
+
+The latest subversion changed the behavior in an unusual situation which
+caused the unit tests to fail. This was unlikely to cause a problem in actual
+usage, but the tests have been updated to pass with the new version.
+
+*** update svn_buildbot.py to avoid mangling filenames
+
+Older versions of this script were stripping the wrong number of columns from
+the output of 'svnlook changed', and would sometimes mangle filenames. This
+has been fixed. Closes SF#1545146.
+
+*** logfiles= caused subsequent build failures under Windows
+
+Earlier versions of buildbot didn't explicitly close any logfiles= file
+handles when the build finished. On windows (where you cannot delete a file
+that someone else is reading), this could cause the next build to fail as the
+source checkout step was unable to delete the old working directory. This has
+been fixed. Closes SF#1568415.
+
+*** logfiles= didn't work on OS-X
+
+Macintosh OS-X behaves differently when reading files that have reached
+EOF; the result was that logfiles= sometimes didn't work. Thanks to Mark Rowe
+for the patch.
+
+** other changes
+
+The 'buildbot sighup MASTERDIR' command has been replaced with 'buildbot
+reconfig MASTERDIR', since that seems to be a slightly more meaningful name.
+The 'sighup' form will remain as an alias.
+
+
+* Release 0.7.4 (23 Aug 2006)
+
+** Things You Need To Know
+
+The PBChangeSource's prefix= argument has changed, you probably need to add a
+slash now. This is mostly used by sites which use Subversion and
+svn_buildbot.py.
+
+The subcommands that are used to create a buildmaster or a buildslave have
+changed. They used to be called 'buildbot master' and 'buildbot slave'. Now
+they are called 'buildbot create-master' and 'buildbot create-slave'. Zipf's
+Law suggests that these are more appropriate names for these
+infrequently-used commands.
+
+The syntax for the c['manhole'] feature has changed.
+
+** new features
+
+*** full Perforce support
+
+SF#1473939: large patch from Scott Lamb, with docs and unit tests! This
+includes both the step.P4 source-checkout BuildStep, and the changes.p4poller
+ChangeSource you'll want to feed it. P4 is now supported just as well as all
+the other VC systems. Thanks Scott!
+
+*** SSH-based Manhole
+
+The 'manhole' feature allows buildbot developers to get access to a python
+read/eval/print loop (REPL) inside the buildmaster through a network
+connection. Previously, this ran over unencrypted telnet, using a simple
+username/password for access control. The new release defaults to encrypted
+SSH access, using either username/password or an authorized_keys file (just
+like sshd). There also exists an unencrypted telnet form, but its use is
+discouraged. The syntax for setting up a manhole has changed, so master.cfg
+files that use them must be updated. The "Debug options" section in the
+user's manual provides a complete description.
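+
+A sketch of the new-style configuration (the PasswordManhole class and its
+argument order are recalled from the manhole module and may differ; the
+"Debug options" section of the manual is authoritative):
+
+    from buildbot import manhole
+
+    c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
+                                           "admin", "sekrit")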
+
+*** Multiple Logfiles
+
+BuildSteps can watch multiple log files in realtime, not just stdout/stderr.
+This works in a similar fashion to 'tail -f': the file is polled once per
+second, and any new data is sent to the buildmaster.
+
+This requires a buildslave running 0.7.4 or later, and a warning message is
+produced if used against an old buildslave (which will otherwise produce no
+data). Use "logfiles={'name': 'filename'}" to take advantage of this feature
+from master.cfg, and see the "ShellCommand" section of the user's manual for
+full documentation.
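+
+A sketch of the syntax in master.cfg (step, command, and file names are
+invented; 'f' is an existing BuildFactory):
+
+    from buildbot.steps.shell import ShellCommand
+
+    f.addStep(ShellCommand,
+              command=["make", "test"],
+              logfiles={"testlog": "_trial_temp/test.log"})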
+
+The 'Trial' buildstep has been updated to use this, to display
+_trial_temp/test.log in realtime. It also knows to fall back to the previous
+"cat" command if the buildslave is too old.
+
+*** BuildStep URLs
+
+BuildSteps can now add arbitrary URLs which will be displayed on the
+Waterfall page in the same place that Logs are presented. This is intended to
+provide a link to generated HTML pages, such as the output of a code coverage
+tool. The step is responsible for somehow uploading the HTML to a web server:
+this feature merely provides an easy way to present the HREF link to the
+user. See the "BuildStep URLs" section of the user's manual for details and
+examples.
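+
+A sketch of what a custom step might do in its createSummary hook (the
+coverage URL is invented, and uploading the HTML report is left to the build
+itself):
+
+    def createSummary(self, log):
+        # assume the build already pushed its HTML report to this server
+        self.addURL("coverage", "http://builds.example.org/coverage/latest/")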
+
+*** LogObservers
+
+BuildSteps can now attach LogObservers to various logfiles, allowing them to
+get real-time log output. They can use this to watch for progress-indicating
+events (like counting the number of files compiled, or the number of tests
+which have run), and update both ETA/progress-tracking and step text. This
+allows for more accurate ETA information, and more information passed to the
+user about how much of the process has completed.
+
+The 'Trial' buildstep has been updated to use this for progress tracking, by
+counting how many test cases have run.
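+
+A sketch of a simple observer (the LogLineObserver base class, the
+addLogObserver call, and the 'stdio' log name are recalled from the buildstep
+module; the line format being matched is purely illustrative):
+
+    from buildbot.process.buildstep import LogLineObserver
+
+    class TestCounter(LogLineObserver):
+        numTests = 0
+        def outLineReceived(self, line):
+            if line.startswith("test"):
+                self.numTests = self.numTests + 1
+                self.step.setProgress('tests', self.numTests)
+
+    # inside a BuildStep subclass, typically in __init__:
+    #     self.addLogObserver('stdio', TestCounter())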
+
+** new documentation
+
+What classes are useful in your master.cfg file? A table of them has been
+added to the user's manual, in a section called "Index of Useful Classes".
+
+Want a list of all the keys in master.cfg? Look in the "Index of master.cfg
+keys" section.
+
+A number of pretty diagrams have been added to the "System Architecture"
+portion of the manual, explaining how all the buildbot pieces fit together.
+
+An HTML form of the user's manual is now shipped in the source tarball. This
+makes it a bit bigger: sorry about that. The old PyCon-2003 paper has been
+removed from the distribution, as it is mostly supplanted by the user's
+manual by this point.
+
+** bugfixes
+
+SF#1217699 + SF#1381867: The prefix= argument to PBChangeSource has been
+changed: now it does just a simple string-prefix match and strip. The
+previous behavior was buggy and unhelpful. NOTE: if you were using prefix=
+before, you probably need to add a slash to the end of it.
+
+SF#1398174: ignore SVN property changes better, fixed by Olivier Bonnet
+
+SF#1452801: don't double-escape the build URL, fixed by Olivier Bonnet
+
+SF#1401121: add support for running py2exe on windows, by Mark Hammond
+
+reloading unchanged config files with WithProperties shouldn't change anything.
+
+All svn commands now include --non-interactive so they won't ask for
+passwords. Instead, the command will fail if it cannot be performed without
+user input.
+
+Deprecation warnings with newer versions of Twisted have been hushed.
+
+** compatibility
+
+I haven't actually removed support for Twisted-1.3.0 yet, but I'd like to.
+
+The step_twisted default value for --reporter matches modern Twisteds,
+though, and won't work under 1.3.0.
+
+ShellCommand.flunkOnFailure now defaults to True, so any shell command which
+fails counts as a build failure. Set this to False if you don't want this
+behavior.
+
+** minor features
+
+contrib/darcs_buildbot.py contains a new script suitable for use in a darcs
+commit-hook.
+
+Hovering a cursor over the yellow "Build #123" box in the Waterfall display
+will pop up an HTML tooltip to show the reason for the build. Thanks to Zandr
+Milewski for the suggestion.
+
+contrib/CSS/*.css now contains several contributed stylesheets to make the
+Waterfall display a bit less ugly. Thanks to John O'Duinn for gathering them.
+
+ShellCommand and its derivatives can now accept either a string or a list of
+strings in the description= and descriptionDone= arguments. Thanks to Paul
+Winkler for the catch.
+
+
+* Release 0.7.3 (23 May 2006)
+
+** compatibility
+
+This release is compatible with Twisted-1.3.0, but the next one will not be.
+Please upgrade to at least Twisted-2.0.x soon, as the next buildbot release
+will require it.
+
+** new features
+
+*** Mercurial support
+
+Support for the Mercurial version control system (http://selenic.com/mercurial)
+has been added. This adds a buildbot.process.step.Mercurial BuildStep. A
+suitable hook script to deliver changes to the buildmaster is still missing.
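+
+A sketch of how the new step might appear in a factory (the repourl= and
+mode= argument names are assumptions patterned after the other VC steps, so
+check the class docstring):
+
+    from buildbot.process.step import Mercurial
+
+    f.addStep(Mercurial, repourl="http://hg.example.org/myproject", mode="copy")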
+
+*** 'buildbot restart' command
+
+The 'buildbot restart BASEDIR' command will perform a 'buildbot stop' and
+'buildbot start', and will attempt to wait for the buildbot process to shut
+down in between. This is useful when you need to upgrade the code on your
+buildmaster or buildslave and want to take it down for a minimum amount of
+time.
+
+*** build properties
+
+Each build now has a set of named "Build Properties", which can be set by
+steps and interpolated into ShellCommands. The 'revision' and 'got_revision'
+properties are the most interesting ones available at this point, and can be
+used e.g. to get the VC revision number into the filename of a generated
+tarball. See the user's manual section entitled "Build Properties" for more
+details.
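+
+For example, to embed got_revision in a tarball name (a sketch: WithProperties
+is the interpolation helper mentioned under "bugfixes" in the 0.7.4 notes
+above, and the import location shown here is an assumption):
+
+    from buildbot.process.step import ShellCommand, WithProperties
+
+    f.addStep(ShellCommand,
+              command=["tar", "czf",
+                       WithProperties("dist-%s.tar.gz", "got_revision"),
+                       "dist"])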
+
+** minor features
+
+*** IRC now takes password= argument
+
+Useful for letting your bot claim a persistent identity.
+
+*** svn_buildbot.py is easier to modify to understand branches
+*** BuildFactory has a new .addStep method
+*** p4poller has new arguments
+*** new contrib scripts: viewcvspoll, svnpoller, svn_watcher
+
+These poll an external VC repository to watch for changes, as opposed to
+adding a hook script to the repository that pushes changes into the
+buildmaster. This means higher latency but may be easier to configure,
+especially if you do not have authority on the repository host.
+
+*** VC build property 'got_revision'
+
+The 'got_revision' property reports what revision a VC step actually
+acquired, which may be useful to know when building from HEAD.
+
+*** improved CSS in Waterfall
+
+The Waterfall display has a few new class= tags, which may make it easier to
+write custom CSS to make it look prettier.
+
+*** robots_txt= argument in Waterfall
+
+You can now pass a filename to the robots_txt= argument, which will be served
+as the "robots.txt" file. This can be used to discourage search engine
+spiders from crawling through the numerous build-status pages.
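+
+For example (the Waterfall constructor and http_port= argument are the usual
+ones from buildbot.status.html; the filename is invented):
+
+    from buildbot.status import html
+
+    c['status'] = [html.Waterfall(http_port=8010,
+                                  robots_txt="~/buildbot-robots.txt")]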
+
+** bugfixes
+
+*** tests more likely to pass on non-English systems
+
+The unit test suite now sets $LANG='C' to make subcommands emit error
+messages in english instead of whatever native language is in use on the
+host. This improves the chances that the unit tests will pass on such
+systems. This affects certain VC-related subcommands too.
+
+test_vc was assuming that the system time was expressed with a numeric
+timezone, which is not always the case, especially under windows. This
+probably works better now than it did before. This only affects the CVS
+tests.
+
+'buildbot try' (for CVS) now uses UTC instead of the local timezone. The
+'got_revision' property is also expressed in UTC. Both should help deal with
+buggy versions of CVS that don't parse numeric timezones properly.
+
+
+* Release 0.7.2 (17 Feb 2006)
+
+** new features
+
+*** all TCP port numbers in config file now accept a strports string
+
+Sometimes it is useful to restrict certain TCP ports that the buildmaster
+listens on to use specific network interfaces. In particular, if the
+buildmaster and SVN repository live on the same machine, you may want to
+restrict the PBChangeSource to only listen on the loopback interface,
+ensuring that no external entities can inject Changes into the buildbot.
+Likewise, if you are using something like Apache's reverse-proxy feature to
+provide access to the buildmaster's HTML status page, you might want to hide
+the real Waterfall port by having it only bind to the loopback interface.
+
+To accomplish this, use a string like "tcp:12345:interface=127.0.0.1" instead
+of a number like 12345. These strings are called "strports specification
+strings", and are documented in twisted's twisted.application.strports module
+(you can probably type 'pydoc twisted.application.strports' to see this
+documentation). Pretty much everywhere the buildbot takes a port number will
+now accept a strports spec, and any bare numbers are translated into TCP port
+numbers (listening on all network interfaces) for compatibility.
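+
+For example, a master.cfg might keep the slaveport open to the world while
+hiding the Waterfall behind a local reverse proxy (a sketch; the port numbers
+are placeholders):
+
+  # buildslaves may connect from anywhere, as before
+  c['slavePortnum'] = "tcp:9989"
+  # the Waterfall is only reachable from the local reverse proxy
+  c['status'].append(html.Waterfall(http_port="tcp:8010:interface=127.0.0.1"))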
+
+*** buildslave --umask control
+
+Twisted's daemonization utility (/usr/bin/twistd) automatically sets the
+umask to 077, which means that all files generated by both the buildmaster
+and the buildslave will only be readable by the account under which the
+respective daemon is running. This makes it unnecessarily difficult to share
+build products (e.g. by symlinking ~/public_html/current_docs/ to a directory
+within the slave's build directory where each build puts the results of a
+"make docs" step).
+
+The 'buildbot slave <PARAMS>' command now accepts a --umask argument, which
+can be used to override the umask set by twistd. If you create the buildslave
+with '--umask=022', then all build products will be world-readable, making it
+easier for other processes (run under other accounts) to access them.
+
+** bug fixes
+
+The 0.7.1 release had a bug whereby reloading the config file could break all
+configured Schedulers, causing them to raise an exception when new changes
+arrived but not actually schedule a new build. This has been fixed.
+
+Fixed a bug which caused the AnyBranchScheduler to explode when branch==None.
+Thanks to Kevin Turner for the catch. I also think I fixed a bug whereby the
+TryScheduler would explode when it was given a Change (which it is supposed
+to simply ignore).
+
+The Waterfall display now does more quoting of names (including Builder
+names, BuildStep names, etc), so it is more likely that these names can
+contain unusual characters like spaces, quotes, and slashes. There may still
+be some problems with these kinds of names, however.. please report any bugs
+to the mailing list.
+
+
+* Release 0.7.1 (26 Nov 2005)
+
+** new features
+
+*** scheduler.Nightly
+
+Dobes Vandermeer contributed a cron-style 'Nightly' scheduler. Unlike the
+more-primitive Periodic class (which only lets you specify the duration
+between build attempts), Nightly lets you schedule builds for specific times
+of day, week, month, or year. The interface is very much like the crontab(5)
+file. See the buildbot.scheduler.Nightly docstring for complete details.
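+
+A sketch of the idea (the scheduler and builder names are placeholders; see
+the docstring for the full crontab-style argument list):
+
+  from buildbot.scheduler import Nightly
+  # build the 'full' builder every day at 03:15
+  c['schedulers'].append(Nightly("nightly", ["full"], hour=3, minute=15))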
+
+** minor new features
+
+*** step.Trial can work with Trial from Twisted >2.1.0
+
+The 'Trial' step now accepts the trialMode= argument, which should be a list
+of strings to be added to trial's argv array. This defaults to ["-to"], which
+is appropriate for the Trial that ships in Twisted-2.1.0 and earlier, and
+tells Trial to emit non-colorized verbose output. To use this step with
+trials from later versions of Twisted, this should be changed to
+["--reporter=bwverbose"].
+
+In addition, you can now set other Trial command-line parameters through the
+trialArgs= argument. This is a list of strings, and defaults to an empty list.
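+
+For example, a step specification for a post-2.1.0 Trial might look like this
+sketch (module paths follow the layout of this era, and the tests= value is a
+placeholder):
+
+  from buildbot.process.factory import s
+  from buildbot.process.step_twisted import Trial
+  s(Trial, trialMode=["--reporter=bwverbose"], tests="myproject.test")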
+
+*** Added a 'resubmit this build' button to the web page
+
+*** Make the VC-checkout step's description more useful
+
+Added the word "[branch]" to the VC step's description (used in the Step's
+box on the Waterfall page, among others) when we're checking out a
+non-default branch. Also add "rNNN" where appropriate to indicate which
+revision is being checked out. Thanks to Brad Hards and Nathaniel Smith for
+the suggestion.
+
+** bugs fixed
+
+Several patches from Dobes Vandermeer: Escape the URLs in email, in case they
+have spaces and such. Fill otherwise-empty <td> elements, as a workaround for
+buggy browsers that might optimize them away. Also use binary mode when
+opening status pickle files, to make windows work better. The
+AnyBranchScheduler now works even when you don't provide a fileIsImportant=
+argument.
+
+Stringify the base revision before stuffing it into a 'try' jobfile, helping
+SVN and Arch implement 'try' builds better. Thanks to Steven Walter for the
+patch.
+
+Fix the compare_attrs list in PBChangeSource, FreshCVSSource, and Waterfall.
+Before this, certain changes to these objects in the master.cfg file were
+ignored, such that you would have to stop and re-start the buildmaster to
+make them take effect.
+
+The config file is now loaded serially, shutting down old (or replaced)
+Status/ChangeSource plugins before starting new ones. This fixes a bug in
+which changing an aspect of, say, the Waterfall display would cause an
+exception as both old and new instances fight over the same TCP port. This
+should also fix a bug whereby new Periodic Schedulers could fire a build
+before the Builders have finished being added.
+
+There was a bug in the way Locks were handled when the config file was
+reloaded: changing one Builder (but not the others) and reloading master.cfg
+would result in multiple instances of the same Lock object, so the Locks
+would fail to prevent simultaneous execution of Builds or Steps. This has
+been fixed.
+
+** other changes
+
+For a long time, certain StatusReceiver methods (like buildStarted and
+stepStarted) have been able to return another StatusReceiver instance
+(usually 'self') to indicate that they wish to subscribe to events within the
+new object. For example, if the buildStarted() method returns 'self', the
+status receiver will also receive events for the new build, like
+stepStarted() and buildETAUpdate(). Returning a 'self' from buildStarted() is
+equivalent to calling build.subscribe(self).
+
+Starting with buildbot-0.7.1, this auto-subscribe convenience will also
+register to automatically unsubscribe the target when the build or step has
+finished, just as if build.unsubscribe(self) had been called. Also, the
+unsubscribe() method has been changed to not explode if the same receiver is
+unsubscribed multiple times. (note that it will still explode if the same
+receiver is *subscribed* multiple times, so please continue to refrain from
+doing that).
+
+
+* Release 0.7.0 (24 Oct 2005)
+
+** new features
+
+*** new c['schedulers'] config-file element (REQUIRED)
+
+The code which decides exactly *when* a build is performed has been massively
+refactored, enabling much more flexible build scheduling. YOU MUST UPDATE
+your master.cfg files to match: in general this will merely require you to
+add an appropriate c['schedulers'] entry. Any old ".treeStableTime" settings
+on the BuildFactory instances will now be ignored. The user's manual has
+complete details with examples of how the new Scheduler classes work.
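+
+In the simplest case, the new entry looks something like this sketch (names
+and the stable-timer value are placeholders; see the manual for the full
+argument list):
+
+  from buildbot.scheduler import Scheduler
+  c['schedulers'] = [Scheduler(name="all", branch=None,
+                               treeStableTimer=2*60,
+                               builderNames=["full"])]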
+
+*** c['interlocks'] removed, Locks and Dependencies now separate items
+
+The c['interlocks'] config element has been removed, and its functionality
+replaced with two separate objects. Locks are used to tell the buildmaster
+that certain Steps or Builds should not run at the same time as other Steps
+or Builds (useful for test suites that require exclusive access to some
+external resource: of course the real fix is to fix the tests, because
+otherwise your developers will be suffering from the same limitations). The
+Lock object is created in the config file and then referenced by a Step
+specification tuple or by the 'locks' key of the Builder specification
+dictionary. Locks come in two flavors: MasterLocks are buildmaster-wide,
+while SlaveLocks are specific to a single buildslave.
+
+When you want to have one Build run or not run depending upon whether some
+other set of Builds have passed or failed, you use a special kind of
+Scheduler defined in the scheduler.Dependent class. This scheduler watches an
+upstream Scheduler for builds of a given source version to complete, and only
+fires off its own Builders when all of the upstream's Builders have built
+that version successfully.
+
+Both features are fully documented in the user's manual.
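+
+A sketch of both features together (names are placeholders, and 'f' stands in
+for whatever BuildFactory you already use; the downstream builder itself is
+omitted here):
+
+  from buildbot import locks, scheduler
+
+  db_lock = locks.MasterLock("database")
+  b_full = {'name': "full", 'slavename': "bot1", 'builddir': "full",
+            'factory': f, 'locks': [db_lock]}
+
+  quick = scheduler.Scheduler("quick", None, 60, ["full"])
+  package = scheduler.Dependent("package", quick, ["make-packages"])
+  c['schedulers'] = [quick, package]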
+
+*** 'buildbot try'
+
+The 'try' feature has finally been added. There is some configuration
+involved, both in the buildmaster config and on the developer's side, but
+once in place this allows the developer to type 'buildbot try' in their
+locally-modified tree and to be given a report of what would happen if their
+changes were to be committed. This works by computing a (base revision,
+patch) tuple that describes the developer's tree, sending that to the
+buildmaster, then running a build with that source on a given set of
+Builders. The 'buildbot try' tool then emits status messages until the builds
+have finished.
+
+'try' exists to allow developers to run cross-platform tests on their code
+before committing it, reducing the chances they will inconvenience other
+developers by breaking the build. The UI is still clunky, but expect it to
+change and improve over the next few releases.
+
+Instructions for developers who want to use 'try' (and the configuration
+changes necessary to enable its use) are in the user's manual.
+
+*** Build-On-Branch
+
+When suitably configured, the buildbot can be used to build trees from a
+variety of related branches. You can set up Schedulers to build a tree using
+whichever branch was last changed, or users can request builds of specific
+branches through IRC, the web page, or (eventually) the CLI 'buildbot force'
+subcommand.
+
+The IRC 'force' command now takes --branch and --revision arguments (not that
+they always make sense). Likewise the HTML 'force build' button now has an
+input field for branch and revision. Your build's source-checkout step must
+be suitably configured to support this: for SVN it involves giving both a
+base URL and a default branch. Other VC systems are configured differently.
+The ChangeSource must also provide branch information: the 'buildbot
+sendchange' command now takes a --branch argument to help hook script writers
+accomplish this.
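+
+For SVN, the checkout step might be specified like this sketch (module paths
+as of this release; the URL and branch are placeholders):
+
+  from buildbot.process.factory import s
+  from buildbot.process.step import SVN
+  s(SVN, mode="update",
+    baseURL="svn://svn.example.org/myproject/",
+    defaultBranch="trunk")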
+
+*** Multiple slaves per Builder
+
+You can now attach multiple buildslaves to each Builder. This can provide
+redundancy or primitive load-balancing among many machines equally capable of
+running the build. To use this, define a key in the Builder specification
+dictionary named 'slavenames' with a list of buildslave names (instead of the
+usual 'slavename' that contains just a single slavename).
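+
+For example (a sketch, with placeholder slave names and 'f' standing in for
+your existing BuildFactory):
+
+  b1 = {'name': "full",
+        'slavenames': ["bot-linux", "bot-linux-spare"],
+        'builddir': "full",
+        'factory': f}
+  c['builders'] = [b1]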
+
+*** minor new features
+
+The IRC and email status-reporting facilities now provide more specific URLs
+for particular builds, in addition to the generic buildmaster home page. The
+HTML per-build page now has more information.
+
+The Twisted-specific test classes have been modified to match the argument
+syntax preferred by Trial as of Twisted-2.1.0 and newer. The generic trial
+steps are still suitable for the Trial that comes with older versions of
+Twisted, but may produce deprecation warnings or errors when used with the
+latest Trial.
+
+** bugs fixed
+
+DNotify, used by the maildir-watching ChangeSources, had problems on some
+64-bit systems relating to signed-vs-unsigned constants and the DN_MULTISHOT
+flag. A workaround was provided by Brad Hards.
+
+The web status page should now be valid XHTML, thanks to a patch by Brad
+Hards. The charset parameter is specified to be UTF-8, so VC comments,
+builder names, etc, should probably all be in UTF-8 to be displayed properly.
+
+** creeping version dependencies
+
+The IRC 'force build' command now requires python2.3 (for the shlex.split
+function).
+
+
+* Release 0.6.6 (23 May 2005)
+
+** bugs fixed
+
+The 'sendchange', 'stop', and 'sighup' subcommands were broken, simple bugs
+that were not caught by the test suite. Sorry.
+
+The 'buildbot master' command now uses "raw" strings to create .tac files
+that will still function under windows (since we must put directory names
+that contain backslashes into that file).
+
+The keep-on-disk behavior added in 0.6.5 included the ability to upgrade old
+in-pickle LogFile instances. This upgrade function was not added to the
+HTMLLogFile class, so an exception would be raised when attempting to load or
+display any build with one of these logs (which are normally used only for
+showing build exceptions). This has been fixed.
+
+Several unnecessary imports were removed, so the Buildbot should function
+normally with just Twisted-2.0.0's "Core" module installed. (of course you
+will need TwistedWeb, TwistedWords, and/or TwistedMail if you use status
+targets that require them). The test suite should skip all tests that cannot
+be run because of missing Twisted modules.
+
+The master/slave's basedir is now prepended to sys.path before starting the
+daemon. This used to happen implicitly (as a result of twistd's setup
+preamble), but 0.6.5 internalized the invocation of twistd and did not copy
+this behavior. This change restores the ability to access "private.py"-style
+modules in the basedir from the master.cfg file with a simple "import
+private" statement. Thanks to Thomas Vander Stichele for the catch.
+
+
+* Release 0.6.5 (18 May 2005)
+
+** deprecated config keys removed
+
+The 'webPortnum', 'webPathname', 'irc', and 'manholePort' config-file keys,
+which were deprecated in the previous release, have now been removed. In
+addition, Builders must now always be configured with dictionaries: the
+support for configuring them with tuples has been removed.
+
+** master/slave creation and startup changed
+
+The buildbot no longer uses .tap files to store serialized representations of
+the buildmaster/buildslave applications. Instead, this release now uses .tac
+files, which are human-readable scripts that create new instances (rather
+than .tap files, which were pickles of pre-created instances). 'mktap
+buildbot' is gone.
+
+You will need to update your buildbot directories to handle this. The
+procedure is the same as creating a new buildmaster or buildslave: use
+'buildbot master BASEDIR' or 'buildbot slave BASEDIR ARGS..'. This will
+create a 'buildbot.tac' file in the target directory. The 'buildbot start
+BASEDIR' command will then use twistd to start the application.
+
+The 'buildbot start' command now looks for a Makefile.buildbot, and if it
+finds one (and /usr/bin/make exists), it will use it to start the application
+instead of calling twistd directly. This allows you to customize startup,
+perhaps by adding environment variables. The setup commands create a sample
+file in Makefile.sample, but you must copy this to Makefile.buildbot to
+actually use it. The previous release looked for a bare 'Makefile', and also
+installed a 'Makefile', so you were always using the customized approach,
+even if you didn't ask for it. That old Makefile launched the .tap file, so
+changing names was also necessary to make sure that the new 'buildbot start'
+doesn't try to run the old .tap file.
+
+'buildbot stop' now uses os.kill instead of spawning an external process,
+making it more likely to work under windows. It waits up to 5 seconds for the
+daemon to go away, so you can now do 'buildbot stop BASEDIR; buildbot start
+BASEDIR' with less risk of launching the new daemon before the old one has
+fully shut down. Likewise, 'buildbot start' imports twistd's internals
+directly instead of spawning an external copy, so it should work better under
+windows.
+
+** new documentation
+
+All of the old Lore-based documents were converted into a new Texinfo-format
+manual, and considerable new text was added to describe the installation
+process. The docs are not yet complete, but they're slowly shaping up to form
+a proper user's manual.
+
+** new features
+
+Arch checkouts can now use precise revision stamps instead of always using
+the latest revision. A separate Source step for using Bazaar (an alternative
+Arch client) instead of 'tla' was added. A Source step for Cogito (the new
+linux kernel VC system) was contributed by Brandon Philips. All Source steps
+now accept a retry= argument to indicate that failing VC checkouts should be
+retried a few times (SF#1200395), note that this requires an updated
+buildslave.
+
+The 'buildbot sendchange' command was added, to be used in VC hook scripts to
+send changes to a pb.PBChangeSource . contrib/arch_buildbot.py was added to
+use this tool; it should be installed using the 'Arch meta hook' scheme.
+
+Changes can now accept a branch= parameter, and Builders have an
+isBranchImportant() test that acts like isFileImportant(). Thanks to Thomas
+Vander Stichele. Note: I renamed his tag= to branch=, in anticipation of an
+upcoming feature to build specific branches. "tag" seemed too CVS-centric.
+
+LogFiles have been rewritten to stream the incoming data directly to disk
+rather than keeping a copy in memory all the time (SF#1200392). This
+drastically reduces the buildmaster's memory requirements and makes 100MB+
+log files feasible. The log files are stored next to the serialized Builds,
+in files like BASEDIR/builder-dir/12-log-compile-output, so you'll want a
+cron job to delete old ones just like you do with old Builds. Old-style
+Builds from 0.6.4 and earlier are converted when they are first read, so the
+first load of the Waterfall display after updating to this release may take
+quite some time.
+
+** build process updates
+
+BuildSteps can now return a status of EXCEPTION, which terminates the build
+right away. This allows an exception to be reported as soon as it occurs,
+while still making sure the build stops quickly.
+
+** bug fixes
+
+Some more windows incompatibilities were fixed. The test suite now has two
+failing tests remaining, both of which appear to be Twisted issues that
+should not affect normal operation.
+
+The test suite no longer raises any deprecation warnings when run against
+twisted-2.0 (except for the ones which come from Twisted itself).
+
+
+* Release 0.6.4 (28 Apr 2005)
+
+** major bugs fixed
+
+The 'buildbot' tool in 0.6.3, when used to create a new buildmaster, failed
+unless it found a 'changes.pck' file. As this file is created by a running
+buildmaster, this made 0.6.3 completely unusable for first-time
+installations. This has been fixed.
+
+** minor bugs fixed
+
+The IRC bot had a bug wherein asking it to watch a certain builder (the "I'll
+give a shout when the build finishes" message) would cause an exception, so
+it would not, in fact, shout. The HTML page had an exception in the "change
+sources" page (reached by following the "Changes" link at the top of the
+column that shows the names of committers). Re-loading the config file while
+builders were already attached would result in a benign error message. The
+server side of the PBListener status client had an exception when providing
+information about a non-existent Build (e.g., when the client asks for the
+Build that is currently running, and the server says "None").
+
+These bugs have all been fixed.
+
+The unit tests now pass under python2.2; they were failing before because of
+some 2.3isms that crept in. More unit tests which failed under windows now
+pass, only one (test_webPathname_port) is still failing.
+
+** 'buildbot' tool looks for a .buildbot/options file
+
+The 'statusgui' and the 'debugclient' subcommands can both look for a
+.buildbot/ directory, and an 'options' file therein, to extract default
+values for the location of the buildmaster. This directory is searched in the
+current directory, its parent, etc, all the way up to the filesystem root
+(assuming you own the directories in question). It also looks in ~/.buildbot/
+for this file. This feature allows you to put a .buildbot at the top of your
+working tree, telling any 'buildbot' invocations you perform therein how to
+get to the buildmaster associated with that tree's project.
+
+Windows users get something similar, using %APPDATA%/buildbot instead of
+~/.buildbot .
+
+** windows ShellCommands are launched with 'cmd.exe'
+
+The buildslave has been modified to run all list-based ShellCommands by
+prepending [os.environ['COMSPEC'], '/c'] to the argv list before execution.
+This should allow the buildslave's PATH to be searched for commands,
+improving the chances that it can run the same 'trial -o foo' commands as a
+unix buildslave. The potential downside is that spaces in argv elements might
+be re-parsed, or quotes might be re-interpreted. The consensus on the mailing
+list was that this is a useful thing to do, but please report any problems
+you encounter with it.
+
+** minor features
+
+The Waterfall display now shows the buildbot's home timezone at the top of
+the timestamp column. The default favicon.ico is now much nicer-looking (it
+is generated with Blender.. the icon.blend file is available in CVS in
+docs/images/ should you care to play with it).
+
+
+
+* Release 0.6.3 (25 Apr 2005)
+
+** 'buildbot' tool gets more uses
+
+The 'buildbot' executable has acquired three new subcommands. 'buildbot
+debugclient' brings up the small remote-control panel that connects to a
+buildmaster (via the slave port and the c['debugPassword']). This tool,
+formerly in contrib/debugclient.py, lets you reload the config file, force
+builds, and simulate inbound commit messages. It requires gtk2, glade, and
+the python bindings for both to be installed.
+
+'buildbot statusgui' brings up a live status client, formerly available by
+running buildbot/clients/gtkPanes.py as a program. This connects to the PB
+status port that you create with:
+
+  c['status'].append(client.PBListener(portnum))
+
+and shows two boxes per Builder, one for the last build, one for current
+activity. These boxes are updated in realtime. The effect is primitive, but
+is intended as an example of what's possible with the PB status interface.
+
+'buildbot statuslog' provides a text-based running log of buildmaster events.
+
+Note: command names are subject to change. These should get much more useful
+over time.
+
+** web page has a favicon
+
+When constructing the html.Waterfall instance, you can provide the filename
+of an image that will be provided when the "favicon.ico" resource is
+requested. Many web browsers display this as an icon next to the URL or
+bookmark. A goofy little default icon is included.
+
+** web page has CSS
+
+Thanks to Thomas Vander Stichele, the Waterfall page is now themable through
+CSS. The default CSS is located in buildbot/status/classic.css, and creates a
+page that is mostly identical to the old, non-CSS based table.
+
+You can specify a different CSS file to use by passing it as the css=
+argument to html.Waterfall(). See the docstring for Waterfall for some more
+details.
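+
+For example (a sketch; the filename is a placeholder):
+
+  c['status'].append(html.Waterfall(http_port=8010, css="mystyle.css"))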
+
+** builder "categories"
+
+Thomas has added code which places each Builder in an optional "category".
+The various status targets (Waterfall, IRC, MailNotifier) can accept a list
+of categories, and they will ignore any activity in builders outside this
+list. This makes it easy to create some Builders which are "experimental" or
+otherwise not yet ready for the world to see, or indicate that certain
+builders should not harass developers when their tests fail, perhaps because
+the build slaves for them are not yet fully functional.
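+
+A sketch of how this fits together (category strings and names are
+placeholders, and 'f' stands in for your BuildFactory):
+
+  b_exp = {'name': "exp", 'slavename': "bot1", 'builddir': "exp",
+           'factory': f, 'category': "experimental"}
+  # this Waterfall only reports on the 'stable' builders
+  c['status'].append(html.Waterfall(http_port=8010, categories=["stable"]))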
+
+** Deprecated features
+
+*** defining Builders with tuples is deprecated
+
+For a long time, the preferred way to define builders in the config file has
+been with a dictionary. The less-flexible old style of a 4-item tuple (name,
+slavename, builddir, factory) is now officially deprecated (i.e., it will
+emit a warning if you use it), and will be removed in the next release.
+Dictionaries are more flexible: additional keys like periodicBuildTime are
+simply unavailable to tuple-defined builders.
+
+Note: it is a good idea to watch the logfile (usually in twistd.log) when you
+first start the buildmaster, or whenever you reload the config file. Any
+warnings or errors in the config file will be found there.
+
+*** c['webPortnum'], c['webPathname'], c['irc'] are deprecated
+
+All status reporters should be defined in the c['status'] array, using
+buildbot.status.html.Waterfall or buildbot.status.words.IRC . These have been
+deprecated for a while, but this is fair warning that these keys will be
+removed in the next release.
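+
+In other words, replace those keys with entries along these lines (a sketch;
+host, nick, and channel are placeholders):
+
+  from buildbot.status import html, words
+  c['status'] = [html.Waterfall(http_port=8010),
+                 words.IRC(host="irc.example.org", nick="bbot",
+                           channels=["#mychannel"])]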
+
+*** c['manholePort'] is deprecated
+
+Again, this has been deprecated for a while, in favor of:
+
+ c['manhole'] = master.Manhole(port, username, password)
+
+The preferred syntax will eventually let us use other, better kinds of debug
+shells, such as the experimental curses-based ones in the Twisted sandbox
+(which would offer command-line editing and history).
+
+** bug fixes
+
+The waterfall page has been improved a bit. A circular-reference bug in the
+web page's TextLog class was fixed, which caused a major memory leak in a
+long-running buildmaster with large logfiles that are viewed frequently.
+Modifying the config file in a way which only changed a builder's base
+directory now works correctly. The 'buildbot' command tries to create
+slightly more useful master/slave directories, adding a Makefile entry to
+re-create the .tap file, and removing global-read permissions from the files
+that may contain buildslave passwords.
+
+** twisted-2.0.0 compatibility
+
+Both buildmaster and buildslave should run properly under Twisted-2.0 . There
+are still some warnings about deprecated functions, some of which could be
+fixed, but there are others that would require removing compatibility with
+Twisted-1.3, and I don't expect to do that until 2.0 has been out and stable
+for at least several months. The unit tests should pass under 2.0, whereas
+the previous buildbot release had tests which could hang when run against the
+new "trial" framework in 2.0.
+
+The Twisted-specific steps (including Trial) have been updated to match 2.0
+functionality.
+
+** win32 compatibility
+
+Thanks to Nick Trout, more compatibility fixes have been incorporated,
+improving the chances that the unit tests will pass on windows systems. There
+are still some problems, and a step-by-step "running buildslaves on windows"
+document would be greatly appreciated.
+
+** API docs
+
+Thanks to Thomas Vander Stichele, most of the docstrings have been converted
+to epydoc format. There is a utility in docs/gen-reference to turn these into
+a tree of cross-referenced HTML pages. Eventually these docs will be
+auto-generated and somehow published on the buildbot web page.
+
+
+
+* Release 0.6.2 (13 Dec 2004)
+
+** new features
+
+It is now possible to interrupt a running build. Both the web page and the
+IRC bot feature 'stop build' commands, which can be used to interrupt the
+current BuildStep and accelerate the termination of the overall Build. The
+status reporting for these still leaves something to be desired (an
+'interrupt' event is pushed into the column, and the reason for the interrupt
+is added to a pseudo-logfile for the step that was stopped, but if you only
+look at the top-level status it appears that the build failed on its own).
+
+Builds are also halted if the connection to the buildslave is lost. On the
+slave side, any active commands are halted if the connection to the
+buildmaster is lost.
+
+** minor new features
+
+The IRC log bot now reports ETA times in a MMSS format like "2m45s" instead
+of the clunky "165 seconds".
+
+** bug fixes
+
+*** Slave Disconnect
+
+Slave disconnects should be handled better now: the current build should be
+abandoned properly. Earlier versions could get into weird states where the
+build failed to finish, clogging the builder forever (or at least until the
+buildmaster was restarted).
+
+In addition, there are weird network conditions which could cause a
+buildslave to attempt to connect twice to the same buildmaster. This can
+happen when the slave is sending large logfiles over a slow link, while using
+short keepalive timeouts. The buildmaster has been fixed to allow the second
+connection attempt to take precedence over the first, so that the older
+connection is jettisoned to make way for the newer one.
+
+In addition, the buildslave has been fixed to be less twitchy about timeouts.
+There are now two parameters: keepaliveInterval (which is controlled by the
+mktap 'keepalive' argument), and keepaliveTimeout (which requires editing the
+.py source to change from the default of 30 seconds). The slave expects to
+see *something* from the master at least once every keepaliveInterval
+seconds, and will try to provoke a response (by sending a keepalive request)
+'keepaliveTimeout' seconds before the end of this interval just in case there
+was no regular traffic. Any kind of traffic will qualify, including
+acknowledgements of normal build-status updates.
+
+The net result is that, as long as any given PB message can be sent over the
+wire in less than 'keepaliveTimeout' seconds, the slave should not mistakenly
+disconnect because of a timeout. There will be traffic on the wire at least
+every 'keepaliveInterval' seconds, which is what you want to pay attention to
+if you're trying to keep an intervening NAT box from dropping what it thinks
+is an abandoned connection. A quiet loss of connection will be detected
+within 'keepaliveInterval' seconds.
+
+*** Large Logfiles
+
+The web page rendering code has been fixed to deliver large logfiles in
+pieces, using a producer/consumer apparatus. This avoids the large spike in
+memory consumption when the log file body was linearized into a single string
+and then buffered in the socket's application-side transmit buffer. This
+should also avoid the 640k single-string limit for web.distrib servers that
+could be hit by large (>640k) logfiles.
+
+
+
+* Release 0.6.1 (23 Nov 2004)
+
+** win32 improvements/bugfixes
+
+Several changes have gone in to improve portability to non-unix systems. It
+should be possible to run a build slave under windows without major issues
+(although step-by-step documentation is still greatly desired: check the
+mailing list for suggestions from current win32 users).
+
+*** PBChangeSource: use configurable directory separator, not os.sep
+
+The PBChangeSource, which listens on a TCP socket for change notices
+delivered from tools like contrib/svn_buildbot.py, was splitting source
+filenames with os.sep . This is inappropriate, because those file names are
+coming from the VC repository, not the local filesystem, and the repository
+host may be running a different OS (with a different separator convention)
+than the buildmaster host. In particular, a win32 buildmaster using a CVS
+repository running on a unix box would be confused.
+
+PBChangeSource now takes a sep= argument to indicate the separator character
+to use.
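+
+For a repository hosted on a unix machine, the change source might be
+configured like this sketch:
+
+  from buildbot.changes.pb import PBChangeSource
+  c['sources'].append(PBChangeSource(sep="/"))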
+
+*** build saving should work better
+
+windows cannot do the atomic os.rename() trick that unix can, so under win32
+the buildmaster falls back to save/delete-old/rename, which carries a slight
+risk of losing a saved build log (if the system were to crash between the
+delete-old and the rename).
+
+** new features
+
+*** test-result tracking
+
+Work has begun on fine-grained test-result handling. The eventual goal is to
+be able to track individual tests over time, and create problem reports when
+a test starts failing (which then are resolved when the test starts passing
+again). The first step towards this is an ITestResult interface, and code in
+the TrialTestParser to create such results for all non-passing tests (the
+ones for which Trial emits exception tracebacks).
+
+These test results are currently displayed in a tree-like display in a page
+accessible from each Build's page (follow the numbered link in the yellow
+box at the start of each build to get there).
+
+This interface is still in flux, as it really wants to be able to accommodate
+things like compiler warnings and tests that are skipped because of missing
+libraries or unsupported architectures.
+
+** bug fixes
+
+*** VC updates should survive temporary failures
+
+Some VC systems (CVS and SVN in particular) get upset when files are turned
+into directories or vice versa, or when repository items are moved without
+the knowledge of the VC system. The usual symptom is that a 'cvs update'
+fails where a fresh checkout succeeds.
+
+To avoid having to manually intervene, the build slaves' VC commands have
+been refactored to respond to update failures by deleting the tree and
+attempting a full checkout. This may cause some unnecessary effort when,
+e.g., the CVS server falls off the net, but in the normal case it will only
+come into play when one of these can't-cope situations arises.
+
+*** forget about an existing build when the slave detaches
+
+If the slave was lost during a build, the master did not clear the
+.currentBuild reference, making that builder unavailable for later builds.
+This has been fixed, so that losing a slave should be handled better. This
+area still needs some work, I think it's still possible to get both the
+slave and the master wedged by breaking the connection at just the right
+time. Eventually I want to be able to resume interrupted builds (especially
+when the interruption is the result of a network failure and not because the
+slave or the master actually died).
+
+*** large logfiles now consume less memory
+
+Build logs are stored as lists of (type,text) chunks, so that
+stdout/stderr/headers can be displayed differently (if they were
+distinguishable when they were generated: stdout and stderr are merged when
+usePTY=1). For multi-megabyte logfiles, a large list with many short strings
+could incur a large overhead. The new behavior is to merge same-type string
+chunks together as they are received, aiming for a chunk size of about 10kb,
+which should bring the overhead down to a more reasonable level.
+
+There remains an issue with actually delivering large logfiles over, say,
+the HTML interface. The string chunks must be merged together into a single
+string before delivery, which causes a spike in the memory usage when the
+logfile is viewed. This can also break twisted.web.distrib -type servers,
+where the underlying PB protocol imposes a 640k limit on the size of
+strings. This will be fixed (with a proper Producer/Consumer scheme) in the
+next release.
+
+
+* Release 0.6.0 (30 Sep 2004)
+
+** new features
+
+*** /usr/bin/buildbot control tool
+
+There is now an executable named 'buildbot'. For now, this just provides a
+convenient front-end to mktap/twistd/kill, but eventually it will provide
+access to other client functionality (like the 'try' builds, and a status
+client). Assuming you put your buildbots in /var/lib/buildbot/master/FOO,
+you can do 'buildbot create-master /var/lib/buildbot/master/FOO' and it will
+create the .tap file and set up a sample master.cfg for you. Later,
+'buildbot start /var/lib/buildbot/master/FOO' will start the daemon.
+
+
+*** build status now saved in external files, -shutdown.tap unnecessary
+
+The status rewrite included a change to save all build status in a set of
+external files. These files, one per build, are put in a subdirectory of the
+master's basedir (named according to the 'builddir' parameter of the Builder
+configuration dictionary). This helps keep the buildmaster's memory
+consumption small: the (potentially large) build logs are kept on disk
+instead of in RAM. There is a small cache (2 builds per builder) kept in
+memory, but everything else lives on disk.
+
+The big change is that the buildmaster now keeps *all* status in these
+files. It is no longer necessary to preserve the buildbot-shutdown.tap file
+to run a persistent buildmaster. The buildmaster may be launched with
+'twistd -f buildbot.tap' each time, in fact the '-n' option can be added to
+prevent twistd from automatically creating the -shutdown.tap file.
+
+There is still one lingering bug with this change: the Expectations object
+for each builder (which records how long the various steps took, to provide
+an ETA value for the next time) is not yet saved. The result is that the
+first build after a restart will not provide an ETA value.
+
+0.6.0 keeps status in a single file per build, as opposed to 0.5.0 which
+kept status in many subdirectories (one layer for builds, another for steps,
+and a third for logs). 0.6.0 will detect and delete these subdirectories as
+it overwrites them.
+
+The saved builds are optional. To prevent disk usage from growing without
+bounds, you may want to set up a cron job to run 'find' and delete any which
+are too old. The status displays will happily survive without those saved
+build objects.
+
+The set of recorded Changes is kept in a similar file named 'changes.pck'.
+
+
+*** source checkout now uses timestamp/revision
+
+Source checkouts are now performed with an appropriate -D TIMESTAMP (for
+CVS) or -r REVISION (for SVN) marker to obtain the exact sources that were
+specified by the most recent Change going into the current Build. This
+avoids a race condition in which a change might be committed after the build
+has started but before the source checkout has completed, resulting in a
+mismatched set of source files. Such changes are now ignored.
+
+This works by keeping track of repository-wide revision/transaction numbers
+(for version control systems that offer them, like SVN). The checkout or
+update is performed with the highest such revision number. For CVS (which
+does not have them), the timestamp of each commit message is used, and a -D
+argument is created to place the checkout squarely in the middle of the "tree
+stable timer"'s window.
+
+This also provides the infrastructure for the upcoming 'try' feature. All
+source-checkout commands can now obtain a base revision marker and a patch
+from the Build, allowing certain builds to be performed on something other
+than the most recent sources.
+
+See source.xhtml and steps.xhtml for details.
+
+
+*** Darcs and Arch support added
+
+There are now build steps which retrieve a source tree from Darcs and Arch
+repositories. See steps.xhtml for details.
+
+Preliminary P4 support has been added, thanks to code from Dave Peticolas.
+You must manually set up each build slave with an appropriate P4CLIENT: all
+buildbot does is run 'p4 sync' at the appropriate times.
+
+
+*** Status reporting rewritten
+
+Status reporting was completely revamped. The config file now accepts a
+BuildmasterConfig['status'] entry, with a list of objects that perform status
+delivery. The old config file entries which controlled the web status port
+and the IRC bot have been deprecated in favor of adding instances to
+['status']. The following status-delivery classes have been implemented, all
+in the 'buildbot.status' package:
+
+ client.PBListener(port, username, passwd)
+ html.Waterfall(http_port, distrib_port)
+ mail.MailNotifier(fromaddr, mode, extraRecipients..)
+ words.IRC(host, nick, channels)
+
+See the individual docstrings for details about how to use each one. You can
+create new status-delivery objects by following the interfaces found in the
+buildbot.interfaces module.
+
+
+*** BuildFactory configuration process changed
+
+The basic BuildFactory class is now defined in buildbot.process.factory
+rather than buildbot.process.base, so you will have to update your config
+files. factory.BuildFactory is the base class, which accepts a list of Steps
+to run. See docs/factories.xhtml for details.
+
+There are now easier-to-use BuildFactory classes for projects which use GNU
+Autoconf, perl's MakeMaker (CPAN), python's distutils (but no unit tests),
+and Twisted's Trial. Each one takes a separate 'source' Step to obtain the
+source tree, and then fills in the rest of the Steps for you.
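+
+A sketch of the new style (module paths as of this release; the repository
+URL and commands are placeholders):
+
+  from buildbot.process import factory, step
+  f = factory.BuildFactory([
+      factory.s(step.SVN, svnurl="svn://svn.example.org/myproject/trunk",
+                mode="update"),
+      factory.s(step.Compile, command="make all"),
+      factory.s(step.Test, command="make test"),
+      ])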
+
+
+*** CVS/SVN VC steps unified, simplified
+
+The confusing collection of arguments for the CVS step ('clobber=',
+'copydir=', and 'export=') have been removed in favor of a single 'mode'
+argument. This argument describes how you want to use the sources: whether
+you want to update and compile everything in the same tree (mode='update'),
+or do a fresh checkout and full build each time (mode='clobber'), or
+something in between.
+
+The SVN (Subversion) step has been unified and accepts the same mode=
+parameter as CVS. New version control steps will obey the same interface.
+
+Most of the old configuration arguments have been removed. You will need to
+update your configuration files to use the new arguments. See
+docs/steps.xhtml for a description of all the new parameters.
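+
+Continuing the factory sketch above (same imports), a fresh-checkout-every-
+time CVS step might be written like this (the repository and module are
+placeholders):
+
+  factory.s(step.CVS,
+            cvsroot=":pserver:anonymous@cvs.example.org:/cvsroot",
+            cvsmodule="myproject",
+            mode="clobber")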
+
+
+*** Preliminary Debian packaging added
+
+Thanks to the contributions of Kirill Lapshin, we can now produce .deb
+installer packages. These are still experimental, but they include init.d
+startup/shutdown scripts, which use the new /usr/bin/buildbot to invoke
+twistd. Create your buildmasters in /var/lib/buildbot/master/FOO, and your
+slaves in /var/lib/buildbot/slave/BAR, then put FOO and BAR in the
+appropriate places in /etc/default/buildbot . After that, the buildmasters
+and slaves will be started at every boot.
+
+Pre-built .debs are not yet distributed. Use 'debuild -uc -us' from the
+source directory to create them.
+
+
+** minor features
+
+
+*** Source Stamps
+
+Each build now has a "source stamp" which describes what sources it used. The
+idea is that the sources for this particular build can be completely
+regenerated from the stamp. The stamp is a tuple of (revision, patch), where
+the revision depends on the VC system being used (for CVS it is either a
+revision tag like "BUILDBOT-0_5_0" or a datestamp like "2004/07/23", for
+Subversion it is a revision number like 11455). This must be combined with
+information from the Builder that is constant across all builds (something to
+point at the repository, and possibly a branch indicator for CVS and other VC
+systems that don't fold this into the repository string).
+
+The patch is an optional unified diff file, ready to be applied by running
+'patch -p0 <PATCH' from inside the workdir. This provides support for the
+'try' feature that will eventually allow developers to run buildbot tests on
+their code before checking it in.
+
+
+*** SIGHUP causes the buildmaster's configuration file to be re-read
+
+*** IRC bot now has 'watch' command
+
+You can now tell the buildbot's IRC bot to 'watch <buildername>' on a builder
+which is currently performing a build. When that build is finished, the
+buildbot will make an announcement (including the results of the build).
+
+The IRC 'force build' command will also announce when the resulting build has
+completed.
+
+
+*** the 'force build' option on HTML and IRC status targets can be disabled
+
+The html.Waterfall display and the words.IRC bot may be constructed with an
+allowForce=False argument, which removes the ability to force a build through
+these interfaces. Future versions will be able to restrict this build-forcing
+capability to authenticated users. The per-builder HTML page no longer
+displays the 'Force Build' buttons if it does not have this ability. Thanks
+to Fred Drake for code and design suggestions.
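+
+For example (host, nick, and channel are placeholders):
+
+  c['status'].append(html.Waterfall(http_port=8010, allowForce=False))
+  c['status'].append(words.IRC(host="irc.example.org", nick="bbot",
+                               channels=["#mychannel"], allowForce=False))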
+
+
+*** master now takes 'projectName' and 'projectURL' settings
+
+These strings allow the buildbot to describe what project it is working for.
+At the moment they are only displayed on the Waterfall page, but in the next
+release they will be retrievable from the IRC bot as well.
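+
+For example:
+
+  c['projectName'] = "My Project"
+  c['projectURL'] = "http://www.example.org/"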
+
+
+*** survive recent (SVN) Twisted versions
+
+The buildbot should run correctly (albeit with plenty of noisy deprecation
+warnings) under the upcoming Twisted-2.0 release.
+
+
+*** work-in-progress realtime Trial results acquisition
+
+Jonathan Simms (<slyphon>) has been working on 'retrial', a rewrite of
+Twisted's unit test framework that will most likely be available in
+Twisted-2.0 . Although it is not yet complete, the buildbot will be able to
+use retrial in such a way that build status is reported on a per-test basis,
+in real time. This will be the beginning of fine-grained test tracking and
+Problem management, described in docs/users.xhtml .
+
+
+* Release 0.5.0 (22 Jul 2004)
+
+** new features
+
+*** web.distrib servers via TCP
+
+The 'webPathname' config option, which specifies a UNIX socket on which to
+publish the waterfall HTML page (for use by 'mktap web -u' or equivalent),
+now accepts a numeric port number. This publishes the same thing via TCP,
+allowing the parent web server to live on a separate machine.
+
+This config option could be named better, but it will go away altogether in
+a few releases, when status delivery is unified. It will be replaced with a
+WebStatusTarget object, and the config file will simply contain a list of
+various kinds of status targets.
+
+*** 'master.cfg' filename is configurable
+
+The buildmaster can use a config file named something other than
+"master.cfg". Use the --config=foo.cfg option to mktap to control this.
+
+*** FreshCVSSource now uses newcred (CVSToys >= 1.0.10)
+
+The FreshCVSSource class now defaults to speaking to freshcvs daemons from
+modern CVSToys releases. If you need to use the buildbot with a daemon from
+CVSToys-1.0.9 or earlier, use FreshCVSSourceOldcred instead. Note that the
+new form only requires host/port/username/passwd: the "serviceName"
+parameter is no longer meaningful.
+
+*** Builders are now configured with a dictionary, not a tuple
+
+The preferred way to set up a Builder in master.cfg is to provide a
+dictionary with various keys, rather than a (non-extensible) 4-tuple. See
+docs/config.xhtml for details. The old tuple-way is still supported for now,
+it will probably be deprecated in the next release and removed altogether in
+the following one.
+
+*** .periodicBuildTime is now exposed to the config file
+
+To set a builder to run at periodic intervals, simply add a
+'periodicBuildTime' key to its master.cfg dictionary. Again, see
+docs/config.xhtml for details.
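+
+For example, to force a build at least once an hour (a sketch; 'f' stands in
+for your build factory):
+
+  c['builders'] = [{'name': "hourly", 'slavename': "bot1",
+                    'builddir': "hourly", 'factory': f,
+                    'periodicBuildTime': 60*60}]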
+
+*** svn_buildbot.py adds --include, --exclude
+
+The commit trigger script now gives you more control over which files are
+sent to the buildmaster and which are not.
+
+*** usePTY is controllable at slave mktap time
+
+The buildslaves usually run their child processes in a pty, which creates a
+process group for all the children, which makes it much easier to kill them
+all at once (i.e. if a test hangs). However this causes problems on some
+systems. Rather than hacking slavecommand.py to disable the use of these
+ptys, you can now create the slave's .tap file with --usepty=0 at mktap
+time.
+
+** Twisted changes
+
+A summary of warnings (e.g. DeprecationWarnings) is provided as part of the
+test-case summarizer. The summarizer also counts Skips, expectedFailures,
+and unexpectedSuccesses, displaying the counts on the test step's event box.
+
+The RunUnitTests step now uses "trial -R twisted" instead of "trial
+twisted.test", which is a bit cleaner. All .pyc files are deleted before
+starting trial, to avoid getting tripped up by deleted .py files.
+
+** documentation
+
+docs/config.xhtml now describes the syntax and allowed contents of the
+'master.cfg' configuration file.
+
+** bugfixes
+
+Interlocks had a race condition that could cause the lock to get stuck
+forever.
+
+FreshCVSSource has a prefix= argument that was moderately broken (it used to
+only work if the prefix was a single directory component). It now works with
+subdirectories.
+
+The buildmaster used to complain when it saw the "info" directory in a
+slave's workspace. This directory is used to publish information about the
+slave host and its administrator, and is not a leftover build directory as
+the complaint suggested. This complaint has been silenced.
+
+
+* Release 0.4.3 (30 Apr 2004)
+
+** PBChangeSource made explicit
+
+In 0.4.2 and before, an internal interface was available which allowed
+special clients to inject changes into the Buildmaster. This interface is
+used by the contrib/svn_buildbot.py script. The interface has been extracted
+into a proper PBChangeSource object, which should be created in the
+master.cfg file just like the other kinds of ChangeSources. See
+docs/sources.xhtml for details.
+
+If you were implicitly using this change source (for example, if you use
+Subversion and the svn_buildbot.py script), you *must* add this source to
+your master.cfg file, or changes will not be delivered and no builds will be
+triggered.
+
+The PBChangeSource accepts the same "prefix" argument as all other
+ChangeSources. For a SVN repository that follows the recommended practice of
+using "trunk/" for the trunk revisions, you probably want to construct the
+source like this:
+
+ source = PBChangeSource(prefix="trunk")
+
+to make sure that the Builders are given sensible (trunk-relative)
+filenames for each changed source file.
+
+** Twisted changes
+
+*** step_twisted.RunUnitTests can change "bin/trial"
+
+The twisted RunUnitTests step was enhanced to let you run something other
+than "bin/trial", making it easier to use a buildbot on projects which use
+Twisted but aren't actually Twisted itself.
+
+*** Twisted now uses Subversion
+
+Now that Twisted has moved from CVS to SVN, the Twisted build processes have
+been modified to perform source checkouts from the Subversion repository.
+
+** minor feature additions
+
+*** display Changes with HTML
+
+Changes are displayed with a bit more pizazz, and a links= argument was
+added to allow things like ViewCVS links to be added to the display
+(although it is not yet clear how this argument should be used: the
+interface remains subject to change until it has been documented).
+
+*** display ShellCommand logs with HTML
+
+Headers are in blue, stderr is in red (unless usePTY=1 in which case stderr
+and stdout are indistinguishable). A link is provided which returns the same
+contents as plain text (by appending "?text=1" to the URL).
+
+*** buildslaves send real tracebacks upon error
+
+The .unsafeTracebacks option has been turned on for the buildslaves,
+allowing them to send a full stack trace when an exception occurs, which is
+logged in the buildmaster's twistd.log file. This makes it much easier to
+determine what went wrong on the slave side.
+
+*** BasicBuildFactory refactored
+
+The BasicBuildFactory class was refactored to make it easier to create
+derivative classes, in particular the BasicSVN variant.
+
+*** "ping buildslave" web button added
+
+There is now a button on the "builder information" page that lets a web user
+initiate a ping of the corresponding build slave (right next to the button
+that lets them force a build). This was added to help track down a problem
+with the slave keepalives.
+
+** bugs fixed:
+
+You can now have multiple BuildSteps with the same name (the names are used
+as hash keys in the data structure that helps determine ETA values for each
+step, the new code creates unique key names if necessary to avoid
+collisions). This means that, for example, you do not have to create a
+BuildStep subclass just to have two Compile steps in the same process.
+
+If CVSToys is not installed, the tests that depend upon it are skipped.
+
+Some tests in 0.4.2 failed because of a missing set of test files; they are
+now included in the tarball properly.
+
+Slave keepalives should work better now in the face of silent connection
+loss (such as when an intervening NAT box times out the association); the
+connection should be reestablished in minutes instead of hours.
+
+Shell commands on the slave are invoked with an argument list instead of the
+ugly and error-prone split-on-spaces approach. If the ShellCommand is given
+a string (instead of a list), it will fall back to splitting on spaces.
+Shell commands should work on win32 now (using COMSPEC instead of /bin/sh).
+
+Buildslaves under w32 should theoretically work now, and one was running for
+the Twisted buildbot for a while until the machine had to be returned.
+
+The "header" lines in ShellCommand logs (which include the first line, that
+displays the command being run, and the last, which shows its exit status)
+are now generated by the buildslave side instead of the local (buildmaster)
+side. This can provide better error handling and is generally cleaner.
+However, if you have an old buildslave (running 0.4.2 or earlier) and a new
+buildmaster, then neither end will generate these header lines.
+
+CVSCommand was improved: in certain situations 0.4.2 would perform
+unnecessary checkouts (when an update would have sufficed). Thanks to Johan
+Dahlin for the patches. The status output was fixed as well, so that
+failures in CVS and SVN commands (such as not being able to find the 'svn'
+executable) make the step status box red.
+
+Subversion support was refactored to make it behave more like CVS. This is a
+work in progress and will be improved in the next release.
+
+
+* Release 0.4.2 (08 Jan 2004)
+
+** test suite updated
+
+The test suite has been completely moved over to Twisted's "Trial"
+framework, and all tests now pass. To run the test suite (consisting of 64
+tests, probably covering about 30% of BuildBot's logic), do this:
+
+ PYTHONPATH=. trial -v buildbot.test
+
+** Mail parsers updated
+
+Several bugs in the mail-parsing code were fixed, allowing a buildmaster to
+be triggered by mail sent out by a CVS repository. (The Twisted Buildbot is
+now using this to trigger builds, as their CVS server machine is having some
+difficulties with FreshCVS). The FreshCVS mail format for directory
+additions appears to have changed recently: the new parser should handle
+both old and new-style messages.
+
+A parser for Bonsai commit messages (buildbot.changes.mail.parseBonsaiMail)
+was contributed by Stephen Davis. Thanks Stephen!
+
+** CVS "global options" now available
+
+The CVS build step can now accept a list of "global options" to give to the
+cvs command. These go before the "update"/"checkout" word, and are described
+fully by "cvs --help-options". Two useful ones might be "-r", which causes
+checked-out files to be read-only, and "-R", which assumes the repository is
+read-only (perhaps by not attempting to write to lock files).
+
+
+* Release 0.4.1 (09 Dec 2003)
+
+** MaildirSources fixed
+
+Several bugs in MaildirSource made them unusable. These have been fixed (for
+real this time). The Twisted buildbot is using an FCMaildirSource while they
+fix some FreshCVS daemon problems, which provided the encouragement for
+getting these bugs fixed.
+
+In addition, the use of DNotify (only available under linux) was somehow
+broken, possibly by changes in some recent version of Python. It appears to
+be working again now (against both python-2.3.3c1 and python-2.2.1).
+
+** master.cfg can use 'basedir' variable
+
+As documented in the sample configuration file (but not actually implemented
+until now), a variable named 'basedir' is inserted into the namespace used
+by master.cfg . This can be used with something like:
+
+  os.path.join(basedir, "maildir")
+
+to obtain a master-basedir-relative location.
+
+
+* Release 0.4.0 (05 Dec 2003)
+
+** newapp
+
+I've moved the codebase to Twisted's new 'application' framework, which
+drastically cleans up service startup/shutdown just like newcred did for
+authorization. This is mostly an internal change, but the interface to
+IChangeSources was modified, so in the off chance that someone has written a
+custom change source, it may have to be updated to the new scheme.
+
+The most user-visible consequence of this change is that now both
+buildmasters and buildslaves are generated with the standard Twisted 'mktap'
+utility. Basic documentation is in the README file.
+
+Both buildmaster and buildslave .tap files need to be re-generated to run
+under the new code. I have not figured out the styles.Versioned upgrade path
+well enough to avoid this yet. Sorry.
+
+This also means that both buildslaves and the buildmaster require
+Twisted-1.1.0 or later.
+
+** reloadable master.cfg
+
+Most aspects of a buildmaster are now controlled by a configuration file
+which can be re-read at runtime without losing build history. This feature
+makes the buildmaster *much* easier to maintain.
+
+In the previous release, you would create the buildmaster by writing a
+program to define the Builders and ChangeSources and such, then run it to
+create the .tap file. In the new release, you use 'mktap' to create the .tap
+file, and the only parameter you give it is the base directory to use. Each
+time the buildmaster starts, it will look for a file named 'master.cfg' in
+that directory and parse it as a python script. That script must define a
+dictionary named 'BuildmasterConfig' with various keys to define the
+builders, the known slaves, what port to use for the web server, what IRC
+channels to connect to, etc.
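+
+A bare-bones master.cfg of this era might look like the following sketch. The
+key names follow docs/examples/twisted_master.cfg; the factory construction,
+slave names, passwords, and ports are placeholders and only illustrative.
+
+  # illustrative only: see the sample config for the real factory classes
+  from buildbot.process.base import BasicBuildFactory
+  f = BasicBuildFactory("anonymous@cvs.example.org:/cvsroot", "myproject")
+
+  BuildmasterConfig = c = {}
+  c['bots'] = [("bot1", "bot1passwd")]
+  c['sources'] = []
+  c['builders'] = [("quick", "bot1", "quick-dir", f)]
+  c['slavePortnum'] = 9989
+  c['webPortnum'] = 8010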
+
+This config file can be re-read at runtime, and the buildmaster will compute
+the differences and add/remove services as necessary. The re-reading is
+currently triggered through the debug port (contrib/debugclient.py is the
+debug port client), but future releases will add the ability to trigger the
+reconfiguration by IRC command, web page button, and probably a local UNIX
+socket (with a helper script to trigger a rebuild locally).
+
+docs/examples/twisted_master.cfg contains a sample configuration file, which
+also lists all the keys that can be set.
+
+There may be some bugs lurking, such as re-configuring the buildmaster while
+a build is running. It needs more testing.
+
+** MaxQ support
+
+Radix contributed some support scripts to run MaxQ test scripts. MaxQ
+(http://maxq.tigris.org/) is a web testing tool that allows you to record
+HTTP sessions and play them back.
+
+** Builders can now wait on multiple Interlocks
+
+The "Interlock" code has been enhanced to allow multiple builders to wait on
+each one. This was done to support the new config-file syntax for specifying
+Interlocks (in which each interlock is a tuple of A and [B], where A is the
+builder the Interlock depends upon, and [B] is a list of builders that
+depend upon the Interlock).
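+
+For example, an entry along these lines would make the 'quick-2.3' and
+'docs' builders wait on an Interlock fed by the 'full-2.2' builder (a
+sketch only: the 'interlocks' key name and the builder names are
+assumptions, not taken from these notes):
+
+  BuildmasterConfig['interlocks'] = [ ('full-2.2', ['quick-2.3', 'docs']) ]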
+
+"Interlock" is misnamed. In the next release it will be changed to
+"Dependency", because that's what it really expresses. A new class (probably
+called Interlock) will be created to express the notion that two builders
+should not run at the same time, useful when multiple builders are run on
+the same machine and thrashing results when several CPU- or disk- intensive
+compiles are done simultaneously.
+
+** FreshCVSSource can now handle newcred-enabled FreshCVS daemons
+
+There are now two FreshCVSSource classes: FreshCVSSourceNewcred talks to
+newcred daemons, and FreshCVSSourceOldcred talks to oldcred ones. Mind you,
+FreshCVS doesn't yet do newcred, but when it does, we'll be ready.
+
+'FreshCVSSource' maps to the oldcred form for now. That will probably change
+when the current release of CVSToys supports newcred by default.
+
+** usePTY=1 on posix buildslaves
+
+When a buildslave is running under POSIX (i.e. pretty much everything except
+windows), child processes are created with a pty instead of separate
+stdin/stdout/stderr pipes. This makes it more likely that a hanging build
+(when killed off by the timeout code) will have all its sub-children cleaned
+up. Non-pty children would tend to leave subprocesses running because the
+buildslave was only able to kill off the top-level process (typically
+'make').
+
+Windows doesn't have any concept of ptys, so non-posix systems do not try to
+enable them.
+
+** mail parsers should actually work now
+
+The email parsing functions (FCMaildirSource and SyncmailMaildirSource) were
+broken because of my confused understanding of how python class methods
+work. These sources should be functional now.
+
+** more irc bot silliness
+
+The IRC bot can now perform half of the famous AYBABTO scene.
+
+
+* Release 0.3.5 (19 Sep 2003)
+
+** newcred
+
+Buildbot has moved to "newcred", a new authorization framework provided by
+Twisted, which is a good bit cleaner and easier to work with than the
+"oldcred" scheme in older versions. This causes both buildmaster and
+buildslaves to depend upon Twisted 1.0.7 or later. The interface to
+'makeApp' has changed somewhat (the multiple kinds of remote connections all
+use the same TCP port now).
+
+Old buildslaves will get "_PortalWrapper instance has no attribute
+'remote_username'" errors when they try to connect. They must be upgraded.
+
+The FreshCVSSource uses PB to connect to the CVSToys server. This has been
+upgraded to use newcred too. If you get errors (TODO: what do they look
+like?) in the log when the buildmaster tries to connect, you need to upgrade
+your FreshCVS service or use the 'useOldcred' argument when creating your
+FreshCVSSource. This is a temporary hack to allow the buildmaster to talk to
+oldcred CVSToys servers. Using it will trigger deprecation warnings. It will
+go away eventually.
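+
+For example (a hypothetical sketch: the positional arguments follow the
+current FreshCVSSource constructor and may not match the 0.3.5-era
+signature exactly; the host, port, and credentials are placeholders):
+
+  FreshCVSSource("cvs.example.org", 4519, "buildbot", "secret",
+                 useOldcred=True)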
+
+In conjunction with this change, makeApp() now accepts a password which can
+be applied to the debug service.
+
+** new features
+
+*** "copydir" for CVS checkouts
+
+The CVS build step can now accept a "copydir" parameter, which should be a
+directory name like "source" or "orig". If provided, the CVS checkout is
+done once into this directory, then copied into the actual working directory
+for compilation etc. Later updates are done in place in the copydir, then
+the workdir is replaced with a copy.
+
+This reduces CVS bandwidth (update instead of full checkout) at the expense
+of twice the disk space (two copies of the tree).
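+
+For example (a sketch only: the argument names other than copydir are
+assumptions, and the surrounding build-process setup is omitted):
+
+  CVS(cvsroot=":pserver:anonymous@cvs.example.org:/cvsroot",
+      cvsmodule="myproject",
+      copydir="source")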
+
+*** Subversion (SVN) support
+
+Radix (Christopher Armstrong) contributed early support for building
+Subversion-based trees. The new 'SVN' buildstep behaves roughly like the
+'CVS' buildstep, and the contrib/svn_buildbot.py script can be used as a
+checkin trigger to feed changes to a running buildmaster.
+
+** notable bugfixes
+
+*** .tap file generation
+
+We no longer set the .tap filename, because the buildmaster/buildslave
+service might be added to an existing .tap file and we shouldn't presume to
+own the whole thing. You may want to manually rename the "buildbot.tap" file
+to something more meaningful (like "buildslave-bot1.tap").
+
+*** IRC reconnect
+
+If the IRC server goes away (it was restarted, or the network connection was
+lost), the buildmaster will now schedule a reconnect attempt.
+
+*** w32 buildslave fixes
+
+An "rm -rf" was turned into shutil.rmtree on non-posix systems.
+
+
+* Release 0.3.4 (28 Jul 2003)
+
+** IRC client
+
+The buildmaster can now join a set of IRC channels and respond to simple
+queries about builder status.
+
+** slave information
+
+The build slaves can now report information from a set of info/* files in
+the slave base directory to the buildmaster. This will be used by the slave
+administrator to announce details about the system hosting the slave,
+contact information, etc. For now, info/admin should contain the name/email
+of the person who is responsible for the buildslave, and info/host should
+describe the system hosting the build slave (OS version, CPU speed, memory,
+etc). The contents of these files are made available through the waterfall
+display.
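+
+For example (hypothetical contents; both files live in the slave's base
+directory):
+
+  info/admin:  Bot Herder <botherder@example.org>
+  info/host:   Linux 2.4.20 on a 1GHz Pentium III, 256MB RAM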
+
+** change notification email parsers
+
+A parser for Syncmail (syncmail.sourceforge.net) was added. SourceForge
+provides examples of setting up syncmail to deliver CVS commit messages to
+mailing lists, so hopefully this will make it easier for sourceforge-hosted
+projects to set up a buildbot.
+
+email processors were moved into buildbot.changes.mail . FCMaildirSource was
+moved, and the compatibility location (buildbot.changes.freshcvsmail) will
+go away in the next release.
+
+** w32 buildslave ought to work
+
+Some non-portable code was changed to make it more likely that the
+buildslave will run under windows. The Twisted buildbot now has a
+(more-or-less) working w32 buildslave.
+
+
+* Release 0.3.3 (21 May 2003):
+
+** packaging changes
+
+*** include doc/examples in the release. Oops again.
+
+** network changes
+
+*** add keepalives to deal with NAT boxes
+
+Some NAT boxes drop port mappings if the TCP connection looks idle for too
+long (maybe 30 minutes?). Add application-level keepalives (dummy commands
+sent from slave to master every 10 minutes) to appease the NAT box and keep
+our connection alive. Enable this with --keepalive in the slave mktap
+command line. Check the README for more details.
+
+** UI changes
+
+*** allow slaves to trigger any build that they host
+
+Added an internal function that lets a slave ask the buildmaster to start
+one of the builds it hosts. For now this must be triggered with a debugger
+or manhole on the slave side; a better UI will be added later.
+
+*** allow web page viewers to trigger any build
+
+Added a button to the per-build page (linked by the build names on the third
+row of the waterfall page) to allow viewers to manually trigger builds.
+There is a field for them to indicate who they are and why they are
+triggering the build. It is possible to abuse this, but for now the benefits
+outweigh the damage that could be done (worst case, someone can make your
+machine run builds continuously).
+
+** generic buildprocess changes
+
+*** don't queue multiple builds for offline slaves
+
+If a slave is not online when a build is ready to run, that build is queued
+so the slave will run it when it next connects. However, the buildmaster
+used to queue every such build, so the poor slave machine would be subject
+to tens or hundreds of builds in a row when it finally came back online.
+The buildmaster has been changed to merge these multiple builds into a
+single one.
+
+*** bump ShellCommand default timeout to 20 minutes
+
+Used for testing out the win32 twisted builder. I will probably revert this
+in the next release.
+
+*** split args in ShellCommand ourselves instead of using /bin/sh
+
+This should remove the need for /bin/sh on the slave side, improving the
+chances that the buildslave can run on win32.
+
+*** add configureEnv argument to Configure step, pass env dict to slave
+
+Allows build processes to do things like 'CFLAGS=-O0 ./configure' without
+using /bin/sh to set the environment variable.
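+
+For example, a step specification like the following could be used (a
+sketch only: the surrounding factory setup is omitted, and the exact
+spelling of step specifications may differ between releases):
+
+  (step.Configure, {"configureEnv": {"CFLAGS": "-O0"}})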
+
+** Twisted buildprocess changes
+
+*** warn instead of flunk the build when cReactor or qtreactor tests fail
+
+These two always fail. For now, downgrade those failures to a warning
+(orange box instead of red).
+
+*** don't use 'clobber' on remote builds
+
+Builds that run on remote machines (freebsd, OS-X) now use 'cvs update'
+instead of clobbering their trees and doing a fresh checkout. The multiple
+simultaneous CVS checkouts were causing a strain on Glyph's upstream
+bandwidth.
+
+*** use trial --testmodule instead of our own test-case-name grepper
+
+The Twisted coding/testing convention has developers put 'test-case-name'
+tags (emacs local variables, actually) in source files to indicate which
+test cases should be run to exercise that code. Twisted's unit-test
+framework just acquired an argument to look for these tags itself. Use that
+instead of the extra FindUnitTestsForFiles build step we were doing before.
+Removes a good bit of code from buildbot and into Twisted where it really
+belongs.
+
+
+* Release 0.3.2 (07 May 2003):
+
+** packaging changes
+
+*** fix major packaging bug: none of the buildbot/* subdirectories were
+included in the 0.3.1 release. Sorry, I'm still figuring out distutils
+here..
+
+** internal changes
+
+*** use pb.Cacheable to update Events in remote status client. much cleaner.
+
+*** start to clean up BuildProcess->status.builder interface
+
+** bug fixes
+
+*** waterfall display was missing a <tr>, causing it to be misrendered in most
+browsers (except the one I was testing it with, of course)
+
+*** URL without trailing slash (when served in a twisted-web distributed
+server, with a url like "http://twistedmatrix.com/~warner.twistd") should do
+redirect to URL-with-trailing-slash, otherwise internal hrefs are broken.
+
+*** remote status clients: forget RemoteReferences at shutdown, removes
+warnings about "persisting Ephemerals"
+
+** Twisted buildprocess updates:
+
+*** match build process as of twisted-1.0.5
+**** use python2.2 everywhere now that twisted rejects python2.1
+**** look for test-result constants in multiple places
+*** move experimental 'trial --jelly' code to separate module
+*** add FreeBSD builder
+*** catch rc!=0 in HLint step
+*** remove RunUnitTestsRandomly, use randomly=1 parameter instead
+*** parameterize ['twisted.test'] default test case to make subclassing easier
+*** ignore internal distutils warnings in python2.3 builder
+
+
+* Release 0.3.1 (29 Apr 2003):
+
+** First release.
+
+** Features implemented:
+
+ change notification from FreshCVS server or parsed maildir contents
+
+ timed builds
+
+ basic builds, configure/compile/test
+
+ some Twisted-specific build steps: docs, unit tests, debuild
+
+ status reporting via web page
+
+** Features still experimental/unpolished
+
+ status reporting via PB client

Added: vendor/buildbot/current/PKG-INFO
===================================================================
--- vendor/buildbot/current/PKG-INFO	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/PKG-INFO	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,30 @@
+Metadata-Version: 1.0
+Name: buildbot
+Version: 0.7.5
+Summary: BuildBot build automation system
+Home-page: http://buildbot.sourceforge.net/
+Author: Brian Warner
+Author-email: warner-buildbot at lothar.com
+License: GNU GPL
+Description: 
+        The BuildBot is a system to automate the compile/test cycle required by
+        most software projects to validate code changes. By automatically
+        rebuilding and testing the tree each time something has changed, build
+        problems are pinpointed quickly, before other developers are
+        inconvenienced by the failure. The guilty developer can be identified
+        and harassed without human intervention. By running the builds on a
+        variety of platforms, developers who do not have the facilities to test
+        their changes everywhere before checkin will at least know shortly
+        afterwards whether they have broken the build or not. Warning counts,
+        lint checks, image size, compile time, and other build parameters can
+        be tracked over time, are more visible, and are therefore easier to
+        improve.
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: No Input/Output (Daemon)
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Topic :: Software Development :: Testing

Added: vendor/buildbot/current/README
===================================================================
--- vendor/buildbot/current/README	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/README	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,193 @@
+
+BuildBot: build/test automation
+  http://buildbot.sourceforge.net/
+  Brian Warner <warner-buildbot @ lothar . com>
+
+
+Abstract:
+
+The BuildBot is a system to automate the compile/test cycle required by most
+software projects to validate code changes. By automatically rebuilding and
+testing the tree each time something has changed, build problems are
+pinpointed quickly, before other developers are inconvenienced by the
+failure. The guilty developer can be identified and harassed without human
+intervention. By running the builds on a variety of platforms, developers
+who do not have the facilities to test their changes everywhere before
+checkin will at least know shortly afterwards whether they have broken the
+build or not. Warning counts, lint checks, image size, compile time, and
+other build parameters can be tracked over time, are more visible, and
+are therefore easier to improve.
+
+The overall goal is to reduce tree breakage and provide a platform to run
+tests or code-quality checks that are too annoying or pedantic for any human
+to waste their time with. Developers get immediate (and potentially public)
+feedback about their changes, encouraging them to be more careful about
+testing before checkin.
+
+
+Features:
+
+ * run builds on a variety of slave platforms
+ * arbitrary build process: handles projects using C, Python, whatever
+ * minimal host requirements: python and Twisted
+ * slaves can be behind a firewall if they can still do checkout
+ * status delivery through web page, email, IRC, other protocols
+ * track builds in progress, provide estimated completion time
+ * flexible configuration by subclassing generic build process classes
+ * debug tools to force a new build, submit fake Changes, query slave status
+ * released under the GPL
+
+
+DOCUMENTATION:
+
+The PyCon paper has a good description of the overall architecture. It is
+available in HTML form in docs/PyCon-2003/buildbot.html, or on the web page.
+
+The User's Manual is in docs/buildbot.info, and the Installation chapter is
+the best guide to use for setup instructions. The .texinfo source can also be
+turned into printed documentation. An HTML representation is available on the
+Buildbot home page.
+
+REQUIREMENTS:
+
+ Python: http://www.python.org
+
+   Buildbot requires python-2.3 or later, and is primarily developed against
+   python-2.4 . It has *not* yet been tested against python-2.5 .
+
+ Twisted: http://twistedmatrix.com
+
+   Both the buildmaster and the buildslaves require Twisted-2.0.x or later.
+   As always, the most recent version is recommended. It has been tested
+   against Twisted-2.4.0 and Twisted SVN as of the date of release.
+
+   Twisted is delivered as a collection of subpackages. You'll need at least
+   "Twisted" (the core package), and you'll also want TwistedMail,
+   TwistedWeb, and TwistedWords (for sending email, serving a web status
+   page, and delivering build status via IRC, respectively). You might also
+   want TwistedConch (for the encrypted Manhole debug port). Note that
+   Twisted requires ZopeInterface to be installed as well.
+
+ACCESSORIES:
+
+ CVSToys: http://purl.net/net/CVSToys
+
+   If your buildmaster uses FreshCVSSource to receive change notification
+   from a cvstoys daemon, it will require CVSToys be installed (tested with
+   CVSToys-1.0.10). If it doesn't use that source (i.e. if you only use
+   a mail-parsing change source, or the SVN notification script), you will
+   not need CVSToys.
+
+INSTALLATION:
+
+Please read the User's Manual in docs/buildbot.info or docs/buildbot.html for
+complete instructions. This file only contains a brief summary.
+
+ RUNNING THE UNIT TESTS
+
+If you would like to run the unit test suite, use a command like this:
+
+ PYTHONPATH=. trial buildbot.test
+
+This should run up to 221 tests, depending upon what VC tools you have
+installed. On my desktop machine it takes about six minutes to complete.
+Nothing should fail (at least under unix), though a few might be skipped. If any of
+the tests fail, you should stop and investigate the cause before continuing
+the installation process, as it will probably be easier to track down the bug
+early. There are a few known failures under windows and OS-X, but please
+report these to the mailing list so we can isolate and resolve them.
+
+Neither CVS nor SVN supports file-based repositories on a network filesystem
+(or network drive, in Windows parlance). It is therefore recommended to run
+all unit tests on a local hard disk.
+
+ INSTALLING THE LIBRARIES:
+
+The first step is to install the python libraries. This package uses the
+standard 'distutils' module, so installing them is usually a matter of
+doing something like:
+
+ python ./setup.py install
+
+To test this, shift to a different directory (like /tmp), and run:
+
+ buildbot --version
+
+If it announces the versions of Buildbot and Twisted, the install went ok.
+
+
+ SETTING UP A BUILD SLAVE:
+
+If you want to run a build slave, you need to obtain the following pieces of
+information from the administrator of the buildmaster you intend to connect
+to:
+
+ your buildslave's name
+ the password assigned to your buildslave
+ the hostname and port number of the buildmaster, i.e. example.com:8007
+ 
+You also need to pick a working directory for the buildslave. All commands
+will be run inside this directory.
+
+Now run the 'buildbot' command as follows:
+
+ buildbot create-slave WORKDIR MASTERHOST:PORT SLAVENAME PASSWORD
+
+This will create a file called "buildbot.tac", which bundles up all the state
+needed by the build slave application. Twisted has a tool called "twistd"
+which knows how to load these saved applications and start running them.
+twistd takes care of logging and daemonization (running the program in the
+background). /usr/bin/buildbot is a front end which runs twistd for you.
+
+Once you've set up the directory with the .tac file, you start it running
+like this:
+
+ buildbot start WORKDIR
+
+This will start the build slave in the background and finish, so you don't
+need to put it in the background yourself with "&". The process ID of the
+background task is written to a file called "twistd.pid", and all output from
+the program is written to a log file named "twistd.log". Look in twistd.log
+to make sure the buildslave has started.
+
+To shut down the build slave, use:
+
+ buildbot stop WORKDIR
+
+
+ RUNNING BEHIND A NAT BOX:
+
+Some network environments will not properly maintain a TCP connection that
+appears to be idle. NAT boxes which do some form of connection tracking may
+drop the port mapping if it looks like the TCP session has been idle for too
+long. The buildslave attempts to turn on TCP "keepalives" (supported by
+Twisted 1.0.6 and later), and if these cannot be activated, it uses
+application level keepalives (which send a dummy message to the build master
+on a periodic basis). The TCP keepalive is typically sent at intervals of
+about 2 hours, and is configurable through the kernel. The application-level
+keepalive defaults to running once every 10 minutes.
+
+To manually turn on application-level keepalives, or to set them to use some
+other interval, add "--keepalive NNN" to the 'buildbot slave' command line.
+NNN is the number of seconds between keepalives. Use as large a value as your
+NAT box allows to reduce the amount of unnecessary traffic on the wire. 600
+seconds (10 minutes) is a reasonable value.
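+
+For example (assuming the option is accepted by the slave-creation command
+shown earlier):
+
+ buildbot create-slave --keepalive 600 WORKDIR MASTERHOST:PORT SLAVENAME PASSWORD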
+
+
+ SETTING UP A BUILD MASTER:
+
+Please read the user's manual for instructions. The short form is that you
+use 'buildbot create-master MASTERDIR' to create the base directory, then you
+edit the 'master.cfg' file to configure the buildmaster. Once this is ready,
+you use 'buildbot start MASTERDIR' to launch it.
+
+A sample configuration file will be created for you in MASTERDIR/master.cfg .
+There are more examples in docs/examples/, and plenty of documentation in the
+user's manual. Everything is controlled by the config file.
+
+
+SUPPORT:
+
+ Please send questions, bugs, patches, etc, to the buildbot-devel mailing
+ list reachable through http://buildbot.sourceforge.net/, so that everyone
+ can see them.

Added: vendor/buildbot/current/README.w32
===================================================================
--- vendor/buildbot/current/README.w32	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/README.w32	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,95 @@
+Several users have reported success in running a buildslave under Windows.
+The following list of steps might help you accomplish the same. They are a
+list of what I did as a unix guy struggling to make a winXP box run the
+buildbot unit tests. When I was done, most of the unit tests passed.
+
+If you discover things that are missing or incorrect, please send your
+corrections to the buildbot-devel mailing list (archives and subscription
+information are available at http://buildbot.sourceforge.net).
+
+Many thanks to Mike "Bear" Taylor for developing this list.
+
+
+0. Check to make sure your PATHEXT environment variable has ";.PY" in
+it -- if not, set your global environment to include it.
+
+ Control Panels / System / Advanced / Environment Variables / System variables
+
+1. Install python -- 2.4 -- http://python.org
+	* run win32 installer - no special options needed so far
+
+2. install zope interface package -- 3.0.1final -- 
+http://www.zope.org/Products/ZopeInterface
+	* run win32 installer - it should auto-detect your python 2.4
+          installation
+
+3. python for windows extensions -- build 203 -- 
+http://pywin32.sourceforge.net/
+	* run win32 installer - it should auto-detect your python 2.4 
+          installation
+
+ If the installer complains about a missing DLL, download mfc71.dll from
+ the site mentioned in the warning
+ (http://starship.python.net/crew/mhammond/win32/) and move it into
+ c:\Python24\DLLs
+
+4. at this point, to preserve my own sanity, I grabbed cygwin.com's setup.exe
+   and started it. It behaves a lot like dselect. I installed bash and other
+   tools (but *not* python). I added C:\cygwin\bin to PATH, allowing me to
+   use tar, md5sum, cvs, all the usual stuff. I also installed emacs, going
+   from the notes at http://www.gnu.org/software/emacs/windows/ntemacs.html .
+   Their FAQ at http://www.gnu.org/software/emacs/windows/faq3.html#install
+   has a note on how to swap CapsLock and Control.
+
+ I also modified PATH (in the same place as PATHEXT) to include C:\Python24
+ and C:\Python24\Scripts . This will allow 'python' and (eventually) 'trial'
+ to work in a regular command shell.
+
+5. twisted -- 2.0 -- http://twistedmatrix.com/projects/core/
+	* unpack tarball and run
+		python setup.py install
+	Note: if you want to test your setup - run:
+		python c:\python24\Scripts\trial.py -o -R twisted
+	(the -o will format the output for console and the "-R twisted" will 
+         recursively run all unit tests)
+
+ I had to edit Twisted (core)'s setup.py, to make detectExtensions() return
+ an empty list before running builder._compile_helper(). Apparently the test
+ it uses to detect if the (optional) C modules can be compiled causes the
+ install process to simply quit without actually installing anything.
+
+ I installed several packages: core, Lore, Mail, Web, and Words. They all got
+ copied to C:\Python24\Lib\site-packages\
+
+ At this point
+
+   trial --version
+
+ works, so 'trial -o -R twisted' will run the Twisted test suite. Note that
+ this is not necessarily setting PYTHONPATH, so it may be running the test
+ suite that was installed, not the one in the current directory.
+
+6. I used CVS to grab a copy of the latest Buildbot sources. To run the
+   tests, you must first add the buildbot directory to PYTHONPATH. Windows
+   does not appear to have a Bourne-shell-style syntax to set a variable just
+   for a single command, so you have to set it once and remember it will
+   affect all commands for the lifetime of that shell session.
+
+  set PYTHONPATH=.
+  trial -o -r win32 buildbot.test
+
+ To run against both buildbot-CVS and, say, Twisted-SVN, do:
+
+  set PYTHONPATH=.;C:\path to\Twisted-SVN
+
+
+All commands are done using the normal cmd.exe command shell. As of
+buildbot-0.6.4, only one unit test fails (test_webPathname_port) when you run
+under the 'win32' reactor. (if you run under the default reactor, many of the
+child-process-spawning commands fail, but test_webPathname_port passes. go
+figure.)
+
+Actually setting up a buildslave is not yet covered by this document. Patches
+gladly accepted.
+
+ -Brian

Added: vendor/buildbot/current/bin/buildbot
===================================================================
--- vendor/buildbot/current/bin/buildbot	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/bin/buildbot	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,4 @@
+#!/usr/bin/python
+
+from buildbot.scripts import runner
+runner.run()


Property changes on: vendor/buildbot/current/bin/buildbot
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/buildbot/__init__.py
===================================================================
--- vendor/buildbot/current/buildbot/__init__.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/__init__.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,3 @@
+#! /usr/bin/python
+
+version = "0.7.5"

Added: vendor/buildbot/current/buildbot/buildbot.png
===================================================================
(Binary files differ)


Property changes on: vendor/buildbot/current/buildbot/buildbot.png
___________________________________________________________________
Name: svn:mime-type
   + application/octet-stream

Added: vendor/buildbot/current/buildbot/buildset.py
===================================================================
--- vendor/buildbot/current/buildbot/buildset.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/buildset.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,75 @@
+
+from buildbot.process import base
+from buildbot.status import builder
+
+
+class BuildSet:
+    """I represent a set of potential Builds, all of the same source tree,
+    across a specified list of Builders. I can represent a build of a
+    specific version of the source tree (named by source.branch and
+    source.revision), or a build of a certain set of Changes
+    (source.changes=list)."""
+
+    def __init__(self, builderNames, source, reason=None, bsid=None):
+        """
+        @param source: a L{buildbot.sourcestamp.SourceStamp}
+        """
+        self.builderNames = builderNames
+        self.source = source
+        self.reason = reason
+        self.stillHopeful = True
+        self.status = bss = builder.BuildSetStatus(source, reason,
+                                                   builderNames, bsid)
+
+    def waitUntilSuccess(self):
+        return self.status.waitUntilSuccess()
+    def waitUntilFinished(self):
+        return self.status.waitUntilFinished()
+
+    def start(self, builders):
+        """This is called by the BuildMaster to actually create and submit
+        the BuildRequests."""
+        self.requests = []
+        reqs = []
+
+        # create the requests
+        for b in builders:
+            req = base.BuildRequest(self.reason, self.source, b.name)
+            reqs.append((b, req))
+            self.requests.append(req)
+            d = req.waitUntilFinished()
+            d.addCallback(self.requestFinished, req)
+
+        # tell our status about them
+        req_statuses = [req.status for req in self.requests]
+        self.status.setBuildRequestStatuses(req_statuses)
+
+        # now submit them
+        for b,req in reqs:
+            b.submitBuildRequest(req)
+
+    def requestFinished(self, buildstatus, req):
+        # TODO: this is where individual build status results are aggregated
+        # into a BuildSet-wide status. Consider making a rule that says one
+        # WARNINGS results in the overall status being WARNINGS too. The
+        # current rule is that any FAILURE means FAILURE, otherwise you get
+        # SUCCESS.
+        self.requests.remove(req)
+        results = buildstatus.getResults()
+        if results == builder.FAILURE:
+            self.status.setResults(results)
+            if self.stillHopeful:
+                # oh, cruel reality cuts deep. no joy for you. This is the
+                # first failure. This flunks the overall BuildSet, so we can
+                # notify success watchers that they aren't going to be happy.
+                self.stillHopeful = False
+                self.status.giveUpHope()
+                self.status.notifySuccessWatchers()
+        if not self.requests:
+            # that was the last build, so we can notify finished watchers. If
+            # we haven't failed by now, we can claim success.
+            if self.stillHopeful:
+                self.status.setResults(builder.SUCCESS)
+                self.status.notifySuccessWatchers()
+            self.status.notifyFinishedWatchers()
+

Added: vendor/buildbot/current/buildbot/changes/__init__.py
===================================================================

Added: vendor/buildbot/current/buildbot/changes/base.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/base.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/base.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,13 @@
+#! /usr/bin/python
+
+from twisted.application import service
+
+from buildbot.twcompat import implements
+from buildbot.interfaces import IChangeSource
+from buildbot import util
+
+class ChangeSource(service.Service, util.ComparableMixin):
+    if implements:
+        implements(IChangeSource)
+    else:
+        __implements__ = IChangeSource, service.Service.__implements__

Added: vendor/buildbot/current/buildbot/changes/bonsaipoller.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/bonsaipoller.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/bonsaipoller.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,300 @@
+import time
+from urllib import urlopen
+from xml.dom import minidom, Node
+
+from twisted.python import log, failure
+from twisted.internet import defer, reactor
+from twisted.internet.task import LoopingCall
+
+from buildbot.changes import base, changes
+
+class InvalidResultError(Exception):
+    def __init__(self, value="InvalidResultError"):
+        self.value = value
+    def __str__(self):
+        return repr(self.value)
+
+class EmptyResult(Exception):
+    pass
+
+class NoMoreCiNodes(Exception):
+    pass
+
+class NoMoreFileNodes(Exception):
+    pass
+
+class BonsaiResult:
+    """I hold a list of CiNodes"""
+    def __init__(self, nodes=[]):
+        self.nodes = nodes
+
+    def __cmp__(self, other):
+        if len(self.nodes) != len(other.nodes):
+            return -1
+        for i in range(len(self.nodes)):
+            if self.nodes[i].log != other.nodes[i].log \
+              or self.nodes[i].who != other.nodes[i].who \
+              or self.nodes[i].date != other.nodes[i].date \
+              or len(self.nodes[i].files) != len(other.nodes[i].files):
+                return -1
+
+            # also compare each node's file list, entry by entry
+            for j in range(len(self.nodes[i].files)):
+                if self.nodes[i].files[j].revision \
+                  != other.nodes[i].files[j].revision \
+                  or self.nodes[i].files[j].filename \
+                  != other.nodes[i].files[j].filename:
+                    return -1
+
+        return 0
+
+class CiNode:
+    """I hold information baout one <ci> node, including a list of files"""
+    def __init__(self, log="", who="", date=0, files=[]):
+        self.log = log
+        self.who = who
+        self.date = date
+        self.files = files
+
+class FileNode:
+    """I hold information about one <f> node"""
+    def __init__(self, revision="", filename=""):
+        self.revision = revision
+        self.filename = filename
+
+class BonsaiParser:
+    """I parse the XML result from a bonsai cvsquery."""
+
+    def __init__(self, bonsaiQuery):
+        try:
+            self.dom = minidom.parse(bonsaiQuery)
+        except:
+            raise InvalidResultError("Malformed XML in result")
+
+        self.ciNodes = self.dom.getElementsByTagName("ci")
+        self.currentCiNode = None # filled in by _nextCiNode()
+        self.fileNodes = None # filled in by _nextCiNode()
+        self.currentFileNode = None # filled in by _nextFileNode()
+        self.bonsaiResult = self._parseData()
+
+    def getData(self):
+        return self.bonsaiResult
+
+    def _parseData(self):
+        """Returns data from a Bonsai cvsquery in a BonsaiResult object"""
+        nodes = []
+        try:
+            while self._nextCiNode():
+                files = []
+                try:
+                    while self._nextFileNode():
+                        files.append(FileNode(self._getRevision(),
+                                              self._getFilename()))
+                except NoMoreFileNodes:
+                    pass
+                except InvalidResultError:
+                    raise
+                nodes.append(CiNode(self._getLog(), self._getWho(),
+                                    self._getDate(), files))
+
+        except NoMoreCiNodes:
+            pass
+        except (InvalidResultError, EmptyResult):
+            raise
+
+        return BonsaiResult(nodes)
+
+
+    def _nextCiNode(self):
+        """Iterates to the next <ci> node and fills self.fileNodes with
+           child <f> nodes"""
+        try:
+            self.currentCiNode = self.ciNodes.pop(0)
+            if len(self.currentCiNode.getElementsByTagName("files")) > 1:
+                raise InvalidResultError("Multiple <files> for one <ci>")
+
+            self.fileNodes = self.currentCiNode.getElementsByTagName("f")
+        except IndexError:
+            # if there were zero <ci> nodes in the result
+            if not self.currentCiNode:
+                raise EmptyResult
+            else:
+                raise NoMoreCiNodes
+
+        return True
+
+    def _nextFileNode(self):
+        """Iterates to the next <f> node"""
+        try:
+            self.currentFileNode = self.fileNodes.pop(0)
+        except IndexError:
+            raise NoMoreFileNodes
+
+        return True
+
+    def _getLog(self):
+        """Returns the log of the current <ci> node"""
+        logs = self.currentCiNode.getElementsByTagName("log")
+        if len(logs) < 1:
+            raise InvalidResultError("No log present")
+        elif len(logs) > 1:
+            raise InvalidResultError("Multiple logs present")
+
+        return logs[0].firstChild.data
+
+    def _getWho(self):
+        """Returns the e-mail address of the commiter"""
+        # convert unicode string to regular string
+        return str(self.currentCiNode.getAttribute("who"))
+
+    def _getDate(self):
+        """Returns the date (unix time) of the commit"""
+        # convert unicode number to regular one
+        try:
+            commitDate = int(self.currentCiNode.getAttribute("date"))
+        except ValueError:
+            raise InvalidResultError
+
+        return commitDate
+
+    def _getFilename(self):
+        """Returns the filename of the current <f> node"""
+        try:
+            filename = self.currentFileNode.firstChild.data
+        except AttributeError:
+            raise InvalidResultError("Missing filename")
+
+        return filename
+
+    def _getRevision(self):
+        """Returns the revision of the current <f> node"""
+        rev = self.currentFileNode.getAttribute("rev")
+        if rev == "":
+            raise InvalidResultError("A revision was missing from a file")
+
+        return rev
+
+
+class BonsaiPoller(base.ChangeSource):
+    """This source will poll a bonsai server for changes and submit
+    them to the change master."""
+
+    compare_attrs = ["bonsaiURL", "pollInterval", "tree",
+                     "module", "branch", "cvsroot"]
+
+    parent = None # filled in when we're added
+    loop = None
+    volatile = ['loop']
+    working = False
+
+    def __init__(self, bonsaiURL, module, branch, tree="default",
+                 cvsroot="/cvsroot", pollInterval=30):
+        """
+        @type   bonsaiURL:      string
+        @param  bonsaiURL:      The base URL of the Bonsai server
+                                (ie. http://bonsai.mozilla.org)
+        @type   module:         string
+        @param  module:         The module to look for changes in. Commonly
+                                this is 'all'
+        @type   branch:         string
+        @param  branch:         The branch to look for changes in. This must
+                                match the
+                                'branch' option for the Scheduler.
+        @type   tree:           string
+        @param  tree:           The tree to look for changes in. Commonly this
+                                is 'all'
+        @type   cvsroot:        string
+        @param  cvsroot:        The cvsroot of the repository. Usually this is
+                                '/cvsroot'
+        @type   pollInterval:   int
+        @param  pollInterval:   The time (in seconds) between queries for changes
+        """
+
+        self.bonsaiURL = bonsaiURL
+        self.module = module
+        self.branch = branch
+        self.tree = tree
+        self.cvsroot = cvsroot
+        self.pollInterval = pollInterval
+        self.lastChange = time.time()
+        self.lastPoll = time.time()
+
+    def startService(self):
+        self.loop = LoopingCall(self.poll)
+        base.ChangeSource.startService(self)
+
+        reactor.callLater(0, self.loop.start, self.pollInterval)
+
+    def stopService(self):
+        self.loop.stop()
+        return base.ChangeSource.stopService(self)
+
+    def describe(self):
+        str = ""
+        str += "Getting changes from the Bonsai service running at %s " \
+                % self.bonsaiURL
+        str += "<br>Using tree: %s, branch: %s, and module: %s" % (self.tree, \
+                self.branch, self.module)
+        return str
+
+    def poll(self):
+        if self.working:
+            log.msg("Not polling Bonsai because last poll is still working")
+        else:
+            self.working = True
+            d = self._get_changes()
+            d.addCallback(self._process_changes)
+            d.addBoth(self._finished)
+        return
+
+    def _finished(self, res):
+        assert self.working
+        self.working = False
+
+        # check for failure
+        if isinstance(res, failure.Failure):
+            log.msg("Bonsai poll failed: %s" % res)
+        return res
+
+    def _make_url(self):
+        args = ["treeid=%s" % self.tree, "module=%s" % self.module,
+                "branch=%s" % self.branch, "branchtype=match",
+                "sortby=Date", "date=explicit",
+                "mindate=%d" % self.lastChange,
+                "maxdate=%d" % int(time.time()),
+                "cvsroot=%s" % self.cvsroot, "xml=1"]
+        # build the bonsai URL
+        url = self.bonsaiURL
+        url += "/cvsquery.cgi?"
+        url += "&".join(args)
+
+        return url
+
+    def _get_changes(self):
+        url = self._make_url()
+        log.msg("Polling Bonsai tree at %s" % url)
+
+        self.lastPoll = time.time()
+        # get the page, in XML format
+        return defer.maybeDeferred(urlopen, url)
+
+    def _process_changes(self, query):
+        files = []
+        try:
+            bp = BonsaiParser(query)
+            result = bp.getData()
+        except InvalidResultError, e:
+            log.msg("Could not process Bonsai query: " + e.value)
+            return
+        except EmptyResult:
+            return
+
+        for cinode in result.nodes:
+            # gather only this <ci> node's files, not those of earlier nodes
+            files = []
+            for file in cinode.files:
+                files.append(file.filename+' (revision '+file.revision+')')
+            c = changes.Change(who = cinode.who,
+                               files = files,
+                               comments = cinode.log,
+                               when = cinode.date,
+                               branch = self.branch)
+            self.parent.addChange(c)
+            self.lastChange = self.lastPoll

Added: vendor/buildbot/current/buildbot/changes/changes.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/changes.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/changes.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,264 @@
+#! /usr/bin/python
+
+from __future__ import generators
+import sys, os, time
+try:
+    import cPickle
+    pickle = cPickle
+except ImportError:
+    import pickle
+
+from twisted.python import log
+from twisted.internet import defer
+from twisted.application import service
+from twisted.web import html
+
+from buildbot import interfaces, util
+from buildbot.twcompat import implements, providedBy
+
+html_tmpl = """
+<p>Changed by: <b>%(who)s</b><br />
+Changed at: <b>%(at)s</b><br />
+%(branch)s
+%(revision)s
+<br />
+
+Changed files:
+%(files)s
+
+Comments:
+%(comments)s
+</p>
+"""
+
+class Change:
+    """I represent a single change to the source tree. This may involve
+    several files, but they are all changed by the same person, and there is
+    a change comment for the group as a whole.
+
+    If the version control system supports sequential repository- (or
+    branch-) wide change numbers (like SVN, P4, and Arch), then revision=
+    should be set to that number. The highest such number will be used at
+    checkout time to get the correct set of files.
+
+    If it does not (like CVS), when= should be set to the timestamp (seconds
+    since epoch, as returned by time.time()) when the change was made. when=
+    will be filled in for you (to the current time) if you omit it, which is
+    suitable for ChangeSources which have no way of getting more accurate
+    timestamps.
+
+    Changes should be submitted to ChangeMaster.addChange() in
+    chronologically increasing order. Out-of-order changes will probably
+    cause the html.Waterfall display to be corrupted."""
+
+    if implements:
+        implements(interfaces.IStatusEvent)
+    else:
+        __implements__ = interfaces.IStatusEvent,
+
+    number = None
+
+    links = []
+    branch = None
+    revision = None # used to create a source-stamp
+
+    def __init__(self, who, files, comments, isdir=0, links=[],
+                 revision=None, when=None, branch=None):
+        self.who = who
+        self.files = files
+        self.comments = comments
+        self.isdir = isdir
+        self.links = links
+        self.revision = revision
+        if when is None:
+            when = util.now()
+        self.when = when
+        self.branch = branch
+
+    def asText(self):
+        data = ""
+        data += self.getFileContents() 
+        data += "At: %s\n" % self.getTime()
+        data += "Changed By: %s\n" % self.who
+        data += "Comments: %s\n\n" % self.comments
+        return data
+
+    def asHTML(self):
+        links = []
+        for file in self.files:
+            link = filter(lambda s: s.find(file) != -1, self.links)
+            if len(link) == 1:
+                # could get confused
+                links.append('<a href="%s"><b>%s</b></a>' % (link[0], file))
+            else:
+                links.append('<b>%s</b>' % file)
+        revision = ""
+        if self.revision:
+            revision = "Revision: <b>%s</b><br />\n" % self.revision
+        branch = ""
+        if self.branch:
+            branch = "Branch: <b>%s</b><br />\n" % self.branch
+
+        kwargs = { 'who'     : html.escape(self.who),
+                   'at'      : self.getTime(),
+                   'files'   : html.UL(links) + '\n',
+                   'revision': revision,
+                   'branch'  : branch,
+                   'comments': html.PRE(self.comments) }
+        return html_tmpl % kwargs
+
+    def getTime(self):
+        if not self.when:
+            return "?"
+        return time.strftime("%a %d %b %Y %H:%M:%S",
+                             time.localtime(self.when))
+
+    def getTimes(self):
+        return (self.when, None)
+
+    def getText(self):
+        return [html.escape(self.who)]
+    def getColor(self):
+        return "white"
+    def getLogs(self):
+        return {}
+
+    def getFileContents(self):
+        data = ""
+        if len(self.files) == 1:
+            if self.isdir:
+                data += "Directory: %s\n" % self.files[0]
+            else:
+                data += "File: %s\n" % self.files[0]
+        else:
+            data += "Files:\n"
+            for f in self.files:
+                data += " %s\n" % f
+        return data
+        
+class ChangeMaster(service.MultiService):
+
+    """This is the master-side service which receives file change
+    notifications from CVS. It keeps a log of these changes, enough to
+    provide for the HTML waterfall display, and to tell
+    temporarily-disconnected bots what they missed while they were
+    offline.
+
+    Change notifications come from two different kinds of sources. The first
+    is a PB service (servicename='changemaster', perspectivename='change'),
+    which provides a remote method called 'addChange', which should be
+    called with a dict that has keys 'filename' and 'comments'.
+
+    The second is a list of objects derived from the ChangeSource class.
+    These are added with .addSource(), which also sets the .changemaster
+    attribute in the source to point at the ChangeMaster. When the
+    application begins, these will be started with .start() . At shutdown
+    time, they will be terminated with .stop() . They must be persistable.
+    They are expected to call self.changemaster.addChange() with Change
+    objects.
+
+    There are several different variants of the second type of source:
+    
+      - L{buildbot.changes.mail.MaildirSource} watches a maildir for CVS
+        commit mail. It uses DNotify if available, or polls every 10
+        seconds if not.  It parses incoming mail to determine what files
+        were changed.
+
+      - L{buildbot.changes.freshcvs.FreshCVSSource} makes a PB
+        connection to the CVSToys 'freshcvs' daemon and relays any
+        changes it announces.
+    
+    """
+
+    debug = False
+    # todo: use Maildir class to watch for changes arriving by mail
+
+    def __init__(self):
+        service.MultiService.__init__(self)
+        self.changes = []
+        # self.basedir must be filled in by the parent
+        self.nextNumber = 1
+
+    def addSource(self, source):
+        assert providedBy(source, interfaces.IChangeSource)
+        assert providedBy(source, service.IService)
+        if self.debug:
+            print "ChangeMaster.addSource", source
+        source.setServiceParent(self)
+
+    def removeSource(self, source):
+        assert source in self
+        if self.debug:
+            print "ChangeMaster.removeSource", source, source.parent
+        d = defer.maybeDeferred(source.disownServiceParent)
+        return d
+
+    def addChange(self, change):
+        """Deliver a file change event. The event should be a Change object.
+        This method will timestamp the object as it is received."""
+        log.msg("adding change, who %s, %d files, rev=%s, branch=%s, "
+                "comments %s" % (change.who, len(change.files),
+                                 change.revision, change.branch,
+                                 change.comments))
+        change.number = self.nextNumber
+        self.nextNumber += 1
+        self.changes.append(change)
+        self.parent.addChange(change)
+        # TODO: call pruneChanges after a while
+
+    def pruneChanges(self):
+        self.changes = self.changes[-100:] # or something
+
+    def eventGenerator(self):
+        for i in range(len(self.changes)-1, -1, -1):
+            c = self.changes[i]
+            yield c
+
+    def getChangeNumbered(self, num):
+        if not self.changes:
+            return None
+        first = self.changes[0].number
+        if first + len(self.changes)-1 != self.changes[-1].number:
+            log.msg(self,
+                    "lost a change somewhere: [0] is %d, [%d] is %d" % \
+                    (self.changes[0].number,
+                     len(self.changes) - 1,
+                     self.changes[-1].number))
+            for c in self.changes:
+                log.msg("c[%d]: " % c.number, c)
+            return None
+        offset = num - first
+        log.msg(self, "offset", offset)
+        return self.changes[offset]
+
+    def __getstate__(self):
+        d = service.MultiService.__getstate__(self)
+        del d['parent']
+        del d['services'] # lose all children
+        del d['namedServices']
+        return d
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        # self.basedir must be set by the parent
+        self.services = [] # they'll be repopulated by readConfig
+        self.namedServices = {}
+
+
+    def saveYourself(self):
+        filename = os.path.join(self.basedir, "changes.pck")
+        tmpfilename = filename + ".tmp"
+        try:
+            pickle.dump(self, open(tmpfilename, "wb"))
+            if sys.platform == 'win32':
+                # windows cannot rename a file on top of an existing one
+                if os.path.exists(filename):
+                    os.unlink(filename)
+            os.rename(tmpfilename, filename)
+        except Exception, e:
+            log.msg("unable to save changes")
+            log.err()
+
+    def stopService(self):
+        self.saveYourself()
+        return service.MultiService.stopService(self)

Added: vendor/buildbot/current/buildbot/changes/dnotify.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/dnotify.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/dnotify.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,101 @@
+#! /usr/bin/python
+
+import fcntl, signal, os
+
+class DNotify_Handler:
+    def __init__(self):
+        self.watchers = {}
+        self.installed = 0
+    def install(self):
+        if self.installed:
+            return
+        signal.signal(signal.SIGIO, self.fire)
+        self.installed = 1
+    def uninstall(self):
+        if not self.installed:
+            return
+        signal.signal(signal.SIGIO, signal.SIG_DFL)
+        self.installed = 0
+    def add(self, watcher):
+        self.watchers[watcher.fd] = watcher
+        self.install()
+    def remove(self, watcher):
+        if self.watchers.has_key(watcher.fd):
+            del(self.watchers[watcher.fd])
+            if not self.watchers:
+                self.uninstall()
+    def fire(self, signum, frame):
+        # this is the signal handler
+        # without siginfo_t, we must fire them all
+        for watcher in self.watchers.values():
+            watcher.callback()
+            
+class DNotify:
+    DN_ACCESS = fcntl.DN_ACCESS  # a file in the directory was read
+    DN_MODIFY = fcntl.DN_MODIFY  # a file was modified (write,truncate)
+    DN_CREATE = fcntl.DN_CREATE  # a file was created
+    DN_DELETE = fcntl.DN_DELETE  # a file was unlinked
+    DN_RENAME = fcntl.DN_RENAME  # a file was renamed
+    DN_ATTRIB = fcntl.DN_ATTRIB  # a file had attributes changed (chmod,chown)
+
+    handler = [None]
+    
+    def __init__(self, dirname, callback=None,
+                 flags=[DN_MODIFY,DN_CREATE,DN_DELETE,DN_RENAME]):
+
+        """This object watches a directory for changes. The .callback
+        attribute should be set to a function to be run every time something
+        happens to it. Be aware that it will be called more times than you
+        expect."""
+
+        if callback:
+            self.callback = callback
+        else:
+            self.callback = self.fire
+        self.dirname = dirname
+        self.flags = reduce(lambda x, y: x | y, flags) | fcntl.DN_MULTISHOT
+        self.fd = os.open(dirname, os.O_RDONLY)
+        # ideally we would move the notification to something like SIGRTMIN,
+        # (to free up SIGIO) and use sigaction to have the signal handler
+        # receive a structure with the fd number. But python doesn't offer
+        # either.
+        if not self.handler[0]:
+            self.handler[0] = DNotify_Handler()
+        self.handler[0].add(self)
+        fcntl.fcntl(self.fd, fcntl.F_NOTIFY, self.flags)
+    def remove(self):
+        self.handler[0].remove(self)
+        os.close(self.fd)
+    def fire(self):
+        print self.dirname, "changed!"
+
+def test_dnotify1():
+    d = DNotify(".")
+    while 1:
+        signal.pause()
+
+def test_dnotify2():
+    # create ./foo/, create/delete files in ./ and ./foo/ while this is
+    # running. Notice how both notifiers are fired when anything changes;
+    # this is an unfortunate side-effect of the lack of extended sigaction
+    # support in Python.
+    count = [0]
+    d1 = DNotify(".")
+    def fire1(count=count, d1=d1):
+        print "./ changed!", count[0]
+        count[0] += 1
+        if count[0] > 5:
+            d1.remove()
+            del(d1)
+    # change the callback, since we can't define it until after we have the
+    # dnotify object. Hmm, unless we give the dnotify to the callback.
+    d1.callback = fire1
+    def fire2(): print "foo/ changed!"
+    d2 = DNotify("foo", fire2)
+    while 1:
+        signal.pause()
+        
+    
+if __name__ == '__main__':
+    test_dnotify2()
+    

Added: vendor/buildbot/current/buildbot/changes/freshcvs.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/freshcvs.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/freshcvs.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,148 @@
+#! /usr/bin/python
+
+import os.path
+
+from twisted.cred import credentials
+from twisted.spread import pb
+from twisted.application.internet import TCPClient
+from twisted.python import log
+
+import cvstoys.common # to make sure VersionedPatch gets registered
+
+from buildbot.twcompat import implements
+from buildbot.interfaces import IChangeSource
+from buildbot.pbutil import ReconnectingPBClientFactory
+from buildbot.changes.changes import Change
+from buildbot import util
+
+class FreshCVSListener(pb.Referenceable):
+    def remote_notify(self, root, files, message, user):
+        try:
+            self.source.notify(root, files, message, user)
+        except Exception, e:
+            print "notify failed"
+            log.err()
+
+    def remote_goodbye(self, message):
+        pass
+
+class FreshCVSConnectionFactory(ReconnectingPBClientFactory):
+
+    def gotPerspective(self, perspective):
+        log.msg("connected to FreshCVS daemon")
+        ReconnectingPBClientFactory.gotPerspective(self, perspective)
+        self.source.connected = True
+        # TODO: freshcvs-1.0.10 doesn't handle setFilter correctly, it will
+        # be fixed in the upcoming 1.0.11 . I haven't been able to test it
+        # to make sure the failure mode is survivable, so I'll just leave
+        # this out for now.
+        return
+        if self.source.prefix is not None:
+            pathfilter = "^%s" % self.source.prefix
+            d = perspective.callRemote("setFilter",
+                                       None, pathfilter, None)
+            # ignore failures, setFilter didn't work in 1.0.10 and this is
+            # just an optimization anyway
+            d.addErrback(lambda f: None)
+
+    def clientConnectionLost(self, connector, reason):
+        ReconnectingPBClientFactory.clientConnectionLost(self, connector,
+                                                         reason)
+        self.source.connected = False
+
+class FreshCVSSourceNewcred(TCPClient, util.ComparableMixin):
+    """This source will connect to a FreshCVS server associated with one or
+    more CVS repositories. Each time a change is committed to a repository,
+    the server will send us a message describing the change. This message is
+    used to build a Change object, which is then submitted to the
+    ChangeMaster.
+
+    This class handles freshcvs daemons which use newcred. CVSToys-1.0.9
+    does not, later versions might.
+    """
+
+    if implements:
+        implements(IChangeSource)
+    else:
+        __implements__ = IChangeSource, TCPClient.__implements__
+    compare_attrs = ["host", "port", "username", "password", "prefix"]
+
+    changemaster = None # filled in when we're added
+    connected = False
+
+    def __init__(self, host, port, user, passwd, prefix=None):
+        self.host = host
+        self.port = port
+        self.username = user
+        self.password = passwd
+        if prefix is not None and not prefix.endswith("/"):
+            log.msg("WARNING: prefix '%s' should probably end with a slash" \
+                    % prefix)
+        self.prefix = prefix
+        self.listener = l = FreshCVSListener()
+        l.source = self
+        self.factory = f = FreshCVSConnectionFactory()
+        f.source = self
+        self.creds = credentials.UsernamePassword(user, passwd)
+        f.startLogin(self.creds, client=l)
+        TCPClient.__init__(self, host, port, f)
+
+    def __repr__(self):
+        return "<FreshCVSSource where=%s, prefix=%s>" % \
+               ((self.host, self.port), self.prefix)
+
+    def describe(self):
+        online = ""
+        if not self.connected:
+            online = " [OFFLINE]"
+        return "freshcvs %s:%s%s" % (self.host, self.port, online)
+
+    def notify(self, root, files, message, user):
+        pathnames = []
+        isdir = 0
+        for f in files:
+            if not isinstance(f, (cvstoys.common.VersionedPatch,
+                                  cvstoys.common.Directory)):
+                continue
+            pathname, filename = f.pathname, f.filename
+            #r1, r2 = getattr(f, 'r1', None), getattr(f, 'r2', None)
+            if isinstance(f, cvstoys.common.Directory):
+                isdir = 1
+            path = os.path.join(pathname, filename)
+            log.msg("FreshCVS notify '%s'" % path)
+            if self.prefix:
+                if path.startswith(self.prefix):
+                    path = path[len(self.prefix):]
+                else:
+                    continue
+            pathnames.append(path)
+        if pathnames:
+            # now() is close enough: FreshCVS *is* realtime, after all
+            when=util.now()
+            c = Change(user, pathnames, message, isdir, when=when)
+            self.parent.addChange(c)
+
+class FreshCVSSourceOldcred(FreshCVSSourceNewcred):
+    """This is for older freshcvs daemons (from CVSToys-1.0.9 and earlier).
+    """
+
+    def __init__(self, host, port, user, passwd,
+                 serviceName="cvstoys.notify", prefix=None):
+        self.host = host
+        self.port = port
+        self.prefix = prefix
+        self.listener = l = FreshCVSListener()
+        l.source = self
+        self.factory = f = FreshCVSConnectionFactory()
+        f.source = self
+        f.startGettingPerspective(user, passwd, serviceName, client=l)
+        TCPClient.__init__(self, host, port, f)
+
+    def __repr__(self):
+        return "<FreshCVSSourceOldcred where=%s, prefix=%s>" % \
+               ((self.host, self.port), self.prefix)
+
+# this is suitable for CVSToys-1.0.10 and later. If you run CVSToys-1.0.9 or
+# earlier, use FreshCVSSourceOldcred instead.
+FreshCVSSource = FreshCVSSourceNewcred
+
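
For reference, a minimal sketch of constructing this change source from the signature above; the host, port, credentials, and prefix are placeholders, and CVSToys must be installed for the module to import:

    from buildbot.changes.freshcvs import FreshCVSSource

    # FreshCVSSource is the newcred variant; use FreshCVSSourceOldcred
    # for CVSToys-1.0.9 and earlier. All values here are placeholders.
    source = FreshCVSSource("cvs.example.org", 4000,
                            user="buildbot", passwd="secret",
                            prefix="Twisted/")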

Added: vendor/buildbot/current/buildbot/changes/freshcvsmail.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/freshcvsmail.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/freshcvsmail.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,5 @@
+#! /usr/bin/python
+
+# leftover import for compatibility
+
+from buildbot.changes.mail import FCMaildirSource

Added: vendor/buildbot/current/buildbot/changes/mail.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/mail.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/mail.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,338 @@
+# -*- test-case-name: buildbot.test.test_mailparse -*-
+
+"""
+Parse various kinds of 'CVS notify' email.
+"""
+import os, re
+from rfc822 import Message
+
+from buildbot import util
+from buildbot.twcompat import implements
+from buildbot.changes import base, changes, maildirtwisted
+
+def parseFreshCVSMail(self, fd, prefix=None, sep="/"):
+    """Parse mail sent by FreshCVS"""
+    # this uses rfc822.Message so it can run under python2.1 . In the future
+    # it will be updated to use python2.2's "email" module.
+
+    m = Message(fd)
+    # FreshCVS sets From: to "user CVS <user>", but the <> part may be
+    # modified by the MTA (to include a local domain)
+    name, addr = m.getaddr("from")
+    if not name:
+        return None # no From means this message isn't from FreshCVS
+    cvs = name.find(" CVS")
+    if cvs == -1:
+        return None # this message isn't from FreshCVS
+    who = name[:cvs]
+
+    # we take the time of receipt as the time of checkin. Not correct, but it
+    # avoids the out-of-order-changes issue. See the comment in parseSyncmail
+    # about using the 'Date:' header
+    when = util.now()
+
+    files = []
+    comments = ""
+    isdir = 0
+    lines = m.fp.readlines()
+    while lines:
+        line = lines.pop(0)
+        if line == "Modified files:\n":
+            break
+    while lines:
+        line = lines.pop(0)
+        if line == "\n":
+            break
+        line = line.rstrip("\n")
+        linebits = line.split(None, 1)
+        file = linebits[0]
+        if prefix:
+            # insist that the file start with the prefix: FreshCVS sends
+            # changes we don't care about too
+            bits = file.split(sep)
+            if bits[0] == prefix:
+                file = sep.join(bits[1:])
+            else:
+                break
+        if len(linebits) == 1:
+            isdir = 1
+        elif linebits[1] == "0 0":
+            isdir = 1
+        files.append(file)
+    while lines:
+        line = lines.pop(0)
+        if line == "Log message:\n":
+            break
+    # message is terminated by "ViewCVS links:" or "Index:..." (patch)
+    while lines:
+        line = lines.pop(0)
+        if line == "ViewCVS links:\n":
+            break
+        if line.find("Index: ") == 0:
+            break
+        comments += line
+    comments = comments.rstrip() + "\n"
+
+    if not files:
+        return None
+    
+    change = changes.Change(who, files, comments, isdir, when=when)
+
+    return change
+
+def parseSyncmail(self, fd, prefix=None, sep="/"):
+    """Parse messages sent by the 'syncmail' program, as suggested by the
+    sourceforge.net CVS Admin documentation. Syncmail is maintained at
+    syncmail.sf.net .
+    """
+    # pretty much the same as freshcvs mail, not surprising since CVS is the
+    # one creating most of the text
+
+    m = Message(fd)
+    # The mail is sent from the person doing the checkin. Assume that the
+    # local username is enough to identify them (this assumes a one-server
+    # cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
+    # model)
+    name, addr = m.getaddr("from")
+    if not addr:
+        return None # no From address means this message isn't from syncmail
+    at = addr.find("@")
+    if at == -1:
+        who = addr # might still be useful
+    else:
+        who = addr[:at]
+
+    # we take the time of receipt as the time of checkin. Not correct (it
+    # depends upon the email latency), but it avoids the out-of-order-changes
+    # issue. Also syncmail doesn't give us anything better to work with,
+    # unless you count pulling the v1-vs-v2 timestamp out of the diffs, which
+    # would be ugly. TODO: Pulling the 'Date:' header from the mail is a
+    # possibility, and email.Utils.parsedate_tz may be useful. It should be
+    # configurable, however, because there are a lot of broken clocks out
+    # there.
+    when = util.now()
+
+    subject = m.getheader("subject")
+    # syncmail puts the repository-relative directory in the subject:
+    # mprefix + "%(dir)s %(file)s,%(oldversion)s,%(newversion)s", where
+    # 'mprefix' is something that could be added by a mailing list
+    # manager.
+    # this is the only reasonable way to determine the directory name
+    space = subject.find(" ")
+    if space != -1:
+        directory = subject[:space]
+    else:
+        directory = subject
+    
+    files = []
+    comments = ""
+    isdir = 0
+    branch = None
+
+    lines = m.fp.readlines()
+    while lines:
+        line = lines.pop(0)
+
+        if (line == "Modified Files:\n" or
+            line == "Added Files:\n" or
+            line == "Removed Files:\n"):
+            break
+
+    while lines:
+        line = lines.pop(0)
+        if line == "\n":
+            break
+        if line == "Log Message:\n":
+            lines.insert(0, line)
+            break
+        line = line.lstrip()
+        line = line.rstrip()
+        # note: syncmail will send one email per directory involved in a
+        # commit, with multiple files if they were in the same directory.
+        # Unlike freshCVS, it makes no attempt to collect all related
+        # commits into a single message.
+
+        # note: syncmail will report a Tag underneath the ... Files: line
+        # e.g.:       Tag: BRANCH-DEVEL
+
+        if line.startswith('Tag:'):
+            branch = line.split(' ')[-1].rstrip()
+            continue
+
+        # note: it doesn't actually make sense to use portable functions
+        # like os.path.join and os.sep, because these filenames all use
+        # separator conventions established by the remote CVS server (which
+        # is probably running on unix), not the local buildmaster system.
+        thesefiles = line.split(" ")
+        for f in thesefiles:
+            f = sep.join([directory, f])
+            if prefix:
+                # insist that the file start with the prefix: we may get
+                # changes we don't care about too
+                bits = f.split(sep)
+                if bits[0] == prefix:
+                    f = sep.join(bits[1:])
+                else:
+                    break
+            # TODO: figure out how new directories are described, set .isdir
+            files.append(f)
+
+    if not files:
+        return None
+
+    while lines:
+        line = lines.pop(0)
+        if line == "Log Message:\n":
+            break
+    # message is terminated by "Index:..." (patch) or "--- NEW FILE.."
+    # or "--- filename DELETED ---". Sigh.
+    while lines:
+        line = lines.pop(0)
+        if line.find("Index: ") == 0:
+            break
+        if re.search(r"^--- NEW FILE", line):
+            break
+        if re.search(r" DELETED ---$", line):
+            break
+        comments += line
+    comments = comments.rstrip() + "\n"
+    
+    change = changes.Change(who, files, comments, isdir, when=when,
+                            branch=branch)
+
+    return change
+
+# Bonsai mail parser by Stephen Davis.
+#
+# This handles changes for CVS repositories that are watched by Bonsai
+# (http://www.mozilla.org/bonsai.html)
+
+# A Bonsai-formatted email message looks like:
+# 
+# C|1071099907|stephend|/cvs|Sources/Scripts/buildbot|bonsai.py|1.2|||18|7
+# A|1071099907|stephend|/cvs|Sources/Scripts/buildbot|master.cfg|1.1|||18|7
+# R|1071099907|stephend|/cvs|Sources/Scripts/buildbot|BuildMaster.py|||
+# LOGCOMMENT
+# Updated bonsai parser and switched master config to buildbot-0.4.1 style.
+# 
+# :ENDLOGCOMMENT
+#
+# In the first example line, stephend is the user, /cvs the repository,
+# buildbot the directory, bonsai.py the file, 1.2 the revision, no sticky
+# and branch, 18 lines added and 7 removed. All of these fields might not be
+# present (during "removes" for example).
+#
+# There may be multiple "control" lines or even none (imports, directory
+# additions) but there is one email per directory. We only care about actual
+# changes since it is presumed directory additions don't actually affect the
+# build. At least one file should need to change (the makefile, say) to
+# actually make a new directory part of the build process. That's my story
+# and I'm sticking to it.
+
+def parseBonsaiMail(self, fd, prefix=None, sep="/"):
+    """Parse mail sent by the Bonsai cvs loginfo script."""
+
+    msg = Message(fd)
+
+    # we don't care who the email came from b/c the cvs user is in the msg
+    # text
+    
+    who = "unknown"
+    timestamp = None
+    files = []
+    lines = msg.fp.readlines()
+
+    # read the control lines (what/who/where/file/etc.)
+    while lines:
+        line = lines.pop(0)
+        if line == "LOGCOMMENT\n":
+            break
+        line = line.rstrip("\n")
+        
+        # we'd like to do the following but it won't work if the number of
+        # items doesn't match so...
+        #   what, timestamp, user, repo, module, file = line.split( '|' )
+        items = line.split('|')
+        if len(items) < 6:
+            # not a valid line, assume this isn't a bonsai message
+            return None
+
+        try:
+            # just grab the bottom-most timestamp, they're probably all the
+            # same. TODO: I'm assuming this is relative to the epoch, but
+            # this needs testing.
+            timestamp = int(items[1])
+        except ValueError:
+            pass
+
+        user = items[2]
+        if user:
+            who = user
+
+        module = items[4]
+        file = items[5]
+        if module and file:
+            path = "%s/%s" % (module, file)
+            files.append(path)
+        sticky = items[7]
+        branch = items[8]
+
+    # if no files changed, return nothing
+    if not files:
+        return None
+
+    # read the comments
+    comments = ""
+    while lines:
+        line = lines.pop(0)
+        if line == ":ENDLOGCOMMENT\n":
+            break
+        comments += line
+    comments = comments.rstrip() + "\n"
+
+    # return buildbot Change object
+    return changes.Change(who, files, comments, when=timestamp, branch=branch)
+
+
+
+class MaildirSource(maildirtwisted.MaildirTwisted, base.ChangeSource):
+    """This source will watch a maildir that is subscribed to a FreshCVS
+    change-announcement mailing list.
+    """
+    # we need our own implements() here, at least for twisted-1.3, because
+    # the double-inheritance of Service shadows __implements__ from
+    # ChangeSource.
+    if not implements:
+        __implements__ = base.ChangeSource.__implements__
+
+    compare_attrs = ["basedir", "newdir", "pollinterval", "parser"]
+    parser = None
+    name = None
+
+    def __init__(self, maildir, prefix=None, sep="/"):
+        maildirtwisted.MaildirTwisted.__init__(self, maildir)
+        self.prefix = prefix
+        self.sep = sep
+
+    def describe(self):
+        return "%s mailing list in maildir %s" % (self.name, self.basedir)
+
+    def messageReceived(self, filename):
+        path = os.path.join(self.basedir, "new", filename)
+        change = self.parser(open(path, "r"), self.prefix, self.sep)
+        if change:
+            self.parent.addChange(change)
+        os.rename(os.path.join(self.basedir, "new", filename),
+                  os.path.join(self.basedir, "cur", filename))
+
+class FCMaildirSource(MaildirSource):
+    parser = parseFreshCVSMail
+    name = "FreshCVS"
+
+class SyncmailMaildirSource(MaildirSource):
+    parser = parseSyncmail
+    name = "Syncmail"
+
+class BonsaiMaildirSource(MaildirSource):
+    parser = parseBonsaiMail
+    name = "Bonsai"

Added: vendor/buildbot/current/buildbot/changes/maildir.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/maildir.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/maildir.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,115 @@
+#! /usr/bin/python
+
+# This is a class which watches a maildir for new messages. It uses the
+# linux dirwatcher API (if available) to look for new files. The
+# .messageReceived method is invoked with the filename of the new message,
+# relative to the 'new' directory of the maildir.
+
+# this is an abstract base class. It must be subclassed by something to
+# provide a delay function (which polls in the case that DNotify isn't
+# available) and a way to safely schedule code to run after a signal handler
+# has fired. See maildirgtk.py and maildirtwisted.py for forms that use the
+# event loops provided by Gtk+ and Twisted.
+
+try:
+    from dnotify import DNotify
+    have_dnotify = 1
+except:
+    have_dnotify = 0
+import os
+
+class Maildir:
+    """This is a class which watches a maildir for new messages. Once
+    started, it will run its .messageReceived method when a message is
+    available.
+    """
+    def __init__(self, basedir=None):
+        """Create the Maildir watcher. BASEDIR is the maildir directory (the
+        one which contains new/ and tmp/)
+        """
+        self.basedir = basedir
+        self.files = []
+        self.pollinterval = 10  # only used if we don't have DNotify
+        self.running = 0
+        self.dnotify = None
+
+    def setBasedir(self, basedir):
+        self.basedir = basedir
+
+    def start(self):
+        """You must run start to receive any messages."""
+        assert self.basedir
+        self.newdir = os.path.join(self.basedir, "new")
+        if self.running:
+            return
+        self.running = 1
+        if not os.path.isdir(self.basedir) or not os.path.isdir(self.newdir):
+            raise "invalid maildir '%s'" % self.basedir
+        # we must hold an fd open on the directory, so we can get notified
+        # when it changes.
+        global have_dnotify
+        if have_dnotify:
+            try:
+                self.dnotify = DNotify(self.newdir, self.dnotify_callback,
+                                       [DNotify.DN_CREATE])
+            except (IOError, OverflowError):
+                # IOError is probably linux<2.4.19, which doesn't support
+                # dnotify. OverflowError will occur on some 64-bit machines
+                # because of a python bug
+                print "DNotify failed, falling back to polling"
+                have_dnotify = 0
+
+        self.poll()
+
+    def startTimeout(self):
+        raise NotImplementedError
+    def stopTimeout(self):
+        raise NotImplementedError
+    def dnotify_callback(self):
+        print "callback"
+        self.poll()
+        raise NotImplementedError
+        
+    def stop(self):
+        if self.dnotify:
+            self.dnotify.remove()
+            self.dnotify = None
+        else:
+            self.stopTimeout()
+        self.running = 0
+
+    def poll(self):
+        assert self.basedir
+        # see what's new
+        for f in self.files:
+            if not os.path.isfile(os.path.join(self.newdir, f)):
+                self.files.remove(f)
+        newfiles = []
+        for f in os.listdir(self.newdir):
+            if not f in self.files:
+                newfiles.append(f)
+        self.files.extend(newfiles)
+        # TODO: sort by ctime, then filename, since safecat uses a rather
+        # fine-grained timestamp in the filename
+        for n in newfiles:
+            # TODO: consider catching exceptions in messageReceived
+            self.messageReceived(n)
+        if not have_dnotify:
+            self.startTimeout()
+
+    def messageReceived(self, filename):
+        """Called when a new file is noticed. Override it in subclasses.
+        Will receive path relative to maildir/new."""
+        print filename
+
+
+def test1():
+    m = Maildir("ddir")
+    m.start()
+    import signal
+    while 1:
+        signal.pause()
+    
+if __name__ == '__main__':
+    test1()
+    
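
As the comments above note, Maildir is an abstract base: a subclass must supply the delay function and a safe way to run code after the dnotify signal fires. A toy sketch of that contract, assuming a plain threading.Timer is an acceptable stand-in for the Gtk/Twisted event-loop timers used by the real subclasses:

    import threading
    from buildbot.changes.maildir import Maildir

    class PollingMaildir(Maildir):
        def startTimeout(self):
            self.timer = threading.Timer(self.pollinterval, self.poll)
            self.timer.start()
        def stopTimeout(self):
            if getattr(self, "timer", None):
                self.timer.cancel()
                self.timer = None
        def dnotify_callback(self):
            # not signal-safe, but adequate for this toy example
            self.poll()
        def messageReceived(self, filename):
            print "new message:", filename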

Added: vendor/buildbot/current/buildbot/changes/maildirgtk.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/maildirgtk.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/maildirgtk.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,55 @@
+#! /usr/bin/python
+
+# This is a class which watches a maildir for new messages. It uses the
+# linux dirwatcher API (if available) to look for new files. The
+# .messageReceived method is invoked with the filename of the new message,
+# relative to the top of the maildir (so it will look like "new/blahblah").
+
+# This form uses the Gtk event loop to handle polling and signal safety
+
+if __name__ == '__main__':
+    import pygtk
+    pygtk.require("2.0")
+
+import gtk
+from maildir import Maildir
+
+class MaildirGtk(Maildir):
+    def __init__(self, basedir):
+        Maildir.__init__(self, basedir)
+        self.idler = None
+    def startTimeout(self):
+        self.timeout = gtk.timeout_add(self.pollinterval*1000, self.doTimeout)
+    def doTimeout(self):
+        self.poll()
+        return gtk.TRUE # keep going
+    def stopTimeout(self):
+        if self.timeout:
+            gtk.timeout_remove(self.timeout)
+            self.timeout = None
+    def dnotify_callback(self):
+        # make it safe
+        self.idler = gtk.idle_add(self.idlePoll)
+    def idlePoll(self):
+        gtk.idle_remove(self.idler)
+        self.idler = None
+        self.poll()
+        return gtk.FALSE
+
+def test1():
+    class MaildirTest(MaildirGtk):
+        def messageReceived(self, filename):
+            print "changed:", filename
+    m = MaildirTest("ddir")
+    print "watching ddir/new/"
+    m.start()
+    #gtk.main()
+    # to allow the python-side signal handler to run, we must surface from
+    # gtk (which blocks on the C-side) every once in a while.
+    while 1:
+        gtk.mainiteration() # this will block until there is something to do
+    m.stop()
+    print "done"
+    
+if __name__ == '__main__':
+    test1()

Added: vendor/buildbot/current/buildbot/changes/maildirtwisted.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/maildirtwisted.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/maildirtwisted.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,76 @@
+#! /usr/bin/python
+
+# This is a class which watches a maildir for new messages. It uses the
+# linux dirwatcher API (if available) to look for new files. The
+# .messageReceived method is invoked with the filename of the new message,
+# relative to the top of the maildir (so it will look like "new/blahblah").
+
+# This version is implemented as a Twisted Python "Service". It uses the
+# twisted Reactor to handle polling and signal safety.
+
+from twisted.application import service
+from twisted.internet import reactor
+from maildir import Maildir
+
+class MaildirTwisted(Maildir, service.Service):
+    timeout = None
+
+    def startService(self):
+        self.start()
+        service.Service.startService(self)
+    def stopService(self):
+        self.stop()
+        service.Service.stopService(self)
+
+    def startTimeout(self):
+        self.timeout = reactor.callLater(self.pollinterval, self.poll)
+    def stopTimeout(self):
+        if self.timeout:
+            self.timeout.cancel()
+            self.timeout = None
+
+    def dnotify_callback(self):
+        # make it safe
+        #reactor.callFromThread(self.poll)
+        reactor.callLater(1, self.poll)
+        # give it a moment. I found that qmail had problems when the message
+        # was removed from the maildir instantly. It shouldn't, that's what
+        # maildirs are made for. I wasn't able to eyeball any reason for the
+        # problem, and safecat didn't behave the same way, but qmail reports
+        # "Temporary_error_on_maildir_delivery" (qmail-local.c:165,
+        # maildir_child() process exited with rc not in 0,2,3,4). Not sure why,
+        # would have to hack qmail to investigate further, easier to just
+        # wait a second before yanking the message out of new/ .
+
+##     def messageReceived(self, filename):
+##         if self.callback:
+##             self.callback(filename)
+
+class MaildirService(MaildirTwisted):
+    """I watch a maildir for new messages. I should be placed as the service
+    child of some MultiService instance. When running, I use the linux
+    dirwatcher API (if available) or poll for new files in the 'new'
+    subdirectory of my maildir path. When I discover a new message, I invoke
+    my parent's .messageReceived() method with the short filename of the new
+    message, so the full name of the new file can be obtained with
+    os.path.join(maildir, 'new', filename). I will not move or delete the
+    file on my own: the parent should do this in messageReceived().
+    """
+    def messageReceived(self, filename):
+        self.parent.messageReceived(filename)
+
+
+def test1():
+    class MaildirTest(MaildirTwisted):
+        def messageReceived(self, filename):
+            print "changed:", filename
+    m = MaildirTest(basedir="ddir")
+    print "watching ddir/new/"
+    m.startService()
+    reactor.run()
+    print "done"
+    
+if __name__ == '__main__':
+    test1()
+    
+
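
The MaildirService docstring above describes the parent/child protocol; a minimal sketch of a parent service, with the maildir path as a placeholder:

    import os
    from twisted.application import service
    from buildbot.changes.maildirtwisted import MaildirService

    class MailWatcher(service.MultiService):
        # MaildirService calls back with the short filename of each new
        # message; the parent decides what to do with the file afterwards.
        def __init__(self, maildir):
            service.MultiService.__init__(self)
            self.maildir = maildir
            MaildirService(basedir=maildir).setServiceParent(self)
        def messageReceived(self, filename):
            print "got", os.path.join(self.maildir, "new", filename)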

Added: vendor/buildbot/current/buildbot/changes/monotone.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/monotone.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/monotone.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,306 @@
+
+import tempfile
+import os
+import os.path
+from cStringIO import StringIO
+
+from twisted.python import log
+from twisted.application import service
+from twisted.internet import defer, protocol, error, reactor
+from twisted.internet.task import LoopingCall
+
+from buildbot import util
+from buildbot.interfaces import IChangeSource
+from buildbot.changes.changes import Change
+
+class _MTProtocol(protocol.ProcessProtocol):
+
+    def __init__(self, deferred, cmdline):
+        self.cmdline = cmdline
+        self.deferred = deferred
+        self.s = StringIO()
+
+    def errReceived(self, text):
+        log.msg("stderr: %s" % text)
+
+    def outReceived(self, text):
+        log.msg("stdout: %s" % text)
+        self.s.write(text)
+
+    def processEnded(self, reason):
+        log.msg("Command %r exited with value %s" % (self.cmdline, reason))
+        if isinstance(reason.value, error.ProcessDone):
+            self.deferred.callback(self.s.getvalue())
+        else:
+            self.deferred.errback(reason)
+
+class Monotone:
+    """All methods of this class return a Deferred."""
+
+    def __init__(self, bin, db):
+        self.bin = bin
+        self.db = db
+
+    def _run_monotone(self, args):
+        d = defer.Deferred()
+        cmdline = (self.bin, "--db=" + self.db) + tuple(args)
+        p = _MTProtocol(d, cmdline)
+        log.msg("Running command: %r" % (cmdline,))
+        log.msg("wd: %s" % os.getcwd())
+        reactor.spawnProcess(p, self.bin, cmdline)
+        return d
+
+    def _process_revision_list(self, output):
+        if output:
+            return output.strip().split("\n")
+        else:
+            return []
+
+    def get_interface_version(self):
+        d = self._run_monotone(["automate", "interface_version"])
+        d.addCallback(self._process_interface_version)
+        return d
+
+    def _process_interface_version(self, output):
+        return tuple(map(int, output.strip().split(".")))
+
+    def db_init(self):
+        return self._run_monotone(["db", "init"])
+
+    def db_migrate(self):
+        return self._run_monotone(["db", "migrate"])
+
+    def pull(self, server, pattern):
+        return self._run_monotone(["pull", server, pattern])
+
+    def get_revision(self, rid):
+        return self._run_monotone(["cat", "revision", rid])
+
+    def get_heads(self, branch, rcfile=""):
+        cmd = ["automate", "heads", branch]
+        if rcfile:
+            cmd += ["--rcfile=" + rcfile]
+        d = self._run_monotone(cmd)
+        d.addCallback(self._process_revision_list)
+        return d
+
+    def erase_ancestors(self, revs):
+        d = self._run_monotone(["automate", "erase_ancestors"] + revs)
+        d.addCallback(self._process_revision_list)
+        return d
+
+    def ancestry_difference(self, new_rev, old_revs):
+        d = self._run_monotone(["automate", "ancestry_difference", new_rev]
+                               + old_revs)
+        d.addCallback(self._process_revision_list)
+        return d
+
+    def descendents(self, rev):
+        d = self._run_monotone(["automate", "descendents", rev])
+        d.addCallback(self._process_revision_list)
+        return d
+
+    def log(self, rev, depth=None):
+        if depth is not None:
+            depth_arg = ["--last=%i" % (depth,)]
+        else:
+            depth_arg = []
+        return self._run_monotone(["log", "-r", rev] + depth_arg)
+
+
+class MonotoneSource(service.Service, util.ComparableMixin):
+    """This source will poll a monotone server for changes and submit them to
+    the change master.
+
+    @param server_addr: monotone server specification (host:portno)
+
+    @param branch: monotone branch to watch
+
+    @param trusted_keys: list of keys whose code you trust
+
+    @param db_path: path to monotone database to pull into
+
+    @param pollinterval: interval in seconds between polls, defaults to 10 minutes
+    @param monotone_exec: path to monotone executable, defaults to "monotone"
+    """
+
+    __implements__ = IChangeSource, service.Service.__implements__
+    compare_attrs = ["server_addr", "trusted_keys", "db_path",
+                     "pollinterval", "branch", "monotone_exec"]
+
+    parent = None # filled in when we're added
+    done_revisions = []
+    last_revision = None
+    loop = None
+    d = None
+    tmpfile = None
+    monotone = None
+    volatile = ["loop", "d", "tmpfile", "monotone"]
+
+    def __init__(self, server_addr, branch, trusted_keys, db_path,
+                 pollinterval=60 * 10, monotone_exec="monotone"):
+        self.server_addr = server_addr
+        self.branch = branch
+        self.trusted_keys = trusted_keys
+        self.db_path = db_path
+        self.pollinterval = pollinterval
+        self.monotone_exec = monotone_exec
+        self.monotone = Monotone(self.monotone_exec, self.db_path)
+
+    def startService(self):
+        self.loop = LoopingCall(self.start_poll)
+        self.loop.start(self.pollinterval)
+        service.Service.startService(self)
+
+    def stopService(self):
+        self.loop.stop()
+        return service.Service.stopService(self)
+
+    def describe(self):
+        return "monotone_source %s %s" % (self.server_addr,
+                                          self.branch)
+
+    def start_poll(self):
+        if self.d is not None:
+            log.msg("last poll still in progress, skipping next poll")
+            return
+        log.msg("starting poll")
+        self.d = self._maybe_init_db()
+        self.d.addCallback(self._do_netsync)
+        self.d.addCallback(self._get_changes)
+        self.d.addErrback(self._handle_error)
+
+    def _handle_error(self, failure):
+        log.err(failure)
+        self.d = None
+
+    def _maybe_init_db(self):
+        if not os.path.exists(self.db_path):
+            log.msg("init'ing db")
+            return self.monotone.db_init()
+        else:
+            log.msg("db already exists, migrating")
+            return self.monotone.db_migrate()
+
+    def _do_netsync(self, output):
+        return self.monotone.pull(self.server_addr, self.branch)
+
+    def _get_changes(self, output):
+        d = self._get_new_head()
+        d.addCallback(self._process_new_head)
+        return d
+
+    def _get_new_head(self):
+        # This function returns a deferred that resolves to a good pick of new
+        # head (or None if there is no good new head.)
+
+        # First need to get all new heads...
+        rcfile = """function get_revision_cert_trust(signers, id, name, val)
+                      local trusted_signers = { %s }
+                      local ts_table = {}
+                      for k, v in pairs(trusted_signers) do ts_table[v] = 1 end
+                      for k, v in pairs(signers) do
+                        if ts_table[v] then
+                          return true
+                        end
+                      end
+                      return false
+                    end
+        """
+        trusted_list = ", ".join(['"' + key + '"' for key in self.trusted_keys])
+        # mktemp is unsafe, but mkstemp is not 2.2 compatible.
+        tmpfile_name = tempfile.mktemp()
+        f = open(tmpfile_name, "w")
+        f.write(rcfile % trusted_list)
+        f.close()
+        d = self.monotone.get_heads(self.branch, tmpfile_name)
+        d.addCallback(self._find_new_head, tmpfile_name)
+        return d
+
+    def _find_new_head(self, new_heads, tmpfile_name):
+        os.unlink(tmpfile_name)
+        # Now get the old head's descendents...
+        if self.last_revision is not None:
+            d = self.monotone.descendents(self.last_revision)
+        else:
+            d = defer.succeed(new_heads)
+        d.addCallback(self._pick_new_head, new_heads)
+        return d
+
+    def _pick_new_head(self, old_head_descendents, new_heads):
+        for r in new_heads:
+            if r in old_head_descendents:
+                return r
+        return None
+
+    def _process_new_head(self, new_head):
+        if new_head is None:
+            log.msg("No new head")
+            self.d = None
+            return None
+        # Okay, we have a new head; we need to get all the revisions since
+        # then and create change objects for them.
+        # Step 1: simplify set of processed revisions.
+        d = self._simplify_revisions()
+        # Step 2: get the list of new revisions
+        d.addCallback(self._get_new_revisions, new_head)
+        # Step 3: add a change for each
+        d.addCallback(self._add_changes_for_revisions)
+        # Step 4: all done
+        d.addCallback(self._finish_changes, new_head)
+        return d
+
+    def _simplify_revisions(self):
+        d = self.monotone.erase_ancestors(self.done_revisions)
+        d.addCallback(self._reset_done_revisions)
+        return d
+
+    def _reset_done_revisions(self, new_done_revisions):
+        self.done_revisions = new_done_revisions
+        return None
+
+    def _get_new_revisions(self, blah, new_head):
+        if self.done_revisions:
+            return self.monotone.ancestry_difference(new_head,
+                                                     self.done_revisions)
+        else:
+            # Don't force feed the builder with every change since the
+            # beginning of time when it's first started up.
+            return defer.succeed([new_head])
+
+    def _add_changes_for_revisions(self, revs):
+        d = defer.succeed(None)
+        for rid in revs:
+            d.addCallback(self._add_change_for_revision, rid)
+        return d
+
+    def _add_change_for_revision(self, blah, rid):
+        d = self.monotone.log(rid, 1)
+        d.addCallback(self._add_change_from_log, rid)
+        return d
+
+    def _add_change_from_log(self, log, rid):
+        d = self.monotone.get_revision(rid)
+        d.addCallback(self._add_change_from_log_and_revision, log, rid)
+        return d
+
+    def _add_change_from_log_and_revision(self, revision, log, rid):
+        # Stupid way to pull out everything inside quotes (which currently
+        # uniquely identifies filenames inside a changeset).
+        pieces = revision.split('"')
+        files = []
+        for i in range(len(pieces)):
+            if (i % 2) == 1:
+                files.append(pieces[i])
+        # Also pull out author key and date
+        author = "unknown author"
+        pieces = log.split('\n')
+        for p in pieces:
+            if p.startswith("Author:"):
+                author = p.split()[1]
+        self.parent.addChange(Change(author, files, log, revision=rid))
+
+    def _finish_changes(self, blah, new_head):
+        self.done_revisions.append(new_head)
+        self.last_revision = new_head
+        self.d = None
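
For reference, a minimal sketch of constructing this poller from the signature above; the server address, branch, key, database path, and interval are all placeholders:

    from buildbot.changes.monotone import MonotoneSource

    source = MonotoneSource("monotone.example.org:4691",
                            branch="net.example.project",
                            trusted_keys=["dev@example.org"],
                            db_path="/home/buildbot/project.db",
                            pollinterval=5 * 60)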

Added: vendor/buildbot/current/buildbot/changes/p4poller.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/p4poller.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/p4poller.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,204 @@
+# -*- test-case-name: buildbot.test.test_p4poller -*-
+
+# Many thanks to Dave Peticolas for contributing this module
+
+import re
+import time
+
+from twisted.python import log, failure
+from twisted.internet import defer, reactor
+from twisted.internet.utils import getProcessOutput
+from twisted.internet.task import LoopingCall
+
+from buildbot import util
+from buildbot.changes import base, changes
+
+def get_simple_split(branchfile):
+    """Splits the branchfile argument and assuming branch is 
+       the first path component in branchfile, will return
+       branch and file else None."""
+
+    index = branchfile.find('/')
+    if index == -1: return None, None
+    branch, file = branchfile.split('/', 1)
+    return branch, file
+
+class P4Source(base.ChangeSource, util.ComparableMixin):
+    """This source will poll a perforce repository for changes and submit
+    them to the change master."""
+
+    compare_attrs = ["p4port", "p4user", "p4passwd", "p4base",
+                     "p4bin", "pollinterval", "histmax"]
+
+    changes_line_re = re.compile(
+            r"Change (?P<num>\d+) on \S+ by \S+@\S+ '.+'$")
+    describe_header_re = re.compile(
+            r"Change \d+ by (?P<who>\S+)@\S+ on (?P<when>.+)$")
+    file_re = re.compile(r"^\.\.\. (?P<path>[^#]+)#\d+ \w+$")
+    datefmt = '%Y/%m/%d %H:%M:%S'
+
+    parent = None # filled in when we're added
+    last_change = None
+    loop = None
+    working = False
+
+    def __init__(self, p4port=None, p4user=None, p4passwd=None,
+                 p4base='//', p4bin='p4',
+                 split_file=lambda branchfile: (None, branchfile),
+                 pollinterval=60 * 10, histmax=100):
+        """
+        @type  p4port:       string
+        @param p4port:       p4 port definition (host:portno)
+        @type  p4user:       string
+        @param p4user:       p4 user
+        @type  p4passwd:     string
+        @param p4passwd:     p4 passwd
+        @type  p4base:       string
+        @param p4base:       p4 file specification to limit a poll to
+                             without the trailing '...' (i.e., //)
+        @type  p4bin:        string
+        @param p4bin:        path to p4 binary, defaults to just 'p4'
+        @type  split_file:   func
+        @param split_file:   splits a filename into branch and filename.
+        @type  pollinterval: int
+        @param pollinterval: interval in seconds between polls
+        @type  histmax:      int
+        @param histmax:      maximum number of changes to look back through
+        """
+
+        self.p4port = p4port
+        self.p4user = p4user
+        self.p4passwd = p4passwd
+        self.p4base = p4base
+        self.p4bin = p4bin
+        self.split_file = split_file
+        self.pollinterval = pollinterval
+        self.histmax = histmax
+        self.loop = LoopingCall(self.checkp4)
+
+    def startService(self):
+        base.ChangeSource.startService(self)
+
+        # Don't start the loop just yet because the reactor isn't running.
+        # Give it a chance to go and install our SIGCHLD handler before
+        # spawning processes.
+        reactor.callLater(0, self.loop.start, self.pollinterval)
+
+    def stopService(self):
+        self.loop.stop()
+        return base.ChangeSource.stopService(self)
+
+    def describe(self):
+        return "p4source %s %s" % (self.p4port, self.p4base)
+
+    def checkp4(self):
+        # Our return value is only used for unit testing.
+        if self.working:
+            log.msg("Skipping checkp4 because last one has not finished")
+            return defer.succeed(None)
+        else:
+            self.working = True
+            d = self._get_changes()
+            d.addCallback(self._process_changes)
+            d.addBoth(self._finished)
+            return d
+
+    def _finished(self, res):
+        assert self.working
+        self.working = False
+
+        # Again, the return value is only for unit testing.
+        # If there's a failure, log it so it isn't lost.
+        if isinstance(res, failure.Failure):
+            log.msg('P4 poll failed: %s' % res)
+        return res
+
+    def _get_changes(self):
+        args = []
+        if self.p4port:
+            args.extend(['-p', self.p4port])
+        if self.p4user:
+            args.extend(['-u', self.p4user])
+        if self.p4passwd:
+            args.extend(['-P', self.p4passwd])
+        args.extend(['changes', '-m', str(self.histmax), self.p4base + '...'])
+        env = {}
+        return getProcessOutput(self.p4bin, args, env)
+
+    def _process_changes(self, result):
+        last_change = self.last_change
+        changelists = []
+        for line in result.split('\n'):
+            line = line.strip()
+            if not line: continue
+            m = self.changes_line_re.match(line)
+            assert m, "Unexpected 'p4 changes' output: %r" % result
+            num = m.group('num')
+            if last_change is None:
+                log.msg('P4Poller: starting at change %s' % num)
+                self.last_change = num
+                return []
+            if last_change == num:
+                break
+            changelists.append(num)
+        changelists.reverse() # oldest first
+
+        # Retrieve each sequentially.
+        d = defer.succeed(None)
+        for c in changelists:
+            d.addCallback(self._get_describe, c)
+            d.addCallback(self._process_describe, c)
+        return d
+
+    def _get_describe(self, dummy, num):
+        args = []
+        if self.p4port:
+            args.extend(['-p', self.p4port])
+        if self.p4user:
+            args.extend(['-u', self.p4user])
+        if self.p4passwd:
+            args.extend(['-P', self.p4passwd])
+        args.extend(['describe', '-s', num])
+        env = {}
+        d = getProcessOutput(self.p4bin, args, env)
+        return d
+
+    def _process_describe(self, result, num):
+        lines = result.split('\n')
+        # SF#1555985: Wade Brainerd reports a stray ^M at the end of the date
+        # field. The rstrip() is intended to remove that.
+        lines[0] = lines[0].rstrip()
+        m = self.describe_header_re.match(lines[0])
+        assert m, "Unexpected 'p4 describe -s' result: %r" % result
+        who = m.group('who')
+        when = time.mktime(time.strptime(m.group('when'), self.datefmt))
+        comments = ''
+        while not lines[0].startswith('Affected files'):
+            comments += lines.pop(0) + '\n'
+        lines.pop(0) # affected files
+
+        branch_files = {} # dict for branch mapped to file(s)
+        while lines:
+            line = lines.pop(0).strip()
+            if not line: continue
+            m = self.file_re.match(line)
+            assert m, "Invalid file line: %r" % line
+            path = m.group('path')
+            if path.startswith(self.p4base):
+                branch, file = self.split_file(path[len(self.p4base):])
+                if (branch == None and file == None): continue
+                if branch_files.has_key(branch):
+                    branch_files[branch].append(file)
+                else:
+                    branch_files[branch] = [file]
+
+        for branch in branch_files:
+            c = changes.Change(who=who,
+                               files=branch_files[branch],
+                               comments=comments,
+                               revision=num,
+                               when=when,
+                               branch=branch)
+            self.parent.addChange(c)
+
+        self.last_change = num
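
For reference, a minimal sketch of constructing the poller; the port, credentials, and depot path are placeholders, and get_simple_split (defined above) treats the first path component under p4base as the branch:

    from buildbot.changes.p4poller import P4Source, get_simple_split

    source = P4Source(p4port="perforce.example.org:1666",
                      p4user="buildbot", p4passwd="secret",
                      p4base="//depot/project/",
                      split_file=get_simple_split,
                      pollinterval=5 * 60)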

Added: vendor/buildbot/current/buildbot/changes/pb.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/pb.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/pb.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,108 @@
+# -*- test-case-name: buildbot.test.test_changes -*-
+
+from twisted.python import log
+
+from buildbot.pbutil import NewCredPerspective
+from buildbot.changes import base, changes
+
+class ChangePerspective(NewCredPerspective):
+
+    def __init__(self, changemaster, prefix):
+        self.changemaster = changemaster
+        self.prefix = prefix
+
+    def attached(self, mind):
+        return self
+    def detached(self, mind):
+        pass
+
+    def perspective_addChange(self, changedict):
+        log.msg("perspective_addChange called")
+        pathnames = []
+        prefixpaths = None
+        for path in changedict['files']:
+            if self.prefix:
+                if not path.startswith(self.prefix):
+                    # this file does not start with the prefix, so ignore it
+                    continue
+                path = path[len(self.prefix):]
+            pathnames.append(path)
+
+        if pathnames:
+            change = changes.Change(changedict['who'],
+                                    pathnames,
+                                    changedict['comments'],
+                                    branch=changedict.get('branch'),
+                                    revision=changedict.get('revision'),
+                                    )
+            self.changemaster.addChange(change)
+
+class PBChangeSource(base.ChangeSource):
+    compare_attrs = ["user", "passwd", "port", "prefix"]
+
+    def __init__(self, user="change", passwd="changepw", port=None,
+                 prefix=None, sep=None):
+        """I listen on a TCP port for Changes from 'buildbot sendchange'.
+
+        I am a ChangeSource which will accept Changes from a remote source. I
+        share a TCP listening port with the buildslaves.
+
+        Both the 'buildbot sendchange' command and the
+        contrib/svn_buildbot.py tool know how to send changes to me.
+
+        @type prefix: string (or None)
+        @param prefix: if set, I will ignore any filenames that do not start
+                       with this string. Moreover I will remove this string
+                       from all filenames before creating the Change object
+                       and delivering it to the Schedulers. This is useful
+                       for changes coming from version control systems that
+                       represent branches as parent directories within the
+                       repository (like SVN and Perforce). Use a prefix of
+                       'trunk/' or 'project/branches/foobranch/' to only
+                       follow one branch and to get correct tree-relative
+                       filenames.
+
+        @param sep: DEPRECATED (with an axe). sep= was removed in
+                    buildbot-0.7.4 . Instead of using it, you should use
+                    prefix= with a trailing directory separator. This
+                    docstring (and the better-than-nothing error message
+                    which occurs when you use it) will be removed in 0.7.5 .
+        """
+
+        # sep= was removed in 0.7.4 . This more-helpful-than-nothing error
+        # message will be removed in 0.7.5 .
+        assert sep is None, "prefix= is now a complete string, do not use sep="
+        # TODO: current limitations
+        assert user == "change"
+        assert passwd == "changepw"
+        assert port == None
+        self.user = user
+        self.passwd = passwd
+        self.port = port
+        self.prefix = prefix
+
+    def describe(self):
+        # TODO: when the dispatcher is fixed, report the specific port
+        #d = "PB listener on port %d" % self.port
+        d = "PBChangeSource listener on all-purpose slaveport"
+        if self.prefix is not None:
+            d += " (prefix '%s')" % self.prefix
+        return d
+
+    def startService(self):
+        base.ChangeSource.startService(self)
+        # our parent is the ChangeMaster object
+        # find the master's Dispatch object and register our username
+        # TODO: the passwd should be registered here too
+        master = self.parent.parent
+        master.dispatcher.register(self.user, self)
+
+    def stopService(self):
+        base.ChangeSource.stopService(self)
+        # unregister our username
+        master = self.parent.parent
+        master.dispatcher.unregister(self.user)
+
+    def getPerspective(self):
+        return ChangePerspective(self.parent, self.prefix)
+
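
For reference, a minimal sketch of constructing this source; only the default credentials are accepted at this point (see the asserts in __init__), and the prefix is a placeholder:

    from buildbot.changes.pb import PBChangeSource

    # changes arrive over the shared slaveport, e.g. from
    # 'buildbot sendchange' or contrib/svn_buildbot.py
    source = PBChangeSource(prefix="trunk/")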

Added: vendor/buildbot/current/buildbot/changes/svnpoller.py
===================================================================
--- vendor/buildbot/current/buildbot/changes/svnpoller.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/changes/svnpoller.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,446 @@
+# -*- test-case-name: buildbot.test.test_svnpoller -*-
+
+# Based on the work of Dave Peticolas for the P4poll
+# Changed to svn (using xml.dom.minidom) by Niklaus Giger
+# Hacked beyond recognition by Brian Warner
+
+import time
+
+from twisted.python import log
+from twisted.internet import defer, reactor, utils
+from twisted.internet.task import LoopingCall
+
+from buildbot import util
+from buildbot.changes import base
+from buildbot.changes.changes import Change
+
+import xml.dom.minidom
+
+def _assert(condition, msg):
+    if condition:
+        return True
+    raise AssertionError(msg)
+
+def dbgMsg(myString):
+    log.msg(myString)
+    return 1
+
+# these split_file_* functions are available for use as values to the
+# split_file= argument.
+def split_file_alwaystrunk(path):
+    return (None, path)
+
+def split_file_branches(path):
+    # turn trunk/subdir/file.c into (None, "subdir/file.c")
+    # and branches/1.5.x/subdir/file.c into ("branches/1.5.x", "subdir/file.c")
+    pieces = path.split('/')
+    if pieces[0] == 'trunk':
+        return (None, '/'.join(pieces[1:]))
+    elif pieces[0] == 'branches':
+        return ('/'.join(pieces[0:2]), '/'.join(pieces[2:]))
+    else:
+        return None
+
+
+class SVNPoller(base.ChangeSource, util.ComparableMixin):
+    """This source will poll a Subversion repository for changes and submit
+    them to the change master."""
+
+    compare_attrs = ["svnurl", "split_file_function",
+                     "svnuser", "svnpasswd",
+                     "pollinterval", "histmax",
+                     "svnbin"]
+
+    parent = None # filled in when we're added
+    last_change = None
+    loop = None
+    working = False
+
+    def __init__(self, svnurl, split_file=None,
+                 svnuser=None, svnpasswd=None,
+                 pollinterval=10*60, histmax=100,
+                 svnbin='svn'):
+        """
+        @type  svnurl: string
+        @param svnurl: the SVN URL that describes the repository and
+                       subdirectory to watch. If this ChangeSource should
+                       only pay attention to a single branch, this should
+                       point at the repository for that branch, like
+                       svn://svn.twistedmatrix.com/svn/Twisted/trunk . If it
+                       should follow multiple branches, point it at the
+                       repository directory that contains all the branches
+                       like svn://svn.twistedmatrix.com/svn/Twisted and also
+                       provide a branch-determining function.
+
+                       Each file in the repository has a SVN URL in the form
+                       (SVNURL)/(BRANCH)/(FILEPATH), where (BRANCH) could be
+                       empty or not, depending upon your branch-determining
+                       function. Only files that start with (SVNURL)/(BRANCH)
+                       will be monitored. The Change objects that are sent to
+                       the Schedulers will see (FILEPATH) for each modified
+                       file.
+
+        @type  split_file: callable or None
+        @param split_file: a function that is called with a string of the
+                           form (BRANCH)/(FILEPATH) and should return a tuple
+                           (BRANCH, FILEPATH). This function should match
+                           your repository's branch-naming policy. Each
+                           changed file has a fully-qualified URL that can be
+                           split into a prefix (which equals the value of the
+                           'svnurl' argument) and a suffix; it is this suffix
+                           which is passed to the split_file function.
+
+                           If the function returns None, the file is ignored.
+                           Use this to indicate that the file is not a part
+                           of this project.
+                           
+                           For example, if your repository puts the trunk in
+                           trunk/... and branches are in places like
+                           branches/1.5/..., your split_file function could
+                           look like the following (this function is
+                           available as svnpoller.split_file_branches)::
+
+                            pieces = path.split('/')
+                            if pieces[0] == 'trunk':
+                                return (None, '/'.join(pieces[1:]))
+                            elif pieces[0] == 'branches':
+                                return ('/'.join(pieces[0:2]),
+                                        '/'.join(pieces[2:]))
+                            else:
+                                return None
+
+                           If instead your repository layout puts the trunk
+                           for ProjectA in trunk/ProjectA/... and the 1.5
+                           branch in branches/1.5/ProjectA/..., your
+                           split_file function could look like::
+
+                            pieces = path.split('/')
+                            if pieces[0] == 'trunk':
+                                branch = None
+                                pieces.pop(0) # remove 'trunk'
+                            elif pieces[0] == 'branches':
+                                pieces.pop(0) # remove 'branches'
+                                # grab branch name
+                                branch = 'branches/' + pieces.pop(0)
+                            else:
+                                return None # something weird
+                            projectname = pieces.pop(0)
+                            if projectname != 'ProjectA':
+                                return None # wrong project
+                            return (branch, '/'.join(pieces))
+
+                           The default of split_file= is None, which
+                           indicates that no splitting should be done. This
+                           is equivalent to the following function::
+
+                            return (None, path)
+
+                           If you wish, you can override the split_file
+                           method with the same sort of function instead of
+                           passing in a split_file= argument.
+
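+                           For instance (a hedged sketch; the subclass
+                           name is hypothetical)::
+
+                            class TrunkOnlySVNPoller(SVNPoller):
+                                def split_file(self, path):
+                                    # watch only the trunk, ignore
+                                    # everything else
+                                    pieces = path.split('/')
+                                    if pieces[0] == 'trunk':
+                                        return (None, '/'.join(pieces[1:]))
+                                    return None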
+
+        @type  svnuser:      string
+        @param svnuser:      If set, the --username option will be added to
+                             the 'svn log' command. You may need this to get
+                             access to a private repository.
+        @type  svnpasswd:    string
+        @param svnpasswd:    If set, the --password option will be added.
+
+        @type  pollinterval: int
+        @param pollinterval: interval in seconds between polls. The default
+                             is 600 seconds (10 minutes). Smaller values
+                             decrease the latency between the time a change
+                             is recorded and the time the buildbot notices
+                             it, but they also increase the system load.
+
+        @type  histmax:      int
+        @param histmax:      maximum number of changes to look back through.
+                             The default is 100. Smaller values decrease
+                             system load, but if more than histmax changes
+                             are recorded between polls, the extra ones will
+                             be silently lost.
+
+        @type  svnbin:       string
+        @param svnbin:       path to svn binary, defaults to just 'svn'. Use
+                             this if your subversion command lives in an
+                             unusual location.
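+
+        Example (a hedged sketch; the repository URL, project layout, and
+        polling interval are hypothetical)::
+
+         from buildbot.changes.svnpoller import SVNPoller, split_file_branches
+         source = SVNPoller("https://svn.example.com/repos/ProjectA",
+                            split_file=split_file_branches,
+                            pollinterval=300)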
+        """
+
+        if svnurl.endswith("/"):
+            svnurl = svnurl[:-1] # strip the trailing slash
+        self.svnurl = svnurl
+        self.split_file_function = split_file or split_file_alwaystrunk
+        self.svnuser = svnuser
+        self.svnpasswd = svnpasswd
+
+        self.svnbin = svnbin
+        self.pollinterval = pollinterval
+        self.histmax = histmax
+        self._prefix = None
+        self.overrun_counter = 0
+        self.loop = LoopingCall(self.checksvn)
+
+    def split_file(self, path):
+        # use getattr() to avoid turning this function into a bound method,
+        # which would require it to have an extra 'self' argument
+        f = getattr(self, "split_file_function")
+        return f(path)
+
+    def startService(self):
+        log.msg("SVNPoller(%s) starting" % self.svnurl)
+        base.ChangeSource.startService(self)
+        # Don't start the loop just yet because the reactor isn't running.
+        # Give it a chance to go and install our SIGCHLD handler before
+        # spawning processes.
+        reactor.callLater(0, self.loop.start, self.pollinterval)
+
+    def stopService(self):
+        log.msg("SVNPoller(%s) shutting down" % self.svnurl)
+        self.loop.stop()
+        return base.ChangeSource.stopService(self)
+
+    def describe(self):
+        return "SVNPoller watching %s" % self.svnurl
+
+    def checksvn(self):
+        # Our return value is only used for unit testing.
+
+        # we need to figure out the repository root, so we can figure out
+        # repository-relative pathnames later. Each SVNURL is in the form
+        # (ROOT)/(PROJECT)/(BRANCH)/(FILEPATH), where (ROOT) is something
+        # like svn://svn.twistedmatrix.com/svn/Twisted (i.e. there is a
+        # physical repository at /svn/Twisted on that host), (PROJECT) is
+        # something like Projects/Twisted (i.e. within the repository's
+        # internal namespace, everything under Projects/Twisted/ has
+        # something to do with Twisted, but these directory names do not
+        # actually appear on the repository host), (BRANCH) is something like
+        # "trunk" or "branches/2.0.x", and (FILEPATH) is a tree-relative
+        # filename like "twisted/internet/defer.py".
+
+        # our self.svnurl attribute contains (ROOT)/(PROJECT) combined
+        # together in a way that we can't separate without svn's help. If the
+        # user is not using the split_file= argument, then self.svnurl might
+        # be (ROOT)/(PROJECT)/(BRANCH) . In any case, the filenames we will
+        # get back from 'svn log' will be of the form
+        # (PROJECT)/(BRANCH)/(FILEPATH), but we want to be able to remove
+        # that (PROJECT) prefix from them. To do this without requiring the
+        # user to tell us how svnurl is split into ROOT and PROJECT, we do an
+        # 'svn info --xml' command at startup. This command will include a
+        # <root> element that tells us ROOT. We then strip this prefix from
+        # self.svnurl to determine PROJECT, and then later we strip the
+        # PROJECT prefix from the filenames reported by 'svn log --xml' to
+        # get a (BRANCH)/(FILEPATH) that can be passed to split_file() to
+        # turn into separate BRANCH and FILEPATH values.
+
+        # whew.
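+
+        # Worked example (URL and paths here are hypothetical): if
+        # self.svnurl is svn://svn.example.com/repos/Projects/Foo and
+        # 'svn info --xml' reports <root>svn://svn.example.com/repos</root>,
+        # then self._prefix becomes "Projects/Foo". A path that 'svn log'
+        # later reports as /Projects/Foo/trunk/src/main.c is stripped down
+        # to trunk/src/main.c before being handed to split_file().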
+
+        if self.working:
+            log.msg("SVNPoller(%s) overrun: timer fired but the previous "
+                    "poll had not yet finished." % self.svnurl)
+            self.overrun_counter += 1
+            return defer.succeed(None)
+        self.working = True
+
+        log.msg("SVNPoller polling")
+        if not self._prefix:
+            # this sets self._prefix when it finishes. It fires with
+            # self._prefix as well, because that makes the unit tests easier
+            # to write.
+            d = self.get_root()
+            d.addCallback(self.determine_prefix)
+        else:
+            d = defer.succeed(self._prefix)
+
+        d.addCallback(self.get_logs)
+        d.addCallback(self.parse_logs)
+        d.addCallback(self.get_new_logentries)
+        d.addCallback(self.create_changes)
+        d.addCallback(self.submit_changes)
+        d.addBoth(self.finished)
+        return d
+
+    def getProcessOutput(self, args):
+        # this exists so we can override it during the unit tests
+        d = utils.getProcessOutput(self.svnbin, args, {})
+        return d
+
+    def get_root(self):
+        args = ["info", "--xml", "--non-interactive", self.svnurl]
+        if self.svnuser:
+            args.extend(["--username=%s" % self.svnuser])
+        if self.svnpasswd:
+            args.extend(["--password=%s" % self.svnpasswd])
+        d = self.getProcessOutput(args)
+        return d
+
+    def determine_prefix(self, output):
+        try:
+            doc = xml.dom.minidom.parseString(output)
+        except xml.parsers.expat.ExpatError:
+            dbgMsg("_process_changes: ExpatError in %s" % output)
+            log.msg("SVNPoller._determine_prefix_2: ExpatError in '%s'"
+                    % output)
+            raise
+        rootnodes = doc.getElementsByTagName("root")
+        if not rootnodes:
+            # this happens if the URL we gave was already the root. In this
+            # case, our prefix is empty.
+            self._prefix = ""
+            return self._prefix
+        rootnode = rootnodes[0]
+        root = "".join([c.data for c in rootnode.childNodes])
+        # root will be a unicode string
+        _assert(self.svnurl.startswith(root),
+                "svnurl='%s' doesn't start with <root>='%s'" %
+                (self.svnurl, root))
+        self._prefix = self.svnurl[len(root):]
+        if self._prefix.startswith("/"):
+            self._prefix = self._prefix[1:]
+        log.msg("SVNPoller: svnurl=%s, root=%s, so prefix=%s" %
+                (self.svnurl, root, self._prefix))
+        return self._prefix
+
+    def get_logs(self, ignored_prefix=None):
+        args = []
+        args.extend(["log", "--xml", "--verbose", "--non-interactive"])
+        if self.svnuser:
+            args.extend(["--username=%s" % self.svnuser])
+        if self.svnpasswd:
+            args.extend(["--password=%s" % self.svnpasswd])
+        args.extend(["--limit=%d" % (self.histmax), self.svnurl])
+        d = self.getProcessOutput(args)
+        return d
+
+    def parse_logs(self, output):
+        # parse the XML output, return a list of <logentry> nodes
+        try:
+            doc = xml.dom.minidom.parseString(output)
+        except xml.parsers.expat.ExpatError:
+            dbgMsg("_process_changes: ExpatError in %s" % output)
+            log.msg("SVNPoller._parse_changes: ExpatError in '%s'" % output)
+            raise
+        logentries = doc.getElementsByTagName("logentry")
+        return logentries
+
+
+    def _filter_new_logentries(self, logentries, last_change):
+        # given a list of logentries, return a tuple of (new_last_change,
+        # new_logentries), where new_logentries contains only the ones after
+        # last_change
+        if not logentries:
+            # no entries, so last_change must stay at None
+            return (None, [])
+
+        mostRecent = int(logentries[0].getAttribute("revision"))
+
+        if last_change is None:
+            # if this is the first time we've been run, ignore any changes
+            # that occurred before now. This prevents a build at every
+            # startup.
+            log.msg('svnPoller: starting at change %s' % mostRecent)
+            return (mostRecent, [])
+
+        if last_change == mostRecent:
+            # an unmodified repository will hit this case
+            log.msg('svnPoller: _process_changes last %s mostRecent %s' % (
+                      last_change, mostRecent))
+            return (mostRecent, [])
+
+        new_logentries = []
+        for el in logentries:
+            if last_change == int(el.getAttribute("revision")):
+                break
+            new_logentries.append(el)
+        new_logentries.reverse() # return oldest first
+        return (mostRecent, new_logentries)
+
+    def get_new_logentries(self, logentries):
+        last_change = self.last_change
+        (new_last_change,
+         new_logentries) = self._filter_new_logentries(logentries,
+                                                       self.last_change)
+        self.last_change = new_last_change
+        log.msg('svnPoller: _process_changes %s .. %s' %
+                (last_change, new_last_change))
+        return new_logentries
+
+
+    def _get_text(self, element, tag_name):
+        child_nodes = element.getElementsByTagName(tag_name)[0].childNodes
+        text = "".join([t.data for t in child_nodes])
+        return text
+
+    def _transform_path(self, path):
+        _assert(path.startswith(self._prefix),
+                "filepath '%s' should start with prefix '%s'" %
+                (path, self._prefix))
+        relative_path = path[len(self._prefix):]
+        if relative_path.startswith("/"):
+            relative_path = relative_path[1:]
+        where = self.split_file(relative_path)
+        # 'where' is either None or (branch, final_path)
+        return where
+
+    def create_changes(self, new_logentries):
+        changes = []
+
+        for el in new_logentries:
+            branch_files = [] # get oldest change first
+            # TODO: revisit this, I think I've settled on Change.revision
+            # being a string everywhere, and leaving the interpretation
+            # of that string up to b.s.source.SVN methods
+            revision = int(el.getAttribute("revision"))
+            dbgMsg("Adding change revision %s" % (revision,))
+            # TODO: the rest of buildbot may not be ready for unicode 'who'
+            # values
+            author   = self._get_text(el, "author")
+            comments = self._get_text(el, "msg")
+            # there is a "date" field, but it provides localtime in the
+            # repository's timezone, whereas we care about buildmaster's
+            # localtime (since this will get used to position the boxes on
+            # the Waterfall display, etc). So ignore the date field and use
+            # our local clock instead.
+            #when     = self._get_text(el, "date")
+            #when     = time.mktime(time.strptime("%.19s" % when,
+            #                                     "%Y-%m-%dT%H:%M:%S"))
+            branches = {}
+            pathlist = el.getElementsByTagName("paths")[0]
+            for p in pathlist.getElementsByTagName("path"):
+                path = "".join([t.data for t in p.childNodes])
+                # the rest of buildbot is certainly not yet ready to handle
+                # unicode filenames, because they get put in RemoteCommands
+                # which get sent via PB to the buildslave, and PB doesn't
+                # handle unicode.
+                path = path.encode("ascii")
+                if path.startswith("/"):
+                    path = path[1:]
+                where = self._transform_path(path)
+                # if 'where' is None, the file was outside any project that
+                # we care about and we should ignore it
+                if where:
+                    branch, filename = where
+                    if not branch in branches:
+                        branches[branch] = []
+                    branches[branch].append(filename)
+
+            for branch in branches:
+                c = Change(who=author,
+                           files=branches[branch],
+                           comments=comments,
+                           revision=revision,
+                           branch=branch)
+                changes.append(c)
+
+        return changes
+
+    def submit_changes(self, changes):
+        for c in changes:
+            self.parent.addChange(c)
+
+    def finished(self, res):
+        log.msg("SVNPoller finished polling")
+        dbgMsg('_finished : %s' % res)
+        assert self.working
+        self.working = False
+        return res

Added: vendor/buildbot/current/buildbot/clients/__init__.py
===================================================================

Added: vendor/buildbot/current/buildbot/clients/base.py
===================================================================
--- vendor/buildbot/current/buildbot/clients/base.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/clients/base.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,126 @@
+#! /usr/bin/python
+
+import sys, re
+
+from twisted.spread import pb
+from twisted.cred import credentials, error
+from twisted.internet import reactor
+
+class StatusClient(pb.Referenceable):
+    """To use this, call my .connected method with a RemoteReference to the
+    buildmaster's StatusClientPerspective object.
+    """
+
+    def __init__(self, events):
+        self.builders = {}
+        self.events = events
+
+    def connected(self, remote):
+        print "connected"
+        self.remote = remote
+        remote.callRemote("subscribe", self.events, 5, self)
+
+    def remote_builderAdded(self, buildername, builder):
+        print "builderAdded", buildername
+
+    def remote_builderRemoved(self, buildername):
+        print "builderRemoved", buildername
+
+    def remote_builderChangedState(self, buildername, state, eta):
+        print "builderChangedState", buildername, state, eta
+
+    def remote_buildStarted(self, buildername, build):
+        print "buildStarted", buildername
+
+    def remote_buildFinished(self, buildername, build, results):
+        print "buildFinished", results
+
+    def remote_buildETAUpdate(self, buildername, build, eta):
+        print "ETA", buildername, eta
+
+    def remote_stepStarted(self, buildername, build, stepname, step):
+        print "stepStarted", buildername, stepname
+
+    def remote_stepFinished(self, buildername, build, stepname, step, results):
+        print "stepFinished", buildername, stepname, results
+
+    def remote_stepETAUpdate(self, buildername, build, stepname, step,
+                             eta, expectations):
+        print "stepETA", buildername, stepname, eta
+
+    def remote_logStarted(self, buildername, build, stepname, step,
+                          logname, log):
+        print "logStarted", buildername, stepname
+
+    def remote_logFinished(self, buildername, build, stepname, step,
+                           logname, log):
+        print "logFinished", buildername, stepname
+
+    def remote_logChunk(self, buildername, build, stepname, step, logname, log,
+                        channel, text):
+        ChunkTypes = ["STDOUT", "STDERR", "HEADER"]
+        print "logChunk[%s]: %s" % (ChunkTypes[channel], text)
+
+class TextClient:
+    def __init__(self, master, events="steps"):
+        """
+        @type  events: string, one of builders, builds, steps, logs, full
+        @param events: specify what level of detail should be reported.
+         - 'builders': only announce new/removed Builders
+         - 'builds': also announce builderChangedState, buildStarted, and
+           buildFinished
+         - 'steps': also announce buildETAUpdate, stepStarted, stepFinished
+         - 'logs': also announce stepETAUpdate, logStarted, logFinished
+         - 'full': also announce log contents
+        """        
+        self.master = master
+        self.listener = StatusClient(events)
+
+    def run(self):
+        """Start the TextClient."""
+        self.startConnecting()
+        reactor.run()
+
+    def startConnecting(self):
+        try:
+            host, port = re.search(r'(.+):(\d+)', self.master).groups()
+            port = int(port)
+        except:
+            print "unparseable master location '%s'" % self.master
+            print " expecting something more like localhost:8007"
+            raise
+        cf = pb.PBClientFactory()
+        creds = credentials.UsernamePassword("statusClient", "clientpw")
+        d = cf.login(creds)
+        reactor.connectTCP(host, port, cf)
+        d.addCallbacks(self.connected, self.not_connected)
+        return d
+    def connected(self, ref):
+        ref.notifyOnDisconnect(self.disconnected)
+        self.listener.connected(ref)
+    def not_connected(self, why):
+        if why.check(error.UnauthorizedLogin):
+            print """
+Unable to log in... are you sure we are connecting to a
+buildbot.status.client.PBListener port and not to the slaveport?
+"""
+        reactor.stop()
+        return why
+    def disconnected(self, ref):
+        print "lost connection"
+        # we can get here in one of two ways: the buildmaster has
+        # disconnected us (probably because it shut itself down), or because
+        # we've been SIGINT'ed. In the latter case, our reactor is already
+        # shut down, but we have no easy way of detecting that. So protect
+        # our attempt to shut down the reactor.
+        try:
+            reactor.stop()
+        except RuntimeError:
+            pass
+
+if __name__ == '__main__':
+    master = "localhost:8007"
+    if len(sys.argv) > 1:
+        master = sys.argv[1]
+    c = TextClient()
+    c.run()

Added: vendor/buildbot/current/buildbot/clients/debug.glade
===================================================================
--- vendor/buildbot/current/buildbot/clients/debug.glade	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/clients/debug.glade	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,684 @@
+<?xml version="1.0" standalone="no"?> <!--*- mode: xml -*-->
+<!DOCTYPE glade-interface SYSTEM "http://glade.gnome.org/glade-2.0.dtd">
+
+<glade-interface>
+<requires lib="gnome"/>
+
+<widget class="GtkWindow" id="window1">
+  <property name="visible">True</property>
+  <property name="title" translatable="yes">Buildbot Debug Tool</property>
+  <property name="type">GTK_WINDOW_TOPLEVEL</property>
+  <property name="window_position">GTK_WIN_POS_NONE</property>
+  <property name="modal">False</property>
+  <property name="resizable">True</property>
+  <property name="destroy_with_parent">False</property>
+  <property name="decorated">True</property>
+  <property name="skip_taskbar_hint">False</property>
+  <property name="skip_pager_hint">False</property>
+  <property name="type_hint">GDK_WINDOW_TYPE_HINT_NORMAL</property>
+  <property name="gravity">GDK_GRAVITY_NORTH_WEST</property>
+  <property name="focus_on_map">True</property>
+  <property name="urgency_hint">False</property>
+
+  <child>
+    <widget class="GtkVBox" id="vbox1">
+      <property name="visible">True</property>
+      <property name="homogeneous">False</property>
+      <property name="spacing">0</property>
+
+      <child>
+	<widget class="GtkHBox" id="connection">
+	  <property name="visible">True</property>
+	  <property name="homogeneous">False</property>
+	  <property name="spacing">0</property>
+
+	  <child>
+	    <widget class="GtkButton" id="connectbutton">
+	      <property name="visible">True</property>
+	      <property name="can_focus">True</property>
+	      <property name="label" translatable="yes">Connect</property>
+	      <property name="use_underline">True</property>
+	      <property name="relief">GTK_RELIEF_NORMAL</property>
+	      <property name="focus_on_click">True</property>
+	      <signal name="clicked" handler="do_connect"/>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">False</property>
+	      <property name="fill">False</property>
+	    </packing>
+	  </child>
+
+	  <child>
+	    <widget class="GtkLabel" id="connectlabel">
+	      <property name="visible">True</property>
+	      <property name="label" translatable="yes">Disconnected</property>
+	      <property name="use_underline">False</property>
+	      <property name="use_markup">False</property>
+	      <property name="justify">GTK_JUSTIFY_CENTER</property>
+	      <property name="wrap">False</property>
+	      <property name="selectable">False</property>
+	      <property name="xalign">0.5</property>
+	      <property name="yalign">0.5</property>
+	      <property name="xpad">0</property>
+	      <property name="ypad">0</property>
+	      <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+	      <property name="width_chars">-1</property>
+	      <property name="single_line_mode">False</property>
+	      <property name="angle">0</property>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">True</property>
+	      <property name="fill">True</property>
+	    </packing>
+	  </child>
+	</widget>
+	<packing>
+	  <property name="padding">0</property>
+	  <property name="expand">False</property>
+	  <property name="fill">False</property>
+	</packing>
+      </child>
+
+      <child>
+	<widget class="GtkHBox" id="commands">
+	  <property name="visible">True</property>
+	  <property name="homogeneous">False</property>
+	  <property name="spacing">0</property>
+
+	  <child>
+	    <widget class="GtkButton" id="reload">
+	      <property name="visible">True</property>
+	      <property name="can_focus">True</property>
+	      <property name="label" translatable="yes">Reload .cfg</property>
+	      <property name="use_underline">True</property>
+	      <property name="relief">GTK_RELIEF_NORMAL</property>
+	      <property name="focus_on_click">True</property>
+	      <signal name="clicked" handler="do_reload" last_modification_time="Wed, 24 Sep 2003 20:47:55 GMT"/>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">False</property>
+	      <property name="fill">False</property>
+	    </packing>
+	  </child>
+
+	  <child>
+	    <widget class="GtkButton" id="rebuild">
+	      <property name="visible">True</property>
+	      <property name="sensitive">False</property>
+	      <property name="can_focus">True</property>
+	      <property name="label" translatable="yes">Rebuild .py</property>
+	      <property name="use_underline">True</property>
+	      <property name="relief">GTK_RELIEF_NORMAL</property>
+	      <property name="focus_on_click">True</property>
+	      <signal name="clicked" handler="do_rebuild" last_modification_time="Wed, 24 Sep 2003 20:49:18 GMT"/>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">False</property>
+	      <property name="fill">False</property>
+	    </packing>
+	  </child>
+
+	  <child>
+	    <widget class="GtkButton" id="button7">
+	      <property name="visible">True</property>
+	      <property name="can_focus">True</property>
+	      <property name="label" translatable="yes">poke IRC</property>
+	      <property name="use_underline">True</property>
+	      <property name="relief">GTK_RELIEF_NORMAL</property>
+	      <property name="focus_on_click">True</property>
+	      <signal name="clicked" handler="do_poke_irc" last_modification_time="Wed, 14 Jan 2004 22:23:59 GMT"/>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">False</property>
+	      <property name="fill">False</property>
+	    </packing>
+	  </child>
+	</widget>
+	<packing>
+	  <property name="padding">0</property>
+	  <property name="expand">True</property>
+	  <property name="fill">True</property>
+	</packing>
+      </child>
+
+      <child>
+	<widget class="GtkHBox" id="hbox3">
+	  <property name="visible">True</property>
+	  <property name="homogeneous">False</property>
+	  <property name="spacing">0</property>
+
+	  <child>
+	    <widget class="GtkCheckButton" id="usebranch">
+	      <property name="visible">True</property>
+	      <property name="can_focus">True</property>
+	      <property name="label" translatable="yes">Branch:</property>
+	      <property name="use_underline">True</property>
+	      <property name="relief">GTK_RELIEF_NORMAL</property>
+	      <property name="focus_on_click">True</property>
+	      <property name="active">False</property>
+	      <property name="inconsistent">False</property>
+	      <property name="draw_indicator">True</property>
+	      <signal name="toggled" handler="on_usebranch_toggled" last_modification_time="Tue, 25 Oct 2005 01:42:45 GMT"/>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">False</property>
+	      <property name="fill">False</property>
+	    </packing>
+	  </child>
+
+	  <child>
+	    <widget class="GtkEntry" id="branch">
+	      <property name="visible">True</property>
+	      <property name="can_focus">True</property>
+	      <property name="editable">True</property>
+	      <property name="visibility">True</property>
+	      <property name="max_length">0</property>
+	      <property name="text" translatable="yes"></property>
+	      <property name="has_frame">True</property>
+	      <property name="invisible_char">*</property>
+	      <property name="activates_default">False</property>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">True</property>
+	      <property name="fill">True</property>
+	    </packing>
+	  </child>
+	</widget>
+	<packing>
+	  <property name="padding">0</property>
+	  <property name="expand">True</property>
+	  <property name="fill">True</property>
+	</packing>
+      </child>
+
+      <child>
+	<widget class="GtkHBox" id="hbox1">
+	  <property name="visible">True</property>
+	  <property name="homogeneous">False</property>
+	  <property name="spacing">0</property>
+
+	  <child>
+	    <widget class="GtkCheckButton" id="userevision">
+	      <property name="visible">True</property>
+	      <property name="can_focus">True</property>
+	      <property name="label" translatable="yes">Revision:</property>
+	      <property name="use_underline">True</property>
+	      <property name="relief">GTK_RELIEF_NORMAL</property>
+	      <property name="focus_on_click">True</property>
+	      <property name="active">False</property>
+	      <property name="inconsistent">False</property>
+	      <property name="draw_indicator">True</property>
+	      <signal name="toggled" handler="on_userevision_toggled" last_modification_time="Wed, 08 Sep 2004 17:58:33 GMT"/>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">False</property>
+	      <property name="fill">False</property>
+	    </packing>
+	  </child>
+
+	  <child>
+	    <widget class="GtkEntry" id="revision">
+	      <property name="visible">True</property>
+	      <property name="can_focus">True</property>
+	      <property name="editable">True</property>
+	      <property name="visibility">True</property>
+	      <property name="max_length">0</property>
+	      <property name="text" translatable="yes"></property>
+	      <property name="has_frame">True</property>
+	      <property name="invisible_char">*</property>
+	      <property name="activates_default">False</property>
+	    </widget>
+	    <packing>
+	      <property name="padding">0</property>
+	      <property name="expand">True</property>
+	      <property name="fill">True</property>
+	    </packing>
+	  </child>
+	</widget>
+	<packing>
+	  <property name="padding">0</property>
+	  <property name="expand">True</property>
+	  <property name="fill">True</property>
+	</packing>
+      </child>
+
+      <child>
+	<widget class="GtkFrame" id="Commit">
+	  <property name="border_width">4</property>
+	  <property name="visible">True</property>
+	  <property name="label_xalign">0</property>
+	  <property name="label_yalign">0.5</property>
+	  <property name="shadow_type">GTK_SHADOW_ETCHED_IN</property>
+
+	  <child>
+	    <widget class="GtkAlignment" id="alignment1">
+	      <property name="visible">True</property>
+	      <property name="xalign">0.5</property>
+	      <property name="yalign">0.5</property>
+	      <property name="xscale">1</property>
+	      <property name="yscale">1</property>
+	      <property name="top_padding">0</property>
+	      <property name="bottom_padding">0</property>
+	      <property name="left_padding">0</property>
+	      <property name="right_padding">0</property>
+
+	      <child>
+		<widget class="GtkVBox" id="vbox3">
+		  <property name="visible">True</property>
+		  <property name="homogeneous">False</property>
+		  <property name="spacing">0</property>
+
+		  <child>
+		    <widget class="GtkHBox" id="commit">
+		      <property name="visible">True</property>
+		      <property name="homogeneous">False</property>
+		      <property name="spacing">0</property>
+
+		      <child>
+			<widget class="GtkButton" id="button2">
+			  <property name="visible">True</property>
+			  <property name="can_focus">True</property>
+			  <property name="label" translatable="yes">commit</property>
+			  <property name="use_underline">True</property>
+			  <property name="relief">GTK_RELIEF_NORMAL</property>
+			  <property name="focus_on_click">True</property>
+			  <signal name="clicked" handler="do_commit"/>
+			</widget>
+			<packing>
+			  <property name="padding">0</property>
+			  <property name="expand">False</property>
+			  <property name="fill">False</property>
+			</packing>
+		      </child>
+
+		      <child>
+			<widget class="GtkEntry" id="filename">
+			  <property name="visible">True</property>
+			  <property name="can_focus">True</property>
+			  <property name="editable">True</property>
+			  <property name="visibility">True</property>
+			  <property name="max_length">0</property>
+			  <property name="text" translatable="yes">twisted/internet/app.py</property>
+			  <property name="has_frame">True</property>
+			  <property name="invisible_char">*</property>
+			  <property name="activates_default">False</property>
+			</widget>
+			<packing>
+			  <property name="padding">0</property>
+			  <property name="expand">True</property>
+			  <property name="fill">True</property>
+			</packing>
+		      </child>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">True</property>
+		      <property name="fill">True</property>
+		    </packing>
+		  </child>
+
+		  <child>
+		    <widget class="GtkHBox" id="hbox2">
+		      <property name="visible">True</property>
+		      <property name="homogeneous">False</property>
+		      <property name="spacing">0</property>
+
+		      <child>
+			<widget class="GtkLabel" id="label5">
+			  <property name="visible">True</property>
+			  <property name="label" translatable="yes">Who: </property>
+			  <property name="use_underline">False</property>
+			  <property name="use_markup">False</property>
+			  <property name="justify">GTK_JUSTIFY_LEFT</property>
+			  <property name="wrap">False</property>
+			  <property name="selectable">False</property>
+			  <property name="xalign">0.5</property>
+			  <property name="yalign">0.5</property>
+			  <property name="xpad">0</property>
+			  <property name="ypad">0</property>
+			  <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+			  <property name="width_chars">-1</property>
+			  <property name="single_line_mode">False</property>
+			  <property name="angle">0</property>
+			</widget>
+			<packing>
+			  <property name="padding">0</property>
+			  <property name="expand">False</property>
+			  <property name="fill">False</property>
+			</packing>
+		      </child>
+
+		      <child>
+			<widget class="GtkEntry" id="who">
+			  <property name="visible">True</property>
+			  <property name="can_focus">True</property>
+			  <property name="editable">True</property>
+			  <property name="visibility">True</property>
+			  <property name="max_length">0</property>
+			  <property name="text" translatable="yes">bob</property>
+			  <property name="has_frame">True</property>
+			  <property name="invisible_char">*</property>
+			  <property name="activates_default">False</property>
+			</widget>
+			<packing>
+			  <property name="padding">0</property>
+			  <property name="expand">True</property>
+			  <property name="fill">True</property>
+			</packing>
+		      </child>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">True</property>
+		      <property name="fill">True</property>
+		    </packing>
+		  </child>
+		</widget>
+	      </child>
+	    </widget>
+	  </child>
+
+	  <child>
+	    <widget class="GtkLabel" id="label4">
+	      <property name="visible">True</property>
+	      <property name="label" translatable="yes">Commit</property>
+	      <property name="use_underline">False</property>
+	      <property name="use_markup">False</property>
+	      <property name="justify">GTK_JUSTIFY_LEFT</property>
+	      <property name="wrap">False</property>
+	      <property name="selectable">False</property>
+	      <property name="xalign">0.5</property>
+	      <property name="yalign">0.5</property>
+	      <property name="xpad">2</property>
+	      <property name="ypad">0</property>
+	      <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+	      <property name="width_chars">-1</property>
+	      <property name="single_line_mode">False</property>
+	      <property name="angle">0</property>
+	    </widget>
+	    <packing>
+	      <property name="type">label_item</property>
+	    </packing>
+	  </child>
+	</widget>
+	<packing>
+	  <property name="padding">0</property>
+	  <property name="expand">True</property>
+	  <property name="fill">True</property>
+	</packing>
+      </child>
+
+      <child>
+	<widget class="GtkFrame" id="builderframe">
+	  <property name="border_width">4</property>
+	  <property name="visible">True</property>
+	  <property name="label_xalign">0</property>
+	  <property name="label_yalign">0.5</property>
+	  <property name="shadow_type">GTK_SHADOW_ETCHED_IN</property>
+
+	  <child>
+	    <widget class="GtkVBox" id="vbox2">
+	      <property name="visible">True</property>
+	      <property name="homogeneous">False</property>
+	      <property name="spacing">0</property>
+
+	      <child>
+		<widget class="GtkHBox" id="builder">
+		  <property name="visible">True</property>
+		  <property name="homogeneous">False</property>
+		  <property name="spacing">3</property>
+
+		  <child>
+		    <widget class="GtkLabel" id="label1">
+		      <property name="visible">True</property>
+		      <property name="label" translatable="yes">Builder:</property>
+		      <property name="use_underline">False</property>
+		      <property name="use_markup">False</property>
+		      <property name="justify">GTK_JUSTIFY_CENTER</property>
+		      <property name="wrap">False</property>
+		      <property name="selectable">False</property>
+		      <property name="xalign">0.5</property>
+		      <property name="yalign">0.5</property>
+		      <property name="xpad">0</property>
+		      <property name="ypad">0</property>
+		      <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+		      <property name="width_chars">-1</property>
+		      <property name="single_line_mode">False</property>
+		      <property name="angle">0</property>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">False</property>
+		      <property name="fill">False</property>
+		    </packing>
+		  </child>
+
+		  <child>
+		    <widget class="GtkEntry" id="buildname">
+		      <property name="visible">True</property>
+		      <property name="can_focus">True</property>
+		      <property name="editable">True</property>
+		      <property name="visibility">True</property>
+		      <property name="max_length">0</property>
+		      <property name="text" translatable="yes">one</property>
+		      <property name="has_frame">True</property>
+		      <property name="invisible_char">*</property>
+		      <property name="activates_default">False</property>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">True</property>
+		      <property name="fill">True</property>
+		    </packing>
+		  </child>
+		</widget>
+		<packing>
+		  <property name="padding">0</property>
+		  <property name="expand">True</property>
+		  <property name="fill">True</property>
+		</packing>
+	      </child>
+
+	      <child>
+		<widget class="GtkHBox" id="buildercontrol">
+		  <property name="visible">True</property>
+		  <property name="homogeneous">False</property>
+		  <property name="spacing">0</property>
+
+		  <child>
+		    <widget class="GtkButton" id="button1">
+		      <property name="visible">True</property>
+		      <property name="can_focus">True</property>
+		      <property name="label" translatable="yes">Request
+Build</property>
+		      <property name="use_underline">True</property>
+		      <property name="relief">GTK_RELIEF_NORMAL</property>
+		      <property name="focus_on_click">True</property>
+		      <signal name="clicked" handler="do_build"/>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">False</property>
+		      <property name="fill">False</property>
+		    </packing>
+		  </child>
+
+		  <child>
+		    <widget class="GtkButton" id="button8">
+		      <property name="visible">True</property>
+		      <property name="can_focus">True</property>
+		      <property name="label" translatable="yes">Ping
+Builder</property>
+		      <property name="use_underline">True</property>
+		      <property name="relief">GTK_RELIEF_NORMAL</property>
+		      <property name="focus_on_click">True</property>
+		      <signal name="clicked" handler="do_ping" last_modification_time="Fri, 24 Nov 2006 05:18:51 GMT"/>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">False</property>
+		      <property name="fill">False</property>
+		    </packing>
+		  </child>
+
+		  <child>
+		    <placeholder/>
+		  </child>
+		</widget>
+		<packing>
+		  <property name="padding">0</property>
+		  <property name="expand">True</property>
+		  <property name="fill">True</property>
+		</packing>
+	      </child>
+
+	      <child>
+		<widget class="GtkHBox" id="status">
+		  <property name="visible">True</property>
+		  <property name="homogeneous">False</property>
+		  <property name="spacing">0</property>
+
+		  <child>
+		    <widget class="GtkLabel" id="label2">
+		      <property name="visible">True</property>
+		      <property name="label" translatable="yes">Currently:</property>
+		      <property name="use_underline">False</property>
+		      <property name="use_markup">False</property>
+		      <property name="justify">GTK_JUSTIFY_CENTER</property>
+		      <property name="wrap">False</property>
+		      <property name="selectable">False</property>
+		      <property name="xalign">0.5</property>
+		      <property name="yalign">0.5</property>
+		      <property name="xpad">7</property>
+		      <property name="ypad">0</property>
+		      <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+		      <property name="width_chars">-1</property>
+		      <property name="single_line_mode">False</property>
+		      <property name="angle">0</property>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">False</property>
+		      <property name="fill">False</property>
+		    </packing>
+		  </child>
+
+		  <child>
+		    <widget class="GtkButton" id="button3">
+		      <property name="visible">True</property>
+		      <property name="can_focus">True</property>
+		      <property name="label" translatable="yes">offline</property>
+		      <property name="use_underline">True</property>
+		      <property name="relief">GTK_RELIEF_NORMAL</property>
+		      <property name="focus_on_click">True</property>
+		      <signal name="clicked" handler="do_current_offline"/>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">False</property>
+		      <property name="fill">False</property>
+		    </packing>
+		  </child>
+
+		  <child>
+		    <widget class="GtkButton" id="button4">
+		      <property name="visible">True</property>
+		      <property name="can_focus">True</property>
+		      <property name="label" translatable="yes">idle</property>
+		      <property name="use_underline">True</property>
+		      <property name="relief">GTK_RELIEF_NORMAL</property>
+		      <property name="focus_on_click">True</property>
+		      <signal name="clicked" handler="do_current_idle"/>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">False</property>
+		      <property name="fill">False</property>
+		    </packing>
+		  </child>
+
+		  <child>
+		    <widget class="GtkButton" id="button5">
+		      <property name="visible">True</property>
+		      <property name="can_focus">True</property>
+		      <property name="label" translatable="yes">waiting</property>
+		      <property name="use_underline">True</property>
+		      <property name="relief">GTK_RELIEF_NORMAL</property>
+		      <property name="focus_on_click">True</property>
+		      <signal name="clicked" handler="do_current_waiting"/>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">False</property>
+		      <property name="fill">False</property>
+		    </packing>
+		  </child>
+
+		  <child>
+		    <widget class="GtkButton" id="button6">
+		      <property name="visible">True</property>
+		      <property name="can_focus">True</property>
+		      <property name="label" translatable="yes">building</property>
+		      <property name="use_underline">True</property>
+		      <property name="relief">GTK_RELIEF_NORMAL</property>
+		      <property name="focus_on_click">True</property>
+		      <signal name="clicked" handler="do_current_building"/>
+		    </widget>
+		    <packing>
+		      <property name="padding">0</property>
+		      <property name="expand">False</property>
+		      <property name="fill">False</property>
+		    </packing>
+		  </child>
+		</widget>
+		<packing>
+		  <property name="padding">0</property>
+		  <property name="expand">True</property>
+		  <property name="fill">True</property>
+		</packing>
+	      </child>
+	    </widget>
+	  </child>
+
+	  <child>
+	    <widget class="GtkLabel" id="label3">
+	      <property name="visible">True</property>
+	      <property name="label" translatable="yes">Builder</property>
+	      <property name="use_underline">False</property>
+	      <property name="use_markup">False</property>
+	      <property name="justify">GTK_JUSTIFY_LEFT</property>
+	      <property name="wrap">False</property>
+	      <property name="selectable">False</property>
+	      <property name="xalign">0.5</property>
+	      <property name="yalign">0.5</property>
+	      <property name="xpad">2</property>
+	      <property name="ypad">0</property>
+	      <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+	      <property name="width_chars">-1</property>
+	      <property name="single_line_mode">False</property>
+	      <property name="angle">0</property>
+	    </widget>
+	    <packing>
+	      <property name="type">label_item</property>
+	    </packing>
+	  </child>
+	</widget>
+	<packing>
+	  <property name="padding">0</property>
+	  <property name="expand">True</property>
+	  <property name="fill">True</property>
+	</packing>
+      </child>
+    </widget>
+  </child>
+</widget>
+
+</glade-interface>

Added: vendor/buildbot/current/buildbot/clients/debug.py
===================================================================
--- vendor/buildbot/current/buildbot/clients/debug.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/clients/debug.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,183 @@
+#! /usr/bin/python
+
+from twisted.internet import gtk2reactor
+gtk2reactor.install()
+from twisted.internet import reactor
+from twisted.python import util
+from twisted.spread import pb
+from twisted.cred import credentials
+import gtk, gtk.glade, gnome.ui
+import sys, re
+
+class DebugWidget:
+    def __init__(self, master="localhost:8007", passwd="debugpw"):
+        self.connected = 0
+        try:
+            host, port = re.search(r'(.+):(\d+)', master).groups()
+        except:
+            print "unparseable master location '%s'" % master
+            print " expecting something more like localhost:8007"
+            raise
+        self.host = host
+        self.port = int(port)
+        self.passwd = passwd
+        self.remote = None
+        xml = self.xml = gtk.glade.XML(util.sibpath(__file__, "debug.glade"))
+        g = xml.get_widget
+        self.buildname = g('buildname')
+        self.filename = g('filename')
+        self.connectbutton = g('connectbutton')
+        self.connectlabel = g('connectlabel')
+        g('window1').connect('destroy', lambda win: gtk.main_quit())
+        # put the master info in the window's titlebar
+        g('window1').set_title("Buildbot Debug Tool: %s" % master)
+        c = xml.signal_connect
+        c('do_connect', self.do_connect)
+        c('do_reload', self.do_reload)
+        c('do_rebuild', self.do_rebuild)
+        c('do_poke_irc', self.do_poke_irc)
+        c('do_build', self.do_build)
+        c('do_ping', self.do_ping)
+        c('do_commit', self.do_commit)
+        c('on_usebranch_toggled', self.usebranch_toggled)
+        self.usebranch_toggled(g('usebranch'))
+        c('on_userevision_toggled', self.userevision_toggled)
+        self.userevision_toggled(g('userevision'))
+        c('do_current_offline', self.do_current, "offline")
+        c('do_current_idle', self.do_current, "idle")
+        c('do_current_waiting', self.do_current, "waiting")
+        c('do_current_building', self.do_current, "building")
+
+    def do_connect(self, widget):
+        if self.connected:
+            self.connectlabel.set_text("Disconnecting...")
+            if self.remote:
+                self.remote.broker.transport.loseConnection()
+        else:
+            self.connectlabel.set_text("Connecting...")
+            f = pb.PBClientFactory()
+            creds = credentials.UsernamePassword("debug", self.passwd)
+            d = f.login(creds)
+            reactor.connectTCP(self.host, int(self.port), f)
+            d.addCallbacks(self.connect_complete, self.connect_failed)
+    def connect_complete(self, ref):
+        self.connectbutton.set_label("Disconnect")
+        self.connectlabel.set_text("Connected")
+        self.connected = 1
+        self.remote = ref
+        self.remote.callRemote("print", "hello cleveland")
+        self.remote.notifyOnDisconnect(self.disconnected)
+    def connect_failed(self, why):
+        self.connectlabel.set_text("Failed")
+        print why
+    def disconnected(self, ref):
+        self.connectbutton.set_label("Connect")
+        self.connectlabel.set_text("Disconnected")
+        self.connected = 0
+        self.remote = None
+
+    def do_reload(self, widget):
+        if not self.remote:
+            return
+        d = self.remote.callRemote("reload")
+        d.addErrback(self.err)
+    def do_rebuild(self, widget):
+        print "Not yet implemented"
+        return
+    def do_poke_irc(self, widget):
+        if not self.remote:
+            return
+        d = self.remote.callRemote("pokeIRC")
+        d.addErrback(self.err)
+
+    def do_build(self, widget):
+        if not self.remote:
+            return
+        name = self.buildname.get_text()
+        branch = None
+        if self.xml.get_widget("usebranch").get_active():
+            branch = self.xml.get_widget('branch').get_text()
+            if branch == '':
+                branch = None
+        revision = None
+        if self.xml.get_widget("userevision").get_active():
+            revision = self.xml.get_widget('revision').get_text()
+            if revision == '':
+                revision = None
+        reason = "debugclient 'Request Build' button pushed"
+        d = self.remote.callRemote("requestBuild",
+                                   name, reason, branch, revision)
+        d.addErrback(self.err)
+
+    def do_ping(self, widget):
+        if not self.remote:
+            return
+        name = self.buildname.get_text()
+        d = self.remote.callRemote("pingBuilder", name)
+        d.addErrback(self.err)
+
+    def usebranch_toggled(self, widget):
+        rev = self.xml.get_widget('branch')
+        if widget.get_active():
+            rev.set_sensitive(True)
+        else:
+            rev.set_sensitive(False)
+
+    def userevision_toggled(self, widget):
+        rev = self.xml.get_widget('revision')
+        if widget.get_active():
+            rev.set_sensitive(True)
+        else:
+            rev.set_sensitive(False)
+
+    def do_commit(self, widget):
+        if not self.remote:
+            return
+        filename = self.filename.get_text()
+        who = self.xml.get_widget("who").get_text()
+
+        branch = None
+        if self.xml.get_widget("usebranch").get_active():
+            branch = self.xml.get_widget('branch').get_text()
+            if branch == '':
+                branch = None
+
+        revision = None
+        if self.xml.get_widget("userevision").get_active():
+            revision = self.xml.get_widget('revision').get_text()
+            try:
+                revision = int(revision)
+            except ValueError:
+                pass
+            if revision == '':
+                revision = None
+
+        kwargs = { 'revision': revision, 'who': who }
+        if branch:
+            kwargs['branch'] = branch
+        d = self.remote.callRemote("fakeChange", filename, **kwargs)
+        d.addErrback(self.err)
+
+    def do_current(self, widget, state):
+        if not self.remote:
+            return
+        name = self.buildname.get_text()
+        d = self.remote.callRemote("setCurrentState", name, state)
+        d.addErrback(self.err)
+    def err(self, failure):
+        print "received error"
+        failure.printTraceback()
+        
+
+    def run(self):
+        reactor.run()
+
+if __name__ == '__main__':
+    master = "localhost:8007"
+    if len(sys.argv) > 1:
+        master = sys.argv[1]
+    passwd = "debugpw"
+    if len(sys.argv) > 2:
+        passwd = sys.argv[2]
+    d = DebugWidget(master, passwd)
+    d.run()

Added: vendor/buildbot/current/buildbot/clients/gtkPanes.py
===================================================================
--- vendor/buildbot/current/buildbot/clients/gtkPanes.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/clients/gtkPanes.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,526 @@
+#! /usr/bin/python
+
+from twisted.internet import gtk2reactor
+gtk2reactor.install()
+
+from twisted.internet import reactor
+
+import sys, time
+
+import pygtk
+pygtk.require("2.0")
+import gobject, gtk
+assert(gtk.Window) # in gtk1 it's gtk.GtkWindow
+
+from twisted.spread import pb
+
+#from buildbot.clients.base import Builder, Client
+from buildbot.clients.base import TextClient
+from buildbot.util import now
+
+'''
+class Pane:
+    def __init__(self):
+        pass
+
+class OneRow(Pane):
+    """This is a one-row status bar. It has one square per Builder, and that
+    square is either red, yellow, or green. """
+
+    def __init__(self):
+        Pane.__init__(self)
+        self.widget = gtk.VBox(gtk.FALSE, 2)
+        self.nameBox = gtk.HBox(gtk.TRUE)
+        self.statusBox = gtk.HBox(gtk.TRUE)
+        self.widget.add(self.nameBox)
+        self.widget.add(self.statusBox)
+        self.widget.show_all()
+        self.builders = []
+        
+    def getWidget(self):
+        return self.widget
+    def addBuilder(self, builder):
+        print "OneRow.addBuilder"
+        # todo: ordering. Should follow the order in which they were added
+        # to the original BotMaster
+        self.builders.append(builder)
+        # add the name to the left column, and a label (with background) to
+        # the right
+        name = gtk.Label(builder.name)
+        status = gtk.Label('??')
+        status.set_size_request(64,64)
+        box = gtk.EventBox()
+        box.add(status)
+        name.show()
+        box.show_all()
+        self.nameBox.add(name)
+        self.statusBox.add(box)
+        builder.haveSomeWidgets([name, status, box])
+    
+class R2Builder(Builder):
+    def start(self):
+        self.nameSquare.set_text(self.name)
+        self.statusSquare.set_text("???")
+        self.subscribe()
+    def haveSomeWidgets(self, widgets):
+        self.nameSquare, self.statusSquare, self.statusBox = widgets
+
+    def remote_newLastBuildStatus(self, event):
+        color = None
+        if event:
+            text = "\n".join(event.text)
+            color = event.color
+        else:
+            text = "none"
+        self.statusSquare.set_text(text)
+        if color:
+            print "color", color
+            self.statusBox.modify_bg(gtk.STATE_NORMAL,
+                                     gtk.gdk.color_parse(color))
+
+    def remote_currentlyOffline(self):
+        self.statusSquare.set_text("offline")
+    def remote_currentlyIdle(self):
+        self.statusSquare.set_text("idle")
+    def remote_currentlyWaiting(self, seconds):
+        self.statusSquare.set_text("waiting")
+    def remote_currentlyInterlocked(self):
+        self.statusSquare.set_text("interlocked")
+    def remote_currentlyBuilding(self, eta):
+        self.statusSquare.set_text("building")
+
+
+class CompactRow(Pane):
+    def __init__(self):
+        Pane.__init__(self)
+        self.widget = gtk.VBox(gtk.FALSE, 3)
+        self.nameBox = gtk.HBox(gtk.TRUE, 2)
+        self.lastBuildBox = gtk.HBox(gtk.TRUE, 2)
+        self.statusBox = gtk.HBox(gtk.TRUE, 2)
+        self.widget.add(self.nameBox)
+        self.widget.add(self.lastBuildBox)
+        self.widget.add(self.statusBox)
+        self.widget.show_all()
+        self.builders = []
+        
+    def getWidget(self):
+        return self.widget
+        
+    def addBuilder(self, builder):
+        self.builders.append(builder)
+
+        name = gtk.Label(builder.name)
+        name.show()
+        self.nameBox.add(name)
+
+        last = gtk.Label('??')
+        last.set_size_request(64,64)
+        lastbox = gtk.EventBox()
+        lastbox.add(last)
+        lastbox.show_all()
+        self.lastBuildBox.add(lastbox)
+
+        status = gtk.Label('??')
+        status.set_size_request(64,64)
+        statusbox = gtk.EventBox()
+        statusbox.add(status)
+        statusbox.show_all()
+        self.statusBox.add(statusbox)
+
+        builder.haveSomeWidgets([name, last, lastbox, status, statusbox])
+
+    def removeBuilder(self, name, builder):
+        self.nameBox.remove(builder.nameSquare)
+        self.lastBuildBox.remove(builder.lastBuildBox)
+        self.statusBox.remove(builder.statusBox)
+        self.builders.remove(builder)
+    
+class CompactBuilder(Builder):
+    def setup(self):
+        self.timer = None
+        self.text = []
+        self.eta = None
+    def start(self):
+        self.nameSquare.set_text(self.name)
+        self.statusSquare.set_text("???")
+        self.subscribe()
+    def haveSomeWidgets(self, widgets):
+        (self.nameSquare,
+         self.lastBuildSquare, self.lastBuildBox,
+         self.statusSquare, self.statusBox) = widgets
+        
+    def remote_currentlyOffline(self):
+        self.eta = None
+        self.stopTimer()
+        self.statusSquare.set_text("offline")
+        self.statusBox.modify_bg(gtk.STATE_NORMAL,
+                                 gtk.gdk.color_parse("red"))
+    def remote_currentlyIdle(self):
+        self.eta = None
+        self.stopTimer()
+        self.statusSquare.set_text("idle")
+    def remote_currentlyWaiting(self, seconds):
+        self.nextBuild = now() + seconds
+        self.startTimer(self.updateWaiting)
+    def remote_currentlyInterlocked(self):
+        self.stopTimer()
+        self.statusSquare.set_text("interlocked")
+    def startTimer(self, func):
+        # the func must clear self.timer and return gtk.FALSE when the event
+        # has arrived
+        self.stopTimer()
+        self.timer = gtk.timeout_add(1000, func)
+        func()
+    def stopTimer(self):
+        if self.timer:
+            gtk.timeout_remove(self.timer)
+            self.timer = None
+    def updateWaiting(self):
+        when = self.nextBuild
+        if now() < when:
+            next = time.strftime("%H:%M:%S", time.localtime(when))
+            secs = "[%d seconds]" % (when - now())
+            self.statusSquare.set_text("waiting\n%s\n%s" % (next, secs))
+            return gtk.TRUE # restart timer
+        else:
+            # done
+            self.statusSquare.set_text("waiting\n[RSN]")
+            self.timer = None
+            return gtk.FALSE
+
+    def remote_currentlyBuilding(self, eta):
+        self.stopTimer()
+        self.statusSquare.set_text("building")
+        if eta:
+            d = eta.callRemote("subscribe", self, 5)
+
+    def remote_newLastBuildStatus(self, event):
+        color = None
+        if event:
+            text = "\n".join(event.text)
+            color = event.color
+        else:
+            text = "none"
+        if not color: color = "gray"
+        self.lastBuildSquare.set_text(text)
+        self.lastBuildBox.modify_bg(gtk.STATE_NORMAL,
+                                    gtk.gdk.color_parse(color))
+
+    def remote_newEvent(self, event):
+        assert(event.__class__ == GtkUpdatingEvent)
+        self.current = event
+        event.builder = self
+        self.text = event.text
+        if not self.text: self.text = ["idle"]
+        self.eta = None
+        self.stopTimer()
+        self.updateText()
+        color = event.color
+        if not color: color = "gray"
+        self.statusBox.modify_bg(gtk.STATE_NORMAL,
+                                 gtk.gdk.color_parse(color))
+
+    def updateCurrent(self):
+        text = self.current.text
+        if text:
+            self.text = text
+            self.updateText()
+        color = self.current.color
+        if color:
+            self.statusBox.modify_bg(gtk.STATE_NORMAL,
+                                     gtk.gdk.color_parse(color))
+    def updateText(self):
+        etatext = []
+        if self.eta:
+            etatext = [time.strftime("%H:%M:%S", time.localtime(self.eta))]
+            if now() > self.eta:
+                etatext += ["RSN"]
+            else:
+                seconds = self.eta - now()
+                etatext += ["[%d secs]" % seconds]
+        text = "\n".join(self.text + etatext)
+        self.statusSquare.set_text(text)
+    def updateTextTimer(self):
+        self.updateText()
+        return gtk.TRUE # restart timer
+    
+    def remote_progress(self, seconds):
+        if seconds == None:
+            self.eta = None
+        else:
+            self.eta = now() + seconds
+        self.startTimer(self.updateTextTimer)
+        self.updateText()
+    def remote_finished(self, eta):
+        self.eta = None
+        self.stopTimer()
+        self.updateText()
+        eta.callRemote("unsubscribe", self)
+'''
+
+class Box:
+    def __init__(self, text="?"):
+        self.text = text
+        self.box = gtk.EventBox()
+        self.label = gtk.Label(text)
+        self.box.add(self.label)
+        self.box.set_size_request(64,64)
+        self.timer = None
+
+    def getBox(self):
+        return self.box
+
+    def setText(self, text):
+        self.text = text
+        self.label.set_text(text)
+
+    def setColor(self, color):
+        if not color:
+            return
+        self.box.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(color))
+
+    def setETA(self, eta):
+        if eta:
+            self.when = now() + eta
+            self.startTimer()
+        else:
+            self.stopTimer()
+
+    def startTimer(self):
+        self.stopTimer()
+        self.timer = gobject.timeout_add(1000, self.update)
+        self.update()
+
+    def stopTimer(self):
+        if self.timer:
+            gobject.source_remove(self.timer)
+            self.timer = None
+        self.label.set_text(self.text)
+
+    def update(self):
+        if now() < self.when:
+            next = time.strftime("%H:%M:%S", time.localtime(self.when))
+            secs = "[%d secs]" % (self.when - now())
+            self.label.set_text("%s\n%s\n%s" % (self.text, next, secs))
+            return True # restart timer
+        else:
+            # done
+            self.label.set_text("%s\n[soon]\n[overdue]" % (self.text,))
+            self.timer = None
+            return False
+
+
+
+class ThreeRowBuilder:
+    def __init__(self, name, ref):
+        self.name = name
+
+        self.last = Box()
+        self.current = Box()
+        self.step = Box("idle")
+        self.step.setColor("white")
+
+        self.ref = ref
+
+    def getBoxes(self):
+        return self.last.getBox(), self.current.getBox(), self.step.getBox()
+
+    def getLastBuild(self):
+        d = self.ref.callRemote("getLastFinishedBuild")
+        d.addCallback(self.gotLastBuild)
+    def gotLastBuild(self, build):
+        if build:
+            build.callRemote("getText").addCallback(self.gotLastText)
+            build.callRemote("getColor").addCallback(self.gotLastColor)
+
+    def gotLastText(self, text):
+        self.last.setText("\n".join(text))
+    def gotLastColor(self, color):
+        self.last.setColor(color)
+
+    def getState(self):
+        self.ref.callRemote("getState").addCallback(self.gotState)
+    def gotState(self, res):
+        state, ETA, builds = res
+        # state is one of: offline, idle, waiting, interlocked, building
+        # TODO: ETA is going away, you have to look inside the builds to get
+        # that value
+        currentmap = {"offline": "red",
+                      "idle": "white",
+                      "waiting": "yellow",
+                      "interlocked": "yellow",
+                      "building": "yellow",}
+        text = state
+        self.current.setColor(currentmap[state])
+        if ETA is not None:
+            text += "\nETA=%s secs" % ETA
+        self.current.setText(state)
+
+    def buildStarted(self, build):
+        print "[%s] buildStarted" % (self.name,)
+        self.current.setColor("yellow")
+
+    def buildFinished(self, build, results):
+        print "[%s] buildFinished: %s" % (self.name, results)
+        self.gotLastBuild(build)
+        self.current.setColor("white")
+        self.current.stopTimer()
+
+    def buildETAUpdate(self, eta):
+        print "[%s] buildETAUpdate: %s" % (self.name, eta)
+        self.current.setETA(eta)
+
+
+    def stepStarted(self, stepname, step):
+        print "[%s] stepStarted: %s" % (self.name, stepname)
+        self.step.setText(stepname)
+        self.step.setColor("yellow")
+    def stepFinished(self, stepname, step, results):
+        print "[%s] stepFinished: %s %s" % (self.name, stepname, results)
+        self.step.setText("idle")
+        self.step.setColor("white")
+        self.step.stopTimer()
+    def stepETAUpdate(self, stepname, eta):
+        print "[%s] stepETAUpdate: %s %s" % (self.name, stepname, eta)
+        self.step.setETA(eta)
+
+
+class ThreeRowClient(pb.Referenceable):
+    def __init__(self, window):
+        self.window = window
+        self.buildernames = []
+        self.builders = {}
+
+    def connected(self, ref):
+        print "connected"
+        self.ref = ref
+        self.pane = gtk.VBox(False, 2)
+        self.table = gtk.Table(1+3, 1)
+        self.pane.add(self.table)
+        self.window.vb.add(self.pane)
+        self.pane.show_all()
+        ref.callRemote("subscribe", "logs", 5, self)
+
+    def removeTable(self):
+        for child in self.table.get_children():
+            self.table.remove(child)
+        self.pane.remove(self.table)
+
+    def makeTable(self):
+        columns = len(self.builders)
+        self.table = gtk.Table(2, columns)
+        self.pane.add(self.table)
+        for i in range(len(self.buildernames)):
+            name = self.buildernames[i]
+            b = self.builders[name]
+            last,current,step = b.getBoxes()
+            self.table.attach(gtk.Label(name), i, i+1, 0, 1)
+            self.table.attach(last, i, i+1, 1, 2,
+                              xpadding=1, ypadding=1)
+            self.table.attach(current, i, i+1, 2, 3,
+                              xpadding=1, ypadding=1)
+            self.table.attach(step, i, i+1, 3, 4,
+                              xpadding=1, ypadding=1)
+        self.table.show_all()
+
+    def rebuildTable(self):
+        self.removeTable()
+        self.makeTable()
+
+    def remote_builderAdded(self, buildername, builder):
+        print "builderAdded", buildername
+        assert buildername not in self.buildernames
+        self.buildernames.append(buildername)
+
+        b = ThreeRowBuilder(buildername, builder)
+        self.builders[buildername] = b
+        self.rebuildTable()
+        b.getLastBuild()
+        b.getState()
+
+    def remote_builderRemoved(self, buildername):
+        del self.builders[buildername]
+        self.buildernames.remove(buildername)
+        self.rebuildTable()
+
+    def remote_builderChangedState(self, name, state, eta):
+        self.builders[name].gotState((state, eta, None))
+    def remote_buildStarted(self, name, build):
+        self.builders[name].buildStarted(build)
+    def remote_buildFinished(self, name, build, results):
+        self.builders[name].buildFinished(build, results)
+
+    def remote_buildETAUpdate(self, name, build, eta):
+        self.builders[name].buildETAUpdate(eta)
+    def remote_stepStarted(self, name, build, stepname, step):
+        self.builders[name].stepStarted(stepname, step)
+    def remote_stepFinished(self, name, build, stepname, step, results):
+        self.builders[name].stepFinished(stepname, step, results)
+
+    def remote_stepETAUpdate(self, name, build, stepname, step,
+                             eta, expectations):
+        # expectations is a list of (metricname, current_value,
+        # expected_value) tuples, so that we could show individual progress
+        # meters for each metric
+        self.builders[name].stepETAUpdate(stepname, eta)
+
+    def remote_logStarted(self, buildername, build, stepname, step,
+                          logname, log):
+        pass
+
+    def remote_logFinished(self, buildername, build, stepname, step,
+                           logname, log):
+        pass
+
+
+class GtkClient(TextClient):
+    ClientClass = ThreeRowClient
+
+    def __init__(self, master):
+        self.master = master
+
+        w = gtk.Window()
+        self.w = w
+        #w.set_size_request(64,64)
+        w.connect('destroy', lambda win: gtk.main_quit())
+        self.vb = gtk.VBox(False, 2)
+        self.status = gtk.Label("unconnected")
+        self.vb.add(self.status)
+        self.listener = self.ClientClass(self)
+        w.add(self.vb)
+        w.show_all()
+
+    def connected(self, ref):
+        self.status.set_text("connected")
+        TextClient.connected(self, ref)
+
+"""
+    def addBuilder(self, name, builder):
+        Client.addBuilder(self, name, builder)
+        self.pane.addBuilder(builder)
+    def removeBuilder(self, name):
+        self.pane.removeBuilder(name, self.builders[name])
+        Client.removeBuilder(self, name)
+        
+    def startConnecting(self, master):
+        self.master = master
+        Client.startConnecting(self, master)
+        self.status.set_text("connecting to %s.." % master)
+    def connected(self, remote):
+        Client.connected(self, remote)
+        self.status.set_text(self.master)
+        remote.notifyOnDisconnect(self.disconnected)
+    def disconnected(self, remote):
+        self.status.set_text("disconnected, will retry")
+"""
+
+def main():
+    master = "localhost:8007"
+    if len(sys.argv) > 1:
+        master = sys.argv[1]
+    c = GtkClient(master)
+    c.run()
+
+if __name__ == '__main__':
+    main()
+    

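The GtkClient defined above is normally started from the command line and pointed at a buildmaster's status port; main() defaults to localhost:8007. A minimal sketch of equivalent programmatic use, assuming this module is importable as buildbot.clients.gtkPanes:

    # sketch: attach the GTK status display to a master's status port
    from buildbot.clients.gtkPanes import GtkClient

    client = GtkClient("localhost:8007")  # "host:port" of the status gateway
    client.run()                          # enters the gtk/twisted main loop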
Added: vendor/buildbot/current/buildbot/clients/sendchange.py
===================================================================
--- vendor/buildbot/current/buildbot/clients/sendchange.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/clients/sendchange.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,38 @@
+
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.internet import reactor
+
+class Sender:
+    def __init__(self, master, user):
+        self.user = user
+        self.host, self.port = master.split(":")
+        self.port = int(self.port)
+
+    def send(self, branch, revision, comments, files):
+        change = {'who': self.user, 'files': files, 'comments': comments,
+                  'branch': branch, 'revision': revision}
+
+        f = pb.PBClientFactory()
+        d = f.login(credentials.UsernamePassword("change", "changepw"))
+        reactor.connectTCP(self.host, self.port, f)
+        d.addCallback(self.addChange, change)
+        return d
+
+    def addChange(self, remote, change):
+        d = remote.callRemote('addChange', change)
+        d.addCallback(lambda res: remote.broker.transport.loseConnection())
+        return d
+
+    def printSuccess(self, res):
+        print "change sent successfully"
+    def printFailure(self, why):
+        print "change NOT sent"
+        print why
+
+    def stop(self, res):
+        reactor.stop()
+        return res
+
+    def run(self):
+        reactor.run()

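The Sender class above is the client half of the 'pb' change source: it logs in to the master's change port with the built-in 'change'/'changepw' credentials and calls addChange remotely. A minimal usage sketch; the master address, user name, and change details are illustrative:

    # sketch: push one change to a buildmaster's change port
    from buildbot.clients.sendchange import Sender

    s = Sender("localhost:9989", "alice")               # "host:port", user
    d = s.send("trunk", "1234", "fix a typo", ["README"])
    d.addCallbacks(s.printSuccess, s.printFailure)
    d.addBoth(s.stop)                                   # stop the reactor either way
    s.run()                                             # reactor.run()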
Added: vendor/buildbot/current/buildbot/dnotify.py
===================================================================
--- vendor/buildbot/current/buildbot/dnotify.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/dnotify.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,103 @@
+#! /usr/bin/python
+
+# spiv wants this
+
+import fcntl, signal
+
+class DNotify_Handler:
+    def __init__(self):
+        self.watchers = {}
+        self.installed = 0
+    def install(self):
+        if self.installed:
+            return
+        signal.signal(signal.SIGIO, self.fire)
+        self.installed = 1
+    def uninstall(self):
+        if not self.installed:
+            return
+        signal.signal(signal.SIGIO, signal.SIG_DFL)
+        self.installed = 0
+    def add(self, watcher):
+        self.watchers[watcher.fd.fileno()] = watcher
+        self.install()
+    def remove(self, watcher):
+        if self.watchers.has_key(watcher.fd.fileno()):
+            del(self.watchers[watcher.fd.fileno()])
+            if not self.watchers:
+                self.uninstall()
+    def fire(self, signum, frame):
+        # this is the signal handler
+        # without siginfo_t, we must fire them all
+        for watcher in self.watchers.values():
+            watcher.callback()
+            
+class DNotify:
+    DN_ACCESS = fcntl.DN_ACCESS  # a file in the directory was read
+    DN_MODIFY = fcntl.DN_MODIFY  # a file was modified (write,truncate)
+    DN_CREATE = fcntl.DN_CREATE  # a file was created
+    DN_DELETE = fcntl.DN_DELETE  # a file was unlinked
+    DN_RENAME = fcntl.DN_RENAME  # a file was renamed
+    DN_ATTRIB = fcntl.DN_ATTRIB  # a file had attributes changed (chmod,chown)
+
+    handler = [None]
+    
+    def __init__(self, dirname, callback=None,
+                 flags=[DN_MODIFY,DN_CREATE,DN_DELETE,DN_RENAME]):
+
+        """This object watches a directory for changes. The .callback
+        attribute should be set to a function to be run every time something
+        happens to it. Be aware that it will be called more times than you
+        expect."""
+
+        if callback:
+            self.callback = callback
+        else:
+            self.callback = self.fire
+        self.dirname = dirname
+        self.flags = reduce(lambda x, y: x | y, flags) | fcntl.DN_MULTISHOT
+        self.fd = open(dirname, "r")
+        # ideally we would move the notification to something like SIGRTMIN,
+        # (to free up SIGIO) and use sigaction to have the signal handler
+        # receive a structure with the fd number. But python doesn't offer
+        # either.
+        if not self.handler[0]:
+            self.handler[0] = DNotify_Handler()
+        self.handler[0].add(self)
+        fcntl.fcntl(self.fd, fcntl.F_NOTIFY, self.flags)
+    def remove(self):
+        self.handler[0].remove(self)
+        self.fd.close()
+    def fire(self):
+        print self.dirname, "changed!"
+
+def test_dnotify1():
+    d = DNotify(".")
+    while 1:
+        signal.pause()
+
+def test_dnotify2():
+    # create ./foo/, create/delete files in ./ and ./foo/ while this is
+    # running. Notice how both notifiers are fired when anything changes;
+    # this is an unfortunate side-effect of the lack of extended sigaction
+    # support in Python.
+    count = [0]
+    d1 = DNotify(".")
+    def fire1(count=count, d1=d1):
+        print "./ changed!", count[0]
+        count[0] += 1
+        if count[0] > 5:
+            d1.remove()
+            del(d1)
+    # change the callback, since we can't define it until after we have the
+    # dnotify object. Hmm, unless we give the dnotify to the callback.
+    d1.callback = fire1
+    def fire2(): print "foo/ changed!"
+    d2 = DNotify("foo", fire2)
+    while 1:
+        signal.pause()
+        
+    
+if __name__ == '__main__':
+    test_dnotify2()
+    

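DNotify above combines fcntl.F_NOTIFY with a SIGIO handler to watch a directory for changes, so it is Linux-specific. A minimal sketch along the lines of test_dnotify1; the directory name and callback are illustrative:

    # sketch: run a callback whenever files in ./incoming change (Linux only)
    import signal
    from buildbot.dnotify import DNotify

    def changed():
        print "./incoming changed"

    watcher = DNotify("incoming", changed)  # default flags: MODIFY/CREATE/DELETE/RENAME
    while 1:
        signal.pause()                      # the SIGIO handler dispatches to the callback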
Added: vendor/buildbot/current/buildbot/interfaces.py
===================================================================
--- vendor/buildbot/current/buildbot/interfaces.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/interfaces.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,918 @@
+#! /usr/bin/python
+
+"""Interface documentation.
+
+Define the interfaces that are implemented by various buildbot classes.
+"""
+
+from buildbot.twcompat import Interface
+
+# exceptions that can be raised while trying to start a build
+class NoSlaveError(Exception):
+    pass
+class BuilderInUseError(Exception):
+    pass
+class BuildSlaveTooOldError(Exception):
+    pass
+
+class IChangeSource(Interface):
+    """Object which feeds Change objects to the changemaster. When files or
+    directories are changed and the version control system provides some
+    kind of notification, this object should turn it into a Change object
+    and pass it through::
+
+      self.changemaster.addChange(change)
+    """
+
+    def start():
+        """Called when the buildmaster starts. Can be used to establish
+        connections to VC daemons or begin polling."""
+
+    def stop():
+        """Called when the buildmaster shuts down. Connections should be
+        terminated, polling timers should be canceled."""
+
+    def describe():
+        """Should return a string which briefly describes this source. This
+        string will be displayed in an HTML status page."""
+
+class IScheduler(Interface):
+    """I watch for Changes in the source tree and decide when to trigger
+    Builds. I create BuildSet objects and submit them to the BuildMaster. I
+    am a service, and the BuildMaster is always my parent."""
+
+    def addChange(change):
+        """A Change has just been dispatched by one of the ChangeSources.
+        Each Scheduler will receive this Change. I may decide to start a
+        build as a result, or I might choose to ignore it."""
+
+    def listBuilderNames():
+        """Return a list of strings indicating the Builders that this
+        Scheduler might feed."""
+
+    def getPendingBuildTimes():
+        """Return a list of timestamps for any builds that are waiting in the
+        tree-stable-timer queue. This is only relevant for Change-based
+        schedulers, all others can just return an empty list."""
+        # TODO: it might be nice to make this into getPendingBuildSets, which
+        # would let someone subscribe to the buildset being finished.
+        # However, the Scheduler doesn't actually create the buildset until
+        # it gets submitted, so doing this would require some major rework.
+
+class IUpstreamScheduler(Interface):
+    """This marks an IScheduler as being eligible for use as the 'upstream='
+    argument to a buildbot.scheduler.Dependent instance."""
+
+    def subscribeToSuccessfulBuilds(target):
+        """Request that the target callbable be invoked after every
+        successful buildset. The target will be called with a single
+        argument: the SourceStamp used by the successful builds."""
+
+    def listBuilderNames():
+        """Return a list of strings indicating the Builders that this
+        Scheduler might feed."""
+
+class ISourceStamp(Interface):
+    pass
+
+class IEmailSender(Interface):
+    """I know how to send email, and can be used by other parts of the
+    Buildbot to contact developers."""
+    pass
+
+class IEmailLookup(Interface):
+    def getAddress(user):
+        """Turn a User-name string into a valid email address. Either return
+        a string (with an @ in it), None (to indicate that the user cannot
+        be reached by email), or a Deferred which will fire with the same."""
+
+class IStatus(Interface):
+    """I am an object, obtainable from the buildmaster, which can provide
+    status information."""
+
+    def getProjectName():
+        """Return the name of the project that this Buildbot is working
+        for."""
+    def getProjectURL():
+        """Return the URL of this Buildbot's project."""
+    def getBuildbotURL():
+        """Return the URL of the top-most Buildbot status page, or None if
+        this Buildbot does not provide a web status page."""
+    def getURLForThing(thing):
+        """Return the URL of a page which provides information on 'thing',
+        which should be an object that implements one of the status
+        interfaces defined in L{buildbot.interfaces}. Returns None if no
+        suitable page is available (or if no Waterfall is running)."""
+
+    def getSchedulers():
+        """Return a list of ISchedulerStatus objects for all
+        currently-registered Schedulers."""
+
+    def getBuilderNames(categories=None):
+        """Return a list of the names of all current Builders."""
+    def getBuilder(name):
+        """Return the IBuilderStatus object for a given named Builder."""
+    def getSlave(name):
+        """Return the ISlaveStatus object for a given named buildslave."""
+
+    def getBuildSets():
+        """Return a list of active (non-finished) IBuildSetStatus objects."""
+
+    def subscribe(receiver):
+        """Register an IStatusReceiver to receive new status events. The
+        receiver will immediately be sent a set of 'builderAdded' messages
+        for all current builders. It will receive further 'builderAdded' and
+        'builderRemoved' messages as the config file is reloaded and builders
+        come and go. It will also receive 'buildsetSubmitted' messages for
+        all outstanding BuildSets (and each new BuildSet that gets
+        submitted). No additional messages will be sent unless the receiver
+        asks for them by calling .subscribe on the IBuilderStatus objects
+        which accompany the addedBuilder message."""
+
+    def unsubscribe(receiver):
+        """Unregister an IStatusReceiver. No further status messgaes will be
+        delivered."""
+
+class IBuildSetStatus(Interface):
+    """I represent a set of Builds, each run on a separate Builder but all
+    using the same source tree."""
+
+    def getSourceStamp():
+        pass
+    def getReason():
+        pass
+    def getID():
+        """Return the BuildSet's ID string, if any. The 'try' feature uses a
+        random string as a BuildSetID to relate submitted jobs with the
+        resulting BuildSet."""
+    def getResponsibleUsers():
+        pass # not implemented
+    def getInterestedUsers():
+        pass # not implemented
+    def getBuilderNames():
+        """Return a list of the names of all Builders on which this set will
+        do builds."""
+    def getBuildRequests():
+        """Return a list of IBuildRequestStatus objects that represent my
+        component Builds. This list might correspond to the Builders named by
+        getBuilderNames(), but if builder categories are used, or 'Builder
+        Aliases' are implemented, then they may not."""
+    def isFinished():
+        pass
+    def waitUntilSuccess():
+        """Return a Deferred that fires (with this IBuildSetStatus object)
+        when the outcome of the BuildSet is known, i.e., upon the first
+        failure, or after all builds complete successfully."""
+    def waitUntilFinished():
+        """Return a Deferred that fires (with this IBuildSetStatus object)
+        when all builds have finished."""
+    def getResults():
+        pass
+
+class IBuildRequestStatus(Interface):
+    """I represent a request to build a particular set of source code on a
+    particular Builder. These requests may be merged by the time they are
+    finally turned into a Build."""
+
+    def getSourceStamp():
+        pass
+    def getBuilderName():
+        pass
+    def getBuilds():
+        """Return a list of IBuildStatus objects for each Build that has been
+        started in an attempt to satisfy this BuildRequest."""
+
+    def subscribe(observer):
+        """Register a callable that will be invoked (with a single
+        IBuildStatus object) for each Build that is created to satisfy this
+        request. There may be multiple Builds created in an attempt to handle
+        the request: they may be interrupted by the user or abandoned due to
+        a lost slave. The last Build (the one which actually gets to run to
+        completion) is said to 'satisfy' the BuildRequest. The observer will
+        be called once for each of these Builds, both old and new."""
+    def unsubscribe(observer):
+        """Unregister the callable that was registered with subscribe()."""
+
+
+class ISlaveStatus(Interface):
+    def getName():
+        """Return the name of the build slave."""
+
+    def getAdmin():
+        """Return a string with the slave admin's contact data."""
+
+    def getHost():
+        """Return a string with the slave host info."""
+
+    def isConnected():
+        """Return True if the slave is currently online, False if not."""
+
+class ISchedulerStatus(Interface):
+    def getName():
+        """Return the name of this Scheduler (a string)."""
+
+    def getPendingBuildsets():
+        """Return an IBuildSet for all BuildSets that are pending. These
+        BuildSets are waiting for their tree-stable-timers to expire."""
+        # TODO: this is not implemented anywhere
+
+
+class IBuilderStatus(Interface):
+    def getName():
+        """Return the name of this Builder (a string)."""
+
+    def getState():
+        # TODO: this isn't nearly as meaningful as it used to be
+        """Return a tuple (state, builds) for this Builder. 'state' is the
+        so-called 'big-status', indicating overall status (as opposed to
+        which step is currently running). It is a string, one of 'offline',
+        'idle', or 'building'. 'builds' is a list of IBuildStatus objects
+        (possibly empty) representing the currently active builds."""
+
+    def getSlaves():
+        """Return a list of ISlaveStatus objects for the buildslaves that are
+        used by this builder."""
+
+    def getPendingBuilds():
+        """Return an IBuildRequestStatus object for all upcoming builds
+        (those which are ready to go but which are waiting for a buildslave
+        to be available)."""
+
+    def getCurrentBuilds():
+        """Return a list containing an IBuildStatus object for each build
+        currently in progress."""
+        # again, we could probably provide an object for 'waiting' and
+        # 'interlocked' too, but things like the Change list might still be
+        # subject to change
+
+    def getLastFinishedBuild():
+        """Return the IBuildStatus object representing the last finished
+        build, which may be None if the builder has not yet finished any
+        builds."""
+
+    def getBuild(number):
+        """Return an IBuildStatus object for a historical build. Each build
+        is numbered (starting at 0 when the Builder is first added),
+        getBuild(n) will retrieve the Nth such build. getBuild(-n) will
+        retrieve a recent build, with -1 being the most recent build
+        started. If the Builder is idle, this will be the same as
+        getLastFinishedBuild(). If the Builder is active, it will be an
+        unfinished build. This method will return None if the build is no
+        longer available. Older builds are likely to have less information
+        stored: Logs are the first to go, then Steps."""
+
+    def getEvent(number):
+        """Return an IStatusEvent object for a recent Event. Builders
+        connecting and disconnecting are events, as are ping attempts.
+        getEvent(-1) will return the most recent event. Events are numbered,
+        but it probably doesn't make sense to ever do getEvent(+n)."""
+
+    def subscribe(receiver):
+        """Register an IStatusReceiver to receive new status events. The
+        receiver will be given builderChangedState, buildStarted, and
+        buildFinished messages."""
+
+    def unsubscribe(receiver):
+        """Unregister an IStatusReceiver. No further status messgaes will be
+        delivered."""
+
+class IBuildStatus(Interface):
+    """I represent the status of a single Build/BuildRequest. It could be
+    in-progress or finished."""
+
+    def getBuilder():
+        """
+        Return the BuilderStatus that owns this build.
+        
+        @rtype: implementor of L{IBuilderStatus}
+        """
+
+    def isFinished():
+        """Return a boolean. True means the build has finished, False means
+        it is still running."""
+
+    def waitUntilFinished():
+        """Return a Deferred that will fire when the build finishes. If the
+        build has already finished, this deferred will fire right away. The
+        callback is given this IBuildStatus instance as an argument."""
+
+    def getProperty(propname):
+        """Return the value of the build property with the given name."""
+
+    def getReason():
+        """Return a string that indicates why the build was run. 'changes',
+        'forced', and 'periodic' are the most likely values. 'try' will be
+        added in the future."""
+
+    def getSourceStamp():
+        """Return a tuple of (branch, revision, patch) which can be used to
+        re-create the source tree that this build used. 'branch' is a string
+        with a VC-specific meaning, or None to indicate that the checkout
+        step used its default branch. 'revision' is a string, the sort you
+        would pass to 'cvs co -r REVISION'. 'patch' is either None, or a
+        (level, diff) tuple which represents a patch that should be applied
+        with 'patch -pLEVEL < DIFF' from the directory created by the
+        checkout operation.
+
+        This method will return None if the source information is no longer
+        available."""
+        # TODO: it should be possible to expire the patch but still remember
+        # that the build was r123+something.
+
+        # TODO: change this to return the actual SourceStamp instance, and
+        # remove getChanges()
+
+    def getChanges():
+        """Return a list of Change objects which represent which source
+        changes went into the build."""
+
+    def getResponsibleUsers():
+        """Return a list of Users who are to blame for the changes that went
+        into this build. If anything breaks (at least anything that wasn't
+        already broken), blame them. Specifically, this is the set of users
+        who were responsible for the Changes that went into this build. Each
+        User is a string, corresponding to their name as known by the VC
+        repository."""
+
+    def getInterestedUsers():
+        """Return a list of Users who will want to know about the results of
+        this build. This is a superset of getResponsibleUsers(): it adds
+        people who are interested in this build but who did not actually
+        make the Changes that went into it (build sheriffs, code-domain
+        owners)."""
+
+    def getNumber():
+        """Within each builder, each Build has a number. Return it."""
+
+    def getPreviousBuild():
+        """Convenience method. Returns None if the previous build is
+        unavailable."""
+
+    def getSteps():
+        """Return a list of IBuildStepStatus objects. For invariant builds
+        (those which always use the same set of Steps), this should always
+        return the complete list; however, some of the steps may not have
+        started yet (step.getTimes()[0] will be None). For variant builds,
+        this may not be complete (asking again later may give you more of
+        them)."""
+
+    def getTimes():
+        """Returns a tuple of (start, end). 'start' and 'end' are the times
+        (seconds since the epoch) when the Build started and finished. If
+        the build is still running, 'end' will be None."""
+
+    # while the build is running, the following methods make sense.
+    # Afterwards they return None
+
+    def getETA():
+        """Returns the number of seconds from now in which the build is
+        expected to finish, or None if we can't make a guess. This guess will
+        be refined over time."""
+
+    def getCurrentStep():
+        """Return an IBuildStepStatus object representing the currently
+        active step."""
+
+    # Once you know the build has finished, the following methods are legal.
+    # Before the build has finished, they all return None.
+
+    def getSlavename():
+        """Return the name of the buildslave which handled this build."""
+
+    def getText():
+        """Returns a list of strings to describe the build. These are
+        intended to be displayed in a narrow column. If more space is
+        available, the caller should join them together with spaces before
+        presenting them to the user."""
+
+    def getColor():
+        """Returns a single string with the color that should be used to
+        display the build. 'green', 'orange', or 'red' are the most likely
+        ones."""
+
+    def getResults():
+        """Return a constant describing the results of the build: one of the
+        constants in buildbot.status.builder: SUCCESS, WARNINGS, or
+        FAILURE."""
+
+    def getLogs():
+        """Return a list of logs that describe the build as a whole. Some
+        steps will contribute their logs, while others are less important
+        and will only be accessible through the IBuildStepStatus objects.
+        Each log is an object which implements the IStatusLog interface."""
+
+    def getTestResults():
+        """Return a dictionary that maps test-name tuples to ITestResult
+        objects. This may return an empty or partially-filled dictionary
+        until the build has completed."""
+
+    # subscription interface
+
+    def subscribe(receiver, updateInterval=None):
+        """Register an IStatusReceiver to receive new status events. The
+        receiver will be given stepStarted and stepFinished messages. If
+        'updateInterval' is non-None, buildETAUpdate messages will be sent
+        every 'updateInterval' seconds."""
+
+    def unsubscribe(receiver):
+        """Unregister an IStatusReceiver. No further status messgaes will be
+        delivered."""
+
+class ITestResult(Interface):
+    """I describe the results of a single unit test."""
+
+    def getName():
+        """Returns a tuple of strings which make up the test name. Tests may
+        be arranged in a hierarchy, so looking for common prefixes may be
+        useful."""
+
+    def getResults():
+        """Returns a constant describing the results of the test: SUCCESS,
+        WARNINGS, FAILURE."""
+
+    def getText():
+        """Returns a list of short strings which describe the results of the
+        test in slightly more detail. Suggested components include
+        'failure', 'error', 'passed', 'timeout'."""
+
+    def getLogs():
+        # in flux, it may be possible to provide more structured information
+        # like python Failure instances
+        """Returns a dictionary of test logs. The keys are strings like
+        'stdout', 'log', 'exceptions'. The values are strings."""
+
+
+class IBuildStepStatus(Interface):
+    """I hold status for a single BuildStep."""
+
+    def getName():
+        """Returns a short string with the name of this step. This string
+        may have spaces in it."""
+
+    def getBuild():
+        """Returns the IBuildStatus object which contains this step."""
+
+    def getTimes():
+        """Returns a tuple of (start, end). 'start' and 'end' are the times
+        (seconds since the epoch) when the Step started and finished. If the
+        step has not yet started, 'start' will be None. If the step is still
+        running, 'end' will be None."""
+
+    def getExpectations():
+        """Returns a list of tuples (name, current, target). Each tuple
+        describes a single axis along which the step's progress can be
+        measured. 'name' is a string which describes the axis itself, like
+        'filesCompiled' or 'tests run' or 'bytes of output'. 'current' is a
+        number with the progress made so far, while 'target' is the value
+        that we expect (based upon past experience) to get to when the build
+        is finished.
+
+        'current' will change over time until the step is finished. It is
+        'None' until the step starts. When the build is finished, 'current'
+        may or may not equal 'target' (which is merely the expectation based
+        upon previous builds)."""
+
+    def getURLs():
+        """Returns a dictionary of URLs. Each key is a link name (a short
+        string, like 'results' or 'coverage'), and each value is a URL. These
+        links will be displayed along with the LogFiles.
+        """
+
+    def getLogs():
+        """Returns a list of IStatusLog objects. If the step has not yet
+        finished, this list may be incomplete (asking again later may give
+        you more of them)."""
+
+
+    def isFinished():
+        """Return a boolean. True means the step has finished, False means it
+        is still running."""
+
+    def waitUntilFinished():
+        """Return a Deferred that will fire when the step finishes. If the
+        step has already finished, this deferred will fire right away. The
+        callback is given this IBuildStepStatus instance as an argument."""
+
+    # while the step is running, the following methods make sense.
+    # Afterwards they return None
+
+    def getETA():
+        """Returns the number of seconds from now in which the step is
+        expected to finish, or None if we can't make a guess. This guess will
+        be refined over time."""
+
+    # Once you know the step has finished, the following methods are legal.
+    # Before the step has finished, they all return None.
+
+    def getText():
+        """Returns a list of strings which describe the step. These are
+        intended to be displayed in a narrow column. If more space is
+        available, the caller should join them together with spaces before
+        presenting them to the user."""
+
+    def getColor():
+        """Returns a single string with the color that should be used to
+        display this step. 'green', 'orange', 'red' and 'yellow' are the
+        most likely ones."""
+
+    def getResults():
+        """Return a tuple describing the results of the step: (result,
+        strings). 'result' is one of the constants in
+        buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, or SKIPPED.
+        'strings' is an optional list of strings that the step wants to
+        append to the overall build's results. These strings are usually
+        more terse than the ones returned by getText(): in particular,
+        successful Steps do not usually contribute any text to the overall
+        build."""
+
+    # subscription interface
+
+    def subscribe(receiver, updateInterval=10):
+        """Register an IStatusReceiver to receive new status events. The
+        receiver will be given logStarted and logFinished messages. It will
+        also be given an ETAUpdate message every 'updateInterval' seconds."""
+
+    def unsubscribe(receiver):
+        """Unregister an IStatusReceiver. No further status messgaes will be
+        delivered."""
+
+class IStatusEvent(Interface):
+    """I represent a Builder Event, something non-Build related that can
+    happen to a Builder."""
+
+    def getTimes():
+        """Returns a tuple of (start, end) like IBuildStepStatus, but end==0
+        indicates that this is a 'point event', which has no duration.
+        SlaveConnect/Disconnect are point events. Ping is not: it starts
+        when requested and ends when the response (positive or negative) is
+        returned"""
+
+    def getText():
+        """Returns a list of strings which describe the event. These are
+        intended to be displayed in a narrow column. If more space is
+        available, the caller should join them together with spaces before
+        presenting them to the user."""
+
+    def getColor():
+        """Returns a single string with the color that should be used to
+        display this event. 'red' and 'yellow' are the most likely ones."""
+
+
+LOG_CHANNEL_STDOUT = 0
+LOG_CHANNEL_STDERR = 1
+LOG_CHANNEL_HEADER = 2
+
+class IStatusLog(Interface):
+    """I represent a single Log, which is a growing list of text items that
+    contains some kind of output for a single BuildStep. I might be finished,
+    in which case this list has stopped growing.
+
+    Each Log has a name, usually something boring like 'log' or 'output'.
+    These names are not guaranteed to be unique, however they are usually
+    chosen to be useful within the scope of a single step (i.e. the Compile
+    step might produce both 'log' and 'warnings'). The name may also have
+    spaces. If you want something more globally meaningful, at least within a
+    given Build, try::
+
+      '%s.%s' % (log.getStep().getName(), log.getName())
+
+    The Log can be presented as plain text, or it can be accessed as a list
+    of items, each of which has a channel indicator (header, stdout, stderr)
+    and a text chunk. An HTML display might represent the interleaved
+    channels with different styles, while a straight download-the-text
+    interface would just want to retrieve a big string.
+
+    The 'header' channel is used by ShellCommands to prepend a note about
+    which command is about to be run ('running command FOO in directory
+    DIR'), and append another note giving the exit code of the process.
+
+    Logs can be streaming: if the Log has not yet finished, you can
+    subscribe to receive new chunks as they are added.
+
+    A ShellCommand will have a Log associated with it that gathers stdout
+    and stderr. Logs may also be created by parsing command output or
+    through other synthetic means (grepping for all the warnings in a
+    compile log, or listing all the test cases that are going to be run).
+    Such synthetic Logs are usually finished as soon as they are created."""
+
+
+    def getName():
+        """Returns a short string with the name of this log, probably 'log'.
+        """
+
+    def getStep():
+        """Returns the IBuildStepStatus which owns this log."""
+        # TODO: can there be non-Step logs?
+
+    def isFinished():
+        """Return a boolean. True means the log has finished and is closed,
+        False means it is still open and new chunks may be added to it."""
+
+    def waitUntilFinished():
+        """Return a Deferred that will fire when the log is closed. If the
+        log has already finished, this deferred will fire right away. The
+        callback is given this IStatusLog instance as an argument."""
+
+    def subscribe(receiver, catchup):
+        """Register an IStatusReceiver to receive chunks (with logChunk) as
+        data is added to the Log. If you use this, you will also want to use
+        waitUntilFinished to find out when the listener can be retired.
+        Subscribing to a closed Log is a no-op.
+
+        If 'catchup' is True, the receiver will immediately be sent a series
+        of logChunk messages to bring it up to date with the partially-filled
+        log. This allows a status client to join a Log already in progress
+        without missing any data. If the Log has already finished, it is too
+        late to catch up: just do getText() instead.
+
+        If the Log is very large, the receiver will be called many times with
+        a lot of data. There is no way to throttle this data. If the receiver
+        is planning on sending the data on to somewhere else, over a narrow
+        connection, you can get a throttleable subscription by using
+        C{subscribeConsumer} instead."""
+
+    def unsubscribe(receiver):
+        """Remove a receiver previously registered with subscribe(). Attempts
+        to remove a receiver which was not previously registered is a no-op.
+        """
+
+    def subscribeConsumer(consumer):
+        """Register an L{IStatusLogConsumer} to receive all chunks of the
+        logfile, including all the old entries and any that will arrive in
+        the future. The consumer will first have their C{registerProducer}
+        method invoked with a reference to an object that can be told
+        C{pauseProducing}, C{resumeProducing}, and C{stopProducing}. Then the
+        consumer's C{writeChunk} method will be called repeatedly with each
+        (channel, text) tuple in the log, starting with the very first. The
+        consumer will be notified with C{finish} when the log has been
+        exhausted (which can only happen when the log is finished). Note that
+        a small amount of data could be written via C{writeChunk} even after
+        C{pauseProducing} has been called.
+
+        To unsubscribe the consumer, use C{producer.stopProducing}."""
+
+    # once the log has finished, the following methods make sense. They can
+    # be called earlier, but they will only return the contents of the log up
+    # to the point at which they were called. You will lose items that are
+    # added later. Use C{subscribe} or C{subscribeConsumer} to avoid missing
+    # anything.
+
+    def hasContents():
+        """Returns True if the LogFile still has contents available. Returns
+        False for logs that have been pruned. Clients should test this before
+        offering to show the contents of any log."""
+
+    def getText():
+        """Return one big string with the contents of the Log. This merges
+        all non-header chunks together."""
+
+    def readlines(channel=LOG_CHANNEL_STDOUT):
+        """Read lines from one channel of the logfile. This returns an
+        iterator that will provide single lines of text (including the
+        trailing newline).
+        """
+
+    def getTextWithHeaders():
+        """Return one big string with the contents of the Log. This merges
+        all chunks (including headers) together."""
+
+    def getChunks():
+        """Generate a list of (channel, text) tuples. 'channel' is a number,
+        0 for stdout, 1 for stderr, 2 for header. (note that stderr is merged
+        into stdout if PTYs are in use)."""
+
+class IStatusLogConsumer(Interface):
+    """I am an object which can be passed to IStatusLog.subscribeConsumer().
+    I represent a target for writing the contents of an IStatusLog. This
+    differs from a regular IStatusReceiver in that it can pause the producer.
+    This makes it more suitable for use in streaming data over network
+    sockets, such as an HTTP request. Note that the consumer can only pause
+    the producer until it has caught up with all the old data. After that
+    point, C{pauseProducing} is ignored and all new output from the log is
+    sent directly to the consumer."""
+
+    def registerProducer(producer, streaming):
+        """A producer is being hooked up to this consumer. The consumer only
+        has to handle a single producer. It should send .pauseProducing and
+        .resumeProducing messages to the producer when it wants to stop or
+        resume the flow of data. 'streaming' will be set to True because the
+        producer is always a PushProducer.
+        """
+
+    def unregisterProducer():
+        """The previously-registered producer has been removed. No further
+        pauseProducing or resumeProducing calls should be made. The consumer
+        should delete its reference to the Producer so it can be released."""
+
+    def writeChunk(chunk):
+        """A chunk (i.e. a tuple of (channel, text)) is being written to the
+        consumer."""
+
+    def finish():
+        """The log has finished sending chunks to the consumer."""
+
+class IStatusReceiver(Interface):
+    """I am an object which can receive build status updates. I may be
+    subscribed to an IStatus, an IBuilderStatus, or an IBuildStatus."""
+
+    def buildsetSubmitted(buildset):
+        """A new BuildSet has been submitted to the buildmaster.
+
+        @type buildset: implementor of L{IBuildSetStatus}
+        """
+
+    def builderAdded(builderName, builder):
+        """
+        A new Builder has just been added. This method may return an
+        IStatusReceiver (probably 'self') which will be subscribed to receive
+        builderChangedState and buildStarted/Finished events.
+
+        @type  builderName: string
+        @type  builder:     L{buildbot.status.builder.BuilderStatus}
+        @rtype: implementor of L{IStatusReceiver}
+        """
+
+    def builderChangedState(builderName, state):
+        """Builder 'builderName' has changed state. The possible values for
+        'state' are 'offline', 'idle', and 'building'."""
+
+    def buildStarted(builderName, build):
+        """Builder 'builderName' has just started a build. The build is an
+        object which implements IBuildStatus, and can be queried for more
+        information.
+
+        This method may return an IStatusReceiver (it could even return
+        'self'). If it does so, stepStarted and stepFinished methods will be
+        invoked on the object for the steps of this one build. This is a
+        convenient way to subscribe to all build steps without missing any.
+        This receiver will automatically be unsubscribed when the build
+        finishes.
+
+        It can also return a tuple of (IStatusReceiver, interval), in which
+        case buildETAUpdate messages are sent every 'interval' seconds, in
+        addition to the stepStarted and stepFinished messages."""
+
+    def buildETAUpdate(build, ETA):
+        """This is a periodic update on the progress this Build has made
+        towards completion."""
+
+    def stepStarted(build, step):
+        """A step has just started. 'step' is the IBuildStepStatus which
+        represents the step: it can be queried for more information.
+
+        This method may return an IStatusReceiver (it could even return
+        'self'). If it does so, logStarted and logFinished methods will be
+        invoked on the object for logs created by this one step. This
+        receiver will be automatically unsubscribed when the step finishes.
+
+        Alternatively, the method may return a tuple of an IStatusReceiver
+        and an integer named 'updateInterval'. In addition to
+        logStarted/logFinished messages, it will also receive stepETAUpdate
+        messages about every updateInterval seconds."""
+
+    def stepETAUpdate(build, step, ETA, expectations):
+        """This is a periodic update on the progress this Step has made
+        towards completion. It gets an ETA (in seconds from the present) of
+        when the step ought to be complete, and a list of expectation tuples
+        (as returned by IBuildStepStatus.getExpectations) with more detailed
+        information."""
+
+    def logStarted(build, step, log):
+        """A new Log has been started, probably because a step has just
+        started running a shell command. 'log' is the IStatusLog object
+        which can be queried for more information.
+
+        This method may return an IStatusReceiver (such as 'self'), in which
+        case the target's logChunk method will be invoked as text is added to
+        the logfile. This receiver will automatically be unsubscribed when the
+        log finishes."""
+
+    def logChunk(build, step, log, channel, text):
+        """Some text has been added to this log. 'channel' is one of
+        LOG_CHANNEL_STDOUT, LOG_CHANNEL_STDERR, or LOG_CHANNEL_HEADER, as
+        defined in IStatusLog.getChunks."""
+
+    def logFinished(build, step, log):
+        """A Log has been closed."""
+
+    def stepFinished(build, step, results):
+        """A step has just finished. 'results' is the result tuple described
+        in IBuildStepStatus.getResults."""
+
+    def buildFinished(builderName, build, results):
+        """
+        A build has just finished. 'results' is the result tuple described
+        in L{IBuildStatus.getResults}.
+
+        @type  builderName: string
+        @type  build:       L{buildbot.status.builder.BuildStatus}
+        @type  results:     tuple
+        """
+
+    def builderRemoved(builderName):
+        """The Builder has been removed."""
+
+class IControl(Interface):
+    def addChange(change):
+        """Add a change to all builders. Each Builder will decide for
+        themselves whether the change is interesting or not, and may initiate
+        a build as a result."""
+
+    def submitBuildSet(buildset):
+        """Submit a BuildSet object, which will eventually be run on all of
+        the builders listed therein."""
+
+    def getBuilder(name):
+        """Retrieve the IBuilderControl object for the given Builder."""
+
+class IBuilderControl(Interface):
+    def requestBuild(request):
+        """Queue a L{buildbot.process.base.BuildRequest} object for later
+        building."""
+
+    def requestBuildSoon(request):
+        """Submit a BuildRequest like requestBuild, but raise a
+        L{buildbot.interfaces.NoSlaveError} if no slaves are currently
+        available, so it cannot be used to queue a BuildRequest in the hopes
+        that a slave will eventually connect. This method is appropriate for
+        use by things like the web-page 'Force Build' button."""
+
+    def resubmitBuild(buildStatus, reason="<rebuild, no reason given>"):
+        """Rebuild something we've already built before. This submits a
+        BuildRequest to our Builder using the same SourceStamp as the earlier
+        build. This has no effect (but may eventually raise an exception) if
+        this Build has not yet finished."""
+
+    def getPendingBuilds():
+        """Return a list of L{IBuildRequestControl} objects for this Builder.
+        Each one corresponds to a pending build that has not yet started (due
+        to a scarcity of build slaves). These upcoming builds can be canceled
+        through the control object."""
+
+    def getBuild(number):
+        """Attempt to return an IBuildControl object for the given build.
+        Returns None if no such object is available. This will only work for
+        the build that is currently in progress: once the build finishes,
+        there is nothing to control anymore."""
+
+    def ping(timeout=30):
+        """Attempt to contact the slave and see if it is still alive. This
+        returns a Deferred which fires with either True (the slave is still
+        alive) or False (the slave did not respond). As a side effect, adds
+        an event to this builder's column in the waterfall display
+        containing the results of the ping."""
+        # TODO: this ought to live in ISlaveControl, maybe with disconnect()
+        # or something. However the event that is emitted is most useful in
+        # the Builder column, so it kinda fits here too.
+
+class IBuildRequestControl(Interface):
+    def subscribe(observer):
+        """Register a callable that will be invoked (with a single
+        IBuildControl object) for each Build that is created to satisfy this
+        request. There may be multiple Builds created in an attempt to handle
+        the request: they may be interrupted by the user or abandoned due to
+        a lost slave. The last Build (the one which actually gets to run to
+        completion) is said to 'satisfy' the BuildRequest. The observer will
+        be called once for each of these Builds, both old and new."""
+    def unsubscribe(observer):
+        """Unregister the callable that was registered with subscribe()."""
+    def cancel():
+        """Remove the build from the pending queue. Has no effect if the
+        build has already been started."""
+
+class IBuildControl(Interface):
+    def getStatus():
+        """Return an IBuildStatus object for the Build that I control."""
+    def stopBuild(reason="<no reason given>"):
+        """Halt the build. This has no effect if the build has already
+        finished."""
+
+class ILogFile(Interface):
+    """This is the internal interface to a LogFile, used by the BuildStep to
+    write data into the log.
+    """
+    def addStdout(data):
+        pass
+    def addStderr(data):
+        pass
+    def addHeader(data):
+        pass
+    def finish():
+        """The process that is feeding the log file has finished, and no
+        further data will be added. This closes the logfile."""
+
+class ILogObserver(Interface):
+    """Objects which provide this interface can be used in a BuildStep to
+    watch the output of a LogFile and parse it incrementally.
+    """
+
+    # internal methods
+    def setStep(step):
+        pass
+    def setLog(log):
+        pass
+
+    # methods called by the LogFile
+    def logChunk(build, step, log, channel, text):
+        pass
+

Added: vendor/buildbot/current/buildbot/locks.py
===================================================================
--- vendor/buildbot/current/buildbot/locks.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/locks.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,148 @@
+# -*- test-case-name: buildbot.test.test_locks -*-
+
+from twisted.python import log
+from twisted.internet import reactor, defer
+from buildbot import util
+
+if False: # for debugging
+    def debuglog(msg):
+        log.msg(msg)
+else:
+    def debuglog(msg):
+        pass
+
+class BaseLock:
+    description = "<BaseLock>"
+
+    def __init__(self, name, maxCount=1):
+        self.name = name
+        self.waiting = []
+        self.owners = []
+        self.maxCount=maxCount
+
+    def __repr__(self):
+        return self.description
+
+    def isAvailable(self):
+        debuglog("%s isAvailable: self.owners=%r" % (self, self.owners))
+        return len(self.owners) < self.maxCount
+
+    def claim(self, owner):
+        debuglog("%s claim(%s)" % (self, owner))
+        assert owner is not None
+        assert len(self.owners) < self.maxCount, "ask for isAvailable() first"
+        self.owners.append(owner)
+        debuglog(" %s is claimed" % (self,))
+
+    def release(self, owner):
+        debuglog("%s release(%s)" % (self, owner))
+        assert owner in self.owners
+        self.owners.remove(owner)
+        # who can we wake up?
+        if self.waiting:
+            d = self.waiting.pop(0)
+            reactor.callLater(0, d.callback, self)
+
+    def waitUntilMaybeAvailable(self, owner):
+        """Fire when the lock *might* be available. The caller will need to
+        check with isAvailable() when the deferred fires. This loose form is
+        used to avoid deadlocks. If we were interested in a stronger form,
+        this would be named 'waitUntilAvailable', and the deferred would fire
+        after the lock had been claimed.
+        """
+        debuglog("%s waitUntilAvailable(%s)" % (self, owner))
+        if self.isAvailable():
+            return defer.succeed(self)
+        d = defer.Deferred()
+        self.waiting.append(d)
+        return d
+
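Because of the loose contract above, a caller has to loop: wait, re-check isAvailable(), and only then claim(). A hypothetical helper (not part of this file, shown only to illustrate the protocol) might look like:

    def acquireLock(lock, owner):
        # Wait until the lock *might* be free, then try to claim it; if another
        # owner won the race, go back to waiting. Returns a Deferred that fires
        # with the lock once it has actually been claimed by 'owner'.
        d = lock.waitUntilMaybeAvailable(owner)
        def _check(lock_):
            if lock_.isAvailable():
                lock_.claim(owner)
                return lock_
            return acquireLock(lock_, owner)
        d.addCallback(_check)
        return d
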
+
+class RealMasterLock(BaseLock):
+    def __init__(self, lockid):
+        BaseLock.__init__(self, lockid.name, lockid.maxCount)
+        self.description = "<MasterLock(%s, %s)>" % (self.name, self.maxCount)
+
+    def getLock(self, slave):
+        return self
+
+class RealSlaveLock:
+    def __init__(self, lockid):
+        self.name = lockid.name
+        self.maxCount = lockid.maxCount
+        self.maxCountForSlave = lockid.maxCountForSlave
+        self.description = "<SlaveLock(%s, %s, %s)>" % (self.name,
+                                                        self.maxCount,
+                                                        self.maxCountForSlave)
+        self.locks = {}
+
+    def __repr__(self):
+        return self.description
+
+    def getLock(self, slavebuilder):
+        slavename = slavebuilder.slave.slavename
+        if not self.locks.has_key(slavename):
+            maxCount = self.maxCountForSlave.get(slavename,
+                                                 self.maxCount)
+            lock = BaseLock(self.name, maxCount)
+            desc = "<SlaveLock(%s, %s)[%s] %d>" % (self.name, maxCount,
+                                                   slavename, id(lock))
+            lock.description = desc
+            self.locks[slavename] = lock
+        return self.locks[slavename]
+
+
+# master.cfg should only reference the following MasterLock and SlaveLock
+# classes. They are identifiers that will be turned into real Locks later,
+# via the BotMaster.getLockByID method.
+
+class MasterLock(util.ComparableMixin):
+    """I am a semaphore that limits the number of simultaneous actions.
+
+    Builds and BuildSteps can declare that they wish to claim me as they run.
+    Only a limited number of such builds or steps will be able to run
+    simultaneously. By default this number is one, but my maxCount parameter
+    can be raised to allow two or three or more operations to happen at the
+    same time.
+
+    Use this to protect a resource that is shared among all builders and all
+    slaves, for example to limit the load on a common SVN repository.
+    """
+
+    compare_attrs = ['name', 'maxCount']
+    lockClass = RealMasterLock
+    def __init__(self, name, maxCount=1):
+        self.name = name
+        self.maxCount = maxCount
+
+class SlaveLock(util.ComparableMixin):
+    """I am a semaphore that limits simultaneous actions on each buildslave.
+
+    Builds and BuildSteps can declare that they wish to claim me as they run.
+    Only a limited number of such builds or steps will be able to run
+    simultaneously on any given buildslave. By default this number is one,
+    but my maxCount parameter can be raised to allow two or three or more
+    operations to happen on a single buildslave at the same time.
+
+    Use this to protect a resource that is shared among all the builds taking
+    place on each slave, for example to limit CPU or memory load on an
+    underpowered machine.
+
+    Each buildslave will get an independent copy of this semaphore. By
+    default each copy will use the same owner count (set with maxCount), but
+    you can provide maxCountForSlave with a dictionary that maps slavename to
+    owner count, to allow some slaves more parallelism than others.
+
+    """
+
+    compare_attrs = ['name', 'maxCount', '_maxCountForSlaveList']
+    lockClass = RealSlaveLock
+    def __init__(self, name, maxCount=1, maxCountForSlave={}):
+        self.name = name
+        self.maxCount = maxCount
+        self.maxCountForSlave = maxCountForSlave
+        # for comparison purposes, turn this dictionary into a stably-sorted
+        # list of tuples
+        self._maxCountForSlaveList = self.maxCountForSlave.items()
+        self._maxCountForSlaveList.sort()
+        self._maxCountForSlaveList = tuple(self._maxCountForSlaveList)
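+
For reference, a master.cfg using these marker classes might declare locks as in the sketch below. The lock names, slave names, and the builder dictionary are placeholders (with 'f' standing in for a BuildFactory defined elsewhere in the config), not part of this import.

    from buildbot.locks import MasterLock, SlaveLock

    c = BuildmasterConfig = {}

    # serialize access to the shared SVN repository, master-wide
    svn_lock = MasterLock("svn")
    # allow one CPU-heavy build per slave, except 'fast-box' which may run two
    cpu_lock = SlaveLock("cpu", maxCountForSlave={"fast-box": 2})

    # f = ... a BuildFactory, defined elsewhere in master.cfg
    c['builders'] = [
        {"name": "full", "slavename": "bot1", "builddir": "full",
         "factory": f, "locks": [svn_lock, cpu_lock]},
    ]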

Added: vendor/buildbot/current/buildbot/manhole.py
===================================================================
--- vendor/buildbot/current/buildbot/manhole.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/manhole.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,265 @@
+
+import os.path
+import binascii, base64
+from twisted.python import log
+from twisted.application import service, strports
+from twisted.cred import checkers, portal
+from twisted.conch import manhole, telnet, manhole_ssh, checkers as conchc
+from twisted.conch.insults import insults
+from twisted.internet import protocol
+
+from buildbot.util import ComparableMixin
+from zope.interface import implements # requires Twisted-2.0 or later
+
+# makeTelnetProtocol and _TelnetRealm are for the TelnetManhole
+
+class makeTelnetProtocol:
+    # this curries the 'portal' argument into a later call to
+    # TelnetTransport()
+    def __init__(self, portal):
+        self.portal = portal
+
+    def __call__(self):
+        auth = telnet.AuthenticatingTelnetProtocol
+        return telnet.TelnetTransport(auth, self.portal)
+
+class _TelnetRealm:
+    implements(portal.IRealm)
+
+    def __init__(self, namespace_maker):
+        self.namespace_maker = namespace_maker
+
+    def requestAvatar(self, avatarId, *interfaces):
+        if telnet.ITelnetProtocol in interfaces:
+            namespace = self.namespace_maker()
+            p = telnet.TelnetBootstrapProtocol(insults.ServerProtocol,
+                                               manhole.ColoredManhole,
+                                               namespace)
+            return (telnet.ITelnetProtocol, p, lambda: None)
+        raise NotImplementedError()
+
+
+class chainedProtocolFactory:
+    # this curries the 'namespace' argument into a later call to
+    # insults.ServerProtocol()
+    def __init__(self, namespace):
+        self.namespace = namespace
+    
+    def __call__(self):
+        return insults.ServerProtocol(manhole.ColoredManhole, self.namespace)
+
+class AuthorizedKeysChecker(conchc.SSHPublicKeyDatabase):
+    """Accept connections using SSH keys from a given file.
+
+    SSHPublicKeyDatabase takes the username that the prospective client has
+    requested and attempts to get a ~/.ssh/authorized_keys file for that
+    username. This requires root access, so it isn't as useful as you'd
+    like.
+
+    Instead, this subclass looks for keys in a single file, given as an
+    argument. This file is typically kept in the buildmaster's basedir. The
+    file should have 'ssh-dss ....' lines in it, just like authorized_keys.
+    """
+
+    def __init__(self, authorized_keys_file):
+        self.authorized_keys_file = os.path.expanduser(authorized_keys_file)
+
+    def checkKey(self, credentials):
+        f = open(self.authorized_keys_file)
+        for l in f.readlines():
+            l2 = l.split()
+            if len(l2) < 2:
+                continue
+            try:
+                if base64.decodestring(l2[1]) == credentials.blob:
+                    return 1
+            except binascii.Error:
+                continue
+        return 0
+
+
+class _BaseManhole(service.MultiService):
+    """This provides remote access to a python interpreter (a read/exec/print
+    loop) embedded in the buildmaster via an internal SSH server. This allows
+    detailed inspection of the buildmaster state. It is of most use to
+    buildbot developers. Connect to this by running an ssh client.
+    """
+
+    def __init__(self, port, checker, using_ssh=True):
+        """
+        @type port: string or int
+        @param port: what port should the Manhole listen on? This is a
+        strports specification string, like 'tcp:12345' or
+        'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+        simple tcp port.
+
+        @type checker: an object providing the
+        L{twisted.cred.checkers.ICredentialsChecker} interface
+        @param checker: the checker used to authenticate the client. The
+        subclasses construct one from a username/password pair or an
+        authorized_keys file. Some useful values are::
+            import twisted.cred.checkers as credc
+            import twisted.conch.checkers as conchc
+            c = credc.AllowAnonymousAccess # completely open
+            c = credc.FilePasswordDB(passwd_filename) # file of name:passwd
+            c = conchc.UNIXPasswordDatabase # getpwnam() (probably /etc/passwd)
+
+        @type using_ssh: bool
+        @param using_ssh: If True, accept SSH connections. If False, accept
+                          regular unencrypted telnet connections.
+        """
+
+        # unfortunately, these don't work unless we're running as root
+        #c = credc.PluggableAuthenticationModulesChecker # PAM
+        #c = conchc.SSHPublicKeyDatabase() # ~/.ssh/authorized_keys
+        # and I can't get UNIXPasswordDatabase to work
+
+        service.MultiService.__init__(self)
+        if type(port) is int:
+            port = "tcp:%d" % port
+        self.port = port # for comparison later
+        self.checker = checker # to maybe compare later
+
+        def makeNamespace():
+            # close over 'self' so we can get access to .parent later
+            master = self.parent
+            namespace = {
+                'master': master,
+                'status': master.getStatus(),
+                }
+            return namespace
+
+        def makeProtocol():
+            namespace = makeNamespace()
+            p = insults.ServerProtocol(manhole.ColoredManhole, namespace)
+            return p
+
+        self.using_ssh = using_ssh
+        if using_ssh:
+            r = manhole_ssh.TerminalRealm()
+            r.chainedProtocolFactory = makeProtocol
+            p = portal.Portal(r, [self.checker])
+            f = manhole_ssh.ConchFactory(p)
+        else:
+            r = _TelnetRealm(makeNamespace)
+            p = portal.Portal(r, [self.checker])
+            f = protocol.ServerFactory()
+            f.protocol = makeTelnetProtocol(p)
+        s = strports.service(self.port, f)
+        s.setServiceParent(self)
+
+
+    def startService(self):
+        service.MultiService.startService(self)
+        if self.using_ssh:
+            via = "via SSH"
+        else:
+            via = "via telnet"
+        log.msg("Manhole listening %s on port %s" % (via, self.port))
+
+
+class TelnetManhole(_BaseManhole, ComparableMixin):
+    """This Manhole accepts unencrypted (telnet) connections, and requires a
+    username and password authorize access. You are encouraged to use the
+    encrypted ssh-based manhole classes instead."""
+
+    compare_attrs = ["port", "username", "password"]
+
+    def __init__(self, port, username, password):
+        """
+        @type port: string or int
+        @param port: what port should the Manhole listen on? This is a
+        strports specification string, like 'tcp:12345' or
+        'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+        simple tcp port.
+
+        @param username:
+        @param password: username= and password= form a pair of strings to
+                         use when authenticating the remote user.
+        """
+
+        self.username = username
+        self.password = password
+
+        c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
+        c.addUser(username, password)
+
+        _BaseManhole.__init__(self, port, c, using_ssh=False)
+
+class PasswordManhole(_BaseManhole, ComparableMixin):
+    """This Manhole accepts encrypted (ssh) connections, and requires a
+    username and password to authorize access.
+    """
+
+    compare_attrs = ["port", "username", "password"]
+
+    def __init__(self, port, username, password):
+        """
+        @type port: string or int
+        @param port: what port should the Manhole listen on? This is a
+        strports specification string, like 'tcp:12345' or
+        'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+        simple tcp port.
+
+        @param username:
+        @param password: username= and password= form a pair of strings to
+                         use when authenticating the remote user.
+        """
+
+        self.username = username
+        self.password = password
+
+        c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
+        c.addUser(username, password)
+
+        _BaseManhole.__init__(self, port, c)
+
+class AuthorizedKeysManhole(_BaseManhole, ComparableMixin):
+    """This Manhole accepts ssh connections, and requires that the
+    prospective client have an ssh private key that matches one of the public
+    keys in our authorized_keys file. It is created with the name of a file
+    that contains the public keys that we will accept."""
+
+    compare_attrs = ["port", "keyfile"]
+
+    def __init__(self, port, keyfile):
+        """
+        @type port: string or int
+        @param port: what port should the Manhole listen on? This is a
+        strports specification string, like 'tcp:12345' or
+        'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+        simple tcp port.
+
+        @param keyfile: the name of a file (relative to the buildmaster's
+                        basedir) that contains SSH public keys of authorized
+                        users, one per line. This is the exact same format
+                        as used by sshd in ~/.ssh/authorized_keys .
+        """
+
+        # TODO: expanduser this, and make it relative to the buildmaster's
+        # basedir
+        self.keyfile = keyfile
+        c = AuthorizedKeysChecker(keyfile)
+        _BaseManhole.__init__(self, port, c)
+
+class ArbitraryCheckerManhole(_BaseManhole, ComparableMixin):
+    """This Manhole accepts ssh connections, but uses an arbitrary
+    user-supplied 'checker' object to perform authentication."""
+
+    compare_attrs = ["port", "checker"]
+
+    def __init__(self, port, checker):
+        """
+        @type port: string or int
+        @param port: what port should the Manhole listen on? This is a
+        strports specification string, like 'tcp:12345' or
+        'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+        simple tcp port.
+
+        @param checker: an instance of a twisted.cred 'checker' which will
+                        perform authentication
+        """
+
+        _BaseManhole.__init__(self, port, checker)
+
+
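For reference, a master.cfg would typically enable one of these via the c['manhole'] key (recognized by master.py below). The port, credentials, and key-file name in this sketch are placeholders, not part of the imported source.

    from buildbot.manhole import PasswordManhole, AuthorizedKeysManhole

    # SSH manhole on the loopback interface, guarded by a username/password
    # (c is the BuildmasterConfig dictionary):
    c['manhole'] = PasswordManhole("tcp:9999:interface=127.0.0.1",
                                   "admin", "passwd")

    # or: accept any SSH key listed (one 'ssh-dss ...' line per key) in a file
    # kept in the buildmaster's basedir:
    #c['manhole'] = AuthorizedKeysManhole(9999, "manhole_keys")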

Added: vendor/buildbot/current/buildbot/master.py
===================================================================
--- vendor/buildbot/current/buildbot/master.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/master.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,1027 @@
+# -*- test-case-name: buildbot.test.test_run -*-
+
+import string, os
+signal = None
+try:
+    import signal
+except ImportError:
+    pass
+try:
+    import cPickle
+    pickle = cPickle
+except ImportError:
+    import pickle
+
+from twisted.python import log, components
+from twisted.internet import defer, reactor
+from twisted.spread import pb
+from twisted.cred import portal, checkers
+from twisted.application import service, strports
+from twisted.persisted import styles
+
+# sibling imports
+from buildbot.twcompat import implements
+from buildbot.util import now
+from buildbot.pbutil import NewCredPerspective
+from buildbot.process.builder import Builder, IDLE
+from buildbot.process.base import BuildRequest
+from buildbot.status.builder import SlaveStatus, Status
+from buildbot.changes.changes import Change, ChangeMaster
+from buildbot.sourcestamp import SourceStamp
+from buildbot import interfaces
+
+########################################
+
+
+
+
+class BotPerspective(NewCredPerspective):
+    """This is the master-side representative for a remote buildbot slave.
+    There is exactly one for each slave described in the config file (the
+    c['bots'] list). When buildbots connect in (.attach), they get a
+    reference to this instance. The BotMaster object is stashed as the
+    .service attribute."""
+
+    def __init__(self, name, botmaster):
+        self.slavename = name
+        self.botmaster = botmaster
+        self.slave_status = SlaveStatus(name)
+        self.slave = None # a RemoteReference to the Bot, when connected
+        self.slave_commands = None
+
+    def updateSlave(self):
+        """Called to add or remove builders after the slave has connected.
+
+        @return: a Deferred that indicates when an attached slave has
+        accepted the new builders and/or released the old ones."""
+        if self.slave:
+            return self.sendBuilderList()
+        return defer.succeed(None)
+
+    def __repr__(self):
+        return "<BotPerspective '%s', builders: %s>" % \
+               (self.slavename,
+                string.join(map(lambda b: b.name, self.builders), ','))
+
+    def attached(self, bot):
+        """This is called when the slave connects.
+
+        @return: a Deferred that fires with a suitable pb.IPerspective to
+                 give to the slave (i.e. 'self')"""
+
+        if self.slave:
+            # uh-oh, we've got a duplicate slave. The most likely
+            # explanation is that the slave is behind a slow link, thinks we
+            # went away, and has attempted to reconnect, so we've got two
+            # "connections" from the same slave, but the previous one is
+            # stale. Give the new one precedence.
+            log.msg("duplicate slave %s replacing old one" % self.slavename)
+
+            # just in case we've got two identically-configured slaves,
+            # report the IP addresses of both so someone can resolve the
+            # squabble
+            tport = self.slave.broker.transport
+            log.msg("old slave was connected from", tport.getPeer())
+            log.msg("new slave is from", bot.broker.transport.getPeer())
+            d = self.disconnect()
+        else:
+            d = defer.succeed(None)
+        # now we go through a sequence of calls, gathering information, then
+        # tell the Botmaster that it can finally give this slave to all the
+        # Builders that care about it.
+
+        # we accumulate slave information in this 'state' dictionary, then
+        # set it atomically if we make it far enough through the process
+        state = {}
+
+        def _log_attachment_on_slave(res):
+            d1 = bot.callRemote("print", "attached")
+            d1.addErrback(lambda why: None)
+            return d1
+        d.addCallback(_log_attachment_on_slave)
+
+        def _get_info(res):
+            d1 = bot.callRemote("getSlaveInfo")
+            def _got_info(info):
+                log.msg("Got slaveinfo from '%s'" % self.slavename)
+                # TODO: info{} might have other keys
+                state["admin"] = info.get("admin")
+                state["host"] = info.get("host")
+            def _info_unavailable(why):
+                # maybe an old slave, doesn't implement remote_getSlaveInfo
+                log.msg("BotPerspective.info_unavailable")
+                log.err(why)
+            d1.addCallbacks(_got_info, _info_unavailable)
+            return d1
+        d.addCallback(_get_info)
+
+        def _get_commands(res):
+            d1 = bot.callRemote("getCommands")
+            def _got_commands(commands):
+                state["slave_commands"] = commands
+            def _commands_unavailable(why):
+                # probably an old slave
+                log.msg("BotPerspective._commands_unavailable")
+                if why.check(AttributeError):
+                    return
+                log.err(why)
+            d1.addCallbacks(_got_commands, _commands_unavailable)
+            return d1
+        d.addCallback(_get_commands)
+
+        def _accept_slave(res):
+            self.slave_status.setAdmin(state.get("admin"))
+            self.slave_status.setHost(state.get("host"))
+            self.slave_status.setConnected(True)
+            self.slave_commands = state.get("slave_commands")
+            self.slave = bot
+            log.msg("bot attached")
+            return self.updateSlave()
+        d.addCallback(_accept_slave)
+
+        # Finally, the slave gets a reference to this BotPerspective. They
+        # receive this later, after we've started using them.
+        d.addCallback(lambda res: self)
+        return d
+
+    def detached(self, mind):
+        self.slave = None
+        self.slave_status.setConnected(False)
+        self.botmaster.slaveLost(self)
+        log.msg("BotPerspective.detached(%s)" % self.slavename)
+
+
+    def disconnect(self):
+        """Forcibly disconnect the slave.
+
+        This severs the TCP connection and returns a Deferred that will fire
+        (with None) when the connection is probably gone.
+
+        If the slave is still alive, they will probably try to reconnect
+        again in a moment.
+
+        This is called in two circumstances. The first is when a slave is
+        removed from the config file. In this case, when they try to
+        reconnect, they will be rejected as an unknown slave. The second is
+        when we wind up with two connections for the same slave, in which
+        case we disconnect the older connection.
+        """
+
+        if not self.slave:
+            return defer.succeed(None)
+        log.msg("disconnecting old slave %s now" % self.slavename)
+
+        # all kinds of teardown will happen as a result of
+        # loseConnection(), but it happens after a reactor iteration or
+        # two. Hook the actual disconnect so we can know when it is safe
+        # to connect the new slave. We have to wait one additional
+        # iteration (with callLater(0)) to make sure the *other*
+        # notifyOnDisconnect handlers have had a chance to run.
+        d = defer.Deferred()
+
+        # notifyOnDisconnect runs the callback with one argument, the
+        # RemoteReference being disconnected.
+        def _disconnected(rref):
+            reactor.callLater(0, d.callback, None)
+        self.slave.notifyOnDisconnect(_disconnected)
+        tport = self.slave.broker.transport
+        # this is the polite way to request that a socket be closed
+        tport.loseConnection()
+        try:
+            # but really we don't want to wait for the transmit queue to
+            # drain. The remote end is unlikely to ACK the data, so we'd
+            # probably have to wait for a (20-minute) TCP timeout.
+            #tport._closeSocket()
+            # however, doing _closeSocket (whether before or after
+            # loseConnection) somehow prevents the notifyOnDisconnect
+            # handlers from being run. Bummer.
+            tport.offset = 0
+            tport.dataBuffer = ""
+            pass
+        except:
+            # however, these hacks are pretty internal, so don't blow up if
+            # they fail or are unavailable
+            log.msg("failed to accelerate the shutdown process")
+            pass
+        log.msg("waiting for slave to finish disconnecting")
+
+        # When this Deferred fires, we'll be ready to accept the new slave
+        return d
+
+    def sendBuilderList(self):
+        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
+        blist = [(b.name, b.builddir) for b in our_builders]
+        d = self.slave.callRemote("setBuilderList", blist)
+        def _sent(slist):
+            dl = []
+            for name, remote in slist.items():
+                # use get() since we might have changed our mind since then
+                b = self.botmaster.builders.get(name)
+                if b:
+                    d1 = b.attached(self, remote, self.slave_commands)
+                    dl.append(d1)
+            return defer.DeferredList(dl)
+        def _set_failed(why):
+            log.msg("BotPerspective.sendBuilderList (%s) failed" % self)
+            log.err(why)
+            # TODO: hang up on them? Without setBuilderList we can't use
+            # them.
+        d.addCallbacks(_sent, _set_failed)
+        return d
+
+    def perspective_keepalive(self):
+        pass
+
+    
+class BotMaster(service.Service):
+
+    """This is the master-side service which manages remote buildbot slaves.
+    It provides them with BotPerspectives, and distributes file change
+    notification messages to them.
+    """
+
+    debug = 0
+
+    def __init__(self):
+        self.builders = {}
+        self.builderNames = []
+        # builders maps Builder names to instances of bb.p.builder.Builder,
+        # which is the master-side object that defines and controls a build.
+        # They are added by calling botmaster.addBuilder() from the startup
+        # code.
+
+        # self.slaves contains a ready BotPerspective instance for each
+        # potential buildslave, i.e. all the ones listed in the config file.
+        # If the slave is connected, self.slaves[slavename].slave will
+        # contain a RemoteReference to their Bot instance. If it is not
+        # connected, that attribute will hold None.
+        self.slaves = {} # maps slavename to BotPerspective
+        self.statusClientService = None
+        self.watchers = {}
+
+        # self.locks holds the real Lock instances
+        self.locks = {}
+
+    # these four are convenience functions for testing
+
+    def waitUntilBuilderAttached(self, name):
+        b = self.builders[name]
+        #if b.slaves:
+        #    return defer.succeed(None)
+        d = defer.Deferred()
+        b.watchers['attach'].append(d)
+        return d
+
+    def waitUntilBuilderDetached(self, name):
+        b = self.builders.get(name)
+        if not b or not b.slaves:
+            return defer.succeed(None)
+        d = defer.Deferred()
+        b.watchers['detach'].append(d)
+        return d
+
+    def waitUntilBuilderFullyDetached(self, name):
+        b = self.builders.get(name)
+        # TODO: this looks too deeply inside the Builder object
+        if not b or not b.slaves:
+            return defer.succeed(None)
+        d = defer.Deferred()
+        b.watchers['detach_all'].append(d)
+        return d
+
+    def waitUntilBuilderIdle(self, name):
+        b = self.builders[name]
+        # TODO: this looks way too deeply inside the Builder object
+        for sb in b.slaves:
+            if sb.state != IDLE:
+                d = defer.Deferred()
+                b.watchers['idle'].append(d)
+                return d
+        return defer.succeed(None)
+
+
+    def addSlave(self, slavename):
+        slave = BotPerspective(slavename, self)
+        self.slaves[slavename] = slave
+
+    def removeSlave(self, slavename):
+        d = self.slaves[slavename].disconnect()
+        del self.slaves[slavename]
+        return d
+
+    def slaveLost(self, bot):
+        for name, b in self.builders.items():
+            if bot.slavename in b.slavenames:
+                b.detached(bot)
+
+    def getBuildersForSlave(self, slavename):
+        return [b
+                for b in self.builders.values()
+                if slavename in b.slavenames]
+
+    def getBuildernames(self):
+        return self.builderNames
+
+    def getBuilders(self):
+        allBuilders = [self.builders[name] for name in self.builderNames]
+        return allBuilders
+
+    def setBuilders(self, builders):
+        self.builders = {}
+        self.builderNames = []
+        for b in builders:
+            for slavename in b.slavenames:
+                # this is actually validated earlier
+                assert slavename in self.slaves
+            self.builders[b.name] = b
+            self.builderNames.append(b.name)
+            b.setBotmaster(self)
+        d = self._updateAllSlaves()
+        return d
+
+    def _updateAllSlaves(self):
+        """Notify all buildslaves about changes in their Builders."""
+        dl = [s.updateSlave() for s in self.slaves.values()]
+        return defer.DeferredList(dl)
+
+    def maybeStartAllBuilds(self):
+        for b in self.builders.values():
+            b.maybeStartBuild()
+
+    def getPerspective(self, slavename):
+        return self.slaves[slavename]
+
+    def shutdownSlaves(self):
+        # TODO: make this into a bot method rather than a builder method
+        for b in self.slaves.values():
+            b.shutdownSlave()
+
+    def stopService(self):
+        for b in self.builders.values():
+            b.builder_status.addPointEvent(["master", "shutdown"])
+            b.builder_status.saveYourself()
+        return service.Service.stopService(self)
+
+    def getLockByID(self, lockid):
+        """Convert a Lock identifier into an actual Lock instance.
+        @param lockid: a locks.MasterLock or locks.SlaveLock instance
+        @return: a locks.RealMasterLock or locks.RealSlaveLock instance
+        """
+        if not lockid in self.locks:
+            self.locks[lockid] = lockid.lockClass(lockid)
+        # if the master.cfg file has changed maxCount= on the lock, the next
+        # time a build is started, they'll get a new RealLock instance. Note
+        # that this requires that MasterLock and SlaveLock (marker) instances
+        # be hashable and that they should compare properly.
+        return self.locks[lockid]
+
+########################################
+
+
+
+class DebugPerspective(NewCredPerspective):
+    def attached(self, mind):
+        return self
+    def detached(self, mind):
+        pass
+
+    def perspective_requestBuild(self, buildername, reason, branch, revision):
+        c = interfaces.IControl(self.master)
+        bc = c.getBuilder(buildername)
+        ss = SourceStamp(branch, revision)
+        br = BuildRequest(reason, ss, buildername)
+        bc.requestBuild(br)
+
+    def perspective_pingBuilder(self, buildername):
+        c = interfaces.IControl(self.master)
+        bc = c.getBuilder(buildername)
+        bc.ping()
+
+    def perspective_fakeChange(self, file, revision=None, who="fakeUser",
+                               branch=None):
+        change = Change(who, [file], "some fake comments\n",
+                        branch=branch, revision=revision)
+        c = interfaces.IControl(self.master)
+        c.addChange(change)
+
+    def perspective_setCurrentState(self, buildername, state):
+        builder = self.botmaster.builders.get(buildername)
+        if not builder: return
+        if state == "offline":
+            builder.statusbag.currentlyOffline()
+        if state == "idle":
+            builder.statusbag.currentlyIdle()
+        if state == "waiting":
+            builder.statusbag.currentlyWaiting(now()+10)
+        if state == "building":
+            builder.statusbag.currentlyBuilding(None)
+    def perspective_reload(self):
+        print "doing reload of the config file"
+        self.master.loadTheConfigFile()
+    def perspective_pokeIRC(self):
+        print "saying something on IRC"
+        from buildbot.status import words
+        for s in self.master:
+            if isinstance(s, words.IRC):
+                bot = s.f
+                for channel in bot.channels:
+                    print " channel", channel
+                    bot.p.msg(channel, "Ow, quit it")
+
+    def perspective_print(self, msg):
+        print "debug", msg
+
+class Dispatcher(styles.Versioned):
+    if implements:
+        implements(portal.IRealm)
+    else:
+        __implements__ = portal.IRealm,
+    persistenceVersion = 2
+
+    def __init__(self):
+        self.names = {}
+
+    def upgradeToVersion1(self):
+        self.master = self.botmaster.parent
+    def upgradeToVersion2(self):
+        self.names = {}
+
+    def register(self, name, afactory):
+        self.names[name] = afactory
+    def unregister(self, name):
+        del self.names[name]
+
+    def requestAvatar(self, avatarID, mind, interface):
+        assert interface == pb.IPerspective
+        afactory = self.names.get(avatarID)
+        if afactory:
+            p = afactory.getPerspective()
+        elif avatarID == "debug":
+            p = DebugPerspective()
+            p.master = self.master
+            p.botmaster = self.botmaster
+        elif avatarID == "statusClient":
+            p = self.statusClientService.getPerspective()
+        else:
+            # it must be one of the buildslaves: no other names will make it
+            # past the checker
+            p = self.botmaster.getPerspective(avatarID)
+
+        if not p:
+            raise ValueError("no perspective for '%s'" % avatarID)
+
+        d = defer.maybeDeferred(p.attached, mind)
+        d.addCallback(self._avatarAttached, mind)
+        return d
+
+    def _avatarAttached(self, p, mind):
+        return (pb.IPerspective, p, lambda p=p,mind=mind: p.detached(mind))
+
+########################################
+
+# service hierarchy:
+#  BuildMaster
+#   BotMaster
+#   ChangeMaster
+#    all IChangeSource objects
+#   StatusClientService
+#   TCPClient(self.ircFactory)
+#   TCPServer(self.slaveFactory) -> dispatcher.requestAvatar
+#   TCPServer(self.site)
+#   UNIXServer(ResourcePublisher(self.site))
+
+
+class BuildMaster(service.MultiService, styles.Versioned):
+    debug = 0
+    persistenceVersion = 3
+    manhole = None
+    debugPassword = None
+    projectName = "(unspecified)"
+    projectURL = None
+    buildbotURL = None
+    change_svc = None
+
+    def __init__(self, basedir, configFileName="master.cfg"):
+        service.MultiService.__init__(self)
+        self.setName("buildmaster")
+        self.basedir = basedir
+        self.configFileName = configFileName
+
+        # the dispatcher is the realm in which all inbound connections are
+        # looked up: slave builders, change notifications, status clients, and
+        # the debug port
+        dispatcher = Dispatcher()
+        dispatcher.master = self
+        self.dispatcher = dispatcher
+        self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
+        # the checker starts with no user/passwd pairs: they are added later
+        p = portal.Portal(dispatcher)
+        p.registerChecker(self.checker)
+        self.slaveFactory = pb.PBServerFactory(p)
+        self.slaveFactory.unsafeTracebacks = True # let them see exceptions
+
+        self.slavePortnum = None
+        self.slavePort = None
+
+        self.botmaster = BotMaster()
+        self.botmaster.setName("botmaster")
+        self.botmaster.setServiceParent(self)
+        dispatcher.botmaster = self.botmaster
+
+        self.status = Status(self.botmaster, self.basedir)
+
+        self.statusTargets = []
+
+        self.bots = []
+        # this ChangeMaster is a dummy, only used by tests. In the real
+        # buildmaster, where the BuildMaster instance is activated
+        # (startService is called) by twistd, this attribute is overwritten.
+        self.useChanges(ChangeMaster())
+
+        self.readConfig = False
+
+    def upgradeToVersion1(self):
+        self.dispatcher = self.slaveFactory.root.portal.realm
+
+    def upgradeToVersion2(self): # post-0.4.3
+        self.webServer = self.webTCPPort
+        del self.webTCPPort
+        self.webDistribServer = self.webUNIXPort
+        del self.webUNIXPort
+        self.configFileName = "master.cfg"
+
+    def upgradeToVersion3(self):
+        # post 0.6.3, solely to deal with the 0.6.3 breakage. Starting with
+        # 0.6.5 I intend to do away with .tap files altogether
+        self.services = []
+        self.namedServices = {}
+        del self.change_svc
+
+    def startService(self):
+        service.MultiService.startService(self)
+        self.loadChanges() # must be done before loading the config file
+        if not self.readConfig:
+            # TODO: consider catching exceptions during this call to
+            # loadTheConfigFile and bailing (reactor.stop) if it fails,
+            # since without a config file we can't do anything except reload
+            # the config file, and it would be nice for the user to discover
+            # this quickly.
+            self.loadTheConfigFile()
+        if signal and hasattr(signal, "SIGHUP"):
+            signal.signal(signal.SIGHUP, self._handleSIGHUP)
+        for b in self.botmaster.builders.values():
+            b.builder_status.addPointEvent(["master", "started"])
+            b.builder_status.saveYourself()
+
+    def useChanges(self, changes):
+        if self.change_svc:
+            # TODO: can return a Deferred
+            self.change_svc.disownServiceParent()
+        self.change_svc = changes
+        self.change_svc.basedir = self.basedir
+        self.change_svc.setName("changemaster")
+        self.dispatcher.changemaster = self.change_svc
+        self.change_svc.setServiceParent(self)
+
+    def loadChanges(self):
+        filename = os.path.join(self.basedir, "changes.pck")
+        try:
+            changes = pickle.load(open(filename, "rb"))
+            styles.doUpgrade()
+        except IOError:
+            log.msg("changes.pck missing, using new one")
+            changes = ChangeMaster()
+        except EOFError:
+            log.msg("corrupted changes.pck, using new one")
+            changes = ChangeMaster()
+        self.useChanges(changes)
+
+    def _handleSIGHUP(self, *args):
+        reactor.callLater(0, self.loadTheConfigFile)
+
+    def getStatus(self):
+        """
+        @rtype: L{buildbot.status.builder.Status}
+        """
+        return self.status
+
+    def loadTheConfigFile(self, configFile=None):
+        if not configFile:
+            configFile = os.path.join(self.basedir, self.configFileName)
+
+        log.msg("loading configuration from %s" % configFile)
+        configFile = os.path.expanduser(configFile)
+
+        try:
+            f = open(configFile, "r")
+        except IOError, e:
+            log.msg("unable to open config file '%s'" % configFile)
+            log.msg("leaving old configuration in place")
+            log.err(e)
+            return
+
+        try:
+            self.loadConfig(f)
+        except:
+            log.msg("error during loadConfig")
+            log.err()
+            log.msg("The new config file is unusable, so I'll ignore it.")
+            log.msg("I will keep using the previous config file instead.")
+        f.close()
+
+    def loadConfig(self, f):
+        """Internal function to load a specific configuration file. Any
+        errors in the file will be signalled by raising an exception.
+
+        @return: a Deferred that will fire (with None) when the configuration
+        changes have been completed. This may involve a round-trip to each
+        buildslave that was involved."""
+
+        localDict = {'basedir': os.path.expanduser(self.basedir)}
+        try:
+            exec f in localDict
+        except:
+            log.msg("error while parsing config file")
+            raise
+
+        try:
+            config = localDict['BuildmasterConfig']
+        except KeyError:
+            log.err("missing config dictionary")
+            log.err("config file must define BuildmasterConfig")
+            raise
+
+        known_keys = "bots sources schedulers builders slavePortnum " + \
+                     "debugPassword manhole " + \
+                     "status projectName projectURL buildbotURL"
+        known_keys = known_keys.split()
+        for k in config.keys():
+            if k not in known_keys:
+                log.msg("unknown key '%s' defined in config dictionary" % k)
+
+        try:
+            # required
+            bots = config['bots']
+            sources = config['sources']
+            schedulers = config['schedulers']
+            builders = config['builders']
+            slavePortnum = config['slavePortnum']
+
+            # optional
+            debugPassword = config.get('debugPassword')
+            manhole = config.get('manhole')
+            status = config.get('status', [])
+            projectName = config.get('projectName')
+            projectURL = config.get('projectURL')
+            buildbotURL = config.get('buildbotURL')
+
+        except KeyError, e:
+            log.msg("config dictionary is missing a required parameter")
+            log.msg("leaving old configuration in place")
+            raise
+
+        # do some validation first
+        for name, passwd in bots:
+            if name in ("debug", "change", "status"):
+                raise KeyError, "reserved name '%s' used for a bot" % name
+        if config.has_key('interlocks'):
+            raise KeyError("c['interlocks'] is no longer accepted")
+
+        assert isinstance(sources, (list, tuple))
+        for s in sources:
+            assert interfaces.IChangeSource(s, None)
+        # this assertion catches c['schedulers'] = Scheduler(), since
+        # Schedulers are service.MultiServices and thus iterable.
+        errmsg = "c['schedulers'] must be a list of Scheduler instances"
+        assert isinstance(schedulers, (list, tuple)), errmsg
+        for s in schedulers:
+            assert interfaces.IScheduler(s, None), errmsg
+        assert isinstance(status, (list, tuple))
+        for s in status:
+            assert interfaces.IStatusReceiver(s, None)
+
+        slavenames = [name for name,pw in bots]
+        buildernames = []
+        dirnames = []
+        for b in builders:
+            if type(b) is tuple:
+                raise ValueError("builder %s must be defined with a dict, "
+                                 "not a tuple" % b[0])
+            if b.has_key('slavename') and b['slavename'] not in slavenames:
+                raise ValueError("builder %s uses undefined slave %s" \
+                                 % (b['name'], b['slavename']))
+            for n in b.get('slavenames', []):
+                if n not in slavenames:
+                    raise ValueError("builder %s uses undefined slave %s" \
+                                     % (b['name'], n))
+            if b['name'] in buildernames:
+                raise ValueError("duplicate builder name %s"
+                                 % b['name'])
+            buildernames.append(b['name'])
+            if b['builddir'] in dirnames:
+                raise ValueError("builder %s reuses builddir %s"
+                                 % (b['name'], b['builddir']))
+            dirnames.append(b['builddir'])
+
+        schedulernames = []
+        for s in schedulers:
+            for b in s.listBuilderNames():
+                assert b in buildernames, \
+                       "%s uses unknown builder %s" % (s, b)
+            if s.name in schedulernames:
+                # TODO: schedulers share a namespace with other Service
+                # children of the BuildMaster node, like status plugins, the
+                # Manhole, the ChangeMaster, and the BotMaster (although most
+                # of these don't have names)
+                msg = ("Schedulers must have unique names, but "
+                       "'%s' was a duplicate" % (s.name,))
+                raise ValueError(msg)
+            schedulernames.append(s.name)
+
+        # assert that all locks used by the Builds and their Steps are
+        # uniquely named.
+        locks = {}
+        for b in builders:
+            for l in b.get('locks', []):
+                if locks.has_key(l.name):
+                    if locks[l.name] is not l:
+                        raise ValueError("Two different locks (%s and %s) "
+                                         "share the name %s"
+                                         % (l, locks[l.name], l.name))
+                else:
+                    locks[l.name] = l
+            # TODO: this will break with any BuildFactory that doesn't use a
+            # .steps list, but I think the verification step is more
+            # important.
+            for s in b['factory'].steps:
+                for l in s[1].get('locks', []):
+                    if locks.has_key(l.name):
+                        if locks[l.name] is not l:
+                            raise ValueError("Two different locks (%s and %s)"
+                                             " share the name %s"
+                                             % (l, locks[l.name], l.name))
+                    else:
+                        locks[l.name] = l
+
+        # slavePortnum is supposed to be a strports specification
+        if type(slavePortnum) is int:
+            slavePortnum = "tcp:%d" % slavePortnum
+
+        # now we're committed to implementing the new configuration, so do
+        # it atomically
+        # TODO: actually, this is spread across a couple of Deferreds, so it
+        # really isn't atomic.
+
+        d = defer.succeed(None)
+
+        self.projectName = projectName
+        self.projectURL = projectURL
+        self.buildbotURL = buildbotURL
+
+        # self.bots: Disconnect any that were attached and removed from the
+        # list. Update self.checker with the new list of passwords,
+        # including debug/change/status.
+        d.addCallback(lambda res: self.loadConfig_Slaves(bots))
+
+        # self.debugPassword
+        if debugPassword:
+            self.checker.addUser("debug", debugPassword)
+            self.debugPassword = debugPassword
+
+        # self.manhole
+        if manhole != self.manhole:
+            # changing
+            if self.manhole:
+                # disownServiceParent may return a Deferred
+                d.addCallback(lambda res: self.manhole.disownServiceParent())
+                def _remove(res):
+                    self.manhole = None
+                    return res
+                d.addCallback(_remove)
+            if manhole:
+                def _add(res):
+                    self.manhole = manhole
+                    manhole.setServiceParent(self)
+                d.addCallback(_add)
+
+        # add/remove self.botmaster.builders to match builders. The
+        # botmaster will handle startup/shutdown issues.
+        d.addCallback(lambda res: self.loadConfig_Builders(builders))
+
+        d.addCallback(lambda res: self.loadConfig_status(status))
+
+        # Schedulers are added after Builders in case they start right away
+        d.addCallback(lambda res: self.loadConfig_Schedulers(schedulers))
+        # and Sources go after Schedulers for the same reason
+        d.addCallback(lambda res: self.loadConfig_Sources(sources))
+
+        # self.slavePort
+        if self.slavePortnum != slavePortnum:
+            if self.slavePort:
+                def closeSlavePort(res):
+                    d1 = self.slavePort.disownServiceParent()
+                    self.slavePort = None
+                    return d1
+                d.addCallback(closeSlavePort)
+            if slavePortnum is not None:
+                def openSlavePort(res):
+                    self.slavePort = strports.service(slavePortnum,
+                                                      self.slaveFactory)
+                    self.slavePort.setServiceParent(self)
+                d.addCallback(openSlavePort)
+                log.msg("BuildMaster listening on port %s" % slavePortnum)
+            self.slavePortnum = slavePortnum
+
+        log.msg("configuration update started")
+        def _done(res):
+            self.readConfig = True
+            log.msg("configuration update complete")
+        d.addCallback(_done)
+        d.addCallback(lambda res: self.botmaster.maybeStartAllBuilds())
+        return d
+
+    def loadConfig_Slaves(self, bots):
+        # set up the Checker with the names and passwords of all valid bots
+        self.checker.users = {} # violates abstraction, oh well
+        for user, passwd in bots:
+            self.checker.addUser(user, passwd)
+        self.checker.addUser("change", "changepw")
+
+        # identify new/old bots
+        old = self.bots; oldnames = [name for name,pw in old]
+        new = bots; newnames = [name for name,pw in new]
+        # removeSlave will hang up on the old bot
+        dl = [self.botmaster.removeSlave(name)
+              for name in oldnames if name not in newnames]
+        [self.botmaster.addSlave(name)
+         for name in newnames if name not in oldnames]
+
+        # all done
+        self.bots = bots
+        return defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
+
+    def loadConfig_Sources(self, sources):
+        log.msg("loadConfig_Sources, change_svc is", self.change_svc,
+                self.change_svc.parent)
+        # shut down any that were removed, start any that were added
+        deleted_sources = [s for s in self.change_svc if s not in sources]
+        added_sources = [s for s in sources if s not in self.change_svc]
+        dl = [self.change_svc.removeSource(s) for s in deleted_sources]
+        def addNewOnes(res):
+            [self.change_svc.addSource(s) for s in added_sources]
+        d = defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
+        d.addCallback(addNewOnes)
+        return d
+
+    def allSchedulers(self):
+        # TODO: when twisted-1.3 compatibility is dropped, switch to the
+        # providedBy form, because it's faster (no actual adapter lookup)
+        return [child for child in self
+                #if interfaces.IScheduler.providedBy(child)]
+                if interfaces.IScheduler(child, None)]
+
+
+    def loadConfig_Schedulers(self, newschedulers):
+        oldschedulers = self.allSchedulers()
+        removed = [s for s in oldschedulers if s not in newschedulers]
+        added = [s for s in newschedulers if s not in oldschedulers]
+        dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
+        def addNewOnes(res):
+            for s in added:
+                s.setServiceParent(self)
+        d = defer.DeferredList(dl, fireOnOneErrback=1)
+        d.addCallback(addNewOnes)
+        return d
+
+    def loadConfig_Builders(self, newBuilderData):
+        somethingChanged = False
+        newList = {}
+        newBuilderNames = []
+        allBuilders = self.botmaster.builders.copy()
+        for data in newBuilderData:
+            name = data['name']
+            newList[name] = data
+            newBuilderNames.append(name)
+
+        # identify all that were removed
+        for oldname in self.botmaster.getBuildernames():
+            if oldname not in newList:
+                log.msg("removing old builder %s" % oldname)
+                del allBuilders[oldname]
+                somethingChanged = True
+                # announce the change
+                self.status.builderRemoved(oldname)
+
+        # everything in newList is either unchanged, changed, or new
+        for name, data in newList.items():
+            old = self.botmaster.builders.get(name)
+            basedir = data['builddir'] # used on both master and slave
+            #name, slave, builddir, factory = data
+            if not old: # new
+                # category added after 0.6.2
+                category = data.get('category', None)
+                log.msg("adding new builder %s for category %s" %
+                        (name, category))
+                statusbag = self.status.builderAdded(name, basedir, category)
+                builder = Builder(data, statusbag)
+                allBuilders[name] = builder
+                somethingChanged = True
+            elif old.compareToSetup(data):
+                # changed: try to minimize the disruption and only modify the
+                # pieces that really changed
+                diffs = old.compareToSetup(data)
+                log.msg("updating builder %s: %s" % (name, "\n".join(diffs)))
+
+                statusbag = old.builder_status
+                statusbag.saveYourself() # seems like a good idea
+                # TODO: if the basedir was changed, we probably need to make
+                # a new statusbag
+                new_builder = Builder(data, statusbag)
+                new_builder.consumeTheSoulOfYourPredecessor(old)
+                # that migrates any retained slavebuilders too
+
+                # point out that the builder was updated. On the Waterfall,
+                # this will appear just after any currently-running builds.
+                statusbag.addPointEvent(["config", "updated"])
+
+                allBuilders[name] = new_builder
+                somethingChanged = True
+            else:
+                # unchanged: leave it alone
+                log.msg("builder %s is unchanged" % name)
+                pass
+
+        if somethingChanged:
+            sortedAllBuilders = [allBuilders[name] for name in newBuilderNames]
+            d = self.botmaster.setBuilders(sortedAllBuilders)
+            return d
+        return None
+
+    def loadConfig_status(self, status):
+        dl = []
+
+        # remove old ones
+        for s in self.statusTargets[:]:
+            if not s in status:
+                log.msg("removing IStatusReceiver", s)
+                d = defer.maybeDeferred(s.disownServiceParent)
+                dl.append(d)
+                self.statusTargets.remove(s)
+        # after those are finished going away, add new ones
+        def addNewOnes(res):
+            for s in status:
+                if not s in self.statusTargets:
+                    log.msg("adding IStatusReceiver", s)
+                    s.setServiceParent(self)
+                    self.statusTargets.append(s)
+        d = defer.DeferredList(dl, fireOnOneErrback=1)
+        d.addCallback(addNewOnes)
+        return d
+
+
+    def addChange(self, change):
+        for s in self.allSchedulers():
+            s.addChange(change)
+
+    def submitBuildSet(self, bs):
+        # determine the set of Builders to use
+        builders = []
+        for name in bs.builderNames:
+            b = self.botmaster.builders.get(name)
+            if b:
+                if b not in builders:
+                    builders.append(b)
+                continue
+            # TODO: add aliases like 'all'
+            raise KeyError("no such builder named '%s'" % name)
+
+        # now tell the BuildSet to create BuildRequests for all those
+        # Builders and submit them
+        bs.start(builders)
+        self.status.buildsetSubmitted(bs.status)
+
+
+class Control:
+    if implements:
+        implements(interfaces.IControl)
+    else:
+        __implements__ = interfaces.IControl,
+
+    def __init__(self, master):
+        self.master = master
+
+    def addChange(self, change):
+        self.master.change_svc.addChange(change)
+
+    def submitBuildSet(self, bs):
+        self.master.submitBuildSet(bs)
+
+    def getBuilder(self, name):
+        b = self.master.botmaster.builders[name]
+        return interfaces.IBuilderControl(b)
+
+components.registerAdapter(Control, BuildMaster, interfaces.IControl)
+
+# so anybody who can get a handle on the BuildMaster can cause a build with:
+#  IControl(master).getBuilder("full-2.3").requestBuild(buildrequest)
+

Added: vendor/buildbot/current/buildbot/pbutil.py
===================================================================
--- vendor/buildbot/current/buildbot/pbutil.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/pbutil.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,147 @@
+
+"""Base classes handy for use with PB clients.
+"""
+
+from twisted.spread import pb
+
+from twisted.spread.pb import PBClientFactory
+from twisted.internet import protocol
+from twisted.python import log
+
+class NewCredPerspective(pb.Avatar):
+    def attached(self, mind):
+        return self
+    def detached(self, mind):
+        pass
+
+class ReconnectingPBClientFactory(PBClientFactory,
+                                  protocol.ReconnectingClientFactory):
+    """Reconnecting client factory for PB brokers.
+
+    Like PBClientFactory, but if the connection fails or is lost, the factory
+    will attempt to reconnect.
+
+    Instead of using f.getRootObject (which gives a Deferred that can only
+    be fired once), override the gotRootObject method.
+
+    Instead of using the newcred f.login (which is also one-shot), call
+    f.startLogin() with the credentials and client, and override the
+    gotPerspective method.
+
+    Instead of using the oldcred f.getPerspective (also one-shot), call
+    f.startGettingPerspective() with the same arguments, and override
+    gotPerspective.
+
+    gotRootObject and gotPerspective will be called each time the object is
+    received (once per successful connection attempt). You will probably want
+    to use obj.notifyOnDisconnect to find out when the connection is lost.
+
+    If an authorization error occurs, failedToGetPerspective() will be
+    invoked.
+
+    To use me, subclass, then hand an instance to a connector (like
+    TCPClient).
+    """
+
+    def __init__(self):
+        PBClientFactory.__init__(self)
+        self._doingLogin = False
+        self._doingGetPerspective = False
+
+    def clientConnectionFailed(self, connector, reason):
+        PBClientFactory.clientConnectionFailed(self, connector, reason)
+        # Twisted-1.3 erroneously abandons the connection on non-UserErrors.
+        # To avoid this bug, don't upcall, and implement the correct version
+        # of the method here.
+        if self.continueTrying:
+            self.connector = connector
+            self.retry()
+
+    def clientConnectionLost(self, connector, reason):
+        PBClientFactory.clientConnectionLost(self, connector, reason,
+                                             reconnecting=True)
+        RCF = protocol.ReconnectingClientFactory
+        RCF.clientConnectionLost(self, connector, reason)
+
+    def clientConnectionMade(self, broker):
+        self.resetDelay()
+        PBClientFactory.clientConnectionMade(self, broker)
+        if self._doingLogin:
+            self.doLogin(self._root)
+        if self._doingGetPerspective:
+            self.doGetPerspective(self._root)
+        self.gotRootObject(self._root)
+
+    def __getstate__(self):
+        # this should get folded into ReconnectingClientFactory
+        d = self.__dict__.copy()
+        d['connector'] = None
+        d['_callID'] = None
+        return d
+
+    # oldcred methods
+
+    def getPerspective(self, *args):
+        raise RuntimeError, "getPerspective is one-shot: use startGettingPerspective instead"
+
+    def startGettingPerspective(self, username, password, serviceName,
+                                perspectiveName=None, client=None):
+        self._doingGetPerspective = True
+        if perspectiveName == None:
+            perspectiveName = username
+        self._oldcredArgs = (username, password, serviceName,
+                             perspectiveName, client)
+
+    def doGetPerspective(self, root):
+        # oldcred getPerspective()
+        (username, password,
+         serviceName, perspectiveName, client) = self._oldcredArgs
+        d = self._cbAuthIdentity(root, username, password)
+        d.addCallback(self._cbGetPerspective,
+                      serviceName, perspectiveName, client)
+        d.addCallbacks(self.gotPerspective, self.failedToGetPerspective)
+
+
+    # newcred methods
+
+    def login(self, *args):
+        raise RuntimeError, "login is one-shot: use startLogin instead"
+
+    def startLogin(self, credentials, client=None):
+        self._credentials = credentials
+        self._client = client
+        self._doingLogin = True
+
+    def doLogin(self, root):
+        # newcred login()
+        d = self._cbSendUsername(root, self._credentials.username,
+                                 self._credentials.password, self._client)
+        d.addCallbacks(self.gotPerspective, self.failedToGetPerspective)
+
+
+    # methods to override
+
+    def gotPerspective(self, perspective):
+        """The remote avatar or perspective (obtained each time this factory
+        connects) is now available."""
+        pass
+
+    def gotRootObject(self, root):
+        """The remote root object (obtained each time this factory connects)
+        is now available. This method will be called each time the connection
+        is established and the object reference is retrieved."""
+        pass
+
+    def failedToGetPerspective(self, why):
+        """The login process failed, most likely because of an authorization
+        failure (bad password), but it is also possible that we lost the new
+        connection before we managed to send our credentials.
+        """
+        log.msg("ReconnectingPBClientFactory.failedToGetPerspective")
+        if why.check(pb.PBConnectionLost):
+            log.msg("we lost the brand-new connection")
+            # retrying might help here, let clientConnectionLost decide
+            return
+        # probably authorization
+        self.stopTrying() # logging in harder won't help
+        log.err(why)
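
The docstring at the top of ReconnectingPBClientFactory describes the intended usage: subclass it, call startLogin() (or startGettingPerspective()) up front, and override gotPerspective(). A minimal newcred sketch along those lines; the host, port, and credentials are placeholders:

    from twisted.internet import reactor
    from twisted.cred import credentials
    from twisted.python import log
    from buildbot.pbutil import ReconnectingPBClientFactory

    class MyClientFactory(ReconnectingPBClientFactory):
        def gotPerspective(self, perspective):
            # called on every successful (re)connection, not just the first one
            log.msg("connected, got perspective %s" % perspective)

    f = MyClientFactory()
    f.startLogin(credentials.UsernamePassword("some-user", "some-password"))
    reactor.connectTCP("buildmaster.example.org", 9989, f)
    reactor.run()

If the credentials are rejected, failedToGetPerspective() stops the retry loop, as implemented above.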

Added: vendor/buildbot/current/buildbot/process/__init__.py
===================================================================

Added: vendor/buildbot/current/buildbot/process/base.py
===================================================================
--- vendor/buildbot/current/buildbot/process/base.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/base.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,574 @@
+# -*- test-case-name: buildbot.test.test_step -*-
+
+import types
+
+from twisted.python import log
+from twisted.python.failure import Failure
+from twisted.internet import reactor, defer, error
+
+from buildbot import interfaces
+from buildbot.twcompat import implements
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
+from buildbot.status.builder import Results, BuildRequestStatus
+from buildbot.status.progress import BuildProgress
+
+class BuildRequest:
+    """I represent a request to a specific Builder to run a single build.
+
+    I have a SourceStamp which specifies what sources I will build. This may
+    specify a specific revision of the source tree (so source.branch,
+    source.revision, and source.patch are used). The .patch attribute is
+    either None or a tuple of (patchlevel, diff), consisting of a number to
+    use in 'patch -pN', and a diff in unified format.
+
+    Alternatively, the SourceStamp may specify a set of Changes to be built,
+    contained in source.changes. In this case, I may be mergeable with other
+    BuildRequests on the same branch.
+
+    I may be part of a BuildSet, in which case I will report status results
+    to it.
+
+    I am paired with a BuildRequestStatus object, to which I feed status
+    information.
+
+    @type source: a L{buildbot.sourcestamp.SourceStamp} instance.   
+    @ivar source: the source code that this BuildRequest uses
+
+    @type reason: string
+    @ivar reason: the reason this Build is being requested. Schedulers
+                  provide this, but for forced builds the user requesting the
+                  build will provide a string.
+
+    @ivar status: the IBuildStatus object which tracks our status
+
+    @ivar submittedAt: a timestamp (seconds since epoch) when this request
+                       was submitted to the Builder. This is used by the CVS
+                       step to compute a checkout timestamp.
+    """
+
+    source = None
+    builder = None
+    startCount = 0 # how many times we have tried to start this build
+
+    if implements:
+        implements(interfaces.IBuildRequestControl)
+    else:
+        __implements__ = interfaces.IBuildRequestControl,
+
+    def __init__(self, reason, source, builderName=None):
+        # TODO: remove the =None on builderName, it is there so I don't have
+        # to change a lot of tests that create BuildRequest objects
+        assert interfaces.ISourceStamp(source, None)
+        self.reason = reason
+        self.source = source
+        self.start_watchers = []
+        self.finish_watchers = []
+        self.status = BuildRequestStatus(source, builderName)
+
+    def canBeMergedWith(self, other):
+        return self.source.canBeMergedWith(other.source)
+
+    def mergeWith(self, others):
+        return self.source.mergeWith([o.source for o in others])
+
+    def mergeReasons(self, others):
+        """Return a reason for the merged build request."""
+        reasons = []
+        for req in [self] + others:
+            if req.reason and req.reason not in reasons:
+                reasons.append(req.reason)
+        return ", ".join(reasons)
+
+    def waitUntilFinished(self):
+        """Get a Deferred that will fire (with a
+        L{buildbot.interfaces.IBuildStatus} instance) when the build
+        finishes."""
+        d = defer.Deferred()
+        self.finish_watchers.append(d)
+        return d
+
+    # these are called by the Builder
+
+    def requestSubmitted(self, builder):
+        # the request has been placed on the queue
+        self.builder = builder
+
+    def buildStarted(self, build, buildstatus):
+        """This is called by the Builder when a Build has been started in the
+        hopes of satifying this BuildRequest. It may be called multiple
+        times, since interrupted builds and lost buildslaves may force
+        multiple Builds to be run until the fate of the BuildRequest is known
+        for certain."""
+        for o in self.start_watchers[:]:
+            # these observers get the IBuildControl
+            o(build)
+        # while these get the IBuildStatus
+        self.status.buildStarted(buildstatus)
+
+    def finished(self, buildstatus):
+        """This is called by the Builder when the BuildRequest has been
+        retired. This happens when its Build has either succeeded (yay!) or
+        failed (boo!). TODO: If it is halted due to an exception (oops!), or
+        some other retryable error, C{finished} will not be called yet."""
+
+        for w in self.finish_watchers:
+            w.callback(buildstatus)
+        self.finish_watchers = []
+
+    # IBuildRequestControl
+
+    def subscribe(self, observer):
+        self.start_watchers.append(observer)
+    def unsubscribe(self, observer):
+        self.start_watchers.remove(observer)
+
+    def cancel(self):
+        """Cancel this request. This can only be successful if the Build has
+        not yet been started.
+
+        @return: a boolean indicating if the cancel was successful."""
+        if self.builder:
+            return self.builder.cancelBuildRequest(self)
+        return False
+
+
+class Build:
+    """I represent a single build by a single bot. Specialized Builders can
+    use subclasses of Build to hold status information unique to those build
+    processes.
+
+    I control B{how} the build proceeds. The actual build is broken up into a
+    series of steps, saved in the .buildSteps[] array as a list of
+    L{buildbot.process.step.BuildStep} objects. Each step is a single remote
+    command, possibly a shell command.
+
+    During the build, I put status information into my C{BuildStatus}
+    gatherer.
+
+    After the build, I go away.
+
+    I can be used by a factory by setting buildClass on
+    L{buildbot.process.factory.BuildFactory}
+
+    @ivar request: the L{BuildRequest} that triggered me
+    @ivar build_status: the L{buildbot.status.builder.BuildStatus} that
+                        collects our status
+    """
+
+    if implements:
+        implements(interfaces.IBuildControl)
+    else:
+        __implements__ = interfaces.IBuildControl,
+
+    workdir = "build"
+    build_status = None
+    reason = "changes"
+    finished = False
+    results = None
+
+    def __init__(self, requests):
+        self.requests = requests
+        for req in self.requests:
+            req.startCount += 1
+        self.locks = []
+        # build a source stamp
+        self.source = requests[0].mergeWith(requests[1:])
+        self.reason = requests[0].mergeReasons(requests[1:])
+
+        #self.abandoned = False
+
+        self.progress = None
+        self.currentStep = None
+        self.slaveEnvironment = {}
+
+    def setBuilder(self, builder):
+        """
+        Set the given builder as our builder.
+
+        @type  builder: L{buildbot.process.builder.Builder}
+        """
+        self.builder = builder
+
+    def setLocks(self, locks):
+        self.locks = locks
+
+    def getSourceStamp(self):
+        return self.source
+
+    def setProperty(self, propname, value):
+        """Set a property on this build. This may only be called after the
+        build has started, so that it has a BuildStatus object where the
+        properties can live."""
+        self.build_status.setProperty(propname, value)
+
+    def getProperty(self, propname):
+        return self.build_status.properties[propname]
+
+
+    def allChanges(self):
+        return self.source.changes
+
+    def allFiles(self):
+        # return a list of all source files that were changed
+        files = []
+        havedirs = 0
+        for c in self.allChanges():
+            for f in c.files:
+                files.append(f)
+            if c.isdir:
+                havedirs = 1
+        return files
+
+    def __repr__(self):
+        return "<Build %s>" % (self.builder.name,)
+
+    def __getstate__(self):
+        d = self.__dict__.copy()
+        if d.has_key('remote'):
+            del d['remote']
+        return d
+
+    def blamelist(self):
+        blamelist = []
+        for c in self.allChanges():
+            if c.who not in blamelist:
+                blamelist.append(c.who)
+        blamelist.sort()
+        return blamelist
+
+    def changesText(self):
+        changetext = ""
+        for c in self.allChanges():
+            changetext += "-" * 60 + "\n\n" + c.asText() + "\n"
+        # consider sorting these by number
+        return changetext
+
+    def setSteps(self, steps):
+        """Set a list of StepFactories, which are generally just class
+        objects which derive from step.BuildStep . These are used to create
+        the Steps themselves when the Build starts (as opposed to when it is
+        first created). By creating the steps later, their __init__ method
+        will have access to things like build.allFiles() ."""
+        self.stepFactories = steps # tuples of (factory, kwargs)
+        for s in steps:
+            pass
+
+
+
+
+    useProgress = True
+
+    def getSlaveCommandVersion(self, command, oldversion=None):
+        return self.slavebuilder.getSlaveCommandVersion(command, oldversion)
+    def getSlaveName(self):
+        return self.slavebuilder.slave.slavename
+
+    def setupStatus(self, build_status):
+        self.build_status = build_status
+        self.setProperty("buildername", self.builder.name)
+        self.setProperty("buildnumber", self.build_status.number)
+        self.setProperty("branch", self.source.branch)
+        self.setProperty("revision", self.source.revision)
+
+    def setupSlaveBuilder(self, slavebuilder):
+        self.slavebuilder = slavebuilder
+        self.slavename = slavebuilder.slave.slavename
+        self.build_status.setSlavename(self.slavename)
+        self.setProperty("slavename", self.slavename)
+
+    def startBuild(self, build_status, expectations, slavebuilder):
+        """This method sets up the build, then starts it by invoking the
+        first Step. It returns a Deferred which will fire when the build
+        finishes. This Deferred is guaranteed to never errback."""
+
+        # we are taking responsibility for watching the connection to the
+        # remote. This responsibility was held by the Builder until our
+        # startBuild was called, and will not return to them until we fire
+        # the Deferred returned by this method.
+
+        log.msg("%s.startBuild" % self)
+        self.setupStatus(build_status)
+        # now that we have a build_status, we can set properties
+        self.setupSlaveBuilder(slavebuilder)
+
+        # convert all locks into their real forms
+        self.locks = [self.builder.botmaster.getLockByID(l)
+                      for l in self.locks]
+        # then narrow SlaveLocks down to the right slave
+        self.locks = [l.getLock(self.slavebuilder) for l in self.locks]
+        self.remote = slavebuilder.remote
+        self.remote.notifyOnDisconnect(self.lostRemote)
+        d = self.deferred = defer.Deferred()
+        def _release_slave(res):
+            self.slavebuilder.buildFinished()
+            return res
+        d.addCallback(_release_slave)
+
+        try:
+            self.setupBuild(expectations) # create .steps
+        except:
+            # the build hasn't started yet, so log the exception as a point
+            # event instead of flunking the build. TODO: associate this
+            # failure with the build instead. this involves doing
+            # self.build_status.buildStarted() from within the exception
+            # handler
+            log.msg("Build.setupBuild failed")
+            log.err(Failure())
+            self.builder.builder_status.addPointEvent(["setupBuild",
+                                                       "exception"],
+                                                      color="purple")
+            self.finished = True
+            self.results = FAILURE
+            self.deferred = None
+            d.callback(self)
+            return d
+
+        self.build_status.buildStarted(self)
+        self.acquireLocks().addCallback(self._startBuild_2)
+        return d
+
+    def acquireLocks(self, res=None):
+        log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
+        if not self.locks:
+            return defer.succeed(None)
+        for lock in self.locks:
+            if not lock.isAvailable():
+                log.msg("Build %s waiting for lock %s" % (self, lock))
+                d = lock.waitUntilMaybeAvailable(self)
+                d.addCallback(self.acquireLocks)
+                return d
+        # all locks are available, claim them all
+        for lock in self.locks:
+            lock.claim(self)
+        return defer.succeed(None)
+
+    def _startBuild_2(self, res):
+        self.startNextStep()
+
+    def setupBuild(self, expectations):
+        # create the actual BuildSteps. If there are any name collisions, we
+        # add a count to the loser until it is unique.
+        self.steps = []
+        self.stepStatuses = {}
+        stepnames = []
+        sps = []
+
+        for factory, args in self.stepFactories:
+            args = args.copy()
+            if not args.has_key("workdir"):
+                args['workdir'] = self.workdir
+            try:
+                step = factory(build=self, **args)
+            except:
+                log.msg("error while creating step, factory=%s, args=%s"
+                        % (factory, args))
+                raise
+            name = step.name
+            count = 1
+            while name in stepnames and count < 100:
+                count += 1
+                name = step.name + "_%d" % count
+            if name in stepnames:
+                raise RuntimeError("duplicate step '%s'" % step.name)
+            step.name = name
+            stepnames.append(name)
+            self.steps.append(step)
+
+            # tell the BuildStatus about the step. This will create a
+            # BuildStepStatus and bind it to the Step.
+            step_status = self.build_status.addStepWithName(name)
+            step.setStepStatus(step_status)
+
+            sp = None
+            if self.useProgress:
+                # XXX: maybe bail if step.progressMetrics is empty? or skip
+                # progress for that one step (i.e. "it is fast"), or have a
+                # separate "variable" flag that makes us bail on progress
+                # tracking
+                sp = step.setupProgress()
+            if sp:
+                sps.append(sp)
+
+        # Create a buildbot.status.progress.BuildProgress object. This is
+        # called once at startup to figure out how to build the long-term
+        # Expectations object, and again at the start of each build to get a
+        # fresh BuildProgress object to track progress for that individual
+        # build. TODO: revisit at-startup call
+
+        if self.useProgress:
+            self.progress = BuildProgress(sps)
+            if self.progress and expectations:
+                self.progress.setExpectationsFrom(expectations)
+
+        # we are now ready to set up our BuildStatus.
+        self.build_status.setSourceStamp(self.source)
+        self.build_status.setReason(self.reason)
+        self.build_status.setBlamelist(self.blamelist())
+        self.build_status.setProgress(self.progress)
+
+        self.results = [] # list of FAILURE, SUCCESS, WARNINGS, SKIPPED
+        self.result = SUCCESS # overall result, may downgrade after each step
+        self.text = [] # list of text string lists (text2)
+
+    def getNextStep(self):
+        """This method is called to obtain the next BuildStep for this build.
+        When it returns None (or raises a StopIteration exception), the build
+        is complete."""
+        if not self.steps:
+            return None
+        return self.steps.pop(0)
+
+    def startNextStep(self):
+        try:
+            s = self.getNextStep()
+        except StopIteration:
+            s = None
+        if not s:
+            return self.allStepsDone()
+        self.currentStep = s
+        d = defer.maybeDeferred(s.startStep, self.remote)
+        d.addCallback(self._stepDone, s)
+        d.addErrback(self.buildException)
+
+    def _stepDone(self, results, step):
+        self.currentStep = None
+        if self.finished:
+            return # build was interrupted, don't keep building
+        terminate = self.stepDone(results, step) # interpret/merge results
+        if terminate:
+            return self.allStepsDone()
+        self.startNextStep()
+
+    def stepDone(self, result, step):
+        """This method is called when the BuildStep completes. It is passed a
+        status object from the BuildStep and is responsible for merging the
+        Step's results into those of the overall Build."""
+
+        terminate = False
+        text = None
+        if type(result) == types.TupleType:
+            result, text = result
+        assert type(result) == type(SUCCESS)
+        log.msg(" step '%s' complete: %s" % (step.name, Results[result]))
+        self.results.append(result)
+        if text:
+            self.text.extend(text)
+        if not self.remote:
+            terminate = True
+        if result == FAILURE:
+            if step.warnOnFailure:
+                if self.result != FAILURE:
+                    self.result = WARNINGS
+            if step.flunkOnFailure:
+                self.result = FAILURE
+            if step.haltOnFailure:
+                self.result = FAILURE
+                terminate = True
+        elif result == WARNINGS:
+            if step.warnOnWarnings:
+                if self.result != FAILURE:
+                    self.result = WARNINGS
+            if step.flunkOnWarnings:
+                self.result = FAILURE
+        elif result == EXCEPTION:
+            self.result = EXCEPTION
+            terminate = True
+        return terminate
+
+    def lostRemote(self, remote=None):
+        # the slave went away. There are several possible reasons for this,
+        # and they aren't necessarily fatal. For now, kill the build, but
+        # TODO: see if we can resume the build when it reconnects.
+        log.msg("%s.lostRemote" % self)
+        self.remote = None
+        if self.currentStep:
+            # this should cause the step to finish.
+            log.msg(" stopping currentStep", self.currentStep)
+            self.currentStep.interrupt(Failure(error.ConnectionLost()))
+
+    def stopBuild(self, reason="<no reason given>"):
+        # the idea here is to let the user cancel a build because, e.g.,
+        # they realized they committed a bug and they don't want to waste
+        # the time building something that they know will fail. Another
+        # reason might be to abandon a stuck build. We want to mark the
+        # build as failed quickly rather than waiting for the slave's
+        # timeout to kill it on its own.
+
+        log.msg(" %s: stopping build: %s" % (self, reason))
+        if self.finished:
+            return
+        # TODO: include 'reason' in this point event
+        self.builder.builder_status.addPointEvent(['interrupt'])
+        self.currentStep.interrupt(reason)
+        if 0:
+            # TODO: maybe let its deferred do buildFinished
+            if self.currentStep and self.currentStep.progress:
+                # XXX: really .fail or something
+                self.currentStep.progress.finish()
+            text = ["stopped", reason]
+            self.buildFinished(text, "red", FAILURE)
+
+    def allStepsDone(self):
+        if self.result == FAILURE:
+            color = "red"
+            text = ["failed"]
+        elif self.result == WARNINGS:
+            color = "orange"
+            text = ["warnings"]
+        elif self.result == EXCEPTION:
+            color = "purple"
+            text = ["exception"]
+        else:
+            color = "green"
+            text = ["build", "successful"]
+        text.extend(self.text)
+        return self.buildFinished(text, color, self.result)
+
+    def buildException(self, why):
+        log.msg("%s.buildException" % self)
+        log.err(why)
+        self.buildFinished(["build", "exception"], "purple", FAILURE)
+
+    def buildFinished(self, text, color, results):
+        """This method must be called when the last Step has completed. It
+        marks the Build as complete and returns the Builder to the 'idle'
+        state.
+
+        It takes three arguments which describe the overall build status:
+        text, color, results. 'results' is one of SUCCESS, WARNINGS, or
+        FAILURE.
+
+        If 'results' is SUCCESS or WARNINGS, we will permit any dependent
+        builds to start. If it is 'FAILURE', those builds will be
+        abandoned."""
+
+        self.finished = True
+        if self.remote:
+            self.remote.dontNotifyOnDisconnect(self.lostRemote)
+        self.results = results
+
+        log.msg(" %s: build finished" % self)
+        self.build_status.setText(text)
+        self.build_status.setColor(color)
+        self.build_status.setResults(results)
+        self.build_status.buildFinished()
+        if self.progress:
+            # XXX: also test a 'timing consistent' flag?
+            log.msg(" setting expectations for next time")
+            self.builder.setExpectations(self.progress)
+        reactor.callLater(0, self.releaseLocks)
+        self.deferred.callback(self)
+        self.deferred = None
+
+    def releaseLocks(self):
+        log.msg("releaseLocks(%s): %s" % (self, self.locks))
+        for lock in self.locks:
+            lock.release(self)
+
+    # IBuildControl
+
+    def getStatus(self):
+        return self.build_status
+
+    # stopBuild is defined earlier
+
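
The Build docstring above notes that specialized behavior is hooked in by pointing buildClass on a BuildFactory at a Build subclass. A minimal sketch of that hook; the subclass name and the property it sets are placeholders, and the factory's step list would be configured as usual in master.cfg:

    from buildbot.process.base import Build
    from buildbot.process import factory

    class AnnotatedBuild(Build):
        def setupBuild(self, expectations):
            Build.setupBuild(self, expectations)
            # build_status already exists here: startBuild() calls setupStatus()
            # before setupBuild(), so properties can be set
            self.setProperty("note", "annotated build")

    f = factory.BuildFactory()
    f.buildClass = AnnotatedBuild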

Added: vendor/buildbot/current/buildbot/process/builder.py
===================================================================
--- vendor/buildbot/current/buildbot/process/builder.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/builder.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,697 @@
+#! /usr/bin/python
+
+import warnings
+
+from twisted.python import log, components
+from twisted.spread import pb
+from twisted.internet import reactor, defer
+
+from buildbot import interfaces, sourcestamp
+from buildbot.twcompat import implements
+from buildbot.status.progress import Expectations
+from buildbot.util import now
+from buildbot.process import base
+
+(ATTACHING, # slave attached, still checking hostinfo/etc
+ IDLE, # idle, available for use
+ PINGING, # build about to start, making sure it is still alive
+ BUILDING, # build is running
+ ) = range(4)
+
+class SlaveBuilder(pb.Referenceable):
+    """I am the master-side representative for one of the
+    L{buildbot.slave.bot.SlaveBuilder} objects that lives in a remote
+    buildbot. When a remote builder connects, I query it for command versions
+    and then make it available to any Builds that are ready to run. """
+
+    def __init__(self):
+        self.ping_watchers = []
+        self.state = ATTACHING
+        self.remote = None
+
+    def setBuilder(self, b):
+        self.builder = b
+        self.builder_name = b.name
+
+    def getSlaveCommandVersion(self, command, oldversion=None):
+        if self.remoteCommands is None:
+            # the slave is 0.5.0 or earlier
+            return oldversion
+        return self.remoteCommands.get(command)
+
+    def isAvailable(self):
+        if self.state == IDLE:
+            return True
+        return False
+
+    def attached(self, slave, remote, commands):
+        self.slave = slave
+        self.remote = remote
+        self.remoteCommands = commands # maps command name to version
+        log.msg("Buildslave %s attached to %s" % (slave.slavename,
+                                                  self.builder_name))
+        d = self.remote.callRemote("setMaster", self)
+        d.addErrback(self._attachFailure, "Builder.setMaster")
+        d.addCallback(self._attached2)
+        return d
+
+    def _attached2(self, res):
+        d = self.remote.callRemote("print", "attached")
+        d.addErrback(self._attachFailure, "Builder.print 'attached'")
+        d.addCallback(self._attached3)
+        return d
+
+    def _attached3(self, res):
+        # now we say they're really attached
+        self.state = IDLE
+        return self
+
+    def _attachFailure(self, why, where):
+        assert isinstance(where, str)
+        log.msg(where)
+        log.err(why)
+        return why
+
+    def detached(self):
+        log.msg("Buildslave %s detached from %s" % (self.slave.slavename,
+                                                    self.builder_name))
+        self.slave = None
+        self.remote = None
+        self.remoteCommands = None
+
+    def buildStarted(self):
+        self.state = BUILDING
+
+    def buildFinished(self):
+        self.state = IDLE
+        reactor.callLater(0, self.builder.maybeStartBuild)
+
+    def ping(self, timeout, status=None):
+        """Ping the slave to make sure it is still there. Returns a Deferred
+        that fires with True if it is.
+
+        @param status: if you point this at a BuilderStatus, a 'pinging'
+                       event will be pushed.
+        """
+
+        self.state = PINGING
+        newping = not self.ping_watchers
+        d = defer.Deferred()
+        self.ping_watchers.append(d)
+        if newping:
+            if status:
+                event = status.addEvent(["pinging"], "yellow")
+                d2 = defer.Deferred()
+                d2.addCallback(self._pong_status, event)
+                self.ping_watchers.insert(0, d2)
+                # I think it will make the tests run smoother if the status
+                # is updated before the ping completes
+            Ping().ping(self.remote, timeout).addCallback(self._pong)
+
+        return d
+
+    def _pong(self, res):
+        watchers, self.ping_watchers = self.ping_watchers, []
+        for d in watchers:
+            d.callback(res)
+
+    def _pong_status(self, res, event):
+        if res:
+            event.text = ["ping", "success"]
+            event.color = "green"
+        else:
+            event.text = ["ping", "failed"]
+            event.color = "red"
+        event.finish()
+
+class Ping:
+    running = False
+    timer = None
+
+    def ping(self, remote, timeout):
+        assert not self.running
+        self.running = True
+        log.msg("sending ping")
+        self.d = defer.Deferred()
+        # TODO: add a distinct 'ping' command on the slave.. using 'print'
+        # for this purpose is kind of silly.
+        remote.callRemote("print", "ping").addCallbacks(self._pong,
+                                                        self._ping_failed,
+                                                        errbackArgs=(remote,))
+
+        # We use either our own timeout or the (long) TCP timeout to detect
+        # silently-missing slaves. This might happen because of a NAT
+        # timeout or a routing loop. If the slave just shuts down (and we
+        # somehow missed the FIN), we should get a "connection refused"
+        # message.
+        self.timer = reactor.callLater(timeout, self._ping_timeout, remote)
+        return self.d
+
+    def _ping_timeout(self, remote):
+        log.msg("ping timeout")
+        # force the BotPerspective to disconnect, since this indicates that
+        # the bot is unreachable.
+        del self.timer
+        remote.broker.transport.loseConnection()
+        # the forcibly-lost connection will now cause the ping to fail
+
+    def _stopTimer(self):
+        if not self.running:
+            return
+        self.running = False
+
+        if self.timer:
+            self.timer.cancel()
+            del self.timer
+
+    def _pong(self, res):
+        log.msg("ping finished: success")
+        self._stopTimer()
+        self.d.callback(True)
+
+    def _ping_failed(self, res, remote):
+        log.msg("ping finished: failure")
+        self._stopTimer()
+        # the slave has some sort of internal error, disconnect them. If we
+        # don't, we'll requeue a build and ping them again right away,
+        # creating a nasty loop.
+        remote.broker.transport.loseConnection()
+        # TODO: except, if they actually did manage to get this far, they'll
+        # probably reconnect right away, and we'll do this game again. Maybe
+        # it would be better to leave them in the PINGING state.
+        self.d.callback(False)
+
+
+class Builder(pb.Referenceable):
+    """I manage all Builds of a given type.
+
+    Each Builder is created by an entry in the config file (the c['builders']
+    list), with a number of parameters.
+
+    One of these parameters is the L{buildbot.process.factory.BuildFactory}
+    object that is associated with this Builder. The factory is responsible
+    for creating new L{Build<buildbot.process.base.Build>} objects. Each
+    Build object defines when and how the build is performed, so a new
+    Factory or Builder should be defined to control this behavior.
+
+    The Builder holds on to a number of L{base.BuildRequest} objects in a
+    list named C{.buildable}. Incoming BuildRequest objects will be added to
+    this list, or (if possible) merged into an existing request. When a slave
+    becomes available, I will use my C{BuildFactory} to turn the request into
+    a new C{Build} object. The C{BuildRequest} is forgotten, the C{Build}
+    goes into C{.building} while it runs. Once the build finishes, I will
+    discard it.
+
+    I maintain a list of available SlaveBuilders, one for each connected
+    slave that the C{slavenames} parameter says we can use. Some of these
+    will be idle, some of them will be busy running builds for me. If there
+    are multiple slaves, I can run multiple builds at once.
+
+    I also manage forced builds, progress expectation (ETA) management, and
+    some status delivery chores.
+
+    I am persisted in C{BASEDIR/BUILDERNAME/builder}, so I can remember how
+    long a build usually takes to run (in my C{expectations} attribute). This
+    pickle also includes the L{buildbot.status.builder.BuilderStatus} object,
+    which remembers the set of historic builds.
+
+    @type buildable: list of L{buildbot.process.base.BuildRequest}
+    @ivar buildable: BuildRequests that are ready to build, but which are
+                     waiting for a buildslave to be available.
+
+    @type building: list of L{buildbot.process.base.Build}
+    @ivar building: Builds that are actively running
+
+    """
+
+    expectations = None # this is created the first time we get a good build
+    START_BUILD_TIMEOUT = 10
+
+    def __init__(self, setup, builder_status):
+        """
+        @type  setup: dict
+        @param setup: builder setup data, as stored in
+                      BuildmasterConfig['builders'].  Contains name,
+                      slavename(s), builddir, factory, locks.
+        @type  builder_status: L{buildbot.status.builder.BuilderStatus}
+        """
+        self.name = setup['name']
+        self.slavenames = []
+        if setup.has_key('slavename'):
+            self.slavenames.append(setup['slavename'])
+        if setup.has_key('slavenames'):
+            self.slavenames.extend(setup['slavenames'])
+        self.builddir = setup['builddir']
+        self.buildFactory = setup['factory']
+        self.locks = setup.get("locks", [])
+        if setup.has_key('periodicBuildTime'):
+            raise ValueError("periodicBuildTime can no longer be defined as"
+                             " part of the Builder: use scheduler.Periodic"
+                             " instead")
+
+        # build/wannabuild slots: Build objects move along this sequence
+        self.buildable = []
+        self.building = []
+
+        # buildslaves which have connected but which are not yet available.
+        # These are always in the ATTACHING state.
+        self.attaching_slaves = []
+
+        # buildslaves at our disposal. Each SlaveBuilder instance has a
+        # .state that is IDLE, PINGING, or BUILDING. "PINGING" is used when a
+        # Build is about to start, to make sure that they're still alive.
+        self.slaves = []
+
+        self.builder_status = builder_status
+        self.builder_status.setSlavenames(self.slavenames)
+
+        # for testing, to help synchronize tests
+        self.watchers = {'attach': [], 'detach': [], 'detach_all': [],
+                         'idle': []}
+
+    def setBotmaster(self, botmaster):
+        self.botmaster = botmaster
+
+    def compareToSetup(self, setup):
+        diffs = []
+        setup_slavenames = []
+        if setup.has_key('slavename'):
+            setup_slavenames.append(setup['slavename'])
+        setup_slavenames.extend(setup.get('slavenames', []))
+        if setup_slavenames != self.slavenames:
+            diffs.append('slavenames changed from %s to %s' \
+                         % (self.slavenames, setup_slavenames))
+        if setup['builddir'] != self.builddir:
+            diffs.append('builddir changed from %s to %s' \
+                         % (self.builddir, setup['builddir']))
+        if setup['factory'] != self.buildFactory: # compare objects
+            diffs.append('factory changed')
+        oldlocks = [(lock.__class__, lock.name)
+                    for lock in setup.get('locks',[])]
+        newlocks = [(lock.__class__, lock.name)
+                    for lock in self.locks]
+        if oldlocks != newlocks:
+            diffs.append('locks changed from %s to %s' % (oldlocks, newlocks))
+        return diffs
+
+    def __repr__(self):
+        return "<Builder '%s' at %d>" % (self.name, id(self))
+
+
+    def submitBuildRequest(self, req):
+        req.submittedAt = now()
+        self.buildable.append(req)
+        req.requestSubmitted(self)
+        self.builder_status.addBuildRequest(req.status)
+        self.maybeStartBuild()
+
+    def cancelBuildRequest(self, req):
+        if req in self.buildable:
+            self.buildable.remove(req)
+            self.builder_status.removeBuildRequest(req.status)
+            return True
+        return False
+
+    def __getstate__(self):
+        d = self.__dict__.copy()
+        # TODO: note that d['buildable'] can contain Deferreds
+        del d['building'] # TODO: move these back to .buildable?
+        del d['slaves']
+        return d
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self.building = []
+        self.slaves = []
+
+    def consumeTheSoulOfYourPredecessor(self, old):
+        """Suck the brain out of an old Builder.
+
+        This takes all the runtime state from an existing Builder and moves
+        it into ourselves. This is used when a Builder is changed in the
+        master.cfg file: the new Builder has a different factory, but we want
+        all the builds that were queued for the old one to get processed by
+        the new one. Any builds which are already running will keep running.
+        The new Builder will get as many of the old SlaveBuilder objects as
+        it wants."""
+
+        log.msg("consumeTheSoulOfYourPredecessor: %s feeding upon %s" %
+                (self, old))
+        # we claim all the pending builds, removing them from the old
+        # Builder's queue. This ensures that the old Builder will not start
+        # any new work.
+        log.msg(" stealing %s buildrequests" % len(old.buildable))
+        self.buildable.extend(old.buildable)
+        old.buildable = []
+
+        # old.building is not migrated: it keeps track of builds which were
+        # in progress in the old Builder. When those builds finish, the old
+        # Builder will be notified, not us. However, since the old
+        # SlaveBuilder will point to us, it is our maybeStartBuild() that
+        # will be triggered.
+        if old.building:
+            self.builder_status.setBigState("building")
+
+        # Our set of slavenames may be different. Steal any of the old
+        # buildslaves that we want to keep using.
+        for sb in old.slaves[:]:
+            if sb.slave.slavename in self.slavenames:
+                log.msg(" stealing buildslave %s" % sb)
+                self.slaves.append(sb)
+                old.slaves.remove(sb)
+                sb.setBuilder(self)
+
+        # old.attaching_slaves:
+        #  these SlaveBuilders are waiting on a sequence of calls:
+        #  remote.setMaster and remote.print . When these two complete,
+        #  old._attached will be fired, which will add a 'connect' event to
+        #  the builder_status and try to start a build. However, we've pulled
+        #  everything out of the old builder's queue, so it will have no work
+        #  to do. The outstanding remote.setMaster/print call will be holding
+        #  the last reference to the old builder, so it will disappear just
+        #  after that response comes back.
+        #
+        #  The BotMaster will ask the slave to re-set their list of Builders
+        #  shortly after this function returns, which will cause our
+        #  attached() method to be fired with a bunch of references to remote
+        #  SlaveBuilders, some of which we already have (by stealing them
+        #  from the old Builder), some of which will be new. The new ones
+        #  will be re-attached.
+
+        #  Therefore, we don't need to do anything about old.attaching_slaves
+
+        return # all done
+
+    def fireTestEvent(self, name, with=None):
+        if with is None:
+            with = self
+        watchers = self.watchers[name]
+        self.watchers[name] = []
+        for w in watchers:
+            reactor.callLater(0, w.callback, with)
+
+    def attached(self, slave, remote, commands):
+        """This is invoked by the BotPerspective when the self.slavename bot
+        registers their builder.
+
+        @type  slave: L{buildbot.master.BotPerspective}
+        @param slave: the BotPerspective that represents the buildslave as a
+                      whole
+        @type  remote: L{twisted.spread.pb.RemoteReference}
+        @param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
+        @type  commands: dict: string -> string, or None
+        @param commands: provides the slave's version of each RemoteCommand
+
+        @rtype:  L{twisted.internet.defer.Deferred}
+        @return: a Deferred that fires (with 'self') when the slave-side
+                 builder is fully attached and ready to accept commands.
+        """
+        for s in self.attaching_slaves + self.slaves:
+            if s.slave == slave:
+                # already attached to them. This is fairly common, since
+                # attached() gets called each time we receive the builder
+                # list from the slave, and we ask for it each time we add or
+                # remove a builder. So if the slave is hosting builders
+                # A,B,C, and the config file changes A, we'll remove A and
+                # re-add it, triggering two builder-list requests, getting
+                # two redundant calls to attached() for B, and another two
+                # for C.
+                #
+                # Therefore, when we see that we're already attached, we can
+                # just ignore it. TODO: build a diagram of the state
+                # transitions here, I'm concerned about sb.attached() failing
+                # and leaving sb.state stuck at 'ATTACHING', and about
+                # the detached() message arriving while there's some
+                # transition pending such that the response to the transition
+                # re-vivifies sb
+                return defer.succeed(self)
+
+        sb = SlaveBuilder()
+        sb.setBuilder(self)
+        self.attaching_slaves.append(sb)
+        d = sb.attached(slave, remote, commands)
+        d.addCallback(self._attached)
+        d.addErrback(self._not_attached, slave)
+        return d
+
+    def _attached(self, sb):
+        # TODO: make this .addSlaveEvent(slave.slavename, ['connect']) ?
+        self.builder_status.addPointEvent(['connect', sb.slave.slavename])
+        self.attaching_slaves.remove(sb)
+        self.slaves.append(sb)
+        reactor.callLater(0, self.maybeStartBuild)
+
+        self.fireTestEvent('attach')
+        return self
+
+    def _not_attached(self, why, slave):
+        # already log.err'ed by SlaveBuilder._attachFailure
+        # TODO: make this .addSlaveEvent?
+        # TODO: remove from self.slaves (except that detached() should get
+        #       run first, right?)
+        self.builder_status.addPointEvent(['failed', 'connect',
+                                           slave.slave.slavename])
+        # TODO: add an HTMLLogFile of the exception
+        self.fireTestEvent('attach', why)
+
+    def detached(self, slave):
+        """This is called when the connection to the bot is lost."""
+        log.msg("%s.detached" % self, slave.slavename)
+        for sb in self.attaching_slaves + self.slaves:
+            if sb.slave == slave:
+                break
+        else:
+            log.msg("WEIRD: Builder.detached(%s) (%s)"
+                    " not in attaching_slaves(%s)"
+                    " or slaves(%s)" % (slave, slave.slavename,
+                                        self.attaching_slaves,
+                                        self.slaves))
+            return
+        if sb.state == BUILDING:
+            # the Build's .lostRemote method (invoked by a notifyOnDisconnect
+            # handler) will cause the Build to be stopped, probably right
+            # after the notifyOnDisconnect that invoked us finishes running.
+
+            # TODO: should failover to a new Build
+            #self.retryBuild(sb.build)
+            pass
+
+        if sb in self.attaching_slaves:
+            self.attaching_slaves.remove(sb)
+        if sb in self.slaves:
+            self.slaves.remove(sb)
+
+        # TODO: make this .addSlaveEvent?
+        self.builder_status.addPointEvent(['disconnect', slave.slavename])
+        sb.detached() # inform the SlaveBuilder that their slave went away
+        self.updateBigStatus()
+        self.fireTestEvent('detach')
+        if not self.slaves:
+            self.fireTestEvent('detach_all')
+
+    def updateBigStatus(self):
+        if not self.slaves:
+            self.builder_status.setBigState("offline")
+        elif self.building:
+            self.builder_status.setBigState("building")
+        else:
+            self.builder_status.setBigState("idle")
+            self.fireTestEvent('idle')
+
+    def maybeStartBuild(self):
+        log.msg("maybeStartBuild %s: %s %s" %
+                (self, self.buildable, self.slaves))
+        if not self.buildable:
+            self.updateBigStatus()
+            return # nothing to do
+        # find the first idle slave
+        for sb in self.slaves:
+            if sb.isAvailable():
+                break
+        else:
+            log.msg("%s: want to start build, but we don't have a remote"
+                    % self)
+            self.updateBigStatus()
+            return
+
+        # there is something to build, and there is a slave on which to build
+        # it. Grab the oldest request, see if we can merge it with anything
+        # else.
+        req = self.buildable.pop(0)
+        self.builder_status.removeBuildRequest(req.status)
+        mergers = []
+        for br in self.buildable[:]:
+            if req.canBeMergedWith(br):
+                self.buildable.remove(br)
+                self.builder_status.removeBuildRequest(br.status)
+                mergers.append(br)
+        requests = [req] + mergers
+
+        # Create a new build from our build factory and set ourself as the
+        # builder.
+        build = self.buildFactory.newBuild(requests)
+        build.setBuilder(self)
+        build.setLocks(self.locks)
+
+        # start it
+        self.startBuild(build, sb)
+
+    def startBuild(self, build, sb):
+        """Start a build on the given slave.
+        @param build: the L{base.Build} to start
+        @param sb: the L{SlaveBuilder} which will host this build
+
+        @return: a Deferred which fires with a
+        L{buildbot.interfaces.IBuildControl} that can be used to stop the
+        Build, or to access a L{buildbot.interfaces.IBuildStatus} which will
+        watch the Build as it runs. """
+
+        self.building.append(build)
+        self.updateBigStatus()
+
+        log.msg("starting build %s.. pinging the slave" % build)
+        # ping the slave to make sure they're still there. If they've fallen
+        # off the map (due to a NAT timeout or something), this will fail in
+        # a couple of minutes, depending upon the TCP timeout. TODO: consider
+        # making this time out faster, or at least characterize the likely
+        # duration.
+        d = sb.ping(self.START_BUILD_TIMEOUT)
+        d.addCallback(self._startBuild_1, build, sb)
+        return d
+
+    def _startBuild_1(self, res, build, sb):
+        if not res:
+            return self._startBuildFailed("slave ping failed", build, sb)
+        # The buildslave is ready to go. sb.buildStarted() sets its state to
+        # BUILDING (so we won't try to use it for any other builds). This
+        # gets set back to IDLE by the Build itself when it finishes.
+        sb.buildStarted()
+        d = sb.remote.callRemote("startBuild")
+        d.addCallbacks(self._startBuild_2, self._startBuildFailed,
+                       callbackArgs=(build,sb), errbackArgs=(build,sb))
+        return d
+
+    def _startBuild_2(self, res, build, sb):
+        # create the BuildStatus object that goes with the Build
+        bs = self.builder_status.newBuild()
+
+        # start the build. This will first set up the steps, then tell the
+        # BuildStatus that it has started, which will announce it to the
+        # world (through our BuilderStatus object, which is its parent).
+        # Finally it will start the actual build process.
+        d = build.startBuild(bs, self.expectations, sb)
+        d.addCallback(self.buildFinished, sb)
+        d.addErrback(log.err) # this shouldn't happen. if it does, the slave
+                              # will be wedged
+        for req in build.requests:
+            req.buildStarted(build, bs)
+        return build # this is the IBuildControl
+
+    def _startBuildFailed(self, why, build, sb):
+        # put the build back on the buildable list
+        log.msg("I tried to tell the slave that the build %s started, but "
+                "remote_startBuild failed: %s" % (build, why))
+        # release the slave. This will queue a call to maybeStartBuild, which
+        # will fire after other notifyOnDisconnect handlers have marked the
+        # slave as disconnected (so we don't try to use it again).
+        sb.buildFinished()
+
+        log.msg("re-queueing the BuildRequest")
+        self.building.remove(build)
+        for req in build.requests:
+            self.buildable.insert(0, req) # the interrupted build gets first
+                                          # priority
+            self.builder_status.addBuildRequest(req.status)
+
+
+    def buildFinished(self, build, sb):
+        """This is called when the Build has finished (either success or
+        failure). Any exceptions during the build are reported with
+        results=FAILURE, not with an errback."""
+
+        # by the time we get here, the Build has already released the slave
+        # (which queues a call to maybeStartBuild)
+
+        self.building.remove(build)
+        for req in build.requests:
+            req.finished(build.build_status)
+
+    def setExpectations(self, progress):
+        """Mark the build as successful and update expectations for the next
+        build. Only call this when the build did not fail in any way that
+        would invalidate the time expectations generated by it. (If the
+        compile failed and thus terminated early, we can't use the last
+        build to predict how long the next one will take).
+        """
+        if self.expectations:
+            self.expectations.update(progress)
+        else:
+            # the first time we get a good build, create our Expectations
+            # based upon its results
+            self.expectations = Expectations(progress)
+        log.msg("new expectations: %s seconds" % \
+                self.expectations.expectedBuildTime())
+
+    def shutdownSlave(self):
+        if self.remote:
+            self.remote.callRemote("shutdown")
+
+
+class BuilderControl(components.Adapter):
+    if implements:
+        implements(interfaces.IBuilderControl)
+    else:
+        __implements__ = interfaces.IBuilderControl,
+
+    def requestBuild(self, req):
+        """Submit a BuildRequest to this Builder."""
+        self.original.submitBuildRequest(req)
+
+    def requestBuildSoon(self, req):
+        """Submit a BuildRequest like requestBuild, but raise a
+        L{buildbot.interfaces.NoSlaveError} if no slaves are currently
+        available, so it cannot be used to queue a BuildRequest in the hopes
+        that a slave will eventually connect. This method is appropriate for
+        use by things like the web-page 'Force Build' button."""
+        if not self.original.slaves:
+            raise interfaces.NoSlaveError
+        self.requestBuild(req)
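+    # A sketch of how a web "force build" handler might use this, where
+    # 'ctrl' is the IBuilderControl; the reason string is illustrative and
+    # it assumes a bare SourceStamp() means "build the current HEAD":
+    #
+    #   ss = sourcestamp.SourceStamp()
+    #   req = base.BuildRequest("forced: web 'Force Build' button", ss,
+    #                           ctrl.original.name)
+    #   ctrl.requestBuildSoon(req)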
+
+    def resubmitBuild(self, bs, reason="<rebuild, no reason given>"):
+        if not bs.isFinished():
+            return
+        branch, revision, patch = bs.getSourceStamp()
+        changes = bs.getChanges()
+        ss = sourcestamp.SourceStamp(branch, revision, patch, changes)
+        req = base.BuildRequest(reason, ss, self.original.name)
+        self.requestBuild(req)
+
+    def getPendingBuilds(self):
+        # return IBuildRequestControl objects
+        raise NotImplementedError
+
+    def getBuild(self, number):
+        for b in self.original.building:
+            if b.build_status.number == number:
+                return b
+        return None
+
+    def ping(self, timeout=30):
+        if not self.original.slaves:
+            self.original.builder_status.addPointEvent(["ping", "no slave"],
+                                                       "red")
+            return defer.succeed(False) # interfaces.NoSlaveError
+        dl = []
+        for s in self.original.slaves:
+            dl.append(s.ping(timeout, self.original.builder_status))
+        d = defer.DeferredList(dl)
+        d.addCallback(self._gatherPingResults)
+        return d
+
+    def _gatherPingResults(self, res):
+        for ignored,success in res:
+            if not success:
+                return False
+        return True
+
+components.registerAdapter(BuilderControl, Builder, interfaces.IBuilderControl)

Added: vendor/buildbot/current/buildbot/process/buildstep.py
===================================================================
--- vendor/buildbot/current/buildbot/process/buildstep.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/buildstep.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,1063 @@
+# -*- test-case-name: buildbot.test.test_steps -*-
+
+from twisted.internet import reactor, defer, error
+from twisted.protocols import basic
+from twisted.spread import pb
+from twisted.python import log
+from twisted.python.failure import Failure
+from twisted.web.util import formatFailure
+
+from buildbot import interfaces
+from buildbot.twcompat import implements, providedBy
+from buildbot import util
+from buildbot.status import progress
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, SKIPPED, \
+     EXCEPTION
+
+"""
+BuildStep and RemoteCommand classes for master-side representation of the
+build process
+"""
+
+class RemoteCommand(pb.Referenceable):
+    """
+    I represent a single command to be run on the slave. I handle the details
+    of reliably gathering status updates from the slave (acknowledging each),
+    and (eventually, in a future release) recovering from interrupted builds.
+    This is the master-side object that is known to the slave-side
+    L{buildbot.slave.bot.SlaveBuilder}, to which status updates are sent.
+
+    My command should be started by calling .run(), which returns a
+    Deferred that will fire when the command has finished, or will
+    errback if an exception is raised.
+    
+    Typically __init__ or run() will set up self.remote_command to be a
+    string which corresponds to one of the SlaveCommands registered in
+    the buildslave, and self.args to a dictionary of arguments that will
+    be passed to the SlaveCommand instance.
+
+    start, remoteUpdate, and remoteComplete are available to be overridden
+
+    @type  commandCounter: list of one int
+    @cvar  commandCounter: provides a unique value for each
+                           RemoteCommand executed across all slaves
+    @type  active:         boolean
+    @ivar  active:         whether the command is currently running
+    """
+    commandCounter = [0] # we use a list as a poor man's singleton
+    active = False
+
+    def __init__(self, remote_command, args):
+        """
+        @type  remote_command: string
+        @param remote_command: remote command to start.  This will be
+                               passed to
+                               L{buildbot.slave.bot.SlaveBuilder.remote_startCommand}
+                               and needs to have been registered
+                               slave-side by
+                               L{buildbot.slave.registry.registerSlaveCommand}
+        @type  args:           dict
+        @param args:           arguments to send to the remote command
+        """
+
+        self.remote_command = remote_command
+        self.args = args
+
+    def __getstate__(self):
+        dict = self.__dict__.copy()
+        # Remove the remote ref: if necessary (only for resumed builds), it
+        # will be reattached at resume time
+        if dict.has_key("remote"):
+            del dict["remote"]
+        return dict
+
+    def run(self, step, remote):
+        self.active = True
+        self.step = step
+        self.remote = remote
+        c = self.commandCounter[0]
+        self.commandCounter[0] += 1
+        #self.commandID = "%d %d" % (c, random.randint(0, 1000000))
+        self.commandID = "%d" % c
+        log.msg("%s: RemoteCommand.run [%s]" % (self, self.commandID))
+        self.deferred = defer.Deferred()
+
+        d = defer.maybeDeferred(self.start)
+
+        # _finished is called with an error for unknown commands, errors
+        # that occur while the command is starting (including OSErrors in
+        # exec()), StaleBroker (when the connection was lost before we
+        # started), and pb.PBConnectionLost (when the slave isn't responding
+        # over this connection, perhaps it had a power failure, or NAT
+        # weirdness). If this happens, self.deferred is fired right away.
+        d.addErrback(self._finished)
+
+        # Connections which are lost while the command is running are caught
+        # when our parent Step calls our .lostRemote() method.
+        return self.deferred
+
+    def start(self):
+        """
+        Tell the slave to start executing the remote command.
+
+        @rtype:   L{twisted.internet.defer.Deferred}
+        @returns: a deferred that will fire when the remote command is
+                  done (with None as the result)
+        """
+        # This method only initiates the remote command.
+        # We will receive remote_update messages as the command runs.
+        # We will get a single remote_complete when it finishes.
+        # We should fire self.deferred when the command is done.
+        d = self.remote.callRemote("startCommand", self, self.commandID,
+                                   self.remote_command, self.args)
+        return d
+
+    def interrupt(self, why):
+        # TODO: consider separating this into interrupt() and stop(), where
+        # stop() unconditionally calls _finished, but interrupt() merely
+        # asks politely for the command to stop soon.
+
+        log.msg("RemoteCommand.interrupt", self, why)
+        if not self.active:
+            log.msg(" but this RemoteCommand is already inactive")
+            return
+        if not self.remote:
+            log.msg(" but our .remote went away")
+            return
+        if isinstance(why, Failure) and why.check(error.ConnectionLost):
+            log.msg("RemoteCommand.disconnect: lost slave")
+            self.remote = None
+            self._finished(why)
+            return
+
+        # tell the remote command to halt. Returns a Deferred that will fire
+        # when the interrupt command has been delivered.
+        
+        d = defer.maybeDeferred(self.remote.callRemote, "interruptCommand",
+                                self.commandID, str(why))
+        # the slave may not have remote_interruptCommand
+        d.addErrback(self._interruptFailed)
+        return d
+
+    def _interruptFailed(self, why):
+        log.msg("RemoteCommand._interruptFailed", self)
+        # TODO: forcibly stop the Command now, since we can't stop it
+        # cleanly
+        return None
+
+    def remote_update(self, updates):
+        """
+        I am called by the slave's L{buildbot.slave.bot.SlaveBuilder} so
+        I can receive updates from the running remote command.
+
+        @type  updates: list of [object, int]
+        @param updates: list of updates from the remote command
+        """
+        max_updatenum = 0
+        for (update, num) in updates:
+            #log.msg("update[%d]:" % num)
+            try:
+                if self.active: # ignore late updates
+                    self.remoteUpdate(update)
+            except:
+                # log failure, terminate build, let slave retire the update
+                self._finished(Failure())
+                # TODO: what if multiple updates arrive? should
+                # skip the rest but ack them all
+            if num > max_updatenum:
+                max_updatenum = num
+        return max_updatenum
+
+    def remoteUpdate(self, update):
+        raise NotImplementedError("You must implement this in a subclass")
+
+    def remote_complete(self, failure=None):
+        """
+        Called by the slave's L{buildbot.slave.bot.SlaveBuilder} to
+        notify me the remote command has finished.
+
+        @type  failure: L{twisted.python.failure.Failure} or None
+
+        @rtype: None
+        """
+        # call the real remoteComplete a moment later, but first return an
+        # acknowledgement so the slave can retire the completion message.
+        if self.active:
+            reactor.callLater(0, self._finished, failure)
+        return None
+
+    def _finished(self, failure=None):
+        self.active = False
+        # call .remoteComplete. If it raises an exception, or returns the
+        # Failure that we gave it, our self.deferred will be errbacked. If
+        # it does not (either it ate the Failure or the step finished
+        # normally and it didn't raise a new exception), self.deferred will
+        # be callbacked.
+        d = defer.maybeDeferred(self.remoteComplete, failure)
+        # arrange for the callback to get this RemoteCommand instance
+        # instead of just None
+        d.addCallback(lambda r: self)
+        # this fires the original deferred we returned from .run(),
+        # with self as the result, or a failure
+        d.addBoth(self.deferred.callback)
+
+    def remoteComplete(self, maybeFailure):
+        """Subclasses can override this.
+
+        This is called when the RemoteCommand has finished. 'maybeFailure'
+        will be None if the command completed normally, or a Failure
+        instance in one of the following situations:
+
+         - the slave was lost before the command was started
+         - the slave didn't respond to the startCommand message
+         - the slave raised an exception while starting the command
+           (bad command name, bad args, OSError from missing executable)
+         - the slave raised an exception while finishing the command
+           (they send back a remote_complete message with a Failure payload)
+
+        and also (for now):
+         - the slave disconnected while the command was running
+        
+        This method should do cleanup, like closing log files. It should
+        normally return the 'failure' argument, so that any exceptions will
+        be propagated to the Step. If it wants to consume them, return None
+        instead."""
+
+        return maybeFailure
+
+class LoggedRemoteCommand(RemoteCommand):
+    """
+
+    I am a L{RemoteCommand} which gathers output from the remote command into
+    one or more local log files. My C{self.logs} dictionary contains
+    references to these L{buildbot.status.builder.LogFile} instances. Any
+    stdout/stderr/header updates from the slave will be put into
+    C{self.logs['stdio']}, if it exists. If the remote command uses other log
+    files, they will go into other entries in C{self.logs}.
+
+    If you want to use stdout or stderr, you should create a LogFile named
+    'stdio' and pass it to my useLog() method. Otherwise stdout/stderr will
+    be ignored, which is probably not what you want.
+
+    Unless you tell me otherwise, when my command completes I will close all
+    the LogFiles that I know about.
+
+    @ivar logs: maps logname to a LogFile instance
+    @ivar _closeWhenFinished: maps logname to a boolean. If true, this
+                              LogFile will be closed when the RemoteCommand
+                              finishes. LogFiles which are shared between
+                              multiple RemoteCommands should use False here.
+
+    """
+
+    rc = None
+    debug = False
+
+    def __init__(self, *args, **kwargs):
+        self.logs = {}
+        self._closeWhenFinished = {}
+        RemoteCommand.__init__(self, *args, **kwargs)
+
+    def __repr__(self):
+        return "<RemoteCommand '%s' at %d>" % (self.remote_command, id(self))
+
+    def useLog(self, loog, closeWhenFinished=False, logfileName=None):
+        """Start routing messages from a remote logfile to a local LogFile
+
+        I take a local ILogFile instance in 'loog', and arrange to route
+        remote log messages for the logfile named 'logfileName' into it. By
+        default this logfileName comes from the ILogFile itself (using the
+        name by which the ILogFile will be displayed), but the 'logfileName'
+        argument can be used to override this. For example, if
+        logfileName='stdio', this logfile will collect text from the stdout
+        and stderr of the command.
+
+        @param loog: an instance which implements ILogFile
+        @param closeWhenFinished: a boolean, set to False if the logfile
+                                  will be shared between multiple
+                                  RemoteCommands. If True, the logfile will
+                                  be closed when this ShellCommand is done
+                                  with it.
+        @param logfileName: a string, which indicates which remote log file
+                            should be routed into this ILogFile. This should
+                            match one of the keys of the logfiles= argument
+                            to ShellCommand.
+
+        """
+
+        assert providedBy(loog, interfaces.ILogFile)
+        if not logfileName:
+            logfileName = loog.getName()
+        assert logfileName not in self.logs
+        self.logs[logfileName] = loog
+        self._closeWhenFinished[logfileName] = closeWhenFinished
+
+    def start(self):
+        log.msg("LoggedRemoteCommand.start")
+        if 'stdio' not in self.logs:
+            log.msg("LoggedRemoteCommand (%s) is running a command, but "
+                    "it isn't being logged to anything. This seems unusual."
+                    % self)
+        self.updates = {}
+        return RemoteCommand.start(self)
+
+    def addStdout(self, data):
+        if 'stdio' in self.logs:
+            self.logs['stdio'].addStdout(data)
+    def addStderr(self, data):
+        if 'stdio' in self.logs:
+            self.logs['stdio'].addStderr(data)
+    def addHeader(self, data):
+        if 'stdio' in self.logs:
+            self.logs['stdio'].addHeader(data)
+
+    def addToLog(self, logname, data):
+        if logname in self.logs:
+            self.logs[logname].addStdout(data)
+        else:
+            log.msg("%s.addToLog: no such log %s" % (self, logname))
+
+    def remoteUpdate(self, update):
+        if self.debug:
+            for k,v in update.items():
+                log.msg("Update[%s]: %s" % (k,v))
+        if update.has_key('stdout'):
+            # 'stdout': data
+            self.addStdout(update['stdout'])
+        if update.has_key('stderr'):
+            # 'stderr': data
+            self.addStderr(update['stderr'])
+        if update.has_key('header'):
+            # 'header': data
+            self.addHeader(update['header'])
+        if update.has_key('log'):
+            # 'log': (logname, data)
+            logname, data = update['log']
+            self.addToLog(logname, data)
+        if update.has_key('rc'):
+            rc = self.rc = update['rc']
+            log.msg("%s rc=%s" % (self, rc))
+            self.addHeader("program finished with exit code %d\n" % rc)
+
+        for k in update:
+            if k not in ('stdout', 'stderr', 'header', 'rc'):
+                if k not in self.updates:
+                    self.updates[k] = []
+                self.updates[k].append(update[k])
+
+    def remoteComplete(self, maybeFailure):
+        for name,loog in self.logs.items():
+            if self._closeWhenFinished[name]:
+                if maybeFailure:
+                    loog.addHeader("\nremoteFailed: %s" % maybeFailure)
+                else:
+                    log.msg("closing log %s" % loog)
+                loog.finish()
+        return maybeFailure
+
+
+class LogObserver:
+    if implements:
+        implements(interfaces.ILogObserver)
+    else:
+        __implements__ = interfaces.ILogObserver,
+
+    def setStep(self, step):
+        self.step = step
+
+    def setLog(self, loog):
+        assert providedBy(loog, interfaces.IStatusLog)
+        loog.subscribe(self, True)
+
+    def logChunk(self, build, step, log, channel, text):
+        if channel == interfaces.LOG_CHANNEL_STDOUT:
+            self.outReceived(text)
+        elif channel == interfaces.LOG_CHANNEL_STDERR:
+            self.errReceived(text)
+
+    # TODO: add a logEnded method? er, stepFinished?
+
+    def outReceived(self, data):
+        """This will be called with chunks of stdout data. Override this in
+        your observer."""
+        pass
+
+    def errReceived(self, data):
+        """This will be called with chunks of stderr data. Override this in
+        your observer."""
+        pass
+
+
+class LogLineObserver(LogObserver):
+    def __init__(self):
+        self.stdoutParser = basic.LineOnlyReceiver()
+        self.stdoutParser.delimiter = "\n"
+        self.stdoutParser.lineReceived = self.outLineReceived
+        self.stdoutParser.transport = self # for the .disconnecting attribute
+        self.disconnecting = False
+
+        self.stderrParser = basic.LineOnlyReceiver()
+        self.stderrParser.delimiter = "\n"
+        self.stderrParser.lineReceived = self.errLineReceived
+        self.stderrParser.transport = self
+
+    def outReceived(self, data):
+        self.stdoutParser.dataReceived(data)
+
+    def errReceived(self, data):
+        self.stderrParser.dataReceived(data)
+
+    def outLineReceived(self, line):
+        """This will be called with complete stdout lines (not including the
+        delimiter). Override this in your observer."""
+        pass
+
+    def errLineReceived(self, line):
+        """This will be called with complete lines of stderr (not including
+        the delimiter). Override this in your observer."""
+        pass
+
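+# A sketch of a concrete observer: it counts 'Warning:' lines on stdout and
+# reports the count as step progress (the class name, the pattern, and the
+# 'warnings' metric are illustrative):
+#
+#   class WarningCountingObserver(LogLineObserver):
+#       warnings = 0
+#       def outLineReceived(self, line):
+#           if line.startswith("Warning:"):
+#               self.warnings += 1
+#               self.step.setProgress("warnings", self.warnings)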
+
+class RemoteShellCommand(LoggedRemoteCommand):
+    """This class helps you run a shell command on the build slave. It will
+    accumulate all the command's output into a Log named 'stdio'. When the
+    command is finished, it will fire a Deferred. You can then check the
+    results of the command and parse the output however you like."""
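+    # For example, a compile step might construct one like this (a sketch;
+    # the workdir, command, and environment values are illustrative):
+    #
+    #   cmd = RemoteShellCommand(workdir="build",
+    #                            command=["make", "all"],
+    #                            env={"CFLAGS": "-O2"},
+    #                            timeout=30*60)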
+
+    def __init__(self, workdir, command, env=None, 
+                 want_stdout=1, want_stderr=1,
+                 timeout=20*60, logfiles={}, **kwargs):
+        """
+        @type  workdir: string
+        @param workdir: directory where the command ought to run,
+                        relative to the Builder's home directory. Defaults to
+                        '.': the same as the Builder's homedir. This should
+                        probably be '.' for the initial 'cvs checkout'
+                        command (which creates a workdir), and the Build-wide
+                        workdir for all subsequent commands (including
+                        compiles and 'cvs update').
+
+        @type  command: list of strings (or string)
+        @param command: the shell command to run, like 'make all' or
+                        'cvs update'. This should be a list or tuple
+                        which can be used directly as the argv array.
+                        For backwards compatibility, if this is a
+                        string, the text will be given to '/bin/sh -c
+                        %s'.
+
+        @type  env:     dict of string->string
+        @param env:     environment variables to add or change for the
+                        slave.  Each command gets a separate
+                        environment; all inherit the slave's initial
+                        one.  TODO: make it possible to delete some or
+                        all of the slave's environment.
+
+        @type  want_stdout: bool
+        @param want_stdout: defaults to True. Set to False if stdout should
+                            be thrown away. Do this to avoid storing or
+                            sending large amounts of useless data.
+
+        @type  want_stderr: bool
+        @param want_stderr: False if stderr should be thrown away
+
+        @type  timeout: int
+        @param timeout: tell the remote that if the command fails to
+                        produce any output for this number of seconds,
+                        the command is hung and should be killed. Use
+                        None to disable the timeout.
+        """
+
+        self.command = command # stash .command, set it later
+        if env is not None:
+            # avoid mutating the original master.cfg dictionary. Each
+            # ShellCommand gets its own copy, so any start() methods won't be
+            # able to modify the original.
+            env = env.copy()
+        args = {'workdir': workdir,
+                'env': env,
+                'want_stdout': want_stdout,
+                'want_stderr': want_stderr,
+                'logfiles': logfiles,
+                'timeout': timeout,
+                }
+        LoggedRemoteCommand.__init__(self, "shell", args)
+
+    def start(self):
+        self.args['command'] = self.command
+        if self.remote_command == "shell":
+            # non-ShellCommand slavecommands are responsible for doing this
+            # fixup themselves
+            if self.step.slaveVersion("shell", "old") == "old":
+                self.args['dir'] = self.args['workdir']
+        what = "command '%s' in dir '%s'" % (self.args['command'],
+                                             self.args['workdir'])
+        log.msg(what)
+        return LoggedRemoteCommand.start(self)
+
+    def __repr__(self):
+        return "<RemoteShellCommand '%s'>" % self.command
+
+class BuildStep:
+    """
+    I represent a single step of the build process. This step may involve
+    zero or more commands to be run in the build slave, as well as arbitrary
+    processing on the master side. Regardless of how many slave commands are
+    run, the BuildStep will result in a single status value.
+
+    The step is started by calling startStep(), which returns a Deferred that
+    fires when the step finishes. See C{startStep} for a description of the
+    results provided by that Deferred.
+
+    __init__ and start are good methods to override. Don't forget to upcall
+    BuildStep.__init__ or bad things will happen.
+
+    To launch a RemoteCommand, pass it to .runCommand and wait on the
+    Deferred it returns.
+
+    Each BuildStep generates status as it runs. This status data is fed to
+    the L{buildbot.status.builder.BuildStepStatus} listener that sits in
+    C{self.step_status}. It can also feed progress data (like how much text
+    is output by a shell command) to the
+    L{buildbot.status.progress.StepProgress} object that lives in
+    C{self.progress}, by calling C{self.setProgress(metric, value)} as it
+    runs.
+
+    @type build: L{buildbot.process.base.Build}
+    @ivar build: the parent Build which is executing this step
+
+    @type progress: L{buildbot.status.progress.StepProgress}
+    @ivar progress: tracks ETA for the step
+
+    @type step_status: L{buildbot.status.builder.BuildStepStatus}
+    @ivar step_status: collects output status
+    """
+
+    # these parameters are used by the parent Build object to decide how to
+    # interpret our results. haltOnFailure will affect the build process
+    # immediately, the others will be taken into consideration when
+    # determining the overall build status.
+    #
+    haltOnFailure = False
+    flunkOnWarnings = False
+    flunkOnFailure = False
+    warnOnWarnings = False
+    warnOnFailure = False
+
+    # 'parms' holds a list of all the parameters we care about, to allow
+    # users to instantiate a subclass of BuildStep with a mixture of
+    # arguments, some of which are for us, some of which are for the subclass
+    # (or a delegate of the subclass, like how ShellCommand delivers many
+    # arguments to the RemoteShellCommand that it creates). Such delegating
+    # subclasses will use this list to figure out which arguments are meant
+    # for us and which should be given to someone else.
+    parms = ['build', 'name', 'locks',
+             'haltOnFailure',
+             'flunkOnWarnings',
+             'flunkOnFailure',
+             'warnOnWarnings',
+             'warnOnFailure',
+             'progressMetrics',
+             ]
+
+    name = "generic"
+    locks = []
+    progressMetrics = () # 'time' is implicit
+    useProgress = True # set to False if step is really unpredictable
+    build = None
+    step_status = None
+    progress = None
+
+    def __init__(self, build, **kwargs):
+        self.build = build
+        for p in self.__class__.parms:
+            if kwargs.has_key(p):
+                setattr(self, p, kwargs[p])
+                del kwargs[p]
+        # we want to encourage all steps to get a workdir, so tolerate its
+        # presence here. It really only matters for non-ShellCommand steps
+        # like Dummy
+        if kwargs.has_key('workdir'):
+            del kwargs['workdir']
+        if kwargs:
+            why = "%s.__init__ got unexpected keyword argument(s) %s" \
+                  % (self, kwargs.keys())
+            raise TypeError(why)
+        self._pendingLogObservers = []
+
+    def setStepStatus(self, step_status):
+        self.step_status = step_status
+
+    def setupProgress(self):
+        if self.useProgress:
+            sp = progress.StepProgress(self.name, self.progressMetrics)
+            self.progress = sp
+            self.step_status.setProgress(sp)
+            return sp
+        return None
+
+    def setProgress(self, metric, value):
+        """BuildSteps can call self.setProgress() to announce progress along
+        some metric."""
+        if self.progress:
+            self.progress.setProgress(metric, value)
+
+    def getProperty(self, propname):
+        return self.build.getProperty(propname)
+
+    def setProperty(self, propname, value):
+        self.build.setProperty(propname, value)
+
+    def startStep(self, remote):
+        """Begin the step. This returns a Deferred that will fire when the
+        step finishes.
+
+        This deferred fires with a tuple of (result, [extra text]), although
+        older steps used to return just the 'result' value, so the receiving
+        L{base.Build} needs to be prepared to handle that too. C{result} is
+        one of the SUCCESS/WARNINGS/FAILURE/SKIPPED constants from
+        L{buildbot.status.builder}, and the extra text is a list of short
+        strings which should be appended to the Build's text results. This
+        text allows a test-case step which fails to append B{17 tests} to the
+        Build's status, in addition to marking the build as failing.
+
+        The deferred will errback if the step encounters an exception,
+        including an exception on the slave side (or if the slave goes away
+        altogether). Failures in shell commands (rc!=0) will B{not} cause an
+        errback; in general the BuildStep will evaluate the results and
+        decide whether to treat them as a WARNING or FAILURE.
+
+        @type remote: L{twisted.spread.pb.RemoteReference}
+        @param remote: a reference to the slave's
+                       L{buildbot.slave.bot.SlaveBuilder} instance where any
+                       RemoteCommands may be run
+        """
+
+        self.remote = remote
+        self.deferred = defer.Deferred()
+        # convert all locks into their real form
+        self.locks = [self.build.builder.botmaster.getLockByID(l)
+                      for l in self.locks]
+        # then narrow SlaveLocks down to the slave that this build is being
+        # run on
+        self.locks = [l.getLock(self.build.slavebuilder) for l in self.locks]
+        for l in self.locks:
+            if l in self.build.locks:
+                log.msg("Hey, lock %s is claimed by both a Step (%s) and the"
+                        " parent Build (%s)" % (l, self, self.build))
+                raise RuntimeError("lock claimed by both Step and Build")
+        d = self.acquireLocks()
+        d.addCallback(self._startStep_2)
+        return self.deferred
+
+    def acquireLocks(self, res=None):
+        log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
+        if not self.locks:
+            return defer.succeed(None)
+        for lock in self.locks:
+            if not lock.isAvailable():
+                log.msg("step %s waiting for lock %s" % (self, lock))
+                d = lock.waitUntilMaybeAvailable(self)
+                d.addCallback(self.acquireLocks)
+                return d
+        # all locks are available, claim them all
+        for lock in self.locks:
+            lock.claim(self)
+        return defer.succeed(None)
+
+    def _startStep_2(self, res):
+        if self.progress:
+            self.progress.start()
+        self.step_status.stepStarted()
+        try:
+            skip = self.start()
+            if skip == SKIPPED:
+                reactor.callLater(0, self.releaseLocks)
+                reactor.callLater(0, self.deferred.callback, SKIPPED)
+        except:
+            log.msg("BuildStep.startStep exception in .start")
+            self.failed(Failure())
+
+    def start(self):
+        """Begin the step. Override this method and add code to do local
+        processing, fire off remote commands, etc.
+
+        To spawn a command in the buildslave, create a RemoteCommand instance
+        and run it with self.runCommand::
+
+          c = RemoteCommandFoo(args)
+          d = self.runCommand(c)
+          d.addCallback(self.fooDone).addErrback(self.failed)
+
+        As the step runs, it should send status information to the
+        BuildStepStatus::
+
+          self.step_status.setColor('red')
+          self.step_status.setText(['compile', 'failed'])
+          self.step_status.setText2(['4', 'warnings'])
+
+        To have some code parse stdio (or other log stream) in realtime, add
+        a LogObserver subclass. This observer can use self.step.setProgress()
+        to provide better progress notification to the step::
+
+          self.addLogObserver('stdio', MyLogObserver())
+
+        To add a LogFile, use self.addLog. Make sure it gets closed when it
+        finishes. When giving a Logfile to a RemoteShellCommand, just ask it
+        to close the log when the command completes::
+
+          log = self.addLog('output')
+          cmd = RemoteShellCommand(args)
+          cmd.useLog(log, closeWhenFinished=True)
+
+        You can also create complete Logfiles with generated text in a single
+        step::
+
+          self.addCompleteLog('warnings', text)
+
+        When the step is done, it should call self.finished(result). 'result'
+        will be provided to the L{buildbot.process.base.Build}, and should be
+        one of the constants defined above: SUCCESS, WARNINGS, FAILURE, or
+        SKIPPED.
+
+        If the step encounters an exception, it should call self.failed(why).
+        'why' should be a Failure object. This automatically fails the whole
+        build with an exception. It is a good idea to add self.failed as an
+        errback to any Deferreds you might obtain.
+
+        If the step decides it does not need to be run, start() can return
+        the constant SKIPPED. This fires the callback immediately: it is not
+        necessary to call .finished yourself. This can also indicate to the
+        status-reporting mechanism that this step should not be displayed."""
+        
+        raise NotImplementedError("your subclass must implement this method")
+
+    def interrupt(self, reason):
+        """Halt the command, either because the user has decided to cancel
+        the build ('reason' is a string), or because the slave has
+        disconnected ('reason' is a ConnectionLost Failure). Any further
+        local processing should be skipped, and the Step completed with an
+        error status. The results text should say something useful like
+        ['step', 'interrupted'] or ['remote', 'lost']"""
+        pass
+
+    def releaseLocks(self):
+        log.msg("releaseLocks(%s): %s" % (self, self.locks))
+        for lock in self.locks:
+            lock.release(self)
+
+    def finished(self, results):
+        if self.progress:
+            self.progress.finish()
+        self.step_status.stepFinished(results)
+        self.releaseLocks()
+        self.deferred.callback(results)
+
+    def failed(self, why):
+        # if isinstance(why, pb.CopiedFailure): # a remote exception might
+        # only have short traceback, so formatFailure is not as useful as
+        # you'd like (no .frames, so no traceback is displayed)
+        log.msg("BuildStep.failed, traceback follows")
+        log.err(why)
+        try:
+            if self.progress:
+                self.progress.finish()
+            self.addHTMLLog("err.html", formatFailure(why))
+            self.addCompleteLog("err.text", why.getTraceback())
+            # could use why.getDetailedTraceback() for more information
+            self.step_status.setColor("purple")
+            self.step_status.setText([self.name, "exception"])
+            self.step_status.setText2([self.name])
+            self.step_status.stepFinished(EXCEPTION)
+        except:
+            log.msg("exception during failure processing")
+            log.err()
+            # the progress stuff may still be whacked (the StepStatus may
+            # think that it is still running), but the build overall will now
+            # finish
+        try:
+            self.releaseLocks()
+        except:
+            log.msg("exception while releasing locks")
+            log.err()
+
+        log.msg("BuildStep.failed now firing callback")
+        self.deferred.callback(EXCEPTION)
+
+    # utility methods that BuildSteps may find useful
+
+    def slaveVersion(self, command, oldversion=None):
+        """Return the version number of the given slave command. For the
+        commands defined in buildbot.slave.commands, this is the value of
+        'cvs_ver' at the top of that file. Non-existent commands will return
+        a value of None. Buildslaves running buildbot-0.5.0 or earlier did
+        not respond to the version query: commands on those slaves will
+        return a value of OLDVERSION, so you can distinguish between old
+        buildslaves and missing commands.
+
+        If you know that <=0.5.0 buildslaves have the command you want (CVS
+        and SVN existed back then, but none of the other VC systems), then it
+        makes sense to call this with oldversion='old'. If the command you
+        want is newer than that, just leave oldversion= unspecified, and the
+        command will return None for a buildslave that does not implement the
+        command.
+        """
+        return self.build.getSlaveCommandVersion(command, oldversion)
+
+    def slaveVersionIsOlderThan(self, command, minversion):
+        sv = self.build.getSlaveCommandVersion(command, None)
+        if sv is None:
+            return True
+        # the version we get back is a string form of the CVS version number
+        # of the slave's buildbot/slave/commands.py, something like 1.39 .
+        # This might change in the future (I might move away from CVS), but
+        # if so I'll keep updating that string with suitably-comparable
+        # values.
+        if sv.split(".") < minversion.split("."):
+            return True
+        return False
+
+    def getSlaveName(self):
+        return self.build.getSlaveName()
+
+    def addLog(self, name):
+        loog = self.step_status.addLog(name)
+        self._connectPendingLogObservers()
+        return loog
+
+    # TODO: add a getLog() ? At the moment all logs have to be retrieved from
+    # the RemoteCommand that created them, but for status summarizers it
+    # would be more convenient to get them from the BuildStep / BSStatus,
+    # especially if there are multiple RemoteCommands involved.
+
+    def addCompleteLog(self, name, text):
+        log.msg("addCompleteLog(%s)" % name)
+        loog = self.step_status.addLog(name)
+        size = loog.chunkSize
+        for start in range(0, len(text), size):
+            loog.addStdout(text[start:start+size])
+        loog.finish()
+        self._connectPendingLogObservers()
+
+    def addHTMLLog(self, name, html):
+        log.msg("addHTMLLog(%s)" % name)
+        self.step_status.addHTMLLog(name, html)
+        self._connectPendingLogObservers()
+
+    def addLogObserver(self, logname, observer):
+        assert providedBy(observer, interfaces.ILogObserver)
+        observer.setStep(self)
+        self._pendingLogObservers.append((logname, observer))
+        self._connectPendingLogObservers()
+
+    def _connectPendingLogObservers(self):
+        if not self._pendingLogObservers:
+            return
+        if not self.step_status:
+            return
+        current_logs = {}
+        for loog in self.step_status.getLogs():
+            current_logs[loog.getName()] = loog
+        for logname, observer in self._pendingLogObservers[:]:
+            if logname in current_logs:
+                observer.setLog(current_logs[logname])
+                self._pendingLogObservers.remove((logname, observer))
+
+    def addURL(self, name, url):
+        """Add a BuildStep URL to this step.
+
+        An HREF to this URL will be added to any HTML representations of this
+        step. This allows a step to provide links to external web pages,
+        perhaps to provide detailed HTML code coverage results or other forms
+        of build status.
+        """
+        self.step_status.addURL(name, url)
+
+    def runCommand(self, c):
+        d = c.run(self, self.remote)
+        return d
+
+
+class OutputProgressObserver(LogObserver):
+    length = 0
+
+    def __init__(self, name):
+        self.name = name
+
+    def logChunk(self, build, step, log, channel, text):
+        self.length += len(text)
+        self.step.setProgress(self.name, self.length)
+
+class LoggingBuildStep(BuildStep):
+    """This is an abstract base class, suitable for inheritance by all
+    BuildSteps that invoke RemoteCommands which emit stdout/stderr messages.
+    """
+
+    progressMetrics = ('output',)
+    logfiles = {}
+
+    parms = BuildStep.parms + ['logfiles']
+
+    def __init__(self, logfiles={}, *args, **kwargs):
+        BuildStep.__init__(self, *args, **kwargs)
+        # merge a class-level 'logfiles' attribute with one passed in as an
+        # argument
+        self.logfiles = self.logfiles.copy()
+        self.logfiles.update(logfiles)
+        self.addLogObserver('stdio', OutputProgressObserver("output"))
+
+    def describe(self, done=False):
+        raise NotImplementedError("implement this in a subclass")
+
+    def startCommand(self, cmd, errorMessages=[]):
+        """
+        @param cmd: a suitable RemoteCommand which will be launched, with
+                    all output being put into our self.stdio_log LogFile
+        """
+        log.msg("ShellCommand.startCommand(cmd=%s)", (cmd,))
+        self.cmd = cmd # so we can interrupt it
+        self.step_status.setColor("yellow")
+        self.step_status.setText(self.describe(False))
+
+        # stdio is the first log
+        self.stdio_log = stdio_log = self.addLog("stdio")
+        cmd.useLog(stdio_log, True)
+        for em in errorMessages:
+            stdio_log.addHeader(em)
+            # TODO: consider setting up self.stdio_log earlier, and have the
+            # code that passes in errorMessages instead call
+            # self.stdio_log.addHeader() directly.
+
+        # there might be other logs
+        self.setupLogfiles(cmd, self.logfiles)
+
+        d = self.runCommand(cmd) # might raise ConnectionLost
+        d.addCallback(lambda res: self.commandComplete(cmd))
+        d.addCallback(lambda res: self.createSummary(cmd.logs['stdio']))
+        d.addCallback(lambda res: self.evaluateCommand(cmd)) # returns results
+        def _gotResults(results):
+            self.setStatus(cmd, results)
+            return results
+        d.addCallback(_gotResults) # returns results
+        d.addCallbacks(self.finished, self.checkDisconnect)
+        d.addErrback(self.failed)
+
+    def setupLogfiles(self, cmd, logfiles):
+        """Set up any additional logfiles= logs.
+        """
+        for logname,remotefilename in logfiles.items():
+            # tell the BuildStepStatus to add a LogFile
+            newlog = self.addLog(logname)
+            # and tell the LoggedRemoteCommand to feed it
+            cmd.useLog(newlog, True)
+
+    def interrupt(self, reason):
+        # TODO: consider adding an INTERRUPTED or STOPPED status to use
+        # instead of FAILURE, might make the text a bit more clear.
+        # 'reason' can be a Failure, or text
+        self.addCompleteLog('interrupt', str(reason))
+        d = self.cmd.interrupt(reason)
+        return d
+
+    def checkDisconnect(self, f):
+        f.trap(error.ConnectionLost)
+        self.step_status.setColor("red")
+        self.step_status.setText(self.describe(True) +
+                                 ["failed", "slave", "lost"])
+        self.step_status.setText2(["failed", "slave", "lost"])
+        return self.finished(FAILURE)
+
+    # to refine the status output, override one or more of the following
+    # methods. Change as little as possible: start with the first ones on
+    # this list and only proceed further if you have to    
+    #
+    # createSummary: add additional Logfiles with summarized results
+    # evaluateCommand: decides whether the step was successful or not
+    #
+    # getText: create the final per-step text strings
+    # describeText2: create the strings added to the overall build status
+    #
+    # getText2: only adds describeText2() when the step affects build status
+    #
+    # setStatus: handles all status updating
+
+    # commandComplete is available for general-purpose post-completion work.
+    # It is a good place to do one-time parsing of logfiles, counting
+    # warnings and errors. It should probably stash such counts in places
+    # like self.warnings so they can be picked up later by your getText
+    # method.
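+    #
+    # Putting the pieces together, a subclass might look like this sketch
+    # (the 'Lint' step, its command, and its pass/fail rule are all
+    # illustrative):
+    #
+    #   class Lint(LoggingBuildStep):
+    #       name = "lint"
+    #       def start(self):
+    #           cmd = RemoteShellCommand("build", ["make", "lint"])
+    #           self.startCommand(cmd)
+    #       def describe(self, done=False):
+    #           return ["lint"]
+    #       def evaluateCommand(self, cmd):
+    #           if cmd.rc != 0:
+    #               return FAILURE
+    #           return SUCCESS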
+
+    # TODO: most of this stuff should really be on BuildStep rather than
+    # ShellCommand. That involves putting the status-setup stuff in
+    # .finished, which would make it hard to turn off.
+
+    def commandComplete(self, cmd):
+        """This is a general-purpose hook method for subclasses. It will be
+        called after the remote command has finished, but before any of the
+        other hook functions are called."""
+        pass
+
+    def createSummary(self, log):
+        """To create summary logs, do something like this:
+        warnings = grep('^Warning:', log.getText())
+        self.addCompleteLog('warnings', warnings)
+        """
+        pass
+
+    def evaluateCommand(self, cmd):
+        """Decide whether the command was SUCCESS, WARNINGS, or FAILURE.
+        Override this to, say, declare WARNINGS if there is any stderr
+        activity, or to say that rc!=0 is not actually an error."""
+
+        if cmd.rc != 0:
+            return FAILURE
+        # if cmd.log.getStderr(): return WARNINGS
+        return SUCCESS
+
+    def getText(self, cmd, results):
+        if results == SUCCESS:
+            return self.describe(True)
+        elif results == WARNINGS:
+            return self.describe(True) + ["warnings"]
+        else:
+            return self.describe(True) + ["failed"]
+
+    def getText2(self, cmd, results):
+        """We have decided to add a short note about ourselves to the overall
+        build description, probably because something went wrong. Return a
+        short list of short strings. If your subclass counts test failures or
+        warnings of some sort, this is a good place to announce the count."""
+        # return ["%d warnings" % warningcount]
+        # return ["%d tests" % len(failedTests)]
+        return [self.name]
+
+    def maybeGetText2(self, cmd, results):
+        if results == SUCCESS:
+            # successful steps do not add anything to the build's text
+            pass
+        elif results == WARNINGS:
+            if (self.flunkOnWarnings or self.warnOnWarnings):
+                # we're affecting the overall build, so tell them why
+                return self.getText2(cmd, results)
+        else:
+            if (self.haltOnFailure or self.flunkOnFailure
+                or self.warnOnFailure):
+                # we're affecting the overall build, so tell them why
+                return self.getText2(cmd, results)
+        return []
+
+    def getColor(self, cmd, results):
+        assert results in (SUCCESS, WARNINGS, FAILURE)
+        if results == SUCCESS:
+            return "green"
+        elif results == WARNINGS:
+            return "orange"
+        else:
+            return "red"
+
+    def setStatus(self, cmd, results):
+        # this is good enough for most steps, but it can be overridden to
+        # get more control over the displayed text
+        self.step_status.setColor(self.getColor(cmd, results))
+        self.step_status.setText(self.getText(cmd, results))
+        self.step_status.setText2(self.maybeGetText2(cmd, results))
+

Added: vendor/buildbot/current/buildbot/process/factory.py
===================================================================
--- vendor/buildbot/current/buildbot/process/factory.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/factory.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,179 @@
+# -*- test-case-name: buildbot.test.test_step -*-
+
+from buildbot import util
+from buildbot.process.base import Build
+from buildbot.process.buildstep import BuildStep
+from buildbot.steps.source import CVS, SVN
+from buildbot.steps.shell import Configure, Compile, Test
+
+# deprecated, use BuildFactory.addStep
+def s(steptype, **kwargs):
+    # convenience function for master.cfg files, to create step
+    # specification tuples
+    return (steptype, kwargs)
+
+class BuildFactory(util.ComparableMixin):
+    """
+    @cvar  buildClass: class to use when creating builds
+    @type  buildClass: L{buildbot.process.base.Build}
+    """
+    buildClass = Build
+    useProgress = 1
+    compare_attrs = ['buildClass', 'steps', 'useProgress']
+
+    def __init__(self, steps=None):
+        if steps is None:
+            steps = []
+        self.steps = steps
+
+    def newBuild(self, request):
+        """Create a new Build instance.
+        @param request: a L{base.BuildRequest} describing what is to be built
+        """
+        b = self.buildClass(request)
+        b.useProgress = self.useProgress
+        b.setSteps(self.steps)
+        return b
+
+    def addStep(self, steptype, **kwargs):
+        self.steps.append((steptype, kwargs))
+
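+# Typical master.cfg usage, as a sketch (the repository URL, commands, and
+# step arguments are illustrative):
+#
+#   f = BuildFactory()
+#   f.addStep(SVN, svnurl="http://svn.example.org/trunk", mode="update")
+#   f.addStep(Compile, command=["make", "all"])
+#   f.addStep(Test, command=["make", "check"])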
+
+# BuildFactory subclasses for common build tools
+
+class GNUAutoconf(BuildFactory):
+    def __init__(self, source, configure="./configure",
+                 configureEnv={},
+                 configureFlags=[],
+                 compile=["make", "all"],
+                 test=["make", "check"]):
+        assert isinstance(source, tuple)
+        assert issubclass(source[0], BuildStep)
+        BuildFactory.__init__(self, [source])
+        if configure is not None:
+            # we either need to wind up with a string (which will be
+            # space-split), or with a list of strings (which will not). The
+            # list of strings is the preferred form.
+            if type(configure) is str:
+                if configureFlags:
+                    assert not " " in configure # please use list instead
+                    command = [configure] + configureFlags
+                else:
+                    command = configure
+            else:
+                assert isinstance(configure, (list, tuple))
+                command = configure + configureFlags
+            self.addStep(Configure, command=command, env=configureEnv)
+        if compile is not None:
+            self.addStep(Compile, command=compile)
+        if test is not None:
+            self.addStep(Test, command=test)
+
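+# For instance (a sketch; the CVS location and configure flags are
+# illustrative):
+#
+#   f = GNUAutoconf(s(CVS, cvsroot="/var/lib/cvs", cvsmodule="proj",
+#                     mode="copy"),
+#                   configureFlags=["--without-x"])
+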
+class CPAN(BuildFactory):
+    def __init__(self, source, perl="perl"):
+        assert isinstance(source, tuple)
+        assert issubclass(source[0], BuildStep)
+        BuildFactory.__init__(self, [source])
+        self.addStep(Configure, command=[perl, "Makefile.PL"])
+        self.addStep(Compile, command=["make"])
+        self.addStep(Test, command=["make", "test"])
+
+class Distutils(BuildFactory):
+    def __init__(self, source, python="python", test=None):
+        assert isinstance(source, tuple)
+        assert issubclass(source[0], BuildStep)
+        BuildFactory.__init__(self, [source])
+        self.addStep(Compile, command=[python, "./setup.py", "build"])
+        if test is not None:
+            self.addStep(Test, command=test)
+
+class Trial(BuildFactory):
+    """Build a python module that uses distutils and trial. Set 'tests' to
+    the module in which the tests can be found, or set useTestCaseNames=True
+    to always have trial figure out which tests to run (based upon which
+    files have been changed).
+
+    See docs/factories.xhtml for usage samples. Not all of the Trial
+    BuildStep options are available here, only the most commonly used ones.
+    To get complete access, you will need to create a custom
+    BuildFactory."""
+
+    trial = "trial"
+    randomly = False
+    recurse = False
+
+    def __init__(self, source,
+                 buildpython=["python"], trialpython=[], trial=None,
+                 testpath=".", randomly=None, recurse=None,
+                 tests=None,  useTestCaseNames=False, env=None):
+        BuildFactory.__init__(self, [source])
+        assert isinstance(source, tuple)
+        assert issubclass(source[0], BuildStep)
+        assert tests or useTestCaseNames, "must use one or the other"
+        if trial is not None:
+            self.trial = trial
+        if randomly is not None:
+            self.randomly = randomly
+        if recurse is not None:
+            self.recurse = recurse
+
+        from buildbot.steps.python_twisted import Trial
+        buildcommand = buildpython + ["./setup.py", "build"]
+        self.addStep(Compile, command=buildcommand, env=env)
+        self.addStep(Trial,
+                     python=trialpython, trial=self.trial,
+                     testpath=testpath,
+                     tests=tests, testChanges=useTestCaseNames,
+                     randomly=self.randomly,
+                     recurse=self.recurse,
+                     env=env,
+                     )
+
+
+# compatibility classes, will go away. Note that these only offer
+# compatibility at the constructor level: if you have subclassed these
+# factories, your subclasses are unlikely to still work correctly.
+
+ConfigurableBuildFactory = BuildFactory
+
+class BasicBuildFactory(GNUAutoconf):
+    # really a "GNU Autoconf-created tarball -in-CVS tree" builder
+
+    def __init__(self, cvsroot, cvsmodule,
+                 configure=None, configureEnv={},
+                 compile="make all",
+                 test="make check", cvsCopy=False):
+        mode = "clobber"
+        if cvsCopy:
+            mode = "copy"
+        source = s(CVS, cvsroot=cvsroot, cvsmodule=cvsmodule, mode=mode)
+        GNUAutoconf.__init__(self, source,
+                             configure=configure, configureEnv=configureEnv,
+                             compile=compile,
+                             test=test)
+
+class QuickBuildFactory(BasicBuildFactory):
+    useProgress = False
+
+    def __init__(self, cvsroot, cvsmodule,
+                 configure=None, configureEnv={},
+                 compile="make all",
+                 test="make check", cvsCopy=False):
+        mode = "update"
+        source = s(CVS, cvsroot=cvsroot, cvsmodule=cvsmodule, mode=mode)
+        GNUAutoconf.__init__(self, source,
+                             configure=configure, configureEnv=configureEnv,
+                             compile=compile,
+                             test=test)
+
+class BasicSVN(GNUAutoconf):
+
+    def __init__(self, svnurl,
+                 configure=None, configureEnv={},
+                 compile="make all",
+                 test="make check"):
+        source = s(SVN, svnurl=svnurl, mode="update")
+        GNUAutoconf.__init__(self, source,
+                             configure=configure, configureEnv=configureEnv,
+                             compile=compile,
+                             test=test)

Added: vendor/buildbot/current/buildbot/process/maxq.py
===================================================================
--- vendor/buildbot/current/buildbot/process/maxq.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/maxq.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,4 @@
+
+# legacy compatibility
+from buildbot.steps.maxq import MaxQ
+

Added: vendor/buildbot/current/buildbot/process/process_twisted.py
===================================================================
--- vendor/buildbot/current/buildbot/process/process_twisted.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/process_twisted.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,119 @@
+#! /usr/bin/python
+
+# Build classes specific to the Twisted codebase
+
+from buildbot.process.base import Build
+from buildbot.process.factory import BuildFactory
+from buildbot.steps import shell
+from buildbot.steps.python_twisted import HLint, ProcessDocs, BuildDebs, \
+     Trial, RemovePYCs
+
+class TwistedBuild(Build):
+    workdir = "Twisted" # twisted's bin/trial expects to live in here
+    def isFileImportant(self, filename):
+        if filename.startswith("doc/fun/"):
+            return 0
+        if filename.startswith("sandbox/"):
+            return 0
+        return 1
+
+class TwistedTrial(Trial):
+    tests = "twisted"
+    # the Trial in Twisted >=2.1.0 has --recurse on by default, and -to
+    # turned into --reporter=bwverbose.
+    recurse = False
+    trialMode = ["--reporter=bwverbose"]
+    testpath = None
+    trial = "./bin/trial"
+
+class TwistedBaseFactory(BuildFactory):
+    buildClass = TwistedBuild
+    # bin/trial expects its parent directory to be named "Twisted": it uses
+    # this to add the local tree to PYTHONPATH during tests
+    workdir = "Twisted"
+
+    def __init__(self, source):
+        BuildFactory.__init__(self, [source])
+
+class QuickTwistedBuildFactory(TwistedBaseFactory):
+    treeStableTimer = 30
+    useProgress = 0
+
+    def __init__(self, source, python="python"):
+        TwistedBaseFactory.__init__(self, source)
+        if type(python) is str:
+            python = [python]
+        self.addStep(HLint, python=python[0])
+        self.addStep(RemovePYCs)
+        for p in python:
+            cmd = [p, "setup.py", "build_ext", "-i"]
+            self.addStep(shell.Compile, command=cmd, flunkOnFailure=True)
+            self.addStep(TwistedTrial, python=p, testChanges=True)
+
+class FullTwistedBuildFactory(TwistedBaseFactory):
+    treeStableTimer = 5*60
+
+    def __init__(self, source, python="python",
+                 processDocs=False, runTestsRandomly=False,
+                 compileOpts=[], compileOpts2=[]):
+        TwistedBaseFactory.__init__(self, source)
+        if processDocs:
+            self.addStep(ProcessDocs)
+
+        if type(python) == str:
+            python = [python]
+        assert isinstance(compileOpts, list)
+        assert isinstance(compileOpts2, list)
+        cmd = (python + compileOpts + ["setup.py", "build_ext"]
+               + compileOpts2 + ["-i"])
+
+        self.addStep(shell.Compile, command=cmd, flunkOnFailure=True)
+        self.addStep(RemovePYCs)
+        self.addStep(TwistedTrial, python=python, randomly=runTestsRandomly)
+
+class TwistedDebsBuildFactory(TwistedBaseFactory):
+    treeStableTimer = 10*60
+
+    def __init__(self, source, python="python"):
+        TwistedBaseFactory.__init__(self, source)
+        self.addStep(ProcessDocs, haltOnFailure=True)
+        self.addStep(BuildDebs, warnOnWarnings=True)
+
+class TwistedReactorsBuildFactory(TwistedBaseFactory):
+    treeStableTimer = 5*60
+
+    def __init__(self, source,
+                 python="python", compileOpts=[], compileOpts2=[],
+                 reactors=None):
+        TwistedBaseFactory.__init__(self, source)
+
+        if type(python) == str:
+            python = [python]
+        assert isinstance(compileOpts, list)
+        assert isinstance(compileOpts2, list)
+        cmd = (python + compileOpts + ["setup.py", "build_ext"]
+               + compileOpts2 + ["-i"])
+
+        self.addStep(shell.Compile, command=cmd, warnOnFailure=True)
+
+        if reactors == None:
+            reactors = [
+                'gtk2',
+                'gtk',
+                #'kqueue',
+                'poll',
+                'c',
+                'qt',
+                #'win32',
+                ]
+        for reactor in reactors:
+            flunkOnFailure = 1
+            warnOnFailure = 0
+            #if reactor in ['c', 'qt', 'win32']:
+            #    # these are buggy, so tolerate failures for now
+            #    flunkOnFailure = 0
+            #    warnOnFailure = 1
+            self.addStep(RemovePYCs) # TODO: why?
+            self.addStep(TwistedTrial, name=reactor, python=python,
+                         reactor=reactor, flunkOnFailure=flunkOnFailure,
+                         warnOnFailure=warnOnFailure)
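
A hedged sketch of instantiating one of these Twisted-specific factories; the Subversion URL and interpreter list are placeholders, and s() is the step-specification helper defined in buildbot.process.factory:

    from buildbot.process.factory import s
    from buildbot.steps.source import SVN
    from buildbot.process.process_twisted import QuickTwistedBuildFactory

    # the source argument is a (StepClass, kwargs) tuple, which s() builds
    source = s(SVN, svnurl="svn://svn.example.org/Twisted/trunk",  # placeholder URL
               mode="update")
    quick = QuickTwistedBuildFactory(source,
                                     python=["python2.3", "python2.4"])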

Added: vendor/buildbot/current/buildbot/process/step.py
===================================================================
--- vendor/buildbot/current/buildbot/process/step.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/step.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,16 @@
+# -*- test-case-name: buildbot.test.test_steps.ReorgCompatibility -*-
+
+# legacy compatibility
+
+import warnings
+warnings.warn("buildbot.process.step is deprecated. Please import things like ShellCommand from one of the buildbot.steps.* modules instead.",
+              DeprecationWarning)
+
+from buildbot.steps.shell import ShellCommand, WithProperties, TreeSize, Configure, Compile, Test
+from buildbot.steps.source import CVS, SVN, Darcs, Git, Arch, Bazaar, Mercurial, P4, P4Sync
+from buildbot.steps.dummy import Dummy, FailingDummy, RemoteDummy
+
+from buildbot.process.buildstep import LogObserver, LogLineObserver
+from buildbot.process.buildstep import RemoteShellCommand
+from buildbot.process.buildstep import BuildStep, LoggingBuildStep
+
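
The shim above keeps old configurations working while emitting a DeprecationWarning; both spellings below resolve to the same class, the second being the preferred one going forward:

    from buildbot.process.step import ShellCommand   # deprecated alias, triggers the warning
    from buildbot.steps.shell import ShellCommand    # canonical location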

Added: vendor/buildbot/current/buildbot/process/step_twisted.py
===================================================================
--- vendor/buildbot/current/buildbot/process/step_twisted.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/step_twisted.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,6 @@
+# -*- test-case-name: buildbot.test.test_twisted -*-
+
+# legacy compatibility
+
+from buildbot.steps.python_twisted import HLint, Trial, ProcessDocs, BuildDebs
+from buildbot.steps.python_twisted import RemovePYCs

Added: vendor/buildbot/current/buildbot/process/step_twisted2.py
===================================================================
--- vendor/buildbot/current/buildbot/process/step_twisted2.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/process/step_twisted2.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,162 @@
+#! /usr/bin/python
+
+from buildbot.status import tests
+from buildbot.process.step import SUCCESS, FAILURE, BuildStep
+from buildbot.process.step_twisted import RunUnitTests
+
+from zope.interface import implements
+from twisted.python import log, failure
+from twisted.spread import jelly
+from twisted.pb.tokens import BananaError
+from twisted.web.html import PRE
+from twisted.web.error import NoResource
+
+class Null: pass
+ResultTypes = Null()
+ResultTypeNames = ["SKIP",
+                   "EXPECTED_FAILURE", "FAILURE", "ERROR",
+                   "UNEXPECTED_SUCCESS", "SUCCESS"]
+try:
+    from twisted.trial import reporter # introduced in Twisted-1.0.5
+    # extract the individual result types
+    for name in ResultTypeNames:
+        setattr(ResultTypes, name, getattr(reporter, name))
+except ImportError:
+    from twisted.trial import unittest # Twisted-1.0.4 has them here
+    for name in ResultTypeNames:
+        setattr(ResultTypes, name, getattr(unittest, name))
+
+log._keepErrors = 0
+from twisted.trial import remote # for trial/jelly parsing
+
+import StringIO
+
+class OneJellyTest(tests.OneTest):
+    def html(self, request):
+        tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
+        pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
+        t = request.postpath[0] # one of 'short', 'long' #, or 'html'
+        if isinstance(self.results, failure.Failure):
+            # it would be nice to remove unittest functions from the
+            # traceback like unittest.format_exception() does.
+            if t == 'short':
+                s = StringIO.StringIO()
+                self.results.printTraceback(s)
+                return pptpl % PRE(s.getvalue())
+            elif t == 'long':
+                s = StringIO.StringIO()
+                self.results.printDetailedTraceback(s)
+                return pptpl % PRE(s.getvalue())
+            #elif t == 'html':
+            #    return tpl % formatFailure(self.results)
+            # ACK! source lines aren't stored in the Failure, rather,
+            # formatFailure pulls them (by filename) from the local
+            # disk. Feh. Even printTraceback() won't work. Double feh.
+            return NoResource("No such mode '%s'" % t)
+        if self.results == None:
+            return tpl % "No results to show: test probably passed."
+        # maybe results are plain text?
+        return pptpl % PRE(self.results)
+
+class TwistedJellyTestResults(tests.TestResults):
+    oneTestClass = OneJellyTest
+    def describeOneTest(self, testname):
+        return "%s: %s\n" % (testname, self.tests[testname][0])
+
+class RunUnitTestsJelly(RunUnitTests):
+    """I run the unit tests with the --jelly option, which generates
+    machine-parseable results as the tests are run.
+    """
+    trialMode = "--jelly"
+    implements(remote.IRemoteReporter)
+
+    ourtypes = { ResultTypes.SKIP: tests.SKIP,
+                 ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
+                 ResultTypes.FAILURE: tests.FAILURE,
+                 ResultTypes.ERROR: tests.ERROR,
+                 ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
+                 ResultTypes.SUCCESS: tests.SUCCESS,
+                 }
+
+    def __getstate__(self):
+        #d = RunUnitTests.__getstate__(self)
+        d = self.__dict__.copy()
+        # Banana subclasses are Ephemeral
+        if d.has_key("decoder"):
+            del d['decoder']
+        return d
+    def start(self):
+        self.decoder = remote.DecodeReport(self)
+        # don't accept anything unpleasant from the (untrusted) build slave
+        # The jellied stream may have Failures, but everything inside should
+        # be a string
+        security = jelly.SecurityOptions()
+        security.allowBasicTypes()
+        security.allowInstancesOf(failure.Failure)
+        self.decoder.taster = security
+        self.results = TwistedJellyTestResults()
+        RunUnitTests.start(self)
+
+    def logProgress(self, progress):
+        # XXX: track number of tests
+        BuildStep.logProgress(self, progress)
+
+    def addStdout(self, data):
+        if not self.decoder:
+            return
+        try:
+            self.decoder.dataReceived(data)
+        except BananaError:
+            self.decoder = None
+            log.msg("trial --jelly output unparseable, traceback follows")
+            log.deferr()
+
+    def remote_start(self, expectedTests, times=None):
+        print "remote_start", expectedTests
+    def remote_reportImportError(self, name, aFailure, times=None):
+        pass
+    def remote_reportStart(self, testClass, method, times=None):
+        print "reportStart", testClass, method
+
+    def remote_reportResults(self, testClass, method, resultType, results,
+                             times=None):
+        print "reportResults", testClass, method, resultType
+        which = testClass + "." + method
+        self.results.addTest(which,
+                             self.ourtypes.get(resultType, tests.UNKNOWN),
+                             results)
+
+    def finished(self, rc):
+        # give self.results to our Build object
+        self.build.testsFinished(self.results)
+        total = self.results.countTests()
+        count = self.results.countFailures()
+        result = SUCCESS
+        if total == None:
+            result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
+        if count:
+            result = (FAILURE, ["%d tes%s%s" % (count,
+                                                (count == 1 and 't' or 'ts'),
+                                                self.rtext(' (%s)'))])
+        return self.stepComplete(result)
+    def finishStatus(self, result):
+        total = self.results.countTests()
+        count = self.results.countFailures()
+        color = "green"
+        text = []
+        if count == 0:
+            text.extend(["%d %s" % \
+                         (total,
+                          total == 1 and "test" or "tests"),
+                         "passed"])
+        else:
+            text.append("tests")
+            text.append("%d %s" % \
+                        (count,
+                         count == 1 and "failure" or "failures"))
+            color = "red"
+        self.updateCurrentActivity(color=color, text=text)
+        self.addFileToCurrentActivity("tests", self.results)
+        #self.finishStatusSummary()
+        self.finishCurrentActivity()
+            

Added: vendor/buildbot/current/buildbot/scheduler.py
===================================================================
--- vendor/buildbot/current/buildbot/scheduler.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/scheduler.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,699 @@
+# -*- test-case-name: buildbot.test.test_dependencies -*-
+
+import time, os.path
+
+from twisted.internet import reactor
+from twisted.application import service, internet, strports
+from twisted.python import log, runtime
+from twisted.protocols import basic
+from twisted.cred import portal, checkers
+from twisted.spread import pb
+
+from buildbot import interfaces, buildset, util, pbutil
+from buildbot.status import builder
+from buildbot.twcompat import implements, providedBy
+from buildbot.sourcestamp import SourceStamp
+from buildbot.changes import maildirtwisted
+
+
+class BaseScheduler(service.MultiService, util.ComparableMixin):
+    if implements:
+        implements(interfaces.IScheduler)
+    else:
+        __implements__ = (interfaces.IScheduler,
+                          service.MultiService.__implements__)
+
+    def __init__(self, name):
+        service.MultiService.__init__(self)
+        self.name = name
+
+    def __repr__(self):
+        # TODO: why can't id() return a positive number? %d is ugly.
+        return "<Scheduler '%s' at %d>" % (self.name, id(self))
+
+    def submit(self, bs):
+        self.parent.submitBuildSet(bs)
+
+    def addChange(self, change):
+        pass
+
+class BaseUpstreamScheduler(BaseScheduler):
+    if implements:
+        implements(interfaces.IUpstreamScheduler)
+    else:
+        __implements__ = (interfaces.IUpstreamScheduler,
+                          BaseScheduler.__implements__)
+
+    def __init__(self, name):
+        BaseScheduler.__init__(self, name)
+        self.successWatchers = []
+
+    def subscribeToSuccessfulBuilds(self, watcher):
+        self.successWatchers.append(watcher)
+    def unsubscribeToSuccessfulBuilds(self, watcher):
+        self.successWatchers.remove(watcher)
+
+    def submit(self, bs):
+        d = bs.waitUntilFinished()
+        d.addCallback(self.buildSetFinished)
+        self.parent.submitBuildSet(bs)
+
+    def buildSetFinished(self, bss):
+        if not self.running:
+            return
+        if bss.getResults() == builder.SUCCESS:
+            ss = bss.getSourceStamp()
+            for w in self.successWatchers:
+                w(ss)
+
+
+class Scheduler(BaseUpstreamScheduler):
+    """The default Scheduler class will run a build after some period of time
+    called the C{treeStableTimer}, on a given set of Builders. It only pays
+    attention to a single branch. You can provide a C{fileIsImportant}
+    function which will evaluate each Change to decide whether or not it
+    should trigger a new build.
+    """
+
+    fileIsImportant = None
+    compare_attrs = ('name', 'treeStableTimer', 'builderNames', 'branch',
+                     'fileIsImportant')
+    
+    def __init__(self, name, branch, treeStableTimer, builderNames,
+                 fileIsImportant=None):
+        """
+        @param name: the name of this Scheduler
+        @param branch: The branch name that the Scheduler should pay
+                       attention to. Any Change that is not on this branch
+                       will be ignored. It can be set to None to only pay
+                       attention to the default branch.
+        @param treeStableTimer: the duration, in seconds, for which the tree
+                                must remain unchanged before a build will be
+                                triggered. This is intended to avoid builds
+                                of partially-committed fixes.
+        @param builderNames: a list of Builder names. When this Scheduler
+                             decides to start a set of builds, they will be
+                             run on the Builders named by this list.
+
+        @param fileIsImportant: A callable which takes one argument (a Change
+                                instance) and returns True if the change is
+                                worth building, and False if it is not.
+                                Unimportant Changes are accumulated until the
+                                build is triggered by an important change.
+                                The default value of None means that all
+                                Changes are important.
+        """
+
+        BaseUpstreamScheduler.__init__(self, name)
+        self.treeStableTimer = treeStableTimer
+        errmsg = ("The builderNames= argument to Scheduler must be a list "
+                  "of Builder description names (i.e. the 'name' key of the "
+                  "Builder specification dictionary)")
+        assert isinstance(builderNames, (list, tuple)), errmsg
+        for b in builderNames:
+            assert isinstance(b, str), errmsg
+        self.builderNames = builderNames
+        self.branch = branch
+        if fileIsImportant:
+            assert callable(fileIsImportant)
+            self.fileIsImportant = fileIsImportant
+
+        self.importantChanges = []
+        self.unimportantChanges = []
+        self.nextBuildTime = None
+        self.timer = None
+
+    def listBuilderNames(self):
+        return self.builderNames
+
+    def getPendingBuildTimes(self):
+        if self.nextBuildTime is not None:
+            return [self.nextBuildTime]
+        return []
+
+    def addChange(self, change):
+        if change.branch != self.branch:
+            log.msg("%s ignoring off-branch %s" % (self, change))
+            return
+        if not self.fileIsImportant:
+            self.addImportantChange(change)
+        elif self.fileIsImportant(change):
+            self.addImportantChange(change)
+        else:
+            self.addUnimportantChange(change)
+
+    def addImportantChange(self, change):
+        log.msg("%s: change is important, adding %s" % (self, change))
+        self.importantChanges.append(change)
+        self.nextBuildTime = max(self.nextBuildTime,
+                                 change.when + self.treeStableTimer)
+        self.setTimer(self.nextBuildTime)
+
+    def addUnimportantChange(self, change):
+        log.msg("%s: change is not important, adding %s" % (self, change))
+        self.unimportantChanges.append(change)
+
+    def setTimer(self, when):
+        log.msg("%s: setting timer to %s" %
+                (self, time.strftime("%H:%M:%S", time.localtime(when))))
+        now = util.now()
+        if when < now:
+            when = now + 1
+        if self.timer:
+            self.timer.cancel()
+        self.timer = reactor.callLater(when - now, self.fireTimer)
+
+    def stopTimer(self):
+        if self.timer:
+            self.timer.cancel()
+            self.timer = None
+
+    def fireTimer(self):
+        # clear out our state
+        self.timer = None
+        self.nextBuildTime = None
+        changes = self.importantChanges + self.unimportantChanges
+        self.importantChanges = []
+        self.unimportantChanges = []
+
+        # create a BuildSet, submit it to the BuildMaster
+        bs = buildset.BuildSet(self.builderNames,
+                               SourceStamp(changes=changes))
+        self.submit(bs)
+
+    def stopService(self):
+        self.stopTimer()
+        return service.MultiService.stopService(self)
+
+
+class AnyBranchScheduler(BaseUpstreamScheduler):
+    """This Scheduler will handle changes on a variety of branches. It will
+    accumulate Changes for each branch separately. It works by creating a
+    separate Scheduler for each new branch it sees."""
+
+    schedulerFactory = Scheduler
+    fileIsImportant = None
+
+    compare_attrs = ('name', 'branches', 'treeStableTimer', 'builderNames',
+                     'fileIsImportant')
+
+    def __init__(self, name, branches, treeStableTimer, builderNames,
+                 fileIsImportant=None):
+        """
+        @param name: the name of this Scheduler
+        @param branches: The branch names that the Scheduler should pay
+                         attention to. Any Change that is not on one of these
+                         branches will be ignored. It can be set to None to
+                         accept changes from any branch. Don't use [] (an
+                         empty list), because that means we don't pay
+                         attention to *any* branches, so we'll never build
+                         anything.
+        @param treeStableTimer: the duration, in seconds, for which the tree
+                                must remain unchanged before a build will be
+                                triggered. This is intended to avoid builds
+                                of partially-committed fixes.
+        @param builderNames: a list of Builder names. When this Scheduler
+                             decides to start a set of builds, they will be
+                             run on the Builders named by this list.
+
+        @param fileIsImportant: A callable which takes one argument (a Change
+                                instance) and returns True if the change is
+                                worth building, and False if it is not.
+                                Unimportant Changes are accumulated until the
+                                build is triggered by an important change.
+                                The default value of None means that all
+                                Changes are important.
+        """
+
+        BaseUpstreamScheduler.__init__(self, name)
+        self.treeStableTimer = treeStableTimer
+        for b in builderNames:
+            assert isinstance(b, str)
+        self.builderNames = builderNames
+        self.branches = branches
+        if self.branches == []:
+            log.msg("AnyBranchScheduler %s: branches=[], so we will ignore "
+                    "all branches, and never trigger any builds. Please set "
+                    "branches=None to mean 'all branches'" % self)
+            # consider raising an exception here, to make this warning more
+            # prominent, but I can vaguely imagine situations where you might
+            # want to comment out branches temporarily and wouldn't
+            # appreciate it being treated as an error.
+        if fileIsImportant:
+            assert callable(fileIsImportant)
+            self.fileIsImportant = fileIsImportant
+        self.schedulers = {} # one per branch
+
+    def __repr__(self):
+        return "<AnyBranchScheduler '%s'>" % self.name
+
+    def listBuilderNames(self):
+        return self.builderNames
+
+    def getPendingBuildTimes(self):
+        bts = []
+        for s in self.schedulers.values():
+            if s.nextBuildTime is not None:
+                bts.append(s.nextBuildTime)
+        return bts
+
+    def addChange(self, change):
+        branch = change.branch
+        if self.branches is not None and branch not in self.branches:
+            log.msg("%s ignoring off-branch %s" % (self, change))
+            return
+        s = self.schedulers.get(branch)
+        if not s:
+            if branch:
+                name = self.name + "." + branch
+            else:
+                name = self.name + ".<default>"
+            s = self.schedulerFactory(name, branch,
+                                      self.treeStableTimer,
+                                      self.builderNames,
+                                      self.fileIsImportant)
+            s.successWatchers = self.successWatchers
+            s.setServiceParent(self)
+            # TODO: does this result in schedulers that stack up forever?
+            # When I make the persistify-pass, think about this some more.
+            self.schedulers[branch] = s
+        s.addChange(change)
+
+    def submitBuildSet(self, bs):
+        self.parent.submitBuildSet(bs)
+
+
+class Dependent(BaseUpstreamScheduler):
+    """This scheduler runs some set of 'downstream' builds when the
+    'upstream' scheduler has completed successfully."""
+
+    compare_attrs = ('name', 'upstream', 'builders')
+
+    def __init__(self, name, upstream, builderNames):
+        assert providedBy(upstream, interfaces.IUpstreamScheduler)
+        BaseUpstreamScheduler.__init__(self, name)
+        self.upstream = upstream
+        self.builderNames = builderNames
+
+    def listBuilderNames(self):
+        return self.builderNames
+
+    def getPendingBuildTimes(self):
+        # report the upstream's value
+        return self.upstream.getPendingBuildTimes()
+
+    def startService(self):
+        service.MultiService.startService(self)
+        self.upstream.subscribeToSuccessfulBuilds(self.upstreamBuilt)
+
+    def stopService(self):
+        d = service.MultiService.stopService(self)
+        self.upstream.unsubscribeToSuccessfulBuilds(self.upstreamBuilt)
+        return d
+
+    def upstreamBuilt(self, ss):
+        bs = buildset.BuildSet(self.builderNames, ss)
+        self.submit(bs)
+
+
+
+class Periodic(BaseUpstreamScheduler):
+    """Instead of watching for Changes, this Scheduler can just start a build
+    at fixed intervals. The C{periodicBuildTimer} parameter sets the number
+    of seconds to wait between such periodic builds. The first build will be
+    run immediately."""
+
+    # TODO: consider having this watch another (change-based) scheduler and
+    # merely enforce a minimum time between builds.
+
+    compare_attrs = ('name', 'builderNames', 'periodicBuildTimer', 'branch')
+
+    def __init__(self, name, builderNames, periodicBuildTimer,
+                 branch=None):
+        BaseUpstreamScheduler.__init__(self, name)
+        self.builderNames = builderNames
+        self.periodicBuildTimer = periodicBuildTimer
+        self.branch = branch
+        self.reason = ("The Periodic scheduler named '%s' triggered this build"
+                       % name)
+        self.timer = internet.TimerService(self.periodicBuildTimer,
+                                           self.doPeriodicBuild)
+        self.timer.setServiceParent(self)
+
+    def listBuilderNames(self):
+        return self.builderNames
+
+    def getPendingBuildTimes(self):
+        # TODO: figure out when self.timer is going to fire next and report
+        # that
+        return []
+
+    def doPeriodicBuild(self):
+        bs = buildset.BuildSet(self.builderNames,
+                               SourceStamp(branch=self.branch),
+                               self.reason)
+        self.submit(bs)
+
+
+
+class Nightly(BaseUpstreamScheduler):
+    """Imitate 'cron' scheduling. This can be used to schedule a nightly
+    build, or one which runs at certain times of the day, week, or month.
+
+    Pass some subset of minute, hour, dayOfMonth, month, and dayOfWeek; each
+    may be a single number or a list of valid values. The builds will be
+    triggered whenever the current time matches these values. Wildcards are
+    represented by a '*' string. All fields default to a wildcard except
+    'minute', so with no fields this defaults to a build every hour, on the
+    hour.
+
+    For example, the following master.cfg clause will cause a build to be
+    started every night at 3:00am::
+
+     s = Nightly('nightly', ['builder1', 'builder2'], hour=3, minute=0)
+     c['schedulers'].append(s)
+
+    This scheduler will perform a build each monday morning at 6:23am and
+    again at 8:23am::
+
+     s = Nightly('BeforeWork', ['builder1'],
+                 dayOfWeek=0, hour=[6,8], minute=23)
+
+    The following runs a build every two hours::
+
+     s = Nightly('every2hours', ['builder1'], hour=range(0, 24, 2))
+
+    And this one will run only on December 24th::
+
+     s = Nightly('SleighPreflightCheck', ['flying_circuits', 'radar'],
+                 month=12, dayOfMonth=24, hour=12, minute=0)
+
+    For dayOfWeek and dayOfMonth, builds are triggered if the date matches
+    either of them. All time values are compared against the tuple returned
+    by time.localtime(), so month and dayOfMonth numbers start at 1, not
+    zero. dayOfWeek=0 is Monday, dayOfWeek=6 is Sunday.
+    """
+
+    compare_attrs = ('name', 'builderNames',
+                     'minute', 'hour', 'dayOfMonth', 'month',
+                     'dayOfWeek', 'branch')
+
+    def __init__(self, name, builderNames, minute=0, hour='*',
+                 dayOfMonth='*', month='*', dayOfWeek='*',
+                 branch=None):
+        # Setting minute=0 really makes this an 'Hourly' scheduler. This
+        # seemed like a better default than minute='*', which would result in
+        # a build every 60 seconds.
+        BaseUpstreamScheduler.__init__(self, name)
+        self.builderNames = builderNames
+        self.minute = minute
+        self.hour = hour
+        self.dayOfMonth = dayOfMonth
+        self.month = month
+        self.dayOfWeek = dayOfWeek
+        self.branch = branch
+        self.delayedRun = None
+        self.nextRunTime = None
+        self.reason = ("The Nightly scheduler named '%s' triggered this build"
+                       % name)
+
+    def addTime(self, timetuple, secs):
+        return time.localtime(time.mktime(timetuple)+secs)
+    def findFirstValueAtLeast(self, values, value, default=None):
+        for v in values:
+            if v >= value: return v
+        return default
+
+    def setTimer(self):
+        self.nextRunTime = self.calculateNextRunTime()
+        self.delayedRun = reactor.callLater(self.nextRunTime - time.time(),
+                                            self.doPeriodicBuild)
+
+    def startService(self):
+        BaseUpstreamScheduler.startService(self)
+        self.setTimer()
+
+    def stopService(self):
+        BaseUpstreamScheduler.stopService(self)
+        self.delayedRun.cancel()
+
+    def isRunTime(self, timetuple):
+        def check(ourvalue, value):
+            if ourvalue == '*': return True
+            if isinstance(ourvalue, int): return value == ourvalue
+            return (value in ourvalue)
+
+        if not check(self.minute, timetuple[4]):
+            #print 'bad minute', timetuple[4], self.minute
+            return False
+
+        if not check(self.hour, timetuple[3]):
+            #print 'bad hour', timetuple[3], self.hour
+            return False
+
+        if not check(self.month, timetuple[1]):
+            #print 'bad month', timetuple[1], self.month
+            return False
+
+        if self.dayOfMonth != '*' and self.dayOfWeek != '*':
+            # They specified both day(s) of month AND day(s) of week.
+            # This means that we only have to match one of the two. If
+            # neither one matches, this time is not the right time.
+            if not (check(self.dayOfMonth, timetuple[2]) or
+                    check(self.dayOfWeek, timetuple[6])):
+                #print 'bad day'
+                return False
+        else:
+            if not check(self.dayOfMonth, timetuple[2]):
+                #print 'bad day of month'
+                return False
+
+            if not check(self.dayOfWeek, timetuple[6]):
+                #print 'bad day of week'
+                return False
+
+        return True
+
+    def calculateNextRunTime(self):
+        return self.calculateNextRunTimeFrom(time.time())
+
+    def calculateNextRunTimeFrom(self, now):
+        dateTime = time.localtime(now)
+
+        # Remove seconds by advancing to at least the next minute
+        dateTime = self.addTime(dateTime, 60-dateTime[5])
+
+        # Now we just keep adding minutes until we find something that matches
+
+        # It's not an efficient algorithm, but it'll *work* for now
+        yearLimit = dateTime[0]+2
+        while not self.isRunTime(dateTime):
+            dateTime = self.addTime(dateTime, 60)
+            #print 'Trying', time.asctime(dateTime)
+            assert dateTime[0] < yearLimit, 'Something is wrong with this code'
+        return time.mktime(dateTime)
+
+    def listBuilderNames(self):
+        return self.builderNames
+
+    def getPendingBuildTimes(self):
+        # TODO: figure out when self.timer is going to fire next and report
+        # that
+        if self.nextRunTime is None: return []
+        return [self.nextRunTime]
+
+    def doPeriodicBuild(self):
+        # Schedule the next run
+        self.setTimer()
+
+        # And trigger a build
+        bs = buildset.BuildSet(self.builderNames,
+                               SourceStamp(branch=self.branch),
+                               self.reason)
+        self.submit(bs)
+
+    def addChange(self, change):
+        pass
+
+
+
+class TryBase(service.MultiService, util.ComparableMixin):
+    if implements:
+        implements(interfaces.IScheduler)
+    else:
+        __implements__ = (interfaces.IScheduler,
+                          service.MultiService.__implements__)
+
+    def __init__(self, name, builderNames):
+        service.MultiService.__init__(self)
+        self.name = name
+        self.builderNames = builderNames
+
+    def listBuilderNames(self):
+        return self.builderNames
+
+    def getPendingBuildTimes(self):
+        # we can't predict what the developers are going to do in the future
+        return []
+
+    def addChange(self, change):
+        # Try schedulers ignore Changes
+        pass
+
+
+class BadJobfile(Exception):
+    pass
+
+class JobFileScanner(basic.NetstringReceiver):
+    def __init__(self):
+        self.strings = []
+        self.transport = self # so transport.loseConnection works
+        self.error = False
+
+    def stringReceived(self, s):
+        self.strings.append(s)
+
+    def loseConnection(self):
+        self.error = True
+
+class Try_Jobdir(TryBase):
+    compare_attrs = ["name", "builderNames", "jobdir"]
+
+    def __init__(self, name, builderNames, jobdir):
+        TryBase.__init__(self, name, builderNames)
+        self.jobdir = jobdir
+        self.watcher = maildirtwisted.MaildirService()
+        self.watcher.setServiceParent(self)
+
+    def setServiceParent(self, parent):
+        self.watcher.setBasedir(os.path.join(parent.basedir, self.jobdir))
+        TryBase.setServiceParent(self, parent)
+
+    def parseJob(self, f):
+        # jobfiles are serialized build requests. Each is a list of
+        # serialized netstrings, in the following order:
+        #  "1", the version number of this format
+        #  buildsetID, arbitrary string, used to find the buildSet later
+        #  branch name, "" for default-branch
+        #  base revision
+        #  patchlevel, usually "1"
+        #  patch
+        #  builderNames...
+        p = JobFileScanner()
+        p.dataReceived(f.read())
+        if p.error:
+            raise BadJobfile("unable to parse netstrings")
+        s = p.strings
+        ver = s.pop(0)
+        if ver != "1":
+            raise BadJobfile("unknown version '%s'" % ver)
+        buildsetID, branch, baserev, patchlevel, diff = s[:5]
+        builderNames = s[5:]
+        if branch == "":
+            branch = None
+        patchlevel = int(patchlevel)
+        patch = (patchlevel, diff)
+        ss = SourceStamp(branch, baserev, patch)
+        return builderNames, ss, buildsetID
+
+    def messageReceived(self, filename):
+        md = os.path.join(self.parent.basedir, self.jobdir)
+        if runtime.platformType == "posix":
+            # open the file before moving it, because I'm afraid that once
+            # it's in cur/, someone might delete it at any moment
+            path = os.path.join(md, "new", filename)
+            f = open(path, "r")
+            os.rename(os.path.join(md, "new", filename),
+                      os.path.join(md, "cur", filename))
+        else:
+            # do this backwards under windows, because you can't move a file
+            # that somebody is holding open. This was causing a Permission
+            # Denied error on bear's win32-twisted1.3 buildslave.
+            os.rename(os.path.join(md, "new", filename),
+                      os.path.join(md, "cur", filename))
+            path = os.path.join(md, "cur", filename)
+            f = open(path, "r")
+
+        try:
+            builderNames, ss, bsid = self.parseJob(f)
+        except BadJobfile:
+            log.msg("%s reports a bad jobfile in %s" % (self, filename))
+            log.err()
+            return
+        # compare builderNames against self.builderNames
+        # TODO: think about this some more.. why bother restricting it?
+        # perhaps self.builderNames should be used as the default list
+        # instead of being used as a restriction?
+        for b in builderNames:
+            if not b in self.builderNames:
+                log.msg("%s got jobfile %s with builder %s" % (self,
+                                                               filename, b))
+                log.msg(" but that wasn't in our list: %s"
+                        % (self.builderNames,))
+                return
+
+        reason = "'try' job"
+        bs = buildset.BuildSet(builderNames, ss, reason=reason, bsid=bsid)
+        self.parent.submitBuildSet(bs)
+
+class Try_Userpass(TryBase):
+    compare_attrs = ["name", "builderNames", "port", "userpass"]
+
+    if implements:
+        implements(portal.IRealm)
+    else:
+        __implements__ = (portal.IRealm,
+                          TryBase.__implements__)
+
+    def __init__(self, name, builderNames, port, userpass):
+        TryBase.__init__(self, name, builderNames)
+        if type(port) is int:
+            port = "tcp:%d" % port
+        self.port = port
+        self.userpass = userpass
+        c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
+        for user,passwd in self.userpass:
+            c.addUser(user, passwd)
+
+        p = portal.Portal(self)
+        p.registerChecker(c)
+        f = pb.PBServerFactory(p)
+        s = strports.service(port, f)
+        s.setServiceParent(self)
+
+    def getPort(self):
+        # utility method for tests: figure out which TCP port we just opened.
+        return self.services[0]._port.getHost().port
+
+    def requestAvatar(self, avatarID, mind, interface):
+        log.msg("%s got connection from user %s" % (self, avatarID))
+        assert interface == pb.IPerspective
+        p = Try_Userpass_Perspective(self, avatarID)
+        return (pb.IPerspective, p, lambda: None)
+
+    def submitBuildSet(self, bs):
+        return self.parent.submitBuildSet(bs)
+
+class Try_Userpass_Perspective(pbutil.NewCredPerspective):
+    def __init__(self, parent, username):
+        self.parent = parent
+        self.username = username
+
+    def perspective_try(self, branch, revision, patch, builderNames):
+        log.msg("user %s requesting build on builders %s" % (self.username,
+                                                             builderNames))
+        for b in builderNames:
+            if not b in self.parent.builderNames:
+                log.msg("%s got job with builder %s" % (self, b))
+                log.msg(" but that wasn't in our list: %s"
+                        % (self.parent.builderNames,))
+                return
+        ss = SourceStamp(branch, revision, patch)
+        reason = "'try' job from user %s" % self.username
+        bs = buildset.BuildSet(builderNames, ss, reason=reason)
+        self.parent.submitBuildSet(bs)
+
+        # return a remotely-usable BuildSetStatus object
+        from buildbot.status.client import makeRemote
+        return makeRemote(bs.status)
+
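
Taken together, a hedged master.cfg fragment using these scheduler classes might read as follows; the builder names, nightly hour, try port, and credentials are all hypothetical, and c['schedulers'] is the configuration key referred to in the Nightly docstring:

    from buildbot.scheduler import Scheduler, Dependent, Nightly, Try_Userpass

    quick = Scheduler(name="quick", branch=None, treeStableTimer=60,
                      builderNames=["quick-linux"])
    nightly = Nightly(name="nightly", builderNames=["full-linux"],
                      hour=3, minute=0)            # 03:00 local time
    packages = Dependent(name="packages", upstream=quick,
                         builderNames=["debs"])
    trying = Try_Userpass(name="try", builderNames=["quick-linux"],
                          port=8031, userpass=[("alice", "sekrit")])

    c['schedulers'] = [quick, nightly, packages, trying]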

Added: vendor/buildbot/current/buildbot/scripts/__init__.py
===================================================================

Added: vendor/buildbot/current/buildbot/scripts/logwatcher.py
===================================================================
--- vendor/buildbot/current/buildbot/scripts/logwatcher.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/scripts/logwatcher.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,95 @@
+
+import os
+from twisted.python.failure import Failure
+from twisted.internet import task, defer, reactor
+from twisted.protocols.basic import LineOnlyReceiver
+
+class FakeTransport:
+    disconnecting = False
+
+class BuildmasterTimeoutError(Exception):
+    pass
+class BuildslaveTimeoutError(Exception):
+    pass
+class ReconfigError(Exception):
+    pass
+class BuildSlaveDetectedError(Exception):
+    pass
+
+class LogWatcher(LineOnlyReceiver):
+    POLL_INTERVAL = 0.1
+    TIMEOUT_DELAY = 5.0
+    delimiter = os.linesep
+
+    def __init__(self, logfile):
+        self.logfile = logfile
+        self.in_reconfig = False
+        self.transport = FakeTransport()
+        self.f = None
+        self.processtype = "buildmaster"
+
+    def start(self):
+        # return a Deferred that fires when the reconfig process has
+        # finished. It errbacks with TimeoutError if the finish line has not
+        # been seen within 5 seconds, and with ReconfigError if the error
+        # line was seen. If the logfile could not be opened, it errbacks with
+        # an IOError.
+        self.running = True
+        d = defer.maybeDeferred(self._start)
+        return d
+
+    def _start(self):
+        self.d = defer.Deferred()
+        try:
+            self.f = open(self.logfile, "rb")
+            self.f.seek(0, 2) # start watching from the end
+        except IOError:
+            pass
+        reactor.callLater(self.TIMEOUT_DELAY, self.timeout)
+        self.poller = task.LoopingCall(self.poll)
+        self.poller.start(self.POLL_INTERVAL)
+        return self.d
+
+    def timeout(self):
+        if self.processtype == "buildmaster":
+            self.d.errback(BuildmasterTimeoutError())
+        else:
+            self.d.errback(BuildslaveTimeoutError())
+
+    def finished(self, results):
+        self.running = False
+        self.in_reconfig = False
+        self.d.callback(results)
+
+    def lineReceived(self, line):
+        if not self.running:
+            return
+        if "Log opened." in line:
+            self.in_reconfig = True
+        if "loading configuration from" in line:
+            self.in_reconfig = True
+        if "Creating BuildSlave" in line:
+            self.processtype = "buildslave"
+
+        if self.in_reconfig:
+            print line
+
+        if "message from master: attached" in line:
+            return self.finished("buildslave")
+        if "I will keep using the previous config file" in line:
+            return self.finished(Failure(ReconfigError()))
+        if "configuration update complete" in line:
+            return self.finished("buildmaster")
+
+    def poll(self):
+        if not self.f:
+            try:
+                self.f = open(self.logfile, "rb")
+            except IOError:
+                return
+        while True:
+            data = self.f.read(1000)
+            if not data:
+                return
+            self.dataReceived(data)
+
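
For reference, reconfig.py (below) drives this class roughly as sketched here; the two callbacks are hypothetical stand-ins for whatever the caller wants to do, and a running Twisted reactor is required for the polling LoopingCall to fire:

    from buildbot.scripts.logwatcher import LogWatcher

    def on_success(result):
        # result is "buildmaster" or "buildslave"
        print "finished:", result

    def on_failure(why):
        # why wraps ReconfigError or one of the timeout exceptions
        print "failed:", why

    lw = LogWatcher("twistd.log")
    d = lw.start()
    d.addCallbacks(on_success, on_failure)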

Added: vendor/buildbot/current/buildbot/scripts/reconfig.py
===================================================================
--- vendor/buildbot/current/buildbot/scripts/reconfig.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/scripts/reconfig.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,63 @@
+
+import os, signal
+from twisted.internet import reactor
+
+from buildbot.scripts.logwatcher import LogWatcher, BuildmasterTimeoutError, \
+     ReconfigError
+
+class Reconfigurator:
+    def run(self, config):
+
+        basedir = config['basedir']
+        quiet = config['quiet']
+        os.chdir(basedir)
+        f = open("twistd.pid", "rt")
+        self.pid = int(f.read().strip())
+        if quiet:
+            os.kill(self.pid, signal.SIGHUP)
+            return
+
+        # keep reading twistd.log. Display all messages between "loading
+        # configuration from ..." and "configuration update complete" or
+        # "I will keep using the previous config file instead.", or until
+        # 5 seconds have elapsed.
+
+        self.sent_signal = False
+        lw = LogWatcher("twistd.log")
+        d = lw.start()
+        d.addCallbacks(self.success, self.failure)
+        reactor.callLater(0.2, self.sighup)
+        reactor.run()
+
+    def sighup(self):
+        if self.sent_signal:
+            return
+        print "sending SIGHUP to process %d" % self.pid
+        self.sent_signal = True
+        os.kill(self.pid, signal.SIGHUP)
+
+    def success(self, res):
+        print """
+Reconfiguration appears to have completed successfully.
+"""
+        reactor.stop()
+
+    def failure(self, why):
+        if why.check(BuildmasterTimeoutError):
+            print "Never saw reconfiguration finish."
+        elif why.check(ReconfigError):
+            print """
+Reconfiguration failed. Please inspect the master.cfg file for errors,
+correct them, then try 'buildbot reconfig' again.
+"""
+        elif why.check(IOError):
+            # we were probably unable to open the file in the first place
+            self.sighup()
+        else:
+            print "Error while following twistd.log: %s" % why
+        reactor.stop()
+
+def reconfig(config):
+    r = Reconfigurator()
+    r.run(config)
+
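
This module is the code path behind 'buildbot reconfig <basedir>'. Called directly, run() only needs the two keys it reads, so a plain dictionary (with a hypothetical path) is enough for a sketch:

    from buildbot.scripts.reconfig import reconfig

    reconfig({'basedir': "/home/buildbot/master",   # hypothetical master directory
              'quiet': False})                      # False: follow twistd.log until done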

Added: vendor/buildbot/current/buildbot/scripts/runner.py
===================================================================
--- vendor/buildbot/current/buildbot/scripts/runner.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/scripts/runner.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,735 @@
+# -*- test-case-name: buildbot.test.test_runner -*-
+
+# N.B.: don't import anything that might pull in a reactor yet. Some of our
+# subcommands want to load modules that need the gtk reactor.
+import os, sys, stat, re, time
+from twisted.python import usage, util, runtime
+
+# this is mostly just a front-end for mktap, twistd, and kill(1), but in the
+# future it will also provide an interface to some developer tools that talk
+# directly to a remote buildmaster (like 'try' and a status client)
+
+# the create/start/stop commands should all be run as the same user,
+# preferably a separate 'buildbot' account.
+
+class MakerBase(usage.Options):
+    optFlags = [
+        ['help', 'h', "Display this message"],
+        ["quiet", "q", "Do not emit the commands being run"],
+        ]
+
+    #["basedir", "d", None, "Base directory for the buildmaster"],
+    opt_h = usage.Options.opt_help
+
+    def parseArgs(self, *args):
+        if len(args) > 0:
+            self['basedir'] = args[0]
+        else:
+            self['basedir'] = None
+        if len(args) > 1:
+            raise usage.UsageError("I wasn't expecting so many arguments")
+
+    def postOptions(self):
+        if self['basedir'] is None:
+            raise usage.UsageError("<basedir> parameter is required")
+        self['basedir'] = os.path.abspath(self['basedir'])
+
+makefile_sample = """# -*- makefile -*-
+
+# This is a simple makefile which lives in a buildmaster/buildslave
+# directory (next to the buildbot.tac file). It allows you to start/stop the
+# master or slave by doing 'make start' or 'make stop'.
+
+# The 'reconfig' target will tell a buildmaster to reload its config file.
+
+start:
+	twistd --no_save -y buildbot.tac
+
+stop:
+	kill `cat twistd.pid`
+
+reconfig:
+	kill -HUP `cat twistd.pid`
+
+log:
+	tail -f twistd.log
+"""
+
+class Maker:
+    def __init__(self, config):
+        self.config = config
+        self.basedir = config['basedir']
+        self.force = config['force']
+        self.quiet = config['quiet']
+
+    def mkdir(self):
+        if os.path.exists(self.basedir):
+            if not self.quiet:
+                print "updating existing installation"
+            return
+        if not self.quiet: print "mkdir", self.basedir
+        os.mkdir(self.basedir)
+
+    def mkinfo(self):
+        path = os.path.join(self.basedir, "info")
+        if not os.path.exists(path):
+            if not self.quiet: print "mkdir", path
+            os.mkdir(path)
+        created = False
+        admin = os.path.join(path, "admin")
+        if not os.path.exists(admin):
+            if not self.quiet:
+                print "Creating info/admin, you need to edit it appropriately"
+            f = open(admin, "wt")
+            f.write("Your Name Here <admin@youraddress.invalid>\n")
+            f.close()
+            created = True
+        host = os.path.join(path, "host")
+        if not os.path.exists(host):
+            if not self.quiet:
+                print "Creating info/host, you need to edit it appropriately"
+            f = open(host, "wt")
+            f.write("Please put a description of this build host here\n")
+            f.close()
+            created = True
+        if created and not self.quiet:
+            print "Please edit the files in %s appropriately." % path
+
+    def chdir(self):
+        if not self.quiet: print "chdir", self.basedir
+        os.chdir(self.basedir)
+
+    def makeTAC(self, contents, secret=False):
+        tacfile = "buildbot.tac"
+        if os.path.exists(tacfile):
+            oldcontents = open(tacfile, "rt").read()
+            if oldcontents == contents:
+                if not self.quiet:
+                    print "buildbot.tac already exists and is correct"
+                return
+            if not self.quiet:
+                print "not touching existing buildbot.tac"
+                print "creating buildbot.tac.new instead"
+            tacfile = "buildbot.tac.new"
+        f = open(tacfile, "wt")
+        f.write(contents)
+        f.close()
+        if secret:
+            os.chmod(tacfile, 0600)
+
+    def makefile(self):
+        target = "Makefile.sample"
+        if os.path.exists(target):
+            oldcontents = open(target, "rt").read()
+            if oldcontents == makefile_sample:
+                if not self.quiet:
+                    print "Makefile.sample already exists and is correct"
+                return
+            if not self.quiet:
+                print "replacing Makefile.sample"
+        else:
+            if not self.quiet:
+                print "creating Makefile.sample"
+        f = open(target, "wt")
+        f.write(makefile_sample)
+        f.close()
+
+    def sampleconfig(self, source):
+        target = "master.cfg.sample"
+        config_sample = open(source, "rt").read()
+        if os.path.exists(target):
+            oldcontents = open(target, "rt").read()
+            if oldcontents == config_sample:
+                if not self.quiet:
+                    print "master.cfg.sample already exists and is up-to-date"
+                return
+            if not self.quiet:
+                print "replacing master.cfg.sample"
+        else:
+            if not self.quiet:
+                print "creating master.cfg.sample"
+        f = open(target, "wt")
+        f.write(config_sample)
+        f.close()
+        os.chmod(target, 0600)
+
+class MasterOptions(MakerBase):
+    optFlags = [
+        ["force", "f",
+         "Re-use an existing directory (will not overwrite master.cfg file)"],
+        ]
+    optParameters = [
+        ["config", "c", "master.cfg", "name of the buildmaster config file"],
+        ]
+    def getSynopsis(self):
+        return "Usage:    buildbot create-master [options] <basedir>"
+
+    longdesc = """
+    This command creates a buildmaster working directory and buildbot.tac
+    file. The master will live in <dir> and create various files there.
+
+    At runtime, the master will read a configuration file (named
+    'master.cfg' by default) in its basedir. This file should contain python
+    code which eventually defines a dictionary named 'BuildmasterConfig'.
+    The elements of this dictionary are used to configure the Buildmaster.
+    See doc/config.xhtml for details about what can be controlled through
+    this interface."""
+
+masterTAC = """
+from twisted.application import service
+from buildbot.master import BuildMaster
+
+basedir = r'%(basedir)s'
+configfile = r'%(config)s'
+
+application = service.Application('buildmaster')
+BuildMaster(basedir, configfile).setServiceParent(application)
+
+"""
+
+def createMaster(config):
+    m = Maker(config)
+    m.mkdir()
+    m.chdir()
+    contents = masterTAC % config
+    m.makeTAC(contents)
+    m.sampleconfig(util.sibpath(__file__, "sample.cfg"))
+    m.makefile()
+
+    if not m.quiet: print "buildmaster configured in %s" % m.basedir
+
+class SlaveOptions(MakerBase):
+    optFlags = [
+        ["force", "f", "Re-use an existing directory"],
+        ]
+    optParameters = [
+#        ["name", "n", None, "Name for this build slave"],
+#        ["passwd", "p", None, "Password for this build slave"],
+#        ["basedir", "d", ".", "Base directory to use"],
+#        ["master", "m", "localhost:8007",
+#         "Location of the buildmaster (host:port)"],
+
+        ["keepalive", "k", 600,
+         "Interval at which keepalives should be sent (in seconds)"],
+        ["usepty", None, 1,
+         "(1 or 0) child processes should be run in a pty"],
+        ["umask", None, "None",
+         "controls permissions of generated files. Use --umask=022 to be world-readable"],
+        ]
+    
+    longdesc = """
+    This command creates a buildslave working directory and buildbot.tac
+    file. The bot will use the <name> and <passwd> arguments to authenticate
+    itself when connecting to the master. All commands are run in a
+    build-specific subdirectory of <basedir>. <master> is a string of the
+    form 'hostname:port', and specifies where the buildmaster can be reached.
+
+    <name>, <passwd>, and <master> will be provided by the buildmaster
+    administrator for your bot. You must choose <basedir> yourself.
+    """
+
+    def getSynopsis(self):
+        return "Usage:    buildbot create-slave [options] <basedir> <master> <name> <passwd>"
+
+    def parseArgs(self, *args):
+        if len(args) < 4:
+            raise usage.UsageError("command needs more arguments")
+        basedir, master, name, passwd = args
+        self['basedir'] = basedir
+        self['master'] = master
+        self['name'] = name
+        self['passwd'] = passwd
+
+    def postOptions(self):
+        MakerBase.postOptions(self)
+        self['usepty'] = int(self['usepty'])
+        self['keepalive'] = int(self['keepalive'])
+        if self['master'].find(":") == -1:
+            raise usage.UsageError("--master must be in the form host:portnum")
+
+slaveTAC = """
+from twisted.application import service
+from buildbot.slave.bot import BuildSlave
+
+basedir = r'%(basedir)s'
+host = '%(host)s'
+port = %(port)d
+slavename = '%(name)s'
+passwd = '%(passwd)s'
+keepalive = %(keepalive)d
+usepty = %(usepty)d
+umask = %(umask)s
+
+application = service.Application('buildslave')
+s = BuildSlave(host, port, slavename, passwd, basedir, keepalive, usepty,
+               umask=umask)
+s.setServiceParent(application)
+
+"""
+
+def createSlave(config):
+    m = Maker(config)
+    m.mkdir()
+    m.chdir()
+    try:
+        master = config['master']
+        host, port = re.search(r'(.+):(\d+)', master).groups()
+        config['host'] = host
+        config['port'] = int(port)
+    except:
+        print "unparseable master location '%s'" % master
+        print " expecting something more like localhost:8007"
+        raise
+    contents = slaveTAC % config
+
+    m.makeTAC(contents, secret=True)
+
+    m.makefile()
+    m.mkinfo()
+
+    if not m.quiet: print "buildslave configured in %s" % m.basedir
+
+
+
+def stop(config, signame="TERM", wait=False):
+    import signal
+    basedir = config['basedir']
+    quiet = config['quiet']
+    os.chdir(basedir)
+    f = open("twistd.pid", "rt")
+    pid = int(f.read().strip())
+    signum = getattr(signal, "SIG"+signame)
+    timer = 0
+    os.kill(pid, signum)
+    if not wait:
+        if not quiet:
+            print "sent SIG%s to process" % signame
+        return
+    time.sleep(0.1)
+    while timer < 5:
+        # poll once per second until twistd.pid goes away, up to 5 seconds
+        try:
+            os.kill(pid, 0)
+        except OSError:
+            if not quiet:
+                print "buildbot process %d is dead" % pid
+            return
+        timer += 1
+        time.sleep(1)
+    if not quiet:
+        print "never saw process go away"
+
+def restart(config):
+    quiet = config['quiet']
+    from buildbot.scripts.startup import start
+    stop(config, wait=True)
+    if not quiet:
+        print "now restarting buildbot process.."
+    start(config)
+
+
+def loadOptions(filename="options", here=None, home=None):
+    """Find the .buildbot/FILENAME file. Crawl from the current directory up
+    towards the root, and also look in ~/.buildbot . The first directory
+    that's owned by the user and has the file we're looking for wins. Windows
+    skips the owned-by-user test.
+    
+    @rtype:  dict
+    @return: a dictionary of names defined in the options file. If no options
+             file was found, return an empty dict.
+    """
+
+    if here is None:
+        here = os.getcwd()
+    here = os.path.abspath(here)
+
+    if home is None:
+        if runtime.platformType == 'win32':
+            home = os.path.join(os.environ['APPDATA'], "buildbot")
+        else:
+            home = os.path.expanduser("~/.buildbot")
+
+    searchpath = []
+    toomany = 20
+    while True:
+        searchpath.append(os.path.join(here, ".buildbot"))
+        next = os.path.dirname(here)
+        if next == here:
+            break # we've hit the root
+        here = next
+        toomany -= 1 # just in case
+        if toomany == 0:
+            raise ValueError("Hey, I seem to have wandered up into the "
+                             "infinite glories of the heavens. Oops.")
+    searchpath.append(home)
+
+    localDict = {}
+
+    for d in searchpath:
+        if os.path.isdir(d):
+            if runtime.platformType != 'win32':
+                if os.stat(d)[stat.ST_UID] != os.getuid():
+                    print "skipping %s because you don't own it" % d
+                    continue # security, skip other people's directories
+            optfile = os.path.join(d, filename)
+            if os.path.exists(optfile):
+                try:
+                    f = open(optfile, "r")
+                    options = f.read()
+                    exec options in localDict
+                except:
+                    print "error while reading %s" % optfile
+                    raise
+                break
+
+    for k in localDict.keys():
+        if k.startswith("__"):
+            del localDict[k]
+    return localDict
+
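+# An illustrative ~/.buildbot/options file (every name is optional, and these
+# particular values are hypothetical); the names are the ones consulted by
+# the commands below:
+#
+#   master = 'buildmaster.example.org:9989'
+#   masterstatus = 'buildmaster.example.org:9988'
+#   username = 'alice'
+#   debugPassword = 'sekrit'
+#   try_connect = 'ssh'
+#   try_host = 'buildmaster.example.org'
+#   try_username = 'alice'
+#   try_dir = '/home/buildbot/master/jobdir'
+#   try_builders = ['full']
+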
+class StartOptions(MakerBase):
+    optFlags = [
+        ['quiet', 'q', "Don't display startup log messages"],
+        ]
+    def getSynopsis(self):
+        return "Usage:    buildbot start <basedir>"
+
+class StopOptions(MakerBase):
+    def getSynopsis(self):
+        return "Usage:    buildbot stop <basedir>"
+
+class ReconfigOptions(MakerBase):
+    optFlags = [
+        ['quiet', 'q', "Don't display log messages about reconfiguration"],
+        ]
+    def getSynopsis(self):
+        return "Usage:    buildbot reconfig <basedir>"
+
+
+
+class RestartOptions(MakerBase):
+    optFlags = [
+        ['quiet', 'q', "Don't display startup log messages"],
+        ]
+    def getSynopsis(self):
+        return "Usage:    buildbot restart <basedir>"
+
+class DebugClientOptions(usage.Options):
+    optFlags = [
+        ['help', 'h', "Display this message"],
+        ]
+    optParameters = [
+        ["master", "m", None,
+         "Location of the buildmaster's slaveport (host:port)"],
+        ["passwd", "p", None, "Debug password to use"],
+        ]
+
+    def parseArgs(self, *args):
+        if len(args) > 0:
+            self['master'] = args[0]
+        if len(args) > 1:
+            self['passwd'] = args[1]
+        if len(args) > 2:
+            raise usage.UsageError("I wasn't expecting so many arguments")
+
+def debugclient(config):
+    from buildbot.clients import debug
+    opts = loadOptions()
+
+    master = config.get('master')
+    if not master:
+        master = opts.get('master')
+    if master is None:
+        raise usage.UsageError("master must be specified: on the command "
+                               "line or in ~/.buildbot/options")
+
+    passwd = config.get('passwd')
+    if not passwd:
+        passwd = opts.get('debugPassword')
+    if passwd is None:
+        raise usage.UsageError("passwd must be specified: on the command "
+                               "line or in ~/.buildbot/options")
+
+    d = debug.DebugWidget(master, passwd)
+    d.run()
+
+class StatusClientOptions(usage.Options):
+    optFlags = [
+        ['help', 'h', "Display this message"],
+        ]
+    optParameters = [
+        ["master", "m", None,
+         "Location of the buildmaster's status port (host:port)"],
+        ]
+
+    def parseArgs(self, *args):
+        if len(args) > 0:
+            self['master'] = args[0]
+        if len(args) > 1:
+            raise usage.UsageError("I wasn't expecting so many arguments")
+
+def statuslog(config):
+    from buildbot.clients import base
+    opts = loadOptions()
+    master = config.get('master')
+    if not master:
+        master = opts.get('masterstatus')
+    if master is None:
+        raise usage.UsageError("master must be specified: on the command "
+                               "line or in ~/.buildbot/options")
+    c = base.TextClient(master)
+    c.run()
+
+def statusgui(config):
+    from buildbot.clients import gtkPanes
+    opts = loadOptions()
+    master = config.get('master')
+    if not master:
+        master = opts.get('masterstatus')
+    if master is None:
+        raise usage.UsageError("master must be specified: on the command "
+                               "line or in ~/.buildbot/options")
+    c = gtkPanes.GtkClient(master)
+    c.run()
+
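+# Illustrative command lines (host and port are hypothetical); the master
+# location can also come from 'masterstatus' in ~/.buildbot/options:
+#
+#   buildbot statuslog --master buildmaster.example.org:9988
+#   buildbot statusgui --master buildmaster.example.org:9988
+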
+class SendChangeOptions(usage.Options):
+    optParameters = [
+        ("master", "m", None,
+         "Location of the buildmaster's PBListener (host:port)"),
+        ("username", "u", None, "Username performing the commit"),
+        ("branch", "b", None, "Branch specifier"),
+        ("revision", "r", None, "Revision specifier (string)"),
+        ("revision_number", "n", None, "Revision specifier (integer)"),
+        ("revision_file", None, None, "Filename containing revision spec"),
+        ("comments", "m", None, "log message"),
+        ("logfile", "F", None,
+         "Read the log messages from this file (- for stdin)"),
+        ]
+    def getSynopsis(self):
+        return "Usage:    buildbot sendchange [options] filenames.."
+    def parseArgs(self, *args):
+        self['files'] = args
+
+
+def sendchange(config, runReactor=False):
+    """Send a single change to the buildmaster's PBChangeSource. The
+    connection will be dropped as soon as the Change has been sent."""
+    from buildbot.clients.sendchange import Sender
+
+    opts = loadOptions()
+    user = config.get('username', opts.get('username'))
+    master = config.get('master', opts.get('master'))
+    branch = config.get('branch', opts.get('branch'))
+    revision = config.get('revision')
+    # SVN and P4 use numeric revisions
+    if config.get("revision_number"):
+        revision = int(config['revision_number'])
+    if config.get("revision_file"):
+        revision = open(config["revision_file"],"r").read()
+
+    comments = config.get('comments')
+    if not comments and config.get('logfile'):
+        if config['logfile'] == "-":
+            f = sys.stdin
+        else:
+            f = open(config['logfile'], "rt")
+        comments = f.read()
+    if comments is None:
+        comments = ""
+
+    files = config.get('files', [])
+
+    assert user, "you must provide a username"
+    assert master, "you must provide the master location"
+
+    s = Sender(master, user)
+    d = s.send(branch, revision, comments, files)
+    if runReactor:
+        d.addCallbacks(s.printSuccess, s.printFailure)
+        d.addBoth(s.stop)
+        s.run()
+    return d
+
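+# Illustrative use from a repository commit hook (all values hypothetical);
+# the long option names match SendChangeOptions above:
+#
+#   buildbot sendchange --master buildmaster.example.org:9989 \
+#       --username alice --branch trunk --revision 1234 \
+#       --comments "fix the scheduler" buildbot/scheduler.py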
+
+class ForceOptions(usage.Options):
+    optParameters = [
+        ["builder", None, None, "which Builder to start"],
+        ["branch", None, None, "which branch to build"],
+        ["revision", None, None, "which revision to build"],
+        ["reason", None, None, "the reason for starting the build"],
+        ]
+
+    def parseArgs(self, *args):
+        args = list(args)
+        if len(args) > 0:
+            if self['builder'] is not None:
+                raise usage.UsageError("--builder provided in two ways")
+            self['builder'] = args.pop(0)
+        if len(args) > 0:
+            if self['reason'] is not None:
+                raise usage.UsageError("--reason provided in two ways")
+            self['reason'] = " ".join(args)
+
+
+class TryOptions(usage.Options):
+    optParameters = [
+        ["connect", "c", None,
+         "how to reach the buildmaster, either 'ssh' or 'pb'"],
+        # for ssh, use --tryhost, --username, and --trydir
+        ["tryhost", None, None,
+         "the hostname (used by ssh) for the buildmaster"],
+        ["trydir", None, None,
+         "the directory (on the tryhost) where tryjobs are deposited"],
+        ["username", "u", None, "Username performing the trial build"],
+        # for PB, use --master, --username, and --passwd
+        ["master", "m", None,
+         "Location of the buildmaster's PBListener (host:port)"],
+        ["passwd", None, None, "password for PB authentication"],
+        
+        ["vc", None, None,
+         "The VC system in use, one of: cvs,svn,tla,baz,darcs"],
+        ["branch", None, None,
+         "The branch in use, for VC systems that can't figure it out"
+         " themselves"],
+
+        ["builder", "b", None,
+         "Run the trial build on this Builder. Can be used multiple times."],
+        ]
+
+    optFlags = [
+        ["wait", None, "wait until the builds have finished"],
+        ]
+
+    def __init__(self):
+        super(TryOptions, self).__init__()
+        self['builders'] = []
+
+    def opt_builder(self, option):
+        self['builders'].append(option)
+
+    def getSynopsis(self):
+        return "Usage:    buildbot try [options]"
+
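+# Illustrative invocations (hostnames, paths, ports, and passwords are
+# hypothetical), one per connect style:
+#
+#   buildbot try --connect=ssh --tryhost=buildmaster.example.org \
+#       --username=alice --trydir=/home/buildbot/master/jobdir \
+#       --vc=svn --builder=full
+#
+#   buildbot try --connect=pb --master=buildmaster.example.org:8031 \
+#       --username=alice --passwd=trypw --vc=svn --builder=full
+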
+def doTry(config):
+    from buildbot.scripts import tryclient
+    t = tryclient.Try(config)
+    t.run()
+
+class TryServerOptions(usage.Options):
+    optParameters = [
+        ["jobdir", None, None, "the jobdir (maildir) for submitting jobs"],
+        ]
+
+def doTryServer(config):
+    import md5
+    jobdir = os.path.expanduser(config["jobdir"])
+    job = sys.stdin.read()
+    # now do a 'safecat'-style write to jobdir/tmp, then move atomically to
+    # jobdir/new . Rather than come up with a unique name randomly, I'm just
+    # going to MD5 the contents and prepend a timestamp.
+    timestring = "%d" % time.time()
+    jobhash = md5.new(job).hexdigest()
+    fn = "%s-%s" % (timestring, jobhash)
+    tmpfile = os.path.join(jobdir, "tmp", fn)
+    newfile = os.path.join(jobdir, "new", fn)
+    f = open(tmpfile, "w")
+    f.write(job)
+    f.close()
+    os.rename(tmpfile, newfile)
+
+
+class Options(usage.Options):
+    synopsis = "Usage:    buildbot <command> [command options]"
+
+    subCommands = [
+        # the following are all admin commands
+        ['create-master', None, MasterOptions,
+         "Create and populate a directory for a new buildmaster"],
+        ['create-slave', None, SlaveOptions,
+         "Create and populate a directory for a new buildslave"],
+        ['start', None, StartOptions, "Start a buildmaster or buildslave"],
+        ['stop', None, StopOptions, "Stop a buildmaster or buildslave"],
+        ['restart', None, RestartOptions,
+         "Restart a buildmaster or buildslave"],
+
+        ['reconfig', None, ReconfigOptions,
+         "SIGHUP a buildmaster to make it re-read the config file"],
+        ['sighup', None, ReconfigOptions,
+         "SIGHUP a buildmaster to make it re-read the config file"],
+
+        ['sendchange', None, SendChangeOptions,
+         "Send a change to the buildmaster"],
+
+        ['debugclient', None, DebugClientOptions,
+         "Launch a small debug panel GUI"],
+
+        ['statuslog', None, StatusClientOptions,
+         "Emit current builder status to stdout"],
+        ['statusgui', None, StatusClientOptions,
+         "Display a small window showing current builder status"],
+
+        #['force', None, ForceOptions, "Run a build"],
+        ['try', None, TryOptions, "Run a build with your local changes"],
+
+        ['tryserver', None, TryServerOptions,
+         "buildmaster-side 'try' support function, not for users"],
+
+        # TODO: 'watch'
+        ]
+
+    def opt_version(self):
+        import buildbot
+        print "Buildbot version: %s" % buildbot.version
+        usage.Options.opt_version(self)
+
+    def opt_verbose(self):
+        from twisted.python import log
+        log.startLogging(sys.stderr)
+
+    def postOptions(self):
+        if not hasattr(self, 'subOptions'):
+            raise usage.UsageError("must specify a command")
+
+
+def run():
+    config = Options()
+    try:
+        config.parseOptions()
+    except usage.error, e:
+        print "%s:  %s" % (sys.argv[0], e)
+        print
+        c = getattr(config, 'subOptions', config)
+        print str(c)
+        sys.exit(1)
+
+    command = config.subCommand
+    so = config.subOptions
+
+    if command == "create-master":
+        createMaster(so)
+    elif command == "create-slave":
+        createSlave(so)
+    elif command == "start":
+        from buildbot.scripts.startup import start
+        start(so)
+    elif command == "stop":
+        stop(so, wait=True)
+    elif command == "restart":
+        restart(so)
+    elif command == "reconfig" or command == "sighup":
+        from buildbot.scripts.reconfig import Reconfigurator
+        Reconfigurator().run(so)
+    elif command == "sendchange":
+        sendchange(so, True)
+    elif command == "debugclient":
+        debugclient(so)
+    elif command == "statuslog":
+        statuslog(so)
+    elif command == "statusgui":
+        statusgui(so)
+    elif command == "try":
+        doTry(so)
+    elif command == "tryserver":
+        doTryServer(so)
+
+

Added: vendor/buildbot/current/buildbot/scripts/sample.cfg
===================================================================
--- vendor/buildbot/current/buildbot/scripts/sample.cfg	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/scripts/sample.cfg	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,175 @@
+# -*- python -*-
+# ex: set syntax=python:
+
+# This is a sample buildmaster config file. It must be installed as
+# 'master.cfg' in your buildmaster's base directory (the directory you gave
+# to 'buildbot create-master').
+
+# It has one job: define a dictionary named BuildmasterConfig. This
+# dictionary has a variety of keys to control different aspects of the
+# buildmaster. They are documented in docs/config.xhtml .
+
+
+# This is the dictionary that the buildmaster pays attention to. We also use
+# a shorter alias to save typing.
+c = BuildmasterConfig = {}
+
+####### BUILDSLAVES
+
+# the 'bots' list defines the set of allowable buildslaves. Each element is a
+# tuple of bot-name and bot-password. These correspond to the values given
+# when the buildslave was created with 'buildbot create-slave'.
+c['bots'] = [("bot1name", "bot1passwd")]
+
+
+# 'slavePortnum' defines the TCP port to listen on. This must match the value
+# configured into the buildslaves (with their --master option)
+
+c['slavePortnum'] = 9989
+
+
+####### CHANGESOURCES
+
+# the 'sources' list tells the buildmaster how it should find out about
+# source code changes. Any class which implements IChangeSource can be added
+# to this list: there are several in buildbot/changes/*.py to choose from.
+
+c['sources'] = []
+
+# For example, if you had CVSToys installed on your repository, and your
+# CVSROOT/freshcfg file had an entry like this:
+#pb = ConfigurationSet([
+#    (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
+#    ])
+
+# then you could use the following buildmaster Change Source to subscribe to
+# the FreshCVS daemon and be notified on every commit:
+#
+#from buildbot.changes.freshcvs import FreshCVSSource
+#fc_source = FreshCVSSource("cvs.example.com", 4519, "foo", "bar")
+#c['sources'].append(fc_source)
+
+# or, use a PBChangeSource, and then have your repository's commit script run
+# 'buildbot sendchange', or contrib/svn_buildbot.py, or
+# contrib/arch_buildbot.py :
+#
+#from buildbot.changes.pb import PBChangeSource
+#c['sources'].append(PBChangeSource())
+
+
+####### SCHEDULERS
+
+## configure the Schedulers
+
+from buildbot.scheduler import Scheduler
+c['schedulers'] = []
+c['schedulers'].append(Scheduler(name="all", branch=None,
+                                 treeStableTimer=2*60,
+                                 builderNames=["buildbot-full"]))
+
+
+####### BUILDERS
+
+# the 'builders' list defines the Builders. Each one is configured with a
+# dictionary, using the following keys:
+#  name (required): the name used to describe this builder
+#  slavename (required): which slave to use, must appear in c['bots']
+#  builddir (required): which subdirectory to run the builder in
+#  factory (required): a BuildFactory to define how the build is run
+#  periodicBuildTime (optional): if set, force a build every N seconds
+
+# buildbot/process/factory.py provides several BuildFactory classes you can
+# start with, which implement build processes for common targets (GNU
+# autoconf projects, CPAN perl modules, etc). The factory.BuildFactory is the
+# base class, and is configured with a series of BuildSteps. When the build
+# is run, the appropriate buildslave is told to execute each Step in turn.
+
+# the first BuildStep is typically responsible for obtaining a copy of the
+# sources. There are source-obtaining Steps in buildbot/process/step.py for
+# CVS, SVN, and others.
+
+cvsroot = ":pserver:anonymous at cvs.sourceforge.net:/cvsroot/buildbot"
+cvsmodule = "buildbot"
+
+builders = []
+
+from buildbot.process import factory
+from buildbot.steps.source import CVS
+from buildbot.steps.shell import Compile
+from buildbot.steps.python_twisted import Trial
+f1 = factory.BuildFactory()
+f1.addStep(CVS,
+           cvsroot=cvsroot, cvsmodule=cvsmodule, login="",
+           mode="copy")
+f1.addStep(Compile, command=["./setup.py", "build"])
+f1.addStep(Trial, testpath=".")
+
+b1 = {'name': "buildbot-full",
+      'slavename': "bot1name",
+      'builddir': "full",
+      'factory': f1,
+      }
+c['builders'] = [b1]
+
+
+####### STATUS TARGETS
+
+# 'status' is a list of Status Targets. The results of each build will be
+# pushed to these targets. buildbot/status/*.py has a variety to choose from,
+# including web pages, email senders, and IRC bots.
+
+c['status'] = []
+
+from buildbot.status import html
+c['status'].append(html.Waterfall(http_port=8010))
+
+# from buildbot.status import mail
+# c['status'].append(mail.MailNotifier(fromaddr="buildbot@localhost",
+#                                      extraRecipients=["builds@example.com"],
+#                                      sendToInterestedUsers=False))
+#
+# from buildbot.status import words
+# c['status'].append(words.IRC(host="irc.example.com", nick="bb",
+#                              channels=["#example"]))
+#
+# from buildbot.status import client
+# c['status'].append(client.PBListener(9988))
+
+
+####### DEBUGGING OPTIONS
+
+# if you set 'debugPassword', then you can connect to the buildmaster with
+# the diagnostic tool in contrib/debugclient.py . From this tool, you can
+# manually force builds and inject changes, which may be useful for testing
+# your buildmaster without actually committing changes to your repository (or
+# before you have a functioning 'sources' set up). The debug tool uses the
+# same port number as the slaves do: 'slavePortnum'.
+
+#c['debugPassword'] = "debugpassword"
+
+# if you set 'manhole', you can ssh into the buildmaster and get an
+# interactive python shell, which may be useful for debugging buildbot
+# internals. It is probably only useful for buildbot developers. You can also
+# use an authorized_keys file, or plain telnet.
+#from buildbot import manhole
+#c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
+#                                       "admin", "password")
+
+
+####### PROJECT IDENTITY
+
+# the 'projectName' string will be used to describe the project that this
+# buildbot is working on. For example, it is used as the title of the
+# waterfall HTML page. The 'projectURL' string will be used to provide a link
+# from buildbot HTML pages to your project's home page.
+
+c['projectName'] = "Buildbot"
+c['projectURL'] = "http://buildbot.sourceforge.net/"
+
+# the 'buildbotURL' string should point to the location where the buildbot's
+# internal web server (usually the html.Waterfall page) is visible. This
+# typically uses the port number set in the Waterfall 'status' entry, but
+# with an externally-visible host name which the buildbot cannot figure out
+# without some help.
+
+c['buildbotURL'] = "http://localhost:8010/"

Added: vendor/buildbot/current/buildbot/scripts/startup.py
===================================================================
--- vendor/buildbot/current/buildbot/scripts/startup.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/scripts/startup.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,118 @@
+
+import os, sys, time
+
+class Follower:
+    def follow(self):
+        from twisted.internet import reactor
+        from buildbot.scripts.reconfig import LogWatcher
+        self.rc = 0
+        print "Following twistd.log until startup finished.."
+        lw = LogWatcher("twistd.log")
+        d = lw.start()
+        d.addCallbacks(self._success, self._failure)
+        reactor.run()
+        return self.rc
+
+    def _success(self, processtype):
+        from twisted.internet import reactor
+        print "The %s appears to have (re)started correctly." % processtype
+        self.rc = 0
+        reactor.stop()
+
+    def _failure(self, why):
+        from twisted.internet import reactor
+        from buildbot.scripts.logwatcher import BuildmasterTimeoutError, \
+             ReconfigError, BuildslaveTimeoutError, BuildSlaveDetectedError
+        if why.check(BuildmasterTimeoutError):
+            print """
+The buildmaster took more than 5 seconds to start, so we were unable to
+confirm that it started correctly. Please 'tail twistd.log' and look for a
+line that says 'configuration update complete' to verify correct startup.
+"""
+        elif why.check(BuildslaveTimeoutError):
+            print """
+The buildslave took more than 5 seconds to start and/or connect to the
+buildmaster, so we were unable to confirm that it started and connected
+correctly. Please 'tail twistd.log' and look for a line that says 'message
+from master: attached' to verify correct startup. If you see a bunch of
+messages like 'will retry in 6 seconds', your buildslave might not have the
+correct hostname or port number for the buildmaster, or the buildmaster might
+not be running. If you see messages like
+   'Failure: twisted.cred.error.UnauthorizedLogin'
+then your buildslave might be using the wrong botname or password. Please
+correct these problems and then restart the buildslave.
+"""
+        elif why.check(ReconfigError):
+            print """
+The buildmaster appears to have encountered an error in the master.cfg config
+file during startup. It is probably running with an empty configuration right
+now. Please inspect and fix master.cfg, then restart the buildmaster.
+"""
+        elif why.check(BuildSlaveDetectedError):
+            print """
+Buildslave is starting up, not following logfile.
+"""
+        else:
+            print """
+Unable to confirm that the buildmaster started correctly. You may need to
+stop it, fix the config file, and restart.
+"""
+            print why
+        self.rc = 1
+        reactor.stop()
+
+
+def start(config):
+    os.chdir(config['basedir'])
+    if config['quiet']:
+        return launch(config)
+
+    # we probably can't do this os.fork under windows
+    from twisted.python.runtime import platformType
+    if platformType == "win32":
+        return launch(config)
+
+    # fork a child to launch the daemon, while the parent process tails the
+    # logfile
+    if os.fork():
+        # this is the parent
+        rc = Follower().follow()
+        sys.exit(rc)
+    # this is the child: give the logfile-watching parent a chance to start
+    # watching it before we start the daemon
+    time.sleep(0.2)
+    launch(config)
+
+def launch(config):
+    sys.path.insert(0, os.path.abspath(os.getcwd()))
+    if os.path.exists("/usr/bin/make") and os.path.exists("Makefile.buildbot"):
+        # Preferring the Makefile lets slave admins do useful things like set
+        # up environment variables for the buildslave.
+        cmd = "make -f Makefile.buildbot start"
+        if not config['quiet']:
+            print cmd
+        os.system(cmd)
+    else:
+        # see if we can launch the application without actually having to
+        # spawn twistd, since spawning processes correctly is a real hassle
+        # on windows.
+        from twisted.python.runtime import platformType
+        argv = ["twistd",
+                "--no_save",
+                "--logfile=twistd.log", # windows doesn't use the same default
+                "--python=buildbot.tac"]
+        if platformType == "win32":
+            argv.append("--reactor=win32")
+        sys.argv = argv
+
+        # this is copied from bin/twistd. twisted-1.3.0 uses twistw, while
+        # twisted-2.0.0 uses _twistw.
+        if platformType == "win32":
+            try:
+                from twisted.scripts._twistw import run
+            except ImportError:
+                from twisted.scripts.twistw import run
+        else:
+            from twisted.scripts.twistd import run
+        run()
+

Added: vendor/buildbot/current/buildbot/scripts/tryclient.py
===================================================================
--- vendor/buildbot/current/buildbot/scripts/tryclient.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/scripts/tryclient.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,578 @@
+# -*- test-case-name: buildbot.test.test_scheduler,buildbot.test.test_vc -*-
+
+import sys, os, re, time, random
+from twisted.internet import utils, protocol, defer, reactor, task
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.python import log
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.scripts import runner
+from buildbot.util import now
+from buildbot.status import builder
+from buildbot.twcompat import which
+
+class SourceStampExtractor:
+
+    def __init__(self, treetop, branch):
+        self.treetop = treetop
+        self.branch = branch
+        self.exe = which(self.vcexe)[0]
+
+    def dovc(self, cmd):
+        """This accepts the arguments of a command, without the actual
+        command itself."""
+        env = os.environ.copy()
+        env['LC_ALL'] = "C"
+        return utils.getProcessOutput(self.exe, cmd, env=env,
+                                      path=self.treetop)
+
+    def get(self):
+        """Return a Deferred that fires with a SourceStamp instance."""
+        d = self.getBaseRevision()
+        d.addCallback(self.getPatch)
+        d.addCallback(self.done)
+        return d
+    def readPatch(self, res, patchlevel):
+        self.patch = (patchlevel, res)
+    def done(self, res):
+        # TODO: figure out the branch too
+        ss = SourceStamp(self.branch, self.baserev, self.patch)
+        return ss
+
+class CVSExtractor(SourceStampExtractor):
+    patchlevel = 0
+    vcexe = "cvs"
+    def getBaseRevision(self):
+        # this depends upon our local clock and the repository's clock being
+        # reasonably synchronized with each other. We express everything in
+        # UTC because the '%z' format specifier for strftime doesn't always
+        # work.
+        self.baserev = time.strftime("%Y-%m-%d %H:%M:%S +0000",
+                                     time.gmtime(now()))
+        return defer.succeed(None)
+
+    def getPatch(self, res):
+        # the -q tells CVS to not announce each directory as it works
+        if self.branch is not None:
+            # 'cvs diff' won't take both -r and -D at the same time (it
+            # ignores the -r). As best I can tell, there is no way to make
+            # cvs give you a diff relative to a timestamp on the non-trunk
+            # branch. A bare 'cvs diff' will tell you about the changes
+            # relative to your checked-out versions, but I know of no way to
+            # find out what those checked-out versions are.
+            raise RuntimeError("Sorry, CVS 'try' builds don't work with "
+                               "branches")
+        args = ['-q', 'diff', '-u', '-D', self.baserev]
+        d = self.dovc(args)
+        d.addCallback(self.readPatch, self.patchlevel)
+        return d
+
+class SVNExtractor(SourceStampExtractor):
+    patchlevel = 0
+    vcexe = "svn"
+
+    def getBaseRevision(self):
+        d = self.dovc(["status", "-u"])
+        d.addCallback(self.parseStatus)
+        return d
+    def parseStatus(self, res):
+        # svn shows the base revision for each file that has been modified or
+        # which needs an update. You can update each file to a different
+        # version, so each file is displayed with its individual base
+        # revision. It also shows the repository-wide latest revision number
+        # on the last line ("Status against revision: \d+").
+
+        # for our purposes, we use the latest revision number as the "base"
+        # revision, and get a diff against that. This means we will get
+        # reverse-diffs for local files that need updating, but the resulting
+        # tree will still be correct. The only weirdness is that the baserev
+        # that we emit may be different than the version of the tree that we
+        # first checked out.
+
+        # to do this differently would probably involve scanning the revision
+        # numbers to find the max (or perhaps the min) revision, and then
+        # using that as a base.
+
+        for line in res.split("\n"):
+            m = re.search(r'^Status against revision:\s+(\d+)', line)
+            if m:
+                self.baserev = int(m.group(1))
+                return
+        raise IndexError("Could not find 'Status against revision' in "
+                         "SVN output: %s" % res)
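+    # Illustrative 'svn status -u' output that parseStatus scans (paths and
+    # revision numbers are hypothetical); only the final summary line is used:
+    #
+    #   M      *      102   buildbot/master.py
+    #          *      101   buildbot/interfaces.py
+    #   Status against revision:    105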
+    def getPatch(self, res):
+        d = self.dovc(["diff", "-r%d" % self.baserev])
+        d.addCallback(self.readPatch, self.patchlevel)
+        return d
+
+class BazExtractor(SourceStampExtractor):
+    vcexe = "baz"
+    def getBaseRevision(self):
+        d = self.dovc(["tree-id"])
+        d.addCallback(self.parseStatus)
+        return d
+    def parseStatus(self, res):
+        tid = res.strip()
+        slash = tid.index("/")
+        dd = tid.rindex("--")
+        self.branch = tid[slash+1:dd]
+        self.baserev = tid[dd+2:]
+    def getPatch(self, res):
+        d = self.dovc(["diff"])
+        d.addCallback(self.readPatch, 1)
+        return d
+
+class TlaExtractor(SourceStampExtractor):
+    vcexe = "tla"
+    def getBaseRevision(self):
+        # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
+        # 'tla logs' gives us REVISION
+        d = self.dovc(["logs", "--full", "--reverse"])
+        d.addCallback(self.parseStatus)
+        return d
+    def parseStatus(self, res):
+        tid = res.split("\n")[0].strip()
+        slash = tid.index("/")
+        dd = tid.rindex("--")
+        self.branch = tid[slash+1:dd]
+        self.baserev = tid[dd+2:]
+
+    def getPatch(self, res):
+        d = self.dovc(["changes", "--diffs"])
+        d.addCallback(self.readPatch, 1)
+        return d
+
+class MercurialExtractor(SourceStampExtractor):
+    patchlevel = 1
+    vcexe = "hg"
+    def getBaseRevision(self):
+        d = self.dovc(["identify"])
+        d.addCallback(self.parseStatus)
+        return d
+    def parseStatus(self, output):
+        m = re.search(r'^(\w+)', output)
+        self.baserev = m.group(0)
+    def getPatch(self, res):
+        d = self.dovc(["diff"])
+        d.addCallback(self.readPatch, self.patchlevel)
+        return d
+
+class DarcsExtractor(SourceStampExtractor):
+    patchlevel = 1
+    vcexe = "darcs"
+    def getBaseRevision(self):
+        d = self.dovc(["changes", "--context"])
+        d.addCallback(self.parseStatus)
+        return d
+    def parseStatus(self, res):
+        self.baserev = res # the whole context file
+    def getPatch(self, res):
+        d = self.dovc(["diff", "-u"])
+        d.addCallback(self.readPatch, self.patchlevel)
+        return d
+
+def getSourceStamp(vctype, treetop, branch=None):
+    if vctype == "cvs":
+        e = CVSExtractor(treetop, branch)
+    elif vctype == "svn":
+        e = SVNExtractor(treetop, branch)
+    elif vctype == "baz":
+        e = BazExtractor(treetop, branch)
+    elif vctype == "tla":
+        e = TlaExtractor(treetop, branch)
+    elif vctype == "hg":
+        e = MercurialExtractor(treetop, branch)
+    elif vctype == "darcs":
+        e = DarcsExtractor(treetop, branch)
+    else:
+        raise KeyError("unknown vctype '%s'" % vctype)
+    return e.get()
+
+
+def ns(s):
+    return "%d:%s," % (len(s), s)
+
+def createJobfile(bsid, branch, baserev, patchlevel, diff, builderNames):
+    job = ""
+    job += ns("1")
+    job += ns(bsid)
+    job += ns(branch)
+    job += ns(str(baserev))
+    job += ns("%d" % patchlevel)
+    job += ns(diff)
+    for bn in builderNames:
+        job += ns(bn)
+    return job
+
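+# For example (hypothetical values),
+#   createJobfile("1234-56", "trunk", 78, 0, "diffdata", ["full"])
+# produces the following netstring-encoded job:
+#
+#   '1:1,7:1234-56,5:trunk,2:78,1:0,8:diffdata,4:full,'
+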
+def getTopdir(topfile, start=None):
+    """walk upwards from the current directory until we find this topfile"""
+    if not start:
+        start = os.getcwd()
+    here = start
+    toomany = 20
+    while toomany > 0:
+        if os.path.exists(os.path.join(here, topfile)):
+            return here
+        next = os.path.dirname(here)
+        if next == here:
+            break # we've hit the root
+        here = next
+        toomany -= 1
+    raise ValueError("Unable to find topfile '%s' anywhere from %s upwards"
+                     % (topfile, start))
+
+class RemoteTryPP(protocol.ProcessProtocol):
+    def __init__(self, job):
+        self.job = job
+        self.d = defer.Deferred()
+    def connectionMade(self):
+        self.transport.write(self.job)
+        self.transport.closeStdin()
+    def outReceived(self, data):
+        sys.stdout.write(data)
+    def errReceived(self, data):
+        sys.stderr.write(data)
+    def processEnded(self, status_object):
+        sig = status_object.value.signal
+        rc = status_object.value.exitCode
+        if sig != None or rc != 0:
+            self.d.errback(RuntimeError("remote 'buildbot tryserver' failed"
+                                        ": sig=%s, rc=%s" % (sig, rc)))
+            return
+        self.d.callback((sig, rc))
+
+class BuildSetStatusGrabber:
+    retryCount = 5 # how many times do we try to grab the BuildSetStatus?
+    retryDelay = 3 # seconds to wait between attempts
+
+    def __init__(self, status, bsid):
+        self.status = status
+        self.bsid = bsid
+
+    def grab(self):
+        # return a Deferred that either fires with the BuildSetStatus
+        # reference or errbacks because we were unable to grab it
+        self.d = defer.Deferred()
+        # wait a second before querying to give the master's maildir watcher
+        # a chance to see the job
+        reactor.callLater(1, self.go)
+        return self.d
+
+    def go(self, dummy=None):
+        if self.retryCount == 0:
+            raise RuntimeError("couldn't find matching buildset")
+        self.retryCount -= 1
+        d = self.status.callRemote("getBuildSets")
+        d.addCallback(self._gotSets)
+
+    def _gotSets(self, buildsets):
+        for bs,bsid in buildsets:
+            if bsid == self.bsid:
+                # got it
+                self.d.callback(bs)
+                return
+        d = defer.Deferred()
+        d.addCallback(self.go)
+        reactor.callLater(self.retryDelay, d.callback, None)
+
+
+class Try(pb.Referenceable):
+    buildsetStatus = None
+    quiet = False
+
+    def __init__(self, config):
+        self.config = config
+        self.opts = runner.loadOptions()
+        self.connect = self.getopt('connect', 'try_connect')
+        assert self.connect, "you must specify a connect style: ssh or pb"
+        self.builderNames = self.getopt('builders', 'try_builders')
+        assert self.builderNames, "no builders! use --builder or " \
+               "try_builders=[names..] in .buildbot/options"
+
+    def getopt(self, config_name, options_name, default=None):
+        value = self.config.get(config_name)
+        if value is None or value == []:
+            value = self.opts.get(options_name)
+        if value is None or value == []:
+            value = default
+        return value
+
+    def createJob(self):
+        # returns a Deferred which fires when the job parameters have been
+        # created
+        opts = self.opts
+        # generate a random (unique) string. It would make sense to add a
+        # hostname and process ID here, but a) I suspect that would cause
+        # windows portability problems, and b) really this is good enough
+        self.bsid = "%d-%s" % (time.time(), random.randint(0, 1000000))
+
+        # common options
+        vc = self.getopt("vc", "try_vc")
+        branch = self.getopt("branch", "try_branch")
+
+        if vc in ("cvs", "svn"):
+            # we need to find the tree-top
+            topdir = self.getopt("try_topdir", "try_topdir")
+            if topdir:
+                treedir = os.path.expanduser(topdir)
+            else:
+                topfile = self.getopt("try-topfile", "try_topfile")
+                treedir = getTopdir(topfile)
+        else:
+            treedir = os.getcwd()
+        d = getSourceStamp(vc, treedir, branch)
+        d.addCallback(self._createJob_1)
+        return d
+    def _createJob_1(self, ss):
+        self.sourcestamp = ss
+        if self.connect == "ssh":
+            patchlevel, diff = ss.patch
+            self.jobfile = createJobfile(self.bsid,
+                                         ss.branch or "", ss.revision,
+                                         patchlevel, diff,
+                                         self.builderNames)
+
+    def deliverJob(self):
+        # returns a Deferred that fires when the job has been delivered
+        opts = self.opts
+
+        if self.connect == "ssh":
+            tryhost = self.getopt("tryhost", "try_host")
+            tryuser = self.getopt("username", "try_username")
+            trydir = self.getopt("trydir", "try_dir")
+
+            argv = ["ssh", "-l", tryuser, tryhost,
+                    "buildbot", "tryserver", "--jobdir", trydir]
+            # now run this command and feed the contents of 'job' into stdin
+
+            pp = RemoteTryPP(self.jobfile)
+            p = reactor.spawnProcess(pp, argv[0], argv, os.environ)
+            d = pp.d
+            return d
+        if self.connect == "pb":
+            user = self.getopt("username", "try_username")
+            passwd = self.getopt("passwd", "try_password")
+            master = self.getopt("master", "try_master")
+            tryhost, tryport = master.split(":")
+            tryport = int(tryport)
+            f = pb.PBClientFactory()
+            d = f.login(credentials.UsernamePassword(user, passwd))
+            reactor.connectTCP(tryhost, tryport, f)
+            d.addCallback(self._deliverJob_pb)
+            return d
+        raise RuntimeError("unknown connecttype '%s', should be 'ssh' or 'pb'"
+                           % self.connect)
+
+    def _deliverJob_pb(self, remote):
+        ss = self.sourcestamp
+        d = remote.callRemote("try",
+                              ss.branch, ss.revision, ss.patch,
+                              self.builderNames)
+        d.addCallback(self._deliverJob_pb2)
+        return d
+    def _deliverJob_pb2(self, status):
+        self.buildsetStatus = status
+        return status
+
+    def getStatus(self):
+        # returns a Deferred that fires when the builds have finished, and
+        # may emit status messages while we wait
+        wait = bool(self.getopt("wait", "try_wait", False))
+        if not wait:
+            # TODO: emit the URL where they can follow the builds. This
+            # requires contacting the Status server over PB and doing
+            # getURLForThing() on the BuildSetStatus. To get URLs for
+            # individual builds would require we wait for the builds to
+            # start.
+            print "not waiting for builds to finish"
+            return
+        d = self.running = defer.Deferred()
+        if self.buildsetStatus:
+            # pb-style connection: we already hold the BuildSetStatus reference
+            self._getStatus_1()
+            return self.running
+        # contact the status port
+        # we're probably using the ssh style
+        master = self.getopt("master", "masterstatus")
+        host, port = master.split(":")
+        port = int(port)
+        self.announce("contacting the status port at %s:%d" % (host, port))
+        f = pb.PBClientFactory()
+        creds = credentials.UsernamePassword("statusClient", "clientpw")
+        d = f.login(creds)
+        reactor.connectTCP(host, port, f)
+        d.addCallback(self._getStatus_ssh_1)
+        return self.running
+
+    def _getStatus_ssh_1(self, remote):
+        # find a remotereference to the corresponding BuildSetStatus object
+        self.announce("waiting for job to be accepted")
+        g = BuildSetStatusGrabber(remote, self.bsid)
+        d = g.grab()
+        d.addCallback(self._getStatus_1)
+        return d
+
+    def _getStatus_1(self, res=None):
+        if res:
+            self.buildsetStatus = res
+        # gather the set of BuildRequests
+        d = self.buildsetStatus.callRemote("getBuildRequests")
+        d.addCallback(self._getStatus_2)
+
+    def _getStatus_2(self, brs):
+        self.builderNames = []
+        self.buildRequests = {}
+
+        # self.builds holds the current BuildStatus object for each one
+        self.builds = {}
+
+        # self.outstanding holds the list of builderNames which haven't
+        # finished yet
+        self.outstanding = []
+
+        # self.results holds the list of build results. It holds a tuple of
+        # (result, text)
+        self.results = {}
+
+        # self.currentStep holds the name of the Step that each build is
+        # currently running
+        self.currentStep = {}
+
+        # self.ETA holds the expected finishing time (absolute time since
+        # epoch)
+        self.ETA = {}
+
+        for n,br in brs:
+            self.builderNames.append(n)
+            self.buildRequests[n] = br
+            self.builds[n] = None
+            self.outstanding.append(n)
+            self.results[n] = [None,None]
+            self.currentStep[n] = None
+            self.ETA[n] = None
+            # get new Builds for this buildrequest. We follow each one until
+            # it finishes or is interrupted.
+            br.callRemote("subscribe", self)
+
+        # now that those queries are in transit, we can start the
+        # display-status-every-30-seconds loop
+        self.printloop = task.LoopingCall(self.printStatus)
+        self.printloop.start(3, now=False)
+
+
+    # these methods are invoked by the status objects we've subscribed to
+
+    def remote_newbuild(self, bs, builderName):
+        if self.builds[builderName]:
+            self.builds[builderName].callRemote("unsubscribe", self)
+        self.builds[builderName] = bs
+        bs.callRemote("subscribe", self, 20)
+        d = bs.callRemote("waitUntilFinished")
+        d.addCallback(self._build_finished, builderName)
+
+    def remote_stepStarted(self, buildername, build, stepname, step):
+        self.currentStep[buildername] = stepname
+
+    def remote_stepFinished(self, buildername, build, stepname, step, results):
+        pass
+
+    def remote_buildETAUpdate(self, buildername, build, eta):
+        self.ETA[buildername] = now() + eta
+
+    def _build_finished(self, bs, builderName):
+        # we need to collect status from the newly-finished build. We don't
+        # remove the build from self.outstanding until we've collected
+        # everything we want.
+        self.builds[builderName] = None
+        self.ETA[builderName] = None
+        self.currentStep[builderName] = "finished"
+        d = bs.callRemote("getResults")
+        d.addCallback(self._build_finished_2, bs, builderName)
+        return d
+    def _build_finished_2(self, results, bs, builderName):
+        self.results[builderName][0] = results
+        d = bs.callRemote("getText")
+        d.addCallback(self._build_finished_3, builderName)
+        return d
+    def _build_finished_3(self, text, builderName):
+        self.results[builderName][1] = text
+        
+        self.outstanding.remove(builderName)
+        if not self.outstanding:
+            # all done
+            return self.statusDone()
+
+    def printStatus(self):
+        names = self.buildRequests.keys()
+        names.sort()
+        for n in names:
+            if n not in self.outstanding:
+                # the build is finished, and we have results
+                code,text = self.results[n]
+                t = builder.Results[code]
+                if text:
+                    t += " (%s)" % " ".join(text)
+            elif self.builds[n]:
+                t = self.currentStep[n] or "building"
+                if self.ETA[n]:
+                    t += " [ETA %ds]" % (self.ETA[n] - now())
+            else:
+                t = "no build"
+            self.announce("%s: %s" % (n, t))
+        self.announce("")
+
+    def statusDone(self):
+        self.printloop.stop()
+        print "All Builds Complete"
+        # TODO: include a URL for all failing builds
+        names = self.buildRequests.keys()
+        names.sort()
+        happy = True
+        for n in names:
+            code,text = self.results[n]
+            t = "%s: %s" % (n, builder.Results[code])
+            if text:
+                t += " (%s)" % " ".join(text)
+            print t
+            if code != builder.SUCCESS:
+                happy = False
+
+        if happy:
+            self.exitcode = 0
+        else:
+            self.exitcode = 1
+        self.running.callback(self.exitcode)
+
+    def announce(self, message):
+        if not self.quiet:
+            print message
+
+    def run(self):
+        # we can't do spawnProcess until we're inside reactor.run(), so get
+        # funky
+        print "using '%s' connect method" % self.connect
+        self.exitcode = 0
+        d = defer.Deferred()
+        d.addCallback(lambda res: self.createJob())
+        d.addCallback(lambda res: self.announce("job created"))
+        d.addCallback(lambda res: self.deliverJob())
+        d.addCallback(lambda res: self.announce("job has been delivered"))
+        d.addCallback(lambda res: self.getStatus())
+        d.addErrback(log.err)
+        d.addCallback(self.cleanup)
+        d.addCallback(lambda res: reactor.stop())
+
+        reactor.callLater(0, d.callback, None)
+        reactor.run()
+        sys.exit(self.exitcode)
+
+    def logErr(self, why):
+        log.err(why)
+        print "error during 'try' processing"
+        print why
+
+    def cleanup(self, res=None):
+        if self.buildsetStatus:
+            self.buildsetStatus.broker.transport.loseConnection()
+
+
+    

Added: vendor/buildbot/current/buildbot/slave/__init__.py
===================================================================

Added: vendor/buildbot/current/buildbot/slave/bot.py
===================================================================
--- vendor/buildbot/current/buildbot/slave/bot.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/slave/bot.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,500 @@
+#! /usr/bin/python
+
+import os.path
+
+from twisted.spread import pb
+from twisted.python import log
+from twisted.internet import reactor, defer
+from twisted.application import service, internet
+from twisted.cred import credentials
+
+from buildbot.util import now
+from buildbot.pbutil import ReconnectingPBClientFactory
+from buildbot.slave import registry
+# make sure the standard commands get registered
+from buildbot.slave import commands
+
+class NoCommandRunning(pb.Error):
+    pass
+class WrongCommandRunning(pb.Error):
+    pass
+class UnknownCommand(pb.Error):
+    pass
+
+class Master:
+    def __init__(self, host, port, username, password):
+        self.host = host
+        self.port = port
+        self.username = username
+        self.password = password
+
+class SlaveBuild:
+
+    """This is an object that can hold state from one step to another in the
+    same build. All SlaveCommands have access to it.
+    """
+    def __init__(self, builder):
+        self.builder = builder
+    
+class SlaveBuilder(pb.Referenceable, service.Service):
+
+    """This is the local representation of a single Builder: it handles a
+    single kind of build (like an all-warnings build). It has a name and a
+    home directory. The rest of its behavior is determined by the master.
+    """
+
+    stopCommandOnShutdown = True
+
+    # remote is a ref to the Builder object on the master side, and is set
+    # when they attach. We use it to detect when the connection to the master
+    # is severed.
+    remote = None
+
+    # .build points to a SlaveBuild object, a new one for each build
+    build = None
+
+    # .command points to a SlaveCommand instance, and is set while the step
+    # is running. We use it to implement the stopBuild method.
+    command = None
+
+    # .remoteStep is a ref to the master-side BuildStep object, and is set
+    # when the step is started
+    remoteStep = None
+
+    def __init__(self, name, not_really):
+        #service.Service.__init__(self) # Service has no __init__ method
+        self.setName(name)
+        self.not_really = not_really
+
+    def __repr__(self):
+        return "<SlaveBuilder '%s' at %d>" % (self.name, id(self))
+
+    def setServiceParent(self, parent):
+        service.Service.setServiceParent(self, parent)
+        self.bot = self.parent
+        # note that self.parent will go away when the buildmaster's config
+        # file changes and this Builder is removed (possibly because it has
+        # been changed, so the Builder will be re-added again in a moment).
+        # This may occur during a build, while a step is running.
+
+    def setBuilddir(self, builddir):
+        assert self.parent
+        self.builddir = builddir
+        self.basedir = os.path.join(self.bot.basedir, self.builddir)
+        if not os.path.isdir(self.basedir):
+            os.mkdir(self.basedir)
+
+    def stopService(self):
+        service.Service.stopService(self)
+        if self.stopCommandOnShutdown:
+            self.stopCommand()
+
+    def activity(self):
+        bot = self.parent
+        if bot:
+            buildslave = bot.parent
+            if buildslave:
+                bf = buildslave.bf
+                bf.activity()
+
+    def remote_setMaster(self, remote):
+        self.remote = remote
+        self.remote.notifyOnDisconnect(self.lostRemote)
+    def remote_print(self, message):
+        log.msg("SlaveBuilder.remote_print(%s): message from master: %s" %
+                (self.name, message))
+        if message == "ping":
+            return self.remote_ping()
+
+    def remote_ping(self):
+        log.msg("SlaveBuilder.remote_ping(%s)" % self)
+        if self.bot and self.bot.parent:
+            debugOpts = self.bot.parent.debugOpts
+            if debugOpts.get("stallPings"):
+                log.msg(" debug_stallPings")
+                timeout, timers = debugOpts["stallPings"]
+                d = defer.Deferred()
+                t = reactor.callLater(timeout, d.callback, None)
+                timers.append(t)
+                return d
+            if debugOpts.get("failPingOnce"):
+                log.msg(" debug_failPingOnce")
+                class FailPingError(pb.Error): pass
+                del debugOpts['failPingOnce']
+                raise FailPingError("debug_failPingOnce means we should fail")
+
+    def lostRemote(self, remote):
+        log.msg("lost remote")
+        self.remote = None
+
+    def lostRemoteStep(self, remotestep):
+        log.msg("lost remote step")
+        self.remoteStep = None
+        if self.stopCommandOnShutdown:
+            self.stopCommand()
+        
+    # the following are Commands that can be invoked by the master-side
+    # Builder
+    def remote_startBuild(self):
+        """This is invoked before the first step of any new build is run. It
+        creates a new SlaveBuild object, which holds slave-side state from
+        one step to the next."""
+        self.build = SlaveBuild(self)
+        log.msg("%s.startBuild" % self)
+
+    def remote_startCommand(self, stepref, stepId, command, args):
+        """
+        This gets invoked by L{buildbot.process.step.RemoteCommand.start}, as
+        part of various master-side BuildSteps, to start various commands
+        that actually do the build. I return nothing. Eventually I will call
+        .commandComplete() to notify the master-side RemoteCommand that I'm
+        done.
+        """
+
+        self.activity()
+
+        if self.command:
+            log.msg("leftover command, dropping it")
+            self.stopCommand()
+
+        try:
+            factory, version = registry.commandRegistry[command]
+        except KeyError:
+            raise UnknownCommand, "unrecognized SlaveCommand '%s'" % command
+        self.command = factory(self, stepId, args)
+
+        log.msg(" startCommand:%s [id %s]" % (command,stepId))
+        self.remoteStep = stepref
+        self.remoteStep.notifyOnDisconnect(self.lostRemoteStep)
+        d = self.command.doStart()
+        d.addCallback(lambda res: None)
+        d.addBoth(self.commandComplete)
+        return None
+
+    def remote_interruptCommand(self, stepId, why):
+        """Halt the current step."""
+        log.msg("asked to interrupt current command: %s" % why)
+        self.activity()
+        if not self.command:
+            # TODO: just log it, a race could result in their interrupting a
+            # command that wasn't actually running
+            log.msg(" .. but none was running")
+            return
+        self.command.doInterrupt()
+
+
+    def stopCommand(self):
+        """Make any currently-running command die, with no further status
+        output. This is used when the buildslave is shutting down or the
+        connection to the master has been lost. Interrupt the command,
+        silence it, and then forget about it."""
+        if not self.command:
+            return
+        log.msg("stopCommand: halting current command %s" % self.command)
+        self.command.doInterrupt() # shut up! and die!
+        self.command = None # forget you!
+
+    # sendUpdate is invoked by the Commands we spawn
+    def sendUpdate(self, data):
+        """This sends the status update to the master-side
+        L{buildbot.process.step.RemoteCommand} object, giving it a sequence
+        number in the process. It adds the update to a queue, and asks the
+        master to acknowledge the update so it can be removed from that
+        queue."""
+
+        if not self.running:
+            # .running comes from service.Service, and says whether the
+            # service is running or not. If we aren't running, don't send any
+            # status messages.
+            return
+        # the update[1]=0 comes from the leftover 'updateNum', which the
+        # master still expects to receive. Provide it to avoid significant
+        # interoperability issues between new slaves and old masters.
+        if self.remoteStep:
+            update = [data, 0]
+            updates = [update]
+            d = self.remoteStep.callRemote("update", updates)
+            d.addCallback(self.ackUpdate)
+            d.addErrback(self._ackFailed, "SlaveBuilder.sendUpdate")
+
+    def ackUpdate(self, acknum):
+        self.activity() # update the "last activity" timer
+
+    def ackComplete(self, dummy):
+        self.activity() # update the "last activity" timer
+
+    def _ackFailed(self, why, where):
+        log.msg("SlaveBuilder._ackFailed:", where)
+        #log.err(why) # we don't really care
+
+
+    # this is fired by the Deferred attached to each Command
+    def commandComplete(self, failure):
+        if failure:
+            log.msg("SlaveBuilder.commandFailed", self.command)
+            log.err(failure)
+            # failure, if present, is a failure.Failure. To send it across
+            # the wire, we must turn it into a pb.CopyableFailure.
+            failure = pb.CopyableFailure(failure)
+            failure.unsafeTracebacks = True
+        else:
+            # failure is None
+            log.msg("SlaveBuilder.commandComplete", self.command)
+        self.command = None
+        if not self.running:
+            log.msg(" but we weren't running, quitting silently")
+            return
+        if self.remoteStep:
+            self.remoteStep.dontNotifyOnDisconnect(self.lostRemoteStep)
+            d = self.remoteStep.callRemote("complete", failure)
+            d.addCallback(self.ackComplete)
+            d.addErrback(self._ackFailed, "sendComplete")
+            self.remoteStep = None
+
+
+    def remote_shutdown(self):
+        print "slave shutting down on command from master"
+        reactor.stop()
+        
+        
+class Bot(pb.Referenceable, service.MultiService):
+    """I represent the slave-side bot."""
+    usePTY = None
+    name = "bot"
+
+    def __init__(self, basedir, usePTY, not_really=0):
+        service.MultiService.__init__(self)
+        self.basedir = basedir
+        self.usePTY = usePTY
+        self.not_really = not_really
+        self.builders = {}
+
+    def startService(self):
+        assert os.path.isdir(self.basedir)
+        service.MultiService.startService(self)
+
+    def remote_getDirs(self):
+        return filter(lambda d: os.path.isdir(d), os.listdir(self.basedir))
+
+    def remote_getCommands(self):
+        commands = {}
+        for name, (factory, version) in registry.commandRegistry.items():
+            commands[name] = version
+        return commands
+
+    def remote_setBuilderList(self, wanted):
+        retval = {}
+        wanted_dirs = []
+        for (name, builddir) in wanted:
+            wanted_dirs.append(builddir)
+            b = self.builders.get(name, None)
+            if b:
+                if b.builddir != builddir:
+                    log.msg("changing builddir for builder %s from %s to %s" \
+                            % (name, b.builddir, builddir))
+                    b.setBuilddir(builddir)
+            else:
+                b = SlaveBuilder(name, self.not_really)
+                b.usePTY = self.usePTY
+                b.setServiceParent(self)
+                b.setBuilddir(builddir)
+                self.builders[name] = b
+            retval[name] = b
+        for name in self.builders.keys():
+            if not name in map(lambda a: a[0], wanted):
+                log.msg("removing old builder %s" % name)
+                self.builders[name].disownServiceParent()
+                del(self.builders[name])
+
+        for d in os.listdir(self.basedir):
+            if os.path.isdir(d):
+                if d not in wanted_dirs:
+                    log.msg("I have a leftover directory '%s' that is not "
+                            "being used by the buildmaster: you can delete "
+                            "it now" % d)
+        return retval
+
+    def remote_print(self, message):
+        log.msg("message from master:", message)
+
+    def remote_getSlaveInfo(self):
+        """This command retrieves data from the files in SLAVEDIR/info/* and
+        sends the contents to the buildmaster. These are used to describe
+        the slave and its configuration, and should be created and
+        maintained by the slave administrator. They will be retrieved each
+        time the master-slave connection is established.
+        """
+
+        files = {}
+        basedir = os.path.join(self.basedir, "info")
+        if not os.path.isdir(basedir):
+            return files
+        for f in os.listdir(basedir):
+            filename = os.path.join(basedir, f)
+            if os.path.isfile(filename):
+                files[f] = open(filename, "r").read()
+        return files
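+
+    # As a sketch of what this returns (file name and contents are only an
+    # example): if the admin created SLAVEDIR/info/admin containing
+    # "Bob <bob@example.org>\n", this method returns
+    # {'admin': 'Bob <bob@example.org>\n'}.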
+
+class BotFactory(ReconnectingPBClientFactory):
+    # 'keepaliveInterval' serves two purposes. The first is to keep the
+    # connection alive: it guarantees that there will be at least some
+    # traffic once every 'keepaliveInterval' seconds, which may help keep an
+    # interposed NAT gateway from dropping the address mapping because it
+    # thinks the connection has been abandoned. The second is to put an upper
+    # limit on how long the buildmaster might have gone away before we notice
+    # it. For this second purpose, we insist upon seeing *some* evidence of
+    # the buildmaster at least once every 'keepaliveInterval' seconds.
+    keepaliveInterval = None # None = do not use keepalives
+
+    # 'keepaliveTimeout' seconds before the interval expires, we will send a
+    # keepalive request, both to add some traffic to the connection, and to
+    # prompt a response from the master in case all our builders are idle. We
+    # don't insist upon receiving a timely response from this message: a slow
+    # link might put the request at the wrong end of a large build message.
+    keepaliveTimeout = 30 # how long we will go without a response
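+
+    # A worked example (numbers are illustrative): with keepaliveInterval=600
+    # and keepaliveTimeout=30, startTimers() below schedules doKeepalive at
+    # t=570s and checkActivity at t=600s; if nothing has updated lastActivity
+    # by then, checkActivity drops the connection so that the reconnecting
+    # factory can establish a new one.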
+
+    keepaliveTimer = None
+    activityTimer = None
+    lastActivity = 0
+    unsafeTracebacks = 1
+    perspective = None
+
+    def __init__(self, keepaliveInterval, keepaliveTimeout):
+        ReconnectingPBClientFactory.__init__(self)
+        self.keepaliveInterval = keepaliveInterval
+        self.keepaliveTimeout = keepaliveTimeout
+
+    def startedConnecting(self, connector):
+        ReconnectingPBClientFactory.startedConnecting(self, connector)
+        self.connector = connector
+
+    def gotPerspective(self, perspective):
+        ReconnectingPBClientFactory.gotPerspective(self, perspective)
+        self.perspective = perspective
+        try:
+            perspective.broker.transport.setTcpKeepAlive(1)
+        except:
+            log.msg("unable to set SO_KEEPALIVE")
+            if not self.keepaliveInterval:
+                self.keepaliveInterval = 10*60
+        self.activity()
+        if self.keepaliveInterval:
+            log.msg("sending application-level keepalives every %d seconds" \
+                    % self.keepaliveInterval)
+            self.startTimers()
+
+    def clientConnectionFailed(self, connector, reason):
+        self.connector = None
+        ReconnectingPBClientFactory.clientConnectionFailed(self,
+                                                           connector, reason)
+
+    def clientConnectionLost(self, connector, reason):
+        self.connector = None
+        self.stopTimers()
+        self.perspective = None
+        ReconnectingPBClientFactory.clientConnectionLost(self,
+                                                         connector, reason)
+
+    def startTimers(self):
+        assert self.keepaliveInterval
+        assert not self.keepaliveTimer
+        assert not self.activityTimer
+        # Insist that doKeepalive fires before checkActivity. Really, it
+        # needs to happen at least one RTT beforehand.
+        assert self.keepaliveInterval > self.keepaliveTimeout
+
+        # arrange to send a keepalive a little while before our deadline
+        when = self.keepaliveInterval - self.keepaliveTimeout
+        self.keepaliveTimer = reactor.callLater(when, self.doKeepalive)
+        # and check for activity too
+        self.activityTimer = reactor.callLater(self.keepaliveInterval,
+                                               self.checkActivity)
+
+    def stopTimers(self):
+        if self.keepaliveTimer:
+            self.keepaliveTimer.cancel()
+            self.keepaliveTimer = None
+        if self.activityTimer:
+            self.activityTimer.cancel()
+            self.activityTimer = None
+
+    def activity(self, res=None):
+        self.lastActivity = now()
+
+    def doKeepalive(self):
+        # send the keepalive request. If it fails outright, the connection
+        # was already dropped, so just log and ignore.
+        self.keepaliveTimer = None
+        log.msg("sending app-level keepalive")
+        d = self.perspective.callRemote("keepalive")
+        d.addCallback(self.activity)
+        d.addErrback(self.keepaliveLost)
+
+    def keepaliveLost(self, f):
+        log.msg("BotFactory.keepaliveLost")
+
+    def checkActivity(self):
+        self.activityTimer = None
+        if self.lastActivity + self.keepaliveInterval < now():
+            log.msg("BotFactory.checkActivity: nothing from master for "
+                    "%d secs" % (now() - self.lastActivity))
+            self.perspective.broker.transport.loseConnection()
+            return
+        self.startTimers()
+
+    def stopFactory(self):
+        ReconnectingPBClientFactory.stopFactory(self)
+        self.stopTimers()
+
+
+class BuildSlave(service.MultiService):
+    botClass = Bot
+
+    # debugOpts is a dictionary used during unit tests.
+
+    # debugOpts['stallPings'] can be set to a tuple of (timeout, []). Any
+    # calls to remote_print will stall for 'timeout' seconds before
+    # returning. The DelayedCalls used to implement this are stashed in the
+    # list so they can be cancelled later.
+
+    # debugOpts['failPingOnce'] can be set to True to make the slaveping fail
+    # exactly once.
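+
+    # For example (values invented for illustration), a test might pass
+    #     debugOpts={'stallPings': (10, []), 'failPingOnce': True}
+    # to delay remote_print replies by ten seconds and to make the first
+    # slaveping fail.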
+
+    def __init__(self, host, port, name, passwd, basedir, keepalive,
+                 usePTY, keepaliveTimeout=30, umask=None, debugOpts={}):
+        log.msg("Creating BuildSlave")
+        service.MultiService.__init__(self)
+        self.debugOpts = debugOpts.copy()
+        bot = self.botClass(basedir, usePTY)
+        bot.setServiceParent(self)
+        self.bot = bot
+        if keepalive == 0:
+            keepalive = None
+        self.umask = umask
+        bf = self.bf = BotFactory(keepalive, keepaliveTimeout)
+        bf.startLogin(credentials.UsernamePassword(name, passwd), client=bot)
+        self.connection = c = internet.TCPClient(host, port, bf)
+        c.setServiceParent(self)
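+
+    # A minimal usage sketch (host, port, and credentials are made up): a
+    # buildbot.tac for this slave would do roughly
+    #     s = BuildSlave("master.example.org", 9989, "slave1", "sekrit",
+    #                    "/home/buildslave/basedir", keepalive=600,
+    #                    usePTY=False)
+    #     s.setServiceParent(application)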
+
+    def waitUntilDisconnected(self):
+        # utility method for testing. Returns a Deferred that will fire when
+        # we lose the connection to the master.
+        if not self.bf.perspective:
+            return defer.succeed(None)
+        d = defer.Deferred()
+        self.bf.perspective.notifyOnDisconnect(lambda res: d.callback(None))
+        return d
+
+    def startService(self):
+        if self.umask is not None:
+            os.umask(self.umask)
+        service.MultiService.startService(self)
+
+    def stopService(self):
+        self.bf.continueTrying = 0
+        self.bf.stopTrying()
+        service.MultiService.stopService(self)
+        # now kill the TCP connection
+        # twisted >2.0.1 does this for us, and leaves _connection=None
+        if self.connection._connection:
+            self.connection._connection.disconnect()

Added: vendor/buildbot/current/buildbot/slave/commands.py
===================================================================
--- vendor/buildbot/current/buildbot/slave/commands.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/slave/commands.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,2191 @@
+# -*- test-case-name: buildbot.test.test_slavecommand -*-
+
+import os, re, signal, shutil, types, time
+from stat import ST_CTIME, ST_MTIME, ST_SIZE
+
+from twisted.internet.protocol import ProcessProtocol
+from twisted.internet import reactor, defer, task
+from twisted.python import log, failure, runtime
+
+from buildbot.twcompat import implements, which
+from buildbot.slave.interfaces import ISlaveCommand
+from buildbot.slave.registry import registerSlaveCommand
+
+# this used to be a CVS $-style "Revision" auto-updated keyword, but since I
+# moved to Darcs as the primary repository, this is updated manually each
+# time this file is changed. The last cvs_ver that was here was 1.51 .
+command_version = "2.2"
+
+# version history:
+#  >=1.17: commands are interruptable
+#  >=1.28: Arch understands 'revision', added Bazaar
+#  >=1.33: Source classes understand 'retry'
+#  >=1.39: Source classes correctly handle changes in branch (except Git)
+#          Darcs accepts 'revision' (now all do but Git) (well, and P4Sync)
+#          Arch/Baz should accept 'build-config'
+#  >=1.51: (release 0.7.3)
+#  >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open',
+#          and 'logfiles'. It now sends 'log' messages in addition to
+#          stdout/stdin/header/rc. It acquired writeStdin/closeStdin methods,
+#          but these are not remotely callable yet.
+#          (not externally visible: ShellCommandPP has writeStdin/closeStdin.
+#          ShellCommand accepts new arguments (logfiles=, initialStdin=,
+#          keepStdinOpen=) and no longer accepts stdin=)
+#          (release 0.7.4)
+#  >= 2.2: added monotone, uploadFile, and downloadFile (release 0.7.5)
+
+class CommandInterrupted(Exception):
+    pass
+class TimeoutError(Exception):
+    pass
+
+class AbandonChain(Exception):
+    """A series of chained steps can raise this exception to indicate that
+    one of the intermediate ShellCommands has failed, such that there is no
+    point in running the remainder. 'rc' should be the non-zero exit code of
+    the failing ShellCommand."""
+
+    def __repr__(self):
+        return "<AbandonChain rc=%s>" % self.args[0]
+
+def getCommand(name):
+    possibles = which(name)
+    if not possibles:
+        raise RuntimeError("Couldn't find executable for '%s'" % name)
+    return possibles[0]
+
+def rmdirRecursive(dir):
+    """This is a replacement for shutil.rmtree that works better under
+    windows. Thanks to Bear at the OSAF for the code."""
+    if not os.path.exists(dir):
+        return
+
+    if os.path.islink(dir):
+        os.remove(dir)
+        return
+
+    for name in os.listdir(dir):
+        full_name = os.path.join(dir, name)
+        # on Windows, if we don't have write permission we can't remove
+        # the file/directory either, so turn that on
+        if os.name == 'nt':
+            if not os.access(full_name, os.W_OK):
+                os.chmod(full_name, 0600)
+        if os.path.isdir(full_name):
+            rmdirRecursive(full_name)
+        else:
+            # print "removing file", full_name
+            os.remove(full_name)
+    os.rmdir(dir)
+
+class ShellCommandPP(ProcessProtocol):
+    debug = False
+
+    def __init__(self, command):
+        self.command = command
+        self.pending_stdin = ""
+        self.stdin_finished = False
+
+    def writeStdin(self, data):
+        assert not self.stdin_finished
+        if self.connected:
+            self.transport.write(data)
+        else:
+            self.pending_stdin += data
+
+    def closeStdin(self):
+        if self.connected:
+            if self.debug: log.msg(" closing stdin")
+            self.transport.closeStdin()
+        self.stdin_finished = True
+
+    def connectionMade(self):
+        if self.debug:
+            log.msg("ShellCommandPP.connectionMade")
+        if not self.command.process:
+            if self.debug:
+                log.msg(" assigning self.command.process: %s" %
+                        (self.transport,))
+            self.command.process = self.transport
+
+        # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
+        # this yet, recent debian glibc has a bug which causes thread-using
+        # test cases to SIGHUP trial, and the workaround is to either run
+        # the whole test with /bin/sh -c " ".join(argv)  (way gross) or to
+        # not use a PTY. Once the bug is fixed, I'll be able to test what
+        # happens when you close stdin on a pty. My concern is that it will
+        # SIGHUP the child (since we are, in a sense, hanging up on them).
+        # But it may well be that keeping stdout open prevents the SIGHUP
+        # from being sent.
+        #if not self.command.usePTY:
+
+        if self.pending_stdin:
+            if self.debug: log.msg(" writing to stdin")
+            self.transport.write(self.pending_stdin)
+        if self.stdin_finished:
+            if self.debug: log.msg(" closing stdin")
+            self.transport.closeStdin()
+
+    def outReceived(self, data):
+        if self.debug:
+            log.msg("ShellCommandPP.outReceived")
+        self.command.addStdout(data)
+
+    def errReceived(self, data):
+        if self.debug:
+            log.msg("ShellCommandPP.errReceived")
+        self.command.addStderr(data)
+
+    def processEnded(self, status_object):
+        if self.debug:
+            log.msg("ShellCommandPP.processEnded", status_object)
+        # status_object is a Failure wrapped around an
+        # error.ProcessTerminated or an error.ProcessDone.
+        # requires twisted >= 1.0.4 to overcome a bug in process.py
+        sig = status_object.value.signal
+        rc = status_object.value.exitCode
+        self.command.finished(sig, rc)
+
+class LogFileWatcher:
+    POLL_INTERVAL = 2
+
+    def __init__(self, command, name, logfile):
+        self.command = command
+        self.name = name
+        self.logfile = logfile
+        log.msg("LogFileWatcher created to watch %s" % logfile)
+        # we are created before the ShellCommand starts. If the logfile we're
+        # supposed to be watching already exists, record its size and
+        # ctime/mtime so we can tell when it starts to change.
+        self.old_logfile_stats = self.statFile()
+        self.started = False
+
+        # every 2 seconds we check on the file again
+        self.poller = task.LoopingCall(self.poll)
+
+    def start(self):
+        self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)
+
+    def _cleanupPoll(self, err):
+        log.err(err, msg="Polling error")
+        self.poller = None
+
+    def stop(self):
+        self.poll()
+        if self.poller is not None:
+            self.poller.stop()
+        if self.started:
+            self.f.close()
+
+    def statFile(self):
+        if os.path.exists(self.logfile):
+            s = os.stat(self.logfile)
+            return (s[ST_CTIME], s[ST_MTIME], s[ST_SIZE])
+        return None
+
+    def poll(self):
+        if not self.started:
+            s = self.statFile()
+            if s == self.old_logfile_stats:
+                return # not started yet
+            if not s:
+                # the file was there, but now it's deleted. Forget about the
+                # initial state, clearly the process has deleted the logfile
+                # in preparation for creating a new one.
+                self.old_logfile_stats = None
+                return # no file to work with
+            self.f = open(self.logfile, "rb")
+            self.started = True
+        self.f.seek(self.f.tell(), 0)
+        while True:
+            data = self.f.read(10000)
+            if not data:
+                return
+            self.command.addLogfile(self.name, data)
+
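+# LogFileWatcher is wired up by ShellCommand below. As a sketch (the file
+# name is only an example), a master-side step that passes
+#     logfiles={'test.log': '_trial_temp/test.log'}
+# gets one watcher per entry, which tails the file and emits
+# {'log': ('test.log', data)} status updates while the command runs.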
+
+class ShellCommand:
+    # This is a helper class, used by SlaveCommands to run programs in a
+    # child shell.
+
+    notreally = False
+    BACKUP_TIMEOUT = 5
+    KILL = "KILL"
+
+    def __init__(self, builder, command,
+                 workdir, environ=None,
+                 sendStdout=True, sendStderr=True, sendRC=True,
+                 timeout=None, initialStdin=None, keepStdinOpen=False,
+                 keepStdout=False,
+                 logfiles={}):
+        """
+
+        @param keepStdout: if True, we keep a copy of all the stdout text
+                           that we've seen. This copy is available in
+                           self.stdout, which can be read after the command
+                           has finished.
+
+        """
+
+        self.builder = builder
+        self.command = command
+        self.sendStdout = sendStdout
+        self.sendStderr = sendStderr
+        self.sendRC = sendRC
+        self.logfiles = logfiles
+        self.workdir = workdir
+        self.environ = os.environ.copy()
+        if environ:
+            if environ.has_key('PYTHONPATH'):
+                ppath = environ['PYTHONPATH']
+                # Need to do os.pathsep translation.  We could either do that
+                # by replacing all incoming ':'s with os.pathsep, or by
+                # accepting lists.  I like lists better.
+                if not isinstance(ppath, str):
+                    # If it's not a string, treat it as a sequence to be
+                    # turned in to a string.
+                    ppath = os.pathsep.join(ppath)
+
+                if self.environ.has_key('PYTHONPATH'):
+                    # special case, prepend the builder's items to the
+                    # existing ones. This will break if you send over empty
+                    # strings, so don't do that.
+                    ppath = ppath + os.pathsep + self.environ['PYTHONPATH']
+
+                environ['PYTHONPATH'] = ppath
+
+            self.environ.update(environ)
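+            # Worked example (paths are made up): with an existing
+            # PYTHONPATH of '/usr/lib/site' and an incoming
+            # environ={'PYTHONPATH': ['/home/slave/lib']}, the child
+            # process sees PYTHONPATH='/home/slave/lib' + os.pathsep +
+            # '/usr/lib/site'.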
+        self.initialStdin = initialStdin
+        self.keepStdinOpen = keepStdinOpen
+        self.timeout = timeout
+        self.timer = None
+        self.keepStdout = keepStdout
+
+        # usePTY=True is a convenience for cleaning up all children and
+        # grandchildren of a hung command. Fall back to usePTY=False on
+        # systems where ptys cause problems.
+
+        self.usePTY = self.builder.usePTY
+        if runtime.platformType != "posix":
+            self.usePTY = False # PTYs are posix-only
+        if initialStdin is not None:
+            # for .closeStdin to matter, we must use a pipe, not a PTY
+            self.usePTY = False
+
+        self.logFileWatchers = []
+        for name,filename in self.logfiles.items():
+            w = LogFileWatcher(self, name,
+                               os.path.join(self.workdir, filename))
+            self.logFileWatchers.append(w)
+
+    def __repr__(self):
+        return "<slavecommand.ShellCommand '%s'>" % self.command
+
+    def sendStatus(self, status):
+        self.builder.sendUpdate(status)
+
+    def start(self):
+        # return a Deferred which fires (with the exit code) when the command
+        # completes
+        if self.keepStdout:
+            self.stdout = ""
+        self.deferred = defer.Deferred()
+        try:
+            self._startCommand()
+        except:
+            log.msg("error in ShellCommand._startCommand")
+            log.err()
+            # pretend it was a shell error
+            self.deferred.errback(AbandonChain(-1))
+        return self.deferred
+
+    def _startCommand(self):
+        log.msg("ShellCommand._startCommand")
+        if self.notreally:
+            self.sendStatus({'header': "command '%s' in dir %s" % \
+                             (self.command, self.workdir)})
+            self.sendStatus({'header': "(not really)\n"})
+            self.finished(None, 0)
+            return
+
+        self.pp = ShellCommandPP(self)
+
+        if type(self.command) in types.StringTypes:
+            if runtime.platformType  == 'win32':
+                argv = [os.environ['COMSPEC'], '/c', self.command]
+            else:
+                # for posix, use /bin/sh. for other non-posix, well, doesn't
+                # hurt to try
+                argv = ['/bin/sh', '-c', self.command]
+        else:
+            if runtime.platformType  == 'win32':
+                argv = [os.environ['COMSPEC'], '/c'] + list(self.command)
+            else:
+                argv = self.command
+
+        # self.stdin is handled in ShellCommandPP.connectionMade
+
+        # first header line is the command in plain text, argv joined with
+        # spaces. You should be able to cut-and-paste this into a shell to
+        # obtain the same results. If there are spaces in the arguments, too
+        # bad.
+        msg = " ".join(argv)
+        log.msg(" " + msg)
+        self.sendStatus({'header': msg+"\n"})
+
+        # then comes the secondary information
+        msg = " in dir %s" % (self.workdir,)
+        if self.timeout:
+            msg += " (timeout %d secs)" % (self.timeout,)
+        log.msg(" " + msg)
+        self.sendStatus({'header': msg+"\n"})
+
+        msg = " watching logfiles %s" % (self.logfiles,)
+        log.msg(" " + msg)
+        self.sendStatus({'header': msg+"\n"})
+
+        # then the argv array, to resolve any ambiguity
+        msg = " argv: %s" % (argv,)
+        log.msg(" " + msg)
+        self.sendStatus({'header': msg+"\n"})
+
+        # then the environment, since it sometimes causes problems
+        msg = " environment: %s" % (self.environ,)
+        log.msg(" " + msg)
+        self.sendStatus({'header': msg+"\n"})
+
+        # this will be buffered until connectionMade is called
+        if self.initialStdin:
+            self.pp.writeStdin(self.initialStdin)
+        if not self.keepStdinOpen:
+            self.pp.closeStdin()
+
+        # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
+        # None, as opposed to all the posixbase-derived reactors (which
+        # return the new Process object). This is a nuisance. We can make up
+        # for it by having the ProcessProtocol give us their .transport
+        # attribute after they get one. I'd prefer to get it from
+        # spawnProcess because I'm concerned about returning from this method
+        # without having a valid self.process to work with. (if kill() were
+        # called right after we return, but somehow before connectionMade
+        # were called, then kill() would blow up).
+        self.process = None
+        p = reactor.spawnProcess(self.pp, argv[0], argv,
+                                 self.environ,
+                                 self.workdir,
+                                 usePTY=self.usePTY)
+        # connectionMade might have been called during spawnProcess
+        if not self.process:
+            self.process = p
+
+        # connectionMade also closes stdin as long as we're not using a PTY.
+        # This is intended to kill off inappropriately interactive commands
+        # better than the (long) hung-command timeout. ProcessPTY should be
+        # enhanced to allow the same childFDs argument that Process takes,
+        # which would let us connect stdin to /dev/null .
+
+        if self.timeout:
+            self.timer = reactor.callLater(self.timeout, self.doTimeout)
+
+        for w in self.logFileWatchers:
+            w.start()
+
+
+    def addStdout(self, data):
+        if self.sendStdout:
+            self.sendStatus({'stdout': data})
+        if self.keepStdout:
+            self.stdout += data
+        if self.timer:
+            self.timer.reset(self.timeout)
+
+    def addStderr(self, data):
+        if self.sendStderr:
+            self.sendStatus({'stderr': data})
+        if self.timer:
+            self.timer.reset(self.timeout)
+
+    def addLogfile(self, name, data):
+        self.sendStatus({'log': (name, data)})
+        if self.timer:
+            self.timer.reset(self.timeout)
+
+    def finished(self, sig, rc):
+        log.msg("command finished with signal %s, exit code %s" % (sig,rc))
+        for w in self.logFileWatchers:
+            # this will send the final updates
+            w.stop()
+        if sig is not None:
+            rc = -1
+        if self.sendRC:
+            if sig is not None:
+                self.sendStatus(
+                    {'header': "process killed by signal %d\n" % sig})
+            self.sendStatus({'rc': rc})
+        if self.timer:
+            self.timer.cancel()
+            self.timer = None
+        d = self.deferred
+        self.deferred = None
+        if d:
+            d.callback(rc)
+        else:
+            log.msg("Hey, command %s finished twice" % self)
+
+    def failed(self, why):
+        log.msg("ShellCommand.failed: command failed: %s" % (why,))
+        if self.timer:
+            self.timer.cancel()
+            self.timer = None
+        d = self.deferred
+        self.deferred = None
+        if d:
+            d.errback(why)
+        else:
+            log.msg("Hey, command %s finished twice" % self)
+
+    def doTimeout(self):
+        self.timer = None
+        msg = "command timed out: %d seconds without output" % self.timeout
+        self.kill(msg)
+
+    def kill(self, msg):
+        # This may be called by the timeout, or when the user has decided to
+        # abort this build.
+        if self.timer:
+            self.timer.cancel()
+            self.timer = None
+        if hasattr(self.process, "pid"):
+            msg += ", killing pid %d" % self.process.pid
+        log.msg(msg)
+        self.sendStatus({'header': "\n" + msg + "\n"})
+
+        hit = 0
+        if runtime.platformType == "posix":
+            try:
+                # really want to kill off all child processes too. Process
+                # Groups are ideal for this, but that requires
+                # spawnProcess(usePTY=1). Try both ways in case process was
+                # not started that way.
+
+                # the test suite sets self.KILL=None to tell us we should
+                # only pretend to kill the child. This lets us test the
+                # backup timer.
+
+                sig = None
+                if self.KILL is not None:
+                    sig = getattr(signal, "SIG"+ self.KILL, None)
+
+                if self.KILL is None:
+                    log.msg("self.KILL==None, only pretending to kill child")
+                elif sig is None:
+                    log.msg("signal module is missing SIG%s" % self.KILL)
+                elif not hasattr(os, "kill"):
+                    log.msg("os module is missing the 'kill' function")
+                else:
+                    log.msg("trying os.kill(-pid, %d)" % (sig,))
+                    # TODO: maybe use os.killpg instead of a negative pid?
+                    os.kill(-self.process.pid, sig)
+                    log.msg(" signal %s sent successfully" % sig)
+                    hit = 1
+            except OSError:
+                # probably no-such-process, maybe because there is no process
+                # group
+                pass
+        if not hit:
+            try:
+                if self.KILL is None:
+                    log.msg("self.KILL==None, only pretending to kill child")
+                else:
+                    log.msg("trying process.signalProcess('KILL')")
+                    self.process.signalProcess(self.KILL)
+                    log.msg(" signal %s sent successfully" % (self.KILL,))
+                    hit = 1
+            except OSError:
+                # could be no-such-process, because they finished very recently
+                pass
+        if not hit:
+            log.msg("signalProcess/os.kill failed both times")
+
+        if runtime.platformType == "posix":
+            # we only do this under posix because the win32eventreactor
+            # blocks here until the process has terminated, while closing
+            # stderr. This is weird.
+            self.pp.transport.loseConnection()
+
+        # finished ought to be called momentarily. Just in case it doesn't,
+        # set a timer which will abandon the command.
+        self.timer = reactor.callLater(self.BACKUP_TIMEOUT,
+                                       self.doBackupTimeout)
+
+    def doBackupTimeout(self):
+        log.msg("we tried to kill the process, and it wouldn't die.."
+                " finish anyway")
+        self.timer = None
+        self.sendStatus({'header': "SIGKILL failed to kill process\n"})
+        if self.sendRC:
+            self.sendStatus({'header': "using fake rc=-1\n"})
+            self.sendStatus({'rc': -1})
+        self.failed(TimeoutError("SIGKILL failed to kill process"))
+
+
+    def writeStdin(self, data):
+        self.pp.writeStdin(data)
+
+    def closeStdin(self):
+        self.pp.closeStdin()
+
+
+class Command:
+    if implements:
+        implements(ISlaveCommand)
+    else:
+        __implements__ = ISlaveCommand
+
+    """This class defines one command that can be invoked by the build master.
+    The command is executed on the slave side, and always sends back a
+    completion message when it finishes. It may also send intermediate status
+    as it runs (by calling builder.sendStatus). Some commands can be
+    interrupted (either by the build master or a local timeout), in which
+    case the step is expected to complete normally with a status message that
+    indicates an error occurred.
+
+    These commands are used by BuildSteps on the master side. Each kind of
+    BuildStep uses a single Command. The slave must implement all the
+    Commands required by the set of BuildSteps used for any given build:
+    this is checked at startup time.
+
+    All Commands are constructed with the same signature:
+     c = CommandClass(builder, args)
+    where 'builder' is the parent SlaveBuilder object, and 'args' is a
+    dict that is interpreted per-command.
+
+    The setup(args) method is available for setup, and is run from __init__.
+
+    The Command is started with start(). This method must be implemented in a
+    subclass, and it should return a Deferred. When your step is done, you
+    should fire the Deferred (the results are not used). If the command is
+    interrupted, it should fire the Deferred anyway.
+
+    While the command runs, it may send status messages back to the
+    buildmaster by calling self.sendStatus(statusdict). The statusdict is
+    interpreted by the master-side BuildStep however it likes.
+
+    A separate completion message is sent when the deferred fires, which
+    indicates that the Command has finished, but does not carry any status
+    data. If the Command needs to return an exit code of some sort, that
+    should be sent as a regular status message before the deferred is fired.
+    Once builder.commandComplete has been run, no more status messages may be
+    sent.
+
+    If interrupt() is called, the Command should attempt to shut down as
+    quickly as possible. Child processes should be killed, new ones should
+    not be started. The Command should send some kind of error status update,
+    then complete as usual by firing the Deferred.
+
+    .interrupted should be set by interrupt(), and can be tested to avoid
+    sending multiple error status messages.
+
+    If .running is False, the bot is shutting down (or has otherwise lost the
+    connection to the master), and should not send any status messages. This
+    is checked in Command.sendStatus .
+
+    """
+
+    # builder methods:
+    #  sendStatus(dict) (zero or more)
+    #  commandComplete() or commandInterrupted() (one, at end)
+
+    debug = False
+    interrupted = False
+    running = False # set by Builder, cleared on shutdown or when the
+                    # Deferred fires
+
+    def __init__(self, builder, stepId, args):
+        self.builder = builder
+        self.stepId = stepId # just for logging
+        self.args = args
+        self.setup(args)
+
+    def setup(self, args):
+        """Override this in a subclass to extract items from the args dict."""
+        pass
+
+    def doStart(self):
+        self.running = True
+        d = defer.maybeDeferred(self.start)
+        d.addBoth(self.commandComplete)
+        return d
+
+    def start(self):
+        """Start the command. This method should return a Deferred that will
+        fire when the command has completed. The Deferred's argument will be
+        ignored.
+
+        This method should be overridden by subclasses."""
+        raise NotImplementedError, "You must implement this in a subclass"
+
+    def sendStatus(self, status):
+        """Send a status update to the master."""
+        if self.debug:
+            log.msg("sendStatus", status)
+        if not self.running:
+            log.msg("would sendStatus but not .running")
+            return
+        self.builder.sendUpdate(status)
+
+    def doInterrupt(self):
+        self.running = False
+        self.interrupt()
+
+    def interrupt(self):
+        """Override this in a subclass to allow commands to be interrupted.
+        May be called multiple times, test and set self.interrupted=True if
+        this matters."""
+        pass
+
+    def commandComplete(self, res):
+        self.running = False
+        return res
+
+    # utility methods, mostly used by SlaveShellCommand and the like
+
+    def _abandonOnFailure(self, rc):
+        if type(rc) is not int:
+            log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \
+                    (rc, type(rc)))
+        assert isinstance(rc, int)
+        if rc != 0:
+            raise AbandonChain(rc)
+        return rc
+
+    def _sendRC(self, res):
+        self.sendStatus({'rc': 0})
+
+    def _checkAbandoned(self, why):
+        log.msg("_checkAbandoned", why)
+        why.trap(AbandonChain)
+        log.msg(" abandoning chain", why.value)
+        self.sendStatus({'rc': why.value.args[0]})
+        return None
+
+
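+# A minimal sketch of a Command subclass (hypothetical, not part of the
+# shipped command set): start() returns a Deferred and fires it when the
+# work is done, and registerSlaveCommand() would expose it to master-side
+# BuildSteps.
+#
+#     class SleepCommand(Command):
+#         def start(self):
+#             d = defer.Deferred()
+#             delay = self.args.get('delay', 1)
+#             reactor.callLater(delay, d.callback, None)
+#             return d
+#
+#     registerSlaveCommand("sleep", SleepCommand, command_version)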
+
+class SlaveFileUploadCommand(Command):
+    """
+    Upload a file from slave to build master
+    Arguments:
+
+        - ['workdir']:   base directory to use
+        - ['slavesrc']:  name of the slave-side file to read from
+        - ['writer']:    RemoteReference to a transfer._FileWriter object
+        - ['maxsize']:   max size (in bytes) of file to write
+        - ['blocksize']: max size for each data block
+    """
+    debug = False
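+
+    # Example args (values are illustrative only):
+    #     {'workdir': 'build', 'slavesrc': '_trial_temp/test.log',
+    #      'writer': <RemoteReference to a _FileWriter>,
+    #      'maxsize': 10000000, 'blocksize': 16384}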
+
+    def setup(self, args):
+        self.workdir = args['workdir']
+        self.filename = args['slavesrc']
+        self.writer = args['writer']
+        self.remaining = args['maxsize']
+        self.blocksize = args['blocksize']
+        self.stderr = None
+        self.rc = 0
+
+    def start(self):
+        if self.debug:
+            log.msg('SlaveFileUploadCommand started')
+
+        # Open file
+        self.path = os.path.join(self.builder.basedir,
+                                 self.workdir,
+                                 os.path.expanduser(self.filename))
+        try:
+            self.fp = open(self.path, 'r')
+            if self.debug:
+                log.msg('Opened %r for upload' % self.path)
+        except:
+            # TODO: this needs cleanup
+            self.fp = None
+            self.stderr = 'Cannot open file %r for upload' % self.path
+            self.rc = 1
+            if self.debug:
+                log.msg('Cannot open file %r for upload' % self.path)
+
+        self.sendStatus({'header': "sending %s" % self.path})
+
+        d = defer.Deferred()
+        d.addCallback(self._writeBlock)
+        d.addBoth(self.finished)
+        reactor.callLater(0, d.callback, None)
+        return d
+
+    def _writeBlock(self, res):
+        """
+        Write a block of data to the remote writer
+        """
+        if self.interrupted or self.fp is None:
+            if self.debug:
+                log.msg('SlaveFileUploadCommand._writeBlock(): end')
+            d = self.writer.callRemote('close')
+            return d
+
+        length = self.blocksize
+        if self.remaining is not None and length > self.remaining:
+            length = self.remaining
+
+        if length <= 0:
+            if self.stderr is None:
+                self.stderr = 'Maximum filesize reached, truncating file %r' \
+                                % self.path
+                self.rc = 1
+            data = ''
+        else:
+            data = self.fp.read(length)
+
+        if self.debug:
+            log.msg('SlaveFileUploadCommand._writeBlock(): '+
+                    'allowed=%d readlen=%d' % (length, len(data)))
+        if len(data) == 0:
+            d = self.writer.callRemote('close')
+            return d
+
+        if self.remaining is not None:
+            self.remaining = self.remaining - len(data)
+            assert self.remaining >= 0
+        d = self.writer.callRemote('write', data)
+        d.addCallback(self._writeBlock)
+        return d
+
+    def interrupt(self):
+        if self.debug:
+            log.msg('interrupted')
+        if self.interrupted:
+            return
+        if self.stderr is None:
+            self.stderr = 'Upload of %r interrupted' % self.path
+            self.rc = 1
+        self.interrupted = True
+        # the next _writeBlock call will notice the .interrupted flag
+
+    def finished(self, res):
+        if self.debug:
+            log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
+        if self.stderr is None:
+            self.sendStatus({'rc': self.rc})
+        else:
+            self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
+        return res
+
+registerSlaveCommand("uploadFile", SlaveFileUploadCommand, command_version)
+
+
+class SlaveFileDownloadCommand(Command):
+    """
+    Download a file from master to slave
+    Arguments:
+
+        - ['workdir']:   base directory to use
+        - ['slavedest']: name of the slave-side file to be created
+        - ['reader']:    RemoteReference to a transfer._FileReader object
+        - ['maxsize']:   max size (in bytes) of file to write
+        - ['blocksize']: max size for each data block
+        - ['mode']:      access mode for the new file
+    """
+    debug = False
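+
+    # Example args (values are illustrative only): the same shape as the
+    # upload command, plus 'slavedest' and 'mode', e.g.
+    #     {'workdir': 'build', 'slavedest': 'build.sh',
+    #      'reader': <RemoteReference to a _FileReader>,
+    #      'maxsize': 10000000, 'blocksize': 16384, 'mode': 0755}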
+
+    def setup(self, args):
+        self.workdir = args['workdir']
+        self.filename = args['slavedest']
+        self.reader = args['reader']
+        self.bytes_remaining = args['maxsize']
+        self.blocksize = args['blocksize']
+        self.mode = args['mode']
+        self.stderr = None
+        self.rc = 0
+
+    def start(self):
+        if self.debug:
+            log.msg('SlaveFileDownloadCommand starting')
+
+        # Open file
+        self.path = os.path.join(self.builder.basedir,
+                                 self.workdir,
+                                 os.path.expanduser(self.filename))
+        try:
+            self.fp = open(self.path, 'w')
+            if self.debug:
+                log.msg('Opened %r for download' % self.path)
+            if self.mode is not None:
+                # note: there is a brief window during which the new file
+                # will have the buildslave's default (umask) mode before we
+                # set the new one. Don't use this mode= feature to keep files
+                # private: use the buildslave's umask for that instead. (it
+                # is possible to call os.umask() before and after the open()
+                # call, but cleaning up from exceptions properly is more of a
+                # nuisance that way).
+                os.chmod(self.path, self.mode)
+        except IOError:
+            # TODO: this still needs cleanup
+            self.fp = None
+            self.stderr = 'Cannot open file %r for download' % self.path
+            self.rc = 1
+            if self.debug:
+                log.msg('Cannot open file %r for download' % self.path)
+
+        d = defer.Deferred()
+        d.addCallback(self._readBlock)
+        d.addBoth(self.finished)
+        reactor.callLater(0, d.callback, None)
+        return d
+
+    def _readBlock(self, res):
+        """
+        Read a block of data from the remote reader
+        """
+        if self.interrupted or self.fp is None:
+            if self.debug:
+                log.msg('SlaveFileDownloadCommand._readBlock(): end')
+            d = self.reader.callRemote('close')
+            return d
+
+        length = self.blocksize
+        if self.bytes_remaining is not None and length > self.bytes_remaining:
+            length = self.bytes_remaining
+
+        if length <= 0:
+            if self.stderr is None:
+                self.stderr = 'Maximum filesize reached, truncating file %r' \
+                                % self.path
+                self.rc = 1
+            d = self.reader.callRemote('close')
+        else:
+            d = self.reader.callRemote('read', length)
+            d.addCallback(self._writeData)
+        return d
+
+    def _writeData(self, data):
+        if self.debug:
+            log.msg('SlaveFileDownloadCommand._writeData(): readlen=%d' %
+                    len(data))
+        if len(data) == 0:
+            d = self.reader.callRemote('close')
+            return d
+
+        if self.bytes_remaining is not None:
+            self.bytes_remaining = self.bytes_remaining - len(data)
+            assert self.bytes_remaining >= 0
+        self.fp.write(data)
+        d = self._readBlock(None) # set up the callback for the next block (or finish)
+        return d
+
+    def interrupt(self):
+        if self.debug:
+            log.msg('interrupted')
+        if self.interrupted:
+            return
+        if self.stderr is None:
+            self.stderr = 'Download of %r interrupted' % self.path
+            self.rc = 1
+        self.interrupted = True
+        # now we wait for the next read request to return. _readBlock will
+        # abandon the file when it sees self.interrupted set.
+
+    def finished(self, res):
+        if self.fp is not None:
+            self.fp.close()
+
+        if self.debug:
+            log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
+        if self.stderr is None:
+            self.sendStatus({'rc': self.rc})
+        else:
+            self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
+        return res
+
+registerSlaveCommand("downloadFile", SlaveFileDownloadCommand, command_version)
+
+
+
+class SlaveShellCommand(Command):
+    """This is a Command which runs a shell command. The args dict contains
+    the following keys:
+
+        - ['command'] (required): a shell command to run. If this is a string,
+                                  it will be run with /bin/sh (['/bin/sh',
+                                  '-c', command]). If it is a list
+                                  (preferred), it will be used directly.
+        - ['workdir'] (required): subdirectory in which the command will be
+                                  run, relative to the builder dir
+        - ['env']: a dict of environment variables to augment/replace
+                   os.environ . PYTHONPATH is treated specially, and
+                   should be a list of path components to be prepended to
+                   any existing PYTHONPATH environment variable.
+        - ['initial_stdin']: a string which will be written to the command's
+                             stdin as soon as it starts
+        - ['keep_stdin_open']: unless True, the command's stdin will be
+                               closed as soon as initial_stdin has been
+                               written. Set this to True if you plan to write
+                               to stdin after the command has been started.
+        - ['want_stdout']: 0 if stdout should be thrown away
+        - ['want_stderr']: 0 if stderr should be thrown away
+        - ['not_really']: 1 to skip execution and return rc=0
+        - ['timeout']: seconds of silence to tolerate before killing command
+        - ['logfiles']: dict mapping LogFile name to the workdir-relative
+                        filename of a local log file. This local file will be
+                        watched just like 'tail -f', and all changes will be
+                        written to 'log' status updates.
+
+    ShellCommand creates the following status messages:
+        - {'stdout': data} : when stdout data is available
+        - {'stderr': data} : when stderr data is available
+        - {'header': data} : when headers (command start/stop) are available
+        - {'log': (logfile_name, data)} : when log files have new contents
+        - {'rc': rc} : when the process has terminated
+    """
+
+    def start(self):
+        args = self.args
+        # args['workdir'] is relative to Builder directory, and is required.
+        assert args['workdir'] is not None
+        workdir = os.path.join(self.builder.basedir, args['workdir'])
+
+        c = ShellCommand(self.builder, args['command'],
+                         workdir, environ=args.get('env'),
+                         timeout=args.get('timeout', None),
+                         sendStdout=args.get('want_stdout', True),
+                         sendStderr=args.get('want_stderr', True),
+                         sendRC=True,
+                         initialStdin=args.get('initial_stdin'),
+                         keepStdinOpen=args.get('keep_stdin_open'),
+                         logfiles=args.get('logfiles', {}),
+                         )
+        self.command = c
+        d = self.command.start()
+        return d
+
+    def interrupt(self):
+        self.interrupted = True
+        self.command.kill("command interrupted")
+
+    def writeStdin(self, data):
+        self.command.writeStdin(data)
+
+    def closeStdin(self):
+        self.command.closeStdin()
+
+registerSlaveCommand("shell", SlaveShellCommand, command_version)
+
+
+class DummyCommand(Command):
+    """
+    I am a dummy no-op command that by default takes 5 seconds to complete.
+    See L{buildbot.steps.dummy.RemoteDummy}
+    """
+    
+    def start(self):
+        self.d = defer.Deferred()
+        log.msg("  starting dummy command [%s]" % self.stepId)
+        self.timer = reactor.callLater(1, self.doStatus)
+        return self.d
+
+    def interrupt(self):
+        if self.interrupted:
+            return
+        self.timer.cancel()
+        self.timer = None
+        self.interrupted = True
+        self.finished()
+
+    def doStatus(self):
+        log.msg("  sending intermediate status")
+        self.sendStatus({'stdout': 'data'})
+        timeout = self.args.get('timeout', 5) + 1
+        self.timer = reactor.callLater(timeout - 1, self.finished)
+
+    def finished(self):
+        log.msg("  dummy command finished [%s]" % self.stepId)
+        if self.interrupted:
+            self.sendStatus({'rc': 1})
+        else:
+            self.sendStatus({'rc': 0})
+        self.d.callback(0)
+
+registerSlaveCommand("dummy", DummyCommand, command_version)
+
+
+# this maps handle names to a callable. When the WaitCommand starts, this
+# callable is invoked with no arguments. It should return a Deferred. When
+# that Deferred fires, our WaitCommand will finish.
+waitCommandRegistry = {}
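+# For example (handle name is hypothetical), a unit test could register
+#     waitCommandRegistry['my-handle'] = lambda: defer.succeed(None)
+# and then run a "dummy.wait" step with args={'handle': 'my-handle'}; the
+# WaitCommand below finishes as soon as that Deferred fires.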
+
+class WaitCommand(Command):
+    """
+    I am a dummy command used by the buildbot unit test suite. I wait for the
+    unit test to tell us to finish. See L{buildbot.steps.dummy.Wait}
+    """
+    
+    def start(self):
+        self.d = defer.Deferred()
+        log.msg("  starting wait command [%s]" % self.stepId)
+        handle = self.args['handle']
+        cb = waitCommandRegistry[handle]
+        del waitCommandRegistry[handle]
+        def _called():
+            log.msg(" wait-%s starting" % (handle,))
+            d = cb()
+            def _done(res):
+                log.msg(" wait-%s finishing: %s" % (handle, res))
+                return res
+            d.addBoth(_done)
+            d.addCallbacks(self.finished, self.failed)
+        reactor.callLater(0, _called)
+        return self.d
+
+    def interrupt(self):
+        log.msg("  wait command interrupted")
+        if self.interrupted:
+            return
+        self.interrupted = True
+        self.finished("interrupted")
+
+    def finished(self, res):
+        log.msg("  wait command finished [%s]" % self.stepId)
+        if self.interrupted:
+            self.sendStatus({'rc': 2})
+        else:
+            self.sendStatus({'rc': 0})
+        self.d.callback(0)
+    def failed(self, why):
+        log.msg("  wait command failed [%s]" % self.stepId)
+        self.sendStatus({'rc': 1})
+        self.d.callback(0)
+
+registerSlaveCommand("dummy.wait", WaitCommand, command_version)
+
+
+class SourceBase(Command):
+    """Abstract base class for Version Control System operations (checkout
+    and update). This class extracts the following arguments from the
+    dictionary received from the master:
+
+        - ['workdir']:  (required) the subdirectory where the buildable sources
+                        should be placed
+
+        - ['mode']:     one of update/copy/clobber/export, defaults to 'update'
+
+        - ['revision']: If not None, this is an int or string which indicates
+                        which sources (along a time-like axis) should be used.
+                        It is the thing you provide as the CVS -r or -D
+                        argument.
+
+        - ['patch']:    If not None, this is a tuple of (striplevel, patch)
+                        which contains a patch that should be applied after the
+                        checkout has occurred. Once applied, the tree is no
+                        longer eligible for use with mode='update', and it only
+                        makes sense to use this in conjunction with a
+                        ['revision'] argument. striplevel is an int, and patch
+                        is a string in standard unified diff format. The patch
+                        will be applied with 'patch -p%d <PATCH', with
+                        STRIPLEVEL substituted as %d. The command will fail if
+                        the patch process fails (rejected hunks).
+
+        - ['timeout']:  seconds of silence tolerated before we kill off the
+                        command
+
+        - ['retry']:    If not None, this is a tuple of (delay, repeats)
+                        which means that any failed VC updates should be
+                        reattempted, up to REPEATS times, after a delay of
+                        DELAY seconds. This is intended to deal with slaves
+                        that experience transient network failures.
+    """
+
+    sourcedata = ""
+
+    def setup(self, args):
+        # if we need to parse the output, use this environment. Otherwise
+        # command output will be in whatever the buildslave's native language
+        # has been set to.
+        self.env = os.environ.copy()
+        self.env['LC_ALL'] = "C"
+
+        self.workdir = args['workdir']
+        self.mode = args.get('mode', "update")
+        self.revision = args.get('revision')
+        self.patch = args.get('patch')
+        self.timeout = args.get('timeout', 120)
+        self.retry = args.get('retry')
+        # VC-specific subclasses should override this to extract more args.
+        # Make sure to upcall!
+
+    def start(self):
+        self.sendStatus({'header': "starting " + self.header + "\n"})
+        self.command = None
+
+        # self.srcdir is where the VC system should put the sources
+        if self.mode == "copy":
+            self.srcdir = "source" # hardwired directory name, sorry
+        else:
+            self.srcdir = self.workdir
+        self.sourcedatafile = os.path.join(self.builder.basedir,
+                                           self.srcdir,
+                                           ".buildbot-sourcedata")
+
+        d = defer.succeed(None)
+        # do we need to clobber anything?
+        if self.mode in ("copy", "clobber", "export"):
+            d.addCallback(self.doClobber, self.workdir)
+        if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()):
+            # the directory cannot be updated, so we have to clobber it.
+            # Perhaps the master just changed modes from 'export' to
+            # 'update'.
+            d.addCallback(self.doClobber, self.srcdir)
+
+        d.addCallback(self.doVC)
+
+        if self.mode == "copy":
+            d.addCallback(self.doCopy)
+        if self.patch:
+            d.addCallback(self.doPatch)
+        d.addCallbacks(self._sendRC, self._checkAbandoned)
+        return d
+
+    def interrupt(self):
+        self.interrupted = True
+        if self.command:
+            self.command.kill("command interrupted")
+
+    def doVC(self, res):
+        if self.interrupted:
+            raise AbandonChain(1)
+        if self.sourcedirIsUpdateable() and self.sourcedataMatches():
+            d = self.doVCUpdate()
+            d.addCallback(self.maybeDoVCFallback)
+        else:
+            d = self.doVCFull()
+            d.addBoth(self.maybeDoVCRetry)
+        d.addCallback(self._abandonOnFailure)
+        d.addCallback(self._handleGotRevision)
+        d.addCallback(self.writeSourcedata)
+        return d
+
+    def sourcedataMatches(self):
+        try:
+            olddata = open(self.sourcedatafile, "r").read()
+            if olddata != self.sourcedata:
+                return False
+        except IOError:
+            return False
+        return True
+
+    def _handleGotRevision(self, res):
+        d = defer.maybeDeferred(self.parseGotRevision)
+        d.addCallback(lambda got_revision:
+                      self.sendStatus({'got_revision': got_revision}))
+        return d
+
+    def parseGotRevision(self):
+        """Override this in a subclass. It should return a string that
+        represents which revision was actually checked out, or a Deferred
+        that will fire with such a string. If, in a future build, you were to
+        pass this 'got_revision' string in as the 'revision' component of a
+        SourceStamp, you should wind up with the same source code as this
+        checkout just obtained.
+
+        It is probably most useful to scan self.command.stdout for a string
+        of some sort. Be sure to set keepStdout=True on the VC command that
+        you run, so that you'll have something available to look at.
+
+        If this information is unavailable, just return None."""
+
+        return None
+
+    def writeSourcedata(self, res):
+        open(self.sourcedatafile, "w").write(self.sourcedata)
+        return res
+
+    def sourcedirIsUpdateable(self):
+        raise NotImplementedError("this must be implemented in a subclass")
+
+    def doVCUpdate(self):
+        raise NotImplementedError("this must be implemented in a subclass")
+
+    def doVCFull(self):
+        raise NotImplementedError("this must be implemented in a subclass")
+
+    def maybeDoVCFallback(self, rc):
+        if type(rc) is int and rc == 0:
+            return rc
+        if self.interrupted:
+            raise AbandonChain(1)
+        msg = "update failed, clobbering and trying again"
+        self.sendStatus({'header': msg + "\n"})
+        log.msg(msg)
+        d = self.doClobber(None, self.srcdir)
+        d.addCallback(self.doVCFallback2)
+        return d
+
+    def doVCFallback2(self, res):
+        msg = "now retrying VC operation"
+        self.sendStatus({'header': msg + "\n"})
+        log.msg(msg)
+        d = self.doVCFull()
+        d.addBoth(self.maybeDoVCRetry)
+        d.addCallback(self._abandonOnFailure)
+        return d
+
+    def maybeDoVCRetry(self, res):
+        """We get here somewhere after a VC chain has finished. res could
+        be::
+
+         - 0: the operation was successful
+         - nonzero: the operation failed. retry if possible
+         - AbandonChain: the operation failed, someone else noticed. retry.
+         - Failure: some other exception, re-raise
+        """
+
+        if isinstance(res, failure.Failure):
+            if self.interrupted:
+                return res # don't re-try interrupted builds
+            res.trap(AbandonChain)
+        else:
+            if type(res) is int and res == 0:
+                return res
+            if self.interrupted:
+                raise AbandonChain(1)
+        # if we get here, we should retry, if possible
+        if self.retry:
+            delay, repeats = self.retry
+            if repeats >= 0:
+                self.retry = (delay, repeats-1)
+                msg = ("update failed, trying %d more times after %d seconds"
+                       % (repeats, delay))
+                self.sendStatus({'header': msg + "\n"})
+                log.msg(msg)
+                d = defer.Deferred()
+                d.addCallback(lambda res: self.doVCFull())
+                d.addBoth(self.maybeDoVCRetry)
+                reactor.callLater(delay, d.callback, None)
+                return d
+        return res
+
+    def doClobber(self, dummy, dirname):
+        # TODO: remove the old tree in the background
+##         workdir = os.path.join(self.builder.basedir, self.workdir)
+##         deaddir = self.workdir + ".deleting"
+##         if os.path.isdir(workdir):
+##             try:
+##                 os.rename(workdir, deaddir)
+##                 # might fail if deaddir already exists: previous deletion
+##                 # hasn't finished yet
+##                 # start the deletion in the background
+##                 # TODO: there was a solaris/NetApp/NFS problem where a
+##                 # process that was still running out of the directory we're
+##                 # trying to delete could prevent the rm-rf from working. I
+##                 # think it stalled the rm, but maybe it just died with
+##                 # permission issues. Try to detect this.
+##                 os.system("rm -rf %s &" % deaddir)
+##             except:
+##                 # fall back to sequential delete-then-checkout
+##                 pass
+        d = os.path.join(self.builder.basedir, dirname)
+        if runtime.platformType != "posix":
+            # if we're running on w32, use rmtree instead. It will block,
+            # but hopefully it won't take too long.
+            rmdirRecursive(d)
+            return defer.succeed(0)
+        command = ["rm", "-rf", d]
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=0, timeout=self.timeout)
+        self.command = c
+        # sendRC=0 means the rm command will send stdout/stderr to the
+        # master, but not the rc=0 when it finishes. That job is left to
+        # _sendRC
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        return d
+
+    def doCopy(self, res):
+        # now copy tree to workdir
+        fromdir = os.path.join(self.builder.basedir, self.srcdir)
+        todir = os.path.join(self.builder.basedir, self.workdir)
+        if runtime.platformType != "posix":
+            shutil.copytree(fromdir, todir)
+            return defer.succeed(0)
+        command = ['cp', '-r', '-p', fromdir, todir]
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        return d
+
+    def doPatch(self, res):
+        patchlevel, diff = self.patch
+        command = [getCommand("patch"), '-p%d' % patchlevel]
+        dir = os.path.join(self.builder.basedir, self.workdir)
+        # mark the directory so we don't try to update it later
+        open(os.path.join(dir, ".buildbot-patched"), "w").write("patched\n")
+        # now apply the patch
+        c = ShellCommand(self.builder, command, dir,
+                         sendRC=False, timeout=self.timeout,
+                         initialStdin=diff)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        return d
+
+
+class CVS(SourceBase):
+    """CVS-specific VC operation. In addition to the arguments handled by
+    SourceBase, this command reads the following keys:
+
+    ['cvsroot'] (required): the CVSROOT repository string
+    ['cvsmodule'] (required): the module to be retrieved
+    ['branch']: a '-r' tag or branch name to use for the checkout/update
+    ['login']: a string for use as a password to 'cvs login'
+    ['global_options']: a list of strings to use before the CVS verb
+    """
+
+    header = "cvs operation"
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.vcexe = getCommand("cvs")
+        self.cvsroot = args['cvsroot']
+        self.cvsmodule = args['cvsmodule']
+        self.global_options = args.get('global_options', [])
+        self.branch = args.get('branch')
+        self.login = args.get('login')
+        self.sourcedata = "%s\n%s\n%s\n" % (self.cvsroot, self.cvsmodule,
+                                            self.branch)
+
+    def sourcedirIsUpdateable(self):
+        if os.path.exists(os.path.join(self.builder.basedir,
+                                       self.srcdir, ".buildbot-patched")):
+            return False
+        return os.path.isdir(os.path.join(self.builder.basedir,
+                                          self.srcdir, "CVS"))
+
+    def start(self):
+        if self.login is not None:
+            # need to do a 'cvs login' command first
+            d = self.builder.basedir
+            command = ([self.vcexe, '-d', self.cvsroot] + self.global_options
+                       + ['login'])
+            c = ShellCommand(self.builder, command, d,
+                             sendRC=False, timeout=self.timeout,
+                             initialStdin=self.login+"\n")
+            self.command = c
+            d = c.start()
+            d.addCallback(self._abandonOnFailure)
+            d.addCallback(self._didLogin)
+            return d
+        else:
+            return self._didLogin(None)
+
+    def _didLogin(self, res):
+        # now we really start
+        return SourceBase.start(self)
+
+    def doVCUpdate(self):
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = [self.vcexe, '-z3'] + self.global_options + ['update', '-dP']
+        if self.branch:
+            command += ['-r', self.branch]
+        if self.revision:
+            command += ['-D', self.revision]
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def doVCFull(self):
+        d = self.builder.basedir
+        if self.mode == "export":
+            verb = "export"
+        else:
+            verb = "checkout"
+        command = ([self.vcexe, '-d', self.cvsroot, '-z3'] +
+                   self.global_options +
+                   [verb, '-d', self.srcdir])
+        if self.branch:
+            command += ['-r', self.branch]
+        if self.revision:
+            command += ['-D', self.revision]
+        command += [self.cvsmodule]
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def parseGotRevision(self):
+        # CVS does not have any kind of revision stamp to speak of. We return
+        # the current timestamp as a best-effort guess, but this depends upon
+        # the local system having a clock that is
+        # reasonably-well-synchronized with the repository.
+        return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())
+
+registerSlaveCommand("cvs", CVS, command_version)
+
+class SVN(SourceBase):
+    """Subversion-specific VC operation. In addition to the arguments
+    handled by SourceBase, this command reads the following keys:
+
+    ['svnurl'] (required): the SVN repository string
+    """
+
+    header = "svn operation"
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.vcexe = getCommand("svn")
+        self.svnurl = args['svnurl']
+        self.sourcedata = "%s\n" % self.svnurl
+
+    def sourcedirIsUpdateable(self):
+        if os.path.exists(os.path.join(self.builder.basedir,
+                                       self.srcdir, ".buildbot-patched")):
+            return False
+        return os.path.isdir(os.path.join(self.builder.basedir,
+                                          self.srcdir, ".svn"))
+
+    def doVCUpdate(self):
+        revision = self.args['revision'] or 'HEAD'
+        # update: possible for mode in ('copy', 'update')
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = [self.vcexe, 'update', '--revision', str(revision),
+                   '--non-interactive']
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout,
+                         keepStdout=True)
+        self.command = c
+        return c.start()
+
+    def doVCFull(self):
+        revision = self.args['revision'] or 'HEAD'
+        d = self.builder.basedir
+        if self.mode == "export":
+            command = [self.vcexe, 'export', '--revision', str(revision),
+                       '--non-interactive',
+                       self.svnurl, self.srcdir]
+        else:
+            # mode=='clobber', or copy/update on a broken workspace
+            command = [self.vcexe, 'checkout', '--revision', str(revision),
+                       '--non-interactive',
+                       self.svnurl, self.srcdir]
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout,
+                         keepStdout=True)
+        self.command = c
+        return c.start()
+
+    def parseGotRevision(self):
+        # svn checkout operations finish with 'Checked out revision 16657.'
+        # svn update operations finish with the line 'At revision 16654.'
+        # But we don't use those. Instead, run 'svnversion'.
+        svnversion_command = getCommand("svnversion")
+        # older versions of 'svnversion' (1.1.4) require the WC_PATH
+        # argument, newer ones (1.3.1) do not.
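+        # Note (assumption about svnversion's output formats): it usually
+        # prints a bare revision number ("16654"), but modified or
+        # mixed-revision working copies get suffixes such as "16654M" or
+        # "16653:16654", in which case the int() below fails and we return
+        # None.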
+        command = [svnversion_command, "."]
+        c = ShellCommand(self.builder, command,
+                         os.path.join(self.builder.basedir, self.srcdir),
+                         environ=self.env,
+                         sendStdout=False, sendStderr=False, sendRC=False,
+                         keepStdout=True)
+        c.usePTY = False
+        d = c.start()
+        def _parse(res):
+            r = c.stdout.strip()
+            got_version = None
+            try:
+                got_version = int(r)
+            except ValueError:
+                msg =("SVN.parseGotRevision unable to parse output "
+                      "of svnversion: '%s'" % r)
+                log.msg(msg)
+                self.sendStatus({'header': msg + "\n"})
+            return got_version
+        d.addCallback(_parse)
+        return d
+
+
+registerSlaveCommand("svn", SVN, command_version)
+
+class Darcs(SourceBase):
+    """Darcs-specific VC operation. In addition to the arguments
+    handled by SourceBase, this command reads the following keys:
+
+    ['repourl'] (required): the Darcs repository string
+    """
+
+    header = "darcs operation"
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.vcexe = getCommand("darcs")
+        self.repourl = args['repourl']
+        self.sourcedata = "%s\n" % self.repourl
+        self.revision = self.args.get('revision')
+
+    def sourcedirIsUpdateable(self):
+        if os.path.exists(os.path.join(self.builder.basedir,
+                                       self.srcdir, ".buildbot-patched")):
+            return False
+        if self.revision:
+            # checking out a specific revision requires a full 'darcs get'
+            return False
+        return os.path.isdir(os.path.join(self.builder.basedir,
+                                          self.srcdir, "_darcs"))
+
+    def doVCUpdate(self):
+        assert not self.revision
+        # update: possible for mode in ('copy', 'update')
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = [self.vcexe, 'pull', '--all', '--verbose']
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def doVCFull(self):
+        # checkout or export
+        d = self.builder.basedir
+        command = [self.vcexe, 'get', '--verbose', '--partial',
+                   '--repo-name', self.srcdir]
+        if self.revision:
+            # write the context to a file
+            n = os.path.join(self.builder.basedir, ".darcs-context")
+            f = open(n, "wb")
+            f.write(self.revision)
+            f.close()
+            # tell Darcs to use that context
+            command.append('--context')
+            command.append(n)
+        command.append(self.repourl)
+
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        d = c.start()
+        if self.revision:
+            d.addCallback(self.removeContextFile, n)
+        return d
+
+    def removeContextFile(self, res, n):
+        os.unlink(n)
+        return res
+
+    def parseGotRevision(self):
+        # we use 'darcs changes --context' to find out what we wound up with
+        command = [self.vcexe, "changes", "--context"]
+        c = ShellCommand(self.builder, command,
+                         os.path.join(self.builder.basedir, self.srcdir),
+                         environ=self.env,
+                         sendStdout=False, sendStderr=False, sendRC=False,
+                         keepStdout=True)
+        c.usePTY = False
+        d = c.start()
+        d.addCallback(lambda res: c.stdout)
+        return d
+
+registerSlaveCommand("darcs", Darcs, command_version)
+
+class Monotone(SourceBase):
+    """Monotone-specific VC operation.  In addition to the arguments handled
+    by SourceBase, this command reads the following keys:
+
+    ['server_addr'] (required): the address of the server to pull from
+    ['branch'] (required): the branch the revision is on
+    ['db_path'] (required): the local database path to use
+    ['revision'] (required): the revision to check out
+    ['monotone'] (required): path to the monotone executable
+    """
+
+    header = "monotone operation"
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.server_addr = args["server_addr"]
+        self.branch = args["branch"]
+        self.db_path = args["db_path"]
+        self.revision = args["revision"]
+        self.monotone = args["monotone"]
+        self._made_fulls = False
+        self._pull_timeout = args["timeout"]
+
+    def _makefulls(self):
+        if not self._made_fulls:
+            basedir = self.builder.basedir
+            self.full_db_path = os.path.join(basedir, self.db_path)
+            self.full_srcdir = os.path.join(basedir, self.srcdir)
+            self._made_fulls = True
+
+    def sourcedirIsUpdateable(self):
+        self._makefulls()
+        if os.path.exists(os.path.join(self.full_srcdir,
+                                       ".buildbot_patched")):
+            return False
+        return (os.path.isfile(self.full_db_path)
+                and os.path.isdir(os.path.join(self.full_srcdir, "MT")))
+
+    def doVCUpdate(self):
+        return self._withFreshDb(self._doUpdate)
+
+    def _doUpdate(self):
+        # update: possible for mode in ('copy', 'update')
+        command = [self.monotone, "update",
+                   "-r", self.revision,
+                   "-b", self.branch]
+        c = ShellCommand(self.builder, command, self.full_srcdir,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def doVCFull(self):
+        return self._withFreshDb(self._doFull)
+
+    def _doFull(self):
+        command = [self.monotone, "--db=" + self.full_db_path,
+                   "checkout",
+                   "-r", self.revision,
+                   "-b", self.branch,
+                   self.full_srcdir]
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def _withFreshDb(self, callback):
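+        # Ensure the database exists (migrating or initializing it as needed),
+        # pull self.branch from the server, then run 'callback' (_doUpdate or
+        # _doFull) against the freshly-pulled database.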
+        self._makefulls()
+        # first ensure the db exists and is usable
+        if os.path.isfile(self.full_db_path):
+            # already exists, so run 'db migrate' in case monotone has been
+            # upgraded under us
+            command = [self.monotone, "db", "migrate",
+                       "--db=" + self.full_db_path]
+        else:
+            # We'll be doing an initial pull, so up the timeout to 3 hours to
+            # make sure it will have time to complete.
+            self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60)
+            self.sendStatus({"header": "creating database %s\n"
+                                       % (self.full_db_path,)})
+            command = [self.monotone, "db", "init",
+                       "--db=" + self.full_db_path]
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        d.addCallback(self._didDbInit)
+        d.addCallback(self._didPull, callback)
+        return d
+
+    def _didDbInit(self, res):
+        command = [self.monotone, "--db=" + self.full_db_path,
+                   "pull", "--ticker=dot", self.server_addr, self.branch]
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=False, timeout=self._pull_timeout)
+        self.sendStatus({"header": "pulling %s from %s\n"
+                                   % (self.branch, self.server_addr)})
+        self.command = c
+        return c.start()
+
+    def _didPull(self, res, callback):
+        return callback()
+
+registerSlaveCommand("monotone", Monotone, command_version)
+
+
+class Git(SourceBase):
+    """Git specific VC operation. In addition to the arguments
+    handled by SourceBase, this command reads the following keys:
+
+    ['repourl'] (required): the Cogito repository string
+    """
+
+    header = "git operation"
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.repourl = args['repourl']
+        #self.sourcedata = "" # TODO
+
+    def sourcedirIsUpdateable(self):
+        if os.path.exists(os.path.join(self.builder.basedir,
+                                       self.srcdir, ".buildbot-patched")):
+            return False
+        return os.path.isdir(os.path.join(self.builder.basedir,
+                                          self.srcdir, ".git"))
+
+    def doVCUpdate(self):
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = ['cg-update']
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def doVCFull(self):
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        os.mkdir(d)
+        command = ['cg-clone', '-s', self.repourl]
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+registerSlaveCommand("git", Git, command_version)
+
+class Arch(SourceBase):
+    """Arch-specific (tla-specific) VC operation. In addition to the
+    arguments handled by SourceBase, this command reads the following keys:
+
+    ['url'] (required): the repository string
+    ['version'] (required): which version (i.e. branch) to retrieve
+    ['revision'] (optional): the 'patch-NN' argument to check out
+    ['archive']: the archive name to use. If None, use the archive's default
+    ['build-config']: if present, give to 'tla build-config' after checkout
+    """
+
+    header = "arch operation"
+    buildconfig = None
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.vcexe = getCommand("tla")
+        self.archive = args.get('archive')
+        self.url = args['url']
+        self.version = args['version']
+        self.revision = args.get('revision')
+        self.buildconfig = args.get('build-config')
+        self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
+                                            self.buildconfig)
+
+    def sourcedirIsUpdateable(self):
+        if self.revision:
+            # Arch cannot roll a directory backwards, so if they ask for a
+            # specific revision, clobber the directory. Technically this
+            # could be limited to the cases where the requested revision is
+            # later than our current one, but it's too hard to extract the
+            # current revision from the tree.
+            return False
+        if os.path.exists(os.path.join(self.builder.basedir,
+                                       self.srcdir, ".buildbot-patched")):
+            return False
+        return os.path.isdir(os.path.join(self.builder.basedir,
+                                          self.srcdir, "{arch}"))
+
+    def doVCUpdate(self):
+        # update: possible for mode in ('copy', 'update')
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = [self.vcexe, 'replay']
+        if self.revision:
+            command.append(self.revision)
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def doVCFull(self):
+        # to do a checkout, we must first "register" the archive by giving
+        # the URL to tla, which will go to the repository at that URL and
+        # figure out the archive name. tla will tell you the archive name
+        # when it is done, and all further actions must refer to this name.
+
+        command = [self.vcexe, 'register-archive', '--force', self.url]
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=False, keepStdout=True,
+                         timeout=self.timeout)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        d.addCallback(self._didRegister, c)
+        return d
+
+    def _didRegister(self, res, c):
+        # find out what tla thinks the archive name is. If the user told us
+        # to use something specific, make sure it matches.
+        r = re.search(r'Registering archive: (\S+)\s*$', c.stdout)
+        if r:
+            msg = "tla reports archive name is '%s'" % r.group(1)
+            log.msg(msg)
+            self.builder.sendUpdate({'header': msg+"\n"})
+            if self.archive and r.group(1) != self.archive:
+                msg = (" mismatch, we wanted an archive named '%s'"
+                       % self.archive)
+                log.msg(msg)
+                self.builder.sendUpdate({'header': msg+"\n"})
+                raise AbandonChain(-1)
+            self.archive = r.group(1)
+        assert self.archive, "need archive name to continue"
+        return self._doGet()
+
+    def _doGet(self):
+        ver = self.version
+        if self.revision:
+            ver += "--%s" % self.revision
+        command = [self.vcexe, 'get', '--archive', self.archive,
+                   '--no-pristine',
+                   ver, self.srcdir]
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        if self.buildconfig:
+            d.addCallback(self._didGet)
+        return d
+
+    def _didGet(self, res):
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = [self.vcexe, 'build-config', self.buildconfig]
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        return d
+
+    def parseGotRevision(self):
+        # using code from tryclient.TlaExtractor
+        # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
+        # 'tla logs' gives us REVISION
+        command = [self.vcexe, "logs", "--full", "--reverse"]
+        c = ShellCommand(self.builder, command,
+                         os.path.join(self.builder.basedir, self.srcdir),
+                         environ=self.env,
+                         sendStdout=False, sendStderr=False, sendRC=False,
+                         keepStdout=True)
+        c.usePTY = False
+        d = c.start()
+        def _parse(res):
+            tid = c.stdout.split("\n")[0].strip()
+            slash = tid.index("/")
+            dd = tid.rindex("--")
+            #branch = tid[slash+1:dd]
+            baserev = tid[dd+2:]
+            return baserev
+        d.addCallback(_parse)
+        return d
+
+registerSlaveCommand("arch", Arch, command_version)
+
+class Bazaar(Arch):
+    """Bazaar (/usr/bin/baz) is an alternative client for Arch repositories.
+    It is mostly option-compatible, but archive registration is different
+    enough to warrant a separate Command.
+
+    ['archive'] (required): the name of the archive being used
+    """
+
+    def setup(self, args):
+        Arch.setup(self, args)
+        self.vcexe = getCommand("baz")
+        # baz doesn't emit the repository name after registration (and
+        # grepping through the output of 'baz archives' is too hard), so we
+        # require that the buildmaster configuration provide both the
+        # archive name and the URL.
+        self.archive = args['archive'] # required for Baz
+        self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
+                                            self.buildconfig)
+
+    # in _didRegister, the regexp won't match, so we'll stick with the name
+    # in self.archive
+
+    def _doGet(self):
+        # baz prefers ARCHIVE/VERSION. This will work even if
+        # my-default-archive is not set.
+        ver = self.archive + "/" + self.version
+        if self.revision:
+            ver += "--%s" % self.revision
+        command = [self.vcexe, 'get', '--no-pristine',
+                   ver, self.srcdir]
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        if self.buildconfig:
+            d.addCallback(self._didGet)
+        return d
+
+    def parseGotRevision(self):
+        # using code from tryclient.BazExtractor
+        command = [self.vcexe, "tree-id"]
+        c = ShellCommand(self.builder, command,
+                         os.path.join(self.builder.basedir, self.srcdir),
+                         environ=self.env,
+                         sendStdout=False, sendStderr=False, sendRC=False,
+                         keepStdout=True)
+        c.usePTY = False
+        d = c.start()
+        def _parse(res):
+            tid = c.stdout.strip()
+            slash = tid.index("/")
+            dd = tid.rindex("--")
+            #branch = tid[slash+1:dd]
+            baserev = tid[dd+2:]
+            return baserev
+        d.addCallback(_parse)
+        return d
+
+registerSlaveCommand("bazaar", Bazaar, command_version)
+
+
+class Mercurial(SourceBase):
+    """Mercurial specific VC operation. In addition to the arguments
+    handled by SourceBase, this command reads the following keys:
+
+    ['repourl'] (required): the Mercurial repository string
+    """
+
+    header = "mercurial operation"
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.vcexe = getCommand("hg")
+        self.repourl = args['repourl']
+        self.sourcedata = "%s\n" % self.repourl
+        self.stdout = ""
+        self.stderr = ""
+
+    def sourcedirIsUpdateable(self):
+        if os.path.exists(os.path.join(self.builder.basedir,
+                                       self.srcdir, ".buildbot-patched")):
+            return False
+        # like Darcs, to check out a specific (old) revision, we have to do a
+        # full checkout. TODO: I think 'hg pull' plus 'hg update' might work
+        if self.revision:
+            return False
+        return os.path.isdir(os.path.join(self.builder.basedir,
+                                          self.srcdir, ".hg"))
+
+    def doVCUpdate(self):
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = [self.vcexe, 'pull', '--update', '--verbose']
+        if self.args['revision']:
+            command.extend(['--rev', self.args['revision']])
+        c = ShellCommand(self.builder, command, d,
+                         sendRC=False, timeout=self.timeout,
+                         keepStdout=True)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._handleEmptyUpdate)
+        return d
+
+    def _handleEmptyUpdate(self, res):
+        if type(res) is int and res == 1:
+            if self.command.stdout.find("no changes found") != -1:
+                # 'hg pull', when it doesn't have anything to do, exits with
+                # rc=1, and there appears to be no way to shut this off. It
+                # emits a distinctive message to stdout, though. So catch
+                # this and pretend that it completed successfully.
+                return 0
+        return res
+
+    def doVCFull(self):
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = [self.vcexe, 'clone']
+        if self.args['revision']:
+            command.extend(['--rev', self.args['revision']])
+        command.extend([self.repourl, d])
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def parseGotRevision(self):
+        # we use 'hg identify' to find out what we wound up with
+        command = [self.vcexe, "identify"]
+        c = ShellCommand(self.builder, command,
+                         os.path.join(self.builder.basedir, self.srcdir),
+                         environ=self.env,
+                         sendStdout=False, sendStderr=False, sendRC=False,
+                         keepStdout=True)
+        d = c.start()
+        def _parse(res):
+            m = re.search(r'^(\w+)', c.stdout)
+            return m.group(1)
+        d.addCallback(_parse)
+        return d
+
+registerSlaveCommand("hg", Mercurial, command_version)
+
+
+class P4(SourceBase):
+    """A P4 source-updater.
+
+    ['p4port'] (required): host:port for server to access
+    ['p4user'] (optional): user to use for access
+    ['p4passwd'] (optional): passwd to try for the user
+    ['p4client'] (optional): client spec to use
+    ['p4views'] (optional): client views to use
+    """
+
+    header = "p4"
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.p4port = args['p4port']
+        self.p4client = args['p4client']
+        self.p4user = args['p4user']
+        self.p4passwd = args['p4passwd']
+        self.p4base = args['p4base']
+        self.p4extra_views = args['p4extra_views']
+        self.p4mode = args['mode']
+        self.p4branch = args['branch']
+        self.p4logname = os.environ['LOGNAME']
+
+        self.sourcedata = str([
+            # Perforce server.
+            self.p4port,
+
+            # Client spec.
+            self.p4client,
+
+            # Depot side of view spec.
+            self.p4base,
+            self.p4branch,
+            self.p4extra_views,
+
+            # Local side of view spec (srcdir is made from these).
+            self.builder.basedir,
+            self.mode,
+            self.workdir
+        ])
+
+
+    def sourcedirIsUpdateable(self):
+        if os.path.exists(os.path.join(self.builder.basedir,
+                                       self.srcdir, ".buildbot-patched")):
+            return False
+        # We assume our client spec is still around.
+        # We just say we aren't updateable if the dir doesn't exist so we
+        # don't get ENOENT checking the sourcedata.
+        return os.path.isdir(os.path.join(self.builder.basedir,
+                                          self.srcdir))
+
+    def doVCUpdate(self):
+        return self._doP4Sync(force=False)
+
+    def _doP4Sync(self, force):
+        command = ['p4']
+
+        if self.p4port:
+            command.extend(['-p', self.p4port])
+        if self.p4user:
+            command.extend(['-u', self.p4user])
+        if self.p4passwd:
+            command.extend(['-P', self.p4passwd])
+        if self.p4client:
+            command.extend(['-c', self.p4client])
+        command.extend(['sync'])
+        if force:
+            command.extend(['-f'])
+        if self.revision:
+            command.extend(['@' + str(self.revision)])
+        env = {}
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         environ=env, sendRC=False, timeout=self.timeout,
+                         keepStdout=True)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        return d
+
+
+    def doVCFull(self):
+        env = {}
+        command = ['p4']
+        client_spec = ''
+        client_spec += "Client: %s\n\n" % self.p4client
+        client_spec += "Owner: %s\n\n" % self.p4logname
+        client_spec += "Description:\n\tCreated by %s\n\n" % self.p4logname
+        client_spec += "Root:\t%s\n\n" % self.builder.basedir
+        client_spec += "Options:\tallwrite rmdir\n\n"
+        client_spec += "LineEnd:\tlocal\n\n"
+
+        # Setup a view
+        client_spec += "View:\n\t%s" % (self.p4base)
+        if self.p4branch:
+            client_spec += "%s/" % (self.p4branch)
+        client_spec += "... //%s/%s/...\n" % (self.p4client, self.srcdir)
+        if self.p4extra_views:
+            for k, v in self.p4extra_views:
+                client_spec += "\t%s/... //%s/%s%s/...\n" % (k, self.p4client,
+                                                             self.srcdir, v)
+        if self.p4port:
+            command.extend(['-p', self.p4port])
+        if self.p4user:
+            command.extend(['-u', self.p4user])
+        if self.p4passwd:
+            command.extend(['-P', self.p4passwd])
+        command.extend(['client', '-i'])
+        log.msg(client_spec)
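+        # The spec logged above is piped to 'p4 client -i' on stdin via
+        # initialStdin below. With hypothetical values (p4client='bot-ws',
+        # p4base='//depot/proj/', srcdir='source') its View section would read
+        # roughly:
+        #   View:
+        #       //depot/proj/... //bot-ws/source/...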
+        c = ShellCommand(self.builder, command, self.builder.basedir,
+                         environ=env, sendRC=False, timeout=self.timeout,
+                         initialStdin=client_spec)
+        self.command = c
+        d = c.start()
+        d.addCallback(self._abandonOnFailure)
+        d.addCallback(lambda _: self._doP4Sync(force=True))
+        return d
+
+registerSlaveCommand("p4", P4, command_version)
+
+
+class P4Sync(SourceBase):
+    """A partial P4 source-updater. Requires manual setup of a per-slave P4
+    environment. The only thing which comes from the master is P4PORT.
+    'mode' is required to be 'copy'.
+
+    ['p4port'] (required): host:port for server to access
+    ['p4user'] (optional): user to use for access
+    ['p4passwd'] (optional): passwd to try for the user
+    ['p4client'] (optional): client spec to use
+    """
+
+    header = "p4 sync"
+
+    def setup(self, args):
+        SourceBase.setup(self, args)
+        self.vcexe = getCommand("p4")
+        self.p4port = args['p4port']
+        self.p4user = args['p4user']
+        self.p4passwd = args['p4passwd']
+        self.p4client = args['p4client']
+
+    def sourcedirIsUpdateable(self):
+        return True
+
+    def _doVC(self, force):
+        d = os.path.join(self.builder.basedir, self.srcdir)
+        command = [self.vcexe]
+        if self.p4port:
+            command.extend(['-p', self.p4port])
+        if self.p4user:
+            command.extend(['-u', self.p4user])
+        if self.p4passwd:
+            command.extend(['-P', self.p4passwd])
+        if self.p4client:
+            command.extend(['-c', self.p4client])
+        command.extend(['sync'])
+        if force:
+            command.extend(['-f'])
+        if self.revision:
+            command.extend(['@' + self.revision])
+        env = {}
+        c = ShellCommand(self.builder, command, d, environ=env,
+                         sendRC=False, timeout=self.timeout)
+        self.command = c
+        return c.start()
+
+    def doVCUpdate(self):
+        return self._doVC(force=False)
+
+    def doVCFull(self):
+        return self._doVC(force=True)
+
+registerSlaveCommand("p4sync", P4Sync, command_version)

Added: vendor/buildbot/current/buildbot/slave/interfaces.py
===================================================================
--- vendor/buildbot/current/buildbot/slave/interfaces.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/slave/interfaces.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,57 @@
+#! /usr/bin/python
+
+from buildbot.twcompat import Interface
+
+class ISlaveCommand(Interface):
+    """This interface is implemented by all of the buildslave's Command
+    subclasses. It specifies how the buildslave can start, interrupt, and
+    query the various Commands running on behalf of the buildmaster."""
+
+    def __init__(builder, stepId, args):
+        """Create the Command. 'builder' is a reference to the parent
+        buildbot.bot.SlaveBuilder instance, which will be used to send status
+        updates (by calling builder.sendStatus). 'stepId' is a random string
+        which helps correlate slave logs with the master. 'args' is a dict of
+        arguments that comes from the master-side BuildStep, with contents
+        that are specific to the individual Command subclass.
+
+        This method is not intended to be subclassed."""
+
+    def setup(args):
+        """This method is provided for subclasses to override, to extract
+        parameters from the 'args' dictionary. The default implementation does
+        nothing. It will be called from __init__."""
+
+    def start():
+        """Begin the command, and return a Deferred.
+
+        While the command runs, it should send status updates to the
+        master-side BuildStep by calling self.sendStatus(status). The
+        'status' argument is typically a dict with keys like 'stdout',
+        'stderr', and 'rc'.
+
+        When the step completes, it should fire the Deferred (the results are
+        not used). If an exception occurs during execution, it may also
+        errback the deferred; however, any reasonable errors should be trapped
+        and indicated with a non-zero 'rc' status rather than raising an
+        exception. Exceptions should indicate problems within the buildbot
+        itself, not problems in the project being tested.
+
+        """
+
+    def interrupt():
+        """This is called to tell the Command that the build is being stopped
+        and therefore the command should be terminated as quickly as
+        possible. The command may continue to send status updates, up to and
+        including an 'rc' end-of-command update (which should indicate an
+        error condition). The Command's deferred should still be fired when
+        the command has finally completed.
+
+        If the build is being stopped because the slave is shutting down or
+        because the connection to the buildmaster has been lost, the status
+        updates will simply be discarded. The Command does not need to be
+        aware of this.
+
+        Child shell processes should be killed. Simple ShellCommand classes
+        can just insert a header line indicating that the process will be
+        killed, then os.kill() the child."""
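+
+# A minimal illustrative sketch (not part of this interface) of a conforming
+# Command subclass; the 'EchoCommand' name and its 'message' argument are
+# hypothetical:
+#
+#     from twisted.internet import defer
+#     from buildbot.slave.commands import Command, command_version
+#     from buildbot.slave.registry import registerSlaveCommand
+#
+#     class EchoCommand(Command):
+#         def setup(self, args):
+#             self.message = args.get('message', "hello")
+#         def start(self):
+#             self.sendStatus({'stdout': self.message + "\n"})
+#             self.sendStatus({'rc': 0})
+#             return defer.succeed(None)
+#         def interrupt(self):
+#             pass  # nothing long-running to kill
+#
+#     registerSlaveCommand("echo", EchoCommand, command_version)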

Added: vendor/buildbot/current/buildbot/slave/registry.py
===================================================================
--- vendor/buildbot/current/buildbot/slave/registry.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/slave/registry.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,18 @@
+#! /usr/bin/python
+
+commandRegistry = {}
+
+def registerSlaveCommand(name, factory, version):
+    """
+    Register a slave command with the registry, making it available in slaves.
+
+    @type  name:    string
+    @param name:    name under which the slave command will be registered; used
+                    for L{buildbot.slave.bot.SlaveBuilder.remote_startCommand}
+                    
+    @type  factory: L{buildbot.slave.commands.Command}
+    @type  version: string
+    @param version: version string of the factory code
+    """
+    assert not commandRegistry.has_key(name)
+    commandRegistry[name] = (factory, version)
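+
+# Usage, as seen in buildbot.slave.commands, e.g.:
+#     registerSlaveCommand("cvs", CVS, command_version)
+# The registered name is what SlaveBuilder.remote_startCommand uses to look up
+# the (factory, version) pair in commandRegistry.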

Added: vendor/buildbot/current/buildbot/slave/trial.py
===================================================================
--- vendor/buildbot/current/buildbot/slave/trial.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/slave/trial.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,175 @@
+# -*- test-case-name: buildbot.test.test_trial.TestRemoteReporter -*-
+
+import types, time
+import zope.interface as zi
+
+from twisted.spread import pb
+from twisted.internet import reactor, defer
+from twisted.python import reflect, failure, log, usage, util
+from twisted.trial import registerAdapter, adaptWithDefault, reporter, runner
+from twisted.trial.interfaces import ITestMethod, ITestSuite, ITestRunner, \
+     IJellied, IUnjellied, IRemoteReporter
+from twisted.application import strports
+
+
+class RemoteTestAny(object, util.FancyStrMixin):
+    def __init__(self, original):
+        self.original = original
+
+    def __getattr__(self, attr):
+        if attr not in self.original:
+            raise AttributeError, "%s has no attribute %s" % (self.__str__(), attr)
+        return self.original[attr]
+
+
+class RemoteTestMethod(RemoteTestAny):
+    zi.implements(ITestMethod)
+
+class RemoteTestSuite(RemoteTestAny):
+    zi.implements(ITestSuite)
+
+
+class RemoteReporter(reporter.Reporter):
+    zi.implements(IRemoteReporter)
+    pbroot = None
+
+    def __init__(self, stream=None, tbformat=None, args=None):
+        super(RemoteReporter, self).__init__(stream, tbformat, args)
+
+    def setUpReporter(self):
+        factory = pb.PBClientFactory()
+        
+        self.pbcnx = reactor.connectTCP("localhost", self.args, factory)
+        assert self.pbcnx is not None
+
+        def _cb(root):
+            self.pbroot = root
+            return root
+
+        return factory.getRootObject().addCallback(_cb
+                                     ).addErrback(log.err)
+        
+    def tearDownReporter(self):
+        def _disconnected(passthru):
+            log.msg(sekritHQ='_disconnected, passthru: %r' % (passthru,))
+            return passthru
+
+        d = defer.Deferred().addCallback(_disconnected
+                           ).addErrback(log.err)
+
+        self.pbroot.notifyOnDisconnect(d.callback)
+        self.pbcnx.transport.loseConnection()
+        return d
+
+    def reportImportError(self, name, fail):
+        pass
+
+    def startTest(self, method):
+        return self.pbroot.callRemote('startTest', IJellied(method))
+
+    def endTest(self, method):
+        return self.pbroot.callRemote('endTest', IJellied(method))
+
+    def startSuite(self, arg):
+        return self.pbroot.callRemote('startSuite', IJellied(arg))
+
+    def endSuite(self, suite):
+        return self.pbroot.callRemote('endSuite', IJellied(suite))
+
+
+# -- Adapters --
+
+def jellyList(L):
+    return [IJellied(i) for i in L]
+    
+def jellyTuple(T):
+    return tuple(IJellied(list(T)))
+    
+def jellyDict(D):
+    def _clean(*a):
+       return tuple(map(lambda x: adaptWithDefault(IJellied, x, None), a))
+    return dict([_clean(k, v) for k, v in D.iteritems()]) 
+
+def jellyTimingInfo(d, timed):
+    for attr in ('startTime', 'endTime'):
+        d[attr] = getattr(timed, attr, 0.0)
+    return d
+
+def _logFormatter(eventDict):
+    #XXX: this is pretty weak, it's basically the guts of
+    # t.p.log.FileLogObserver.emit, but then again, that's been pretty
+    # stable over the past few releases....
+    edm = eventDict['message']
+    if not edm:
+        if eventDict['isError'] and eventDict.has_key('failure'):
+            text = eventDict['failure'].getTraceback()
+        elif eventDict.has_key('format'):
+            try:
+                text = eventDict['format'] % eventDict
+            except:
+                try:
+                    text = ('Invalid format string in log message: %s'
+                            % eventDict)
+                except:
+                    text = 'UNFORMATTABLE OBJECT WRITTEN TO LOG, MESSAGE LOST'
+        else:
+            # we don't know how to log this
+            return
+    else:
+        text = ' '.join(map(str, edm))
+
+    timeStr = time.strftime("%Y/%m/%d %H:%M %Z", time.localtime(eventDict['time']))
+    fmtDict = {'system': eventDict['system'], 'text': text.replace("\n", "\n\t")}
+    msgStr = " [%(system)s] %(text)s\n" % fmtDict
+    return "%s%s" % (timeStr, msgStr)
+
+def jellyTestMethod(testMethod):
+    """@param testMethod: an object that implements L{twisted.trial.interfaces.ITestMethod}"""
+    d = {}
+    for attr in ('status', 'todo', 'skip', 'stdout', 'stderr',
+                 'name', 'fullName', 'runs', 'errors', 'failures', 'module'):
+        d[attr] = getattr(testMethod, attr)
+
+    q = None
+    try:
+        q = reflect.qual(testMethod.klass)
+    except TypeError:
+        # XXX: This may be incorrect somehow
+        q = "%s.%s" % (testMethod.module, testMethod.klass.__name__)
+    d['klass'] = q
+
+    d['logevents'] = [_logFormatter(event) for event in testMethod.logevents]
+            
+    jellyTimingInfo(d, testMethod)
+    
+    return d
+    
+def jellyTestRunner(testRunner):
+    """@param testRunner: an object that implements L{twisted.trial.interfaces.ITestRunner}"""
+    d = dict(testMethods=[IJellied(m) for m in testRunner.testMethods])
+    jellyTimingInfo(d, testRunner)
+    return d
+
+def jellyTestSuite(testSuite):
+    d = {}
+    for attr in ('tests', 'runners', 'couldNotImport'):
+        d[attr] = IJellied(getattr(testSuite, attr))
+
+    jellyTimingInfo(d, testSuite)
+    return d
+
+
+
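+# Wire the jelly helpers above into Twisted's adapter registry: each
+# (adapter, original, interface) triple lets IJellied(obj) serialize that
+# type, and the last two entries let plain dicts received over PB be viewed
+# as ITestMethod / ITestSuite via the Remote* wrappers.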
+for a, o, i in [(jellyTuple, types.TupleType, IJellied),
+                (jellyTestMethod, ITestMethod, IJellied),
+                (jellyList, types.ListType, IJellied),
+                (jellyTestSuite, ITestSuite, IJellied),
+                (jellyTestRunner, ITestRunner, IJellied),
+                (jellyDict, types.DictType, IJellied),
+                (RemoteTestMethod, types.DictType, ITestMethod),
+                (RemoteTestSuite, types.DictType, ITestSuite)]:
+    registerAdapter(a, o, i)
+
+for t in [types.StringType, types.IntType, types.FloatType, failure.Failure]:
+    zi.classImplements(t, IJellied)
+

Added: vendor/buildbot/current/buildbot/sourcestamp.py
===================================================================
--- vendor/buildbot/current/buildbot/sourcestamp.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/sourcestamp.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,85 @@
+
+from buildbot import util, interfaces
+from buildbot.twcompat import implements
+
+class SourceStamp(util.ComparableMixin):
+    """This is a tuple of (branch, revision, patchspec, changes).
+
+    C{branch} is always valid, although it may be None to let the Source
+    step use its default branch. There are four possibilities for the
+    remaining elements:
+     - (revision=REV, patchspec=None, changes=None): build REV
+     - (revision=REV, patchspec=(LEVEL, DIFF), changes=None): checkout REV,
+       then apply a patch to the source, with C{patch -pPATCHLEVEL <DIFF}.
+     - (revision=None, patchspec=None, changes=[CHANGES]): let the Source
+       step check out the latest revision indicated by the given Changes.
+       CHANGES is a list of L{buildbot.changes.changes.Change} instances,
+       and all must be on the same branch.
+     - (revision=None, patchspec=None, changes=None): build the latest code
+       from the given branch.
+    """
+
+    # all four of these are publicly visible attributes
+    branch = None
+    revision = None
+    patch = None
+    changes = []
+
+    compare_attrs = ('branch', 'revision', 'patch', 'changes')
+
+    if implements:
+        implements(interfaces.ISourceStamp)
+    else:
+        __implements__ = interfaces.ISourceStamp,
+
+    def __init__(self, branch=None, revision=None, patch=None,
+                 changes=None):
+        self.branch = branch
+        self.revision = revision
+        self.patch = patch
+        if changes:
+            self.changes = changes
+            self.branch = changes[0].branch
+
+    def canBeMergedWith(self, other):
+        if other.branch != self.branch:
+            return False # the builds are completely unrelated
+
+        if self.changes and other.changes:
+            # TODO: consider not merging these. It's a tradeoff between
+            # minimizing the number of builds and obtaining finer-grained
+            # results.
+            return True
+        elif self.changes and not other.changes:
+            return False # we're using changes, they aren't
+        elif not self.changes and other.changes:
+            return False # they're using changes, we aren't
+
+        if self.patch or other.patch:
+            return False # you can't merge patched builds with anything
+        if self.revision == other.revision:
+            # both builds are using the same specific revision, so they can
+            # be merged. It might be the case that revision==None, so they're
+            # both building HEAD.
+            return True
+
+        return False
+
+    def mergeWith(self, others):
+        """Generate a SourceStamp for the merger of me and all the other
+        BuildRequests. This is called by a Build when it starts, to figure
+        out what its sourceStamp should be."""
+
+        # either we're all building the same thing (changes==None), or we're
+        # all building changes (which can be merged)
+        changes = []
+        changes.extend(self.changes)
+        for req in others:
+            assert self.canBeMergedWith(req) # should have been checked already
+            changes.extend(req.changes)
+        newsource = SourceStamp(branch=self.branch,
+                                revision=self.revision,
+                                patch=self.patch,
+                                changes=changes)
+        return newsource
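+
+# An illustrative sketch (not part of this module) of the merge rules
+# described in the class docstring; the branch names and patch are made up:
+#
+#     s1 = SourceStamp(branch="trunk")
+#     s2 = SourceStamp(branch="trunk")
+#     s1.canBeMergedWith(s2)   # True: same branch, both building HEAD
+#     s3 = SourceStamp(branch="stable")
+#     s1.canBeMergedWith(s3)   # False: different branches never merge
+#     s4 = SourceStamp(branch="trunk", patch=(0, "...diff text..."))
+#     s1.canBeMergedWith(s4)   # False: patched builds are never merged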
+

Added: vendor/buildbot/current/buildbot/status/__init__.py
===================================================================

Added: vendor/buildbot/current/buildbot/status/base.py
===================================================================
--- vendor/buildbot/current/buildbot/status/base.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/base.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,70 @@
+#! /usr/bin/python
+
+from twisted.application import service
+from buildbot.twcompat import implements
+
+from buildbot.interfaces import IStatusReceiver
+from buildbot import util, pbutil
+
+class StatusReceiver:
+    if implements:
+        implements(IStatusReceiver)
+    else:
+        __implements__ = IStatusReceiver,
+
+    def buildsetSubmitted(self, buildset):
+        pass
+
+    def builderAdded(self, builderName, builder):
+        pass
+
+    def builderChangedState(self, builderName, state):
+        pass
+
+    def buildStarted(self, builderName, build):
+        pass
+
+    def buildETAUpdate(self, build, ETA):
+        pass
+
+    def stepStarted(self, build, step):
+        pass
+
+    def stepETAUpdate(self, build, step, ETA, expectations):
+        pass
+
+    def logStarted(self, build, step, log):
+        pass
+
+    def logChunk(self, build, step, log, channel, text):
+        pass
+
+    def logFinished(self, build, step, log):
+        pass
+
+    def stepFinished(self, build, step, results):
+        pass
+
+    def buildFinished(self, builderName, build, results):
+        pass
+
+    def builderRemoved(self, builderName):
+        pass
+
+class StatusReceiverMultiService(StatusReceiver, service.MultiService,
+                                 util.ComparableMixin):
+    if implements:
+        implements(IStatusReceiver)
+    else:
+        __implements__ = IStatusReceiver, service.MultiService.__implements__
+
+    def __init__(self):
+        service.MultiService.__init__(self)
+
+
+class StatusReceiverPerspective(StatusReceiver, pbutil.NewCredPerspective):
+    if implements:
+        implements(IStatusReceiver)
+    else:
+        __implements__ = (IStatusReceiver,
+                          pbutil.NewCredPerspective.__implements__)

Added: vendor/buildbot/current/buildbot/status/builder.py
===================================================================
--- vendor/buildbot/current/buildbot/status/builder.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/builder.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,1942 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+from __future__ import generators
+
+from twisted.python import log
+from twisted.persisted import styles
+from twisted.internet import reactor, defer
+from twisted.protocols import basic
+
+import os, shutil, sys, re, urllib
+try:
+    import cPickle
+    pickle = cPickle
+except ImportError:
+    import pickle
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+# sibling imports
+from buildbot import interfaces, util, sourcestamp
+from buildbot.twcompat import implements, providedBy
+
+SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION = range(5)
+Results = ["success", "warnings", "failure", "skipped", "exception"]
+
+
+# build processes call the following methods:
+#
+#  setDefaults
+#
+#  currentlyBuilding
+#  currentlyIdle
+#  currentlyInterlocked
+#  currentlyOffline
+#  currentlyWaiting
+#
+#  setCurrentActivity
+#  updateCurrentActivity
+#  addFileToCurrentActivity
+#  finishCurrentActivity
+#
+#  startBuild
+#  finishBuild
+
+STDOUT = interfaces.LOG_CHANNEL_STDOUT
+STDERR = interfaces.LOG_CHANNEL_STDERR
+HEADER = interfaces.LOG_CHANNEL_HEADER
+ChunkTypes = ["stdout", "stderr", "header"]
+
+class LogFileScanner(basic.NetstringReceiver):
+    def __init__(self, chunk_cb, channels=[]):
+        self.chunk_cb = chunk_cb
+        self.channels = channels
+
+    def stringReceived(self, line):
+        channel = int(line[0])
+        if not self.channels or (channel in self.channels):
+            self.chunk_cb((channel, line[1:]))
+
+class LogFileProducer:
+    """What's the plan?
+
+    The LogFile has just one FD, used for both reading and writing.
+    Each time you add an entry, fd.seek to the end and then write.
+
+    Each reader (i.e. Producer) keeps track of their own offset. The reader
+    starts by seeking to the start of the logfile, and reading forwards.
+    Between each hunk of file they yield chunks, so they must remember their
+    offset before yielding and re-seek back to that offset before reading
+    more data. When their read() returns EOF, they're finished with the first
+    phase of the reading (everything that's already been written to disk).
+
+    After EOF, the remaining data is entirely in the current entries list.
+    These entries are all of the same channel, so we can do one "".join and
+    obtain a single chunk to be sent to the listener. But since that involves
+    a yield, and more data might arrive after we give up control, we have to
+    subscribe them before yielding. We can't subscribe them any earlier,
+    otherwise they'd get data out of order.
+
+    We're using a generator in the first place so that the listener can
+    throttle us, which means they're pulling. But the subscription means
+    we're pushing. Really we're a Producer. In the first phase we can be
+    either a PullProducer or a PushProducer. In the second phase we're only a
+    PushProducer.
+
+    So the client gives a LogFileConsumer to LogFile.subscribeConsumer(). This
+    Consumer must have registerProducer(), unregisterProducer(), and
+    writeChunk(), and is just like a regular twisted.internet.interfaces.IConsumer,
+    except that writeChunk() takes chunks (tuples of (channel,text)) instead
+    of the normal write() which takes just text. The LogFileConsumer is
+    allowed to call stopProducing, pauseProducing, and resumeProducing on the
+    producer instance it is given. """
+
+    paused = False
+    subscribed = False
+    BUFFERSIZE = 2048
+
+    def __init__(self, logfile, consumer):
+        self.logfile = logfile
+        self.consumer = consumer
+        self.chunkGenerator = self.getChunks()
+        consumer.registerProducer(self, True)
+
+    def getChunks(self):
+        f = self.logfile.getFile()
+        offset = 0
+        chunks = []
+        p = LogFileScanner(chunks.append)
+        f.seek(offset)
+        data = f.read(self.BUFFERSIZE)
+        offset = f.tell()
+        while data:
+            p.dataReceived(data)
+            while chunks:
+                c = chunks.pop(0)
+                yield c
+            f.seek(offset)
+            data = f.read(self.BUFFERSIZE)
+            offset = f.tell()
+        del f
+
+        # now subscribe them to receive new entries
+        self.subscribed = True
+        self.logfile.watchers.append(self)
+        d = self.logfile.waitUntilFinished()
+
+        # then give them the not-yet-merged data
+        if self.logfile.runEntries:
+            channel = self.logfile.runEntries[0][0]
+            text = "".join([c[1] for c in self.logfile.runEntries])
+            yield (channel, text)
+
+        # now we've caught up to the present. Anything further will come from
+        # the logfile subscription. We add the callback *after* yielding the
+        # data from runEntries, because the logfile might have finished
+        # during the yield.
+        d.addCallback(self.logfileFinished)
+
+    def stopProducing(self):
+        # TODO: should we still call consumer.finish? probably not.
+        self.paused = True
+        self.consumer = None
+        self.done()
+
+    def done(self):
+        if self.chunkGenerator:
+            self.chunkGenerator = None # stop making chunks
+        if self.subscribed:
+            self.logfile.watchers.remove(self)
+            self.subscribed = False
+
+    def pauseProducing(self):
+        self.paused = True
+
+    def resumeProducing(self):
+        # Twisted-1.3.0 has a bug which causes hangs when resumeProducing
+        # calls transport.write (there is a recursive loop, fixed in 2.0 in
+        # t.i.abstract.FileDescriptor.doWrite by setting the producerPaused
+        # flag *before* calling resumeProducing). To work around this, we
+        # just put off the real resumeProducing for a moment. This probably
+        # has a performance hit, but I'm going to assume that the log files
+        # are not retrieved frequently enough for it to be an issue.
+
+        reactor.callLater(0, self._resumeProducing)
+
+    def _resumeProducing(self):
+        self.paused = False
+        if not self.chunkGenerator:
+            return
+        try:
+            while not self.paused:
+                chunk = self.chunkGenerator.next()
+                self.consumer.writeChunk(chunk)
+                # we exit this when the consumer says to stop, or we run out
+                # of chunks
+        except StopIteration:
+            # if the generator finished, it has already released its file handle
+            self.chunkGenerator = None
+        # now everything goes through the subscription, and they don't get to
+        # pause anymore
+
+    def logChunk(self, build, step, logfile, channel, chunk):
+        if self.consumer:
+            self.consumer.writeChunk((channel, chunk))
+
+    def logfileFinished(self, logfile):
+        self.done()
+        if self.consumer:
+            self.consumer.unregisterProducer()
+            self.consumer.finish()
+            self.consumer = None
+
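+# Example (a sketch; class and attribute names are illustrative): the minimal
+# consumer expected by LogFile.subscribeConsumer() / LogFileProducer needs
+# registerProducer(), unregisterProducer(), writeChunk(), and finish().
+class _ExampleChunkCollector:
+    """Collect every (channel, text) chunk of a log into a list."""
+    def __init__(self):
+        self.chunks = []
+        self.producer = None
+
+    def registerProducer(self, producer, streaming):
+        self.producer = producer
+
+    def unregisterProducer(self):
+        self.producer = None
+
+    def writeChunk(self, chunk):
+        self.chunks.append(chunk) # chunk is a (channel, text) tuple
+
+    def finish(self):
+        pass # called once the LogFile has finished
+
+# Usage sketch: logfile.subscribeConsumer(_ExampleChunkCollector())
+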
+class LogFile:
+    """A LogFile keeps all of its contents on disk, in a non-pickle format to
+    which new entries can easily be appended. The file on disk has a name
+    like 12-log-compile-output, under the Builder's directory. The actual
+    filename is generated (before the LogFile is created) by
+    L{BuildStatus.generateLogfileName}.
+
+    Old LogFile pickles (which kept their contents in .entries) must be
+    upgraded. The L{BuilderStatus} is responsible for doing this, when it
+    loads the L{BuildStatus} into memory. The Build pickle is not modified,
+    so users who go from 0.6.5 back to 0.6.4 don't have to lose their
+    logs."""
+
+    if implements:
+        implements(interfaces.IStatusLog, interfaces.ILogFile)
+    else:
+        __implements__ = (interfaces.IStatusLog, interfaces.ILogFile)
+
+    finished = False
+    length = 0
+    chunkSize = 10*1000
+    runLength = 0
+    runEntries = [] # provided so old pickled builds will getChunks() ok
+    entries = None
+    BUFFERSIZE = 2048
+    filename = None # relative to the Builder's basedir
+    openfile = None
+
+    def __init__(self, parent, name, logfilename):
+        """
+        @type  parent: L{BuildStepStatus}
+        @param parent: the Step that this log is a part of
+        @type  name: string
+        @param name: the name of this log, typically 'output'
+        @type  logfilename: string
+        @param logfilename: the Builder-relative pathname for the saved entries
+        """
+        self.step = parent
+        self.name = name
+        self.filename = logfilename
+        fn = self.getFilename()
+        if os.path.exists(fn):
+            # the buildmaster was probably stopped abruptly, before the
+            # BuilderStatus could be saved, so BuilderStatus.nextBuildNumber
+            # is out of date, and we're overlapping with earlier builds now.
+            # Warn about it, but then overwrite the old pickle file
+            log.msg("Warning: Overwriting old serialized Build at %s" % fn)
+        self.openfile = open(fn, "w+")
+        self.runEntries = []
+        self.watchers = []
+        self.finishedWatchers = []
+
+    def getFilename(self):
+        return os.path.join(self.step.build.builder.basedir, self.filename)
+
+    def hasContents(self):
+        return os.path.exists(self.getFilename())
+
+    def getName(self):
+        return self.name
+
+    def getStep(self):
+        return self.step
+
+    def isFinished(self):
+        return self.finished
+    def waitUntilFinished(self):
+        if self.finished:
+            d = defer.succeed(self)
+        else:
+            d = defer.Deferred()
+            self.finishedWatchers.append(d)
+        return d
+
+    def getFile(self):
+        if self.openfile:
+            # this is the filehandle we're using to write to the log, so
+            # don't close it!
+            return self.openfile
+        # otherwise they get their own read-only handle
+        return open(self.getFilename(), "r")
+
+    def getText(self):
+        # this produces one ginormous string
+        return "".join(self.getChunks([STDOUT, STDERR], onlyText=True))
+
+    def getTextWithHeaders(self):
+        return "".join(self.getChunks(onlyText=True))
+
+    def getChunks(self, channels=[], onlyText=False):
+        # generate chunks for everything that was logged at the time we were
+        # first called, so remember how long the file was when we started.
+        # Don't read beyond that point. The current contents of
+        # self.runEntries will follow.
+
+        # this returns an iterator, which means arbitrary things could happen
+        # while we're yielding. This will faithfully deliver the log as it
+        # existed when getChunks() was called, and not return anything after
+        # that point. To use this in subscribe(catchup=True) without missing
+        # any data, you must ensure that nothing is added to the log during
+        # the yields.
+
+        f = self.getFile()
+        offset = 0
+        f.seek(0, 2)
+        remaining = f.tell()
+
+        leftover = None
+        if self.runEntries and (not channels or
+                                (self.runEntries[0][0] in channels)):
+            leftover = (self.runEntries[0][0],
+                        "".join([c[1] for c in self.runEntries]))
+
+        # freeze the state of the LogFile by passing a lot of parameters into
+        # a generator
+        return self._generateChunks(f, offset, remaining, leftover,
+                                    channels, onlyText)
+
+    def _generateChunks(self, f, offset, remaining, leftover,
+                        channels, onlyText):
+        chunks = []
+        p = LogFileScanner(chunks.append, channels)
+        f.seek(offset)
+        data = f.read(min(remaining, self.BUFFERSIZE))
+        remaining -= len(data)
+        offset = f.tell()
+        while data:
+            p.dataReceived(data)
+            while chunks:
+                channel, text = chunks.pop(0)
+                if onlyText:
+                    yield text
+                else:
+                    yield (channel, text)
+            f.seek(offset)
+            data = f.read(min(remaining, self.BUFFERSIZE))
+            remaining -= len(data)
+            offset = f.tell()
+        del f
+
+        if leftover:
+            if onlyText:
+                yield leftover[1]
+            else:
+                yield leftover
+
+    def readlines(self, channel=STDOUT):
+        """Return an iterator that produces newline-terminated lines,
+        excluding header chunks."""
+        # TODO: make this memory-efficient, by turning it into a generator
+        # that retrieves chunks as necessary, like a pull-driven version of
+        # twisted.protocols.basic.LineReceiver
+        alltext = "".join(self.getChunks([channel], onlyText=True))
+        io = StringIO(alltext)
+        return io.readlines()
+
+    def subscribe(self, receiver, catchup):
+        if self.finished:
+            return
+        self.watchers.append(receiver)
+        if catchup:
+            for channel, text in self.getChunks():
+                # TODO: add logChunks(), to send over everything at once?
+                receiver.logChunk(self.step.build, self.step, self,
+                                  channel, text)
+
+    def unsubscribe(self, receiver):
+        if receiver in self.watchers:
+            self.watchers.remove(receiver)
+
+    def subscribeConsumer(self, consumer):
+        p = LogFileProducer(self, consumer)
+        p.resumeProducing()
+
+    # interface used by the build steps to add things to the log
+
+    def merge(self):
+        # merge all .runEntries (which are all of the same channel) into a
+        # single chunk and append it to the on-disk log
+        if not self.runEntries:
+            return
+        channel = self.runEntries[0][0]
+        text = "".join([c[1] for c in self.runEntries])
+        assert channel < 10
+        f = self.openfile
+        f.seek(0, 2)
+        offset = 0
+        while offset < len(text):
+            size = min(len(text)-offset, self.chunkSize)
+            f.write("%d:%d" % (1 + size, channel))
+            f.write(text[offset:offset+size])
+            f.write(",")
+            offset += size
+        self.runEntries = []
+        self.runLength = 0
+
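+    # Example (a sketch of the on-disk format produced by merge() above):
+    # each flushed run becomes a netstring whose payload is the channel digit
+    # followed by the text, so a single addStdout("hello\n") followed by
+    # finish() leaves the file containing
+    #
+    #     7:0hello\n,
+    #
+    # where 7 == len("0" + "hello\n"), "0" is the STDOUT channel, and ","
+    # terminates the netstring. LogFileScanner parses this back into the
+    # chunk (STDOUT, "hello\n").
+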
+    def addEntry(self, channel, text):
+        assert not self.finished
+        # we only add to .runEntries here. merge() is responsible for
+        # flushing merged chunks out to the on-disk log
+        if self.runEntries and channel != self.runEntries[0][0]:
+            self.merge()
+        self.runEntries.append((channel, text))
+        self.runLength += len(text)
+        if self.runLength >= self.chunkSize:
+            self.merge()
+
+        for w in self.watchers:
+            w.logChunk(self.step.build, self.step, self, channel, text)
+        self.length += len(text)
+
+    def addStdout(self, text):
+        self.addEntry(STDOUT, text)
+    def addStderr(self, text):
+        self.addEntry(STDERR, text)
+    def addHeader(self, text):
+        self.addEntry(HEADER, text)
+
+    def finish(self):
+        self.merge()
+        if self.openfile:
+            # we don't do an explicit close, because there might be readers
+            # sharing the filehandle. As soon as they stop reading, the
+            # filehandle will be released and automatically closed. We will
+            # do a sync, however, to make sure the log gets saved in case of
+            # a crash.
+            os.fsync(self.openfile.fileno())
+            del self.openfile
+        self.finished = True
+        watchers = self.finishedWatchers
+        self.finishedWatchers = []
+        for w in watchers:
+            w.callback(self)
+        self.watchers = []
+
+    # persistence stuff
+    def __getstate__(self):
+        d = self.__dict__.copy()
+        del d['step'] # filled in upon unpickling
+        del d['watchers']
+        del d['finishedWatchers']
+        d['entries'] = [] # let 0.6.4 tolerate the saved log. TODO: really?
+        if d.has_key('finished'):
+            del d['finished']
+        if d.has_key('openfile'):
+            del d['openfile']
+        return d
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self.watchers = [] # probably not necessary
+        self.finishedWatchers = [] # same
+        # self.step must be filled in by our parent
+        self.finished = True
+
+    def upgrade(self, logfilename):
+        """Save our .entries to a new-style offline log file (if necessary),
+        and modify our in-memory representation to use it. The original
+        pickled LogFile (inside the pickled Build) won't be modified."""
+        self.filename = logfilename
+        if not os.path.exists(self.getFilename()):
+            self.openfile = open(self.getFilename(), "w")
+            self.finished = False
+            for channel,text in self.entries:
+                self.addEntry(channel, text)
+            self.finish() # releases self.openfile, which will be closed
+        del self.entries
+
+
+class HTMLLogFile:
+    if implements:
+        implements(interfaces.IStatusLog)
+    else:
+        __implements__ = interfaces.IStatusLog,
+
+    filename = None
+
+    def __init__(self, parent, name, logfilename, html):
+        self.step = parent
+        self.name = name
+        self.filename = logfilename
+        self.html = html
+
+    def getName(self):
+        return self.name # set in BuildStepStatus.addLog
+    def getStep(self):
+        return self.step
+
+    def isFinished(self):
+        return True
+    def waitUntilFinished(self):
+        return defer.succeed(self)
+
+    def hasContents(self):
+        return True
+    def getText(self):
+        return self.html # looks kinda like text
+    def getTextWithHeaders(self):
+        return self.html
+    def getChunks(self):
+        return [(STDERR, self.html)]
+
+    def subscribe(self, receiver, catchup):
+        pass
+    def unsubscribe(self, receiver):
+        pass
+
+    def finish(self):
+        pass
+
+    def __getstate__(self):
+        d = self.__dict__.copy()
+        del d['step']
+        return d
+
+    def upgrade(self, logfilename):
+        pass
+
+
+class Event:
+    if implements:
+        implements(interfaces.IStatusEvent)
+    else:
+        __implements__ = interfaces.IStatusEvent,
+
+    started = None
+    finished = None
+    text = []
+    color = None
+
+    # IStatusEvent methods
+    def getTimes(self):
+        return (self.started, self.finished)
+    def getText(self):
+        return self.text
+    def getColor(self):
+        return self.color
+    def getLogs(self):
+        return []
+
+    def finish(self):
+        self.finished = util.now()
+
+class TestResult:
+    if implements:
+        implements(interfaces.ITestResult)
+    else:
+        __implements__ = interfaces.ITestResult,
+
+    def __init__(self, name, results, text, logs):
+        assert isinstance(name, tuple)
+        self.name = name
+        self.results = results
+        self.text = text
+        self.logs = logs
+
+    def getName(self):
+        return self.name
+
+    def getResults(self):
+        return self.results
+
+    def getText(self):
+        return self.text
+
+    def getLogs(self):
+        return self.logs
+
+
+class BuildSetStatus:
+    if implements:
+        implements(interfaces.IBuildSetStatus)
+    else:
+        __implements__ = interfaces.IBuildSetStatus,
+
+    def __init__(self, source, reason, builderNames, bsid=None):
+        self.source = source
+        self.reason = reason
+        self.builderNames = builderNames
+        self.id = bsid
+        self.successWatchers = []
+        self.finishedWatchers = []
+        self.stillHopeful = True
+        self.finished = False
+
+    def setBuildRequestStatuses(self, buildRequestStatuses):
+        self.buildRequests = buildRequestStatuses
+    def setResults(self, results):
+        # the build set succeeds only if all its component builds succeed
+        self.results = results
+    def giveUpHope(self):
+        self.stillHopeful = False
+
+
+    def notifySuccessWatchers(self):
+        for d in self.successWatchers:
+            d.callback(self)
+        self.successWatchers = []
+
+    def notifyFinishedWatchers(self):
+        self.finished = True
+        for d in self.finishedWatchers:
+            d.callback(self)
+        self.finishedWatchers = []
+
+    # methods for our clients
+
+    def getSourceStamp(self):
+        return self.source
+    def getReason(self):
+        return self.reason
+    def getResults(self):
+        return self.results
+    def getID(self):
+        return self.id
+
+    def getBuilderNames(self):
+        return self.builderNames
+    def getBuildRequests(self):
+        return self.buildRequests
+    def isFinished(self):
+        return self.finished
+    
+    def waitUntilSuccess(self):
+        if self.finished or not self.stillHopeful:
+            # the deferreds have already fired
+            return defer.succeed(self)
+        d = defer.Deferred()
+        self.successWatchers.append(d)
+        return d
+
+    def waitUntilFinished(self):
+        if self.finished:
+            return defer.succeed(self)
+        d = defer.Deferred()
+        self.finishedWatchers.append(d)
+        return d
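+    # Example usage (a sketch; 'bss' stands for a BuildSetStatus instance
+    # obtained through the status API):
+    #
+    #     d = bss.waitUntilFinished()
+    #     d.addCallback(lambda bss: log.msg("buildset results:",
+    #                                       bss.getResults()))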
+
+class BuildRequestStatus:
+    if implements:
+        implements(interfaces.IBuildRequestStatus)
+    else:
+        __implements__ = interfaces.IBuildRequestStatus,
+
+    def __init__(self, source, builderName):
+        self.source = source
+        self.builderName = builderName
+        self.builds = [] # list of BuildStatus objects
+        self.observers = []
+
+    def buildStarted(self, build):
+        self.builds.append(build)
+        for o in self.observers[:]:
+            o(build)
+
+    # methods called by our clients
+    def getSourceStamp(self):
+        return self.source
+    def getBuilderName(self):
+        return self.builderName
+    def getBuilds(self):
+        return self.builds
+
+    def subscribe(self, observer):
+        self.observers.append(observer)
+        for b in self.builds:
+            observer(b)
+    def unsubscribe(self, observer):
+        self.observers.remove(observer)
+
+
+class BuildStepStatus(styles.Versioned):
+    """
+    I represent a collection of output status for a
+    L{buildbot.process.step.BuildStep}.
+
+    @type color: string
+    @cvar color: color that this step feels best represents its
+                 current mood. yellow,green,red,orange are the
+                 most likely choices, although purple indicates
+                 an exception
+    @type progress: L{buildbot.status.progress.StepProgress}
+    @cvar progress: tracks ETA for the step
+    @type text: list of strings
+    @cvar text: list of short texts that describe the command and its status
+    @type text2: list of strings
+    @cvar text2: list of short texts added to the overall build description
+    @type logs: dict of string -> L{buildbot.status.builder.LogFile}
+    @ivar logs: logs of steps
+    """
+    # note that these are created when the Build is set up, before each
+    # corresponding BuildStep has started.
+    if implements:
+        implements(interfaces.IBuildStepStatus, interfaces.IStatusEvent)
+    else:
+        __implements__ = interfaces.IBuildStepStatus, interfaces.IStatusEvent
+    persistenceVersion = 1
+
+    started = None
+    finished = None
+    progress = None
+    text = []
+    color = None
+    results = (None, [])
+    text2 = []
+    watchers = []
+    updates = {}
+    finishedWatchers = []
+
+    def __init__(self, parent):
+        assert interfaces.IBuildStatus(parent)
+        self.build = parent
+        self.logs = []
+        self.urls = {}
+        self.watchers = []
+        self.updates = {}
+        self.finishedWatchers = []
+
+    def getName(self):
+        """Returns a short string with the name of this step. This string
+        may have spaces in it."""
+        return self.name
+
+    def getBuild(self):
+        return self.build
+
+    def getTimes(self):
+        return (self.started, self.finished)
+
+    def getExpectations(self):
+        """Returns a list of tuples (name, current, target)."""
+        if not self.progress:
+            return []
+        ret = []
+        metrics = self.progress.progress.keys()
+        metrics.sort()
+        for m in metrics:
+            t = (m, self.progress.progress[m], self.progress.expectations[m])
+            ret.append(t)
+        return ret
+
+    def getLogs(self):
+        return self.logs
+
+    def getURLs(self):
+        return self.urls.copy()
+
+    def isFinished(self):
+        return (self.finished is not None)
+
+    def waitUntilFinished(self):
+        if self.finished:
+            d = defer.succeed(self)
+        else:
+            d = defer.Deferred()
+            self.finishedWatchers.append(d)
+        return d
+
+    # while the step is running, the following methods make sense.
+    # Afterwards they return None
+
+    def getETA(self):
+        if self.started is None:
+            return None # not started yet
+        if self.finished is not None:
+            return None # already finished
+        if not self.progress:
+            return None # no way to predict
+        return self.progress.remaining()
+
+    # Once you know the step has finished, the following methods are legal.
+    # Before this step has finished, they all return None.
+
+    def getText(self):
+        """Returns a list of strings which describe the step. These are
+        intended to be displayed in a narrow column. If more space is
+        available, the caller should join them together with spaces before
+        presenting them to the user."""
+        return self.text
+
+    def getColor(self):
+        """Returns a single string with the color that should be used to
+        display this step. 'green', 'orange', 'red', 'yellow' and 'purple'
+        are the most likely ones."""
+        return self.color
+
+    def getResults(self):
+        """Return a tuple describing the results of the step.
+        'result' is one of the constants in L{buildbot.status.builder}:
+        SUCCESS, WARNINGS, FAILURE, SKIPPED, or EXCEPTION.
+        'strings' is an optional list of strings that the step wants to
+        append to the overall build's results. These strings are usually
+        more terse than the ones returned by getText(): in particular,
+        successful Steps do not usually contribute any text to the
+        overall build.
+
+        @rtype:   tuple of int, list of strings
+        @returns: (result, strings)
+        """
+        return (self.results, self.text2)
+
+    # subscription interface
+
+    def subscribe(self, receiver, updateInterval=10):
+        # will get logStarted, logFinished, stepETAUpdate
+        assert receiver not in self.watchers
+        self.watchers.append(receiver)
+        self.sendETAUpdate(receiver, updateInterval)
+
+    def sendETAUpdate(self, receiver, updateInterval):
+        self.updates[receiver] = None
+        # they might unsubscribe during stepETAUpdate
+        receiver.stepETAUpdate(self.build, self,
+                           self.getETA(), self.getExpectations())
+        if receiver in self.watchers:
+            self.updates[receiver] = reactor.callLater(updateInterval,
+                                                       self.sendETAUpdate,
+                                                       receiver,
+                                                       updateInterval)
+
+    def unsubscribe(self, receiver):
+        if receiver in self.watchers:
+            self.watchers.remove(receiver)
+        if receiver in self.updates:
+            if self.updates[receiver] is not None:
+                self.updates[receiver].cancel()
+            del self.updates[receiver]
+
+
+    # methods to be invoked by the BuildStep
+
+    def setName(self, stepname):
+        self.name = stepname
+
+    def setProgress(self, stepprogress):
+        self.progress = stepprogress
+
+    def stepStarted(self):
+        self.started = util.now()
+        if self.build:
+            self.build.stepStarted(self)
+
+    def addLog(self, name):
+        assert self.started # addLog before stepStarted won't notify watchers
+        logfilename = self.build.generateLogfileName(self.name, name)
+        log = LogFile(self, name, logfilename)
+        self.logs.append(log)
+        for w in self.watchers:
+            receiver = w.logStarted(self.build, self, log)
+            if receiver:
+                log.subscribe(receiver, True)
+                d = log.waitUntilFinished()
+                d.addCallback(lambda log: log.unsubscribe(receiver))
+        d = log.waitUntilFinished()
+        d.addCallback(self.logFinished)
+        return log
+
+    def addHTMLLog(self, name, html):
+        assert self.started # addLog before stepStarted won't notify watchers
+        logfilename = self.build.generateLogfileName(self.name, name)
+        log = HTMLLogFile(self, name, logfilename, html)
+        self.logs.append(log)
+        for w in self.watchers:
+            receiver = w.logStarted(self.build, self, log)
+            # TODO: think about this: there isn't much point in letting
+            # them subscribe
+            #if receiver:
+            #    log.subscribe(receiver, True)
+            w.logFinished(self.build, self, log)
+
+    def logFinished(self, log):
+        for w in self.watchers:
+            w.logFinished(self.build, self, log)
+
+    def addURL(self, name, url):
+        self.urls[name] = url
+
+    def setColor(self, color):
+        self.color = color
+    def setText(self, text):
+        self.text = text
+    def setText2(self, text):
+        self.text2 = text
+
+    def stepFinished(self, results):
+        self.finished = util.now()
+        self.results = results
+        for loog in self.logs:
+            if not loog.isFinished():
+                loog.finish()
+
+        for r in self.updates.keys():
+            if self.updates[r] is not None:
+                self.updates[r].cancel()
+                del self.updates[r]
+
+        watchers = self.finishedWatchers
+        self.finishedWatchers = []
+        for w in watchers:
+            w.callback(self)
+
+    # persistence
+
+    def __getstate__(self):
+        d = styles.Versioned.__getstate__(self)
+        del d['build'] # filled in when loading
+        if d.has_key('progress'):
+            del d['progress']
+        del d['watchers']
+        del d['finishedWatchers']
+        del d['updates']
+        return d
+
+    def __setstate__(self, d):
+        styles.Versioned.__setstate__(self, d)
+        # self.build must be filled in by our parent
+        for loog in self.logs:
+            loog.step = self
+
+    def upgradeToVersion1(self):
+        if not hasattr(self, "urls"):
+            self.urls = {}
+
+
+class BuildStatus(styles.Versioned):
+    if implements:
+        implements(interfaces.IBuildStatus, interfaces.IStatusEvent)
+    else:
+        __implements__ = interfaces.IBuildStatus, interfaces.IStatusEvent
+    persistenceVersion = 2
+
+    source = None
+    reason = None
+    changes = []
+    blamelist = []
+    progress = None
+    started = None
+    finished = None
+    currentStep = None
+    text = []
+    color = None
+    results = None
+    slavename = "???"
+
+    # these lists/dicts are defined here so that unserialized instances have
+    # (empty) values. They are set in __init__ to new objects to make sure
+    # each instance gets its own copy.
+    watchers = []
+    updates = {}
+    finishedWatchers = []
+    testResults = {}
+
+    def __init__(self, parent, number):
+        """
+        @type  parent: L{BuilderStatus}
+        @type  number: int
+        """
+        assert interfaces.IBuilderStatus(parent)
+        self.builder = parent
+        self.number = number
+        self.watchers = []
+        self.updates = {}
+        self.finishedWatchers = []
+        self.steps = []
+        self.testResults = {}
+        self.properties = {}
+
+    # IBuildStatus
+
+    def getBuilder(self):
+        """
+        @rtype: L{BuilderStatus}
+        """
+        return self.builder
+
+    def getProperty(self, propname):
+        return self.properties[propname]
+
+    def getNumber(self):
+        return self.number
+
+    def getPreviousBuild(self):
+        if self.number == 0:
+            return None
+        return self.builder.getBuild(self.number-1)
+
+    def getSourceStamp(self):
+        return (self.source.branch, self.source.revision, self.source.patch)
+
+    def getReason(self):
+        return self.reason
+
+    def getChanges(self):
+        return self.changes
+
+    def getResponsibleUsers(self):
+        return self.blamelist
+
+    def getInterestedUsers(self):
+        # TODO: the Builder should add others: sheriffs, domain-owners
+        return self.blamelist
+
+    def getSteps(self):
+        """Return a list of IBuildStepStatus objects. For invariant builds
+        (those which always use the same set of Steps), this should be the
+        complete list, however some of the steps may not have started yet
+        (step.getTimes()[0] will be None). For variant builds, this may not
+        be complete (asking again later may give you more of them)."""
+        return self.steps
+
+    def getTimes(self):
+        return (self.started, self.finished)
+
+    def isFinished(self):
+        return (self.finished is not None)
+
+    def waitUntilFinished(self):
+        if self.finished:
+            d = defer.succeed(self)
+        else:
+            d = defer.Deferred()
+            self.finishedWatchers.append(d)
+        return d
+
+    # while the build is running, the following methods make sense.
+    # Afterwards they return None
+
+    def getETA(self):
+        if self.finished is not None:
+            return None
+        if not self.progress:
+            return None
+        eta = self.progress.eta()
+        if eta is None:
+            return None
+        return eta - util.now()
+
+    def getCurrentStep(self):
+        return self.currentStep
+
+    # Once you know the build has finished, the following methods are legal.
+    # Before this build has finished, they all return None.
+
+    def getText(self):
+        text = []
+        text.extend(self.text)
+        for s in self.steps:
+            text.extend(s.text2)
+        return text
+
+    def getColor(self):
+        return self.color
+
+    def getResults(self):
+        return self.results
+
+    def getSlavename(self):
+        return self.slavename
+
+    def getTestResults(self):
+        return self.testResults
+
+    def getLogs(self):
+        # TODO: steps should contribute significant logs instead of this
+        # hack, which returns every log from every step. The logs should get
+        # names like "compile" and "test" instead of "compile.output"
+        logs = []
+        for s in self.steps:
+            for log in s.getLogs():
+                logs.append(log)
+        return logs
+
+    # subscription interface
+
+    def subscribe(self, receiver, updateInterval=None):
+        # will receive stepStarted and stepFinished messages
+        # and maybe buildETAUpdate
+        self.watchers.append(receiver)
+        if updateInterval is not None:
+            self.sendETAUpdate(receiver, updateInterval)
+
+    def sendETAUpdate(self, receiver, updateInterval):
+        self.updates[receiver] = None
+        ETA = self.getETA()
+        if ETA is not None:
+            receiver.buildETAUpdate(self, self.getETA())
+        # they might have unsubscribed during buildETAUpdate
+        if receiver in self.watchers:
+            self.updates[receiver] = reactor.callLater(updateInterval,
+                                                       self.sendETAUpdate,
+                                                       receiver,
+                                                       updateInterval)
+
+    def unsubscribe(self, receiver):
+        if receiver in self.watchers:
+            self.watchers.remove(receiver)
+        if receiver in self.updates:
+            if self.updates[receiver] is not None:
+                self.updates[receiver].cancel()
+            del self.updates[receiver]
+
+    # methods for the base.Build to invoke
+
+    def addStepWithName(self, name):
+        """The Build is setting up, and has added a new BuildStep to its
+        list. Create a BuildStepStatus object to which it can send status
+        updates."""
+
+        s = BuildStepStatus(self)
+        s.setName(name)
+        self.steps.append(s)
+        return s
+
+    def setProperty(self, propname, value):
+        self.properties[propname] = value
+
+    def addTestResult(self, result):
+        self.testResults[result.getName()] = result
+
+    def setSourceStamp(self, sourceStamp):
+        self.source = sourceStamp
+        self.changes = self.source.changes
+
+    def setReason(self, reason):
+        self.reason = reason
+    def setBlamelist(self, blamelist):
+        self.blamelist = blamelist
+    def setProgress(self, progress):
+        self.progress = progress
+
+    def buildStarted(self, build):
+        """The Build has been set up and is about to be started. It can now
+        be safely queried, so it is time to announce the new build."""
+
+        self.started = util.now()
+        # now that we're ready to report status, let the BuilderStatus tell
+        # the world about us
+        self.builder.buildStarted(self)
+
+    def setSlavename(self, slavename):
+        self.slavename = slavename
+
+    def setText(self, text):
+        assert isinstance(text, (list, tuple))
+        self.text = text
+    def setColor(self, color):
+        self.color = color
+    def setResults(self, results):
+        self.results = results
+
+    def buildFinished(self):
+        self.currentStep = None
+        self.finished = util.now()
+
+        for r in self.updates.keys():
+            if self.updates[r] is not None:
+                self.updates[r].cancel()
+                del self.updates[r]
+
+        watchers = self.finishedWatchers
+        self.finishedWatchers = []
+        for w in watchers:
+            w.callback(self)
+
+    # methods called by our BuildStepStatus children
+
+    def stepStarted(self, step):
+        self.currentStep = step
+        name = self.getBuilder().getName()
+        for w in self.watchers:
+            receiver = w.stepStarted(self, step)
+            if receiver:
+                if type(receiver) == type(()):
+                    step.subscribe(receiver[0], receiver[1])
+                else:
+                    step.subscribe(receiver)
+                d = step.waitUntilFinished()
+                d.addCallback(lambda step: step.unsubscribe(receiver))
+
+        step.waitUntilFinished().addCallback(self._stepFinished)
+
+    def _stepFinished(self, step):
+        results = step.getResults()
+        for w in self.watchers:
+            w.stepFinished(self, step, results)
+
+    # methods called by our BuilderStatus parent
+
+    def pruneLogs(self):
+        # this build is somewhat old: remove the build logs to save space
+        # TODO: delete logs visible through IBuildStatus.getLogs
+        for s in self.steps:
+            s.pruneLogs()
+
+    def pruneSteps(self):
+        # this build is very old: remove the build steps too
+        self.steps = []
+
+    # persistence stuff
+
+    def generateLogfileName(self, stepname, logname):
+        """Return a filename (relative to the Builder's base directory) where
+        the logfile's contents can be stored uniquely.
+
+        The base filename is made by combining our build number, the Step's
+        name, and the log's name, then removing unsuitable characters. The
+        filename is then made unique by appending _0, _1, etc, until it does
+        not collide with any other logfile.
+
+        These files are kept in the Builder's basedir (rather than a
+        per-Build subdirectory) because that makes cleanup easier: cron and
+        find will help get rid of the old logs, but the empty directories are
+        more of a hassle to remove."""
+
+        starting_filename = "%d-log-%s-%s" % (self.number, stepname, logname)
+        starting_filename = re.sub(r'[^\w\.\-]', '_', starting_filename)
+        # now make it unique
+        unique_counter = 0
+        filename = starting_filename
+        while filename in [l.filename
+                           for step in self.steps
+                           for l in step.getLogs()
+                           if l.filename]:
+            filename = "%s_%d" % (starting_filename, unique_counter)
+            unique_counter += 1
+        return filename
+
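+    # Example (a sketch): for build 12, a step named "compile" and a log named
+    # "stdio & stderr", the starting filename would be
+    # "12-log-compile-stdio___stderr"; if another log of this build had
+    # already claimed that name, "_0", "_1", ... would be appended until the
+    # name is unique.
+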
+    def __getstate__(self):
+        d = styles.Versioned.__getstate__(self)
+        # for now, a serialized Build is always "finished". We will never
+        # save unfinished builds.
+        if not self.finished:
+            d['finished'] = True
+            # TODO: push an "interrupted" step so it is clear that the build
+            # was interrupted. The builder will have a 'shutdown' event, but
+            # someone looking at just this build will be confused as to why
+            # the last log is truncated.
+        del d['builder'] # filled in by our parent when loading
+        del d['watchers']
+        del d['updates']
+        del d['finishedWatchers']
+        return d
+
+    def __setstate__(self, d):
+        styles.Versioned.__setstate__(self, d)
+        # self.builder must be filled in by our parent when loading
+        for step in self.steps:
+            step.build = self
+        self.watchers = []
+        self.updates = {}
+        self.finishedWatchers = []
+
+    def upgradeToVersion1(self):
+        if hasattr(self, "sourceStamp"):
+            # the old .sourceStamp attribute wasn't actually very useful
+            maxChangeNumber, patch = self.sourceStamp
+            changes = getattr(self, 'changes', [])
+            source = sourcestamp.SourceStamp(branch=None,
+                                             revision=None,
+                                             patch=patch,
+                                             changes=changes)
+            self.source = source
+            self.changes = source.changes
+            del self.sourceStamp
+
+    def upgradeToVersion2(self):
+        self.properties = {}
+
+    def upgradeLogfiles(self):
+        # upgrade any LogFiles that need it. This must occur after we've been
+        # attached to our Builder, and after we know about all LogFiles of
+        # all Steps (to get the filenames right).
+        assert self.builder
+        for s in self.steps:
+            for l in s.getLogs():
+                if l.filename:
+                    pass # new-style, log contents are on disk
+                else:
+                    logfilename = self.generateLogfileName(s.name, l.name)
+                    # let the logfile update its .filename pointer,
+                    # transferring its contents onto disk if necessary
+                    l.upgrade(logfilename)
+
+    def saveYourself(self):
+        filename = os.path.join(self.builder.basedir, "%d" % self.number)
+        if os.path.isdir(filename):
+            # leftover from 0.5.0, which stored builds in directories
+            shutil.rmtree(filename, ignore_errors=True)
+        tmpfilename = filename + ".tmp"
+        try:
+            pickle.dump(self, open(tmpfilename, "wb"), -1)
+            if sys.platform == 'win32':
+                # windows cannot rename a file on top of an existing one, so
+                # fall back to delete-first. There are ways this can fail and
+                # lose this build's saved status, so we avoid using it in the
+                # general (non-windows) case
+                if os.path.exists(filename):
+                    os.unlink(filename)
+            os.rename(tmpfilename, filename)
+        except:
+            log.msg("unable to save build %s-#%d" % (self.builder.name,
+                                                     self.number))
+            log.err()
+
+
+
+class BuilderStatus(styles.Versioned):
+    """I handle status information for a single process.base.Builder object.
+    That object sends status changes to me (frequently as Events), and I
+    provide them on demand to the various status recipients, like the HTML
+    waterfall display and the live status clients. It also sends build
+    summaries to me, which I log and provide to status clients who aren't
+    interested in seeing details of the individual build steps.
+
+    I am responsible for maintaining the list of historic Events and Builds,
+    pruning old ones, and loading them from / saving them to disk.
+
+    I live in the buildbot.process.base.Builder object, in the .statusbag
+    attribute.
+
+    @type  category: string
+    @ivar  category: user-defined category this builder belongs to; can be
+                     used to filter on in status clients
+    """
+
+    if implements:
+        implements(interfaces.IBuilderStatus)
+    else:
+        __implements__ = interfaces.IBuilderStatus,
+    persistenceVersion = 1
+
+    # these limit the amount of memory we consume, as well as the size of the
+    # main Builder pickle. The Build and LogFile pickles on disk must be
+    # handled separately.
+    buildCacheSize = 30
+    buildHorizon = 100 # forget builds beyond this
+    stepHorizon = 50 # forget steps in builds beyond this
+
+    category = None
+    currentBigState = "offline" # or idle/waiting/interlocked/building
+    basedir = None # filled in by our parent
+
+    def __init__(self, buildername, category=None):
+        self.name = buildername
+        self.category = category
+
+        self.slavenames = []
+        self.events = []
+        # these three hold Events, and are used to retrieve the current
+        # state of the boxes.
+        self.lastBuildStatus = None
+        #self.currentBig = None
+        #self.currentSmall = None
+        self.currentBuilds = []
+        self.pendingBuilds = []
+        self.nextBuild = None
+        self.watchers = []
+        self.buildCache = [] # TODO: age builds out of the cache
+
+    # persistence
+
+    def __getstate__(self):
+        # when saving, don't record transient stuff like what builds are
+        # currently running, because they won't be there when we start back
+        # up. Nor do we save self.watchers, nor anything that gets set by our
+        # parent like .basedir and .status
+        d = styles.Versioned.__getstate__(self)
+        d['watchers'] = []
+        del d['buildCache']
+        for b in self.currentBuilds:
+            b.saveYourself()
+            # TODO: push a 'hey, build was interrupted' event
+        del d['currentBuilds']
+        del d['pendingBuilds']
+        del d['currentBigState']
+        del d['basedir']
+        del d['status']
+        del d['nextBuildNumber']
+        return d
+
+    def __setstate__(self, d):
+        # when loading, re-initialize the transient stuff. Remember that
+        # upgradeToVersion1 and such will be called after this finishes.
+        styles.Versioned.__setstate__(self, d)
+        self.buildCache = []
+        self.currentBuilds = []
+        self.pendingBuilds = []
+        self.watchers = []
+        self.slavenames = []
+        # self.basedir must be filled in by our parent
+        # self.status must be filled in by our parent
+
+    def upgradeToVersion1(self):
+        if hasattr(self, 'slavename'):
+            self.slavenames = [self.slavename]
+            del self.slavename
+        if hasattr(self, 'nextBuildNumber'):
+            del self.nextBuildNumber # determineNextBuildNumber chooses this
+
+    def determineNextBuildNumber(self):
+        """Scan our directory of saved BuildStatus instances to determine
+        what our self.nextBuildNumber should be. Set it one larger than the
+        highest-numbered build we discover. This is called by the top-level
+        Status object shortly after we are created or loaded from disk.
+        """
+        existing_builds = [int(f)
+                           for f in os.listdir(self.basedir)
+                           if re.match("^\d+$", f)]
+        if existing_builds:
+            self.nextBuildNumber = max(existing_builds) + 1
+        else:
+            self.nextBuildNumber = 0
+
+    def saveYourself(self):
+        for b in self.buildCache:
+            if not b.isFinished():
+                # interrupted build, need to save it anyway.
+                # BuildStatus.saveYourself will mark it as interrupted.
+                b.saveYourself()
+        filename = os.path.join(self.basedir, "builder")
+        tmpfilename = filename + ".tmp"
+        try:
+            pickle.dump(self, open(tmpfilename, "wb"), -1)
+            if sys.platform == 'win32':
+                # windows cannot rename a file on top of an existing one
+                if os.path.exists(filename):
+                    os.unlink(filename)
+            os.rename(tmpfilename, filename)
+        except:
+            log.msg("unable to save builder %s" % self.name)
+            log.err()
+        
+
+    # build cache management
+
+    def addBuildToCache(self, build):
+        if build in self.buildCache:
+            return
+        self.buildCache.append(build)
+        while len(self.buildCache) > self.buildCacheSize:
+            self.buildCache.pop(0)
+
+    def getBuildByNumber(self, number):
+        for b in self.currentBuilds:
+            if b.number == number:
+                return b
+        for build in self.buildCache:
+            if build.number == number:
+                return build
+        filename = os.path.join(self.basedir, "%d" % number)
+        try:
+            build = pickle.load(open(filename, "rb"))
+            styles.doUpgrade()
+            build.builder = self
+            # handle LogFiles from after 0.5.0 and before 0.6.5
+            build.upgradeLogfiles()
+            self.addBuildToCache(build)
+            return build
+        except IOError:
+            raise IndexError("no such build %d" % number)
+        except EOFError:
+            raise IndexError("corrupted build pickle %d" % number)
+
+    def prune(self):
+        return # TODO: change this to walk through the filesystem
+        # first, blow away all builds beyond our build horizon
+        self.builds = self.builds[-self.buildHorizon:]
+        # then prune steps in builds past the step horizon
+        for b in self.builds[0:-self.stepHorizon]:
+            b.pruneSteps()
+
+    # IBuilderStatus methods
+    def getName(self):
+        return self.name
+
+    def getState(self):
+        return (self.currentBigState, self.currentBuilds)
+
+    def getSlaves(self):
+        return [self.status.getSlave(name) for name in self.slavenames]
+
+    def getPendingBuilds(self):
+        return self.pendingBuilds
+
+    def getCurrentBuilds(self):
+        return self.currentBuilds
+
+    def getLastFinishedBuild(self):
+        b = self.getBuild(-1)
+        if not (b and b.isFinished()):
+            b = self.getBuild(-2)
+        return b
+
+    def getBuild(self, number):
+        if number < 0:
+            number = self.nextBuildNumber + number
+        if number < 0 or number >= self.nextBuildNumber:
+            return None
+
+        try:
+            return self.getBuildByNumber(number)
+        except IndexError:
+            return None
+
+    def getEvent(self, number):
+        try:
+            return self.events[number]
+        except IndexError:
+            return None
+
+    def eventGenerator(self):
+        """This function creates a generator which will provide all of this
+        Builder's status events, starting with the most recent and
+        progressing backwards in time. """
+
+        # remember the newest-to-oldest flow here. "next" means earlier.
+
+        # TODO: interleave build steps and self.events by timestamp.
+        # TODO: um, I think we're already doing that.
+
+        eventIndex = -1
+        e = self.getEvent(eventIndex)
+        for Nb in range(1, self.nextBuildNumber+1):
+            b = self.getBuild(-Nb)
+            if not b:
+                break
+            steps = b.getSteps()
+            for Ns in range(1, len(steps)+1):
+                if steps[-Ns].started:
+                    step_start = steps[-Ns].getTimes()[0]
+                    while e is not None and e.getTimes()[0] > step_start:
+                        yield e
+                        eventIndex -= 1
+                        e = self.getEvent(eventIndex)
+                    yield steps[-Ns]
+            yield b
+        while e is not None:
+            yield e
+            eventIndex -= 1
+            e = self.getEvent(eventIndex)
+
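+    # Example usage (a sketch; 'builder_status' stands for a BuilderStatus
+    # instance): walk this builder's history, newest first.
+    #
+    #     for item in builder_status.eventGenerator():
+    #         # item is an Event, a BuildStepStatus, or a BuildStatus
+    #         print item.getTimes()
+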
+    def subscribe(self, receiver):
+        # will get builderChangedState, buildStarted, and buildFinished
+        self.watchers.append(receiver)
+        self.publishState(receiver)
+
+    def unsubscribe(self, receiver):
+        self.watchers.remove(receiver)
+
+    ## Builder interface (methods called by the Builder which feeds us)
+
+    def setSlavenames(self, names):
+        self.slavenames = names
+
+    def addEvent(self, text=[], color=None):
+        # this adds a duration event. When it is done, the user should call
+        # e.finish(). They can also mangle it by modifying .text and .color
+        e = Event()
+        e.started = util.now()
+        e.text = text
+        e.color = color
+        self.events.append(e)
+        return e # they are free to mangle it further
+
+    def addPointEvent(self, text=[], color=None):
+        # this adds a point event, one which occurs as a single atomic
+        # instant of time.
+        e = Event()
+        e.started = util.now()
+        e.finished = 0
+        e.text = text
+        e.color = color
+        self.events.append(e)
+        return e # for consistency, but they really shouldn't touch it
+
+    def setBigState(self, state):
+        needToUpdate = state != self.currentBigState
+        self.currentBigState = state
+        if needToUpdate:
+            self.publishState()
+
+    def publishState(self, target=None):
+        state = self.currentBigState
+
+        if target is not None:
+            # unicast
+            target.builderChangedState(self.name, state)
+            return
+        for w in self.watchers:
+            w.builderChangedState(self.name, state)
+
+    def newBuild(self):
+        """The Builder has decided to start a build, but the Build object is
+        not yet ready to report status (it has not finished creating the
+        Steps). Create a BuildStatus object that it can use."""
+        number = self.nextBuildNumber
+        self.nextBuildNumber += 1
+        # TODO: self.saveYourself(), to make sure we don't forget about the
+        # build number we've just allocated. This is not quite as important
+        # as it was before we switch to determineNextBuildNumber, but I think
+        # it may still be useful to have the new build save itself.
+        s = BuildStatus(self, number)
+        s.waitUntilFinished().addCallback(self._buildFinished)
+        return s
+
+    def addBuildRequest(self, brstatus):
+        self.pendingBuilds.append(brstatus)
+    def removeBuildRequest(self, brstatus):
+        self.pendingBuilds.remove(brstatus)
+
+    # buildStarted is called by our child BuildStatus instances
+    def buildStarted(self, s):
+        """Now the BuildStatus object is ready to go (it knows all of its
+        Steps, its ETA, etc), so it is safe to notify our watchers."""
+
+        assert s.builder is self # paranoia
+        assert s.number == self.nextBuildNumber - 1
+        assert s not in self.currentBuilds
+        self.currentBuilds.append(s)
+        self.addBuildToCache(s)
+
+        # now that the BuildStatus is prepared to answer queries, we can
+        # announce the new build to all our watchers
+
+        for w in self.watchers: # TODO: maybe do this later? callLater(0)?
+            receiver = w.buildStarted(self.getName(), s)
+            if receiver:
+                if type(receiver) == type(()):
+                    s.subscribe(receiver[0], receiver[1])
+                else:
+                    s.subscribe(receiver)
+                d = s.waitUntilFinished()
+                d.addCallback(lambda s: s.unsubscribe(receiver))
+
+
+    def _buildFinished(self, s):
+        assert s in self.currentBuilds
+        s.saveYourself()
+        self.currentBuilds.remove(s)
+
+        name = self.getName()
+        results = s.getResults()
+        for w in self.watchers:
+            w.buildFinished(name, s, results)
+
+        self.prune() # conserve disk
+
+
+    # waterfall display (history)
+
+    # I want some kind of build event that holds everything about the build:
+    # why, what changes went into it, the results of the build, itemized
+    # test results, etc. But, I do kind of need something to be inserted in
+    # the event log first, because intermixing step events and the larger
+    # build event is fraught with peril. Maybe an Event-like-thing that
+    # doesn't have a file in it but does have links. Hmm, that's exactly
+    # what it does now. The only difference would be that this event isn't
+    # pushed to the clients.
+
+    # publish to clients
+    def sendLastBuildStatus(self, client):
+        #client.newLastBuildStatus(self.lastBuildStatus)
+        pass
+    def sendCurrentActivityBigToEveryone(self):
+        for s in self.subscribers:
+            self.sendCurrentActivityBig(s)
+    def sendCurrentActivityBig(self, client):
+        state = self.currentBigState
+        if state == "offline":
+            client.currentlyOffline()
+        elif state == "idle":
+            client.currentlyIdle()
+        elif state == "building":
+            client.currentlyBuilding()
+        else:
+            log.msg("Hey, self.currentBigState is weird:", state)
+            
+    
+    ## HTML display interface
+
+    def getEventNumbered(self, num):
+        # deal with dropped events, pruned events
+        first = self.events[0].number
+        if first + len(self.events)-1 != self.events[-1].number:
+            log.msg(self,
+                    "lost an event somewhere: [0] is %d, [%d] is %d" % \
+                    (self.events[0].number,
+                     len(self.events) - 1,
+                     self.events[-1].number))
+            for e in self.events:
+                log.msg("e[%d]: " % e.number, e)
+            return None
+        offset = num - first
+        log.msg(self, "offset", offset)
+        try:
+            return self.events[offset]
+        except IndexError:
+            return None
+
+    ## Persistence of Status
+    def loadYourOldEvents(self):
+        if hasattr(self, "allEvents"):
+            # first time, nothing to get from file. Note that this is only if
+            # the Application gets .run() . If it gets .save()'ed, then the
+            # .allEvents attribute goes away in the initial __getstate__ and
+            # we try to load a non-existent file.
+            return
+        self.allEvents = self.loadFile("events", [])
+        if self.allEvents:
+            self.nextEventNumber = self.allEvents[-1].number + 1
+        else:
+            self.nextEventNumber = 0
+    def saveYourOldEvents(self):
+        self.saveFile("events", self.allEvents)
+
+    ## clients
+
+    def addClient(self, client):
+        if client not in self.subscribers:
+            self.subscribers.append(client)
+            self.sendLastBuildStatus(client)
+            self.sendCurrentActivityBig(client)
+            client.newEvent(self.currentSmall)
+    def removeClient(self, client):
+        if client in self.subscribers:
+            self.subscribers.remove(client)
+
+class SlaveStatus:
+    if implements:
+        implements(interfaces.ISlaveStatus)
+    else:
+        __implements__ = interfaces.ISlaveStatus,
+
+    admin = None
+    host = None
+    connected = False
+
+    def __init__(self, name):
+        self.name = name
+
+    def getName(self):
+        return self.name
+    def getAdmin(self):
+        return self.admin
+    def getHost(self):
+        return self.host
+    def isConnected(self):
+        return self.connected
+
+    def setAdmin(self, admin):
+        self.admin = admin
+    def setHost(self, host):
+        self.host = host
+    def setConnected(self, isConnected):
+        self.connected = isConnected
+
+class Status:
+    """
+    I represent the status of the buildmaster.
+    """
+    if implements:
+        implements(interfaces.IStatus)
+    else:
+        __implements__ = interfaces.IStatus,
+
+    def __init__(self, botmaster, basedir):
+        """
+        @type  botmaster: L{buildbot.master.BotMaster}
+        @param botmaster: the Status object uses C{.botmaster} to get at
+                          both the L{buildbot.master.BuildMaster} (for
+                          various buildbot-wide parameters) and the
+                          actual Builders (to get at their L{BuilderStatus}
+                          objects). It is not allowed to change or influence
+                          anything through this reference.
+        @type  basedir: string
+        @param basedir: this provides a base directory in which saved status
+                        information (changes.pck, saved Build status
+                        pickles) can be stored
+        """
+        self.botmaster = botmaster
+        self.basedir = basedir
+        self.watchers = []
+        self.activeBuildSets = []
+        assert os.path.isdir(basedir)
+
+
+    # methods called by our clients
+
+    def getProjectName(self):
+        return self.botmaster.parent.projectName
+    def getProjectURL(self):
+        return self.botmaster.parent.projectURL
+    def getBuildbotURL(self):
+        return self.botmaster.parent.buildbotURL
+
+    def getURLForThing(self, thing):
+        prefix = self.getBuildbotURL()
+        if not prefix:
+            return None
+        if providedBy(thing, interfaces.IStatus):
+            return prefix
+        if providedBy(thing, interfaces.ISchedulerStatus):
+            pass
+        if providedBy(thing, interfaces.IBuilderStatus):
+            builder = thing
+            return prefix + urllib.quote(builder.getName(), safe='')
+        if providedBy(thing, interfaces.IBuildStatus):
+            build = thing
+            builder = build.getBuilder()
+            return "%s%s/builds/%d" % (
+                prefix,
+                urllib.quote(builder.getName(), safe=''),
+                build.getNumber())
+        if providedBy(thing, interfaces.IBuildStepStatus):
+            step = thing
+            build = step.getBuild()
+            builder = build.getBuilder()
+            return "%s%s/builds/%d/%s" % (
+                prefix,
+                urllib.quote(builder.getName(), safe=''),
+                build.getNumber(),
+                "step-" + urllib.quote(step.getName(), safe=''))
+        # IBuildSetStatus
+        # IBuildRequestStatus
+        # ISlaveStatus
+
+        # IStatusEvent
+        if providedBy(thing, interfaces.IStatusEvent):
+            from buildbot.changes import changes
+            # TODO: this is goofy, create IChange or something
+            if isinstance(thing, changes.Change):
+                change = thing
+                return "%schanges/%d" % (prefix, change.number)
+
+        if providedBy(thing, interfaces.IStatusLog):
+            log = thing
+            step = log.getStep()
+            build = step.getBuild()
+            builder = build.getBuilder()
+
+            logs = step.getLogs()
+            for i in range(len(logs)):
+                if log is logs[i]:
+                    lognum = i
+                    break
+            else:
+                return None
+            return "%s%s/builds/%d/%s/%d" % (
+                prefix,
+                urllib.quote(builder.getName(), safe=''),
+                build.getNumber(),
+                "step-" + urllib.quote(step.getName(), safe=''),
+                lognum)
+
+
+    def getSchedulers(self):
+        return self.botmaster.parent.allSchedulers()
+
+    def getBuilderNames(self, categories=None):
+        if categories == None:
+            return self.botmaster.builderNames[:] # don't let them break it
+        
+        l = []
+        # respect addition order
+        for name in self.botmaster.builderNames:
+            builder = self.botmaster.builders[name]
+            if builder.builder_status.category in categories:
+                l.append(name)
+        return l
+
+    def getBuilder(self, name):
+        """
+        @rtype: L{BuilderStatus}
+        """
+        return self.botmaster.builders[name].builder_status
+
+    def getSlave(self, slavename):
+        return self.botmaster.slaves[slavename].slave_status
+
+    def getBuildSets(self):
+        return self.activeBuildSets[:]
+
+    def subscribe(self, target):
+        self.watchers.append(target)
+        for name in self.botmaster.builderNames:
+            self.announceNewBuilder(target, name, self.getBuilder(name))
+    def unsubscribe(self, target):
+        self.watchers.remove(target)
+
+
+    # methods called by upstream objects
+
+    def announceNewBuilder(self, target, name, builder_status):
+        t = target.builderAdded(name, builder_status)
+        if t:
+            builder_status.subscribe(t)
+
+    def builderAdded(self, name, basedir, category=None):
+        """
+        @rtype: L{BuilderStatus}
+        """
+        filename = os.path.join(self.basedir, basedir, "builder")
+        log.msg("trying to load status pickle from %s" % filename)
+        builder_status = None
+        try:
+            builder_status = pickle.load(open(filename, "rb"))
+            styles.doUpgrade()
+        except IOError:
+            log.msg("no saved status pickle, creating a new one")
+        except:
+            log.msg("error while loading status pickle, creating a new one")
+            log.msg("error follows:")
+            log.err()
+        if not builder_status:
+            builder_status = BuilderStatus(name, category)
+            builder_status.addPointEvent(["builder", "created"])
+        log.msg("added builder %s in category %s" % (name, category))
+        # an unpickled object might not have category set from before,
+        # so set it here to make sure
+        builder_status.category = category
+        builder_status.basedir = os.path.join(self.basedir, basedir)
+        builder_status.name = name # it might have been updated
+        builder_status.status = self
+
+        if not os.path.isdir(builder_status.basedir):
+            os.mkdir(builder_status.basedir)
+        builder_status.determineNextBuildNumber()
+
+        builder_status.setBigState("offline")
+
+        for t in self.watchers:
+            self.announceNewBuilder(t, name, builder_status)
+
+        return builder_status
+
+    def builderRemoved(self, name):
+        for t in self.watchers:
+            t.builderRemoved(name)
+
+    def prune(self):
+        for b in self.botmaster.builders.values():
+            b.builder_status.prune()
+
+    def buildsetSubmitted(self, bss):
+        self.activeBuildSets.append(bss)
+        bss.waitUntilFinished().addCallback(self.activeBuildSets.remove)
+        for t in self.watchers:
+            t.buildsetSubmitted(bss)

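The BuilderStatus/Status code above is driven by a simple observer protocol:
Status.subscribe(target) announces every existing builder through
target.builderAdded(), the watcher may return an object to be subscribed to
that BuilderStatus, and that object then receives builderChangedState(),
buildStarted(), and buildFinished() calls. A minimal sketch of such a watcher,
using only the callback names that appear above (the ConsoleWatcher class is
illustrative and not part of this import):

    # ConsoleWatcher is a hypothetical example, not part of buildbot itself.
    class ConsoleWatcher:
        """Print a line for builder state changes and finished builds."""

        def builderAdded(self, name, builder_status):
            print "watching builder %s" % name
            return self          # subscribe this object to the BuilderStatus

        def builderChangedState(self, name, state):
            print "builder %s is now %s" % (name, state)

        def buildStarted(self, name, build_status):
            print "build #%d started on %s" % (build_status.getNumber(), name)
            # returning None: don't subscribe to per-build/step events

        def buildFinished(self, name, build_status, results):
            print "build #%d on %s finished with result %s" % (
                build_status.getNumber(), name, results)

    # usage, from code that already holds the master's Status instance:
    #   status.subscribe(ConsoleWatcher())
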
Added: vendor/buildbot/current/buildbot/status/classic.css
===================================================================
--- vendor/buildbot/current/buildbot/status/classic.css	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/classic.css	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,39 @@
+a:visited {
+	color: #800080;
+}
+
+td.Event, td.BuildStep, td.Activity, td.Change, td.Time, td.Builder {
+    border-top: 1px solid;
+    border-right: 1px solid;
+}
+
+/* Activity states */
+.offline { 
+        background-color: red;
+}
+.idle {
+	background-color: white;
+}
+.waiting { 
+        background-color: yellow;
+}
+.building { 
+        background-color: yellow;
+}
+
+/* LastBuild, BuildStep states */
+.success {
+	background-color: #72ff75;
+}
+.failure {
+	background-color: red;
+}
+.warnings {
+	background-color: #ff8000;
+}
+.exception {
+	background-color: #c000c0;
+}
+.start,.running {
+	background-color: yellow;
+}

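The selectors in classic.css match the builder activity states ('offline',
'idle', 'building', 'waiting') and the build/step result names that the status
pages turn into CSS class attributes. A sketch of that mapping, assuming the
usual ordering of the result constants defined in buildbot/status/builder.py
(not reproduced in this excerpt):

    # Illustrative only; the real constants live in buildbot/status/builder.py.
    SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION = range(5)
    Results = ["success", "warnings", "failure", "skipped", "exception"]

    def css_class_for(result):
        # a result of None means the step is still running (.running above)
        if result is None:
            return "running"
        return Results[result]

    assert css_class_for(SUCCESS) == "success"
    assert css_class_for(None) == "running"
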
Added: vendor/buildbot/current/buildbot/status/client.py
===================================================================
--- vendor/buildbot/current/buildbot/status/client.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/client.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,572 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+from twisted.spread import pb
+from twisted.python import log, components
+from twisted.internet import reactor
+from twisted.application import strports
+from twisted.cred import portal, checkers
+
+from buildbot import interfaces
+from buildbot.twcompat import Interface, implements
+from buildbot.status import builder, base
+from buildbot.changes import changes
+
+class IRemote(Interface):
+    pass
+
+def makeRemote(obj):
+    # we want IRemote(None) to be None, but you can't really do that with
+    # adapters, so we fake it
+    if obj is None:
+        return None
+    return IRemote(obj)
+
+
+class RemoteBuildSet(pb.Referenceable):
+    def __init__(self, buildset):
+        self.b = buildset
+
+    def remote_getSourceStamp(self):
+        return self.b.getSourceStamp()
+
+    def remote_getReason(self):
+        return self.b.getReason()
+
+    def remote_getID(self):
+        return self.b.getID()
+
+    def remote_getBuilderNames(self):
+        return self.b.getBuilderNames()
+
+    def remote_getBuildRequests(self):
+        """Returns a list of (builderName, BuildRequest) tuples."""
+        return [(br.getBuilderName(), IRemote(br))
+                for br in self.b.getBuildRequests()]
+
+    def remote_isFinished(self):
+        return self.b.isFinished()
+
+    def remote_waitUntilSuccess(self):
+        d = self.b.waitUntilSuccess()
+        d.addCallback(lambda res: self)
+        return d
+
+    def remote_waitUntilFinished(self):
+        d = self.b.waitUntilFinished()
+        d.addCallback(lambda res: self)
+        return d
+
+    def remote_getResults(self):
+        return self.b.getResults()
+
+components.registerAdapter(RemoteBuildSet,
+                           interfaces.IBuildSetStatus, IRemote)    
+
+
+class RemoteBuilder(pb.Referenceable):
+    def __init__(self, builder):
+        self.b = builder
+
+    def remote_getName(self):
+        return self.b.getName()
+
+    def remote_getState(self):
+        state, builds = self.b.getState()
+        return (state,
+                None, # TODO: remove leftover ETA
+                [makeRemote(b) for b in builds])
+
+    def remote_getSlaves(self):
+        return [IRemote(s) for s in self.b.getSlaves()]
+
+    def remote_getLastFinishedBuild(self):
+        return makeRemote(self.b.getLastFinishedBuild())
+
+    def remote_getCurrentBuilds(self):
+        return [IRemote(b) for b in self.b.getCurrentBuilds()]
+
+    def remote_getBuild(self, number):
+        return makeRemote(self.b.getBuild(number))
+
+    def remote_getEvent(self, number):
+        return IRemote(self.b.getEvent(number))
+
+components.registerAdapter(RemoteBuilder,
+                           interfaces.IBuilderStatus, IRemote)    
+
+
+class RemoteBuildRequest(pb.Referenceable):
+    def __init__(self, buildreq):
+        self.b = buildreq
+        self.observers = []
+
+    def remote_getSourceStamp(self):
+        return self.b.getSourceStamp()
+
+    def remote_getBuilderName(self):
+        return self.b.getBuilderName()
+
+    def remote_subscribe(self, observer):
+        """The observer's remote_newbuild method will be called (with two
+        arguments: the RemoteBuild object, and our builderName) for each new
+        Build that is created to handle this BuildRequest."""
+        self.observers.append(observer)
+        def send(bs):
+            d = observer.callRemote("newbuild",
+                                    IRemote(bs), self.b.getBuilderName())
+            d.addErrback(lambda err: None)
+        reactor.callLater(0, self.b.subscribe, send)
+
+    def remote_unsubscribe(self, observer):
+        # PB (well, at least oldpb) doesn't re-use RemoteReference instances,
+        # so sending the same object across the wire twice will result in two
+        # separate objects that compare as equal ('a is not b' and 'a == b').
+        # That means we can't use a simple 'self.observers.remove(observer)'
+        # here.
+        for o in self.observers:
+            if o == observer:
+                self.observers.remove(o)
+
+components.registerAdapter(RemoteBuildRequest,
+                           interfaces.IBuildRequestStatus, IRemote)    
+
+class RemoteBuild(pb.Referenceable):
+    def __init__(self, build):
+        self.b = build
+        self.observers = []
+
+    def remote_getBuilderName(self):
+        return self.b.getBuilder().getName()
+
+    def remote_getNumber(self):
+        return self.b.getNumber()
+
+    def remote_getReason(self):
+        return self.b.getReason()
+
+    def remote_getChanges(self):
+        return [IRemote(c) for c in self.b.getChanges()]
+
+    def remote_getResponsibleUsers(self):
+        return self.b.getResponsibleUsers()
+
+    def remote_getSteps(self):
+        return [IRemote(s) for s in self.b.getSteps()]
+
+    def remote_getTimes(self):
+        return self.b.getTimes()
+
+    def remote_isFinished(self):
+        return self.b.isFinished()
+
+    def remote_waitUntilFinished(self):
+        # the Deferred returned by callRemote() will fire when this build is
+        # finished
+        d = self.b.waitUntilFinished()
+        d.addCallback(lambda res: self)
+        return d
+
+    def remote_getETA(self):
+        return self.b.getETA()
+
+    def remote_getCurrentStep(self):
+        return makeRemote(self.b.getCurrentStep())
+
+    def remote_getText(self):
+        return self.b.getText()
+
+    def remote_getColor(self):
+        return self.b.getColor()
+
+    def remote_getResults(self):
+        return self.b.getResults()
+
+    def remote_getLogs(self):
+        logs = {}
+        for name,log in self.b.getLogs().items():
+            logs[name] = IRemote(log)
+        return logs
+
+    def remote_subscribe(self, observer, updateInterval=None):
+        """The observer will have remote_stepStarted(buildername, build,
+        stepname, step), remote_stepFinished(buildername, build, stepname,
+        step, results), and maybe remote_buildETAUpdate(buildername, build,
+        eta) messages sent to it."""
+        self.observers.append(observer)
+        s = BuildSubscriber(observer)
+        self.b.subscribe(s, updateInterval)
+
+    def remote_unsubscribe(self, observer):
+        # TODO: is the observer automatically unsubscribed when the build
+        # finishes? Or are they responsible for unsubscribing themselves
+        # anyway? How do we avoid a race condition here?
+        for o in self.observers:
+            if o == observer:
+                self.observers.remove(o)
+
+
+components.registerAdapter(RemoteBuild,
+                           interfaces.IBuildStatus, IRemote)    
+
+class BuildSubscriber:
+    def __init__(self, observer):
+        self.observer = observer
+
+    def buildETAUpdate(self, build, eta):
+        self.observer.callRemote("buildETAUpdate",
+                                 build.getBuilder().getName(),
+                                 IRemote(build),
+                                 eta)
+
+    def stepStarted(self, build, step):
+        self.observer.callRemote("stepStarted",
+                                 build.getBuilder().getName(),
+                                 IRemote(build),
+                                 step.getName(), IRemote(step))
+        return None
+
+    def stepFinished(self, build, step, results):
+        self.observer.callRemote("stepFinished",
+                                 build.getBuilder().getName(),
+                                 IRemote(build),
+                                 step.getName(), IRemote(step),
+                                 results)
+
+
+class RemoteBuildStep(pb.Referenceable):
+    def __init__(self, step):
+        self.s = step
+
+    def remote_getName(self):
+        return self.s.getName()
+
+    def remote_getBuild(self):
+        return IRemote(self.s.getBuild())
+
+    def remote_getTimes(self):
+        return self.s.getTimes()
+
+    def remote_getExpectations(self):
+        return self.s.getExpectations()
+
+    def remote_getLogs(self):
+        logs = {}
+        for name,log in self.s.getLogs().items():
+            logs[name] = IRemote(log)
+        return logs
+
+    def remote_isFinished(self):
+        return self.s.isFinished()
+
+    def remote_waitUntilFinished(self):
+        return self.s.waitUntilFinished() # returns a Deferred
+
+    def remote_getETA(self):
+        return self.s.getETA()
+
+    def remote_getText(self):
+        return self.s.getText()
+
+    def remote_getColor(self):
+        return self.s.getColor()
+
+    def remote_getResults(self):
+        return self.s.getResults()
+
+components.registerAdapter(RemoteBuildStep,
+                           interfaces.IBuildStepStatus, IRemote)    
+
+class RemoteSlave:
+    def __init__(self, slave):
+        self.s = slave
+
+    def remote_getName(self):
+        return self.s.getName()
+    def remote_getAdmin(self):
+        return self.s.getAdmin()
+    def remote_getHost(self):
+        return self.s.getHost()
+    def remote_isConnected(self):
+        return self.s.isConnected()
+
+components.registerAdapter(RemoteSlave,
+                           interfaces.ISlaveStatus, IRemote)
+
+class RemoteEvent:
+    def __init__(self, event):
+        self.e = event
+
+    def remote_getTimes(self):
+        return self.e.getTimes()
+    def remote_getText(self):
+        return self.e.getText()
+    def remote_getColor(self):
+        return self.e.getColor()
+
+components.registerAdapter(RemoteEvent,
+                           interfaces.IStatusEvent, IRemote)
+
+class RemoteLog(pb.Referenceable):
+    def __init__(self, log):
+        self.l = log
+
+    def remote_getName(self):
+        return self.l.getName()
+
+    def remote_isFinished(self):
+        return self.l.isFinished()
+    def remote_waitUntilFinished(self):
+        d = self.l.waitUntilFinished()
+        d.addCallback(lambda res: self)
+        return d
+
+    def remote_getText(self):
+        return self.l.getText()
+    def remote_getTextWithHeaders(self):
+        return self.l.getTextWithHeaders()
+    def remote_getChunks(self):
+        return self.l.getChunks()
+    # TODO: subscription interface
+
+components.registerAdapter(RemoteLog, builder.LogFile, IRemote)
+# TODO: something similar for builder.HTMLLogfile ?
+
+class RemoteChange:
+    def __init__(self, change):
+        self.c = change
+
+    def getWho(self):
+        return self.c.who
+    def getFiles(self):
+        return self.c.files
+    def getComments(self):
+        return self.c.comments
+
+components.registerAdapter(RemoteChange, changes.Change, IRemote)
+
+
+class StatusClientPerspective(base.StatusReceiverPerspective):
+
+    subscribed = None
+    client = None
+
+    def __init__(self, status):
+        self.status = status # the IStatus
+        self.subscribed_to_builders = [] # Builders to which we're subscribed
+        self.subscribed_to = [] # everything else we're subscribed to
+
+    def __getstate__(self):
+        d = self.__dict__.copy()
+        d['client'] = None
+        return d
+
+    def attached(self, mind):
+        #log.msg("StatusClientPerspective.attached")
+        return self
+
+    def detached(self, mind):
+        log.msg("PB client detached")
+        self.client = None
+        for name in self.subscribed_to_builders:
+            log.msg(" unsubscribing from Builder(%s)" % name)
+            self.status.getBuilder(name).unsubscribe(self)
+        for s in self.subscribed_to:
+            log.msg(" unsubscribe from %s" % s)
+            s.unsubscribe(self)
+        self.subscribed = None
+
+    def perspective_subscribe(self, mode, interval, target):
+        """The remote client wishes to subscribe to some set of events.
+        'target' will be sent remote messages when these events happen.
+        'mode' indicates which events are desired: it is a string with one
+        of the following values:
+
+        'builders': builderAdded, builderRemoved
+        'builds': those plus builderChangedState, buildStarted, buildFinished
+        'steps': all those plus buildETAUpdate, stepStarted, stepFinished
+        'logs': all those plus stepETAUpdate, logStarted, logFinished
+        'full': all those plus logChunk (with the log contents)
+        
+
+        Messages are defined by buildbot.interfaces.IStatusReceiver .
+        'interval' is used to specify how frequently ETAUpdate messages
+        should be sent.
+
+        Raising or lowering the subscription level will take effect starting
+        with the next build or step."""
+
+        assert mode in ("builders", "builds", "steps", "logs", "full")
+        assert target
+        log.msg("PB subscribe(%s)" % mode)
+
+        self.client = target
+        self.subscribed = mode
+        self.interval = interval
+        self.subscribed_to.append(self.status)
+        # wait a moment before subscribing, so the new-builder messages
+        # won't appear before this remote method finishes
+        reactor.callLater(0, self.status.subscribe, self)
+        return None
+
+    def perspective_unsubscribe(self):
+        log.msg("PB unsubscribe")
+        self.status.unsubscribe(self)
+        self.subscribed_to.remove(self.status)
+        self.client = None
+
+    def perspective_getBuildSets(self):
+        """This returns tuples of (buildset, bsid), because that is much more
+        convenient for tryclient."""
+        return [(IRemote(s), s.getID()) for s in self.status.getBuildSets()]
+
+    def perspective_getBuilderNames(self):
+        return self.status.getBuilderNames()
+
+    def perspective_getBuilder(self, name):
+        b = self.status.getBuilder(name)
+        return IRemote(b)
+
+    def perspective_getSlave(self, name):
+        s = self.status.getSlave(name)
+        return IRemote(s)
+
+    # IStatusReceiver methods, invoked if we've subscribed
+
+    # mode >= builder
+    def builderAdded(self, name, builder):
+        self.client.callRemote("builderAdded", name, IRemote(builder))
+        if self.subscribed in ("builds", "steps", "logs", "full"):
+            self.subscribed_to_builders.append(name)
+            return self
+        return None
+
+    def builderChangedState(self, name, state):
+        self.client.callRemote("builderChangedState", name, state, None)
+        # TODO: remove leftover ETA argument
+
+    def builderRemoved(self, name):
+        if name in self.subscribed_to_builders:
+            self.subscribed_to_builders.remove(name)
+        self.client.callRemote("builderRemoved", name)
+
+    def buildsetSubmitted(self, buildset):
+        # TODO: deliver to client, somehow
+        pass
+
+    # mode >= builds
+    def buildStarted(self, name, build):
+        self.client.callRemote("buildStarted", name, IRemote(build))
+        if self.subscribed in ("steps", "logs", "full"):
+            self.subscribed_to.append(build)
+            return (self, self.interval)
+        return None
+
+    def buildFinished(self, name, build, results):
+        if build in self.subscribed_to:
+            # we might have joined during the build
+            self.subscribed_to.remove(build)
+        self.client.callRemote("buildFinished",
+                               name, IRemote(build), results)
+
+    # mode >= steps
+    def buildETAUpdate(self, build, eta):
+        self.client.callRemote("buildETAUpdate",
+                               build.getBuilder().getName(), IRemote(build),
+                               eta)
+
+    def stepStarted(self, build, step):
+        # we add some information here so the client doesn't have to do an
+        # extra round-trip
+        self.client.callRemote("stepStarted",
+                               build.getBuilder().getName(), IRemote(build),
+                               step.getName(), IRemote(step))
+        if self.subscribed in ("logs", "full"):
+            self.subscribed_to.append(step)
+            return (self, self.interval)
+        return None
+
+    def stepFinished(self, build, step, results):
+        self.client.callRemote("stepFinished",
+                               build.getBuilder().getName(), IRemote(build),
+                               step.getName(), IRemote(step),
+                               results)
+        if step in self.subscribed_to:
+            # eventually (through some new subscription method) we could
+            # join in the middle of the step
+            self.subscribed_to.remove(step)
+
+    # mode >= logs
+    def stepETAUpdate(self, build, step, ETA, expectations):
+        self.client.callRemote("stepETAUpdate",
+                               build.getBuilder().getName(), IRemote(build),
+                               step.getName(), IRemote(step),
+                               ETA, expectations)
+
+    def logStarted(self, build, step, log):
+        # TODO: make the HTMLLog adapter
+        rlog = IRemote(log, None)
+        if not rlog:
+            print "hey, couldn't adapt %s to IRemote" % log
+        self.client.callRemote("logStarted",
+                               build.getBuilder().getName(), IRemote(build),
+                               step.getName(), IRemote(step),
+                               log.getName(), IRemote(log, None))
+        if self.subscribed in ("full",):
+            self.subscribed_to.append(log)
+            return self
+        return None
+
+    def logFinished(self, build, step, log):
+        self.client.callRemote("logFinished",
+                               build.getBuilder().getName(), IRemote(build),
+                               step.getName(), IRemote(step),
+                               log.getName(), IRemote(log, None))
+        if log in self.subscribed_to:
+            self.subscribed_to.remove(log)
+
+    # mode >= full
+    def logChunk(self, build, step, log, channel, text):
+        self.client.callRemote("logChunk",
+                               build.getBuilder().getName(), IRemote(build),
+                               step.getName(), IRemote(step),
+                               log.getName(), IRemote(log),
+                               channel, text)
+
+
+class PBListener(base.StatusReceiverMultiService):
+    """I am a listener for PB-based status clients."""
+
+    compare_attrs = ["port", "cred"]
+    if implements:
+        implements(portal.IRealm)
+    else:
+        __implements__ = (portal.IRealm,
+                          base.StatusReceiverMultiService.__implements__)
+
+    def __init__(self, port, user="statusClient", passwd="clientpw"):
+        base.StatusReceiverMultiService.__init__(self)
+        if type(port) is int:
+            port = "tcp:%d" % port
+        self.port = port
+        self.cred = (user, passwd)
+        p = portal.Portal(self)
+        c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
+        c.addUser(user, passwd)
+        p.registerChecker(c)
+        f = pb.PBServerFactory(p)
+        s = strports.service(port, f)
+        s.setServiceParent(self)
+
+    def setServiceParent(self, parent):
+        base.StatusReceiverMultiService.setServiceParent(self, parent)
+        self.setup()
+
+    def setup(self):
+        self.status = self.parent.getStatus()
+
+    def requestAvatar(self, avatarID, mind, interface):
+        assert interface == pb.IPerspective
+        p = StatusClientPerspective(self.status)
+        p.attached(mind) # perhaps .callLater(0) ?
+        return (pb.IPerspective, p,
+                lambda p=p,mind=mind: p.detached(mind))

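client.py exposes the status tree over Twisted Perspective Broker: PBListener
accepts logins with the configured username/password ("statusClient" /
"clientpw" by default) and hands each connection a StatusClientPerspective,
whose perspective_* methods are callable remotely without the prefix. A
minimal sketch of such a client, assuming a master running PBListener(8007)
on localhost with the default credentials (the host and port are illustrative;
buildbot/clients/base.py contains a fuller client along these lines):

    # Illustrative status client, not part of this import.
    from twisted.spread import pb
    from twisted.cred import credentials
    from twisted.internet import reactor

    def connected(remote):
        # maps to StatusClientPerspective.perspective_getBuilderNames
        d = remote.callRemote("getBuilderNames")
        d.addCallback(show_names)
        return d

    def show_names(names):
        print "builders:", names
        reactor.stop()

    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8007, factory)
    d = factory.login(credentials.UsernamePassword("statusClient", "clientpw"))
    d.addCallback(connected)
    d.addErrback(lambda err: (err.printTraceback(), reactor.stop()))
    reactor.run()
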
Added: vendor/buildbot/current/buildbot/status/html.py
===================================================================
--- vendor/buildbot/current/buildbot/status/html.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/html.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,1762 @@
+# -*- test-case-name: buildbot.test.test_web -*-
+
+from __future__ import generators
+
+from twisted.python import log, components
+import urllib, re
+
+from twisted.internet import defer, reactor
+from twisted.web.resource import Resource
+from twisted.web import static, html, server, distrib
+from twisted.web.error import NoResource
+from twisted.web.util import Redirect, DeferredResource
+from twisted.application import strports
+from twisted.spread import pb
+
+from buildbot.twcompat import implements, Interface
+
+import sys, string, types, time, os.path
+
+from buildbot import interfaces, util
+from buildbot import version
+from buildbot.sourcestamp import SourceStamp
+from buildbot.status import builder, base
+from buildbot.changes import changes
+from buildbot.process.base import BuildRequest
+
+class ITopBox(Interface):
+    """I represent a box in the top row of the waterfall display: the one
+    which shows the status of the last build for each builder."""
+    pass
+
+class ICurrentBox(Interface):
+    """I represent the 'current activity' box, just above the builder name."""
+    pass
+
+class IBox(Interface):
+    """I represent a box in the waterfall display."""
+    pass
+
+class IHTMLLog(Interface):
+    pass
+
+ROW_TEMPLATE = '''
+<div class="row">
+  <span class="label">%(label)s</span>
+  <span class="field">%(field)s</span>
+</div>'''
+
+def make_row(label, field):
+    """Create a name/value row for the HTML.
+
+    `label` is plain text; it will be HTML-encoded.
+
+    `field` is a bit of HTML structure; it will not be encoded in
+    any way.
+    """
+    label = html.escape(label)
+    return ROW_TEMPLATE % {"label": label, "field": field}
+
+colormap = {
+    'green': '#72ff75',
+    }
+def td(text="", parms={}, **props):
+    data = ""
+    data += "  "
+    #if not props.has_key("border"):
+    #    props["border"] = 1
+    props.update(parms)
+    if props.has_key("bgcolor"):
+        props["bgcolor"] = colormap.get(props["bgcolor"], props["bgcolor"])
+    comment = props.get("comment", None)
+    if comment:
+        data += "<!-- %s -->" % comment
+    data += "<td"
+    class_ = props.get('class_', None)
+    if class_:
+        props["class"] = class_
+    for prop in ("align", "bgcolor", "colspan", "rowspan", "border",
+                 "valign", "halign", "class"):
+        p = props.get(prop, None)
+        if p != None:
+            data += " %s=\"%s\"" % (prop, p)
+    data += ">"
+    if not text:
+        text = "&nbsp;"
+    if type(text) == types.ListType:
+        data += string.join(text, "<br />")
+    else:
+        data += text
+    data += "</td>\n"
+    return data
+
+def build_get_class(b):
+    """
+    Return the class to use for a finished build or buildstep,
+    based on the result.
+    """
+    # FIXME: this getResults duplicity might need to be fixed
+    result = b.getResults()
+    #print "THOMAS: result for b %r: %r" % (b, result)
+    if isinstance(b, builder.BuildStatus):
+        result = b.getResults()
+    elif isinstance(b, builder.BuildStepStatus):
+        result = b.getResults()[0]
+        # after forcing a build, b.getResults() returns ((None, []), []), ugh
+        if isinstance(result, tuple):
+            result = result[0]
+    else:
+        raise TypeError, "%r is not a BuildStatus or BuildStepStatus" % b
+
+    if result == None:
+        # FIXME: this happens when a buildstep is running ?
+        return "running"
+    return builder.Results[result]
+
+class Box:
+    # a Box wraps an Event. The Box has HTML <td> parameters that Events
+    # lack, and it has a base URL to which each File's name is relative.
+    # Events don't know about HTML.
+    spacer = False
+    def __init__(self, text=[], color=None, class_=None, urlbase=None,
+                 **parms):
+        self.text = text
+        self.color = color
+        self.class_ = class_
+        self.urlbase = urlbase
+        self.show_idle = 0
+        if parms.has_key('show_idle'):
+            del parms['show_idle']
+            self.show_idle = 1
+            
+        self.parms = parms
+        # parms is a dict of HTML parameters for the <td> element that will
+        # represent this Event in the waterfall display.
+
+    def td(self, **props):
+        props.update(self.parms)
+        text = self.text
+        if not text and self.show_idle:
+            text = ["[idle]"]
+        return td(text, props, bgcolor=self.color, class_=self.class_)
+
+
+class HtmlResource(Resource):
+    css = None
+    contentType = "text/html; charset=UTF-8"
+    title = "Dummy"
+
+    def render(self, request):
+        data = self.content(request)
+        if isinstance(data, unicode):
+            data = data.encode("utf-8")
+        request.setHeader("content-type", self.contentType)
+        if request.method == "HEAD":
+            request.setHeader("content-length", len(data))
+            return ''
+        return data
+
+    def content(self, request):
+        data = ('<!DOCTYPE html PUBLIC'
+                ' "-//W3C//DTD XHTML 1.0 Transitional//EN"\n'
+                '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
+                '<html'
+                ' xmlns="http://www.w3.org/1999/xhtml"'
+                ' lang="en"'
+                ' xml:lang="en">\n')
+        data += "<head>\n"
+        data += "  <title>" + self.title + "</title>\n"
+        if self.css:
+            # TODO: use some sort of relative link up to the root page, so
+            # this css can be used from child pages too
+            data += ('  <link href="%s" rel="stylesheet" type="text/css"/>\n'
+                     % "buildbot.css")
+        data += "</head>\n"
+        data += '<body vlink="#800080">\n'
+        data += self.body(request)
+        data += "</body></html>\n"
+        return data
+
+    def body(self, request):
+        return "Dummy\n"
+
+class StaticHTML(HtmlResource):
+    def __init__(self, body, title):
+        HtmlResource.__init__(self)
+        self.bodyHTML = body
+        self.title = title
+    def body(self, request):
+        return self.bodyHTML
+
+# $builder/builds/NN/stepname
+class StatusResourceBuildStep(HtmlResource):
+    title = "Build Step"
+
+    def __init__(self, status, step):
+        HtmlResource.__init__(self)
+        self.status = status
+        self.step = step
+
+    def body(self, request):
+        s = self.step
+        b = s.getBuild()
+        data = "<h1>BuildStep %s:#%d:%s</h1>\n" % \
+               (b.getBuilder().getName(), b.getNumber(), s.getName())
+
+        if s.isFinished():
+            data += ("<h2>Finished</h2>\n"
+                     "<p>%s</p>\n" % html.escape("%s" % s.getText()))
+        else:
+            data += ("<h2>Not Finished</h2>\n"
+                     "<p>ETA %s seconds</p>\n" % s.getETA())
+
+        exp = s.getExpectations()
+        if exp:
+            data += ("<h2>Expectations</h2>\n"
+                     "<ul>\n")
+            for e in exp:
+                data += "<li>%s: current=%s, target=%s</li>\n" % \
+                        (html.escape(e[0]), e[1], e[2])
+            data += "</ul>\n"
+        logs = s.getLogs()
+        if logs:
+            data += ("<h2>Logs</h2>\n"
+                     "<ul>\n")
+            for num in range(len(logs)):
+                if logs[num].hasContents():
+                    # FIXME: If the step name has a / in it, this is broken
+                    # either way.  If we quote it but say '/'s are safe,
+                    # it chops up the step name.  If we quote it and '/'s
+                    # are not safe, it escapes the / that separates the
+                    # step name from the log number.
+                    data += '<li><a href="%s">%s</a></li>\n' % \
+                            (urllib.quote(request.childLink("%d" % num)),
+                             html.escape(logs[num].getName()))
+                else:
+                    data += ('<li>%s</li>\n' %
+                             html.escape(logs[num].getName()))
+            data += "</ul>\n"
+
+        return data
+
+    def getChild(self, path, request):
+        logname = path
+        try:
+            log = self.step.getLogs()[int(logname)]
+            if log.hasContents():
+                return IHTMLLog(interfaces.IStatusLog(log))
+            return NoResource("Empty Log '%s'" % logname)
+        except (IndexError, ValueError):
+            return NoResource("No such Log '%s'" % logname)
+
+# $builder/builds/NN/tests/TESTNAME
+class StatusResourceTestResult(HtmlResource):
+    title = "Test Logs"
+
+    def __init__(self, status, name, result):
+        HtmlResource.__init__(self)
+        self.status = status
+        self.name = name
+        self.result = result
+
+    def body(self, request):
+        dotname = ".".join(self.name)
+        logs = self.result.getLogs()
+        lognames = logs.keys()
+        lognames.sort()
+        data = "<h1>%s</h1>\n" % html.escape(dotname)
+        for name in lognames:
+            data += "<h2>%s</h2>\n" % html.escape(name)
+            data += "<pre>" + logs[name] + "</pre>\n\n"
+
+        return data
+
+
+# $builder/builds/NN/tests
+class StatusResourceTestResults(HtmlResource):
+    title = "Test Results"
+
+    def __init__(self, status, results):
+        HtmlResource.__init__(self)
+        self.status = status
+        self.results = results
+
+    def body(self, request):
+        r = self.results
+        data = "<h1>Test Results</h1>\n"
+        data += "<ul>\n"
+        testnames = r.keys()
+        testnames.sort()
+        for name in testnames:
+            res = r[name]
+            dotname = ".".join(name)
+            data += " <li>%s: " % dotname
+            # TODO: this could break on weird test names. At the moment,
+            # test names only come from Trial tests, where the name
+            # components must be legal python names, but that won't always
+            # be a restriction.
+            url = request.childLink(dotname)
+            data += "<a href=\"%s\">%s</a>" % (url, " ".join(res.getText()))
+            data += "</li>\n"
+        data += "</ul>\n"
+        return data
+
+    def getChild(self, path, request):
+        try:
+            name = tuple(path.split("."))
+            result = self.results[name]
+            return StatusResourceTestResult(self.status, name, result)
+        except KeyError:
+            return NoResource("No such test name '%s'" % path)
+
+
+# $builder/builds/NN
+class StatusResourceBuild(HtmlResource):
+    title = "Build"
+
+    def __init__(self, status, build, builderControl, buildControl):
+        HtmlResource.__init__(self)
+        self.status = status
+        self.build = build
+        self.builderControl = builderControl
+        self.control = buildControl
+
+    def body(self, request):
+        b = self.build
+        buildbotURL = self.status.getBuildbotURL()
+        projectName = self.status.getProjectName()
+        data = '<div class="title"><a href="%s">%s</a></div>\n'%(buildbotURL,
+                                                                 projectName)
+        # the color in the following line gives python-mode trouble
+        data += ("<h1>Build <a href=\"%s\">%s</a>:#%d</h1>\n"
+                 % (self.status.getURLForThing(b.getBuilder()),
+                    b.getBuilder().getName(), b.getNumber()))
+        data += "<h2>Buildslave:</h2>\n %s\n" % html.escape(b.getSlavename())
+        data += "<h2>Reason:</h2>\n%s\n" % html.escape(b.getReason())
+
+        branch, revision, patch = b.getSourceStamp()
+        data += "<h2>SourceStamp:</h2>\n"
+        data += " <ul>\n"
+        if branch:
+            data += "  <li>Branch: %s</li>\n" % html.escape(branch)
+        if revision:
+            data += "  <li>Revision: %s</li>\n" % html.escape(str(revision))
+        if patch:
+            data += "  <li>Patch: YES</li>\n" # TODO: provide link to .diff
+        if b.getChanges():
+            data += "  <li>Changes: see below</li>\n"
+        if (branch is None and revision is None and patch is None
+            and not b.getChanges()):
+            data += "  <li>build of most recent revision</li>\n"
+        data += " </ul>\n"
+        if b.isFinished():
+            data += "<h2>Results:</h2>\n"
+            data += " ".join(b.getText()) + "\n"
+            if b.getTestResults():
+                url = request.childLink("tests")
+                data += "<h3><a href=\"%s\">test results</a></h3>\n" % url
+        else:
+            data += "<h2>Build In Progress</h2>"
+            if self.control is not None:
+                stopURL = urllib.quote(request.childLink("stop"))
+                data += """
+                <form action="%s" class='command stopbuild'>
+                <p>To stop this build, fill out the following fields and
+                push the 'Stop' button</p>\n""" % stopURL
+                data += make_row("Your name:",
+                                 "<input type='text' name='username' />")
+                data += make_row("Reason for stopping build:",
+                                 "<input type='text' name='comments' />")
+                data += """<input type="submit" value="Stop Builder" />
+                </form>
+                """
+
+        if b.isFinished() and self.builderControl is not None:
+            data += "<h3>Resubmit Build:</h3>\n"
+            # can we rebuild it exactly?
+            exactly = (revision is not None) or b.getChanges()
+            if exactly:
+                data += ("<p>This tree was built from a specific set of \n"
+                         "source files, and can be rebuilt exactly</p>\n")
+            else:
+                data += ("<p>This tree was built from the most recent "
+                         "revision")
+                if branch:
+                    data += " (along some branch)"
+                data += (" and thus it might not be possible to rebuild it \n"
+                         "exactly. Any changes that have been committed \n"
+                         "after this build was started <b>will</b> be \n"
+                         "included in a rebuild.</p>\n")
+            rebuildURL = urllib.quote(request.childLink("rebuild"))
+            data += ('<form action="%s" class="command rebuild">\n'
+                     % rebuildURL)
+            data += make_row("Your name:",
+                             "<input type='text' name='username' />")
+            data += make_row("Reason for re-running build:",
+                             "<input type='text' name='comments' />")
+            data += '<input type="submit" value="Rebuild" />\n'
+
+        data += "<h2>Steps and Logfiles:</h2>\n"
+        if b.getLogs():
+            data += "<ol>\n"
+            for s in b.getSteps():
+                data += (" <li><a href=\"%s\">%s</a> [%s]\n"
+                         % (self.status.getURLForThing(s), s.getName(),
+                            " ".join(s.getText())))
+                if s.getLogs():
+                    data += "  <ol>\n"
+                    for logfile in s.getLogs():
+                        data += ("   <li><a href=\"%s\">%s</a></li>\n" %
+                                 (self.status.getURLForThing(logfile),
+                                  logfile.getName()))
+                    data += "  </ol>\n"
+                data += " </li>\n"
+            data += "</ol>\n"
+
+        data += ("<h2>Blamelist:</h2>\n"
+                 " <ol>\n")
+        for who in b.getResponsibleUsers():
+            data += "  <li>%s</li>\n" % html.escape(who)
+        data += (" </ol>\n"
+                 "<h2>All Changes</h2>\n")
+        changes = b.getChanges()
+        if changes:
+            data += "<ol>\n"
+            for c in changes:
+                data += "<li>" + c.asHTML() + "</li>\n"
+            data += "</ol>\n"
+        #data += html.PRE(b.changesText()) # TODO
+        return data
+
+    def stop(self, request):
+        log.msg("web stopBuild of build %s:%s" % \
+                (self.build.getBuilder().getName(),
+                 self.build.getNumber()))
+        name = request.args.get("username", ["<unknown>"])[0]
+        comments = request.args.get("comments", ["<no reason specified>"])[0]
+        reason = ("The web-page 'stop build' button was pressed by "
+                  "'%s': %s\n" % (name, comments))
+        self.control.stopBuild(reason)
+        # we're at http://localhost:8080/svn-hello/builds/5/stop?[args] and
+        # we want to go to: http://localhost:8080/svn-hello/builds/5 or
+        # http://localhost:8080/
+        #
+        #return Redirect("../%d" % self.build.getNumber())
+        r = Redirect("../../..")
+        d = defer.Deferred()
+        reactor.callLater(1, d.callback, r)
+        return DeferredResource(d)
+
+    def rebuild(self, request):
+        log.msg("web rebuild of build %s:%s" % \
+                (self.build.getBuilder().getName(),
+                 self.build.getNumber()))
+        name = request.args.get("username", ["<unknown>"])[0]
+        comments = request.args.get("comments", ["<no reason specified>"])[0]
+        reason = ("The web-page 'rebuild' button was pressed by "
+                  "'%s': %s\n" % (name, comments))
+        if not self.builderControl or not self.build.isFinished():
+            log.msg("could not rebuild: bc=%s, isFinished=%s"
+                    % (self.builderControl, self.build.isFinished()))
+            # TODO: indicate an error
+        else:
+            self.builderControl.resubmitBuild(self.build, reason)
+        # we're at http://localhost:8080/svn-hello/builds/5/rebuild?[args] and
+        # we want to go to the top, at http://localhost:8080/
+        r = Redirect("../../..")
+        d = defer.Deferred()
+        reactor.callLater(1, d.callback, r)
+        return DeferredResource(d)
+
+    def getChild(self, path, request):
+        if path == "tests":
+            return StatusResourceTestResults(self.status,
+                                             self.build.getTestResults())
+        if path == "stop":
+            return self.stop(request)
+        if path == "rebuild":
+            return self.rebuild(request)
+        if path.startswith("step-"):
+            stepname = path[len("step-"):]
+            steps = self.build.getSteps()
+            for s in steps:
+                if s.getName() == stepname:
+                    return StatusResourceBuildStep(self.status, s)
+            return NoResource("No such BuildStep '%s'" % stepname)
+        return NoResource("No such resource '%s'" % path)
+
+# $builder
+class StatusResourceBuilder(HtmlResource):
+
+    def __init__(self, status, builder, control):
+        HtmlResource.__init__(self)
+        self.status = status
+        self.title = builder.getName() + " Builder"
+        self.builder = builder
+        self.control = control
+
+    def body(self, request):
+        b = self.builder
+        slaves = b.getSlaves()
+        connected_slaves = [s for s in slaves if s.isConnected()]
+
+        buildbotURL = self.status.getBuildbotURL()
+        projectName = self.status.getProjectName()
+        data = "<a href=\"%s\">%s</a>\n" % (buildbotURL, projectName)
+        data += make_row("Builder:", html.escape(b.getName()))
+        b1 = b.getBuild(-1)
+        if b1 is not None:
+            data += make_row("Current/last build:", str(b1.getNumber()))
+        data += "\n<br />BUILDSLAVES<br />\n"
+        data += "<ol>\n"
+        for slave in slaves:
+            data += "<li><b>%s</b>: " % html.escape(slave.getName())
+            if slave.isConnected():
+                data += "CONNECTED\n"
+                if slave.getAdmin():
+                    data += make_row("Admin:", html.escape(slave.getAdmin()))
+                if slave.getHost():
+                    data += "<span class='label'>Host info:</span>\n"
+                    data += html.PRE(slave.getHost())
+            else:
+                data += ("NOT CONNECTED\n")
+            data += "</li>\n"
+        data += "</ol>\n"
+
+        if self.control is not None and connected_slaves:
+            forceURL = urllib.quote(request.childLink("force"))
+            data += (
+                """
+                <form action='%(forceURL)s' class='command forcebuild'>
+                <p>To force a build, fill out the following fields and
+                push the 'Force Build' button</p>"""
+                + make_row("Your name:",
+                           "<input type='text' name='username' />")
+                + make_row("Reason for build:",
+                           "<input type='text' name='comments' />")
+                + make_row("Branch to build:",
+                           "<input type='text' name='branch' />")
+                + make_row("Revision to build:",
+                           "<input type='text' name='revision' />")
+                + """
+                <input type='submit' value='Force Build' />
+                </form>
+                """) % {"forceURL": forceURL}
+        elif self.control is not None:
+            data += """
+            <p>All buildslaves appear to be offline, so it's not possible
+            to force this build to execute at this time.</p>
+            """
+
+        if self.control is not None:
+            pingURL = urllib.quote(request.childLink("ping"))
+            data += """
+            <form action="%s" class='command pingbuilder'>
+            <p>To ping the buildslave(s), push the 'Ping' button</p>
+
+            <input type="submit" value="Ping Builder" />
+            </form>
+            """ % pingURL
+
+        return data
+
+    def force(self, request):
+        name = request.args.get("username", ["<unknown>"])[0]
+        reason = request.args.get("comments", ["<no reason specified>"])[0]
+        branch = request.args.get("branch", [""])[0]
+        revision = request.args.get("revision", [""])[0]
+
+        r = "The web-page 'force build' button was pressed by '%s': %s\n" \
+            % (name, reason)
+        log.msg("web forcebuild of builder '%s', branch='%s', revision='%s'"
+                % (self.builder.name, branch, revision))
+
+        if not self.control:
+            # TODO: tell the web user that their request was denied
+            log.msg("but builder control is disabled")
+            return Redirect("..")
+
+        # keep weird stuff out of the branch and revision strings. TODO:
+        # centralize this somewhere.
+        if not re.match(r'^[\w\.\-\/]*$', branch):
+            log.msg("bad branch '%s'" % branch)
+            return Redirect("..")
+        if not re.match(r'^[\w\.\-\/]*$', revision):
+            log.msg("bad revision '%s'" % revision)
+            return Redirect("..")
+        if branch == "":
+            branch = None
+        if revision == "":
+            revision = None
+
+        # TODO: if we can authenticate that a particular User pushed the
+        # button, use their name instead of None, so they'll be informed of
+        # the results.
+        s = SourceStamp(branch=branch, revision=revision)
+        req = BuildRequest(r, s, self.builder.getName())
+        try:
+            self.control.requestBuildSoon(req)
+        except interfaces.NoSlaveError:
+            # TODO: tell the web user that their request could not be
+            # honored
+            pass
+        return Redirect("..")
+
+    def ping(self, request):
+        log.msg("web ping of builder '%s'" % self.builder.name)
+        self.control.ping() # TODO: there ought to be an ISlaveControl
+        return Redirect("..")
+
+    def getChild(self, path, request):
+        if path == "force":
+            return self.force(request)
+        if path == "ping":
+            return self.ping(request)
+        if not path in ("events", "builds"):
+            return NoResource("Bad URL '%s'" % path)
+        num = request.postpath.pop(0)
+        request.prepath.append(num)
+        num = int(num)
+        if path == "events":
+            # TODO: is this dead code? .statusbag doesn't exist, right?
+            log.msg("getChild['path']: %s" % request.uri)
+            return NoResource("events are unavailable until code gets fixed")
+            filename = request.postpath.pop(0)
+            request.prepath.append(filename)
+            e = self.builder.statusbag.getEventNumbered(num)
+            if not e:
+                return NoResource("No such event '%d'" % num)
+            file = e.files.get(filename, None)
+            if file == None:
+                return NoResource("No such file '%s'" % filename)
+            if type(file) == type(""):
+                if file[:6] in ("<HTML>", "<html>"):
+                    return static.Data(file, "text/html")
+                return static.Data(file, "text/plain")
+            return file
+        if path == "builds":
+            build = self.builder.getBuild(num)
+            if build:
+                control = None
+                if self.control:
+                    control = self.control.getBuild(num)
+                return StatusResourceBuild(self.status, build,
+                                           self.control, control)
+            else:
+                return NoResource("No such build '%d'" % num)
+        return NoResource("really weird URL %s" % path)
+
+# $changes/NN
+class StatusResourceChanges(HtmlResource):
+    def __init__(self, status, changemaster):
+        HtmlResource.__init__(self)
+        self.status = status
+        self.changemaster = changemaster
+    def body(self, request):
+        data = ""
+        data += "Change sources:\n"
+        sources = list(self.changemaster)
+        if sources:
+            data += "<ol>\n"
+            for s in sources:
+                data += "<li>%s</li>\n" % s.describe()
+            data += "</ol>\n"
+        else:
+            data += "none (push only)\n"
+        return data
+    def getChild(self, path, request):
+        num = int(path)
+        c = self.changemaster.getChangeNumbered(num)
+        if not c:
+            return NoResource("No change number '%d'" % num)
+        return StaticHTML(c.asHTML(), "Change #%d" % num)
+
+textlog_stylesheet = """
+<style type="text/css">
+ div.data {
+  font-family: "Courier New", courier, monospace;
+ }
+ span.stdout {
+  font-family: "Courier New", courier, monospace;
+ }
+ span.stderr {
+  font-family: "Courier New", courier, monospace;
+  color: red;
+ }
+ span.header {
+  font-family: "Courier New", courier, monospace;
+  color: blue;
+ }
+</style>
+"""
+
+class ChunkConsumer:
+    if implements:
+        implements(interfaces.IStatusLogConsumer)
+    else:
+        __implements__ = interfaces.IStatusLogConsumer,
+
+    def __init__(self, original, textlog):
+        self.original = original
+        self.textlog = textlog
+    def registerProducer(self, producer, streaming):
+        self.producer = producer
+        self.original.registerProducer(producer, streaming)
+    def unregisterProducer(self):
+        self.original.unregisterProducer()
+    def writeChunk(self, chunk):
+        formatted = self.textlog.content([chunk])
+        try:
+            self.original.write(formatted)
+        except pb.DeadReferenceError:
+            self.producer.stopProducing()
+    def finish(self):
+        self.textlog.finished()
+
+class TextLog(Resource):
+    # a new instance of this Resource is created for each client who views
+    # it, so we can afford to track the request in the Resource.
+    if implements:
+        implements(IHTMLLog)
+    else:
+        __implements__ = IHTMLLog,
+
+    asText = False
+    subscribed = False
+
+    def __init__(self, original):
+        Resource.__init__(self)
+        self.original = original
+
+    def getChild(self, path, request):
+        if path == "text":
+            self.asText = True
+            return self
+        return NoResource("bad pathname")
+
+    def htmlHeader(self, request):
+        title = "Log File contents"
+        data = "<html>\n<head><title>" + title + "</title>\n"
+        data += textlog_stylesheet
+        data += "</head>\n"
+        data += "<body vlink=\"#800080\">\n"
+        texturl = request.childLink("text")
+        data += '<a href="%s">(view as text)</a><br />\n' % texturl
+        data += "<pre>\n"
+        return data
+
+    def content(self, entries):
+        spanfmt = '<span class="%s">%s</span>'
+        data = ""
+        for type, entry in entries:
+            if self.asText:
+                if type != builder.HEADER:
+                    data += entry
+            else:
+                data += spanfmt % (builder.ChunkTypes[type],
+                                   html.escape(entry))
+        return data
+
+    def htmlFooter(self):
+        data = "</pre>\n"
+        data += "</body></html>\n"
+        return data
+
+    def render_HEAD(self, request):
+        if self.asText:
+            request.setHeader("content-type", "text/plain")
+        else:
+            request.setHeader("content-type", "text/html")
+
+        # vague approximation, ignores markup
+        request.setHeader("content-length", self.original.length)
+        return ''
+
+    def render_GET(self, req):
+        self.req = req
+
+        if self.asText:
+            req.setHeader("content-type", "text/plain")
+        else:
+            req.setHeader("content-type", "text/html")
+
+        if not self.asText:
+            req.write(self.htmlHeader(req))
+
+        self.original.subscribeConsumer(ChunkConsumer(req, self))
+        return server.NOT_DONE_YET
+
+    def finished(self):
+        if not self.req:
+            return
+        try:
+            if not self.asText:
+                self.req.write(self.htmlFooter())
+            self.req.finish()
+        except pb.DeadReferenceError:
+            pass
+        # break the cycle, the Request's .notifications list includes the
+        # Deferred (from req.notifyFinish) that's pointing at us.
+        self.req = None
+
+components.registerAdapter(TextLog, interfaces.IStatusLog, IHTMLLog)
+
+
+class HTMLLog(Resource):
+    if implements:
+        implements(IHTMLLog)
+    else:
+        __implements__ = IHTMLLog,
+
+
+    def __init__(self, original):
+        Resource.__init__(self)
+        self.original = original
+
+    def render(self, request):
+        request.setHeader("content-type", "text/html")
+        return self.original.html
+
+components.registerAdapter(HTMLLog, builder.HTMLLogFile, IHTMLLog)
+
+
+class CurrentBox(components.Adapter):
+    # this provides the "current activity" box, just above the builder name
+    if implements:
+        implements(ICurrentBox)
+    else:
+        __implements__ = ICurrentBox,
+
+    def formatETA(self, eta):
+        if eta is None:
+            return []
+        if eta < 0:
+            return ["Soon"]
+        abstime = time.strftime("%H:%M:%S", time.localtime(util.now()+eta))
+        return ["ETA in", "%d secs" % eta, "at %s" % abstime]
+
+    def getBox(self, status):
+        # getState() returns offline, idle, or building
+        state, builds = self.original.getState()
+
+        # look for upcoming builds. We say the state is "waiting" if the
+        # builder is otherwise idle and there is a scheduler which tells us a
+        # build will be performed some time in the near future. TODO: this
+        # functionality used to be in BuilderStatus.. maybe this code should
+        # be merged back into it.
+        upcoming = []
+        builderName = self.original.getName()
+        for s in status.getSchedulers():
+            if builderName in s.listBuilderNames():
+                upcoming.extend(s.getPendingBuildTimes())
+        if state == "idle" and upcoming:
+            state = "waiting"
+
+        if state == "building":
+            color = "yellow"
+            text = ["building"]
+            if builds:
+                for b in builds:
+                    eta = b.getETA()
+                    if eta:
+                        text.extend(self.formatETA(eta))
+        elif state == "offline":
+            color = "red"
+            text = ["offline"]
+        elif state == "idle":
+            color = "white"
+            text = ["idle"]
+        elif state == "waiting":
+            color = "yellow"
+            text = ["waiting"]
+        else:
+            # just in case I add a state and forget to update this
+            color = "white"
+            text = [state]
+
+        # TODO: for now, this pending/upcoming stuff is in the "current
+        # activity" box, but really it should go into a "next activity" row
+        # instead. The only times it should show up in "current activity" is
+        # when the builder is otherwise idle.
+
+        # are any builds pending? (waiting for a slave to be free)
+        pbs = self.original.getPendingBuilds()
+        if pbs:
+            text.append("%d pending" % len(pbs))
+        for t in upcoming:
+            text.extend(["next at", 
+                         time.strftime("%H:%M:%S", time.localtime(t)),
+                         "[%d secs]" % (t - util.now()),
+                         ])
+            # TODO: the upcoming-builds box looks like:
+            #  ['waiting', 'next at', '22:14:15', '[86 secs]']
+            # while the currently-building box is reversed:
+            #  ['building', 'ETA in', '2 secs', 'at 22:12:50']
+            # consider swapping one of these to make them look the same. also
+            # consider leaving them reversed to make them look different.
+        return Box(text, color=color, class_="Activity " + state)
+
+components.registerAdapter(CurrentBox, builder.BuilderStatus, ICurrentBox)
+
+class ChangeBox(components.Adapter):
+    if implements:
+        implements(IBox)
+    else:
+        __implements__ = IBox,
+
+    def getBox(self):
+        url = "changes/%d" % self.original.number
+        text = '<a href="%s">%s</a>' % (url, html.escape(self.original.who))
+        return Box([text], color="white", class_="Change")
+components.registerAdapter(ChangeBox, changes.Change, IBox)
+
+class BuildBox(components.Adapter):
+    # this provides the yellow "starting line" box for each build
+    if implements:
+        implements(IBox)
+    else:
+        __implements__ = IBox,
+
+    def getBox(self):
+        b = self.original
+        name = b.getBuilder().getName()
+        number = b.getNumber()
+        url = "%s/builds/%d" % (urllib.quote(name, safe=''), number)
+        reason = b.getReason()
+        text = ('<a title="Reason: %s" href="%s">Build %d</a>'
+                % (html.escape(reason), url, number))
+        color = "yellow"
+        class_ = "start"
+        if b.isFinished() and not b.getSteps():
+            # the steps have been pruned, so there won't be any indication
+            # of whether it succeeded or failed. Color the box red or green
+            # to show its status
+            color = b.getColor()
+            class_ = build_get_class(b)
+        return Box([text], color=color, class_="BuildStep " + class_)
+components.registerAdapter(BuildBox, builder.BuildStatus, IBox)
+
+class StepBox(components.Adapter):
+    if implements:
+        implements(IBox)
+    else:
+        __implements__ = IBox,
+
+    def getBox(self):
+        b = self.original.getBuild()
+        urlbase = "%s/builds/%d/step-%s" % (
+            urllib.quote(b.getBuilder().getName(), safe=''),
+            b.getNumber(),
+            urllib.quote(self.original.getName(), safe=''))
+        text = self.original.getText()
+        if text is None:
+            log.msg("getText() gave None", urlbase)
+            text = []
+        text = text[:]
+        logs = self.original.getLogs()
+        for num in range(len(logs)):
+            name = logs[num].getName()
+            if logs[num].hasContents():
+                url = "%s/%d" % (urlbase, num)
+                text.append("<a href=\"%s\">%s</a>" % (url, html.escape(name)))
+            else:
+                text.append(html.escape(name))
+        urls = self.original.getURLs()
+        ex_url_class = "BuildStep external"
+        for name, target in urls.items():
+            text.append('[<a href="%s" class="%s">%s</a>]' %
+                        (target, ex_url_class, html.escape(name)))
+        color = self.original.getColor()
+        class_ = "BuildStep " + build_get_class(self.original)
+        return Box(text, color, class_=class_)
+components.registerAdapter(StepBox, builder.BuildStepStatus, IBox)
+
+class EventBox(components.Adapter):
+    if implements:
+        implements(IBox)
+    else:
+        __implements__ = IBox,
+
+    def getBox(self):
+        text = self.original.getText()
+        color = self.original.getColor()
+        class_ = "Event"
+        if color:
+            class_ += " " + color
+        return Box(text, color, class_=class_)
+components.registerAdapter(EventBox, builder.Event, IBox)
+        
+
+class BuildTopBox(components.Adapter):
+    # this provides a per-builder box at the very top of the display,
+    # showing the results of the most recent build
+    if implements:
+        implements(IBox)
+    else:
+        __implements__ = IBox,
+
+    def getBox(self):
+        assert interfaces.IBuilderStatus(self.original)
+        b = self.original.getLastFinishedBuild()
+        if not b:
+            return Box(["none"], "white", class_="LastBuild")
+        name = b.getBuilder().getName()
+        number = b.getNumber()
+        url = "%s/builds/%d" % (name, number)
+        text = b.getText()
+        # TODO: add logs?
+        # TODO: add link to the per-build page at 'url'
+        c = b.getColor()
+        class_ = build_get_class(b)
+        return Box(text, c, class_="LastBuild %s" % class_)
+components.registerAdapter(BuildTopBox, builder.BuilderStatus, ITopBox)
+
+class Spacer(builder.Event):
+    def __init__(self, start, finish):
+        self.started = start
+        self.finished = finish
+
+class SpacerBox(components.Adapter):
+    if implements:
+        implements(IBox)
+    else:
+        __implements__ = IBox,
+
+    def getBox(self):
+        #b = Box(["spacer"], "white")
+        b = Box([])
+        b.spacer = True
+        return b
+components.registerAdapter(SpacerBox, Spacer, IBox)
+    
+def insertGaps(g, lastEventTime, idleGap=2):
+    debug = False
+
+    e = g.next()
+    starts, finishes = e.getTimes()
+    if debug: log.msg("E0", starts, finishes)
+    if finishes == 0:
+        finishes = starts
+    if debug: log.msg("E1 finishes=%s, gap=%s, lET=%s" % \
+                      (finishes, idleGap, lastEventTime))
+    if finishes is not None and finishes + idleGap < lastEventTime:
+        if debug: log.msg(" spacer0")
+        yield Spacer(finishes, lastEventTime)
+
+    followingEventStarts = starts
+    if debug: log.msg(" fES0", starts)
+    yield e
+
+    while 1:
+        e = g.next()
+        starts, finishes = e.getTimes()
+        if debug: log.msg("E2", starts, finishes)
+        if finishes == 0:
+            finishes = starts
+        if finishes is not None and finishes + idleGap < followingEventStarts:
+            # there is a gap between the end of this event and the beginning
+            # of the next one. Insert an idle event so the waterfall display
+            # shows a gap here.
+            if debug:
+                log.msg(" finishes=%s, gap=%s, fES=%s" % \
+                        (finishes, idleGap, followingEventStarts))
+            yield Spacer(finishes, followingEventStarts)
+        yield e
+        followingEventStarts = starts
+        if debug: log.msg(" fES1", starts)
+
+
+class WaterfallStatusResource(HtmlResource):
+    """This builds the main status page, with the waterfall display, and
+    all child pages."""
+    title = "BuildBot"
+    def __init__(self, status, changemaster, categories, css=None):
+        HtmlResource.__init__(self)
+        self.status = status
+        self.changemaster = changemaster
+        self.categories = categories
+        p = self.status.getProjectName()
+        if p:
+            self.title = "BuildBot: %s" % p
+        self.css = css
+
+    def body(self, request):
+        "This method builds the main waterfall display."
+
+        data = ''
+
+        projectName = self.status.getProjectName()
+        projectURL = self.status.getProjectURL()
+
+        phase = request.args.get("phase",["2"])
+        phase = int(phase[0])
+
+        showBuilders = request.args.get("show", None)
+        allBuilders = self.status.getBuilderNames(categories=self.categories)
+        if showBuilders:
+            builderNames = []
+            for b in showBuilders:
+                if b not in allBuilders:
+                    continue
+                if b in builderNames:
+                    continue
+                builderNames.append(b)
+        else:
+            builderNames = allBuilders
+        builders = map(lambda name: self.status.getBuilder(name),
+                       builderNames)
+
+        if phase == -1:
+            return self.body0(request, builders)
+        (changeNames, builderNames, timestamps, eventGrid, sourceEvents) = \
+                      self.buildGrid(request, builders)
+        if phase == 0:
+            return self.phase0(request, (changeNames + builderNames),
+                               timestamps, eventGrid)
+        # start the table: top-header material
+        data += '<table border="0" cellspacing="0">\n'
+
+        if projectName and projectURL:
+            # TODO: this is going to look really ugly
+            topleft = "<a href=\"%s\">%s</a><br />last build" % \
+                      (projectURL, projectName)
+        else:
+            topleft = "last build"
+        data += ' <tr class="LastBuild">\n'
+        data += td(topleft, align="right", colspan=2, class_="Project")
+        for b in builders:
+            box = ITopBox(b).getBox()
+            data += box.td(align="center")
+        data += " </tr>\n"
+
+        data += ' <tr class="Activity">\n'
+        data += td('current activity', align='right', colspan=2)
+        for b in builders:
+            box = ICurrentBox(b).getBox(self.status)
+            data += box.td(align="center")
+        data += " </tr>\n"
+        
+        data += " <tr>\n"
+        TZ = time.tzname[time.daylight]
+        data += td("time (%s)" % TZ, align="center", class_="Time")
+        name = changeNames[0]
+        data += td(
+                "<a href=\"%s\">%s</a>" % (urllib.quote(name, safe=''), name),
+                align="center", class_="Change")
+        for name in builderNames:
+            data += td(
+                #"<a href=\"%s\">%s</a>" % (request.childLink(name), name),
+                "<a href=\"%s\">%s</a>" % (urllib.quote(name, safe=''), name),
+                align="center", class_="Builder")
+        data += " </tr>\n"
+
+        if phase == 1:
+            f = self.phase1
+        else:
+            f = self.phase2
+        data += f(request, changeNames + builderNames, timestamps, eventGrid,
+                  sourceEvents)
+
+        data += "</table>\n"
+
+        data += "<hr />\n"
+
+        data += "<a href=\"http://buildbot.sourceforge.net/\">Buildbot</a>"
+        data += "-%s " % version
+        if projectName:
+            data += "working for the "
+            if projectURL:
+                data += "<a href=\"%s\">%s</a> project." % (projectURL,
+                                                            projectName)
+            else:
+                data += "%s project." % projectName
+        data += "<br />\n"
+        # TODO: push this to the right edge, if possible
+        data += ("Page built: " +
+                 time.strftime("%a %d %b %Y %H:%M:%S",
+                               time.localtime(util.now()))
+                 + "\n")
+        return data
+
+    def body0(self, request, builders):
+        # build the waterfall display
+        data = ""
+        data += "<h2>Basic display</h2>\n"
+        data += "<p>See <a href=\"%s\">here</a>" % \
+                urllib.quote(request.childLink("waterfall"))
+        data += " for the waterfall display</p>\n"
+                
+        data += '<table border="0" cellspacing="0">\n'
+        names = map(lambda builder: builder.name, builders)
+
+        # the top row is two blank spaces, then the top-level status boxes
+        data += " <tr>\n"
+        data += td("", colspan=2)
+        for b in builders:
+            text = ""
+            color = "#ca88f7"
+            state, builds = b.getState()
+            if state != "offline":
+                text += "%s<br />\n" % state #b.getCurrentBig().text[0]
+            else:
+                text += "OFFLINE<br />\n"
+                color = "#ffe0e0"
+            data += td(text, align="center", bgcolor=color)
+
+        # the next row has the column headers: time, changes, builder names
+        data += " <tr>\n"
+        data += td("Time", align="center")
+        data += td("Changes", align="center")
+        for name in names:
+            data += td(
+                "<a href=\"%s\">%s</a>" % (urllib.quote(request.childLink(name)), name),
+                align="center")
+        data += " </tr>\n"
+
+        # all further rows involve timestamps, commit events, and build events
+        data += " <tr>\n"
+        data += td("04:00", align="bottom")
+        data += td("fred", align="center")
+        for name in names:
+            data += td("stuff", align="center", bgcolor="red")
+        data += " </tr>\n"
+
+        data += "</table>\n"
+        return data
+    
+    def buildGrid(self, request, builders):
+        debug = False
+
+        # XXX: see if we can use a cached copy
+
+        # first step is to walk backwards in time, asking each column
+        # (commit, all builders) if they have any events there. Build up the
+        # array of events, and stop when we have a reasonable number.
+            
+        commit_source = self.changemaster
+
+        lastEventTime = util.now()
+        sources = [commit_source] + builders
+        changeNames = ["changes"]
+        builderNames = map(lambda builder: builder.getName(), builders)
+        sourceNames = changeNames + builderNames
+        sourceEvents = []
+        sourceGenerators = []
+        for s in sources:
+            gen = insertGaps(s.eventGenerator(), lastEventTime)
+            sourceGenerators.append(gen)
+            # get the first event
+            try:
+                e = gen.next()
+                event = interfaces.IStatusEvent(e)
+                if debug:
+                    log.msg("gen %s gave1 %s" % (gen, event.getText()))
+            except StopIteration:
+                event = None
+            sourceEvents.append(event)
+        eventGrid = []
+        timestamps = []
+        spanLength = 10  # ten-second chunks
+        tooOld = util.now() - 12*60*60 # never show more than 12 hours
+        maxPageLen = 200
+
+        lastEventTime = 0
+        for e in sourceEvents:
+            if e and e.getTimes()[0] > lastEventTime:
+                lastEventTime = e.getTimes()[0]
+        if lastEventTime == 0:
+            lastEventTime = util.now()
+
+        spanStart = lastEventTime - spanLength
+        debugGather = 0
+
+        while 1:
+            if debugGather: log.msg("checking (%s,]" % spanStart)
+            # the tableau of potential events is in sourceEvents[]. The
+            # window crawls backwards, and we examine one source at a time.
+            # If the source's top-most event is in the window, it is pushed
+            # onto the events[] array and the tableau is refilled. This
+            # continues until the tableau event is not in the window (or is
+            # missing).
+
+            spanEvents = [] # for all sources, in this span. row of eventGrid
+            firstTimestamp = None # timestamp of first event in the span
+            lastTimestamp = None # last pre-span event, for next span
+
+            for c in range(len(sourceGenerators)):
+                events = [] # for this source, in this span. cell of eventGrid
+                event = sourceEvents[c]
+                while event and spanStart < event.getTimes()[0]:
+                    # to look at windows that don't end with the present,
+                    # condition the .append on event.time <= spanFinish
+                    if not IBox(event, None):
+                        log.msg("BAD EVENT", event, event.getText())
+                        assert 0
+                    if debug:
+                        log.msg("pushing", event.getText(), event)
+                    events.append(event)
+                    starts, finishes = event.getTimes()
+                    firstTimestamp = util.earlier(firstTimestamp, starts)
+                    try:
+                        event = sourceGenerators[c].next()
+                        #event = interfaces.IStatusEvent(event)
+                        if debug:
+                            log.msg("gen[%s] gave2 %s" % (sourceNames[c],
+                                                          event.getText()))
+                    except StopIteration:
+                        event = None
+                if debug:
+                    log.msg("finished span")
+
+                if event:
+                    # this is the last pre-span event for this source
+                    lastTimestamp = util.later(lastTimestamp,
+                                               event.getTimes()[0])
+                if debugGather:
+                    log.msg(" got %s from %s" % (events, sourceNames[c]))
+                sourceEvents[c] = event # refill the tableau
+                spanEvents.append(events)
+
+            if firstTimestamp is not None:
+                eventGrid.append(spanEvents)
+                timestamps.append(firstTimestamp)
+            
+
+            if lastTimestamp:
+                spanStart = lastTimestamp - spanLength
+            else:
+                # no more events
+                break
+            if lastTimestamp < tooOld:
+                pass
+                #break
+            if len(timestamps) > maxPageLen:
+                break
+            
+            
+            # now loop
+            
+        # loop is finished. now we have eventGrid[] and timestamps[]
+        if debugGather: log.msg("finished loop")
+        assert(len(timestamps) == len(eventGrid))
+        return (changeNames, builderNames, timestamps, eventGrid, sourceEvents)
+    
+    def phase0(self, request, sourceNames, timestamps, eventGrid):
+        # phase0 rendering
+        if not timestamps:
+            return "no events"
+        data = ""
+        for r in range(0, len(timestamps)):
+            data += "<p>\n"
+            data += "[%s]<br />" % timestamps[r]
+            row = eventGrid[r]
+            assert(len(row) == len(sourceNames))
+            for c in range(0, len(row)):
+                if row[c]:
+                    data += "<b>%s</b><br />\n" % sourceNames[c]
+                    for e in row[c]:
+                        log.msg("Event", r, c, sourceNames[c], e.getText())
+                        lognames = [loog.getName() for loog in e.getLogs()]
+                        data += "%s: %s: %s %s<br />" % (e.getText(),
+                                                         e.getTimes()[0],
+                                                         e.getColor(),
+                                                         lognames)
+                else:
+                    data += "<b>%s</b> [none]<br />\n" % sourceNames[c]
+        return data
+    
+    def phase1(self, request, sourceNames, timestamps, eventGrid,
+               sourceEvents):
+        # phase1 rendering: table, but boxes do not overlap
+        data = ""
+        if not timestamps:
+            return data
+        lastDate = None
+        for r in range(0, len(timestamps)):
+            chunkstrip = eventGrid[r]
+            # chunkstrip is a horizontal strip of event blocks. Each block
+            # is a vertical list of events, all for the same source.
+            assert(len(chunkstrip) == len(sourceNames))
+            maxRows = reduce(lambda x,y: max(x,y),
+                             map(lambda x: len(x), chunkstrip))
+            for i in range(maxRows):
+                data += " <tr>\n"
+                if i == 0:
+                    stuff = []
+                    # add the date at the beginning, and each time it changes
+                    today = time.strftime("<b>%d %b %Y</b>",
+                                          time.localtime(timestamps[r]))
+                    todayday = time.strftime("<b>%a</b>",
+                                             time.localtime(timestamps[r]))
+                    if today != lastDate:
+                        stuff.append(todayday)
+                        stuff.append(today)
+                        lastDate = today
+                    stuff.append(
+                        time.strftime("%H:%M:%S",
+                                      time.localtime(timestamps[r])))
+                    data += td(stuff, valign="bottom", align="center",
+                               rowspan=maxRows, class_="Time")
+                for c in range(0, len(chunkstrip)):
+                    block = chunkstrip[c]
+                    assert(block != None) # should be [] instead
+                    # bottom-justify
+                    offset = maxRows - len(block)
+                    if i < offset:
+                        data += td("")
+                    else:
+                        e = block[i-offset]
+                        box = IBox(e).getBox()
+                        box.parms["show_idle"] = 1
+                        data += box.td(valign="top", align="center")
+                data += " </tr>\n"
+        
+        return data
+    
+    def phase2(self, request, sourceNames, timestamps, eventGrid,
+               sourceEvents):
+        data = ""
+        if not timestamps:
+            return data
+        # first pass: figure out the height of the chunks, populate grid
+        grid = []
+        for i in range(1+len(sourceNames)):
+            grid.append([])
+        # grid is a list of columns, one for the timestamps, and one per
+        # event source. Each column is exactly the same height. Each element
+        # of the list is a single <td> box.
+        lastDate = time.strftime("<b>%d %b %Y</b>",
+                                 time.localtime(util.now()))
+        for r in range(0, len(timestamps)):
+            chunkstrip = eventGrid[r]
+            # chunkstrip is a horizontal strip of event blocks. Each block
+            # is a vertical list of events, all for the same source.
+            assert(len(chunkstrip) == len(sourceNames))
+            maxRows = reduce(lambda x,y: max(x,y),
+                             map(lambda x: len(x), chunkstrip))
+            for i in range(maxRows):
+                if i != maxRows-1:
+                    grid[0].append(None)
+                else:
+                    # timestamp goes at the bottom of the chunk
+                    stuff = []
+                    # add the date at the beginning (if it is not the same as
+                    # today's date), and each time it changes
+                    todayday = time.strftime("<b>%a</b>",
+                                             time.localtime(timestamps[r]))
+                    today = time.strftime("<b>%d %b %Y</b>",
+                                          time.localtime(timestamps[r]))
+                    if today != lastDate:
+                        stuff.append(todayday)
+                        stuff.append(today)
+                        lastDate = today
+                    stuff.append(
+                        time.strftime("%H:%M:%S",
+                                      time.localtime(timestamps[r])))
+                    grid[0].append(Box(text=stuff, class_="Time",
+                                       valign="bottom", align="center"))
+
+            # at this point the timestamp column has been populated with
+            # maxRows boxes, most None but the last one has the time string
+            for c in range(0, len(chunkstrip)):
+                block = chunkstrip[c]
+                assert(block != None) # should be [] instead
+                for i in range(maxRows - len(block)):
+                    # fill top of chunk with blank space
+                    grid[c+1].append(None)
+                for i in range(len(block)):
+                    # so the events are bottom-justified
+                    b = IBox(block[i]).getBox()
+                    b.parms['valign'] = "top"
+                    b.parms['align'] = "center"
+                    grid[c+1].append(b)
+            # now all the other columns have maxRows new boxes too
+        # populate the last row, if empty
+        gridlen = len(grid[0])
+        for i in range(len(grid)):
+            strip = grid[i]
+            assert(len(strip) == gridlen)
+            if strip[-1] == None:
+                if sourceEvents[i-1]:
+                    filler = IBox(sourceEvents[i-1]).getBox()
+                else:
+                    # this can happen if you delete part of the build history
+                    filler = Box(text=["?"], align="center")
+                strip[-1] = filler
+            strip[-1].parms['rowspan'] = 1
+        # second pass: bubble the events upwards to un-occupied locations
+        # Every square of the grid that has a None in it needs to have
+        # something else take its place.
+        noBubble = request.args.get("nobubble",['0'])
+        noBubble = int(noBubble[0])
+        if not noBubble:
+            for col in range(len(grid)):
+                strip = grid[col]
+                if col == 1: # changes are handled differently
+                    for i in range(2, len(strip)+1):
+                        # only merge empty boxes. Don't bubble commit boxes.
+                        if strip[-i] == None:
+                            next = strip[-i+1]
+                            assert(next)
+                            if next:
+                                #if not next.event:
+                                if next.spacer:
+                                    # bubble the empty box up
+                                    strip[-i] = next
+                                    strip[-i].parms['rowspan'] += 1
+                                    strip[-i+1] = None
+                                else:
+                                    # we are above a commit box. Leave it
+                                    # be, and turn the current box into an
+                                    # empty one
+                                    strip[-i] = Box([], rowspan=1,
+                                                    comment="commit bubble")
+                                    strip[-i].spacer = True
+                            else:
+                                # we are above another empty box, which
+                                # somehow wasn't already converted.
+                                # Shouldn't happen
+                                pass
+                else:
+                    for i in range(2, len(strip)+1):
+                        # strip[-i] will go from next-to-last back to first
+                        if strip[-i] == None:
+                            # bubble previous item up
+                            assert(strip[-i+1] != None)
+                            strip[-i] = strip[-i+1]
+                            strip[-i].parms['rowspan'] += 1
+                            strip[-i+1] = None
+                        else:
+                            strip[-i].parms['rowspan'] = 1
+        # third pass: render the HTML table
+        for i in range(gridlen):
+            data += " <tr>\n"
+            for strip in grid:
+                b = strip[i]
+                if b:
+                    data += b.td()
+                else:
+                    if noBubble:
+                        data += td([])
+                # Nones are left empty, rowspan should make it all fit
+            data += " </tr>\n"
+        return data
+
+
+class StatusResource(Resource):
+    status = None
+    control = None
+    favicon = None
+    robots_txt = None
+
+    def __init__(self, status, control, changemaster, categories, css):
+        """
+        @type  status:       L{buildbot.status.builder.Status}
+        @type  control:      L{buildbot.master.Control}
+        @type  changemaster: L{buildbot.changes.changes.ChangeMaster}
+        """
+        Resource.__init__(self)
+        self.status = status
+        self.control = control
+        self.changemaster = changemaster
+        self.categories = categories
+        self.css = css
+        waterfall = WaterfallStatusResource(self.status, changemaster,
+                                            categories, css)
+        self.putChild("", waterfall)
+
+    def render(self, request):
+        request.redirect(request.prePathURL() + '/')
+        request.finish()
+
+    def getChild(self, path, request):
+        if path == "robots.txt" and self.robots_txt:
+            return static.File(self.robots_txt)
+        if path == "buildbot.css" and self.css:
+            return static.File(self.css)
+        if path == "changes":
+            return StatusResourceChanges(self.status, self.changemaster)
+        if path == "favicon.ico":
+            if self.favicon:
+                return static.File(self.favicon)
+            return NoResource("No favicon.ico registered")
+
+        if path in self.status.getBuilderNames():
+            builder = self.status.getBuilder(path)
+            control = None
+            if self.control:
+                control = self.control.getBuilder(path)
+            return StatusResourceBuilder(self.status, builder, control)
+
+        return NoResource("No such Builder '%s'" % path)
+
+if hasattr(sys, "frozen"):
+    # all 'data' files are in the directory of our executable
+    here = os.path.dirname(sys.executable)
+    buildbot_icon = os.path.abspath(os.path.join(here, "buildbot.png"))
+    buildbot_css = os.path.abspath(os.path.join(here, "classic.css"))
+else:
+    # running from source
+    # the icon is sibpath(__file__, "../buildbot.png") . This is for
+    # portability.
+    up = os.path.dirname
+    buildbot_icon = os.path.abspath(os.path.join(up(up(__file__)),
+                                                 "buildbot.png"))
+    buildbot_css = os.path.abspath(os.path.join(up(__file__), "classic.css"))
+
+class Waterfall(base.StatusReceiverMultiService):
+    """I implement the primary web-page status interface, called a 'Waterfall
+    Display' because builds and steps are presented in a grid of boxes which
+    move downwards over time. The top edge is always the present. Each column
+    represents a single builder. Each box describes a single Step, which may
+    have logfiles or other status information.
+
+    All these pages are served via a web server of some sort. The simplest
+    approach is to let the buildmaster run its own webserver, on a given TCP
+    port, but it can also publish its pages to a L{twisted.web.distrib}
+    distributed web server (which lets the buildbot pages be a subset of some
+    other web server).
+
+    Since 0.6.3, BuildBot defines class attributes on elements so they can be
+    styled with CSS stylesheets. Buildbot uses some generic classes to
+    identify the type of object, and some more specific classes for the
+    various kinds of those types. It does this by specifying both in the
+    class attributes where applicable, separated by a space. It is important
+    that in your CSS you declare the more generic class styles above the more
+    specific ones. For example, first define a style for .Event, and below
+    that for .SUCCESS
+
+    The following CSS class names are used:
+        - Activity, Event, BuildStep, LastBuild: general classes
+        - waiting, interlocked, building, offline, idle: Activity states
+        - start, running, success, failure, warnings, skipped, exception:
+          LastBuild and BuildStep states
+        - Change: box with change
+        - Builder: box for builder name (at top)
+        - Project
+        - Time
+
+    @type parent: L{buildbot.master.BuildMaster}
+    @ivar parent: like all status plugins, this object is a child of the
+                  BuildMaster, so C{.parent} points to a
+                  L{buildbot.master.BuildMaster} instance, through which
+                  the status-reporting object is acquired.
+    """
+
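+    # A hedged CSS sketch of the ordering rule described in the docstring
+    # above (declare the generic class before the specific state); the class
+    # names come from the list above, the colors are made-up values:
+    #
+    #   .Event   { border: 1px solid #777777; }
+    #   .success { background-color: #72ff75; }
+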
+    compare_attrs = ["http_port", "distrib_port", "allowForce",
+                     "categories", "css", "favicon", "robots_txt"]
+
+    def __init__(self, http_port=None, distrib_port=None, allowForce=True,
+                 categories=None, css=buildbot_css, favicon=buildbot_icon,
+                 robots_txt=None):
+        """To have the buildbot run its own web server, pass a port number to
+        C{http_port}. To have it publish its pages through a web.distrib
+        server instead, pass a socket or port specification to C{distrib_port}.
+
+        @type  http_port: int or L{twisted.application.strports} string
+        @param http_port: a strports specification describing which port the
+                          buildbot should use for its web server, with the
+                          Waterfall display as the root page. For backwards
+                          compatibility this can also be an int. Use
+                          'tcp:8000' to listen on that port, or
+                          'tcp:12345:interface=127.0.0.1' if you only want
+                          local processes to connect to it (perhaps because
+                          you are using an HTTP reverse proxy to make the
+                          buildbot available to the outside world, and do not
+                          want to make the raw port visible).
+
+        @type  distrib_port: int or L{twisted.application.strports} string
+        @param distrib_port: Use this if you want to publish the Waterfall
+                             page using web.distrib instead. The most common
+                             case is to provide a string that is an absolute
+                             pathname to the unix socket on which the
+                             publisher should listen
+                             (C{os.path.expanduser("~/.twistd-web-pb")} will
+                             match the default settings of a standard
+                             twisted.web 'personal web server'). Another
+                             possibility is to pass an integer, which means
+                             the publisher should listen on a TCP socket,
+                             allowing the web server to be on a different
+                             machine entirely. Both forms are provided for
+                             backwards compatibility; the preferred form is a
+                             strports specification like
+                             'unix:/home/buildbot/.twistd-web-pb'. Providing
+                             a non-absolute pathname will probably confuse
+                             the strports parser.
+
+        @type  allowForce: bool
+        @param allowForce: if True, present a 'Force Build' button on the
+                           per-Builder page that allows visitors to the web
+                           site to initiate a build. If False, don't provide
+                           this button.
+
+        @type  favicon: string
+        @param favicon: if set, provide the pathname of an image file that
+                        will be used for the 'favicon.ico' resource. Many
+                        browsers automatically request this file and use it
+                        as an icon in any bookmark generated from this site.
+                        Defaults to the buildbot/buildbot.png image provided
+                        in the distribution. Can be set to None to avoid
+                        using a favicon at all.
+
+        @type  robots_txt: string
+        @param robots_txt: if set, provide the pathname of a robots.txt file.
+                           Many search engines request this file and obey the
+                           rules in it. E.g. to disallow them to crawl the
+                           status page, put the following two lines in
+                           robots.txt::
+                              User-agent: *
+                              Disallow: /
+        """
+
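+        # A hedged configuration sketch (kept as a comment so the vendored
+        # module is unchanged at runtime); assuming this module is installed
+        # as buildbot.status.html, a typical master.cfg entry would look
+        # roughly like:
+        #
+        #   from buildbot.status.html import Waterfall
+        #   c['status'].append(Waterfall(http_port="tcp:8010",
+        #                                allowForce=False))
+        #
+        # The port 8010 is an arbitrary example; 'c' is the conventional
+        # master.cfg configuration dictionary.
+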
+        base.StatusReceiverMultiService.__init__(self)
+        assert allowForce in (True, False) # TODO: implement others
+        if type(http_port) is int:
+            http_port = "tcp:%d" % http_port
+        self.http_port = http_port
+        if distrib_port is not None:
+            if type(distrib_port) is int:
+                distrib_port = "tcp:%d" % distrib_port
+            if distrib_port[0] in "/~.": # pathnames
+                distrib_port = "unix:%s" % distrib_port
+        self.distrib_port = distrib_port
+        self.allowForce = allowForce
+        self.categories = categories
+        self.css = css
+        self.favicon = favicon
+        self.robots_txt = robots_txt
+
+    def __repr__(self):
+        if self.http_port is None:
+            return "<Waterfall on path %s>" % self.distrib_port
+        if self.distrib_port is None:
+            return "<Waterfall on port %s>" % self.http_port
+        return "<Waterfall on port %s and path %s>" % (self.http_port,
+                                                       self.distrib_port)
+
+    def setServiceParent(self, parent):
+        """
+        @type  parent: L{buildbot.master.BuildMaster}
+        """
+        base.StatusReceiverMultiService.setServiceParent(self, parent)
+        self.setup()
+
+    def setup(self):
+        status = self.parent.getStatus()
+        if self.allowForce:
+            control = interfaces.IControl(self.parent)
+        else:
+            control = None
+        change_svc = self.parent.change_svc
+        sr = StatusResource(status, control, change_svc, self.categories,
+                            self.css)
+        sr.favicon = self.favicon
+        sr.robots_txt = self.robots_txt
+        self.site = server.Site(sr)
+
+        if self.http_port is not None:
+            s = strports.service(self.http_port, self.site)
+            s.setServiceParent(self)
+        if self.distrib_port is not None:
+            f = pb.PBServerFactory(distrib.ResourcePublisher(self.site))
+            s = strports.service(self.distrib_port, f)
+            s.setServiceParent(self)

Added: vendor/buildbot/current/buildbot/status/mail.py
===================================================================
--- vendor/buildbot/current/buildbot/status/mail.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/mail.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,362 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+# the email.MIMEMultipart module is only available in python-2.2.2 and later
+
+from email.Message import Message
+from email.Utils import formatdate
+from email.MIMEText import MIMEText
+try:
+    from email.MIMEMultipart import MIMEMultipart
+    canDoAttachments = True
+except ImportError:
+    canDoAttachments = False
+import urllib
+
+from twisted.internet import defer
+try:
+    from twisted.mail.smtp import sendmail # Twisted-2.0
+except ImportError:
+    from twisted.protocols.smtp import sendmail # Twisted-1.3
+from twisted.python import log
+
+from buildbot import interfaces, util
+from buildbot.twcompat import implements, providedBy
+from buildbot.status import base
+from buildbot.status.builder import FAILURE, SUCCESS, WARNINGS
+
+
+class Domain(util.ComparableMixin):
+    if implements:
+        implements(interfaces.IEmailLookup)
+    else:
+        __implements__ = interfaces.IEmailLookup
+    compare_attrs = ["domain"]
+
+    def __init__(self, domain):
+        assert "@" not in domain
+        self.domain = domain
+
+    def getAddress(self, name):
+        return name + "@" + self.domain
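+
+    # Hedged usage sketch: a Domain lookup simply appends "@<domain>" to a
+    # version-control user name, e.g. (made-up values):
+    #
+    #   lookup = Domain("example.org")
+    #   lookup.getAddress("alice")    # -> "alice@example.org"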
+
+
+class MailNotifier(base.StatusReceiverMultiService):
+    """This is a status notifier which sends email to a list of recipients
+    upon the completion of each build. It can be configured to only send out
+    mail for certain builds, and only send messages when the build fails, or
+    when it transitions from success to failure. It can also be configured to
+    include various build logs in each message.
+
+    By default, the message will be sent to the Interested Users list, which
+    includes all developers who made changes in the build. You can add
+    additional recipients with the extraRecipients argument.
+
+    To get a simple one-message-per-build (say, for a mailing list), use
+    sendToInterestedUsers=False, extraRecipients=['listaddr@example.org']
+
+    Each MailNotifier sends mail to a single set of recipients. To send
+    different kinds of mail to different recipients, use multiple
+    MailNotifiers.
+    """
+
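+    # A hedged master.cfg sketch of the mailing-list case described above
+    # (addresses are placeholders, 'c' is the conventional configuration
+    # dictionary):
+    #
+    #   from buildbot.status.mail import MailNotifier
+    #   mn = MailNotifier(fromaddr="buildbot@example.org",
+    #                     mode="problem",
+    #                     sendToInterestedUsers=False,
+    #                     extraRecipients=["builds@example.org"])
+    #   c['status'].append(mn)
+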
+    if implements:
+        implements(interfaces.IEmailSender)
+    else:
+        __implements__ = (interfaces.IEmailSender,
+                          base.StatusReceiverMultiService.__implements__)
+
+    compare_attrs = ["extraRecipients", "lookup", "fromaddr", "mode",
+                     "categories", "builders", "addLogs", "relayhost",
+                     "subject", "sendToInterestedUsers"]
+
+    def __init__(self, fromaddr, mode="all", categories=None, builders=None,
+                 addLogs=False, relayhost="localhost",
+                 subject="buildbot %(result)s in %(builder)s",
+                 lookup=None, extraRecipients=[],
+                 sendToInterestedUsers=True):
+        """
+        @type  fromaddr: string
+        @param fromaddr: the email address to be used in the 'From' header.
+        @type  sendToInterestedUsers: boolean
+        @param sendToInterestedUsers: if True (the default), send mail to all 
+                                      of the Interested Users. If False, only
+                                      send mail to the extraRecipients list.
+
+        @type  extraRecipients: tuple of string
+        @param extraRecipients: a list of email addresses to which messages
+                                should be sent (in addition to the
+                                InterestedUsers list, which includes any
+                                developers who made Changes that went into this
+                                build). It is a good idea to create a small
+                                mailing list and deliver to that, then let
+                                subscribers come and go as they please.
+
+        @type  subject: string
+        @param subject: a string to be used as the subject line of the message.
+                        %(builder)s will be replaced with the name of the
+                        builder which provoked the message.
+
+        @type  mode: string (defaults to all)
+        @param mode: one of:
+                     - 'all': send mail about all builds, passing and failing
+                     - 'failing': only send mail about builds which fail
+                     - 'problem': only send mail about a build which failed
+                     when the previous build passed
+
+        @type  builders: list of strings
+        @param builders: a list of builder names for which mail should be
+                         sent. Defaults to None (send mail for all builds).
+                         Use either builders or categories, but not both.
+
+        @type  categories: list of strings
+        @param categories: a list of category names to serve status
+                           information for. Defaults to None (all
+                           categories). Use either builders or categories,
+                           but not both.
+
+        @type  addLogs: boolean.
+        @param addLogs: if True, include all build logs as attachments to the
+                        messages.  These can be quite large. This can also be
+                        set to a list of log names, to send a subset of the
+                        logs. Defaults to False.
+
+        @type  relayhost: string
+        @param relayhost: the host to which the outbound SMTP connection
+                          should be made. Defaults to 'localhost'
+
+        @type  lookup:    implementor of L{IEmailLookup}
+        @param lookup:    object which provides IEmailLookup, which is
+                          responsible for mapping User names (which come from
+                          the VC system) into valid email addresses. If not
+                          provided, the notifier will only be able to send mail
+                          to the addresses in the extraRecipients list. Most of
+                          the time you can use a simple Domain instance. As a
+                          shortcut, you can pass as string: this will be
+                          treated as if you had provided Domain(str). For
+                          example, lookup='twistedmatrix.com' will allow mail
+                          to be sent to all developers whose SVN usernames
+                          match their twistedmatrix.com account names.
+        """
+
+        base.StatusReceiverMultiService.__init__(self)
+        assert isinstance(extraRecipients, (list, tuple))
+        for r in extraRecipients:
+            assert isinstance(r, str)
+            assert "@" in r # require full email addresses, not User names
+        self.extraRecipients = extraRecipients
+        self.sendToInterestedUsers = sendToInterestedUsers
+        self.fromaddr = fromaddr
+        self.mode = mode
+        self.categories = categories
+        self.builders = builders
+        self.addLogs = addLogs
+        self.relayhost = relayhost
+        self.subject = subject
+        if lookup is not None:
+            if type(lookup) is str:
+                lookup = Domain(lookup)
+            assert providedBy(lookup, interfaces.IEmailLookup)
+        self.lookup = lookup
+        self.watched = []
+        self.status = None
+
+        # you should either limit on builders or categories, not both
+        if self.builders is not None and self.categories is not None:
+            log.err("Please specify only builders or only categories, not both")
+            raise ValueError("MailNotifier: specify builders or categories, "
+                             "but not both")
+
+    def setServiceParent(self, parent):
+        """
+        @type  parent: L{buildbot.master.BuildMaster}
+        """
+        base.StatusReceiverMultiService.setServiceParent(self, parent)
+        self.setup()
+
+    def setup(self):
+        self.status = self.parent.getStatus()
+        self.status.subscribe(self)
+
+    def disownServiceParent(self):
+        self.status.unsubscribe(self)
+        for w in self.watched:
+            w.unsubscribe(self)
+        return base.StatusReceiverMultiService.disownServiceParent(self)
+
+    def builderAdded(self, name, builder):
+        # only subscribe to builders we are interested in
+        if self.categories != None and builder.category not in self.categories:
+            return None
+
+        self.watched.append(builder)
+        return self # subscribe to this builder
+
+    def builderRemoved(self, name):
+        pass
+
+    def builderChangedState(self, name, state):
+        pass
+    def buildStarted(self, name, build):
+        pass
+    def buildFinished(self, name, build, results):
+        # here is where we actually do something.
+        builder = build.getBuilder()
+        if self.builders is not None and name not in self.builders:
+            return # ignore this build
+        if self.categories is not None and \
+               builder.category not in self.categories:
+            return # ignore this build
+
+        if self.mode == "failing" and results != FAILURE:
+            return
+        if self.mode == "problem":
+            if results != FAILURE:
+                return
+            prev = build.getPreviousBuild()
+            if prev and prev.getResults() == FAILURE:
+                return
+        # for testing purposes, buildMessage returns a Deferred that fires
+        # when the mail has been sent. To help unit tests, we return that
+        # Deferred here even though the normal IStatusReceiver.buildFinished
+        # signature doesn't do anything with it. If that changes (if
+        # .buildFinished's return value becomes significant), we need to
+        # rearrange this.
+        return self.buildMessage(name, build, results)
+
+    def buildMessage(self, name, build, results):
+        text = ""
+        if self.mode == "all":
+            text += "The Buildbot has finished a build of %s.\n" % name
+        elif self.mode == "failing":
+            text += "The Buildbot has detected a failed build of %s.\n" % name
+        else:
+            text += "The Buildbot has detected a new failure of %s.\n" % name
+        buildurl = self.status.getURLForThing(build)
+        if buildurl:
+            text += "Full details are available at:\n %s\n" % buildurl
+        text += "\n"
+
+        url = self.status.getBuildbotURL()
+        if url:
+            text += "Buildbot URL: %s\n\n" % urllib.quote(url, '/:')
+
+        text += "Buildslave for this Build: %s\n\n" % build.getSlavename()
+        text += "Build Reason: %s\n" % build.getReason()
+
+        patch = None
+        ss = build.getSourceStamp()
+        if ss is None:
+            source = "unavailable"
+        else:
+            branch, revision, patch = ss
+            source = ""
+            if branch:
+                source += "[branch %s] " % branch
+            if revision:
+                source += revision
+            else:
+                source += "HEAD"
+            if patch is not None:
+                source += " (plus patch)"
+        text += "Build Source Stamp: %s\n" % source
+
+        text += "Blamelist: %s\n" % ",".join(build.getResponsibleUsers())
+
+        # TODO: maybe display changes here? or in an attachment?
+        text += "\n"
+
+        t = build.getText()
+        if t:
+            t = ": " + " ".join(t)
+        else:
+            t = ""
+
+        if results == SUCCESS:
+            text += "Build succeeded!\n"
+            res = "success"
+        elif results == WARNINGS:
+            text += "Build Had Warnings%s\n" % t
+            res = "warnings"
+        else:
+            text += "BUILD FAILED%s\n" % t
+            res = "failure"
+
+        if self.addLogs and build.getLogs():
+            text += "Logs are attached.\n"
+
+        # TODO: it would be nice to provide a URL for the specific build
+        # here. That involves some coordination with html.Waterfall .
+        # Ideally we could do:
+        #  helper = self.parent.getServiceNamed("html")
+        #  if helper:
+        #      url = helper.getURLForBuild(build)
+
+        text += "\n"
+        text += "sincerely,\n"
+        text += " -The Buildbot\n"
+        text += "\n"
+
+        haveAttachments = False
+        if patch or self.addLogs:
+            haveAttachments = True
+            if not canDoAttachments:
+                log.msg("warning: I want to send mail with attachments, "
+                        "but this python is too old to have "
+                        "email.MIMEMultipart . Please upgrade to python-2.3 "
+                        "or newer to enable addLogs=True")
+
+        if haveAttachments and canDoAttachments:
+            m = MIMEMultipart()
+            m.attach(MIMEText(text))
+        else:
+            m = Message()
+            m.set_payload(text)
+
+        m['Date'] = formatdate(localtime=True)
+        m['Subject'] = self.subject % { 'result': res,
+                                        'builder': name,
+                                        }
+        m['From'] = self.fromaddr
+        # m['To'] is added later
+
+        if patch:
+            a = MIMEText(patch)
+            a.add_header('Content-Disposition', "attachment",
+                         filename="source patch")
+            m.attach(a)
+        if self.addLogs:
+            for log in build.getLogs():
+                name = "%s.%s" % (log.getStep().getName(),
+                                  log.getName())
+                a = MIMEText(log.getText())
+                a.add_header('Content-Disposition', "attachment",
+                             filename=name)
+                m.attach(a)
+
+        # now, who is this message going to?
+        dl = []
+        recipients = self.extraRecipients[:]
+        if self.sendToInterestedUsers and self.lookup:
+            for u in build.getInterestedUsers():
+                d = defer.maybeDeferred(self.lookup.getAddress, u)
+                d.addCallback(recipients.append)
+                dl.append(d)
+        d = defer.DeferredList(dl)
+        d.addCallback(self._gotRecipients, recipients, m)
+        return d
+
+    def _gotRecipients(self, res, rlist, m):
+        recipients = []
+        for r in rlist:
+            if r is not None and r not in recipients:
+                recipients.append(r)
+        recipients.sort()
+        m['To'] = ", ".join(recipients)
+        return self.sendMessage(m, recipients)
+
+    def sendMessage(self, m, recipients):
+        s = m.as_string()
+        ds = []
+        log.msg("sending mail (%d bytes) to" % len(s), recipients)
+        for recip in recipients:
+            ds.append(sendmail(self.relayhost, self.fromaddr, recip, s))
+        return defer.DeferredList(ds)
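
For reference, the recipient handling in MailNotifier.buildMessage() and
_gotRecipients() above is a small Twisted pattern: each address lookup may
return a plain string or a Deferred, so defer.maybeDeferred() normalizes both
cases, and a DeferredList fires once every lookup has appended its result. A
minimal standalone sketch of that pattern (lookup_address and the example
addresses are illustrative, not part of this import):

    from twisted.internet import defer

    def lookup_address(user):
        # hypothetical synchronous lookup; a real IEmailLookup implementation
        # could just as well return a Deferred, which maybeDeferred also handles
        return "%s@example.org" % user

    def gather_recipients(users, extra):
        recipients = list(extra)
        dl = []
        for u in users:
            d = defer.maybeDeferred(lookup_address, u)
            d.addCallback(recipients.append)
            dl.append(d)
        done = defer.DeferredList(dl)
        # once every lookup has fired, drop failed lookups (None), de-duplicate,
        # and sort, mirroring what _gotRecipients() does before setting m['To']
        done.addCallback(lambda _: sorted(set(r for r in recipients
                                              if r is not None)))
        return done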

Added: vendor/buildbot/current/buildbot/status/progress.py
===================================================================
--- vendor/buildbot/current/buildbot/status/progress.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/progress.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,308 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+from twisted.internet import reactor
+from twisted.spread import pb
+from twisted.python import log
+from buildbot import util
+
+class StepProgress:
+    """I keep track of how much progress a single BuildStep has made.
+
+    Progress is measured along various axes. Time consumed is one that is
+    available for all steps. Amount of command output is another, and may be
+    better quantified by scanning the output for markers to derive number of
+    files compiled, directories walked, tests run, etc.
+
+    I am created when the build begins, and given to a BuildProgress object
+    so it can track the overall progress of the whole build.
+
+    """
+
+    startTime = None
+    stopTime = None
+    expectedTime = None
+    buildProgress = None
+    debug = False
+
+    def __init__(self, name, metricNames):
+        self.name = name
+        self.progress = {}
+        self.expectations = {}
+        for m in metricNames:
+            self.progress[m] = None
+            self.expectations[m] = None
+
+    def setBuildProgress(self, bp):
+        self.buildProgress = bp
+
+    def setExpectations(self, metrics):
+        """The step can call this to explicitly set a target value for one
+        of its metrics. E.g., ShellCommands knows how many commands it will
+        execute, so it could set the 'commands' expectation."""
+        for metric, value in metrics.items():
+            self.expectations[metric] = value
+        self.buildProgress.newExpectations()
+
+    def setExpectedTime(self, seconds):
+        self.expectedTime = seconds
+        self.buildProgress.newExpectations()
+
+    def start(self):
+        if self.debug: print "StepProgress.start[%s]" % self.name
+        self.startTime = util.now()
+
+    def setProgress(self, metric, value):
+        """The step calls this as progress is made along various axes."""
+        if self.debug:
+            print "setProgress[%s][%s] = %s" % (self.name, metric, value)
+        self.progress[metric] = value
+        if self.debug:
+            r = self.remaining()
+            print " step remaining:", r
+        self.buildProgress.newProgress()
+
+    def finish(self):
+        """This stops the 'time' metric and marks the step as finished
+        overall. It should be called after the last .setProgress has been
+        done for each axis."""
+        if self.debug: print "StepProgress.finish[%s]" % self.name
+        self.stopTime = util.now()
+        self.buildProgress.stepFinished(self.name)
+
+    def totalTime(self):
+        if self.startTime != None and self.stopTime != None:
+            return self.stopTime - self.startTime
+
+    def remaining(self):
+        if self.startTime == None:
+            return self.expectedTime
+        if self.stopTime != None:
+            return 0 # already finished
+        # TODO: replace this with cleverness that graphs each metric vs.
+        # time, then finds the inverse function. Will probably need to save
+        # a timestamp with each setProgress update, when finished, go back
+        # and find the 2% transition points, then save those 50 values in a
+        # list. On the next build, do linear interpolation between the two
+        # closest samples to come up with a percentage represented by that
+        # metric.
+
+        # TODO: If no other metrics are available, just go with elapsed
+        # time. Given the non-time-uniformity of text output from most
+        # steps, this would probably be better than the text-percentage
+        # scheme currently implemented.
+
+        percentages = []
+        for metric, value in self.progress.items():
+            expectation = self.expectations[metric]
+            if value != None and expectation != None:
+                p = 1.0 * value / expectation
+                percentages.append(p)
+        if percentages:
+            avg = reduce(lambda x,y: x+y, percentages) / len(percentages)
+            if avg > 1.0:
+                # overdue
+                avg = 1.0
+            if avg < 0.0:
+                avg = 0.0
+        if percentages and self.expectedTime != None:
+            return self.expectedTime - (avg * self.expectedTime)
+        if self.expectedTime is not None:
+            # fall back to pure time
+            return self.expectedTime - (util.now() - self.startTime)
+        return None # no idea
+
+
+class WatcherState:
+    def __init__(self, interval):
+        self.interval = interval
+        self.timer = None
+        self.needUpdate = 0
+
+class BuildProgress(pb.Referenceable):
+    """I keep track of overall build progress. I hold a list of StepProgress
+    objects.
+    """
+
+    def __init__(self, stepProgresses):
+        self.steps = {}
+        for s in stepProgresses:
+            self.steps[s.name] = s
+            s.setBuildProgress(self)
+        self.finishedSteps = []
+        self.watchers = {}
+        self.debug = 0
+
+    def setExpectationsFrom(self, exp):
+        """Set our expectations from the builder's Expectations object."""
+        for name, metrics in exp.steps.items():
+            s = self.steps[name]
+            s.setExpectedTime(exp.times[name])
+            s.setExpectations(exp.steps[name])
+
+    def newExpectations(self):
+        """Call this when one of the steps has changed its expectations.
+        This should trigger us to update our ETA value and notify any
+        subscribers."""
+        pass # subscribers are not implemented: they just poll
+
+    def stepFinished(self, stepname):
+        assert(stepname not in self.finishedSteps)
+        self.finishedSteps.append(stepname)
+        if len(self.finishedSteps) == len(self.steps.keys()):
+            self.sendLastUpdates()
+            
+    def newProgress(self):
+        r = self.remaining()
+        if self.debug:
+            print " remaining:", r
+        if r != None:
+            self.sendAllUpdates()
+        
+    def remaining(self):
+        # sum eta of all steps
+        sum = 0
+        for name, step in self.steps.items():
+            rem = step.remaining()
+            if rem == None:
+                return None # not sure
+            sum += rem
+        return sum
+    def eta(self):
+        left = self.remaining()
+        if left == None:
+            return None # not sure
+        done = util.now() + left
+        return done
+
+
+    def remote_subscribe(self, remote, interval=5):
+        # [interval, timer, needUpdate]
+        # don't send an update more than once per interval
+        self.watchers[remote] = WatcherState(interval)
+        remote.notifyOnDisconnect(self.removeWatcher)
+        self.updateWatcher(remote)
+        self.startTimer(remote)
+        log.msg("BuildProgress.remote_subscribe(%s)" % remote)
+    def remote_unsubscribe(self, remote):
+        # TODO: this doesn't work. I think 'remote' will always be different
+        # than the object that appeared in _subscribe.
+        log.msg("BuildProgress.remote_unsubscribe(%s)" % remote)
+        self.removeWatcher(remote)
+        #remote.dontNotifyOnDisconnect(self.removeWatcher)
+    def removeWatcher(self, remote):
+        #log.msg("removeWatcher(%s)" % remote)
+        try:
+            timer = self.watchers[remote].timer
+            if timer:
+                timer.cancel()
+            del self.watchers[remote]
+        except KeyError:
+            log.msg("Weird, removeWatcher on non-existent subscriber:",
+                    remote)
+    def sendAllUpdates(self):
+        for r in self.watchers.keys():
+            self.updateWatcher(r)
+    def updateWatcher(self, remote):
+        # an update wants to go to this watcher. Send it if we can, otherwise
+        # queue it for later
+        w = self.watchers[remote]
+        if not w.timer:
+            # no timer, so send update now and start the timer
+            self.sendUpdate(remote)
+            self.startTimer(remote)
+        else:
+            # timer is running, just mark as needing an update
+            w.needUpdate = 1
+    def startTimer(self, remote):
+        w = self.watchers[remote]
+        timer = reactor.callLater(w.interval, self.watcherTimeout, remote)
+        w.timer = timer
+    def sendUpdate(self, remote, last=0):
+        self.watchers[remote].needUpdate = 0
+        #text = self.asText() # TODO: not text, duh
+        try:
+            remote.callRemote("progress", self.remaining())
+            if last:
+                remote.callRemote("finished", self)
+        except:
+            log.deferr()
+            self.removeWatcher(remote)
+
+    def watcherTimeout(self, remote):
+        w = self.watchers.get(remote, None)
+        if not w:
+            return # went away
+        w.timer = None
+        if w.needUpdate:
+            self.sendUpdate(remote)
+            self.startTimer(remote)
+    def sendLastUpdates(self):
+        for remote in self.watchers.keys():
+            self.sendUpdate(remote, 1)
+            self.removeWatcher(remote)
+
+        
+class Expectations:
+    debug = False
+    # decay=1.0 ignores all but the last build
+    # 0.9 is short time constant. 0.1 is very long time constant
+    # TODO: let decay be specified per-metric
+    decay = 0.5
+
+    def __init__(self, buildprogress):
+        """Create us from a successful build. We will expect each step to
+        take as long as it did in that build."""
+
+        # .steps maps stepname to dict2
+        # dict2 maps metricname to final end-of-step value
+        self.steps = {}
+
+        # .times maps stepname to per-step elapsed time
+        self.times = {}
+
+        for name, step in buildprogress.steps.items():
+            self.steps[name] = {}
+            for metric, value in step.progress.items():
+                self.steps[name][metric] = value
+            self.times[name] = None
+            if step.startTime is not None and step.stopTime is not None:
+                self.times[name] = step.stopTime - step.startTime
+
+    def wavg(self, old, current):
+        if old is None:
+            return current
+        if current is None:
+            return old
+        else:
+            return (current * self.decay) + (old * (1 - self.decay))
+
+    def update(self, buildprogress):
+        for name, stepprogress in buildprogress.steps.items():
+            old = self.times[name]
+            current = stepprogress.totalTime()
+            if current == None:
+                log.msg("Expectations.update: current[%s] was None!" % name)
+                continue
+            new = self.wavg(old, current)
+            self.times[name] = new
+            if self.debug:
+                print "new expected time[%s] = %s, old %s, cur %s" % \
+                      (name, new, old, current)
+            
+            for metric, current in stepprogress.progress.items():
+                old = self.steps[name][metric]
+                new = self.wavg(old, current)
+                if self.debug:
+                    print "new expectation[%s][%s] = %s, old %s, cur %s" % \
+                          (name, metric, new, old, current)
+                self.steps[name][metric] = new
+
+    def expectedBuildTime(self):
+        if None in self.times.values():
+            return None
+        #return sum(self.times.values())
+        # python-2.2 doesn't have 'sum'. TODO: drop python-2.2 support
+        s = 0
+        for v in self.times.values():
+            s += v
+        return s
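
The Expectations class above blends each new build's measurements into its
stored estimates with a decay-weighted average instead of keeping a full
history. A standalone illustration of that update rule (the sample step times
are made up):

    def wavg(old, current, decay=0.5):
        # mirror of Expectations.wavg(): None means "no data yet" on either side
        if old is None:
            return current
        if current is None:
            return old
        return (current * decay) + (old * (1 - decay))

    expected = None
    for elapsed in [120.0, 100.0, 90.0]:   # per-build elapsed times, in seconds
        expected = wavg(expected, elapsed)
    # expected is now 100.0 (120 -> 110 -> 100): with decay=0.5 each new build
    # pulls the estimate halfway toward its own time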

Added: vendor/buildbot/current/buildbot/status/tests.py
===================================================================
--- vendor/buildbot/current/buildbot/status/tests.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/tests.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,75 @@
+#! /usr/bin/python
+
+from twisted.web import resource
+from twisted.web.error import NoResource
+from twisted.web.html import PRE
+
+# these are our test result types. Steps are responsible for mapping results
+# into these values.
+SKIP, EXPECTED_FAILURE, FAILURE, ERROR, UNEXPECTED_SUCCESS, SUCCESS = \
+      "skip", "expected failure", "failure", "error", "unexpected success", \
+      "success"
+UNKNOWN = "unknown" # catch-all
+
+
+class OneTest(resource.Resource):
+    isLeaf = 1
+    def __init__(self, parent, testName, results):
+        self.parent = parent
+        self.testName = testName
+        self.resultType, self.results = results
+
+    def render(self, request):
+        request.setHeader("content-type", "text/html")
+        if request.method == "HEAD":
+            request.setHeader("content-length", len(self.html(request)))
+            return ''
+        return self.html(request)
+
+    def html(self, request):
+        # turn ourselves into HTML
+        raise NotImplementedError
+
+class TestResults(resource.Resource):
+    oneTestClass = OneTest
+    def __init__(self):
+        resource.Resource.__init__(self)
+        self.tests = {}
+    def addTest(self, testName, resultType, results=None):
+        self.tests[testName] = (resultType, results)
+    # TODO: .setName and .delete should be used on our Swappable
+    def countTests(self):
+        return len(self.tests)
+    def countFailures(self):
+        failures = 0
+        for t in self.tests.values():
+            if t[0] in (FAILURE, ERROR):
+                failures += 1
+        return failures
+    def summary(self):
+        """Return a short list of text strings as a summary, suitable for
+        inclusion in an Event"""
+        return ["some", "tests"]
+    def describeOneTest(self, testname):
+        return "%s: %s\n" % (testname, self.tests[testname][0])
+    def html(self):
+        data = "<html>\n<head><title>Test Results</title></head>\n"
+        data += "<body>\n"
+        data += "<pre>\n"
+        tests = self.tests.keys()
+        tests.sort()
+        for testname in tests:
+            data += self.describeOneTest(testname)
+        data += "</pre>\n"
+        data += "</body></html>\n"
+        return data
+    def render(self, request):
+        request.setHeader("content-type", "text/html")
+        if request.method == "HEAD":
+            request.setHeader("content-length", len(self.html()))
+            return ''
+        return self.html()
+    def getChild(self, path, request):
+        if self.tests.has_key(path):
+            return self.oneTestClass(self, path, self.tests[path])
+        return NoResource("No such test '%s'" % path)
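
TestResults above is essentially a dictionary of per-test outcomes with a
trivial web view. A brief usage sketch (assuming this buildbot.status.tests
module and a Twisted of the same era are importable; the test names are made
up):

    from buildbot.status.tests import TestResults, SUCCESS, FAILURE, ERROR

    r = TestResults()
    r.addTest("test_parse", SUCCESS)
    r.addTest("test_timeout", FAILURE, results="traceback text would go here")
    r.addTest("test_crash", ERROR)
    assert r.countTests() == 3
    assert r.countFailures() == 2   # FAILURE and ERROR both count as failures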

Added: vendor/buildbot/current/buildbot/status/tinderbox.py
===================================================================
--- vendor/buildbot/current/buildbot/status/tinderbox.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/tinderbox.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,176 @@
+
+from email.Message import Message
+from email.Utils import formatdate
+
+from twisted.internet import defer
+
+from buildbot import interfaces
+from buildbot.twcompat import implements
+from buildbot.status import base, mail
+from buildbot.status.builder import SUCCESS, WARNINGS
+
+import zlib, bz2, base64
+
+# TODO: docs, maybe a test of some sort just to make sure it actually imports
+# and can format email without raising an exception.
+
+class TinderboxMailNotifier(mail.MailNotifier):
+    """This is a Tinderbox status notifier. It can send e-mail to a number of
+    different tinderboxes or people. E-mails are sent at the beginning and
+    upon completion of each build. It can be configured to send out e-mails
+    for only certain builds.
+
+    The most basic usage is as follows::
+        TinderboxMailNotifier(fromaddr="buildbot at localhost",
+                              tree="MyTinderboxTree",
+                              extraRecipients=["tinderboxdaemon at host.org"])
+
+    The builder name (as specified in master.cfg) is used as the "build"
+    tinderbox option.
+
+    """
+    if implements:
+        implements(interfaces.IEmailSender)
+    else:
+        __implements__ = (interfaces.IEmailSender,
+                          base.StatusReceiverMultiService.__implements__)
+
+    compare_attrs = ["extraRecipients", "fromaddr", "categories", "builders",
+                     "addLogs", "relayhost", "subject", "binaryURL", "tree",
+                     "logCompression"]
+
+    def __init__(self, fromaddr, tree, extraRecipients,
+                 categories=None, builders=None, relayhost="localhost",
+                 subject="buildbot %(result)s in %(builder)s", binaryURL="",
+                 logCompression=""):
+        """
+        @type  fromaddr: string
+        @param fromaddr: the email address to be used in the 'From' header.
+
+        @type  tree: string
+        @param tree: The Tinderbox tree to post to.
+
+        @type  extraRecipients: tuple of string
+        @param extraRecipients: E-mail addresses of recipients. This should at
+                                least include the tinderbox daemon.
+
+        @type  categories: list of strings
+        @param categories: a list of category names to serve status
+                           information for. Defaults to None (all
+                           categories). Use either builders or categories,
+                           but not both.
+
+        @type  builders: list of strings
+        @param builders: a list of builder names for which mail should be
+                         sent. Defaults to None (send mail for all builds).
+                         Use either builders or categories, but not both.
+
+        @type  relayhost: string
+        @param relayhost: the host to which the outbound SMTP connection
+                          should be made. Defaults to 'localhost'
+
+        @type  subject: string
+        @param subject: a string to be used as the subject line of the message.
+                        %(builder)s will be replaced with the name of the
+                        builder which provoked the message.
+                        This parameter is not significant for the tinderbox
+                        daemon.
+
+        @type  binaryURL: string
+        @param binaryURL: If specified, this should be the location of the
+                          final binary for a build
+                          (e.g. http://www.myproject.org/nightly/08-08-2006.tgz).
+                          It will be posted to the Tinderbox.
+
+        @type  logCompression: string
+        @param logCompression: The type of compression to use on the log.
+                               Valid options are "bzip2" and "gzip". gzip is
+                               only known to work on Python 2.4 and above.
+        """
+
+        mail.MailNotifier.__init__(self, fromaddr, categories=categories,
+                                   builders=builders, relayhost=relayhost,
+                                   subject=subject,
+                                   extraRecipients=extraRecipients,
+                                   sendToInterestedUsers=False)
+        self.tree = tree
+        self.binaryURL = binaryURL
+        self.logCompression = logCompression
+
+    def buildStarted(self, name, build):
+        self.buildMessage(name, build, "building")
+
+    def buildMessage(self, name, build, results):
+        text = ""
+        res = ""
+        # shortform
+        t = "tinderbox:"
+
+        text += "%s tree: %s\n" % (t, self.tree)
+        # the start time
+        # getTimes() returns a fractional time that tinderbox doesn't understand
+        text += "%s builddate: %s\n" % (t, int(build.getTimes()[0]))
+        text += "%s status: " % t
+
+        if results == "building":
+            res = "building"
+            text += res
+        elif results == SUCCESS:
+            res = "success"
+            text += res
+        elif results == WARNINGS:
+            res = "testfailed"
+            text += res
+        else:
+            res += "busted"
+            text += res
+
+        text += "\n"
+
+        text += "%s build: %s\n" % (t, name)
+        text += "%s errorparser: unix\n" % t # always use the unix errorparser
+
+        # if the build just started...
+        if results == "building":
+            text += "%s END\n" % t
+        # if the build finished...
+        else:
+            text += "%s binaryurl: %s\n" % (t, self.binaryURL)
+            text += "%s logcompression: %s\n" % (t, self.logCompression)
+
+            # logs will always be appended
+            tinderboxLogs = ""
+            for log in build.getLogs():
+                l = ""
+                logEncoding = ""
+                if self.logCompression == "bzip2":
+                    compressedLog = bz2.compress(log.getText())
+                    l = base64.encodestring(compressedLog)
+                    logEncoding = "base64"
+                elif self.logCompression == "gzip":
+                    compressedLog = zlib.compress(log.getText())
+                    l = base64.encodestring(compressedLog)
+                    logEncoding = "base64"
+                else:
+                    l = log.getText()
+                tinderboxLogs += l
+
+            text += "%s logencoding: %s\n" % (t, logEncoding)
+            text += "%s END\n\n" % t
+            text += tinderboxLogs
+            text += "\n"
+
+        m = Message()
+        m.set_payload(text)
+
+        m['Date'] = formatdate(localtime=True)
+        m['Subject'] = self.subject % { 'result': res,
+                                        'builder': name,
+                                        }
+        m['From'] = self.fromaddr
+        # m['To'] is added later
+
+        d = defer.DeferredList([])
+        d.addCallback(self._gotRecipients, self.extraRecipients, m)
+        return d
+
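
When logCompression="bzip2" is configured, buildMessage() above bz2-compresses
each log and base64-armors it so it survives transport in the mail body; the
"tinderbox: logencoding: base64" line tells the receiving daemon how to undo
it. A standalone sketch of that round trip (Python 2 stdlib only; the log text
is made up):

    import bz2, base64

    log_text = "make[1]: Entering directory `/builds/work'\n" * 50
    encoded = base64.encodestring(bz2.compress(log_text))
    # the receiving end reverses the two steps
    assert bz2.decompress(base64.decodestring(encoded)) == log_text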

Added: vendor/buildbot/current/buildbot/status/words.py
===================================================================
--- vendor/buildbot/current/buildbot/status/words.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/status/words.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,614 @@
+#! /usr/bin/python
+
+# code to deliver build status through twisted.words (instant messaging
+# protocols: irc, etc)
+
+import re, shlex
+
+from twisted.internet import protocol, reactor
+try:
+    # Twisted-2.0
+    from twisted.words.protocols import irc
+except ImportError:
+    # Twisted-1.3
+    from twisted.protocols import irc
+from twisted.python import log, failure
+from twisted.application import internet
+
+from buildbot import interfaces, util
+from buildbot import version
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.status import base
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
+from buildbot.scripts.runner import ForceOptions
+
+class UsageError(ValueError):
+    def __init__(self, string = "Invalid usage", *more):
+        ValueError.__init__(self, string, *more)
+
+class IrcBuildRequest:
+    hasStarted = False
+    timer = None
+
+    def __init__(self, parent, reply):
+        self.parent = parent
+        self.reply = reply
+        self.timer = reactor.callLater(5, self.soon)
+
+    def soon(self):
+        del self.timer
+        if not self.hasStarted:
+            self.parent.reply(self.reply,
+                              "The build has been queued, I'll give a shout"
+                              " when it starts")
+
+    def started(self, c):
+        self.hasStarted = True
+        if self.timer:
+            self.timer.cancel()
+            del self.timer
+        s = c.getStatus()
+        eta = s.getETA()
+        response = "build #%d forced" % s.getNumber()
+        if eta is not None:
+            response = "build forced [ETA %s]" % self.parent.convertTime(eta)
+        self.parent.reply(self.reply, response)
+        self.parent.reply(self.reply,
+                          "I'll give a shout when the build finishes")
+        d = s.waitUntilFinished()
+        d.addCallback(self.parent.buildFinished, self.reply)
+
+
+class IrcStatusBot(irc.IRCClient):
+    silly = {
+        "What happen ?": "Somebody set up us the bomb.",
+        "It's You !!": ["How are you gentlemen !!",
+                        "All your base are belong to us.",
+                        "You are on the way to destruction."],
+        "What you say !!": ["You have no chance to survive make your time.",
+                            "HA HA HA HA ...."],
+        }
+    def __init__(self, nickname, password, channels, status, categories):
+        """
+        @type  nickname: string
+        @param nickname: the nickname by which this bot should be known
+        @type  password: string
+        @param password: the password to use for identifying with Nickserv
+        @type  channels: list of strings
+        @param channels: the bot will maintain a presence in these channels
+        @type  status: L{buildbot.status.builder.Status}
+        @param status: the build master's Status object, through which the
+                       bot retrieves all status information
+        """
+        self.nickname = nickname
+        self.channels = channels
+        self.password = password
+        self.status = status
+        self.categories = categories
+        self.counter = 0
+        self.hasQuit = 0
+
+    def signedOn(self):
+        if self.password:
+            self.msg("Nickserv", "IDENTIFY " + self.password)
+        for c in self.channels:
+            self.join(c)
+    def joined(self, channel):
+        log.msg("I have joined", channel)
+    def left(self, channel):
+        log.msg("I have left", channel)
+    def kickedFrom(self, channel, kicker, message):
+        log.msg("I have been kicked from %s by %s: %s" % (channel,
+                                                          kicker,
+                                                          message))
+
+    # input
+    def privmsg(self, user, channel, message):
+        user = user.split('!', 1)[0] # rest is ~user at hostname
+        # channel is '#twisted' or 'buildbot' (for private messages)
+        channel = channel.lower()
+        #print "privmsg:", user, channel, message
+        if channel == self.nickname:
+            # private message
+            message = "%s: %s" % (self.nickname, message)
+            reply = user
+        else:
+            reply = channel
+        if message.startswith("%s:" % self.nickname):
+            message = message[len("%s:" % self.nickname):]
+
+            message = message.lstrip()
+            if self.silly.has_key(message):
+                return self.doSilly(user, reply, message)
+
+            parts = message.split(' ', 1)
+            if len(parts) == 1:
+                parts = parts + ['']
+            cmd, args = parts
+            log.msg("irc command", cmd)
+
+            meth = self.getCommandMethod(cmd)
+            if not meth and message[-1] == '!':
+                meth = self.command_EXCITED
+
+            error = None
+            try:
+                if meth:
+                    meth(user, reply, args.strip())
+            except UsageError, e:
+                self.reply(reply, str(e))
+            except:
+                f = failure.Failure()
+                log.err(f)
+                error = "Something bad happened (see logs): %s" % f.type
+
+            if error:
+                try:
+                    self.reply(reply, error)
+                except:
+                    log.err()
+
+            #self.say(channel, "count %d" % self.counter)
+            self.counter += 1
+    def reply(self, dest, message):
+        # maybe self.notice(dest, message) instead?
+        self.msg(dest, message)
+
+    def getCommandMethod(self, command):
+        meth = getattr(self, 'command_' + command.upper(), None)
+        return meth
+
+    def getBuilder(self, which):
+        try:
+            b = self.status.getBuilder(which)
+        except KeyError:
+            raise UsageError, "no such builder '%s'" % which
+        return b
+
+    def getControl(self, which):
+        if not self.control:
+            raise UsageError("builder control is not enabled")
+        try:
+            bc = self.control.getBuilder(which)
+        except KeyError:
+            raise UsageError("no such builder '%s'" % which)
+        return bc
+
+    def getAllBuilders(self):
+        """
+        @rtype: list of L{buildbot.process.builder.Builder}
+        """
+        names = self.status.getBuilderNames(categories=self.categories)
+        names.sort()
+        builders = [self.status.getBuilder(n) for n in names]
+        return builders
+
+    def convertTime(self, seconds):
+        if seconds < 60:
+            return "%d seconds" % seconds
+        minutes = int(seconds / 60)
+        seconds = seconds - 60*minutes
+        if minutes < 60:
+            return "%dm%02ds" % (minutes, seconds)
+        hours = int(minutes / 60)
+        minutes = minutes - 60*hours
+        return "%dh%02dm%02ds" % (hours, minutes, seconds)
+
+    def doSilly(self, user, reply, message):
+        response = self.silly[message]
+        if type(response) != type([]):
+            response = [response]
+        when = 0.5
+        for r in response:
+            reactor.callLater(when, self.reply, reply, r)
+            when += 2.5
+
+    def command_HELLO(self, user, reply, args):
+        self.reply(reply, "yes?")
+
+    def command_VERSION(self, user, reply, args):
+        self.reply(reply, "buildbot-%s at your service" % version)
+
+    def command_LIST(self, user, reply, args):
+        args = args.split()
+        if len(args) == 0:
+            raise UsageError, "try 'list builders'"
+        if args[0] == 'builders':
+            builders = self.getAllBuilders()
+            str = "Configured builders: "
+            for b in builders:
+                str += b.name
+                state = b.getState()[0]
+                if state == 'offline':
+                    str += "[offline]"
+                str += " "
+            str = str.rstrip()
+            self.reply(reply, str)
+            return
+    command_LIST.usage = "list builders - List configured builders"
+
+    def command_STATUS(self, user, reply, args):
+        args = args.split()
+        if len(args) == 0:
+            which = "all"
+        elif len(args) == 1:
+            which = args[0]
+        else:
+            raise UsageError, "try 'status <builder>'"
+        if which == "all":
+            builders = self.getAllBuilders()
+            for b in builders:
+                self.emit_status(reply, b.name)
+            return
+        self.emit_status(reply, which)
+    command_STATUS.usage = "status [<which>] - List status of a builder (or all builders)"
+
+    def command_WATCH(self, user, reply, args):
+        args = args.split()
+        if len(args) != 1:
+            raise UsageError("try 'watch <builder>'")
+        which = args[0]
+        b = self.getBuilder(which)
+        builds = b.getCurrentBuilds()
+        if not builds:
+            self.reply(reply, "there are no builds currently running")
+            return
+        for build in builds:
+            assert not build.isFinished()
+            d = build.waitUntilFinished()
+            d.addCallback(self.buildFinished, reply)
+            r = "watching build %s #%d until it finishes" \
+                % (which, build.getNumber())
+            eta = build.getETA()
+            if eta is not None:
+                r += " [%s]" % self.convertTime(eta)
+            r += ".."
+            self.reply(reply, r)
+    command_WATCH.usage = "watch <which> - announce the completion of an active build"
+
+    def buildFinished(self, b, reply):
+        results = {SUCCESS: "Success",
+                   WARNINGS: "Warnings",
+                   FAILURE: "Failure",
+                   EXCEPTION: "Exception",
+                   }
+
+        # only notify about builders we are interested in
+        builder = b.getBuilder()
+        log.msg('builder %r in category %s finished' % (builder,
+                                                        builder.category))
+        if (self.categories != None and
+            builder.category not in self.categories):
+            return
+
+        r = "Hey! build %s #%d is complete: %s" % \
+            (b.getBuilder().getName(),
+             b.getNumber(),
+             results.get(b.getResults(), "??"))
+        r += " [%s]" % " ".join(b.getText())
+        self.reply(reply, r)
+        buildurl = self.status.getURLForThing(b)
+        if buildurl:
+            self.reply(reply, "Build details are at %s" % buildurl)
+
+    def command_FORCE(self, user, reply, args):
+        args = shlex.split(args) # TODO: this requires python2.3 or newer
+        if args.pop(0) != "build":
+            raise UsageError("try 'force build WHICH <REASON>'")
+        opts = ForceOptions()
+        opts.parseOptions(args)
+        
+        which = opts['builder']
+        branch = opts['branch']
+        revision = opts['revision']
+        reason = opts['reason']
+
+        # keep weird stuff out of the branch and revision strings. TODO:
+        # centralize this somewhere.
+        if branch and not re.match(r'^[\w\.\-\/]*$', branch):
+            log.msg("bad branch '%s'" % branch)
+            self.reply(reply, "sorry, bad branch '%s'" % branch)
+            return
+        if revision and not re.match(r'^[\w\.\-\/]*$', revision):
+            log.msg("bad revision '%s'" % revision)
+            self.reply(reply, "sorry, bad revision '%s'" % revision)
+            return
+
+        bc = self.getControl(which)
+
+        who = None # TODO: if we can authenticate that a particular User
+                   # asked for this, use User Name instead of None so they'll
+                   # be informed of the results.
+        # TODO: or, monitor this build and announce the results through the
+        # 'reply' argument.
+        r = "forced: by IRC user <%s>: %s" % (user, reason)
+        # TODO: maybe give certain users the ability to request builds of
+        # certain branches
+        s = SourceStamp(branch=branch, revision=revision)
+        req = BuildRequest(r, s, which)
+        try:
+            bc.requestBuildSoon(req)
+        except interfaces.NoSlaveError:
+            self.reply(reply,
+                       "sorry, I can't force a build: all slaves are offline")
+            return
+        ireq = IrcBuildRequest(self, reply)
+        req.subscribe(ireq.started)
+
+
+    command_FORCE.usage = "force build <which> <reason> - Force a build"
+
+    def command_STOP(self, user, reply, args):
+        args = args.split(None, 2)
+        if len(args) < 3 or args[0] != 'build':
+            raise UsageError, "try 'stop build WHICH <REASON>'"
+        which = args[1]
+        reason = args[2]
+
+        buildercontrol = self.getControl(which)
+
+        who = None
+        r = "stopped: by IRC user <%s>: %s" % (user, reason)
+
+        # find an in-progress build
+        builderstatus = self.getBuilder(which)
+        builds = builderstatus.getCurrentBuilds()
+        if not builds:
+            self.reply(reply, "sorry, no build is currently running")
+            return
+        for build in builds:
+            num = build.getNumber()
+
+            # obtain the BuildControl object
+            buildcontrol = buildercontrol.getBuild(num)
+
+            # make it stop
+            buildcontrol.stopBuild(r)
+
+            self.reply(reply, "build %d interrupted" % num)
+
+    command_STOP.usage = "stop build <which> <reason> - Stop a running build"
+
+    def emit_status(self, reply, which):
+        b = self.getBuilder(which)
+        str = "%s: " % which
+        state, builds = b.getState()
+        str += state
+        if state == "idle":
+            last = b.getLastFinishedBuild()
+            if last:
+                start,finished = last.getTimes()
+                str += ", last build %s secs ago: %s" % \
+                       (int(util.now() - finished), " ".join(last.getText()))
+        if state == "building":
+            t = []
+            for build in builds:
+                step = build.getCurrentStep()
+                s = "(%s)" % " ".join(step.getText())
+                ETA = build.getETA()
+                if ETA is not None:
+                    s += " [ETA %s]" % self.convertTime(ETA)
+                t.append(s)
+            str += ", ".join(t)
+        self.reply(reply, str)
+
+    def emit_last(self, reply, which):
+        last = self.getBuilder(which).getLastFinishedBuild()
+        if not last:
+            str = "(no builds run since last restart)"
+        else:
+            start,finish = last.getTimes()
+            str = "%s secs ago: " % (int(util.now() - finish))
+            str += " ".join(last.getText())
+        self.reply(reply, "last build [%s]: %s" % (which, str))
+
+    def command_LAST(self, user, reply, args):
+        args = args.split()
+        if len(args) == 0:
+            which = "all"
+        elif len(args) == 1:
+            which = args[0]
+        else:
+            raise UsageError, "try 'last <builder>'"
+        if which == "all":
+            builders = self.getAllBuilders()
+            for b in builders:
+                self.emit_last(reply, b.name)
+            return
+        self.emit_last(reply, which)
+    command_LAST.usage = "last <which> - list last build status for builder <which>"
+
+    def build_commands(self):
+        commands = []
+        for k in self.__class__.__dict__.keys():
+            if k.startswith('command_'):
+                commands.append(k[8:].lower())
+        commands.sort()
+        return commands
+
+    def command_HELP(self, user, reply, args):
+        args = args.split()
+        if len(args) == 0:
+            self.reply(reply, "Get help on what? (try 'help <foo>', or 'commands' for a command list)")
+            return
+        command = args[0]
+        meth = self.getCommandMethod(command)
+        if not meth:
+            raise UsageError, "no such command '%s'" % command
+        usage = getattr(meth, 'usage', None)
+        if usage:
+            self.reply(reply, "Usage: %s" % usage)
+        else:
+            self.reply(reply, "No usage info for '%s'" % command)
+    command_HELP.usage = "help <command> - Give help for <command>"
+
+    def command_SOURCE(self, user, reply, args):
+        banner = "My source can be found at http://buildbot.sourceforge.net/"
+        self.reply(reply, banner)
+
+    def command_COMMANDS(self, user, reply, args):
+        commands = self.build_commands()
+        str = "buildbot commands: " + ", ".join(commands)
+        self.reply(reply, str)
+    command_COMMANDS.usage = "commands - List available commands"
+
+    def command_DESTROY(self, user, reply, args):
+        self.me(reply, "readies phasers")
+
+    def command_DANCE(self, user, reply, args):
+        reactor.callLater(1.0, self.reply, reply, "0-<")
+        reactor.callLater(3.0, self.reply, reply, "0-/")
+        reactor.callLater(3.5, self.reply, reply, "0-\\")
+
+    def command_EXCITED(self, user, reply, args):
+        # like 'buildbot: destroy the sun!'
+        self.reply(reply, "What you say!")
+
+    def action(self, user, channel, data):
+        #log.msg("action: %s,%s,%s" % (user, channel, data))
+        user = user.split('!', 1)[0] # rest is ~user at hostname
+        # somebody did an action (/me actions)
+        if data.endswith("s buildbot"):
+            words = data.split()
+            verb = words[-2]
+            timeout = 4
+            if verb == "kicks":
+                response = "%s back" % verb
+                timeout = 1
+            else:
+                response = "%s %s too" % (verb, user)
+            reactor.callLater(timeout, self.me, channel, response)
+    # userJoined(self, user, channel)
+    
+    # output
+    # self.say(channel, message) # broadcast
+    # self.msg(user, message) # unicast
+    # self.me(channel, action) # send action
+    # self.away(message='')
+    # self.quit(message='')
+    
+class ThrottledClientFactory(protocol.ClientFactory):
+    lostDelay = 2
+    failedDelay = 60
+    def clientConnectionLost(self, connector, reason):
+        reactor.callLater(self.lostDelay, connector.connect)
+    def clientConnectionFailed(self, connector, reason):
+        reactor.callLater(self.failedDelay, connector.connect)
+
+class IrcStatusFactory(ThrottledClientFactory):
+    protocol = IrcStatusBot
+
+    status = None
+    control = None
+    shuttingDown = False
+    p = None
+
+    def __init__(self, nickname, password, channels, categories):
+        #ThrottledClientFactory.__init__(self) # doesn't exist
+        self.status = None
+        self.nickname = nickname
+        self.password = password
+        self.channels = channels
+        self.categories = categories
+
+    def __getstate__(self):
+        d = self.__dict__.copy()
+        del d['p']
+        return d
+
+    def shutdown(self):
+        self.shuttingDown = True
+        if self.p:
+            self.p.quit("buildmaster reconfigured: bot disconnecting")
+
+    def buildProtocol(self, address):
+        p = self.protocol(self.nickname, self.password,
+                          self.channels, self.status,
+                          self.categories)
+        p.factory = self
+        p.status = self.status
+        p.control = self.control
+        self.p = p
+        return p
+
+    # TODO: I think a shutdown that occurs while the connection is being
+    # established will make this explode
+
+    def clientConnectionLost(self, connector, reason):
+        if self.shuttingDown:
+            log.msg("not scheduling reconnection attempt")
+            return
+        ThrottledClientFactory.clientConnectionLost(self, connector, reason)
+
+    def clientConnectionFailed(self, connector, reason):
+        if self.shuttingDown:
+            log.msg("not scheduling reconnection attempt")
+            return
+        ThrottledClientFactory.clientConnectionFailed(self, connector, reason)
+
+
+class IRC(base.StatusReceiverMultiService):
+    """I am an IRC bot which can be queried for status information. I
+    connect to a single IRC server and am known by a single nickname on that
+    server, however I can join multiple channels."""
+
+    compare_attrs = ["host", "port", "nick", "password",
+                     "channels", "allowForce",
+                     "categories"]
+
+    def __init__(self, host, nick, channels, port=6667, allowForce=True,
+                 categories=None, password=None):
+        base.StatusReceiverMultiService.__init__(self)
+
+        assert allowForce in (True, False) # TODO: implement others
+
+        # need to stash these so we can detect changes later
+        self.host = host
+        self.port = port
+        self.nick = nick
+        self.channels = channels
+        self.password = password
+        self.allowForce = allowForce
+        self.categories = categories
+
+        # need to stash the factory so we can give it the status object
+        self.f = IrcStatusFactory(self.nick, self.password,
+                                  self.channels, self.categories)
+
+        c = internet.TCPClient(host, port, self.f)
+        c.setServiceParent(self)
+
+    def setServiceParent(self, parent):
+        base.StatusReceiverMultiService.setServiceParent(self, parent)
+        self.f.status = parent.getStatus()
+        if self.allowForce:
+            self.f.control = interfaces.IControl(parent)
+
+    def stopService(self):
+        # make sure the factory will stop reconnecting
+        self.f.shutdown()
+        return base.StatusReceiverMultiService.stopService(self)
+
+
+def main():
+    from twisted.internet import app
+    a = app.Application("irctest")
+    f = IrcStatusFactory()
+    host = "localhost"
+    port = 6667
+    f.addNetwork((host, port), ["private", "other"])
+    a.connectTCP(host, port, f)
+    a.run(save=0)
+    
+
+if __name__ == '__main__':
+    main()
+
+## buildbot: list builders
+# buildbot: watch quick
+#  print notification when current build in 'quick' finishes
+## buildbot: status
+## buildbot: status full-2.3
+##  building, not, % complete, ETA
+## buildbot: force build full-2.3 "reason"
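
The IRC status target defined above is normally attached from master.cfg. A
minimal fragment of such a configuration (the server, nickname, and channel
are placeholders, and c is assumed to be the usual BuildmasterConfig dictionary
whose 'status' list has already been created):

    from buildbot.status import words

    c['status'].append(words.IRC(host="irc.example.org",
                                 nick="buildbot",
                                 channels=["#myproject"],
                                 allowForce=True))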

Added: vendor/buildbot/current/buildbot/steps/__init__.py
===================================================================

Added: vendor/buildbot/current/buildbot/steps/dummy.py
===================================================================
--- vendor/buildbot/current/buildbot/steps/dummy.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/steps/dummy.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,100 @@
+
+from twisted.internet import reactor
+from buildbot.process.buildstep import BuildStep, LoggingBuildStep
+from buildbot.process.buildstep import LoggedRemoteCommand
+from buildbot.status.builder import SUCCESS, FAILURE
+
+# these classes are used internally by buildbot unit tests
+
+class Dummy(BuildStep):
+    """I am a dummy no-op step, which runs entirely on the master, and simply
+    waits 5 seconds before finishing with SUCCESS
+    """
+
+    haltOnFailure = True
+    name = "dummy"
+
+    def __init__(self, timeout=5, **kwargs):
+        """
+        @type  timeout: int
+        @param timeout: the number of seconds to delay before completing
+        """
+        BuildStep.__init__(self, **kwargs)
+        self.timeout = timeout
+        self.timer = None
+
+    def start(self):
+        self.step_status.setColor("yellow")
+        self.step_status.setText(["delay", "%s secs" % self.timeout])
+        self.timer = reactor.callLater(self.timeout, self.done)
+
+    def interrupt(self, reason):
+        if self.timer:
+            self.timer.cancel()
+            self.timer = None
+            self.step_status.setColor("red")
+            self.step_status.setText(["delay", "interrupted"])
+            self.finished(FAILURE)
+
+    def done(self):
+        self.step_status.setColor("green")
+        self.finished(SUCCESS)
+
+class FailingDummy(Dummy):
+    """I am a dummy no-op step that 'runs' master-side and finishes (with a
+    FAILURE status) after 5 seconds."""
+
+    name = "failing dummy"
+
+    def start(self):
+        self.step_status.setColor("yellow")
+        self.step_status.setText(["boom", "%s secs" % self.timeout])
+        self.timer = reactor.callLater(self.timeout, self.done)
+
+    def done(self):
+        self.step_status.setColor("red")
+        self.finished(FAILURE)
+
+class RemoteDummy(LoggingBuildStep):
+    """I am a dummy no-op step that runs on the remote side and
+    simply waits 5 seconds before completing with success.
+    See L{buildbot.slave.commands.DummyCommand}
+    """
+
+    haltOnFailure = True
+    name = "remote dummy"
+
+    def __init__(self, timeout=5, **kwargs):
+        """
+        @type  timeout: int
+        @param timeout: the number of seconds to delay
+        """
+        LoggingBuildStep.__init__(self, **kwargs)
+        self.timeout = timeout
+        self.description = ["remote", "delay", "%s secs" % timeout]
+
+    def describe(self, done=False):
+        return self.description
+
+    def start(self):
+        args = {'timeout': self.timeout}
+        cmd = LoggedRemoteCommand("dummy", args)
+        self.startCommand(cmd)
+
+class Wait(LoggingBuildStep):
+    """I start a command on the slave that waits for the unit test to
+    tell it when to finish.
+    """
+
+    name = "wait"
+    def __init__(self, handle, **kwargs):
+        LoggingBuildStep.__init__(self, **kwargs)
+        self.handle = handle
+
+    def describe(self, done=False):
+        return ["wait: %s" % self.handle]
+
+    def start(self):
+        args = {'handle': (self.handle, self.build.reason)}
+        cmd = LoggedRemoteCommand("dummy.wait", args)
+        self.startCommand(cmd)
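
These dummy steps exist for buildbot's own unit tests, but they are also handy
for exercising a new master configuration without running real slave commands.
A small factory sketch (builder and slave wiring omitted; the s() helper from
buildbot.process.factory is assumed to be the step-specification convention of
this release):

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.dummy import Dummy, RemoteDummy

    f = BuildFactory([s(Dummy, timeout=2),         # master-side 2 second no-op
                      s(RemoteDummy, timeout=2)])  # slave-side 2 second no-op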

Added: vendor/buildbot/current/buildbot/steps/maxq.py
===================================================================
--- vendor/buildbot/current/buildbot/steps/maxq.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/steps/maxq.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,46 @@
+from buildbot.steps.shell import ShellCommand
+from buildbot.status import event, builder
+
+class MaxQ(ShellCommand):
+    flunkOnFailure = True
+    name = "maxq"
+
+    def __init__(self, testdir=None, **kwargs):
+        if not testdir:
+            raise TypeError("please pass testdir")
+        command = 'run_maxq.py %s' % (testdir,)
+        ShellCommand.__init__(self, command=command, **kwargs)
+
+    def startStatus(self):
+        evt = event.Event("yellow", ['running', 'maxq', 'tests'],
+                      files={'log': self.log})
+        self.setCurrentActivity(evt)
+
+
+    def finished(self, rc):
+        self.failures = 0
+        if rc:
+            self.failures = 1
+        output = self.log.getAll()
+        self.failures += output.count('\nTEST FAILURE:')
+
+        result = (builder.SUCCESS, ['maxq'])
+
+        if self.failures:
+            result = (builder.FAILURE,
+                      [str(self.failures), 'maxq', 'failures'])
+
+        return self.stepComplete(result)
+
+    def finishStatus(self, result):
+        if self.failures:
+            color = "red"
+            text = ["maxq", "failed"]
+        else:
+            color = "green"
+            text = ['maxq', 'tests']
+        self.updateCurrentActivity(color=color, text=text)
+        self.finishStatusSummary()
+        self.finishCurrentActivity()
+
+

Added: vendor/buildbot/current/buildbot/steps/python.py
===================================================================
--- vendor/buildbot/current/buildbot/steps/python.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/steps/python.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,112 @@
+
+from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS
+from buildbot.steps.shell import ShellCommand
+
+try:
+    import cStringIO
+    StringIO = cStringIO.StringIO
+except ImportError:
+    from StringIO import StringIO
+
+
+class BuildEPYDoc(ShellCommand):
+    name = "epydoc"
+    command = ["make", "epydocs"]
+    description = ["building", "epydocs"]
+    descriptionDone = ["epydoc"]
+
+    def createSummary(self, log):
+        import_errors = 0
+        warnings = 0
+        errors = 0
+
+        for line in StringIO(log.getText()):
+            if line.startswith("Error importing "):
+                import_errors += 1
+            if line.find("Warning: ") != -1:
+                warnings += 1
+            if line.find("Error: ") != -1:
+                errors += 1
+
+        self.descriptionDone = self.descriptionDone[:]
+        if import_errors:
+            self.descriptionDone.append("ierr=%d" % import_errors)
+        if warnings:
+            self.descriptionDone.append("warn=%d" % warnings)
+        if errors:
+            self.descriptionDone.append("err=%d" % errors)
+
+        self.import_errors = import_errors
+        self.warnings = warnings
+        self.errors = errors
+
+    def evaluateCommand(self, cmd):
+        if cmd.rc != 0:
+            return FAILURE
+        if self.warnings or self.errors:
+            return WARNINGS
+        return SUCCESS
+
+
+class PyFlakes(ShellCommand):
+    name = "pyflakes"
+    command = ["make", "pyflakes"]
+    description = ["running", "pyflakes"]
+    descriptionDone = ["pyflakes"]
+    flunkOnFailure = False
+    flunkingIssues = ["undefined"] # any pyflakes lines like this cause FAILURE
+
+    MESSAGES = ("unused", "undefined", "redefs", "import*", "misc")
+
+    def createSummary(self, log):
+        counts = {}
+        summaries = {}
+        for m in self.MESSAGES:
+            counts[m] = 0
+            summaries[m] = []
+
+        first = True
+        for line in StringIO(log.getText()).readlines():
+            # the first few lines might contain echoed commands from a 'make
+            # pyflakes' step, so don't count these as warnings. Stop ignoring
+            # the initial lines as soon as we see one with a colon.
+            if first:
+                if line.find(":") != -1:
+                    # there's the colon, this is the first real line
+                    first = False
+                    # fall through and parse the line
+                else:
+                    # skip this line, keep skipping non-colon lines
+                    continue
+            if line.find("imported but unused") != -1:
+                m = "unused"
+            elif line.find("*' used; unable to detect undefined names") != -1:
+                m = "import*"
+            elif line.find("undefined name") != -1:
+                m = "undefined"
+            elif line.find("redefinition of unused") != -1:
+                m = "redefs"
+            else:
+                m = "misc"
+            summaries[m].append(line)
+            counts[m] += 1
+
+        self.descriptionDone = self.descriptionDone[:]
+        for m in self.MESSAGES:
+            if counts[m]:
+                self.descriptionDone.append("%s=%d" % (m, counts[m]))
+                self.addCompleteLog(m, "".join(summaries[m]))
+            self.setProperty("pyflakes-%s" % m, counts[m])
+        self.setProperty("pyflakes-total", sum(counts.values()))
+
+
+    def evaluateCommand(self, cmd):
+        if cmd.rc != 0:
+            return FAILURE
+        for m in self.flunkingIssues:
+            if self.getProperty("pyflakes-%s" % m):
+                return FAILURE
+        if self.getProperty("pyflakes-total"):
+            return WARNINGS
+        return SUCCESS
+
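
For reference, here is a minimal sketch of how the PyFlakes step above sorts
pyflakes output into its MESSAGES categories. The sample lines are
hypothetical; only the substring tests and category names are taken from
PyFlakes.createSummary():

    # Hypothetical pyflakes output lines; the checks below mirror
    # PyFlakes.createSummary() in buildbot/steps/python.py.
    sample = [
        "buildbot/master.py:10: 'os' imported but unused\n",
        "buildbot/slave/bot.py:42: undefined name 'foo'\n",
    ]
    counts = {"unused": 0, "undefined": 0, "redefs": 0, "import*": 0,
              "misc": 0}
    for line in sample:
        if line.find("imported but unused") != -1:
            m = "unused"
        elif line.find("*' used; unable to detect undefined names") != -1:
            m = "import*"
        elif line.find("undefined name") != -1:
            m = "undefined"
        elif line.find("redefinition of unused") != -1:
            m = "redefs"
        else:
            m = "misc"
        counts[m] += 1
    # counts['unused'] == 1 and counts['undefined'] == 1 here; since
    # 'undefined' is in the default flunkingIssues list, evaluateCommand()
    # would return FAILURE for this output.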

Added: vendor/buildbot/current/buildbot/steps/python_twisted.py
===================================================================
--- vendor/buildbot/current/buildbot/steps/python_twisted.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/steps/python_twisted.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,806 @@
+# -*- test-case-name: buildbot.test.test_twisted -*-
+
+from twisted.python import log
+
+from buildbot.status import tests, builder
+from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS, SKIPPED
+from buildbot.process.buildstep import LogLineObserver, OutputProgressObserver
+from buildbot.process.buildstep import RemoteShellCommand
+from buildbot.steps.shell import ShellCommand
+
+try:
+    import cStringIO
+    StringIO = cStringIO
+except ImportError:
+    import StringIO
+import re
+
+# BuildSteps that are specific to the Twisted source tree
+
+class HLint(ShellCommand):
+    """I run a 'lint' checker over a set of .xhtml files. Any deviations
+    from recommended style is flagged and put in the output log.
+
+    This step looks at .changes in the parent Build to extract a list of
+    Lore XHTML files to check."""
+
+    name = "hlint"
+    description = ["running", "hlint"]
+    descriptionDone = ["hlint"]
+    warnOnWarnings = True
+    warnOnFailure = True
+    # TODO: track time, but not output
+    warnings = 0
+
+    def __init__(self, python=None, **kwargs):
+        ShellCommand.__init__(self, **kwargs)
+        self.python = python
+
+    def start(self):
+        # create the command
+        htmlFiles = {}
+        for f in self.build.allFiles():
+            if f.endswith(".xhtml") and not f.startswith("sandbox/"):
+                htmlFiles[f] = 1
+        # remove duplicates
+        hlintTargets = htmlFiles.keys()
+        hlintTargets.sort()
+        if not hlintTargets:
+            return SKIPPED
+        self.hlintFiles = hlintTargets
+        c = []
+        if self.python:
+            c.append(self.python)
+        c += ["bin/lore", "-p", "--output", "lint"] + self.hlintFiles
+        self.setCommand(c)
+
+        # add an extra log file to show the .html files we're checking
+        self.addCompleteLog("files", "\n".join(self.hlintFiles)+"\n")
+
+        ShellCommand.start(self)
+
+    def commandComplete(self, cmd):
+        # TODO: remove the 'files' file (a list of .xhtml files that were
+        # submitted to hlint): the list is already available in the logfile,
+        # and mostly exists to give the user an idea of how long the step
+        # will take anyway.
+        lines = cmd.logs['stdio'].getText().split("\n")
+        warningLines = filter(lambda line:':' in line, lines)
+        if warningLines:
+            self.addCompleteLog("warnings", "".join(warningLines))
+        warnings = len(warningLines)
+        self.warnings = warnings
+
+    def evaluateCommand(self, cmd):
+        # warnings are in stdout, rc is always 0, unless the tools break
+        if cmd.rc != 0:
+            return FAILURE
+        if self.warnings:
+            return WARNINGS
+        return SUCCESS
+
+    def getText2(self, cmd, results):
+        if cmd.rc != 0:
+            return ["hlint"]
+        return ["%d hlin%s" % (self.warnings,
+                               self.warnings == 1 and 't' or 'ts')]
+
+def countFailedTests(output):
+    # start scanning 10kb from the end, because there might be a few kb of
+    # import exception tracebacks between the total/time line and the errors
+    # line
+    chunk = output[-10000:]
+    lines = chunk.split("\n")
+    lines.pop() # blank line at end
+    # lines[-3] is "Ran NN tests in 0.242s"
+    # lines[-2] is blank
+    # lines[-1] is 'OK' or 'FAILED (failures=1, errors=12)'
+    #  or 'FAILED (failures=1)'
+    #  or "PASSED (skips=N, successes=N)"  (for Twisted-2.0)
+    # there might be other lines dumped here. Scan all the lines.
+    res = {'total': None,
+           'failures': 0,
+           'errors': 0,
+           'skips': 0,
+           'expectedFailures': 0,
+           'unexpectedSuccesses': 0,
+           }
+    for l in lines:
+        out = re.search(r'Ran (\d+) tests', l)
+        if out:
+            res['total'] = int(out.group(1))
+        if (l.startswith("OK") or
+            l.startswith("FAILED ") or
+            l.startswith("PASSED")):
+            # the extra space on FAILED_ is to distinguish the overall
+            # status from an individual test which failed. The lack of a
+            # space on the OK is because it may be printed without any
+            # additional text (if there are no skips, etc.)
+            out = re.search(r'failures=(\d+)', l)
+            if out: res['failures'] = int(out.group(1))
+            out = re.search(r'errors=(\d+)', l)
+            if out: res['errors'] = int(out.group(1))
+            out = re.search(r'skips=(\d+)', l)
+            if out: res['skips'] = int(out.group(1))
+            out = re.search(r'expectedFailures=(\d+)', l)
+            if out: res['expectedFailures'] = int(out.group(1))
+            out = re.search(r'unexpectedSuccesses=(\d+)', l)
+            if out: res['unexpectedSuccesses'] = int(out.group(1))
+            # successes= is a Twisted-2.0 addition, and is not currently used
+            out = re.search(r'successes=(\d+)', l)
+            if out: res['successes'] = int(out.group(1))
+
+    return res
+
+
+class TrialTestCaseCounter(LogLineObserver):
+    _line_re = re.compile(r'^([\w\.]+) \.\.\. \[([^\]]+)\]$')
+    numTests = 0
+    finished = False
+
+    def outLineReceived(self, line):
+        # different versions of Twisted emit different per-test lines with
+        # the bwverbose reporter.
+        #  2.0.0: testSlave (buildbot.test.test_runner.Create) ... [OK]
+        #  2.1.0: buildbot.test.test_runner.Create.testSlave ... [OK]
+        #  2.4.0: buildbot.test.test_runner.Create.testSlave ... [OK]
+        # Let's just handle the most recent version, since it's the easiest.
+
+        if self.finished:
+            return
+        if line.startswith("=" * 40):
+            self.finished = True
+            return
+
+        m = self._line_re.search(line.strip())
+        if m:
+            testname, result = m.groups()
+            self.numTests += 1
+            self.step.setProgress('tests', self.numTests)
+
+
+UNSPECIFIED=() # since None is a valid choice
+
+class Trial(ShellCommand):
+    """I run a unit test suite using 'trial', a unittest-like testing
+    framework that comes with Twisted. Trial is used to implement Twisted's
+    own unit tests, and is the unittest-framework of choice for many projects
+    that use Twisted internally.
+
+    Projects that use trial typically have all their test cases in a 'test'
+    subdirectory of their top-level library directory. E.g. for my package
+    'petmail', the tests are in 'petmail/test/test_*.py'. More complicated
+    packages (like Twisted itself) may have multiple test directories, like
+    'twisted/test/test_*.py' for the core functionality and
+    'twisted/mail/test/test_*.py' for the email-specific tests.
+
+    To run trial tests, you run the 'trial' executable and tell it where the
+    test cases are located. The most common way of doing this is with a
+    module name. For petmail, I would run 'trial petmail.test' and it would
+    locate all the test_*.py files under petmail/test/, running every test
+    case it could find in them. Unlike the unittest.py that comes with
+    Python, you do not run the test_foo.py as a script; you always let trial
+    do the importing and running. The 'tests' parameter controls which tests
+    trial will run: it can be a string or a list of strings.
+
+    You can also use a higher-level module name and pass the --recursive flag
+    to trial: this will search recursively within the named module to find
+    all test cases. For large multiple-test-directory projects like Twisted,
+    this means you can avoid specifying all the test directories explicitly.
+    Something like 'trial --recursive twisted' will pick up everything.
+
+    To find these test cases, you must set a PYTHONPATH that allows something
+    like 'import petmail.test' to work. For packages that don't use a
+    separate top-level 'lib' directory, PYTHONPATH=. will work, and will use
+    the test cases (and the code they are testing) in-place.
+    PYTHONPATH=build/lib or PYTHONPATH=build/lib.$ARCH are also useful when
+    you do a 'setup.py build' step first. The 'testpath' attribute of this
+    class controls what PYTHONPATH= is set to.
+
+    Trial has the ability (through the --testmodule flag) to run only the set
+    of test cases named by special 'test-case-name' tags in source files. We
+    can get the list of changed source files from our parent Build and
+    provide them to trial, thus running the minimal set of test cases needed
+    to cover the Changes. This is useful for quick builds, especially in
+    trees with a lot of test cases. The 'testChanges' parameter controls this
+    feature: if set, it will override 'tests'.
+
+    The trial executable itself is typically just 'trial' (which is usually
+    found on your $PATH as /usr/bin/trial), but it can be overridden with the
+    'trial' parameter. This is useful for Twisted's own unittests, which want
+    to use the copy of bin/trial that comes with the sources. (When bin/trial
+    discovers that it is living in a subdirectory named 'Twisted', it assumes
+    it is being run from the source tree and adds that parent directory to
+    PYTHONPATH. The canonical way to run Twisted's own unittest suite is
+    therefore './bin/trial twisted.test' rather than 'PYTHONPATH=.
+    /usr/bin/trial twisted.test'; this is especially handy when /usr/bin/trial
+    has not yet been installed.)
+
+    To influence the version of python being used for the tests, or to add
+    flags to the command, set the 'python' parameter. This can be a string
+    (like 'python2.2') or a list (like ['python2.3', '-Wall']).
+
+    Trial creates and switches into a directory named _trial_temp/ before
+    running the tests, and sends the twisted log (which includes all
+    exceptions) to a file named test.log . This file will be pulled up to
+    the master where it can be seen as part of the status output.
+
+    There are some class attributes which may be usefully overridden
+    by subclasses. 'trialMode' and 'trialArgs' can influence the trial
+    command line.
+    """
+
+    name = "trial"
+    progressMetrics = ('output', 'tests', 'test.log')
+    # note: the slash only works on unix buildslaves, of course, but we have
+    # no way to know what the buildslave uses as a separator. TODO: figure
+    # out something clever.
+    logfiles = {"test.log": "_trial_temp/test.log"}
+    # we use test.log to track Progress at the end of __init__()
+
+    flunkOnFailure = True
+    python = None
+    trial = "trial"
+    trialMode = ["--reporter=bwverbose"] # requires Twisted-2.1.0 or newer
+    # for Twisted-2.0.0 or 1.3.0, use ["-o"] instead
+    trialArgs = []
+    testpath = UNSPECIFIED # required (but can be None)
+    testChanges = False # TODO: needs better name
+    recurse = False
+    reactor = None
+    randomly = False
+    tests = None # required
+
+    def __init__(self, reactor=UNSPECIFIED, python=None, trial=None,
+                 testpath=UNSPECIFIED,
+                 tests=None, testChanges=None,
+                 recurse=None, randomly=None,
+                 trialMode=None, trialArgs=None,
+                 **kwargs):
+        """
+        @type  testpath: string
+        @param testpath: use in PYTHONPATH when running the tests. If
+                         None, do not set PYTHONPATH. Setting this to '.' will
+                         cause the source files to be used in-place.
+
+        @type  python: string (without spaces) or list
+        @param python: which python executable to use. Will form the start of
+                       the argv array that will launch trial. If you use this,
+                       you should set 'trial' to an explicit path (like
+                       /usr/bin/trial or ./bin/trial). Defaults to None, which
+                       leaves it out entirely (running 'trial args' instead of
+                       'python ./bin/trial args'). Likely values are 'python',
+                       ['python2.2'], ['python', '-Wall'], etc.
+
+        @type  trial: string
+        @param trial: which 'trial' executable to run.
+                      Defaults to 'trial', which will cause $PATH to be
+                      searched and probably find /usr/bin/trial . If you set
+                      'python', this should be set to an explicit path (because
+                      'python2.3 trial' will not work).
+
+        @type trialMode: list of strings
+        @param trialMode: a list of arguments to pass to trial, specifically
+                          to set the reporting mode. This defaults to
+                          ['--reporter=bwverbose'], which requires the trial
+                          that comes with Twisted-2.1.0 or newer. For the
+                          trial that comes with Twisted-2.0.x or 1.3.0, use
+                          ['-o'] instead.
+
+        @type trialArgs: list of strings
+        @param trialArgs: a list of arguments to pass to trial, available to
+                          turn on any extra flags you like. Defaults to [].
+
+        @type  tests: list of strings
+        @param tests: a list of test modules to run, like
+                      ['twisted.test.test_defer', 'twisted.test.test_process'].
+                      If this is a string, it will be converted into a one-item
+                      list.
+
+        @type  testChanges: boolean
+        @param testChanges: if True, ignore the 'tests' parameter and instead
+                            ask the Build for all the files that make up the
+                            Changes going into this build. Pass these filenames
+                            to trial and ask it to look for test-case-name
+                            tags, running just the tests necessary to cover the
+                            changes.
+
+        @type  recurse: boolean
+        @param recurse: If True, pass the --recurse option to trial, allowing
+                        test cases to be found in deeper subdirectories of the
+                        modules listed in 'tests'. This does not appear to be
+                        necessary when using testChanges.
+
+        @type  reactor: string
+        @param reactor: which reactor to use, like 'gtk' or 'java'. If not
+                        provided, Twisted's usual platform-dependent
+                        default is used.
+
+        @type  randomly: boolean
+        @param randomly: if True, add the --random=0 argument, which instructs
+                         trial to run the unit tests in a random order each
+                         time. This occasionally catches problems that might be
+                         masked when one module always runs before another
+                         (like failing to make registerAdapter calls before
+                         lookups are done).
+
+        @type  kwargs: dict
+        @param kwargs: parameters. The following parameters are inherited from
+                       L{ShellCommand} and may be useful to set: workdir,
+                       haltOnFailure, flunkOnWarnings, flunkOnFailure,
+                       warnOnWarnings, warnOnFailure, want_stdout, want_stderr,
+                       timeout.
+        """
+        ShellCommand.__init__(self, **kwargs)
+
+        if python:
+            self.python = python
+        if self.python is not None:
+            if type(self.python) is str:
+                self.python = [self.python]
+            for s in self.python:
+                if " " in s:
+                    # this is not strictly an error, but I suspect more
+                    # people will accidentally try to use python="python2.3
+                    # -Wall" than will use embedded spaces in a python flag
+                    log.msg("python= component '%s' has spaces" % s)
+                    log.msg("To add -Wall, use python=['python', '-Wall']")
+                    why = "python= value has spaces, probably an error"
+                    raise ValueError(why)
+
+        if trial:
+            self.trial = trial
+        if " " in self.trial:
+            raise ValueError("trial= value has spaces")
+        if trialMode is not None:
+            self.trialMode = trialMode
+        if trialArgs is not None:
+            self.trialArgs = trialArgs
+
+        if testpath is not UNSPECIFIED:
+            self.testpath = testpath
+        if self.testpath is UNSPECIFIED:
+            raise ValueError("You must specify testpath= (it can be None)")
+        assert isinstance(self.testpath, str) or self.testpath is None
+
+        if reactor is not UNSPECIFIED:
+            self.reactor = reactor
+
+        if tests is not None:
+            self.tests = tests
+        if type(self.tests) is str:
+            self.tests = [self.tests]
+        if testChanges is not None:
+            self.testChanges = testChanges
+            #self.recurse = True  # not sure this is necessary
+
+        if not self.testChanges and self.tests is None:
+            raise ValueError("Must either set testChanges= or provide tests=")
+
+        if recurse is not None:
+            self.recurse = recurse
+        if randomly is not None:
+            self.randomly = randomly
+
+        # build up most of the command, then stash it until start()
+        command = []
+        if self.python:
+            command.extend(self.python)
+        command.append(self.trial)
+        command.extend(self.trialMode)
+        if self.recurse:
+            command.append("--recurse")
+        if self.reactor:
+            command.append("--reactor=%s" % reactor)
+        if self.randomly:
+            command.append("--random=0")
+        command.extend(self.trialArgs)
+        self.command = command
+
+        if self.reactor:
+            self.description = ["testing", "(%s)" % self.reactor]
+            self.descriptionDone = ["tests"]
+            # commandComplete adds (reactorname) to self.text
+        else:
+            self.description = ["testing"]
+            self.descriptionDone = ["tests"]
+
+        # this counter will feed Progress along the 'test cases' metric
+        self.addLogObserver('stdio', TrialTestCaseCounter())
+        # this one just measures bytes of output in _trial_temp/test.log
+        self.addLogObserver('test.log', OutputProgressObserver('test.log'))
+
+    def setupEnvironment(self, cmd):
+        ShellCommand.setupEnvironment(self, cmd)
+        if self.testpath != None:
+            e = cmd.args['env']
+            if e is None:
+                cmd.args['env'] = {'PYTHONPATH': self.testpath}
+            else:
+                # TODO: somehow, each build causes another copy of
+                # self.testpath to get prepended
+                if e.get('PYTHONPATH', "") == "":
+                    e['PYTHONPATH'] = self.testpath
+                else:
+                    e['PYTHONPATH'] = self.testpath + ":" + e['PYTHONPATH']
+        try:
+            p = cmd.args['env']['PYTHONPATH']
+            if type(p) is not str:
+                log.msg("hey, not a string:", p)
+                assert False
+        except (KeyError, TypeError):
+            # KeyError if args doesn't have ['env']
+            # KeyError if args['env'] doesn't have ['PYTHONPATH']
+            # TypeError if args is None
+            pass
+
+    def start(self):
+        # now that self.build.allFiles() is nailed down, finish building the
+        # command
+        if self.testChanges:
+            for f in self.build.allFiles():
+                if f.endswith(".py"):
+                    self.command.append("--testmodule=%s" % f)
+        else:
+            self.command.extend(self.tests)
+        log.msg("Trial.start: command is", self.command)
+
+        # if our slave is too old to understand logfiles=, fetch them
+        # manually. This is a fallback for the Twisted buildbot and some old
+        # buildslaves.
+        self._needToPullTestDotLog = False
+        if self.slaveVersionIsOlderThan("shell", "2.1"):
+            log.msg("Trial: buildslave %s is too old to accept logfiles=" %
+                    self.getSlaveName())
+            log.msg(" falling back to 'cat _trial_temp/test.log' instead")
+            self.logfiles = {}
+            self._needToPullTestDotLog = True
+
+        ShellCommand.start(self)
+
+
+    def commandComplete(self, cmd):
+        if not self._needToPullTestDotLog:
+            return self._gotTestDotLog(cmd)
+
+        # if the buildslave was too old, pull test.log now
+        catcmd = ["cat", "_trial_temp/test.log"]
+        c2 = RemoteShellCommand(command=catcmd, workdir=self.workdir)
+        loog = self.addLog("test.log")
+        c2.useLog(loog, True, logfileName="stdio")
+        self.cmd = c2 # to allow interrupts
+        d = c2.run(self, self.remote)
+        d.addCallback(lambda res: self._gotTestDotLog(cmd))
+        return d
+
+    def rtext(self, fmt='%s'):
+        if self.reactor:
+            rtext = fmt % self.reactor
+            return rtext.replace("reactor", "")
+        return ""
+
+    def _gotTestDotLog(self, cmd):
+        # figure out all status, then let the various hook functions return
+        # different pieces of it
+
+        # 'cmd' is the original trial command, so cmd.logs['stdio'] is the
+        # trial output. We don't have access to test.log from here.
+        output = cmd.logs['stdio'].getText()
+        counts = countFailedTests(output)
+
+        total = counts['total']
+        failures, errors = counts['failures'], counts['errors']
+        parsed = (total != None)
+        text = []
+        text2 = ""
+
+        if cmd.rc == 0:
+            if parsed:
+                results = SUCCESS
+                if total:
+                    text += ["%d %s" % \
+                             (total,
+                              total == 1 and "test" or "tests"),
+                             "passed"]
+                else:
+                    text += ["no tests", "run"]
+            else:
+                results = FAILURE
+                text += ["testlog", "unparseable"]
+                text2 = "tests"
+        else:
+            # something failed
+            results = FAILURE
+            if parsed:
+                text.append("tests")
+                if failures:
+                    text.append("%d %s" % \
+                                (failures,
+                                 failures == 1 and "failure" or "failures"))
+                if errors:
+                    text.append("%d %s" % \
+                                (errors,
+                                 errors == 1 and "error" or "errors"))
+                count = failures + errors
+                text2 = "%d tes%s" % (count, (count == 1 and 't' or 'ts'))
+            else:
+                text += ["tests", "failed"]
+                text2 = "tests"
+
+        if counts['skips']:
+            text.append("%d %s" %  \
+                        (counts['skips'],
+                         counts['skips'] == 1 and "skip" or "skips"))
+        if counts['expectedFailures']:
+            text.append("%d %s" %  \
+                        (counts['expectedFailures'],
+                         counts['expectedFailures'] == 1 and "todo"
+                         or "todos"))
+            if 0: # TODO
+                results = WARNINGS
+                if not text2:
+                    text2 = "todo"
+
+        if 0:
+            # ignore unexpectedSuccesses for now, but it should really mark
+            # the build WARNING
+            if counts['unexpectedSuccesses']:
+                text.append("%d surprises" % counts['unexpectedSuccesses'])
+                results = WARNINGS
+                if not text2:
+                    text2 = "tests"
+
+        if self.reactor:
+            text.append(self.rtext('(%s)'))
+            if text2:
+                text2 = "%s %s" % (text2, self.rtext('(%s)'))
+
+        self.results = results
+        self.text = text
+        self.text2 = [text2]
+
+    def addTestResult(self, testname, results, text, tlog):
+        if self.reactor is not None:
+            testname = (self.reactor,) + testname
+        tr = builder.TestResult(testname, results, text, logs={'log': tlog})
+        #self.step_status.build.addTestResult(tr)
+        self.build.build_status.addTestResult(tr)
+
+    def createSummary(self, loog):
+        output = loog.getText()
+        problems = ""
+        sio = StringIO.StringIO(output)
+        warnings = {}
+        while 1:
+            line = sio.readline()
+            if line == "":
+                break
+            if line.find(" exceptions.DeprecationWarning: ") != -1:
+                # no source
+                warning = line # TODO: consider stripping basedir prefix here
+                warnings[warning] = warnings.get(warning, 0) + 1
+            elif (line.find(" DeprecationWarning: ") != -1 or
+                line.find(" UserWarning: ") != -1):
+                # next line is the source
+                warning = line + sio.readline()
+                warnings[warning] = warnings.get(warning, 0) + 1
+            elif line.find("Warning: ") != -1:
+                warning = line
+                warnings[warning] = warnings.get(warning, 0) + 1
+
+            if line.find("=" * 60) == 0 or line.find("-" * 60) == 0:
+                problems += line
+                problems += sio.read()
+                break
+
+        if problems:
+            self.addCompleteLog("problems", problems)
+            # now parse the problems for per-test results
+            pio = StringIO.StringIO(problems)
+            pio.readline() # eat the first separator line
+            testname = None
+            done = False
+            while not done:
+                while 1:
+                    line = pio.readline()
+                    if line == "":
+                        done = True
+                        break
+                    if line.find("=" * 60) == 0:
+                        break
+                    if line.find("-" * 60) == 0:
+                        # the last case has --- as a separator before the
+                        # summary counts are printed
+                        done = True
+                        break
+                    if testname is None:
+                        # the first line after the === is like:
+# EXPECTED FAILURE: testLackOfTB (twisted.test.test_failure.FailureTestCase)
+# SKIPPED: testRETR (twisted.test.test_ftp.TestFTPServer)
+# FAILURE: testBatchFile (twisted.conch.test.test_sftp.TestOurServerBatchFile)
+                        r = re.search(r'^([^:]+): (\w+) \(([\w\.]+)\)', line)
+                        if not r:
+                            # TODO: cleanup, if there are no problems,
+                            # we hit here
+                            continue
+                        result, name, case = r.groups()
+                        testname = tuple(case.split(".") + [name])
+                        results = {'SKIPPED': SKIPPED,
+                                   'EXPECTED FAILURE': SUCCESS,
+                                   'UNEXPECTED SUCCESS': WARNINGS,
+                                   'FAILURE': FAILURE,
+                                   'ERROR': FAILURE,
+                                   'SUCCESS': SUCCESS, # not reported
+                                   }.get(result, WARNINGS)
+                        text = result.lower().split()
+                        loog = line
+                        # the next line is all dashes
+                        loog += pio.readline()
+                    else:
+                        # the rest goes into the log
+                        loog += line
+                if testname:
+                    self.addTestResult(testname, results, text, loog)
+                    testname = None
+
+        if warnings:
+            lines = warnings.keys()
+            lines.sort()
+            self.addCompleteLog("warnings", "".join(lines))
+
+    def evaluateCommand(self, cmd):
+        return self.results
+
+    def getText(self, cmd, results):
+        return self.text
+    def getText2(self, cmd, results):
+        return self.text2
+
+    
+class ProcessDocs(ShellCommand):
+    """I build all docs. This requires some LaTeX packages to be installed.
+    It will result in the full documentation book (dvi, pdf, etc).
+    
+    """
+
+    name = "process-docs"
+    warnOnWarnings = 1
+    command = ["admin/process-docs"]
+    description = ["processing", "docs"]
+    descriptionDone = ["docs"]
+    # TODO: track output and time
+
+    def __init__(self, **kwargs):
+        """
+        @type    workdir: string
+        @keyword workdir: the workdir to start from: must be the base of the
+                          Twisted tree
+
+        @type    results: triple of (int, int, string)
+        @keyword results: [rc, warnings, output]
+                          - rc==0 if all files were converted successfully.
+                          - warnings is a count of hlint warnings. 
+                          - output is the verbose output of the command.
+        """
+        ShellCommand.__init__(self, **kwargs)
+
+    def createSummary(self, log):
+        output = log.getText()
+        # hlint warnings are of the format: 'WARNING: file:line:col: stuff'.
+        # latex warnings start with "WARNING: LaTeX Warning: stuff", but
+        # sometimes wrap around to a second line.
+        lines = output.split("\n")
+        warningLines = []
+        wantNext = False
+        for line in lines:
+            wantThis = wantNext
+            wantNext = False
+            if line.startswith("WARNING: "):
+                wantThis = True
+                wantNext = True
+            if wantThis:
+                warningLines.append(line)
+
+        if warningLines:
+            self.addCompleteLog("warnings", "\n".join(warningLines) + "\n")
+        self.warnings = len(warningLines)
+
+    def evaluateCommand(self, cmd):
+        if cmd.rc != 0:
+            return FAILURE
+        if self.warnings:
+            return WARNINGS
+        return SUCCESS
+
+    def getText(self, cmd, results):
+        if results == SUCCESS:
+            return ["docs", "successful"]
+        if results == WARNINGS:
+            return ["docs",
+                    "%d warnin%s" % (self.warnings,
+                                     self.warnings == 1 and 'g' or 'gs')]
+        if results == FAILURE:
+            return ["docs", "failed"]
+
+    def getText2(self, cmd, results):
+        if results == WARNINGS:
+            return ["%d do%s" % (self.warnings,
+                                 self.warnings == 1 and 'c' or 'cs')]
+        return ["docs"]
+
+
+    
+class BuildDebs(ShellCommand):
+    """I build the .deb packages."""
+ 
+    name = "debuild"
+    flunkOnFailure = 1
+    command = ["debuild", "-uc", "-us"]
+    description = ["building", "debs"]
+    descriptionDone = ["debs"]
+
+    def __init__(self, **kwargs):
+        """
+        @type    workdir: string
+        @keyword workdir: the workdir to start from (must be the base of the
+                          Twisted tree)
+        @type    results: pair of (int, string)
+        @keyword results: [rc, output].
+                          - rc == 0 if all .debs were created successfully
+                          - output: string with any errors or warnings
+        """
+        ShellCommand.__init__(self, **kwargs)
+
+    def commandComplete(self, cmd):
+        errors, warnings = 0, 0
+        output = cmd.logs['stdio'].getText()
+        summary = ""
+        sio = StringIO.StringIO(output)
+        for line in sio.readlines():
+            if line.find("E: ") == 0:
+                summary += line
+                errors += 1
+            if line.find("W: ") == 0:
+                summary += line
+                warnings += 1
+        if summary:
+            self.addCompleteLog("problems", summary)
+        self.errors = errors
+        self.warnings = warnings
+
+    def evaluateCommand(self, cmd):
+        if cmd.rc != 0:
+            return FAILURE
+        if self.errors:
+            return FAILURE
+        if self.warnings:
+            return WARNINGS
+        return SUCCESS
+
+    def getText(self, cmd, results):
+        text = ["debuild"]
+        if cmd.rc != 0:
+            text.append("failed")
+        errors, warnings = self.errors, self.warnings
+        if warnings or errors:
+            text.append("lintian:")
+            if warnings:
+                text.append("%d warnin%s" % (warnings,
+                                             warnings == 1 and 'g' or 'gs'))
+            if errors:
+                text.append("%d erro%s" % (errors,
+                                           errors == 1 and 'r' or 'rs'))
+        return text
+
+    def getText2(self, cmd, results):
+        if cmd.rc != 0:
+            return ["debuild"]
+        if self.errors or self.warnings:
+            return ["%d lintian" % (self.errors + self.warnings)]
+        return []
+
+class RemovePYCs(ShellCommand):
+    name = "remove-.pyc"
+    command = 'find . -name "*.pyc" | xargs rm'
+    description = ["removing", ".pyc", "files"]
+    descriptionDone = ["remove", ".pycs"]

Added: vendor/buildbot/current/buildbot/steps/shell.py
===================================================================
--- vendor/buildbot/current/buildbot/steps/shell.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/steps/shell.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,273 @@
+# -*- test-case-name: buildbot.test.test_steps,buildbot.test.test_properties -*-
+
+import types, re
+from twisted.python import log
+from buildbot import util
+from buildbot.process.buildstep import LoggingBuildStep, RemoteShellCommand
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE
+
+class _BuildPropertyDictionary:
+    def __init__(self, build):
+        self.build = build
+    def __getitem__(self, name):
+        p = self.build.getProperty(name)
+        if p is None:
+            p = ""
+        return p
+
+class WithProperties(util.ComparableMixin):
+    """This is a marker class, used in ShellCommand's command= argument to
+    indicate that we want to interpolate a build property.
+    """
+
+    compare_attrs = ('fmtstring', 'args')
+
+    def __init__(self, fmtstring, *args):
+        self.fmtstring = fmtstring
+        self.args = args
+
+    def render(self, build):
+        if self.args:
+            strings = []
+            for name in self.args:
+                p = build.getProperty(name)
+                if p is None:
+                    p = ""
+                strings.append(p)
+            s = self.fmtstring % tuple(strings)
+        else:
+            s = self.fmtstring % _BuildPropertyDictionary(build)
+        return s
+
+class ShellCommand(LoggingBuildStep):
+    """I run a single shell command on the buildslave. I return FAILURE if
+    the exit code of that command is non-zero, SUCCESS otherwise. To change
+    this behavior, override my .evaluateCommand method.
+
+    By default, a failure of this step will mark the whole build as FAILURE.
+    To override this, give me an argument of flunkOnFailure=False .
+
+    I create a single Log named 'log' which contains the output of the
+    command. To create additional summary Logs, override my .createSummary
+    method.
+
+    The shell command I run (a list of argv strings) can be provided in
+    several ways:
+      - a class-level .command attribute
+      - a command= parameter to my constructor (overrides .command)
+      - set explicitly with my .setCommand() method (overrides both)
+
+    @ivar command: a list of argv strings (or WithProperties instances).
+                   This will be used by start() to create a
+                   RemoteShellCommand instance.
+
+    @ivar logfiles: a dict mapping log NAMEs to workdir-relative FILENAMEs
+                    of their corresponding logfiles. The contents of the file
+                    named FILENAME will be put into a LogFile named NAME, ina
+                    named FILENAME will be put into a LogFile named NAME, in
+                    is actually handled by our parent class LoggingBuildStep)
+
+    """
+
+    name = "shell"
+    description = None # set this to a list of short strings to override
+    descriptionDone = None # alternate description when the step is complete
+    command = None # set this to a command, or set in kwargs
+    # logfiles={} # you can also set 'logfiles' to a dictionary, and it
+    #               will be merged with any logfiles= argument passed in
+    #               to __init__
+
+    # override this on a specific ShellCommand if you want to let it fail
+    # without dooming the entire build to a status of FAILURE
+    flunkOnFailure = True
+
+    def __init__(self, workdir,
+                 description=None, descriptionDone=None,
+                 command=None,
+                 **kwargs):
+        # most of our arguments get passed through to the RemoteShellCommand
+        # that we create, but first strip out the ones that we pass to
+        # BuildStep (like haltOnFailure and friends), and a couple that we
+        # consume ourselves.
+        self.workdir = workdir # required by RemoteShellCommand
+        if description:
+            self.description = description
+        if isinstance(self.description, str):
+            self.description = [self.description]
+        if descriptionDone:
+            self.descriptionDone = descriptionDone
+        if isinstance(self.descriptionDone, str):
+            self.descriptionDone = [self.descriptionDone]
+        if command:
+            self.command = command
+
+        # pull out the ones that LoggingBuildStep wants, then upcall
+        buildstep_kwargs = {}
+        for k in kwargs.keys()[:]:
+            if k in self.__class__.parms:
+                buildstep_kwargs[k] = kwargs[k]
+                del kwargs[k]
+        LoggingBuildStep.__init__(self, **buildstep_kwargs)
+
+        # everything left over goes to the RemoteShellCommand
+        kwargs['workdir'] = workdir # including a copy of 'workdir'
+        self.remote_kwargs = kwargs
+
+
+    def setCommand(self, command):
+        self.command = command
+
+    def describe(self, done=False):
+        """Return a list of short strings to describe this step, for the
+        status display. This uses the first few words of the shell command.
+        You can replace this by setting .description in your subclass, or by
+        overriding this method to describe the step better.
+
+        @type  done: boolean
+        @param done: whether the command is complete or not, to improve the
+                     way the command is described. C{done=False} is used
+                     while the command is still running, so a single
+                     imperfect-tense verb is appropriate ('compiling',
+                     'testing', ...) C{done=True} is used when the command
+                     has finished, and the default getText() method adds some
+                     text, so a simple noun is appropriate ('compile',
+                     'tests' ...)
+        """
+
+        if done and self.descriptionDone is not None:
+            return self.descriptionDone
+        if self.description is not None:
+            return self.description
+
+        words = self.command
+        # TODO: handle WithProperties here
+        if isinstance(words, types.StringTypes):
+            words = words.split()
+        if len(words) < 1:
+            return ["???"]
+        if len(words) == 1:
+            return ["'%s'" % words[0]]
+        if len(words) == 2:
+            return ["'%s" % words[0], "%s'" % words[1]]
+        return ["'%s" % words[0], "%s" % words[1], "...'"]
+
+    def _interpolateProperties(self, command):
+        # interpolate any build properties into our command
+        if not isinstance(command, (list, tuple)):
+            return command
+        command_argv = []
+        for argv in command:
+            if isinstance(argv, WithProperties):
+                command_argv.append(argv.render(self.build))
+            else:
+                command_argv.append(argv)
+        return command_argv
+
+    def setupEnvironment(self, cmd):
+        # merge in anything from Build.slaveEnvironment . Earlier steps
+        # (perhaps ones which compile libraries or sub-projects that need to
+        # be referenced by later steps) can add keys to
+        # self.build.slaveEnvironment to affect later steps.
+        slaveEnv = self.build.slaveEnvironment
+        if slaveEnv:
+            if cmd.args['env'] is None:
+                cmd.args['env'] = {}
+            cmd.args['env'].update(slaveEnv)
+            # note that each RemoteShellCommand gets its own copy of the
+            # dictionary, so we shouldn't be affecting anyone but ourselves.
+
+    def checkForOldSlaveAndLogfiles(self):
+        if not self.logfiles:
+            return # doesn't matter
+        if not self.slaveVersionIsOlderThan("shell", "2.1"):
+            return # slave is new enough
+        # this buildslave is too old and will ignore the 'logfiles'
+        # argument. You'll either have to pull the logfiles manually
+        # (say, by using 'cat' in a separate RemoteShellCommand) or
+        # upgrade the buildslave.
+        msg1 = ("Warning: buildslave %s is too old "
+                "to understand logfiles=, ignoring it."
+               % self.getSlaveName())
+        msg2 = "You will have to pull this logfile (%s) manually."
+        log.msg(msg1)
+        for logname,remotefilename in self.logfiles.items():
+            newlog = self.addLog(logname)
+            newlog.addHeader(msg1 + "\n")
+            newlog.addHeader(msg2 % remotefilename + "\n")
+            newlog.finish()
+        # now prevent setupLogfiles() from adding them
+        self.logfiles = {}
+
+    def start(self):
+        # this block is specific to ShellCommands. subclasses that don't need
+        # to set up an argv array, an environment, or extra logfiles= (like
+        # the Source subclasses) can just skip straight to startCommand()
+        command = self._interpolateProperties(self.command)
+        assert isinstance(command, (list, tuple, str))
+        # create the actual RemoteShellCommand instance now
+        kwargs = self.remote_kwargs
+        kwargs['command'] = command
+        kwargs['logfiles'] = self.logfiles
+        cmd = RemoteShellCommand(**kwargs)
+        self.setupEnvironment(cmd)
+        self.checkForOldSlaveAndLogfiles()
+
+        self.startCommand(cmd)
+
+
+
+class TreeSize(ShellCommand):
+    name = "treesize"
+    command = ["du", "-s", "."]
+    kb = None
+
+    def commandComplete(self, cmd):
+        out = cmd.logs['stdio'].getText()
+        m = re.search(r'^(\d+)', out)
+        if m:
+            self.kb = int(m.group(1))
+
+    def evaluateCommand(self, cmd):
+        if cmd.rc != 0:
+            return FAILURE
+        if self.kb is None:
+            return WARNINGS # not sure how 'du' could fail, but whatever
+        return SUCCESS
+
+    def getText(self, cmd, results):
+        if self.kb is not None:
+            return ["treesize", "%d kb" % self.kb]
+        return ["treesize", "unknown"]
+
+class Configure(ShellCommand):
+
+    name = "configure"
+    haltOnFailure = 1
+    description = ["configuring"]
+    descriptionDone = ["configure"]
+    command = ["./configure"]
+
+class Compile(ShellCommand):
+
+    name = "compile"
+    haltOnFailure = 1
+    description = ["compiling"]
+    descriptionDone = ["compile"]
+    command = ["make", "all"]
+
+    OFFprogressMetrics = ('output',)
+    # things to track: number of files compiled, number of directories
+    # traversed (assuming 'make' is being used)
+
+    def createSummary(self, cmd):
+        # TODO: grep for the characteristic GCC warning/error lines and
+        # assemble them into a pair of buffers
+        pass
+
+class Test(ShellCommand):
+
+    name = "test"
+    warnOnFailure = 1
+    description = ["testing"]
+    descriptionDone = ["test"]
+    command = ["make", "test"]

Added: vendor/buildbot/current/buildbot/steps/source.py
===================================================================
--- vendor/buildbot/current/buildbot/steps/source.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/steps/source.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,932 @@
+# -*- test-case-name: buildbot.test.test_vc -*-
+
+import warnings
+from email.Utils import formatdate
+from twisted.python import log
+from buildbot.process.buildstep import LoggingBuildStep, LoggedRemoteCommand
+from buildbot.interfaces import BuildSlaveTooOldError
+from buildbot.status.builder import SKIPPED
+
+
+class Source(LoggingBuildStep):
+    """This is a base class to generate a source tree in the buildslave.
+    Each version control system has a specialized subclass, and is expected
+    to override __init__ and implement computeSourceRevision() and
+    startVC(). The class as a whole builds up the self.args dictionary, then
+    starts a LoggedRemoteCommand with those arguments.
+    """
+
+    # if the checkout fails, there's no point in doing anything else
+    haltOnFailure = True
+    notReally = False
+
+    branch = None # the default branch, should be set in __init__
+
+    def __init__(self, workdir, mode='update', alwaysUseLatest=False,
+                 timeout=20*60, retry=None, **kwargs):
+        """
+        @type  workdir: string
+        @param workdir: local directory (relative to the Builder's root)
+                        where the tree should be placed
+
+        @type  mode: string
+        @param mode: the kind of VC operation that is desired:
+           - 'update': specifies that the checkout/update should be
+             performed directly into the workdir. Each build is performed
+             in the same directory, allowing for incremental builds. This
+             minimizes disk space, bandwidth, and CPU time. However, it
+             may encounter problems if the build process does not handle
+             dependencies properly (if you must sometimes do a 'clean
+             build' to make sure everything gets compiled), or if source
+             files are deleted but generated files can influence test
+             behavior (e.g. python's .pyc files), or when source
+             directories are deleted but generated files prevent CVS from
+             removing them.
+
+           - 'copy': specifies that the source-controlled workspace
+             should be maintained in a separate directory (called the
+             'copydir'), using checkout or update as necessary. For each
+             build, a new workdir is created with a copy of the source
+             tree (rm -rf workdir; cp -r copydir workdir). This doubles
+             the disk space required, but keeps the bandwidth low
+             (update instead of a full checkout). A full 'clean' build
+             is performed each time.  This avoids any generated-file
+             build problems, but is still occasionally vulnerable to
+             problems such as a CVS repository being manually rearranged
+             (causing CVS errors on update) which are not an issue with
+             a full checkout.
+
+           - 'clobber': specifies that the working directory should be
+             deleted each time, necessitating a full checkout for each
+             build. This ensures a clean build off a complete checkout,
+             avoiding any of the problems described above, but is
+             bandwidth intensive, as the whole source tree must be
+             pulled down for each build.
+
+           - 'export': is like 'clobber', except that e.g. the 'cvs
+             export' command is used to create the working directory.
+             This command removes all VC metadata files (the
+             CVS/.svn/{arch} directories) from the tree, which is
+             sometimes useful for creating source tarballs (to avoid
+             including the metadata in the tar file). Not all VC systems
+             support export.
+
+        @type  alwaysUseLatest: boolean
+        @param alwaysUseLatest: whether to always update to the most
+        recent available sources for this build.
+
+        Normally the Source step asks its Build for a list of all
+        Changes that are supposed to go into the build, then computes a
+        'source stamp' (revision number or timestamp) that will cause
+        exactly that set of changes to be present in the checked out
+        tree. This is turned into, e.g., 'cvs update -D timestamp', or
+        'svn update -r revnum'. If alwaysUseLatest=True, bypass this
+        computation and always update to the latest available sources
+        for each build.
+
+        The source stamp helps avoid a race condition in which someone
+        commits a change after the master has decided to start a build
+        but before the slave finishes checking out the sources. At best
+        this results in a build which contains more changes than the
+        buildmaster thinks it has (possibly resulting in the wrong
+        person taking the blame for any problems that result); at worst
+        it can result in an incoherent set of sources (splitting a
+        non-atomic commit) which may not build at all.
+
+        @type  retry: tuple of ints (delay, repeats) (or None)
+        @param retry: if provided, VC update failures are re-attempted up
+                      to REPEATS times, with DELAY seconds between each
+                      attempt. Some users have slaves with poor connectivity
+                      to their VC repository, and they say that up to 80% of
+                      their build failures are due to transient network
+                      failures that could be handled by simply retrying a
+                      couple times.
+
+        """
+
+        LoggingBuildStep.__init__(self, **kwargs)
+
+        assert mode in ("update", "copy", "clobber", "export")
+        if retry:
+            delay, repeats = retry
+            assert isinstance(repeats, int)
+            assert repeats > 0
+        self.args = {'mode': mode,
+                     'workdir': workdir,
+                     'timeout': timeout,
+                     'retry': retry,
+                     'patch': None, # set during .start
+                     }
+        self.alwaysUseLatest = alwaysUseLatest
+
+        # Compute defaults for descriptions:
+        description = ["updating"]
+        descriptionDone = ["update"]
+        if mode == "clobber":
+            description = ["checkout"]
+            # "checking out" takes too much space in the status display
+            descriptionDone = ["checkout"]
+        elif mode == "export":
+            description = ["exporting"]
+            descriptionDone = ["export"]
+        self.description = description
+        self.descriptionDone = descriptionDone
+
+    def describe(self, done=False):
+        if done:
+            return self.descriptionDone
+        return self.description
+
+    def computeSourceRevision(self, changes):
+        """Each subclass must implement this method to do something more
+        precise than -rHEAD every time. For version control systems that use
+        repository-wide change numbers (SVN, P4), this can simply take the
+        maximum such number from all the changes involved in this build. For
+        systems that do not (CVS), it needs to create a timestamp based upon
+        the latest Change, the Build's treeStableTimer, and an optional
+        self.checkoutDelay value."""
+        return None
+
+    def start(self):
+        if self.notReally:
+            log.msg("faking %s checkout/update" % self.name)
+            self.step_status.setColor("green")
+            self.step_status.setText(["fake", self.name, "successful"])
+            self.addCompleteLog("log",
+                                "Faked %s checkout/update 'successful'\n" \
+                                % self.name)
+            return SKIPPED
+
+        # what source stamp would this build like to use?
+        s = self.build.getSourceStamp()
+        # if branch is None, then use the Step's "default" branch
+        branch = s.branch or self.branch
+        # if revision is None, use the latest sources (-rHEAD)
+        revision = s.revision
+        if not revision and not self.alwaysUseLatest:
+            revision = self.computeSourceRevision(s.changes)
+        # if patch is None, then do not patch the tree after checkout
+
+        # 'patch' is None or a tuple of (patchlevel, diff)
+        patch = s.patch
+
+        self.startVC(branch, revision, patch)
+
+    def commandComplete(self, cmd):
+        got_revision = None
+        if cmd.updates.has_key("got_revision"):
+            got_revision = cmd.updates["got_revision"][-1]
+        self.setProperty("got_revision", got_revision)
+
+
+
+class CVS(Source):
+    """I do CVS checkout/update operations.
+
+    Note: if you are doing anonymous/pserver CVS operations, you will need
+    to manually do a 'cvs login' on each buildslave before the slave has any
+    hope of success. XXX: fix then, take a cvs password as an argument and
+    figure out how to do a 'cvs login' on each build
+    """
+
+    name = "cvs"
+
+    #progressMetrics = ('output',)
+    #
+    # additional things to track: update gives one stderr line per directory
+    # (starting with 'cvs server: Updating ') (and is fairly stable if files
+    # is empty), export gives one line per directory (starting with 'cvs
+    # export: Updating ') and another line per file (starting with U). Would
+    # be nice to track these, requires grepping LogFile data for lines,
+    # parsing each line. Might be handy to have a hook in LogFile that gets
+    # called with each complete line.
+
+    def __init__(self, cvsroot, cvsmodule, 
+                 global_options=[], branch=None, checkoutDelay=None,
+                 login=None,
+                 clobber=0, export=0, copydir=None,
+                 **kwargs):
+
+        """
+        @type  cvsroot: string
+        @param cvsroot: CVS Repository from which the source tree should
+                        be obtained. '/home/warner/Repository' for local
+                        or NFS-reachable repositories,
+                        ':pserver:anon at foo.com:/cvs' for anonymous CVS,
+                        'user at host.com:/cvs' for non-anonymous CVS or
+                        CVS over ssh. Lots of possibilities, check the
+                        CVS documentation for more.
+
+        @type  cvsmodule: string
+        @param cvsmodule: subdirectory of CVS repository that should be
+                          retrieved
+
+        @type  login: string or None
+        @param login: if not None, a string which will be provided as a
+                      password to the 'cvs login' command, used when a
+                      :pserver: method is used to access the repository.
+                      This login is only needed once, but must be run
+                      each time (just before the CVS operation) because
+                      there is no way for the buildslave to tell whether
+                      it was previously performed or not.
+
+        @type  branch: string
+        @param branch: the default branch name, will be used in a '-r'
+                       argument to specify which branch of the source tree
+                       should be used for this checkout. Defaults to None,
+                       which means to use 'HEAD'.
+
+        @type  checkoutDelay: int or None
+        @param checkoutDelay: if not None, the number of seconds to put
+                              between the last known Change and the
+                              timestamp given to the -D argument. This
+                              defaults to exactly half of the parent
+                              Build's .treeStableTimer, but it could be
+                              set to something else if your CVS change
+                              notification has particularly weird
+                              latency characteristics.
+
+        @type  global_options: list of strings
+        @param global_options: these arguments are inserted in the cvs
+                               command line, before the
+                               'checkout'/'update' command word. See
+                               'cvs --help-options' for a list of what
+                               may be accepted here.  ['-r'] will make
+                               the checked-out files read-only. ['-r',
+                               '-R'] will also assume the repository is
+                               read-only (I assume this means it won't
+                               use locks to ensure atomic access to the
+                               ,v files)."""
+
+        self.checkoutDelay = checkoutDelay
+        self.branch = branch
+
+        if not kwargs.has_key('mode') and (clobber or export or copydir):
+            # deal with old configs
+            warnings.warn("Please use mode=, not clobber/export/copydir",
+                          DeprecationWarning)
+            if export:
+                kwargs['mode'] = "export"
+            elif clobber:
+                kwargs['mode'] = "clobber"
+            elif copydir:
+                kwargs['mode'] = "copy"
+            else:
+                kwargs['mode'] = "update"
+
+        Source.__init__(self, **kwargs)
+
+        self.args.update({'cvsroot': cvsroot,
+                          'cvsmodule': cvsmodule,
+                          'global_options': global_options,
+                          'login': login,
+                          })
+
+    def computeSourceRevision(self, changes):
+        if not changes:
+            return None
+        lastChange = max([c.when for c in changes])
+        if self.checkoutDelay is not None:
+            when = lastChange + self.checkoutDelay
+        else:
+            lastSubmit = max([r.submittedAt for r in self.build.requests])
+            when = (lastChange + lastSubmit) / 2
+        return formatdate(when)
+
+    def startVC(self, branch, revision, patch):
+        if self.slaveVersionIsOlderThan("cvs", "1.39"):
+            # the slave doesn't know to avoid re-using the same sourcedir
+            # when the branch changes. We have no way of knowing which branch
+            # the last build used, so if we're using a non-default branch and
+            # either 'update' or 'copy' modes, it is safer to refuse to
+            # build, and tell the user they need to upgrade the buildslave.
+            if (branch != self.branch
+                and self.args['mode'] in ("update", "copy")):
+                m = ("This buildslave (%s) does not know about multiple "
+                     "branches, and using mode=%s would probably build the "
+                     "wrong tree. "
+                     "Refusing to build. Please upgrade the buildslave to "
+                     "buildbot-0.7.0 or newer." % (self.build.slavename,
+                                                   self.args['mode']))
+                log.msg(m)
+                raise BuildSlaveTooOldError(m)
+
+        if branch is None:
+            branch = "HEAD"
+        self.args['branch'] = branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+
+        if self.args['branch'] == "HEAD" and self.args['revision']:
+            # special case. 'cvs update -r HEAD -D today' gives no files
+            # TODO: figure out why, see if it applies to -r BRANCH
+            self.args['branch'] = None
+
+        # deal with old slaves
+        warnings = []
+        slavever = self.slaveVersion("cvs", "old")
+
+        if slavever == "old":
+            # 0.5.0
+            if self.args['mode'] == "export":
+                self.args['export'] = 1
+            elif self.args['mode'] == "clobber":
+                self.args['clobber'] = 1
+            elif self.args['mode'] == "copy":
+                self.args['copydir'] = "source"
+            self.args['tag'] = self.args['branch']
+            assert not self.args['patch'] # 0.5.0 slave can't do patch
+
+        cmd = LoggedRemoteCommand("cvs", self.args)
+        self.startCommand(cmd, warnings)
+
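For illustration, a master.cfg fragment might configure this step roughly as
follows (hostnames, paths, and module names are hypothetical; s() is assumed
to be the step-specification helper from buildbot.process.factory, as used in
the Darcs docstring below):

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.source import CVS

    f = BuildFactory([
        s(CVS,
          cvsroot=":pserver:anonymous@cvs.example.org:/cvsroot/proj",
          cvsmodule="proj",
          branch=None,      # None means HEAD
          mode="update"),
        ])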
+
+class SVN(Source):
+    """I perform Subversion checkout/update operations."""
+
+    name = 'svn'
+
+    def __init__(self, svnurl=None, baseURL=None, defaultBranch=None,
+                 directory=None, **kwargs):
+        """
+        @type  svnurl: string
+        @param svnurl: the URL which points to the Subversion server,
+                       combining the access method (HTTP, ssh, local file),
+                       the repository host/port, the repository path, the
+                       sub-tree within the repository, and the branch to
+                       check out. Using C{svnurl} does not enable builds of
+                       alternate branches: use C{baseURL} to enable this.
+                       Use exactly one of C{svnurl} and C{baseURL}.
+
+        @param baseURL: if branches are enabled, this is the base URL to
+                        which a branch name will be appended. It should
+                        probably end in a slash. Use exactly one of
+                        C{svnurl} and C{baseURL}.
+                         
+        @param defaultBranch: if branches are enabled, this is the branch
+                              to use if the Build does not specify one
+                              explicitly. It will simply be appended
+                              to C{baseURL} and the result handed to
+                              the SVN command.
+        """
+
+        if not kwargs.has_key('workdir') and directory is not None:
+            # deal with old configs
+            warnings.warn("Please use workdir=, not directory=",
+                          DeprecationWarning)
+            kwargs['workdir'] = directory
+
+        self.svnurl = svnurl
+        self.baseURL = baseURL
+        self.branch = defaultBranch
+
+        Source.__init__(self, **kwargs)
+
+        if not svnurl and not baseURL:
+            raise ValueError("you must use exactly one of svnurl and baseURL")
+
+
+    def computeSourceRevision(self, changes):
+        if not changes:
+            return None
+        lastChange = max([int(c.revision) for c in changes])
+        return lastChange
+
+    def startVC(self, branch, revision, patch):
+
+        # handle old slaves
+        warnings = []
+        slavever = self.slaveVersion("svn", "old")
+        if not slavever:
+            m = "slave does not have the 'svn' command"
+            raise BuildSlaveTooOldError(m)
+
+        if self.slaveVersionIsOlderThan("svn", "1.39"):
+            # the slave doesn't know to avoid re-using the same sourcedir
+            # when the branch changes. We have no way of knowing which branch
+            # the last build used, so if we're using a non-default branch and
+            # either 'update' or 'copy' modes, it is safer to refuse to
+            # build, and tell the user they need to upgrade the buildslave.
+            if (branch != self.branch
+                and self.args['mode'] in ("update", "copy")):
+                m = ("This buildslave (%s) does not know about multiple "
+                     "branches, and using mode=%s would probably build the "
+                     "wrong tree. "
+                     "Refusing to build. Please upgrade the buildslave to "
+                     "buildbot-0.7.0 or newer." % (self.build.slavename,
+                                                   self.args['mode']))
+                raise BuildSlaveTooOldError(m)
+
+        if slavever == "old":
+            # 0.5.0 compatibility
+            if self.args['mode'] in ("clobber", "copy"):
+                # TODO: use some shell commands to make up for the
+                # deficiency, by blowing away the old directory first (thus
+                # forcing a full checkout)
+                warnings.append("WARNING: this slave can only do SVN updates"
+                                ", not mode=%s\n" % self.args['mode'])
+                log.msg("WARNING: this slave only does mode=update")
+            if self.args['mode'] == "export":
+                raise BuildSlaveTooOldError("old slave does not have "
+                                            "mode=export")
+            self.args['directory'] = self.args['workdir']
+            if revision is not None:
+                # 0.5.0 can only do HEAD. We have no way of knowing whether
+                # the requested revision is HEAD or not, and for
+                # slowly-changing trees this will probably do the right
+                # thing, so let it pass with a warning
+                m = ("WARNING: old slave can only update to HEAD, not "
+                     "revision=%s" % revision)
+                log.msg(m)
+                warnings.append(m + "\n")
+            revision = "HEAD" # the old slave interprets this key differently
+            if patch:
+                raise BuildSlaveTooOldError("old slave can't do patch")
+
+        if self.svnurl:
+            assert not branch # we need baseURL= to use branches
+            self.args['svnurl'] = self.svnurl
+        else:
+            self.args['svnurl'] = self.baseURL + branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+
+        revstuff = []
+        if branch is not None and branch != self.branch:
+            revstuff.append("[branch]")
+        if revision is not None:
+            revstuff.append("r%s" % revision)
+        self.description.extend(revstuff)
+        self.descriptionDone.extend(revstuff)
+
+        cmd = LoggedRemoteCommand("svn", self.args)
+        self.startCommand(cmd, warnings)
+
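As an illustrative sketch (hypothetical URLs, not part of the vendored file):
the first form pins a single branch with svnurl, the second enables per-build
branches by appending the branch name to baseURL:

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.source import SVN

    # single-branch builder
    trunk_only = BuildFactory([
        s(SVN, svnurl="http://svn.example.org/repos/proj/trunk", mode="copy"),
        ])

    # branch-aware builder: a Build's branch name is appended to baseURL
    branch_aware = BuildFactory([
        s(SVN, baseURL="http://svn.example.org/repos/proj/",
          defaultBranch="trunk", mode="update"),
        ])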
+
+class Darcs(Source):
+    """Check out a source tree from a Darcs repository at 'repourl'.
+
+    To the best of my knowledge, Darcs has no concept of file modes. This
+    means the eXecute-bit will be cleared on all source files. As a result,
+    you may need to invoke configuration scripts with something like:
+
+    C{s(step.Configure, command=['/bin/sh', './configure'])}
+    """
+
+    name = "darcs"
+
+    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
+                 **kwargs):
+        """
+        @type  repourl: string
+        @param repourl: the URL which points at the Darcs repository. This
+                        is used as the default branch. Using C{repourl} does
+                        not enable builds of alternate branches: use
+                        C{baseURL} to enable this. Use either C{repourl} or
+                        C{baseURL}, not both.
+
+        @param baseURL: if branches are enabled, this is the base URL to
+                        which a branch name will be appended. It should
+                        probably end in a slash. Use exactly one of
+                        C{repourl} and C{baseURL}.
+                         
+        @param defaultBranch: if branches are enabled, this is the branch
+                              to use if the Build does not specify one
+                              explicitly. It will simply be appended to
+                              C{baseURL} and the result handed to the
+                              'darcs pull' command.
+        """
+        self.repourl = repourl
+        self.baseURL = baseURL
+        self.branch = defaultBranch
+        Source.__init__(self, **kwargs)
+        assert kwargs['mode'] != "export", \
+               "Darcs does not have an 'export' mode"
+        if (not repourl and not baseURL) or (repourl and baseURL):
+            raise ValueError("you must provide exactly one of repourl and"
+                             " baseURL")
+
+    def startVC(self, branch, revision, patch):
+        slavever = self.slaveVersion("darcs")
+        if not slavever:
+            m = "slave is too old, does not know about darcs"
+            raise BuildSlaveTooOldError(m)
+
+        if self.slaveVersionIsOlderThan("darcs", "1.39"):
+            if revision:
+                # TODO: revisit this once we implement computeSourceRevision
+                m = "0.6.6 slaves can't handle args['revision']"
+                raise BuildSlaveTooOldError(m)
+
+            # the slave doesn't know to avoid re-using the same sourcedir
+            # when the branch changes. We have no way of knowing which branch
+            # the last build used, so if we're using a non-default branch and
+            # either 'update' or 'copy' modes, it is safer to refuse to
+            # build, and tell the user they need to upgrade the buildslave.
+            if (branch != self.branch
+                and self.args['mode'] in ("update", "copy")):
+                m = ("This buildslave (%s) does not know about multiple "
+                     "branches, and using mode=%s would probably build the "
+                     "wrong tree. "
+                     "Refusing to build. Please upgrade the buildslave to "
+                     "buildbot-0.7.0 or newer." % (self.build.slavename,
+                                                   self.args['mode']))
+                raise BuildSlaveTooOldError(m)
+
+        if self.repourl:
+            assert not branch # we need baseURL= to use branches
+            self.args['repourl'] = self.repourl
+        else:
+            self.args['repourl'] = self.baseURL + branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+
+        revstuff = []
+        if branch is not None and branch != self.branch:
+            revstuff.append("[branch]")
+        self.description.extend(revstuff)
+        self.descriptionDone.extend(revstuff)
+
+        cmd = LoggedRemoteCommand("darcs", self.args)
+        self.startCommand(cmd)
+
+
+class Git(Source):
+    """Check out a source tree from a git repository 'repourl'."""
+
+    name = "git"
+
+    def __init__(self, repourl, **kwargs):
+        """
+        @type  repourl: string
+        @param repourl: the URL which points at the git repository
+        """
+        self.branch = None # TODO
+        Source.__init__(self, **kwargs)
+        self.args['repourl'] = repourl
+
+    def startVC(self, branch, revision, patch):
+        self.args['branch'] = branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+        slavever = self.slaveVersion("git")
+        if not slavever:
+            raise BuildSlaveTooOldError("slave is too old, does not know "
+                                        "about git")
+        cmd = LoggedRemoteCommand("git", self.args)
+        self.startCommand(cmd)
+
+
+class Arch(Source):
+    """Check out a source tree from an Arch repository named 'archive'
+    available at 'url'. 'version' specifies which version number (development
+    line) will be used for the checkout: this is mostly equivalent to a
+    branch name. This version uses the 'tla' tool to do the checkout, to use
+    'baz' see L{Bazaar} instead.
+    """
+
+    name = "arch"
+    # TODO: slaves >0.6.6 will accept args['build-config'], so use it
+
+    def __init__(self, url, version, archive=None, **kwargs):
+        """
+        @type  url: string
+        @param url: the Arch coordinates of the repository. This is
+                    typically an http:// URL, but could also be the absolute
+                    pathname of a local directory instead.
+
+        @type  version: string
+        @param version: the category--branch--version to check out. This is
+                        the default branch. If a build specifies a different
+                        branch, it will be used instead of this.
+
+        @type  archive: string
+        @param archive: The archive name. If provided, it must match the one
+                        that comes from the repository. If not, the
+                        repository's default will be used.
+        """
+        self.branch = version
+        Source.__init__(self, **kwargs)
+        self.args.update({'url': url,
+                          'archive': archive,
+                          })
+
+    def computeSourceRevision(self, changes):
+        # in Arch, fully-qualified revision numbers look like:
+        #  arch@buildbot.sourceforge.net--2004/buildbot--dev--0--patch-104
+        # For any given builder, all of this is fixed except the patch-104.
+        # The Change might have any part of the fully-qualified string, so we
+        # just look for the last part. We return the "patch-NN" string.
+        if not changes:
+            return None
+        lastChange = None
+        for c in changes:
+            if not c.revision:
+                continue
+            if c.revision.endswith("--base-0"):
+                rev = 0
+            else:
+                i = c.revision.rindex("patch")
+                rev = int(c.revision[i+len("patch-"):])
+            lastChange = max(lastChange, rev)
+        if lastChange is None:
+            return None
+        if lastChange == 0:
+            return "base-0"
+        return "patch-%d" % lastChange
+
+    def checkSlaveVersion(self, cmd, branch):
+        warnings = []
+        slavever = self.slaveVersion(cmd)
+        if not slavever:
+            m = "slave is too old, does not know about %s" % cmd
+            raise BuildSlaveTooOldError(m)
+
+        # slave 1.28 and later understand 'revision'
+        if self.slaveVersionIsOlderThan(cmd, "1.28"):
+            if not self.alwaysUseLatest:
+                # we don't know whether our requested revision is the latest
+                # or not. If the tree does not change very quickly, this will
+                # probably build the right thing, so emit a warning rather
+                # than refuse to build at all
+                m = "WARNING, buildslave is too old to use a revision"
+                log.msg(m)
+                warnings.append(m + "\n")
+
+        if self.slaveVersionIsOlderThan(cmd, "1.39"):
+            # the slave doesn't know to avoid re-using the same sourcedir
+            # when the branch changes. We have no way of knowing which branch
+            # the last build used, so if we're using a non-default branch and
+            # either 'update' or 'copy' modes, it is safer to refuse to
+            # build, and tell the user they need to upgrade the buildslave.
+            if (branch != self.branch
+                and self.args['mode'] in ("update", "copy")):
+                m = ("This buildslave (%s) does not know about multiple "
+                     "branches, and using mode=%s would probably build the "
+                     "wrong tree. "
+                     "Refusing to build. Please upgrade the buildslave to "
+                     "buildbot-0.7.0 or newer." % (self.build.slavename,
+                                                   self.args['mode']))
+                log.msg(m)
+                raise BuildSlaveTooOldError(m)
+
+        return warnings
+
+    def startVC(self, branch, revision, patch):
+        self.args['version'] = branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+        warnings = self.checkSlaveVersion("arch", branch)
+
+        revstuff = []
+        if branch is not None and branch != self.branch:
+            revstuff.append("[branch]")
+        if revision is not None:
+            revstuff.append("patch%s" % revision)
+        self.description.extend(revstuff)
+        self.descriptionDone.extend(revstuff)
+
+        cmd = LoggedRemoteCommand("arch", self.args)
+        self.startCommand(cmd, warnings)
+
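A hypothetical configuration sketch (archive coordinates invented for
illustration), following the same s() convention:

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.source import Arch

    f = BuildFactory([
        s(Arch, url="http://arch.example.org/archives/proj",
          version="proj--devel--1.0",
          archive="dev@example.org--2004"),
        ])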
+
+class Bazaar(Arch):
+    """Bazaar is an alternative client for Arch repositories. baz is mostly
+    compatible with tla, but archive registration is slightly different."""
+
+    # TODO: slaves >0.6.6 will accept args['build-config'], so use it
+
+    def __init__(self, url, version, archive, **kwargs):
+        """
+        @type  url: string
+        @param url: the Arch coordinates of the repository. This is
+                    typically an http:// URL, but could also be the absolute
+                    pathname of a local directory instead.
+
+        @type  version: string
+        @param version: the category--branch--version to check out
+
+        @type  archive: string
+        @param archive: The archive name (required). This must always match
+                        the one that comes from the repository, otherwise the
+                        buildslave will attempt to get sources from the wrong
+                        archive.
+        """
+        self.branch = version
+        Source.__init__(self, **kwargs)
+        self.args.update({'url': url,
+                          'archive': archive,
+                          })
+
+    def startVC(self, branch, revision, patch):
+        self.args['version'] = branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+        warnings = self.checkSlaveVersion("bazaar", branch)
+
+        revstuff = []
+        if branch is not None and branch != self.branch:
+            revstuff.append("[branch]")
+        if revision is not None:
+            revstuff.append("patch%s" % revision)
+        self.description.extend(revstuff)
+        self.descriptionDone.extend(revstuff)
+
+        cmd = LoggedRemoteCommand("bazaar", self.args)
+        self.startCommand(cmd, warnings)
+
+class Mercurial(Source):
+    """Check out a source tree from a mercurial repository 'repourl'."""
+
+    name = "hg"
+
+    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
+                 **kwargs):
+        """
+        @type  repourl: string
+        @param repourl: the URL which points at the Mercurial repository.
+                        This is used as the default branch. Using C{repourl}
+                        does not enable builds of alternate branches: use
+                        C{baseURL} to enable this. Use either C{repourl} or
+                        C{baseURL}, not both.
+
+        @param baseURL: if branches are enabled, this is the base URL to
+                        which a branch name will be appended. It should
+                        probably end in a slash. Use exactly one of
+                        C{repourl} and C{baseURL}.
+
+        @param defaultBranch: if branches are enabled, this is the branch
+                              to use if the Build does not specify one
+                              explicitly. It will simply be appended to
+                              C{baseURL} and the result handed to the
+                              'hg clone' command.
+        """
+        self.repourl = repourl
+        self.baseURL = baseURL
+        self.branch = defaultBranch
+        Source.__init__(self, **kwargs)
+        if (not repourl and not baseURL) or (repourl and baseURL):
+            raise ValueError("you must provide exactly one of repourl and"
+                             " baseURL")
+
+    def startVC(self, branch, revision, patch):
+        slavever = self.slaveVersion("hg")
+        if not slavever:
+            raise BuildSlaveTooOldError("slave is too old, does not know "
+                                        "about hg")
+
+        if self.repourl:
+            assert not branch # we need baseURL= to use branches
+            self.args['repourl'] = self.repourl
+        else:
+            self.args['repourl'] = self.baseURL + branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+
+        revstuff = []
+        if branch is not None and branch != self.branch:
+            revstuff.append("[branch]")
+        self.description.extend(revstuff)
+        self.descriptionDone.extend(revstuff)
+
+        cmd = LoggedRemoteCommand("hg", self.args)
+        self.startCommand(cmd)
+
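A hypothetical sketch using the branch-aware form, where 'hg clone' is handed
baseURL plus the branch name:

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.source import Mercurial

    f = BuildFactory([
        s(Mercurial, baseURL="http://hg.example.org/repos/",
          defaultBranch="proj-trunk", mode="clobber"),
        ])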
+
+class P4(Source):
+    """P4 is a class for accessing Perforce revision control."""
+    name = "p4"
+
+    def __init__(self, p4base, defaultBranch=None, p4port=None, p4user=None,
+                 p4passwd=None, p4extra_views=[],
+                 p4client='buildbot_%(slave)s_%(builder)s', **kwargs):
+        """
+        @type  p4base: string
+        @param p4base: A view into a perforce depot, typically
+                       "//depot/proj/"
+
+        @type  defaultBranch: string
+        @param defaultBranch: Identify a branch to build by default. Perforce
+                              is a view based branching system. So, the branch
+                              is normally the name after the base. For example,
+                              branch=1.0 is view=//depot/proj/1.0/...
+                              branch=1.1 is view=//depot/proj/1.1/...
+
+        @type  p4port: string
+        @param p4port: Specify the perforce server to connect to, in the format
+                       <host>:<port>. Example "perforce.example.com:1666"
+
+        @type  p4user: string
+        @param p4user: The perforce user to run the command as.
+
+        @type  p4passwd: string
+        @param p4passwd: The password for the perforce user.
+
+        @type  p4extra_views: list of tuples
+        @param p4extra_views: Extra views to be added to
+                              the client that is being used.
+
+        @type  p4client: string
+        @param p4client: The perforce client to use for this buildslave.
+        """
+
+        self.branch = defaultBranch
+        Source.__init__(self, **kwargs)
+        self.args['p4port'] = p4port
+        self.args['p4user'] = p4user
+        self.args['p4passwd'] = p4passwd
+        self.args['p4base'] = p4base
+        self.args['p4extra_views'] = p4extra_views
+        self.args['p4client'] = p4client % {
+            'slave': self.build.slavename,
+            'builder': self.build.builder.name,
+        }
+
+    def computeSourceRevision(self, changes):
+        if not changes:
+            return None
+        lastChange = max([int(c.revision) for c in changes])
+        return lastChange
+
+    def startVC(self, branch, revision, patch):
+        slavever = self.slaveVersion("p4")
+        assert slavever, "slave is too old, does not know about p4"
+        args = dict(self.args)
+        args['branch'] = branch or self.branch
+        args['revision'] = revision
+        args['patch'] = patch
+        cmd = LoggedRemoteCommand("p4", args)
+        self.startCommand(cmd)
+
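An illustrative sketch (server, credentials, and depot path are hypothetical);
note how the p4client template interpolates the slave and builder names so
that each builder gets its own client spec:

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.source import P4

    f = BuildFactory([
        s(P4, p4base="//depot/proj/", defaultBranch="trunk",
          p4port="perforce.example.com:1666",
          p4user="buildbot", p4passwd="secret",
          p4client="buildbot_%(slave)s_%(builder)s"),
        ])
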
+class P4Sync(Source):
+    """This is a partial solution for using a P4 source repository. You are
+    required to manually set up each build slave with a useful P4
+    environment, which means setting various per-slave environment variables,
+    and creating a P4 client specification which maps the right files into
+    the slave's working directory. Once you have done that, this step merely
+    performs a 'p4 sync' to update that workspace with the newest files.
+
+    Each slave needs the following environment:
+
+     - PATH: the 'p4' binary must be on the slave's PATH
+     - P4USER: each slave needs a distinct user account
+     - P4CLIENT: each slave needs a distinct client specification
+
+    You should use 'p4 client' (?) to set up a client view spec which maps
+    the desired files into $SLAVEBASE/$BUILDERBASE/source.
+    """
+
+    name = "p4sync"
+
+    def __init__(self, p4port, p4user, p4passwd, p4client, **kwargs):
+        assert kwargs['mode'] == "copy", "P4Sync can only be used in mode=copy"
+        self.branch = None
+        Source.__init__(self, **kwargs)
+        self.args['p4port'] = p4port
+        self.args['p4user'] = p4user
+        self.args['p4passwd'] = p4passwd
+        self.args['p4client'] = p4client
+
+    def computeSourceRevision(self, changes):
+        if not changes:
+            return None
+        lastChange = max([int(c.revision) for c in changes])
+        return lastChange
+
+    def startVC(self, branch, revision, patch):
+        slavever = self.slaveVersion("p4sync")
+        assert slavever, "slave is too old, does not know about p4"
+        cmd = LoggedRemoteCommand("p4sync", self.args)
+        self.startCommand(cmd)
+
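A hypothetical sketch; P4Sync insists on mode="copy", and the per-slave
P4USER/P4CLIENT environment described above must already be in place:

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.source import P4Sync

    f = BuildFactory([
        s(P4Sync, p4port="perforce.example.com:1666", p4user="buildbot",
          p4passwd="secret", p4client="buildbot-slave1", mode="copy"),
        ])
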
+class Monotone(Source):
+    """Check out a revision from a monotone server at 'server_addr',
+    branch 'branch'.  'revision' specifies which revision id to check
+    out.
+
+    This step will first create a local database, if necessary, and then pull
+    the contents of the server into the database.  Then it will do the
+    checkout/update from this database."""
+
+    name = "monotone"
+
+    def __init__(self, server_addr, branch, db_path="monotone.db",
+                 monotone="monotone",
+                 **kwargs):
+        Source.__init__(self, **kwargs)
+        self.args.update({"server_addr": server_addr,
+                          "branch": branch,
+                          "db_path": db_path,
+                          "monotone": monotone})
+
+    def computeSourceRevision(self, changes):
+        if not changes:
+            return None
+        return changes[-1].revision
+
+    def startVC(self, branch, revision, patch):
+        slavever = self.slaveVersion("monotone")
+        assert slavever, "slave is too old, does not know about monotone"
+        cmd = LoggedRemoteCommand("monotone", self.args)
+        self.startCommand(cmd)
+
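A hypothetical sketch (server and branch names invented for illustration):

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.source import Monotone

    f = BuildFactory([
        s(Monotone, server_addr="monotone.example.org",
          branch="org.example.proj", db_path="proj.db"),
        ])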

Added: vendor/buildbot/current/buildbot/steps/transfer.py
===================================================================
--- vendor/buildbot/current/buildbot/steps/transfer.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/steps/transfer.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,277 @@
+# -*- test-case-name: buildbot.test.test_transfer -*-
+
+import os.path
+from twisted.internet import reactor
+from twisted.spread import pb
+from twisted.python import log
+from buildbot.process.buildstep import RemoteCommand, BuildStep
+from buildbot.process.buildstep import SUCCESS, FAILURE
+from buildbot.interfaces import BuildSlaveTooOldError
+
+
+class _FileWriter(pb.Referenceable):
+    """
+    Helper class that acts as a file-object with write access
+    """
+
+    def __init__(self, destfile, maxsize, mode):
+        self.destfile = destfile
+        self.fp = open(destfile, "w")
+        if mode is not None:
+            os.chmod(destfile, mode)
+        self.remaining = maxsize
+
+    def remote_write(self, data):
+        """
+        Called from remote slave to write L{data} to L{fp} within boundaries
+        of L{maxsize}
+
+        @type  data: C{string}
+        @param data: String of data to write
+        """
+        if self.remaining is not None:
+            if len(data) > self.remaining:
+                data = data[:self.remaining]
+            self.fp.write(data)
+            self.remaining = self.remaining - len(data)
+        else:
+            self.fp.write(data)
+
+    def remote_close(self):
+        """
+        Called by remote slave to state that no more data will be transferred
+        """
+        self.fp.close()
+        self.fp = None
+
+    def __del__(self):
+        # unclean shutdown, the file is probably truncated, so delete it
+        # altogether rather than deliver a corrupted file
+        fp = getattr(self, "fp", None)
+        if fp:
+            fp.close()
+            os.unlink(self.destfile)
+
+
+class StatusRemoteCommand(RemoteCommand):
+    def __init__(self, remote_command, args):
+        RemoteCommand.__init__(self, remote_command, args)
+
+        self.rc = None
+        self.stderr = ''
+
+    def remoteUpdate(self, update):
+        #log.msg('StatusRemoteCommand: update=%r' % update)
+        if 'rc' in update:
+            self.rc = update['rc']
+        if 'stderr' in update:
+            self.stderr = self.stderr + update['stderr'] + '\n'
+
+
+class FileUpload(BuildStep):
+    """
+    Build step to transfer a file from the slave to the master.
+
+    arguments:
+
+    - ['slavesrc']   filename of source file at slave, relative to workdir
+    - ['masterdest'] filename of destination file at master
+    - ['workdir']    string with slave working directory relative to builder
+                     base dir, default 'build'
+    - ['maxsize']    maximum size of the file, default None (=unlimited)
+    - ['blocksize']  maximum size of each block being transferred
+    - ['mode']       file access mode for the resulting master-side file.
+                     The default (=None) is to leave it up to the umask of
+                     the buildmaster process.
+
+    """
+
+    name = 'upload'
+
+    def __init__(self, build, slavesrc, masterdest,
+                 workdir="build", maxsize=None, blocksize=16*1024, mode=None,
+                 **buildstep_kwargs):
+        BuildStep.__init__(self, build, **buildstep_kwargs)
+
+        self.slavesrc = slavesrc
+        self.masterdest = masterdest
+        self.workdir = workdir
+        self.maxsize = maxsize
+        self.blocksize = blocksize
+        assert isinstance(mode, (int, type(None)))
+        self.mode = mode
+
+    def start(self):
+        version = self.slaveVersion("uploadFile")
+        if not version:
+            m = "slave is too old, does not know about uploadFile"
+            raise BuildSlaveTooOldError(m)
+
+        source = self.slavesrc
+        masterdest = self.masterdest
+        # we rely upon the fact that the buildmaster runs chdir'ed into its
+        # basedir to make sure that relative paths in masterdest are expanded
+        # properly. TODO: maybe pass the master's basedir all the way down
+        # into the BuildStep so we can do this better.
+        target = os.path.expanduser(masterdest)
+        log.msg("FileUpload started, from slave %r to master %r"
+                % (source, target))
+
+        self.step_status.setColor('yellow')
+        self.step_status.setText(['uploading', os.path.basename(source)])
+
+        # we use maxsize to limit the amount of data on both sides
+        fileWriter = _FileWriter(target, self.maxsize, self.mode)
+
+        # default arguments
+        args = {
+            'slavesrc': source,
+            'workdir': self.workdir,
+            'writer': fileWriter,
+            'maxsize': self.maxsize,
+            'blocksize': self.blocksize,
+            }
+
+        self.cmd = StatusRemoteCommand('uploadFile', args)
+        d = self.runCommand(self.cmd)
+        d.addCallback(self.finished).addErrback(self.failed)
+
+    def finished(self, result):
+        if self.cmd.stderr != '':
+            self.addCompleteLog('stderr', self.cmd.stderr)
+
+        if self.cmd.rc is None or self.cmd.rc == 0:
+            self.step_status.setColor('green')
+            return BuildStep.finished(self, SUCCESS)
+        self.step_status.setColor('red')
+        return BuildStep.finished(self, FAILURE)
+
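An illustrative sketch (paths and the preceding ShellCommand step are
hypothetical); masterdest is interpreted relative to the buildmaster's
basedir, as noted in start() above, and mode takes an octal integer:

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.shell import ShellCommand
    from buildbot.steps.transfer import FileUpload

    f = BuildFactory([
        s(ShellCommand, command=["make", "dist"]),
        s(FileUpload, slavesrc="dist/proj.tar.gz",
          masterdest="public_html/snapshots/proj.tar.gz",
          maxsize=50*1024*1024, mode=0644),
        ])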
+
+
+
+
+class _FileReader(pb.Referenceable):
+    """
+    Helper class that acts as a file-object with read access
+    """
+
+    def __init__(self, fp):
+        self.fp = fp
+
+    def remote_read(self, maxlength):
+        """
+        Called from remote slave to read at most L{maxlength} bytes of data
+
+        @type  maxlength: C{integer}
+        @param maxlength: Maximum number of data bytes that can be returned
+
+        @return: Data read from L{fp}
+        @rtype: C{string} of bytes read from file
+        """
+        if self.fp is None:
+            return ''
+
+        data = self.fp.read(maxlength)
+        return data
+
+    def remote_close(self):
+        """
+        Called by remote slave to state that no more data will be transferred
+        """
+        if self.fp is not None:
+            self.fp.close()
+            self.fp = None
+
+
+class FileDownload(BuildStep):
+    """
+    Download the first 'maxsize' bytes of a file, from the buildmaster to the
+    buildslave, and optionally set the mode of the resulting file.
+
+    Arguments::
+
+     ['mastersrc'] filename of source file at master
+     ['slavedest'] filename of destination file at slave
+     ['workdir']   string with slave working directory relative to builder
+                   base dir, default 'build'
+     ['maxsize']   maximum size of the file, default None (=unlimited)
+     ['blocksize'] maximum size of each block being transferred
+     ['mode']      use this to set the access permissions of the resulting
+                   buildslave-side file. This is traditionally an octal
+                   integer, like 0644 to be world-readable (but not
+                   world-writable), or 0600 to only be readable by
+                   the buildslave account, or 0755 to be world-executable.
+                   The default (=None) is to leave it up to the umask of
+                   the buildslave process.
+
+    """
+
+    name = 'download'
+
+    def __init__(self, build, mastersrc, slavedest,
+                 workdir="build", maxsize=None, blocksize=16*1024, mode=None,
+                 **buildstep_kwargs):
+        BuildStep.__init__(self, build, **buildstep_kwargs)
+
+        self.mastersrc = mastersrc
+        self.slavedest = slavedest
+        self.workdir = workdir
+        self.maxsize = maxsize
+        self.blocksize = blocksize
+        assert isinstance(mode, (int, type(None)))
+        self.mode = mode
+
+    def start(self):
+        version = self.slaveVersion("downloadFile")
+        if not version:
+            m = "slave is too old, does not know about downloadFile"
+            raise BuildSlaveTooOldError(m)
+
+        # we are currently in the buildmaster's basedir, so any non-absolute
+        # paths will be interpreted relative to that
+        source = os.path.expanduser(self.mastersrc)
+        slavedest = self.slavedest
+        log.msg("FileDownload started, from master %r to slave %r" %
+                (source, slavedest))
+
+        self.step_status.setColor('yellow')
+        self.step_status.setText(['downloading', "to",
+                                  os.path.basename(slavedest)])
+
+        # setup structures for reading the file
+        try:
+            fp = open(source, 'r')
+        except IOError:
+            # if file does not exist, bail out with an error
+            self.addCompleteLog('stderr',
+                                'File %r not available at master' % source)
+            # TODO: once BuildStep.start() gets rewritten to use
+            # maybeDeferred, just re-raise the exception here.
+            reactor.callLater(0, BuildStep.finished, self, FAILURE)
+            return
+        fileReader = _FileReader(fp)
+
+        # default arguments
+        args = {
+            'slavedest': self.slavedest,
+            'maxsize': self.maxsize,
+            'reader': fileReader,
+            'blocksize': self.blocksize,
+            'workdir': self.workdir,
+            'mode': self.mode,
+            }
+
+        self.cmd = StatusRemoteCommand('downloadFile', args)
+        d = self.runCommand(self.cmd)
+        d.addCallback(self.finished).addErrback(self.failed)
+
+    def finished(self, result):
+        if self.cmd.stderr != '':
+            self.addCompleteLog('stderr', self.cmd.stderr)
+
+        if self.cmd.rc is None or self.cmd.rc == 0:
+            self.step_status.setColor('green')
+            return BuildStep.finished(self, SUCCESS)
+        self.step_status.setColor('red')
+        return BuildStep.finished(self, FAILURE)
+
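A hypothetical sketch; mastersrc is read relative to the buildmaster's basedir
and slavedest is written relative to the step's workdir on the slave:

    from buildbot.process.factory import BuildFactory, s
    from buildbot.steps.transfer import FileDownload

    f = BuildFactory([
        s(FileDownload, mastersrc="configs/site.cfg",
          slavedest="site.cfg", mode=0644),
        ])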

Added: vendor/buildbot/current/buildbot/test/__init__.py
===================================================================

Added: vendor/buildbot/current/buildbot/test/emit.py
===================================================================
--- vendor/buildbot/current/buildbot/test/emit.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/emit.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,12 @@
+#! /usr/bin/python
+
+import os, sys
+
+sys.stdout.write("this is stdout\n")
+sys.stderr.write("this is stderr\n")
+if os.environ.has_key("EMIT_TEST"):
+    sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
+open("log1.out","wt").write("this is log1\n")
+
+rc = int(sys.argv[1])
+sys.exit(rc)

Added: vendor/buildbot/current/buildbot/test/emitlogs.py
===================================================================
--- vendor/buildbot/current/buildbot/test/emitlogs.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/emitlogs.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,44 @@
+#! /usr/bin/python
+
+import sys, time, os.path, StringIO
+
+mode = 0
+if len(sys.argv) > 1:
+    mode = int(sys.argv[1])
+
+if mode == 0:
+    log2 = open("log2.out", "wt")
+    log3 = open("log3.out", "wt")
+elif mode == 1:
+    # delete the logfiles first, and wait a moment to exercise a failure path
+    if os.path.exists("log2.out"):
+        os.unlink("log2.out")
+    if os.path.exists("log3.out"):
+        os.unlink("log3.out")
+    time.sleep(2)
+    log2 = open("log2.out", "wt")
+    log3 = open("log3.out", "wt")
+elif mode == 2:
+    # don't create the logfiles at all
+    log2 = StringIO.StringIO()
+    log3 = StringIO.StringIO()
+
+def write(i):
+    log2.write("this is log2 %d\n" % i)
+    log2.flush()
+    log3.write("this is log3 %d\n" % i)
+    log3.flush()
+    sys.stdout.write("this is stdout %d\n" % i)
+    sys.stdout.flush()
+
+write(0)
+time.sleep(1)
+write(1)
+sys.stdin.read(1)
+write(2)
+
+log2.close()
+log3.close()
+
+sys.exit(0)
+

Added: vendor/buildbot/current/buildbot/test/mail/msg1
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg1	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg1	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,68 @@
+Return-Path: <twisted-commits-admin at twistedmatrix.com>
+Delivered-To: warner-twistedcvs at luther.lothar.com
+Received: (qmail 11151 invoked by uid 1000); 11 Jan 2003 17:10:04 -0000
+Delivered-To: warner-twistedcvs at lothar.com
+Received: (qmail 1548 invoked by uid 13574); 11 Jan 2003 17:06:39 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin at twistedmatrix.com>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-twistedcvs at lothar.com>; 11 Jan 2003 17:06:39 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+	by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+	id 18XP0U-0002Mq-00; Sat, 11 Jan 2003 11:01:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+	id 18XP02-0002MN-00
+	for <twisted-commits at twistedmatrix.com>; Sat, 11 Jan 2003 11:00:46 -0600
+To: twisted-commits at twistedmatrix.com
+From: moshez CVS <moshez at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+X-Mailer: CVSToys
+From: moshez CVS <moshez at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+Message-Id: <E18XP02-0002MN-00 at pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Instance massenger, apparently
+Sender: twisted-commits-admin at twistedmatrix.com
+Errors-To: twisted-commits-admin at twistedmatrix.com
+X-BeenThere: twisted-commits at twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request at twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits at twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Sat, 11 Jan 2003 11:00:46 -0600
+Status:   
+
+Modified files:
+Twisted/debian/python-twisted.menu.in 1.3 1.4
+
+Log message:
+Instance massenger, apparently
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/debian/python-twisted.menu.in.diff?r1=text&tr1=1.3&r2=text&tr2=1.4&cvsroot=Twisted
+
+Index: Twisted/debian/python-twisted.menu.in
+diff -u Twisted/debian/python-twisted.menu.in:1.3 Twisted/debian/python-twisted.menu.in:1.4
+--- Twisted/debian/python-twisted.menu.in:1.3	Sat Dec 28 10:02:12 2002
++++ Twisted/debian/python-twisted.menu.in	Sat Jan 11 09:00:44 2003
+@@ -1,7 +1,7 @@
+ ?package(python at VERSION@-twisted):\
+ needs=x11\
+ section="Apps/Net"\
+-title="Twisted Instant Messenger (@VERSION@)"\
++title="Twisted Instance Messenger (@VERSION@)"\
+ command="/usr/bin/t-im at VERSION@"
+ 
+ ?package(python at VERSION@-twisted):\
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits at twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits

Added: vendor/buildbot/current/buildbot/test/mail/msg2
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg2	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg2	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,101 @@
+Return-Path: <twisted-commits-admin at twistedmatrix.com>
+Delivered-To: warner-twistedcvs at luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs at lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin at twistedmatrix.com>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-twistedcvs at lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+	by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+	id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+	id 18YYq7-0005eQ-00
+	for <twisted-commits at twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits at twistedmatrix.com
+From: itamarst CVS <itamarst at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00 at pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin at twistedmatrix.com
+Errors-To: twisted-commits-admin at twistedmatrix.com
+X-BeenThere: twisted-commits at twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request at twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits at twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:   
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/web/woven/form.py.diff?r1=text&tr1=1.20&r2=text&tr2=1.21&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/python/formmethod.py.diff?r1=text&tr1=1.12&r2=text&tr2=1.13&cvsroot=Twisted
+
+Index: Twisted/twisted/web/woven/form.py
+diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
+--- Twisted/twisted/web/woven/form.py:1.20	Tue Jan 14 12:07:29 2003
++++ Twisted/twisted/web/woven/form.py	Tue Jan 14 13:43:16 2003
+@@ -140,8 +140,8 @@
+ 
+     def input_submit(self, request, content, arg):
+         div = content.div()
+-        for value in arg.buttons:
+-            div.input(type="submit", name=arg.name, value=value)
++        for tag, value, desc in arg.choices:
++            div.input(type="submit", name=arg.name, value=tag)
+             div.text(" ")
+         if arg.reset:
+             div.input(type="reset")
+
+Index: Twisted/twisted/python/formmethod.py
+diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
+--- Twisted/twisted/python/formmethod.py:1.12	Tue Jan 14 12:07:30 2003
++++ Twisted/twisted/python/formmethod.py	Tue Jan 14 13:43:17 2003
+@@ -180,19 +180,13 @@
+         return 1
+ 
+ 
+-class Submit(Argument):
++class Submit(Choice):
+     """Submit button or a reasonable facsimile thereof."""
+ 
+-    def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
+-        Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
+-        self.buttons = buttons
++    def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
++                 reset=0, shortDesc=None, longDesc=None):
++        Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
+         self.reset = reset
+-
+-    def coerce(self, val):
+-        if val in self.buttons:
+-            return val
+-        else:
+-            raise InputError, "no such action"
+ 
+ 
+ class PresentationHint:
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits at twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits

Added: vendor/buildbot/current/buildbot/test/mail/msg3
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg3	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg3	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,97 @@
+Return-Path: <twisted-commits-admin at twistedmatrix.com>
+Delivered-To: warner-twistedcvs at luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs at lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin at twistedmatrix.com>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-twistedcvs at lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+	by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+	id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+	id 18YYq7-0005eQ-00
+	for <twisted-commits at twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits at twistedmatrix.com
+From: itamarst CVS <itamarst at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00 at pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin at twistedmatrix.com
+Errors-To: twisted-commits-admin at twistedmatrix.com
+X-BeenThere: twisted-commits at twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request at twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits at twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:   
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+
+
+Index: Twisted/twisted/web/woven/form.py
+diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
+--- Twisted/twisted/web/woven/form.py:1.20	Tue Jan 14 12:07:29 2003
++++ Twisted/twisted/web/woven/form.py	Tue Jan 14 13:43:16 2003
+@@ -140,8 +140,8 @@
+ 
+     def input_submit(self, request, content, arg):
+         div = content.div()
+-        for value in arg.buttons:
+-            div.input(type="submit", name=arg.name, value=value)
++        for tag, value, desc in arg.choices:
++            div.input(type="submit", name=arg.name, value=tag)
+             div.text(" ")
+         if arg.reset:
+             div.input(type="reset")
+
+Index: Twisted/twisted/python/formmethod.py
+diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
+--- Twisted/twisted/python/formmethod.py:1.12	Tue Jan 14 12:07:30 2003
++++ Twisted/twisted/python/formmethod.py	Tue Jan 14 13:43:17 2003
+@@ -180,19 +180,13 @@
+         return 1
+ 
+ 
+-class Submit(Argument):
++class Submit(Choice):
+     """Submit button or a reasonable facsimile thereof."""
+ 
+-    def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
+-        Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
+-        self.buttons = buttons
++    def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
++                 reset=0, shortDesc=None, longDesc=None):
++        Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
+         self.reset = reset
+-
+-    def coerce(self, val):
+-        if val in self.buttons:
+-            return val
+-        else:
+-            raise InputError, "no such action"
+ 
+ 
+ class PresentationHint:
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits at twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits

Added: vendor/buildbot/current/buildbot/test/mail/msg4
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg4	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg4	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,45 @@
+Return-Path: <twisted-commits-admin at twistedmatrix.com>
+Delivered-To: warner-twistedcvs at luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs at lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin at twistedmatrix.com>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-twistedcvs at lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+	by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+	id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+	id 18YYq7-0005eQ-00
+	for <twisted-commits at twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits at twistedmatrix.com
+From: itamarst CVS <itamarst at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00 at pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin at twistedmatrix.com
+Errors-To: twisted-commits-admin at twistedmatrix.com
+X-BeenThere: twisted-commits at twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request at twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits at twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:   
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+

Added: vendor/buildbot/current/buildbot/test/mail/msg5
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg5	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg5	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,54 @@
+Return-Path: <twisted-commits-admin at twistedmatrix.com>
+Delivered-To: warner-twistedcvs at luther.lothar.com
+Received: (qmail 5865 invoked by uid 1000); 17 Jan 2003 07:00:04 -0000
+Delivered-To: warner-twistedcvs at lothar.com
+Received: (qmail 40460 invoked by uid 13574); 17 Jan 2003 06:51:55 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin at twistedmatrix.com>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-twistedcvs at lothar.com>; 17 Jan 2003 06:51:55 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+	by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+	id 18ZQGk-0003WL-00; Fri, 17 Jan 2003 00:46:22 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+	id 18ZQFy-0003VP-00
+	for <twisted-commits at twistedmatrix.com>; Fri, 17 Jan 2003 00:45:34 -0600
+To: twisted-commits at twistedmatrix.com
+From: etrepum CVS <etrepum at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+Message-Id: <E18ZQFy-0003VP-00 at pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
+Sender: twisted-commits-admin at twistedmatrix.com
+Errors-To: twisted-commits-admin at twistedmatrix.com
+X-BeenThere: twisted-commits at twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request at twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits at twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 00:45:34 -0600
+Status:   
+
+Modified files:
+Twisted/doc/examples/cocoaDemo 0 0
+
+Log message:
+Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo.diff?r1=text&tr1=NONE&r2=text&tr2=NONE&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits at twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits

Added: vendor/buildbot/current/buildbot/test/mail/msg6
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg6	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg6	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,70 @@
+Return-Path: <twisted-commits-admin at twistedmatrix.com>
+Delivered-To: warner-twistedcvs at luther.lothar.com
+Received: (qmail 7252 invoked by uid 1000); 17 Jan 2003 07:10:04 -0000
+Delivered-To: warner-twistedcvs at lothar.com
+Received: (qmail 43115 invoked by uid 13574); 17 Jan 2003 07:07:57 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin at twistedmatrix.com>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-twistedcvs at lothar.com>; 17 Jan 2003 07:07:57 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+	by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+	id 18ZQW6-0003dA-00; Fri, 17 Jan 2003 01:02:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+	id 18ZQV7-0003cm-00
+	for <twisted-commits at twistedmatrix.com>; Fri, 17 Jan 2003 01:01:13 -0600
+To: twisted-commits at twistedmatrix.com
+From: etrepum CVS <etrepum at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+Message-Id: <E18ZQV7-0003cm-00 at pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Cocoa (OS X) clone of the QT demo, using polling reactor
+Sender: twisted-commits-admin at twistedmatrix.com
+Errors-To: twisted-commits-admin at twistedmatrix.com
+X-BeenThere: twisted-commits at twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request at twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits at twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 01:01:13 -0600
+Status:   
+
+Modified files:
+Twisted/doc/examples/cocoaDemo/MyAppDelegate.py None 1.1
+Twisted/doc/examples/cocoaDemo/__main__.py None 1.1
+Twisted/doc/examples/cocoaDemo/bin-python-main.m None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib None 1.1
+Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj None 1.1
+
+Log message:
+Cocoa (OS X) clone of the QT demo, using polling reactor
+
+Requires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project.  The reactor is iterated periodically by a repeating NSTimer.
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits at twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits

Added: vendor/buildbot/current/buildbot/test/mail/msg7
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg7	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg7	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,68 @@
+Return-Path: <twisted-commits-admin at twistedmatrix.com>
+Delivered-To: warner-twistedcvs at luther.lothar.com
+Received: (qmail 8665 invoked by uid 1000); 17 Jan 2003 08:00:03 -0000
+Delivered-To: warner-twistedcvs at lothar.com
+Received: (qmail 50728 invoked by uid 13574); 17 Jan 2003 07:51:14 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin at twistedmatrix.com>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-twistedcvs at lothar.com>; 17 Jan 2003 07:51:14 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+	by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+	id 18ZRBm-0003pN-00; Fri, 17 Jan 2003 01:45:18 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+	id 18ZRBQ-0003ou-00
+	for <twisted-commits at twistedmatrix.com>; Fri, 17 Jan 2003 01:44:56 -0600
+To: twisted-commits at twistedmatrix.com
+From: etrepum CVS <etrepum at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum at twistedmatrix.com>
+Reply-To: twisted-python at twistedmatrix.com
+Message-Id: <E18ZRBQ-0003ou-00 at pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Directories break debian build script, waiting for reasonable fix
+Sender: twisted-commits-admin at twistedmatrix.com
+Errors-To: twisted-commits-admin at twistedmatrix.com
+X-BeenThere: twisted-commits at twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request at twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits at twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 01:44:56 -0600
+Status:   
+
+Modified files:
+Twisted/doc/examples/cocoaDemo/MyAppDelegate.py 1.1 None
+Twisted/doc/examples/cocoaDemo/__main__.py 1.1 None
+Twisted/doc/examples/cocoaDemo/bin-python-main.m 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj 1.1 None
+
+Log message:
+Directories break debian build script, waiting for reasonable fix
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits at twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits

Added: vendor/buildbot/current/buildbot/test/mail/msg8
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg8	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg8	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,61 @@
+Return-Path: <twisted-commits-admin at twistedmatrix.com>
+Delivered-To: warner-twistedcvs at luther.lothar.com
+Received: (qmail 10804 invoked by uid 1000); 19 Jan 2003 14:10:03 -0000
+Delivered-To: warner-twistedcvs at lothar.com
+Received: (qmail 6704 invoked by uid 13574); 19 Jan 2003 14:00:20 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin at twistedmatrix.com>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-twistedcvs at lothar.com>; 19 Jan 2003 14:00:20 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+	by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+	id 18aFtx-0002WS-00; Sun, 19 Jan 2003 07:54:17 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+	id 18aFtH-0002W3-00
+	for <twisted-commits at twistedmatrix.com>; Sun, 19 Jan 2003 07:53:35 -0600
+To: twisted-commits at twistedmatrix.com
+From: acapnotic CVS <acapnotic at twistedmatrix.com>
+X-Mailer: CVSToys
+Message-Id: <E18aFtH-0002W3-00 at pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] it doesn't work with invalid syntax
+Sender: twisted-commits-admin at twistedmatrix.com
+Errors-To: twisted-commits-admin at twistedmatrix.com
+X-BeenThere: twisted-commits at twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request at twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits at twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+	<mailto:twisted-commits-request at twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Sun, 19 Jan 2003 07:53:35 -0600
+Status:   
+
+Modified files:
+CVSROOT/freshCfg 1.16 1.17
+
+Log message:
+it doesn't work with invalid syntax
+
+
+Index: CVSROOT/freshCfg
+diff -u CVSROOT/freshCfg:1.16 CVSROOT/freshCfg:1.17
+--- CVSROOT/freshCfg:1.16	Sun Jan 19 05:52:34 2003
++++ CVSROOT/freshCfg	Sun Jan 19 05:53:34 2003
+@@ -27,7 +27,7 @@
+     ('/cvs', '^Reality', None, MailNotification(['reality-commits'])),
+     ('/cvs', '^Twistby', None, MailNotification(['acapnotic'])),
+     ('/cvs', '^CVSToys', None,
+-     MailNotification(['CVSToys-list']
++     MailNotification(['CVSToys-list'],
+                       "http://twistedmatrix.com/users/jh.twistd/"
+                       "viewcvs/cgi/viewcvs.cgi/",
+                       replyTo="cvstoys-list at twistedmatrix.com"),)
+
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits at twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits

Added: vendor/buildbot/current/buildbot/test/mail/msg9
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/msg9	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/msg9	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,18 @@
+From twisted-python at twistedmatrix.com  Fri Dec 26 07:25:13 2003
+From: twisted-python at twistedmatrix.com (exarkun CVS)
+Date: Fri, 26 Dec 2003 00:25:13 -0700
+Subject: [Twisted-commits] Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
+Message-ID: <E1AZmLR-0000Tl-00 at wolfwood>
+
+Modified files:
+Twisted/sandbox/exarkun/persist-plugin
+
+Log message:
+Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
+
+
+ViewCVS links:
+http://cvs.twistedmatrix.com/cvs/sandbox/exarkun/persist-plugin?cvsroot=Twisted
+
+
+

Added: vendor/buildbot/current/buildbot/test/mail/syncmail.1
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/syncmail.1	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/syncmail.1	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,152 @@
+Return-Path: <warner at users.sourceforge.net>
+Delivered-To: warner-sourceforge at luther.lothar.com
+Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
+Delivered-To: warner-sourceforge at lothar.com
+Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner at users.sourceforge.net>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-sourceforge at lothar.com>; 28 Jul 2003 07:22:03 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+	by sc8-sf-list1.sourceforge.net with esmtp 
+	(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+	id 19h2KY-0004Nr-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+	id 19h2KY-0001rv-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+	id 19h2KY-0003r4-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+From: warner at users.sourceforge.net
+To: warner at users.sourceforge.net
+Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
+Message-Id: <E19h2KY-0003r4-00 at sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 00:22:02 -0700
+Status:   
+
+Update of /cvsroot/buildbot/buildbot/buildbot/changes
+In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
+
+Modified Files:
+	freshcvsmail.py 
+Log Message:
+remove leftover code, leave a temporary compatibility import. Note! Start
+importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
+
+
+Index: freshcvsmail.py
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
+retrieving revision 1.2
+retrieving revision 1.3
+diff -C2 -d -r1.2 -r1.3
+*** freshcvsmail.py	27 Jul 2003 18:54:08 -0000	1.2
+--- freshcvsmail.py	28 Jul 2003 07:22:00 -0000	1.3
+***************
+*** 1,96 ****
+  #! /usr/bin/python
+  
+! from buildbot.interfaces import IChangeSource
+! from buildbot.changes.maildirtwisted import MaildirTwisted
+! from buildbot.changes.changes import Change
+! from rfc822 import Message
+! import os, os.path
+! 
+! def parseFreshCVSMail(fd, prefix=None):
+!     """Parse mail sent by FreshCVS"""
+!     # this uses rfc822.Message so it can run under python2.1 . In the future
+!     # it will be updated to use python2.2's "email" module.
+! 
+!     m = Message(fd)
+!     # FreshCVS sets From: to "user CVS <user>", but the <> part may be
+!     # modified by the MTA (to include a local domain)
+!     name, addr = m.getaddr("from")
+!     if not name:
+!         return None # no From means this message isn't from FreshCVS
+!     cvs = name.find(" CVS")
+!     if cvs == -1:
+!         return None # this message isn't from FreshCVS
+!     who = name[:cvs]
+! 
+!     # we take the time of receipt as the time of checkin. Not correct,
+!     # but it avoids the out-of-order-changes issue
+!     #when = m.getdate() # and convert from 9-tuple, and handle timezone
+! 
+!     files = []
+!     comments = ""
+!     isdir = 0
+!     lines = m.fp.readlines()
+!     while lines:
+!         line = lines.pop(0)
+!         if line == "Modified files:\n":
+!             break
+!     while lines:
+!         line = lines.pop(0)
+!         if line == "\n":
+!             break
+!         line = line.rstrip("\n")
+!         file, junk = line.split(None, 1)
+!         if prefix:
+!             # insist that the file start with the prefix: FreshCVS sends
+!             # changes we don't care about too
+!             bits = file.split(os.sep)
+!             if bits[0] == prefix:
+!                 file = apply(os.path.join, bits[1:])
+!             else:
+!                 break
+!         if junk == "0 0":
+!             isdir = 1
+!         files.append(file)
+!     while lines:
+!         line = lines.pop(0)
+!         if line == "Log message:\n":
+!             break
+!     # message is terminated by "ViewCVS links:" or "Index:..." (patch)
+!     while lines:
+!         line = lines.pop(0)
+!         if line == "ViewCVS links:\n":
+!             break
+!         if line.find("Index: ") == 0:
+!             break
+!         comments += line
+!     comments = comments.rstrip() + "\n"
+! 
+!     if not files:
+!         return None
+!     
+!     change = Change(who, files, comments, isdir)
+! 
+!     return change
+!     
+!         
+!         
+! class FCMaildirSource(MaildirTwisted):
+!     """This source will watch a maildir that is subscribed to a FreshCVS
+!     change-announcement mailing list.
+!     """
+! 
+!     __implements__ = IChangeSource,
+  
+!     def __init__(self, maildir, prefix=None):
+!         MaildirTwisted.__init__(self, maildir)
+!         self.changemaster = None # filled in when added
+!         self.prefix = prefix
+!     def describe(self):
+!         return "FreshCVS mailing list in maildir %s" % self.maildir.where
+!     def messageReceived(self, filename):
+!         path = os.path.join(self.basedir, "new", filename)
+!         change = parseFreshCVSMail(open(path, "r"), self.prefix)
+!         if change:
+!             self.changemaster.addChange(change)
+!         os.rename(os.path.join(self.basedir, "new", filename),
+!                   os.path.join(self.basedir, "cur", filename))
+--- 1,5 ----
+  #! /usr/bin/python
+  
+! # leftover import for compatibility
+  
+! from buildbot.changes.mail import FCMaildirSource
+
+
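
The log message quoted in syncmail.1 above asks users to start importing FCMaildirSource from its new location. A minimal sketch of that one-line change follows (nothing else about the class is assumed to differ; construction stays as in the removed freshcvsmail.py code shown above):

    # old location, still working only through the compatibility shim left in
    # freshcvsmail.py:
    #   from buildbot.changes.freshcvsmail import FCMaildirSource
    # new, preferred location after this change:
    from buildbot.changes.mail import FCMaildirSource
    # FCMaildirSource is then constructed exactly as before (see the removed
    # code in the diff above).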

Added: vendor/buildbot/current/buildbot/test/mail/syncmail.2
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/syncmail.2	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/syncmail.2	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,56 @@
+Return-Path: <warner at users.sourceforge.net>
+Delivered-To: warner-sourceforge at luther.lothar.com
+Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
+Delivered-To: warner-sourceforge at lothar.com
+Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner at users.sourceforge.net>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-sourceforge at lothar.com>; 28 Jul 2003 06:53:09 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+	by sc8-sf-list1.sourceforge.net with esmtp 
+	(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+	id 19h1sb-0003nw-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+	id 19h1sa-00018t-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+	id 19h1sa-0002mX-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+From: warner at users.sourceforge.net
+To: warner at users.sourceforge.net
+Subject: buildbot ChangeLog,1.93,1.94
+Message-Id: <E19h1sa-0002mX-00 at sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:53:08 -0700
+Status:   
+
+Update of /cvsroot/buildbot/buildbot
+In directory sc8-pr-cvs1:/tmp/cvs-serv10689
+
+Modified Files:
+	ChangeLog 
+Log Message:
+	* NEWS: started adding new features
+
+
+Index: ChangeLog
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
+retrieving revision 1.93
+retrieving revision 1.94
+diff -C2 -d -r1.93 -r1.94
+*** ChangeLog	27 Jul 2003 22:53:27 -0000	1.93
+--- ChangeLog	28 Jul 2003 06:53:06 -0000	1.94
+***************
+*** 1,4 ****
+--- 1,6 ----
+  2003-07-27  Brian Warner  <warner at lothar.com>
+  
++ 	* NEWS: started adding new features
++ 
+  	* buildbot/changes/mail.py: start work on Syncmail parser, move
+  	mail sources into their own file
+
+

Added: vendor/buildbot/current/buildbot/test/mail/syncmail.3
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/syncmail.3	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/syncmail.3	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,39 @@
+Return-Path: <warner at users.sourceforge.net>
+Delivered-To: warner-sourceforge at luther.lothar.com
+Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
+Delivered-To: warner-sourceforge at lothar.com
+Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner at users.sourceforge.net>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-sourceforge at lothar.com>; 28 Jul 2003 06:51:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+	by sc8-sf-list1.sourceforge.net with esmtp 
+	(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+	id 19h1rF-00027s-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+	id 19h1rF-00017O-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+	id 19h1rF-0002jg-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+From: warner at users.sourceforge.net
+To: warner at users.sourceforge.net
+Subject: CVSROOT syncmail,1.1,NONE
+Message-Id: <E19h1rF-0002jg-00 at sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:51:45 -0700
+Status:   
+
+Update of /cvsroot/buildbot/CVSROOT
+In directory sc8-pr-cvs1:/tmp/cvs-serv10515
+
+Removed Files:
+	syncmail 
+Log Message:
+nevermind
+
+--- syncmail DELETED ---
+
+

Added: vendor/buildbot/current/buildbot/test/mail/syncmail.4
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/syncmail.4	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/syncmail.4	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,290 @@
+Return-Path: <warner at users.sourceforge.net>
+Delivered-To: warner-sourceforge at luther.lothar.com
+Received: (qmail 24111 invoked by uid 1000); 28 Jul 2003 08:01:54 -0000
+Delivered-To: warner-sourceforge at lothar.com
+Received: (qmail 68756 invoked by uid 13574); 28 Jul 2003 08:01:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner at users.sourceforge.net>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-sourceforge at lothar.com>; 28 Jul 2003 08:01:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+	by sc8-sf-list1.sourceforge.net with esmtp 
+	(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+	id 19h2wz-00029d-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+	id 19h2wz-0002XB-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+	id 19h2wz-0005a9-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+From: warner at users.sourceforge.net
+To: warner at users.sourceforge.net
+Subject: buildbot/test/mail syncmail.1,NONE,1.1 syncmail.2,NONE,1.1 syncmail.3,NONE,1.1
+Message-Id: <E19h2wz-0005a9-00 at sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 01:01:45 -0700
+Status:   
+
+Update of /cvsroot/buildbot/buildbot/test/mail
+In directory sc8-pr-cvs1:/tmp/cvs-serv21445
+
+Added Files:
+	syncmail.1 syncmail.2 syncmail.3 
+Log Message:
+test cases for syncmail parser
+
+--- NEW FILE: syncmail.1 ---
+Return-Path: <warner at users.sourceforge.net>
+Delivered-To: warner-sourceforge at luther.lothar.com
+Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
+Delivered-To: warner-sourceforge at lothar.com
+Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner at users.sourceforge.net>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-sourceforge at lothar.com>; 28 Jul 2003 07:22:03 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+	by sc8-sf-list1.sourceforge.net with esmtp 
+	(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+	id 19h2KY-0004Nr-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+	id 19h2KY-0001rv-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+	id 19h2KY-0003r4-00
+	for <warner at users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+From: warner at users.sourceforge.net
+To: warner at users.sourceforge.net
+Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
+Message-Id: <E19h2KY-0003r4-00 at sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 00:22:02 -0700
+Status:   
+
+Update of /cvsroot/buildbot/buildbot/buildbot/changes
+In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
+
+Modified Files:
+	freshcvsmail.py 
+Log Message:
+remove leftover code, leave a temporary compatibility import. Note! Start
+importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
+
+
+Index: freshcvsmail.py
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
+retrieving revision 1.2
+retrieving revision 1.3
+diff -C2 -d -r1.2 -r1.3
+*** freshcvsmail.py	27 Jul 2003 18:54:08 -0000	1.2
+--- freshcvsmail.py	28 Jul 2003 07:22:00 -0000	1.3
+***************
+*** 1,96 ****
+  #! /usr/bin/python
+  
+! from buildbot.interfaces import IChangeSource
+! from buildbot.changes.maildirtwisted import MaildirTwisted
+! from buildbot.changes.changes import Change
+! from rfc822 import Message
+! import os, os.path
+! 
+! def parseFreshCVSMail(fd, prefix=None):
+!     """Parse mail sent by FreshCVS"""
+!     # this uses rfc822.Message so it can run under python2.1 . In the future
+!     # it will be updated to use python2.2's "email" module.
+! 
+!     m = Message(fd)
+!     # FreshCVS sets From: to "user CVS <user>", but the <> part may be
+!     # modified by the MTA (to include a local domain)
+!     name, addr = m.getaddr("from")
+!     if not name:
+!         return None # no From means this message isn't from FreshCVS
+!     cvs = name.find(" CVS")
+!     if cvs == -1:
+!         return None # this message isn't from FreshCVS
+!     who = name[:cvs]
+! 
+!     # we take the time of receipt as the time of checkin. Not correct,
+!     # but it avoids the out-of-order-changes issue
+!     #when = m.getdate() # and convert from 9-tuple, and handle timezone
+! 
+!     files = []
+!     comments = ""
+!     isdir = 0
+!     lines = m.fp.readlines()
+!     while lines:
+!         line = lines.pop(0)
+!         if line == "Modified files:\n":
+!             break
+!     while lines:
+!         line = lines.pop(0)
+!         if line == "\n":
+!             break
+!         line = line.rstrip("\n")
+!         file, junk = line.split(None, 1)
+!         if prefix:
+!             # insist that the file start with the prefix: FreshCVS sends
+!             # changes we don't care about too
+!             bits = file.split(os.sep)
+!             if bits[0] == prefix:
+!                 file = apply(os.path.join, bits[1:])
+!             else:
+!                 break
+!         if junk == "0 0":
+!             isdir = 1
+!         files.append(file)
+!     while lines:
+!         line = lines.pop(0)
+!         if line == "Log message:\n":
+!             break
+!     # message is terminated by "ViewCVS links:" or "Index:..." (patch)
+!     while lines:
+!         line = lines.pop(0)
+!         if line == "ViewCVS links:\n":
+!             break
+!         if line.find("Index: ") == 0:
+!             break
+!         comments += line
+!     comments = comments.rstrip() + "\n"
+! 
+!     if not files:
+!         return None
+!     
+!     change = Change(who, files, comments, isdir)
+! 
+!     return change
+!     
+!         
+!         
+! class FCMaildirSource(MaildirTwisted):
+!     """This source will watch a maildir that is subscribed to a FreshCVS
+!     change-announcement mailing list.
+!     """
+! 
+!     __implements__ = IChangeSource,
+  
+!     def __init__(self, maildir, prefix=None):
+!         MaildirTwisted.__init__(self, maildir)
+!         self.changemaster = None # filled in when added
+!         self.prefix = prefix
+!     def describe(self):
+!         return "FreshCVS mailing list in maildir %s" % self.maildir.where
+!     def messageReceived(self, filename):
+!         path = os.path.join(self.basedir, "new", filename)
+!         change = parseFreshCVSMail(open(path, "r"), self.prefix)
+!         if change:
+!             self.changemaster.addChange(change)
+!         os.rename(os.path.join(self.basedir, "new", filename),
+!                   os.path.join(self.basedir, "cur", filename))
+--- 1,5 ----
+  #! /usr/bin/python
+  
+! # leftover import for compatibility
+  
+! from buildbot.changes.mail import FCMaildirSource
+
+
+
+--- NEW FILE: syncmail.2 ---
+Return-Path: <warner at users.sourceforge.net>
+Delivered-To: warner-sourceforge at luther.lothar.com
+Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
+Delivered-To: warner-sourceforge at lothar.com
+Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner at users.sourceforge.net>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-sourceforge at lothar.com>; 28 Jul 2003 06:53:09 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+	by sc8-sf-list1.sourceforge.net with esmtp 
+	(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+	id 19h1sb-0003nw-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+	id 19h1sa-00018t-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+	id 19h1sa-0002mX-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+From: warner at users.sourceforge.net
+To: warner at users.sourceforge.net
+Subject: buildbot ChangeLog,1.93,1.94
+Message-Id: <E19h1sa-0002mX-00 at sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:53:08 -0700
+Status:   
+
+Update of /cvsroot/buildbot/buildbot
+In directory sc8-pr-cvs1:/tmp/cvs-serv10689
+
+Modified Files:
+	ChangeLog 
+Log Message:
+	* NEWS: started adding new features
+
+
+Index: ChangeLog
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
+retrieving revision 1.93
+retrieving revision 1.94
+diff -C2 -d -r1.93 -r1.94
+*** ChangeLog	27 Jul 2003 22:53:27 -0000	1.93
+--- ChangeLog	28 Jul 2003 06:53:06 -0000	1.94
+***************
+*** 1,4 ****
+--- 1,6 ----
+  2003-07-27  Brian Warner  <warner at lothar.com>
+  
++ 	* NEWS: started adding new features
++ 
+  	* buildbot/changes/mail.py: start work on Syncmail parser, move
+  	mail sources into their own file
+
+
+
+--- NEW FILE: syncmail.3 ---
+Return-Path: <warner at users.sourceforge.net>
+Delivered-To: warner-sourceforge at luther.lothar.com
+Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
+Delivered-To: warner-sourceforge at lothar.com
+Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner at users.sourceforge.net>)
+          by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+          for <warner-sourceforge at lothar.com>; 28 Jul 2003 06:51:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+	by sc8-sf-list1.sourceforge.net with esmtp 
+	(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+	id 19h1rF-00027s-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+	id 19h1rF-00017O-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+	by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+	id 19h1rF-0002jg-00
+	for <warner at users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+From: warner at users.sourceforge.net
+To: warner at users.sourceforge.net
+Subject: CVSROOT syncmail,1.1,NONE
+Message-Id: <E19h1rF-0002jg-00 at sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:51:45 -0700
+Status:   
+
+Update of /cvsroot/buildbot/CVSROOT
+In directory sc8-pr-cvs1:/tmp/cvs-serv10515
+
+Removed Files:
+	syncmail 
+Log Message:
+nevermind
+
+--- syncmail DELETED ---
+
+
+
+

Added: vendor/buildbot/current/buildbot/test/mail/syncmail.5
===================================================================
--- vendor/buildbot/current/buildbot/test/mail/syncmail.5	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/mail/syncmail.5	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,70 @@
+From thomas at otto.amantes Mon Feb 21 17:46:45 2005
+Return-Path: <thomas at otto.amantes>
+Received: from otto.amantes (otto.amantes [127.0.0.1]) by otto.amantes
+	(8.13.1/8.13.1) with ESMTP id j1LGkjr3011986 for <thomas at localhost>; Mon,
+	21 Feb 2005 17:46:45 +0100
+Message-Id: <200502211646.j1LGkjr3011986 at otto.amantes>
+From: Thomas Vander Stichele <thomas at otto.amantes>
+To: thomas at otto.amantes
+Subject: test1    s
+Date: Mon, 21 Feb 2005 16:46:45 +0000
+X-Mailer: Python syncmail $Revision: 1.1 $
+	<http://sf.net/projects/cvs-syncmail>
+Content-Transfer-Encoding: 8bit
+Mime-Version: 1.0
+
+Update of /home/cvs/test/test1
+In directory otto.amantes:/home/thomas/dev/tests/cvs/test1
+
+Added Files:
+      Tag: BRANCH-DEVEL
+	MANIFEST Makefile.am autogen.sh configure.in 
+Log Message:
+stuff on the branch
+
+--- NEW FILE: Makefile.am ---
+SUBDIRS = src
+
+# normally I wouldn't distribute autogen.sh and friends with a tarball
+# but this one is specifically distributed for demonstration purposes
+
+EXTRA_DIST = autogen.sh
+
+# target for making the "import this into svn" tarball
+test:
+	mkdir test
+	for a in `cat MANIFEST`; do \
+		cp -pr $$a test/$$a; done
+	tar czf test.tar.gz test
+	rm -rf test
+
+--- NEW FILE: MANIFEST ---
+MANIFEST
+autogen.sh
+configure.in
+Makefile.am
+src
+src/Makefile.am
+src/test.c
+
+--- NEW FILE: autogen.sh ---
+#!/bin/sh
+
+set -x
+
+aclocal && \
+autoheader && \
+autoconf && \
+automake -a --foreign && \
+./configure $@
+
+--- NEW FILE: configure.in ---
+dnl configure.ac for version macro
+AC_INIT
+
+AM_CONFIG_HEADER(config.h)
+
+AM_INIT_AUTOMAKE(test, 0.0.0)
+AC_PROG_CC
+
+AC_OUTPUT(Makefile src/Makefile)

Added: vendor/buildbot/current/buildbot/test/runutils.py
===================================================================
--- vendor/buildbot/current/buildbot/test/runutils.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/runutils.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,416 @@
+
+import signal
+import shutil, os, errno
+from twisted.internet import defer, reactor
+from twisted.python import log, util
+
+from buildbot import master, interfaces
+from buildbot.twcompat import maybeWait
+from buildbot.slave import bot
+from buildbot.process.builder import Builder
+from buildbot.process.base import BuildRequest, Build
+from buildbot.process.buildstep import BuildStep
+from buildbot.sourcestamp import SourceStamp
+from buildbot.status import builder
+
+class MyBot(bot.Bot):
+    def remote_getSlaveInfo(self):
+        return self.parent.info
+
+class MyBuildSlave(bot.BuildSlave):
+    botClass = MyBot
+
+def rmtree(d):
+    try:
+        shutil.rmtree(d, ignore_errors=1)
+    except OSError, e:
+        # stupid 2.2 appears to ignore ignore_errors
+        if e.errno != errno.ENOENT:
+            raise
+
+class RunMixin:
+    master = None
+
+    def rmtree(self, d):
+        rmtree(d)
+
+    def setUp(self):
+        self.slaves = {}
+        self.rmtree("basedir")
+        os.mkdir("basedir")
+        self.master = master.BuildMaster("basedir")
+        self.status = self.master.getStatus()
+        self.control = interfaces.IControl(self.master)
+
+    def connectOneSlave(self, slavename, opts={}):
+        port = self.master.slavePort._port.getHost().port
+        self.rmtree("slavebase-%s" % slavename)
+        os.mkdir("slavebase-%s" % slavename)
+        slave = MyBuildSlave("localhost", port, slavename, "sekrit",
+                             "slavebase-%s" % slavename,
+                             keepalive=0, usePTY=1, debugOpts=opts)
+        slave.info = {"admin": "one"}
+        self.slaves[slavename] = slave
+        slave.startService()
+
+    def connectSlave(self, builders=["dummy"], slavename="bot1",
+                     opts={}):
+        # connect buildslave 'slavename' and wait for it to connect to all of
+        # the given builders
+        dl = []
+        # initiate call for all of them, before waiting on result,
+        # otherwise we might miss some
+        for b in builders:
+            dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
+        d = defer.DeferredList(dl)
+        self.connectOneSlave(slavename, opts)
+        return d
+
+    def connectSlaves(self, slavenames, builders):
+        dl = []
+        # initiate call for all of them, before waiting on result,
+        # otherwise we might miss some
+        for b in builders:
+            dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
+        d = defer.DeferredList(dl)
+        for name in slavenames:
+            self.connectOneSlave(name)
+        return d
+
+    def connectSlave2(self):
+        # this takes over for bot1, so it has to share the slavename
+        port = self.master.slavePort._port.getHost().port
+        self.rmtree("slavebase-bot2")
+        os.mkdir("slavebase-bot2")
+        # this uses bot1, really
+        slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
+                             "slavebase-bot2", keepalive=0, usePTY=1)
+        slave.info = {"admin": "two"}
+        self.slaves['bot2'] = slave
+        slave.startService()
+
+    def connectSlaveFastTimeout(self):
+        # this slave has a very fast keepalive timeout
+        port = self.master.slavePort._port.getHost().port
+        self.rmtree("slavebase-bot1")
+        os.mkdir("slavebase-bot1")
+        slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
+                             "slavebase-bot1", keepalive=2, usePTY=1,
+                             keepaliveTimeout=1)
+        slave.info = {"admin": "one"}
+        self.slaves['bot1'] = slave
+        slave.startService()
+        d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+        return d
+
+    # things to start builds
+    def requestBuild(self, builder):
+        # returns a Deferred that fires with an IBuildStatus object when the
+        # build is finished
+        req = BuildRequest("forced build", SourceStamp())
+        self.control.getBuilder(builder).requestBuild(req)
+        return req.waitUntilFinished()
+
+    def failUnlessBuildSucceeded(self, bs):
+        if bs.getResults() != builder.SUCCESS:
+            log.msg("failUnlessBuildSucceeded noticed that the build failed")
+            self.logBuildResults(bs)
+        self.failUnless(bs.getResults() == builder.SUCCESS)
+        return bs # useful for chaining
+
+    def logBuildResults(self, bs):
+        # emit the build status and the contents of all logs to test.log
+        log.msg("logBuildResults starting")
+        log.msg(" bs.getResults() == %s" % builder.Results[bs.getResults()])
+        log.msg(" bs.isFinished() == %s" % bs.isFinished())
+        for s in bs.getSteps():
+            for l in s.getLogs():
+                log.msg("--- START step %s / log %s ---" % (s.getName(),
+                                                            l.getName()))
+                if not l.getName().endswith(".html"):
+                    log.msg(l.getTextWithHeaders())
+                log.msg("--- STOP ---")
+        log.msg("logBuildResults finished")
+
+    def tearDown(self):
+        log.msg("doing tearDown")
+        d = self.shutdownAllSlaves()
+        d.addCallback(self._tearDown_1)
+        d.addCallback(self._tearDown_2)
+        return maybeWait(d)
+    def _tearDown_1(self, res):
+        if self.master:
+            return defer.maybeDeferred(self.master.stopService)
+    def _tearDown_2(self, res):
+        self.master = None
+        log.msg("tearDown done")
+        
+
+    # various forms of slave death
+
+    def shutdownAllSlaves(self):
+        # the slave has disconnected normally: they SIGINT'ed it, or it shut
+        # down willingly. This will kill child processes and give them a
+        # chance to finish up. We return a Deferred that will fire when
+        # everything is finished shutting down.
+
+        log.msg("doing shutdownAllSlaves")
+        dl = []
+        for slave in self.slaves.values():
+            dl.append(slave.waitUntilDisconnected())
+            dl.append(defer.maybeDeferred(slave.stopService))
+        d = defer.DeferredList(dl)
+        d.addCallback(self._shutdownAllSlavesDone)
+        return d
+    def _shutdownAllSlavesDone(self, res):
+        for name in self.slaves.keys():
+            del self.slaves[name]
+        return self.master.botmaster.waitUntilBuilderFullyDetached("dummy")
+
+    def shutdownSlave(self, slavename, buildername):
+        # this slave has disconnected normally: they SIGINT'ed it, or it shut
+        # down willingly. This will kill child processes and give them a
+        # chance to finish up. We return a Deferred that will fire when
+        # everything is finished shutting down, and the given Builder knows
+        # that the slave has gone away.
+
+        s = self.slaves[slavename]
+        dl = [self.master.botmaster.waitUntilBuilderDetached(buildername),
+              s.waitUntilDisconnected()]
+        d = defer.DeferredList(dl)
+        d.addCallback(self._shutdownSlave_done, slavename)
+        s.stopService()
+        return d
+    def _shutdownSlave_done(self, res, slavename):
+        del self.slaves[slavename]
+
+    def killSlave(self):
+        # the slave has died, its host sent a FIN. The .notifyOnDisconnect
+        # callbacks will terminate the current step, so the build should be
+        # flunked (no further steps should be started).
+        self.slaves['bot1'].bf.continueTrying = 0
+        bot = self.slaves['bot1'].getServiceNamed("bot")
+        broker = bot.builders["dummy"].remote.broker
+        broker.transport.loseConnection()
+        del self.slaves['bot1']
+
+    def disappearSlave(self, slavename="bot1", buildername="dummy"):
+        # the slave's host has vanished off the net, leaving the connection
+        # dangling. This will be detected quickly by app-level keepalives or
+        # a ping, or slowly by TCP timeouts.
+
+        # simulate this by replacing the slave Broker's .dataReceived method
+        # with one that just throws away all data.
+        def discard(data):
+            pass
+        bot = self.slaves[slavename].getServiceNamed("bot")
+        broker = bot.builders[buildername].remote.broker
+        broker.dataReceived = discard # seal its ears
+        broker.transport.write = discard # and take away its voice
+
+    def ghostSlave(self):
+        # the slave thinks it has lost the connection, and initiated a
+        # reconnect. The master doesn't yet realize it has lost the previous
+        # connection, and sees two connections at once.
+        raise NotImplementedError
+
+
+def setupBuildStepStatus(basedir):
+    """Return a BuildStep with a suitable BuildStepStatus object, ready to
+    use."""
+    os.mkdir(basedir)
+    botmaster = None
+    s0 = builder.Status(botmaster, basedir)
+    s1 = s0.builderAdded("buildername", "buildername")
+    s2 = builder.BuildStatus(s1, 1)
+    s3 = builder.BuildStepStatus(s2)
+    s3.setName("foostep")
+    s3.started = True
+    s3.stepStarted()
+    return s3
+
+def fake_slaveVersion(command, oldversion=None):
+    from buildbot.slave.registry import commandRegistry
+    return commandRegistry[command]
+
+def makeBuildStep(basedir, step_class=BuildStep, **kwargs):
+    bss = setupBuildStepStatus(basedir)
+
+    ss = SourceStamp()
+    setup = {'name': "builder1", "slavename": "bot1",
+             'builddir': "builddir", 'factory': None}
+    b0 = Builder(setup, bss.getBuild().getBuilder())
+    br = BuildRequest("reason", ss)
+    b = Build([br])
+    b.setBuilder(b0)
+    s = step_class(build=b, **kwargs)
+    s.setStepStatus(bss)
+    b.setupStatus(bss.getBuild())
+    s.slaveVersion = fake_slaveVersion
+    return s
+
+
+def findDir():
+    # the same directory that holds this script
+    return util.sibpath(__file__, ".")
+
+class SignalMixin:
+    sigchldHandler = None
+    
+    def setUpClass(self):
+        # make sure SIGCHLD handler is installed, as it should be on
+        # reactor.run(). problem is reactor may not have been run when this
+        # test runs.
+        if hasattr(reactor, "_handleSigchld") and hasattr(signal, "SIGCHLD"):
+            self.sigchldHandler = signal.signal(signal.SIGCHLD,
+                                                reactor._handleSigchld)
+
+    def tearDownClass(self):
+        if self.sigchldHandler:
+            signal.signal(signal.SIGCHLD, self.sigchldHandler)
+
+# these classes are used to test SlaveCommands in isolation
+
+class FakeSlaveBuilder:
+    debug = False
+    def __init__(self, usePTY, basedir):
+        self.updates = []
+        self.basedir = basedir
+        self.usePTY = usePTY
+
+    def sendUpdate(self, data):
+        if self.debug:
+            print "FakeSlaveBuilder.sendUpdate", data
+        self.updates.append(data)
+
+
+class SlaveCommandTestBase(SignalMixin):
+    usePTY = False
+
+    def setUpBuilder(self, basedir):
+        if not os.path.exists(basedir):
+            os.mkdir(basedir)
+        self.builder = FakeSlaveBuilder(self.usePTY, basedir)
+
+    def startCommand(self, cmdclass, args):
+        stepId = 0
+        self.cmd = c = cmdclass(self.builder, stepId, args)
+        c.running = True
+        d = c.doStart()
+        return d
+
+    def collectUpdates(self, res=None):
+        logs = {}
+        for u in self.builder.updates:
+            for k in u.keys():
+                if k == "log":
+                    logname,data = u[k]
+                    oldlog = logs.get(("log",logname), "")
+                    logs[("log",logname)] = oldlog + data
+                elif k == "rc":
+                    pass
+                else:
+                    logs[k] = logs.get(k, "") + u[k]
+        return logs
+
+    def findRC(self):
+        for u in self.builder.updates:
+            if "rc" in u:
+                return u["rc"]
+        return None
+
+    def printStderr(self):
+        for u in self.builder.updates:
+            if "stderr" in u:
+                print u["stderr"]
+
+# ----------------------------------------
+
+class LocalWrapper:
+    # r = pb.Referenceable()
+    # w = LocalWrapper(r)
+    # now you can do things like w.callRemote()
+    def __init__(self, target):
+        self.target = target
+
+    def callRemote(self, name, *args, **kwargs):
+        d = defer.maybeDeferred(self._callRemote, name, *args, **kwargs)
+        return d
+
+    def _callRemote(self, name, *args, **kwargs):
+        method = getattr(self.target, "remote_"+name)
+        return method(*args, **kwargs)
+
+    def notifyOnDisconnect(self, observer):
+        pass
+    def dontNotifyOnDisconnect(self, observer):
+        pass
+
+
+class LocalSlaveBuilder(bot.SlaveBuilder):
+    """I am object that behaves like a pb.RemoteReference, but in fact I
+    invoke methods locally."""
+    _arg_filter = None
+
+    def setArgFilter(self, filter):
+        self._arg_filter = filter
+
+    def remote_startCommand(self, stepref, stepId, command, args):
+        if self._arg_filter:
+            args = self._arg_filter(args)
+        # stepref should be a RemoteReference to the RemoteCommand
+        return bot.SlaveBuilder.remote_startCommand(self,
+                                                    LocalWrapper(stepref),
+                                                    stepId, command, args)
+
+class StepTester:
+    """Utility class to exercise BuildSteps and RemoteCommands, without
+    really using a Build or a Bot. No networks are used.
+
+    Use this as follows::
+
+    class MyTest(StepTester, unittest.TestCase):
+        def testOne(self):
+            self.slavebase = 'testOne.slave'
+            self.masterbase = 'testOne.master'
+            sb = self.makeSlaveBuilder()
+            step = self.makeStep(stepclass, **kwargs)
+            d = self.runStep(step)
+            d.addCallback(_checkResults)
+            return d
+    """
+
+    #slavebase = "slavebase"
+    slavebuilderbase = "slavebuilderbase"
+    #masterbase = "masterbase"
+
+    def makeSlaveBuilder(self):
+        os.mkdir(self.slavebase)
+        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase))
+        b = bot.Bot(self.slavebase, False)
+        b.startService()
+        sb = LocalSlaveBuilder("slavebuildername", False)
+        sb.setArgFilter(self.filterArgs)
+        sb.usePTY = False
+        sb.setServiceParent(b)
+        sb.setBuilddir(self.slavebuilderbase)
+        self.remote = LocalWrapper(sb)
+        return sb
+
+    workdir = "build"
+    def makeStep(self, factory, **kwargs):
+        if not kwargs.has_key("workdir"):
+            kwargs['workdir'] = self.workdir
+        step = makeBuildStep(self.masterbase, factory, **kwargs)
+        return step
+
+    def runStep(self, step):
+        d = defer.maybeDeferred(step.startStep, self.remote)
+        return d
+
+    def wrap(self, target):
+        return LocalWrapper(target)
+
+    def filterArgs(self, args):
+        # this can be overridden
+        return args

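The LocalWrapper defined above is the key trick in these test helpers: callRemote(name, ...) is routed to a local remote_<name> method and wrapped in defer.maybeDeferred, so a plain local object can stand in for a pb.RemoteReference without any networking. A minimal, self-contained sketch of that dispatch pattern (EchoTarget and MiniWrapper are hypothetical names used only for illustration; only Twisted's defer module is assumed):

    from twisted.internet import defer

    class EchoTarget:
        # stands in for a pb.Referenceable; only the remote_* naming matters
        def remote_echo(self, value):
            return value

    class MiniWrapper:
        def __init__(self, target):
            self.target = target
        def callRemote(self, name, *args, **kwargs):
            # maybeDeferred turns plain return values and raised exceptions
            # into a Deferred, matching the remote-call convention
            return defer.maybeDeferred(getattr(self.target, "remote_" + name),
                                       *args, **kwargs)

    w = MiniWrapper(EchoTarget())
    d = w.callRemote("echo", 42)
    d.addCallback(lambda result: result)  # fires synchronously with 42
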
Added: vendor/buildbot/current/buildbot/test/sleep.py
===================================================================
--- vendor/buildbot/current/buildbot/test/sleep.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/sleep.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,9 @@
+#! /usr/bin/python
+
+import sys, time
+delay = int(sys.argv[1])
+
+sys.stdout.write("sleeping for %d seconds\n" % delay)
+time.sleep(delay)
+sys.stdout.write("woke up\n")
+sys.exit(0)

Added: vendor/buildbot/current/buildbot/test/subdir/emit.py
===================================================================
--- vendor/buildbot/current/buildbot/test/subdir/emit.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/subdir/emit.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,11 @@
+#! /usr/bin/python
+
+import os, sys
+
+sys.stdout.write("this is stdout in subdir\n")
+sys.stderr.write("this is stderr\n")
+if os.environ.has_key("EMIT_TEST"):
+    sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
+open("log1.out","wt").write("this is log1\n")
+rc = int(sys.argv[1])
+sys.exit(rc)

Added: vendor/buildbot/current/buildbot/test/test__versions.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test__versions.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test__versions.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,16 @@
+
+# This is a fake test which just logs the version of Twisted, to make it
+# easier to track down failures in other tests.
+
+from twisted.trial import unittest
+from twisted.python import log
+from twisted import copyright
+import sys
+import buildbot
+
+class Versions(unittest.TestCase):
+    def test_versions(self):
+        log.msg("Python Version: %s" % sys.version)
+        log.msg("Twisted Version: %s" % copyright.version)
+        log.msg("Buildbot Version: %s" % buildbot.version)
+

Added: vendor/buildbot/current/buildbot/test/test_bonsaipoller.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_bonsaipoller.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_bonsaipoller.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,182 @@
+# -*- test-case-name: buildbot.test.test_bonsaipoller -*-
+
+from twisted.trial import unittest
+from buildbot.changes.bonsaipoller import FileNode, CiNode, BonsaiResult, \
+     BonsaiParser, BonsaiPoller, InvalidResultError, EmptyResult
+
+from StringIO import StringIO
+from copy import deepcopy
+import re
+
+log1 = "Add Bug 338541a"
+who1 = "sar at gmail.com"
+date1 = 1161908700
+log2 = "bug 357427 add static ctor/dtor methods"
+who2 = "aarrg at ooacm.org"
+date2 = 1161910620
+log3 = "Testing log #3 lbah blah"
+who3 = "huoents at hueont.net"
+date3 = 1889822728
+rev1 = "1.8"
+file1 = "mozilla/testing/mochitest/tests/index.html"
+rev2 = "1.1"
+file2 = "mozilla/testing/mochitest/tests/test_bug338541.xhtml"
+rev3 = "1.1812"
+file3 = "mozilla/xpcom/threads/nsAutoLock.cpp"
+rev4 = "1.3"
+file4 = "mozilla/xpcom/threads/nsAutoLock.h"
+rev5 = "2.4"
+file5 = "mozilla/xpcom/threads/test.cpp"
+
+nodes = []
+files = []
+files.append(FileNode(rev1,file1))
+nodes.append(CiNode(log1, who1, date1, files))
+
+files = []
+files.append(FileNode(rev2, file2))
+files.append(FileNode(rev3, file3))
+nodes.append(CiNode(log2, who2, date2, files))
+
+nodes.append(CiNode(log3, who3, date3, []))
+
+goodParsedResult = BonsaiResult(nodes)
+
+goodUnparsedResult = """\
+<?xml version="1.0"?>
+<queryResults>
+<ci who="%s" date="%d">
+  <log>%s</log>
+  <files>
+    <f rev="%s">%s</f>
+  </files>
+</ci>
+<ci who="%s" date="%d">
+  <log>%s</log>
+  <files>
+    <f rev="%s">%s</f>
+    <f rev="%s">%s</f>
+  </files>
+</ci>
+<ci who="%s" date="%d">
+  <log>%s</log>
+  <files>
+  </files>
+</ci>
+</queryResults>
+""" % (who1, date1, log1, rev1, file1,
+       who2, date2, log2, rev2, file2, rev3, file3,
+       who3, date3, log3)
+
+badUnparsedResult = deepcopy(goodUnparsedResult)
+badUnparsedResult = badUnparsedResult.replace("</queryResults>", "")
+
+invalidDateResult = deepcopy(goodUnparsedResult)
+invalidDateResult = invalidDateResult.replace(str(date1), "foobar")
+
+missingRevisionResult = deepcopy(goodUnparsedResult)
+missingRevisionResult = missingRevisionResult.replace("rev=\""+rev3+"\"", "")
+
+missingFilenameResult = deepcopy(goodUnparsedResult)
+missingFilenameResult = missingFilenameResult.replace(file2, "")
+
+duplicateLogResult = deepcopy(goodUnparsedResult)
+duplicateLogResult = re.sub("<log>"+log1+"</log>",
+                            "<log>blah</log><log>blah</log>",
+                            duplicateLogResult)
+
+duplicateFilesResult = deepcopy(goodUnparsedResult)
+duplicateFilesResult = re.sub("<files>\s*</files>",
+                              "<files></files><files></files>",
+                              duplicateFilesResult)
+
+missingCiResult = deepcopy(goodUnparsedResult)
+r = re.compile("<ci.*</ci>", re.DOTALL | re.MULTILINE)
+missingCiResult = re.sub(r, "", missingCiResult)
+
+badResultMsgs = { 'badUnparsedResult':
+    "BonsaiParser did not raise an exception when given a bad query",
+                  'invalidDateResult':
+    "BonsaiParser did not raise an exception when given an invalid date",
+                  'missingRevisionResult':
+    "BonsaiParser did not raise an exception when a revision was missing",
+                  'missingFilenameResult':
+    "BonsaiParser did not raise an exception when a filename was missing",
+                  'duplicateLogResult':
+    "BonsaiParser did not raise an exception when there was two <log> tags",
+                  'duplicateFilesResult':
+    "BonsaiParser did not raise an exception when there was two <files> tags",
+                  'missingCiResult':
+    "BonsaiParser did not raise an exception when there was no <ci> tags"
+}
+
+class FakeBonsaiPoller(BonsaiPoller):
+    def __init__(self):
+        BonsaiPoller.__init__(self, "fake url", "fake module", "fake branch")
+
+class TestBonsaiPoller(unittest.TestCase):
+    def testFullyFormedResult(self):
+        br = BonsaiParser(StringIO(goodUnparsedResult))
+        result = br.getData()
+        # make sure the result is a BonsaiResult
+        self.failUnless(isinstance(result, BonsaiResult))
+        # test for successful parsing
+        self.failUnlessEqual(goodParsedResult, result,
+            "BonsaiParser did not return the expected BonsaiResult")
+
+    def testBadUnparsedResult(self):
+        try:
+            BonsaiParser(StringIO(badUnparsedResult))
+            self.fail(badResultMsgs["badUnparsedResult"])
+        except InvalidResultError:
+            pass
+
+    def testInvalidDateResult(self):
+        try:
+            BonsaiParser(StringIO(invalidDateResult))
+            self.fail(badResultMsgs["invalidDateResult"])
+        except InvalidResultError:
+            pass
+
+    def testMissingRevisionResult(self):
+        try:
+            BonsaiParser(StringIO(missingRevisionResult))
+            self.fail(badResultMsgs["missingRevisionResult"])
+        except InvalidResultError:
+            pass
+
+    def testMissingFilenameResult(self):
+        try:
+            BonsaiParser(StringIO(missingFilenameResult))
+            self.fail(badResultMsgs["missingFilenameResult"])
+        except InvalidResultError:
+            pass
+
+    def testDuplicateLogResult(self):
+        try:
+            BonsaiParser(StringIO(duplicateLogResult))
+            self.fail(badResultMsgs["duplicateLogResult"])
+        except InvalidResultError:
+            pass
+
+    def testDuplicateFilesResult(self):
+        try:
+            BonsaiParser(StringIO(duplicateFilesResult))
+            self.fail(badResultMsgs["duplicateFilesResult"])
+        except InvalidResultError:
+            pass
+
+    def testMissingCiResult(self):
+        try:
+            BonsaiParser(StringIO(missingCiResult))
+            self.fail(badResultMsgs["missingCiResult"])
+        except EmptyResult:
+            pass
+
+    def testChangeNotSubmitted(self):
+        "Make sure a change is not submitted if the BonsaiParser fails"
+        poller = FakeBonsaiPoller()
+        lastChangeBefore = poller.lastChange
+        poller._process_changes(StringIO(badUnparsedResult))
+        # self.lastChange will not be updated if the change was not submitted
+        self.failUnlessEqual(lastChangeBefore, poller.lastChange)

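The fixture block above builds goodParsedResult out of FileNode, CiNode and BonsaiResult objects and then renders the matching XML by hand. A minimal sketch of that correspondence for a single checkin, using only the constructors already used above (it assumes the buildbot package imported by this change is on the path; the concrete values are made up for illustration):

    from buildbot.changes.bonsaipoller import FileNode, CiNode, BonsaiResult

    # one <ci who=... date=...> element containing a single <f rev=...> child
    files = [FileNode("1.2", "mozilla/example/file.cpp")]
    checkin = CiNode("example log message", "someone@example.org",
                     1161900000, files)
    expected = BonsaiResult([checkin])
    # BonsaiParser(StringIO(xml)).getData() is expected to compare equal to
    # this object when the XML describes the same checkin (see
    # testFullyFormedResult above).
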
Added: vendor/buildbot/current/buildbot/test/test_buildreq.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_buildreq.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_buildreq.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,181 @@
+# -*- test-case-name: buildbot.test.test_buildreq -*-
+
+from twisted.trial import unittest
+
+from buildbot import buildset, interfaces, sourcestamp
+from buildbot.process import base
+from buildbot.status import builder
+from buildbot.changes.changes import Change
+
+class Request(unittest.TestCase):
+    def testMerge(self):
+        R = base.BuildRequest
+        S = sourcestamp.SourceStamp
+        b1 =  R("why", S("branch1", None, None, None))
+        b1r1 = R("why2", S("branch1", "rev1", None, None))
+        b1r1a = R("why not", S("branch1", "rev1", None, None))
+        b1r2 = R("why3", S("branch1", "rev2", None, None))
+        b2r2 = R("why4", S("branch2", "rev2", None, None))
+        b1r1p1 = R("why5", S("branch1", "rev1", (3, "diff"), None))
+        c1 = Change("alice", [], "changed stuff", branch="branch1")
+        c2 = Change("alice", [], "changed stuff", branch="branch1")
+        c3 = Change("alice", [], "changed stuff", branch="branch1")
+        c4 = Change("alice", [], "changed stuff", branch="branch1")
+        c5 = Change("alice", [], "changed stuff", branch="branch1")
+        c6 = Change("alice", [], "changed stuff", branch="branch1")
+        b1c1 = R("changes", S("branch1", None, None, [c1,c2,c3]))
+        b1c2 = R("changes", S("branch1", None, None, [c4,c5,c6]))
+
+        self.failUnless(b1.canBeMergedWith(b1))
+        self.failIf(b1.canBeMergedWith(b1r1))
+        self.failIf(b1.canBeMergedWith(b2r2))
+        self.failIf(b1.canBeMergedWith(b1r1p1))
+        self.failIf(b1.canBeMergedWith(b1c1))
+
+        self.failIf(b1r1.canBeMergedWith(b1))
+        self.failUnless(b1r1.canBeMergedWith(b1r1))
+        self.failIf(b1r1.canBeMergedWith(b2r2))
+        self.failIf(b1r1.canBeMergedWith(b1r1p1))
+        self.failIf(b1r1.canBeMergedWith(b1c1))
+
+        self.failIf(b1r2.canBeMergedWith(b1))
+        self.failIf(b1r2.canBeMergedWith(b1r1))
+        self.failUnless(b1r2.canBeMergedWith(b1r2))
+        self.failIf(b1r2.canBeMergedWith(b2r2))
+        self.failIf(b1r2.canBeMergedWith(b1r1p1))
+
+        self.failIf(b1r1p1.canBeMergedWith(b1))
+        self.failIf(b1r1p1.canBeMergedWith(b1r1))
+        self.failIf(b1r1p1.canBeMergedWith(b1r2))
+        self.failIf(b1r1p1.canBeMergedWith(b2r2))
+        self.failIf(b1r1p1.canBeMergedWith(b1c1))
+
+        self.failIf(b1c1.canBeMergedWith(b1))
+        self.failIf(b1c1.canBeMergedWith(b1r1))
+        self.failIf(b1c1.canBeMergedWith(b1r2))
+        self.failIf(b1c1.canBeMergedWith(b2r2))
+        self.failIf(b1c1.canBeMergedWith(b1r1p1))
+        self.failUnless(b1c1.canBeMergedWith(b1c1))
+        self.failUnless(b1c1.canBeMergedWith(b1c2))
+
+        sm = b1.mergeWith([])
+        self.failUnlessEqual(sm.branch, "branch1")
+        self.failUnlessEqual(sm.revision, None)
+        self.failUnlessEqual(sm.patch, None)
+        self.failUnlessEqual(sm.changes, [])
+
+        ss = b1r1.mergeWith([b1r1])
+        self.failUnlessEqual(ss, S("branch1", "rev1", None, None))
+        why = b1r1.mergeReasons([b1r1])
+        self.failUnlessEqual(why, "why2")
+        why = b1r1.mergeReasons([b1r1a])
+        self.failUnlessEqual(why, "why2, why not")
+
+        ss = b1c1.mergeWith([b1c2])
+        self.failUnlessEqual(ss, S("branch1", None, None, [c1,c2,c3,c4,c5,c6]))
+        why = b1c1.mergeReasons([b1c2])
+        self.failUnlessEqual(why, "changes")
+
+
+class FakeBuilder:
+    name = "fake"
+    def __init__(self):
+        self.requests = []
+    def submitBuildRequest(self, req):
+        self.requests.append(req)
+
+
+class Set(unittest.TestCase):
+    def testBuildSet(self):
+        S = buildset.BuildSet
+        a,b = FakeBuilder(), FakeBuilder()
+
+        # two builds, the first one fails, the second one succeeds. The
+        # waitUntilSuccess watcher fires as soon as the first one fails,
+        # while the waitUntilFinished watcher doesn't fire until all builds
+        # are complete.
+
+        source = sourcestamp.SourceStamp()
+        s = S(["a","b"], source, "forced build")
+        s.start([a,b])
+        self.failUnlessEqual(len(a.requests), 1)
+        self.failUnlessEqual(len(b.requests), 1)
+        r1 = a.requests[0]
+        self.failUnlessEqual(r1.reason, s.reason)
+        self.failUnlessEqual(r1.source, s.source)
+
+        st = s.status
+        self.failUnlessEqual(st.getSourceStamp(), source)
+        self.failUnlessEqual(st.getReason(), "forced build")
+        self.failUnlessEqual(st.getBuilderNames(), ["a","b"])
+        self.failIf(st.isFinished())
+        brs = st.getBuildRequests()
+        self.failUnlessEqual(len(brs), 2)
+
+        res = []
+        d1 = s.waitUntilSuccess()
+        d1.addCallback(lambda r: res.append(("success", r)))
+        d2 = s.waitUntilFinished()
+        d2.addCallback(lambda r: res.append(("finished", r)))
+
+        self.failUnlessEqual(res, [])
+
+        # the first build finishes here, with FAILURE
+        builderstatus_a = builder.BuilderStatus("a")
+        bsa = builder.BuildStatus(builderstatus_a, 1)
+        bsa.setResults(builder.FAILURE)
+        a.requests[0].finished(bsa)
+
+        # any FAILURE flunks the BuildSet immediately, so the
+        # waitUntilSuccess deferred fires right away. However, the
+        # waitUntilFinished deferred must wait until all builds have
+        # completed.
+        self.failUnlessEqual(len(res), 1)
+        self.failUnlessEqual(res[0][0], "success")
+        bss = res[0][1]
+        self.failUnless(interfaces.IBuildSetStatus(bss, None))
+        self.failUnlessEqual(bss.getResults(), builder.FAILURE)
+
+        # here we finish the second build
+        builderstatus_b = builder.BuilderStatus("b")
+        bsb = builder.BuildStatus(builderstatus_b, 1)
+        bsb.setResults(builder.SUCCESS)
+        b.requests[0].finished(bsb)
+
+        # .. which ought to fire the waitUntilFinished deferred
+        self.failUnlessEqual(len(res), 2)
+        self.failUnlessEqual(res[1][0], "finished")
+        self.failUnlessEqual(res[1][1], bss)
+
+        # and finish the BuildSet overall
+        self.failUnless(st.isFinished())
+        self.failUnlessEqual(st.getResults(), builder.FAILURE)
+
+    def testSuccess(self):
+        S = buildset.BuildSet
+        a,b = FakeBuilder(), FakeBuilder()
+        # this time, both builds succeed
+
+        source = sourcestamp.SourceStamp()
+        s = S(["a","b"], source, "forced build")
+        s.start([a,b])
+
+        st = s.status
+        self.failUnlessEqual(st.getSourceStamp(), source)
+        self.failUnlessEqual(st.getReason(), "forced build")
+        self.failUnlessEqual(st.getBuilderNames(), ["a","b"])
+        self.failIf(st.isFinished())
+
+        builderstatus_a = builder.BuilderStatus("a")
+        bsa = builder.BuildStatus(builderstatus_a, 1)
+        bsa.setResults(builder.SUCCESS)
+        a.requests[0].finished(bsa)
+
+        builderstatus_b = builder.BuilderStatus("b")
+        bsb = builder.BuildStatus(builderstatus_b, 1)
+        bsb.setResults(builder.SUCCESS)
+        b.requests[0].finished(bsb)
+
+        self.failUnless(st.isFinished())
+        self.failUnlessEqual(st.getResults(), builder.SUCCESS)
+        

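testMerge above pins down when two BuildRequests may be merged: patched SourceStamps never merge, change-driven stamps only merge with other change-driven stamps on the same branch, and plain stamps must match both branch and revision. A small sketch of just that rule (not the buildbot.process.base implementation; the tuples stand in for SourceStamps):

    def can_merge(a, b):
        # a and b are (branch, revision, patch, changes) tuples standing in
        # for SourceStamps
        a_branch, a_rev, a_patch, a_changes = a
        b_branch, b_rev, b_patch, b_changes = b
        if a_patch or b_patch:
            return False                    # patched builds never merge
        if a_changes and b_changes:
            return a_branch == b_branch     # change-driven: same branch only
        if a_changes or b_changes:
            return False
        return a_branch == b_branch and a_rev == b_rev

    assert can_merge(("branch1", "rev1", None, []),
                     ("branch1", "rev1", None, []))
    assert not can_merge(("branch1", "rev1", None, []),
                         ("branch1", "rev2", None, []))
    assert not can_merge(("branch1", None, None, ["c1"]),
                         ("branch1", "rev1", None, []))

When a merge does happen, mergeWith() concatenates the change lists and mergeReasons() joins the reasons with ", ", as the "why2, why not" assertion shows.
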
Added: vendor/buildbot/current/buildbot/test/test_changes.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_changes.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_changes.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,230 @@
+# -*- test-case-name: buildbot.test.test_changes -*-
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+
+from buildbot import master
+from buildbot.twcompat import maybeWait
+from buildbot.changes import pb
+from buildbot.scripts import runner
+
+d1 = {'files': ["Project/foo.c", "Project/bar/boo.c"],
+      'who': "marvin",
+      'comments': "Some changes in Project"}
+d2 = {'files': ["OtherProject/bar.c"],
+      'who': "zaphod",
+      'comments': "other changes"}
+d3 = {'files': ["Project/baz.c", "OtherProject/bloo.c"],
+      'who': "alice",
+      'comments': "mixed changes"}
+d4 = {'files': ["trunk/baz.c", "branches/foobranch/foo.c", "trunk/bar.c"],
+      'who': "alice",
+      'comments': "mixed changes"}
+
+class TestChangePerspective(unittest.TestCase):
+
+    def setUp(self):
+        self.changes = []
+
+    def addChange(self, c):
+        self.changes.append(c)
+
+    def testNoPrefix(self):
+        p = pb.ChangePerspective(self, None)
+        p.perspective_addChange(d1)
+        self.failUnlessEqual(len(self.changes), 1)
+        c1 = self.changes[0]
+        self.failUnlessEqual(c1.files,
+                             ["Project/foo.c", "Project/bar/boo.c"])
+        self.failUnlessEqual(c1.comments, "Some changes in Project")
+        self.failUnlessEqual(c1.who, "marvin")
+
+    def testPrefix(self):
+        p = pb.ChangePerspective(self, "Project/")
+
+        p.perspective_addChange(d1)
+        self.failUnlessEqual(len(self.changes), 1)
+        c1 = self.changes[-1]
+        self.failUnlessEqual(c1.files, ["foo.c", "bar/boo.c"])
+        self.failUnlessEqual(c1.comments, "Some changes in Project")
+        self.failUnlessEqual(c1.who, "marvin")
+
+        p.perspective_addChange(d2) # should be ignored
+        self.failUnlessEqual(len(self.changes), 1)
+
+        p.perspective_addChange(d3) # should ignore the OtherProject file
+        self.failUnlessEqual(len(self.changes), 2)
+
+        c3 = self.changes[-1]
+        self.failUnlessEqual(c3.files, ["baz.c"])
+        self.failUnlessEqual(c3.comments, "mixed changes")
+        self.failUnlessEqual(c3.who, "alice")
+
+    def testPrefix2(self):
+        p = pb.ChangePerspective(self, "Project/bar/")
+
+        p.perspective_addChange(d1)
+        self.failUnlessEqual(len(self.changes), 1)
+        c1 = self.changes[-1]
+        self.failUnlessEqual(c1.files, ["boo.c"])
+        self.failUnlessEqual(c1.comments, "Some changes in Project")
+        self.failUnlessEqual(c1.who, "marvin")
+
+        p.perspective_addChange(d2) # should be ignored
+        self.failUnlessEqual(len(self.changes), 1)
+
+        p.perspective_addChange(d3) # should ignore this too
+        self.failUnlessEqual(len(self.changes), 1)
+
+    def testPrefix3(self):
+        p = pb.ChangePerspective(self, "trunk/")
+
+        p.perspective_addChange(d4)
+        self.failUnlessEqual(len(self.changes), 1)
+        c1 = self.changes[-1]
+        self.failUnlessEqual(c1.files, ["baz.c", "bar.c"])
+        self.failUnlessEqual(c1.comments, "mixed changes")
+
+    def testPrefix4(self):
+        p = pb.ChangePerspective(self, "branches/foobranch/")
+
+        p.perspective_addChange(d4)
+        self.failUnlessEqual(len(self.changes), 1)
+        c1 = self.changes[-1]
+        self.failUnlessEqual(c1.files, ["foo.c"])
+        self.failUnlessEqual(c1.comments, "mixed changes")
+
+
+
+config_empty = """
+BuildmasterConfig = c = {}
+c['bots'] = []
+c['builders'] = []
+c['sources'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 0
+"""
+
+config_sender = config_empty + \
+"""
+from buildbot.changes import pb
+c['sources'] = [pb.PBChangeSource(port=None)]
+"""
+
+class Sender(unittest.TestCase):
+    def setUp(self):
+        self.master = master.BuildMaster(".")
+    def tearDown(self):
+        d = defer.maybeDeferred(self.master.stopService)
+        # TODO: something in Twisted-2.0.0 (and probably 2.0.1) doesn't shut
+        # down the Broker listening socket when it's supposed to.
+        # Twisted-1.3.0, and current SVN (which will be post-2.0.1) are ok.
+        # The stall() below is a quick hack to deal with the problem. I need
+        # to
+        # investigate more thoroughly and find a better solution.
+        d.addCallback(self.stall, 0.1)
+        return maybeWait(d)
+
+    def stall(self, res, timeout):
+        d = defer.Deferred()
+        reactor.callLater(timeout, d.callback, res)
+        return d
+
+    def testSender(self):
+        self.master.loadConfig(config_empty)
+        self.master.startService()
+        # TODO: BuildMaster.loadChanges replaces the change_svc object, so we
+        # have to load it twice. Clean this up.
+        d = self.master.loadConfig(config_sender)
+        d.addCallback(self._testSender_1)
+        return maybeWait(d)
+
+    def _testSender_1(self, res):
+        self.cm = cm = self.master.change_svc
+        s1 = list(self.cm)[0]
+        port = self.master.slavePort._port.getHost().port
+
+        self.options = {'username': "alice",
+                        'master': "localhost:%d" % port,
+                        'files': ["foo.c"],
+                        }
+
+        d = runner.sendchange(self.options)
+        d.addCallback(self._testSender_2)
+        return d
+
+    def _testSender_2(self, res):
+        # now check that the change was received
+        self.failUnlessEqual(len(self.cm.changes), 1)
+        c = self.cm.changes.pop()
+        self.failUnlessEqual(c.who, "alice")
+        self.failUnlessEqual(c.files, ["foo.c"])
+        self.failUnlessEqual(c.comments, "")
+        self.failUnlessEqual(c.revision, None)
+
+        self.options['revision'] = "r123"
+        self.options['comments'] = "test change"
+
+        d = runner.sendchange(self.options)
+        d.addCallback(self._testSender_3)
+        return d
+
+    def _testSender_3(self, res):
+        self.failUnlessEqual(len(self.cm.changes), 1)
+        c = self.cm.changes.pop()
+        self.failUnlessEqual(c.who, "alice")
+        self.failUnlessEqual(c.files, ["foo.c"])
+        self.failUnlessEqual(c.comments, "test change")
+        self.failUnlessEqual(c.revision, "r123")
+
+        # test options['logfile'] by creating a temporary file
+        logfile = self.mktemp()
+        f = open(logfile, "wt")
+        f.write("longer test change")
+        f.close()
+        self.options['comments'] = None
+        self.options['logfile'] = logfile
+
+        d = runner.sendchange(self.options)
+        d.addCallback(self._testSender_4)
+        return d
+
+    def _testSender_4(self, res):
+        self.failUnlessEqual(len(self.cm.changes), 1)
+        c = self.cm.changes.pop()
+        self.failUnlessEqual(c.who, "alice")
+        self.failUnlessEqual(c.files, ["foo.c"])
+        self.failUnlessEqual(c.comments, "longer test change")
+        self.failUnlessEqual(c.revision, "r123")
+
+        # make sure that numeric revisions work too
+        self.options['logfile'] = None
+        del self.options['revision']
+        self.options['revision_number'] = 42
+
+        d = runner.sendchange(self.options)
+        d.addCallback(self._testSender_5)
+        return d
+
+    def _testSender_5(self, res):
+        self.failUnlessEqual(len(self.cm.changes), 1)
+        c = self.cm.changes.pop()
+        self.failUnlessEqual(c.who, "alice")
+        self.failUnlessEqual(c.files, ["foo.c"])
+        self.failUnlessEqual(c.comments, "")
+        self.failUnlessEqual(c.revision, 42)
+
+        # verify --branch too
+        self.options['branch'] = "branches/test"
+
+        d = runner.sendchange(self.options)
+        d.addCallback(self._testSender_6)
+        return d
+
+    def _testSender_6(self, res):
+        self.failUnlessEqual(len(self.cm.changes), 1)
+        c = self.cm.changes.pop()
+        self.failUnlessEqual(c.who, "alice")
+        self.failUnlessEqual(c.files, ["foo.c"])
+        self.failUnlessEqual(c.comments, "")
+        self.failUnlessEqual(c.revision, 42)
+        self.failUnlessEqual(c.branch, "branches/test")

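The TestChangePerspective cases above all exercise the same prefix rule: when a prefix is configured, only files under that prefix are kept, the prefix is stripped from them, and a change whose files all fall outside the prefix is dropped entirely. A sketch of just that filtering step (not the pb.ChangePerspective implementation):

    def filter_files(files, prefix):
        # prefix=None means "accept everything unchanged"
        if prefix is None:
            return list(files)
        return [f[len(prefix):] for f in files if f.startswith(prefix)]

    assert filter_files(["Project/foo.c", "OtherProject/bar.c"],
                        "Project/") == ["foo.c"]
    assert filter_files(["OtherProject/bar.c"], "Project/") == []  # ignored
    assert filter_files(["Project/foo.c"], None) == ["Project/foo.c"]
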
Added: vendor/buildbot/current/buildbot/test/test_config.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_config.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_config.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,1102 @@
+# -*- test-case-name: buildbot.test.test_config -*-
+
+from __future__ import generators
+import os
+
+from twisted.trial import unittest
+from twisted.python import failure
+from twisted.internet import defer
+
+cvstoys = None
+try:
+    import cvstoys
+    from buildbot.changes.freshcvs import FreshCVSSource
+except ImportError:
+    pass
+
+from buildbot.twcompat import providedBy, maybeWait
+from buildbot.master import BuildMaster
+from buildbot import scheduler
+from twisted.application import service, internet
+from twisted.spread import pb
+from twisted.web.server import Site
+from twisted.web.distrib import ResourcePublisher
+from buildbot.process.builder import Builder
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.steps.source import CVS
+from buildbot.steps.shell import Compile, Test
+from buildbot.status import base
+words = None
+try:
+    from buildbot.status import words
+except ImportError:
+    pass
+
+emptyCfg = \
+"""
+BuildmasterConfig = c = {}
+c['bots'] = []
+c['sources'] = []
+c['schedulers'] = []
+c['builders'] = []
+c['slavePortnum'] = 9999
+c['projectName'] = 'dummy project'
+c['projectURL'] = 'http://dummy.example.com'
+c['buildbotURL'] = 'http://dummy.example.com/buildbot'
+"""
+
+buildersCfg = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+BuildmasterConfig = c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 9999
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+                  'builddir':'workdir', 'factory':f1}]
+"""
+
+buildersCfg2 = buildersCfg + \
+"""
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule2')
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+                  'builddir':'workdir', 'factory':f1}]
+"""
+
+buildersCfg3 = buildersCfg2 + \
+"""
+c['builders'].append({'name': 'builder2', 'slavename': 'bot1',
+                      'builddir': 'workdir2', 'factory': f1 })
+"""
+
+buildersCfg4 = buildersCfg2 + \
+"""
+c['builders'] = [{ 'name': 'builder1', 'slavename': 'bot1',
+                   'builddir': 'newworkdir', 'factory': f1 },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                   'builddir': 'workdir2', 'factory': f1 }]
+"""
+
+wpCfg1 = buildersCfg + \
+"""
+from buildbot.steps import shell
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+f1.addStep(shell.ShellCommand, command=[shell.WithProperties('echo')])
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+                  'builddir':'workdir1', 'factory': f1}]
+"""
+
+wpCfg2 = buildersCfg + \
+"""
+from buildbot.steps import shell
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+f1.addStep(shell.ShellCommand,
+           command=[shell.WithProperties('echo %s', 'revision')])
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+                  'builddir':'workdir1', 'factory': f1}]
+"""
+
+
+
+ircCfg1 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['twisted'])]
+"""
+
+ircCfg2 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['twisted']),
+               words.IRC('irc.example.com', 'otherbot', ['chan1', 'chan2'])]
+"""
+
+ircCfg3 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['knotted'])]
+"""
+
+webCfg1 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port=9980)]
+"""
+
+webCfg2 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port=9981)]
+"""
+
+webCfg3 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port='tcp:9981:interface=127.0.0.1')]
+"""
+
+webNameCfg1 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(distrib_port='~/.twistd-web-pb')]
+"""
+
+webNameCfg2 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(distrib_port='./bar.socket')]
+"""
+
+debugPasswordCfg = emptyCfg + \
+"""
+c['debugPassword'] = 'sekrit'
+"""
+
+interlockCfgBad = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f1 },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f1 },
+                   ]
+# interlocks have been removed
+c['interlocks'] = [('lock1', ['builder1'], ['builder2', 'builder3']),
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad1 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[])])
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f1 },
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad2 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock, SlaveLock
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = SlaveLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[])])
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f1 },
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad3 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[l2])])
+f2 = BuildFactory([s(Dummy)])
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f2, 'locks': [l1] },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f1 },
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg1a = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.locks import MasterLock
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f1 },
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg1b = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.locks import MasterLock
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f1, 'locks': [l1] },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f1 },
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+# test out step Locks
+lockCfg2a = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy, locks=[l1,l2])])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f1 },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f2 },
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg2b = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy, locks=[l1])])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f1 },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f2 },
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg2c = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy)])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+                 { 'name': 'builder1', 'slavename': 'bot1',
+                 'builddir': 'workdir', 'factory': f1 },
+                 { 'name': 'builder2', 'slavename': 'bot1',
+                 'builddir': 'workdir2', 'factory': f2 },
+                   ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+schedulersCfg = \
+"""
+from buildbot.scheduler import Scheduler, Dependent
+from buildbot.process.factory import BasicBuildFactory
+c = {}
+c['bots'] = [('bot1', 'pw1')]
+c['sources'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+b1 = {'name':'builder1', 'slavename':'bot1',
+      'builddir':'workdir', 'factory':f1}
+c['builders'] = [b1]
+c['schedulers'] = [Scheduler('full', None, 60, ['builder1'])]
+c['slavePortnum'] = 9999
+c['projectName'] = 'dummy project'
+c['projectURL'] = 'http://dummy.example.com'
+c['buildbotURL'] = 'http://dummy.example.com/buildbot'
+BuildmasterConfig = c
+"""
+
+class ConfigTest(unittest.TestCase):
+    def setUp(self):
+        self.buildmaster = BuildMaster(".")
+        
+    def failUnlessListsEquivalent(self, list1, list2):
+        l1 = list1[:]
+        l1.sort()
+        l2 = list2[:]
+        l2.sort()
+        self.failUnlessEqual(l1, l2)
+
+    def servers(self, s, types):
+        # perform a recursive search of s.services, looking for instances of
+        # twisted.application.internet.TCPServer, then extract their .args
+        # values to find the TCP ports they want to listen on
+        for child in s:
+            if providedBy(child, service.IServiceCollection):
+                for gc in self.servers(child, types):
+                    yield gc
+            if isinstance(child, types):
+                yield child
+
+    def TCPports(self, s):
+        return list(self.servers(s, internet.TCPServer))
+    def UNIXports(self, s):
+        return list(self.servers(s, internet.UNIXServer))
+    def TCPclients(self, s):
+        return list(self.servers(s, internet.TCPClient))
+
+    def checkPorts(self, svc, expected):
+        """Verify that the TCPServer and UNIXServer children of the given
+        service have the expected portnum/pathname and factory classes. As a
+        side-effect, return a list of servers in the same order as the
+        'expected' list. This can be used to verify properties of the
+        factories contained therein."""
+        
+        expTCP = [e for e in expected if type(e[0]) == int]
+        expUNIX = [e for e in expected if type(e[0]) == str]
+        haveTCP = [(p.args[0], p.args[1].__class__)
+                   for p in self.TCPports(svc)]
+        haveUNIX = [(p.args[0], p.args[1].__class__)
+                    for p in self.UNIXports(svc)]
+        self.failUnlessListsEquivalent(expTCP, haveTCP)
+        self.failUnlessListsEquivalent(expUNIX, haveUNIX)
+        ret = []
+        for e in expected:
+            for have in self.TCPports(svc) + self.UNIXports(svc):
+                if have.args[0] == e[0]:
+                    ret.append(have)
+                    continue
+        assert(len(ret) == len(expected))
+        return ret
+
+    def testEmpty(self):
+        self.failUnlessRaises(KeyError, self.buildmaster.loadConfig, "")
+
+    def testSimple(self):
+        # covers slavePortnum, base checker passwords
+        master = self.buildmaster
+        master.loadChanges()
+
+        master.loadConfig(emptyCfg)
+        # note: this doesn't actually start listening, because the app
+        # hasn't been started running
+        self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+        self.checkPorts(master, [(9999, pb.PBServerFactory)])
+        self.failUnlessEqual(list(master.change_svc), [])
+        self.failUnlessEqual(master.botmaster.builders, {})
+        self.failUnlessEqual(master.checker.users,
+                             {"change": "changepw"})
+        self.failUnlessEqual(master.projectName, "dummy project")
+        self.failUnlessEqual(master.projectURL, "http://dummy.example.com")
+        self.failUnlessEqual(master.buildbotURL,
+                             "http://dummy.example.com/buildbot")
+
+    def testSlavePortnum(self):
+        master = self.buildmaster
+        master.loadChanges()
+
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+        ports = self.checkPorts(master, [(9999, pb.PBServerFactory)])
+        p = ports[0]
+
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+        ports = self.checkPorts(master, [(9999, pb.PBServerFactory)])
+        self.failUnlessIdentical(p, ports[0],
+                                 "the slave port was changed even " + \
+                                 "though the configuration was not")
+
+        master.loadConfig(emptyCfg + "c['slavePortnum'] = 9000\n")
+        self.failUnlessEqual(master.slavePortnum, "tcp:9000")
+        ports = self.checkPorts(master, [(9000, pb.PBServerFactory)])
+        self.failIf(p is ports[0],
+                    "slave port was unchanged but configuration was changed")
+
+    def testBots(self):
+        master = self.buildmaster
+        master.loadChanges()
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.botmaster.builders, {})
+        self.failUnlessEqual(master.checker.users,
+                             {"change": "changepw"})
+        botsCfg = (emptyCfg +
+                   "c['bots'] = [('bot1', 'pw1'), ('bot2', 'pw2')]\n")
+        master.loadConfig(botsCfg)
+        self.failUnlessEqual(master.checker.users,
+                             {"change": "changepw",
+                              "bot1": "pw1",
+                              "bot2": "pw2"})
+        master.loadConfig(botsCfg)
+        self.failUnlessEqual(master.checker.users,
+                             {"change": "changepw",
+                              "bot1": "pw1",
+                              "bot2": "pw2"})
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.checker.users,
+                             {"change": "changepw"})
+
+
+    def testSources(self):
+        if not cvstoys:
+            raise unittest.SkipTest("this test needs CVSToys installed")
+        master = self.buildmaster
+        master.loadChanges()
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(list(master.change_svc), [])
+
+        self.sourcesCfg = emptyCfg + \
+"""
+from buildbot.changes.freshcvs import FreshCVSSource
+s1 = FreshCVSSource('cvs.example.com', 1000, 'pname', 'spass',
+                    prefix='Prefix/')
+c['sources'] = [s1]
+"""
+
+        d = master.loadConfig(self.sourcesCfg)
+        d.addCallback(self._testSources_1)
+        return maybeWait(d)
+
+    def _testSources_1(self, res):
+        self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
+        s1 = list(self.buildmaster.change_svc)[0]
+        self.failUnless(isinstance(s1, FreshCVSSource))
+        self.failUnlessEqual(s1.host, "cvs.example.com")
+        self.failUnlessEqual(s1.port, 1000)
+        self.failUnlessEqual(s1.prefix, "Prefix/")
+        self.failUnlessEqual(s1, list(self.buildmaster.change_svc)[0])
+        self.failUnless(s1.parent)
+
+        # verify that unchanged sources are not interrupted
+        d = self.buildmaster.loadConfig(self.sourcesCfg)
+        d.addCallback(self._testSources_2, s1)
+        return d
+
+    def _testSources_2(self, res, s1):
+        self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
+        s2 = list(self.buildmaster.change_svc)[0]
+        self.failUnlessIdentical(s1, s2)
+        self.failUnless(s1.parent)
+
+        # make sure we can get rid of the sources too
+        d = self.buildmaster.loadConfig(emptyCfg)
+        d.addCallback(self._testSources_3)
+        return d
+
+    def _testSources_3(self, res):
+        self.failUnlessEqual(list(self.buildmaster.change_svc), [])
+
+    def shouldBeFailure(self, res, *expected):
+        self.failUnless(isinstance(res, failure.Failure),
+                        "we expected this to fail, not produce %s" % (res,))
+        res.trap(*expected)
+        return None # all is good
+
+    def testSchedulerErrors(self):
+        master = self.buildmaster
+        master.loadChanges()
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.allSchedulers(), [])
+
+        def _shouldBeFailure(res, hint=None):
+            self.shouldBeFailure(res, AssertionError, ValueError)
+            if hint:
+                self.failUnless(str(res).find(hint) != -1)
+
+        def _loadConfig(res, newcfg):
+            return self.buildmaster.loadConfig(newcfg)
+        d = defer.succeed(None)
+
+        # c['schedulers'] must be a list
+        badcfg = schedulersCfg + \
+"""
+c['schedulers'] = Scheduler('full', None, 60, ['builder1'])
+"""
+        d.addCallback(_loadConfig, badcfg)
+        d.addBoth(_shouldBeFailure,
+                  "c['schedulers'] must be a list of Scheduler instances")
+
+        # c['schedulers'] must be a list of IScheduler objects
+        badcfg = schedulersCfg + \
+"""
+c['schedulers'] = ['oops', 'problem']
+"""
+        d.addCallback(_loadConfig, badcfg)
+        d.addBoth(_shouldBeFailure,
+                  "c['schedulers'] must be a list of Scheduler instances")
+
+        # c['schedulers'] must point at real builders
+        badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, ['builder-bogus'])]
+"""
+        d.addCallback(_loadConfig, badcfg)
+        d.addBoth(_shouldBeFailure, "uses unknown builder")
+
+        # builderNames= must be a list
+        badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, 'builder1')]
+"""
+        d.addCallback(_loadConfig, badcfg)
+        d.addBoth(_shouldBeFailure,
+                  "must be a list of Builder description names")
+
+        # builderNames= must be a list of strings, not dicts
+        badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, [b1])]
+"""
+        d.addCallback(_loadConfig, badcfg)
+        d.addBoth(_shouldBeFailure,
+                  "must be a list of Builder description names")
+
+        # builderNames= must be a list of strings, not a dict
+        badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, b1)]
+"""
+        d.addCallback(_loadConfig, badcfg)
+        d.addBoth(_shouldBeFailure,
+                  "must be a list of Builder description names")
+
+        # each Scheduler must have a unique name
+        badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('dup', None, 60, []),
+                   Scheduler('dup', None, 60, [])]
+"""
+        d.addCallback(_loadConfig, badcfg)
+        d.addBoth(_shouldBeFailure, "Schedulers must have unique names")
+
+        return maybeWait(d)
+
+    def testSchedulers(self):
+        master = self.buildmaster
+        master.loadChanges()
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.allSchedulers(), [])
+
+        d = self.buildmaster.loadConfig(schedulersCfg)
+        d.addCallback(self._testSchedulers_1)
+        return maybeWait(d)
+
+    def _testSchedulers_1(self, res):
+        sch = self.buildmaster.allSchedulers()
+        self.failUnlessEqual(len(sch), 1)
+        s = sch[0]
+        self.failUnless(isinstance(s, scheduler.Scheduler))
+        self.failUnlessEqual(s.name, "full")
+        self.failUnlessEqual(s.branch, None)
+        self.failUnlessEqual(s.treeStableTimer, 60)
+        self.failUnlessEqual(s.builderNames, ['builder1'])
+
+        newcfg = schedulersCfg + \
+"""
+s1 = Scheduler('full', None, 60, ['builder1'])
+c['schedulers'] = [s1, Dependent('downstream', s1, ['builder1'])]
+"""
+        d = self.buildmaster.loadConfig(newcfg)
+        d.addCallback(self._testSchedulers_2, newcfg)
+        return d
+    def _testSchedulers_2(self, res, newcfg):
+        sch = self.buildmaster.allSchedulers()
+        self.failUnlessEqual(len(sch), 2)
+        s = sch[0]
+        self.failUnless(isinstance(s, scheduler.Scheduler))
+        s = sch[1]
+        self.failUnless(isinstance(s, scheduler.Dependent))
+        self.failUnlessEqual(s.name, "downstream")
+        self.failUnlessEqual(s.builderNames, ['builder1'])
+
+        # reloading the same config file should leave the schedulers in place
+        d = self.buildmaster.loadConfig(newcfg)
+        d.addCallback(self._testSchedulers_3, sch)
+        return d
+    def _testSchedulers_3(self, res, sch1):
+        sch2 = self.buildmaster.allSchedulers()
+        self.failUnlessEqual(len(sch2), 2)
+        sch1.sort()
+        sch2.sort()
+        self.failUnlessEqual(sch1, sch2)
+        self.failUnlessIdentical(sch1[0], sch2[0])
+        self.failUnlessIdentical(sch1[1], sch2[1])
+        self.failUnlessIdentical(sch1[0].parent, self.buildmaster)
+        self.failUnlessIdentical(sch1[1].parent, self.buildmaster)
+
+
+
+    def testBuilders(self):
+        master = self.buildmaster
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.botmaster.builders, {})
+
+        master.loadConfig(buildersCfg)
+        self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+        self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+        b = master.botmaster.builders["builder1"]
+        self.failUnless(isinstance(b, Builder))
+        self.failUnlessEqual(b.name, "builder1")
+        self.failUnlessEqual(b.slavenames, ["bot1"])
+        self.failUnlessEqual(b.builddir, "workdir")
+        f1 = b.buildFactory
+        self.failUnless(isinstance(f1, BasicBuildFactory))
+        steps = f1.steps
+        self.failUnlessEqual(len(steps), 3)
+        self.failUnlessEqual(steps[0], (CVS,
+                                        {'cvsroot': 'cvsroot',
+                                         'cvsmodule': 'cvsmodule',
+                                         'mode': 'clobber'}))
+        self.failUnlessEqual(steps[1], (Compile,
+                                        {'command': 'make all'}))
+        self.failUnlessEqual(steps[2], (Test,
+                                        {'command': 'make check'}))
+
+
+        # make sure a reload of the same data doesn't interrupt the Builder
+        master.loadConfig(buildersCfg)
+        self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+        self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+        b2 = master.botmaster.builders["builder1"]
+        self.failUnlessIdentical(b, b2)
+        # TODO: test that the BuilderStatus object doesn't change
+        #statusbag2 = master.client_svc.statusbags["builder1"]
+        #self.failUnlessIdentical(statusbag, statusbag2)
+
+        # but changing something should result in a new Builder
+        master.loadConfig(buildersCfg2)
+        self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+        self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+        b3 = master.botmaster.builders["builder1"]
+        self.failIf(b is b3)
+        # the statusbag remains the same TODO
+        #statusbag3 = master.client_svc.statusbags["builder1"]
+        #self.failUnlessIdentical(statusbag, statusbag3)
+
+        # adding new builder
+        master.loadConfig(buildersCfg3)
+        self.failUnlessEqual(master.botmaster.builderNames, ["builder1",
+                                                             "builder2"])
+        self.failUnlessListsEquivalent(master.botmaster.builders.keys(),
+                                       ["builder1", "builder2"])
+        b4 = master.botmaster.builders["builder1"]
+        self.failUnlessIdentical(b3, b4)
+
+        # changing first builder should leave it at the same place in the list
+        master.loadConfig(buildersCfg4)
+        self.failUnlessEqual(master.botmaster.builderNames, ["builder1",
+                                                             "builder2"])
+        self.failUnlessListsEquivalent(master.botmaster.builders.keys(),
+                                       ["builder1", "builder2"])
+        b5 = master.botmaster.builders["builder1"]
+        self.failIf(b4 is b5)
+        
+        # and removing it should make the Builder go away
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.botmaster.builderNames, [])
+        self.failUnlessEqual(master.botmaster.builders, {})
+        #self.failUnlessEqual(master.client_svc.statusbags, {}) # TODO
+
+    def testWithProperties(self):
+        master = self.buildmaster
+        master.loadConfig(wpCfg1)
+        self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+        self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+        b1 = master.botmaster.builders["builder1"]
+
+        # reloading the same config should leave the builder unchanged
+        master.loadConfig(wpCfg1)
+        b2 = master.botmaster.builders["builder1"]
+        self.failUnlessIdentical(b1, b2)
+
+        # but changing the parameters of the WithProperties should change it
+        master.loadConfig(wpCfg2)
+        b3 = master.botmaster.builders["builder1"]
+        self.failIf(b1 is b3)
+
+        # again, reloading same config should leave the builder unchanged
+        master.loadConfig(wpCfg2)
+        b4 = master.botmaster.builders["builder1"]
+        self.failUnlessIdentical(b3, b4)
+
+    def checkIRC(self, m, expected):
+        ircs = {}
+        for irc in self.servers(m, words.IRC):
+            ircs[irc.host] = (irc.nick, irc.channels)
+        self.failUnlessEqual(ircs, expected)
+
+    def testIRC(self):
+        if not words:
+            raise unittest.SkipTest("Twisted Words package is not installed")
+        master = self.buildmaster
+        master.loadChanges()
+        d = master.loadConfig(emptyCfg)
+        e1 = {}
+        d.addCallback(lambda res: self.checkIRC(master, e1))
+        d.addCallback(lambda res: master.loadConfig(ircCfg1))
+        e2 = {'irc.us.freenode.net': ('buildbot', ['twisted'])}
+        d.addCallback(lambda res: self.checkIRC(master, e2))
+        d.addCallback(lambda res: master.loadConfig(ircCfg2))
+        e3 = {'irc.us.freenode.net': ('buildbot', ['twisted']),
+              'irc.example.com': ('otherbot', ['chan1', 'chan2'])}
+        d.addCallback(lambda res: self.checkIRC(master, e3))
+        d.addCallback(lambda res: master.loadConfig(ircCfg3))
+        e4 = {'irc.us.freenode.net': ('buildbot', ['knotted'])}
+        d.addCallback(lambda res: self.checkIRC(master, e4))
+        d.addCallback(lambda res: master.loadConfig(ircCfg1))
+        e5 = {'irc.us.freenode.net': ('buildbot', ['twisted'])}
+        d.addCallback(lambda res: self.checkIRC(master, e5))
+        return maybeWait(d)
+
+    def testWebPortnum(self):
+        master = self.buildmaster
+        master.loadChanges()
+
+        d = master.loadConfig(webCfg1)
+        d.addCallback(self._testWebPortnum_1)
+        return maybeWait(d)
+    def _testWebPortnum_1(self, res):
+        ports = self.checkPorts(self.buildmaster, [(9999, pb.PBServerFactory),
+                                                   (9980, Site)])
+        p = ports[1]
+
+        d = self.buildmaster.loadConfig(webCfg1) # nothing should be changed
+        d.addCallback(self._testWebPortnum_2, p)
+        return d
+    def _testWebPortnum_2(self, res, p):
+        ports = self.checkPorts(self.buildmaster, [(9999, pb.PBServerFactory),
+                                                   (9980, Site)])
+        self.failUnlessIdentical(p, ports[1],
+                                 "web port was changed even though " + \
+                                 "configuration was not")
+
+        d = self.buildmaster.loadConfig(webCfg2) # changes to 9981
+        d.addCallback(self._testWebPortnum_3, p)
+        return d
+    def _testWebPortnum_3(self, res, p):
+        ports = self.checkPorts(self.buildmaster, [(9999, pb.PBServerFactory),
+                                                   (9981, Site)])
+        self.failIf(p is ports[1],
+                    "configuration was changed but web port was unchanged")
+        d = self.buildmaster.loadConfig(webCfg3) # 9981 on only localhost
+        d.addCallback(self._testWebPortnum_4, ports[1])
+        return d
+    def _testWebPortnum_4(self, res, p):
+        ports = self.checkPorts(self.buildmaster, [(9999, pb.PBServerFactory),
+                                                   (9981, Site)])
+        self.failUnlessEqual(ports[1].kwargs['interface'], "127.0.0.1")
+        d = self.buildmaster.loadConfig(emptyCfg)
+        d.addCallback(lambda res:
+                      self.checkPorts(self.buildmaster,
+                                      [(9999, pb.PBServerFactory)]))
+        return d
+
+    def testWebPathname(self):
+        master = self.buildmaster
+        master.loadChanges()
+
+        d = master.loadConfig(webNameCfg1)
+        d.addCallback(self._testWebPathname_1)
+        return maybeWait(d)
+    def _testWebPathname_1(self, res):
+        self.checkPorts(self.buildmaster,
+                        [(9999, pb.PBServerFactory),
+                         ('~/.twistd-web-pb', pb.PBServerFactory)])
+        unixports = self.UNIXports(self.buildmaster)
+        f = unixports[0].args[1]
+        self.failUnless(isinstance(f.root, ResourcePublisher))
+
+        d = self.buildmaster.loadConfig(webNameCfg1)
+        # nothing should be changed
+        d.addCallback(self._testWebPathname_2, f)
+        return d
+    def _testWebPathname_2(self, res, f):
+        self.checkPorts(self.buildmaster,
+                        [(9999, pb.PBServerFactory),
+                         ('~/.twistd-web-pb', pb.PBServerFactory)])
+        self.failUnlessIdentical(f,
+                                 self.UNIXports(self.buildmaster)[0].args[1],
+                                 "web factory was changed even though " + \
+                                 "configuration was not")
+
+        d = self.buildmaster.loadConfig(webNameCfg2)
+        d.addCallback(self._testWebPathname_3, f)
+        return d
+    def _testWebPathname_3(self, res, f):
+        self.checkPorts(self.buildmaster,
+                        [(9999, pb.PBServerFactory),
+                         ('./bar.socket', pb.PBServerFactory)])
+        self.failIf(f is self.UNIXports(self.buildmaster)[0].args[1],
+                    "web factory was unchanged but configuration was changed")
+
+        d = self.buildmaster.loadConfig(emptyCfg)
+        d.addCallback(lambda res:
+                      self.checkPorts(self.buildmaster,
+                                      [(9999, pb.PBServerFactory)]))
+        return d
+
+    def testDebugPassword(self):
+        master = self.buildmaster
+
+        master.loadConfig(debugPasswordCfg)
+        self.failUnlessEqual(master.checker.users,
+                             {"change": "changepw",
+                              "debug": "sekrit"})
+
+        master.loadConfig(debugPasswordCfg)
+        self.failUnlessEqual(master.checker.users,
+                             {"change": "changepw",
+                              "debug": "sekrit"})
+
+        master.loadConfig(emptyCfg)
+        self.failUnlessEqual(master.checker.users,
+                             {"change": "changepw"})
+
+    def testLocks(self):
+        master = self.buildmaster
+        botmaster = master.botmaster
+
+        # make sure that c['interlocks'] is rejected properly
+        self.failUnlessRaises(KeyError, master.loadConfig, interlockCfgBad)
+        # and that duplicate-named Locks are caught
+        self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad1)
+        self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad2)
+        self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad3)
+
+        # create a Builder that uses Locks
+        master.loadConfig(lockCfg1a)
+        b1 = master.botmaster.builders["builder1"]
+        self.failUnlessEqual(len(b1.locks), 2)
+
+        # reloading the same config should not change the Builder
+        master.loadConfig(lockCfg1a)
+        self.failUnlessIdentical(b1, master.botmaster.builders["builder1"])
+        # but changing the set of locks used should change it
+        master.loadConfig(lockCfg1b)
+        self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+        b1 = master.botmaster.builders["builder1"]
+        self.failUnlessEqual(len(b1.locks), 1)
+
+        # similar test with step-scoped locks
+        master.loadConfig(lockCfg2a)
+        b1 = master.botmaster.builders["builder1"]
+        # reloading the same config should not change the Builder
+        master.loadConfig(lockCfg2a)
+        self.failUnlessIdentical(b1, master.botmaster.builders["builder1"])
+        # but changing the set of locks used should change it
+        master.loadConfig(lockCfg2b)
+        self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+        b1 = master.botmaster.builders["builder1"]
+        # remove the locks entirely
+        master.loadConfig(lockCfg2c)
+        self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+
+class ConfigElements(unittest.TestCase):
+    # verify that ComparableMixin is working
+    def testSchedulers(self):
+        s1 = scheduler.Scheduler(name='quick', branch=None,
+                                 treeStableTimer=30,
+                                 builderNames=['quick'])
+        s2 = scheduler.Scheduler(name="all", branch=None,
+                                 treeStableTimer=5*60,
+                                 builderNames=["a", "b"])
+        s3 = scheduler.Try_Userpass("try", ["a","b"], port=9989,
+                                    userpass=[("foo","bar")])
+        s1a = scheduler.Scheduler(name='quick', branch=None,
+                                  treeStableTimer=30,
+                                  builderNames=['quick'])
+        s2a = scheduler.Scheduler(name="all", branch=None,
+                                  treeStableTimer=5*60,
+                                  builderNames=["a", "b"])
+        s3a = scheduler.Try_Userpass("try", ["a","b"], port=9989,
+                                     userpass=[("foo","bar")])
+        self.failUnless(s1 == s1)
+        self.failUnless(s1 == s1a)
+        self.failUnless(s1a in [s1, s2, s3])
+        self.failUnless(s2a in [s1, s2, s3])
+        self.failUnless(s3a in [s1, s2, s3])
+
+
+
+class ConfigFileTest(unittest.TestCase):
+
+    def testFindConfigFile(self):
+        os.mkdir("test_cf")
+        open(os.path.join("test_cf", "master.cfg"), "w").write(emptyCfg)
+        slaveportCfg = emptyCfg + "c['slavePortnum'] = 9000\n"
+        open(os.path.join("test_cf", "alternate.cfg"), "w").write(slaveportCfg)
+
+        m = BuildMaster("test_cf")
+        m.loadTheConfigFile()
+        self.failUnlessEqual(m.slavePortnum, "tcp:9999")
+
+        m = BuildMaster("test_cf", "alternate.cfg")
+        m.loadTheConfigFile()
+        self.failUnlessEqual(m.slavePortnum, "tcp:9000")
+
+
+class MyTarget(base.StatusReceiverMultiService):
+    def __init__(self, name):
+        self.name = name
+        base.StatusReceiverMultiService.__init__(self)
+    def startService(self):
+        # make a note in a list stashed in the BuildMaster
+        self.parent.targetevents.append(("start", self.name))
+        return base.StatusReceiverMultiService.startService(self)
+    def stopService(self):
+        self.parent.targetevents.append(("stop", self.name))
+        return base.StatusReceiverMultiService.stopService(self)
+
+class MySlowTarget(MyTarget):
+    def stopService(self):
+        from twisted.internet import reactor
+        d = base.StatusReceiverMultiService.stopService(self)
+        def stall(res):
+            d2 = defer.Deferred()
+            reactor.callLater(0.1, d2.callback, res)
+            return d2
+        d.addCallback(stall)
+        m = self.parent
+        def finishedStalling(res):
+            m.targetevents.append(("stop", self.name))
+            return res
+        d.addCallback(finishedStalling)
+        return d
+
+# we can't actually startService a buildmaster with a config that uses a
+# fixed slavePortnum like 9999, so instead this makes it possible to pass '0'
+# for the first time, and then substitute back in the allocated port number
+# on subsequent passes.
+startableEmptyCfg = emptyCfg + \
+"""
+c['slavePortnum'] = %d
+"""
+                    
+targetCfg1 = startableEmptyCfg + \
+"""
+from buildbot.test.test_config import MyTarget
+c['status'] = [MyTarget('a')]
+"""
+
+targetCfg2 = startableEmptyCfg + \
+"""
+from buildbot.test.test_config import MySlowTarget
+c['status'] = [MySlowTarget('b')]
+"""
+
+class StartService(unittest.TestCase):
+    def tearDown(self):
+        return self.master.stopService()
+
+    def testStartService(self):
+        os.mkdir("test_ss")
+        self.master = m = BuildMaster("test_ss")
+        # inhibit the usual read-config-on-startup behavior
+        m.readConfig = True
+        m.startService()
+        d = m.loadConfig(startableEmptyCfg % 0)
+        d.addCallback(self._testStartService_0)
+        return maybeWait(d)
+
+    def _testStartService_0(self, res):
+        m = self.master
+        m.targetevents = []
+        # figure out what port got allocated
+        self.portnum = m.slavePort._port.getHost().port
+        d = m.loadConfig(targetCfg1 % self.portnum)
+        d.addCallback(self._testStartService_1)
+        return d
+
+    def _testStartService_1(self, res):
+        self.failUnlessEqual(len(self.master.statusTargets), 1)
+        self.failUnless(isinstance(self.master.statusTargets[0], MyTarget))
+        self.failUnlessEqual(self.master.targetevents,
+                             [('start', 'a')])
+        self.master.targetevents = []
+        # reloading the same config should not start or stop the target
+        d = self.master.loadConfig(targetCfg1 % self.portnum)
+        d.addCallback(self._testStartService_2)
+        return d
+
+    def _testStartService_2(self, res):
+        self.failUnlessEqual(self.master.targetevents, [])
+        # but loading a new config file should stop the old one, then
+        # start the new one
+        d = self.master.loadConfig(targetCfg2 % self.portnum)
+        d.addCallback(self._testStartService_3)
+        return d
+
+    def _testStartService_3(self, res):
+        self.failUnlessEqual(self.master.targetevents,
+                             [('stop', 'a'), ('start', 'b')])
+        self.master.targetevents = []
+        # and going back to the old one should do the same, in the same
+        # order, even though the current MySlowTarget takes a moment to shut
+        # down
+        d = self.master.loadConfig(targetCfg1 % self.portnum)
+        d.addCallback(self._testStartService_4)
+        return d
+
+    def _testStartService_4(self, res):
+        self.failUnlessEqual(self.master.targetevents,
+                             [('stop', 'b'), ('start', 'a')])

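The ConfigElements test above relies on buildbot's ComparableMixin: two Scheduler instances constructed from the same configuration arguments compare equal, which is how loadConfig() can tell that a reloaded config did not actually change anything. A minimal sketch of attribute-based equality in that spirit (illustrative only; the class names and the compare_attrs convention as written here are a simplification, not buildbot's actual code):

    # Illustrative sketch, not buildbot code: equality driven by a list of
    # "attributes that matter", so identical config yields equal objects.
    class AttrComparable:
        compare_attrs = []             # subclasses list the attributes that matter

        def _key(self):
            return tuple([getattr(self, a, None) for a in self.compare_attrs])

        def __eq__(self, other):
            return type(self) == type(other) and self._key() == other._key()

        def __ne__(self, other):
            return not self.__eq__(other)

    class SimpleScheduler(AttrComparable):
        compare_attrs = ["name", "branch", "treeStableTimer", "builderNames"]
        def __init__(self, name, branch, treeStableTimer, builderNames):
            self.name = name
            self.branch = branch
            self.treeStableTimer = treeStableTimer
            self.builderNames = builderNames

    # Two instances built from identical config arguments compare equal, which
    # is what lets a reloaded, unchanged config be treated as a no-op.
    assert SimpleScheduler('quick', None, 30, ['quick']) == \
           SimpleScheduler('quick', None, 30, ['quick'])
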
Added: vendor/buildbot/current/buildbot/test/test_control.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_control.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_control.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,105 @@
+# -*- test-case-name: buildbot.test.test_control -*-
+
+import os
+
+from twisted.trial import unittest
+from twisted.internet import defer
+
+from buildbot import master, interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.twcompat import providedBy, maybeWait
+from buildbot.slave import bot
+from buildbot.status.builder import SUCCESS
+from buildbot.process import base
+from buildbot.test.runutils import rmtree
+
+config = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+
+def s(klass, **kwargs):
+    return (klass, kwargs)
+
+f1 = factory.BuildFactory([
+    s(dummy.Dummy, timeout=1),
+    ])
+c = {}
+c['bots'] = [['bot1', 'sekrit']]
+c['sources'] = []
+c['schedulers'] = []
+c['builders'] = [{'name': 'force', 'slavename': 'bot1',
+                  'builddir': 'force-dir', 'factory': f1}]
+c['slavePortnum'] = 0
+BuildmasterConfig = c
+"""
+
+class FakeBuilder:
+    name = "fake"
+    def getSlaveCommandVersion(self, command, oldversion=None):
+        return "1.10"
+
+
+class Force(unittest.TestCase):
+
+    def rmtree(self, d):
+        rmtree(d)
+
+    def setUp(self):
+        self.master = None
+        self.slave = None
+        self.rmtree("control_basedir")
+        os.mkdir("control_basedir")
+        self.master = master.BuildMaster("control_basedir")
+        self.slavebase = os.path.abspath("control_slavebase")
+        self.rmtree(self.slavebase)
+        os.mkdir("control_slavebase")
+
+    def connectSlave(self):
+        port = self.master.slavePort._port.getHost().port
+        slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
+                               self.slavebase, keepalive=0, usePTY=1)
+        self.slave = slave
+        slave.startService()
+        d = self.master.botmaster.waitUntilBuilderAttached("force")
+        return d
+
+    def tearDown(self):
+        dl = []
+        if self.slave:
+            dl.append(self.master.botmaster.waitUntilBuilderDetached("force"))
+            dl.append(defer.maybeDeferred(self.slave.stopService))
+        if self.master:
+            dl.append(defer.maybeDeferred(self.master.stopService))
+        return maybeWait(defer.DeferredList(dl))
+
+    def testRequest(self):
+        m = self.master
+        m.loadConfig(config)
+        m.startService()
+        d = self.connectSlave()
+        d.addCallback(self._testRequest_1)
+        return maybeWait(d)
+    def _testRequest_1(self, res):
+        c = interfaces.IControl(self.master)
+        req = base.BuildRequest("I was bored", SourceStamp())
+        builder_control = c.getBuilder("force")
+        d = defer.Deferred()
+        req.subscribe(d.callback)
+        builder_control.requestBuild(req)
+        d.addCallback(self._testRequest_2)
+        # we use the same check-the-results code as testForce
+        return d
+
+    def _testRequest_2(self, build_control):
+        self.failUnless(providedBy(build_control, interfaces.IBuildControl))
+        d = build_control.getStatus().waitUntilFinished()
+        d.addCallback(self._testRequest_3)
+        return d
+
+    def _testRequest_3(self, bs):
+        self.failUnless(providedBy(bs, interfaces.IBuildStatus))
+        self.failUnless(bs.isFinished())
+        self.failUnlessEqual(bs.getResults(), SUCCESS)
+        #self.failUnlessEqual(bs.getResponsibleUsers(), ["bob"]) # TODO
+        self.failUnlessEqual(bs.getChanges(), [])
+        #self.failUnlessEqual(bs.getReason(), "forced") # TODO

Added: vendor/buildbot/current/buildbot/test/test_dependencies.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_dependencies.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_dependencies.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,167 @@
+# -*- test-case-name: buildbot.test.test_dependencies -*-
+
+from twisted.trial import unittest
+
+from twisted.internet import reactor, defer
+
+from buildbot.test.runutils import RunMixin
+from buildbot.twcompat import maybeWait
+from buildbot.status import base
+
+config_1 = """
+from buildbot import scheduler
+from buildbot.process import factory
+from buildbot.steps import dummy
+s = factory.s
+from buildbot.test.test_locks import LockStep
+
+BuildmasterConfig = c = {}
+c['bots'] = [('bot1', 'sekrit'), ('bot2', 'sekrit')]
+c['sources'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 0
+
+# upstream1 (fastfail, slowpass)
+#  -> downstream2 (b3, b4)
+# upstream3 (slowfail, slowpass)
+#  -> downstream4 (b3, b4)
+#  -> downstream5 (b5)
+
+s1 = scheduler.Scheduler('upstream1', None, 10, ['slowpass', 'fastfail'])
+s2 = scheduler.Dependent('downstream2', s1, ['b3', 'b4'])
+s3 = scheduler.Scheduler('upstream3', None, 10, ['fastpass', 'slowpass'])
+s4 = scheduler.Dependent('downstream4', s3, ['b3', 'b4'])
+s5 = scheduler.Dependent('downstream5', s4, ['b5'])
+c['schedulers'] = [s1, s2, s3, s4, s5]
+
+f_fastpass = factory.BuildFactory([s(dummy.Dummy, timeout=1)])
+f_slowpass = factory.BuildFactory([s(dummy.Dummy, timeout=2)])
+f_fastfail = factory.BuildFactory([s(dummy.FailingDummy, timeout=1)])
+
+def builder(name, f):
+    d = {'name': name, 'slavename': 'bot1', 'builddir': name, 'factory': f}
+    return d
+
+c['builders'] = [builder('slowpass', f_slowpass),
+                 builder('fastfail', f_fastfail),
+                 builder('fastpass', f_fastpass),
+                 builder('b3', f_fastpass),
+                 builder('b4', f_fastpass),
+                 builder('b5', f_fastpass),
+                 ]
+"""
+
+class Logger(base.StatusReceiverMultiService):
+    def __init__(self, master):
+        base.StatusReceiverMultiService.__init__(self)
+        self.builds = []
+        for bn in master.status.getBuilderNames():
+            master.status.getBuilder(bn).subscribe(self)
+
+    def buildStarted(self, builderName, build):
+        self.builds.append(builderName)
+
+class Dependencies(RunMixin, unittest.TestCase):
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(config_1)
+        self.master.startService()
+        d = self.connectSlave(["slowpass", "fastfail", "fastpass",
+                               "b3", "b4", "b5"])
+        return maybeWait(d)
+
+    def findScheduler(self, name):
+        for s in self.master.allSchedulers():
+            if s.name == name:
+                return s
+        raise KeyError("No Scheduler named '%s'" % name)
+
+    def testParse(self):
+        self.master.loadConfig(config_1)
+        # that's it, just make sure this config file is loaded successfully
+
+    def testRun_Fail(self):
+        # add an extra status target so we can pay attention to which builds
+        # start and which don't.
+        self.logger = Logger(self.master)
+
+        # kick off upstream1, which has a failing Builder and thus will not
+        # trigger downstream2
+        s = self.findScheduler("upstream1")
+        # this is an internal function of the Scheduler class
+        s.fireTimer() # fires a build
+        # t=0: two builders start: 'slowpass' and 'fastfail'
+        # t=1: builder 'fastfail' finishes
+        # t=2: builder 'slowpass' finishes
+        d = defer.Deferred()
+        d.addCallback(self._testRun_Fail_1)
+        reactor.callLater(5, d.callback, None)
+        return maybeWait(d)
+
+    def _testRun_Fail_1(self, res):
+        # 'slowpass' and 'fastfail' should have run one build each
+        b = self.status.getBuilder('slowpass').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+        b = self.status.getBuilder('fastfail').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+        
+        # none of the other builders should have run
+        self.failIf(self.status.getBuilder('b3').getLastFinishedBuild())
+        self.failIf(self.status.getBuilder('b4').getLastFinishedBuild())
+        self.failIf(self.status.getBuilder('b5').getLastFinishedBuild())
+
+        # in fact, none of them should have even started
+        self.failUnlessEqual(len(self.logger.builds), 2)
+        self.failUnless("slowpass" in self.logger.builds)
+        self.failUnless("fastfail" in self.logger.builds)
+        self.failIf("b3" in self.logger.builds)
+        self.failIf("b4" in self.logger.builds)
+        self.failIf("b5" in self.logger.builds)
+
+    def testRun_Pass(self):
+        # kick off upstream3, which will fire downstream4 and then
+        # downstream5
+        s = self.findScheduler("upstream3")
+        # this is an internal function of the Scheduler class
+        s.fireTimer() # fires a build
+        # t=0: slowpass and fastpass start
+        # t=1: builder 'fastpass' finishes
+        # t=2: builder 'slowpass' finishes
+        #      scheduler 'downstream4' fires
+        #      builds b3 and b4 are started
+        # t=3: builds b3 and b4 finish
+        #      scheduler 'downstream5' fires
+        #      build b5 is started
+        # t=4: build b5 is finished
+        d = defer.Deferred()
+        d.addCallback(self._testRun_Pass_1)
+        reactor.callLater(5, d.callback, None)
+        return maybeWait(d)
+
+    def _testRun_Pass_1(self, res):
+        # 'fastpass' and 'slowpass' should have run one build each
+        b = self.status.getBuilder('fastpass').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+        b = self.status.getBuilder('slowpass').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+        self.failIf(self.status.getBuilder('fastfail').getLastFinishedBuild())
+
+        b = self.status.getBuilder('b3').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+        b = self.status.getBuilder('b4').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+        b = self.status.getBuilder('b5').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+        
+        

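The scheduler chain laid out in config_1 above (upstream1 -> downstream2, upstream3 -> downstream4 -> downstream5) is driven by Dependent schedulers, which fire only after the upstream's build set succeeds on every one of its builders; that is why a failure on 'fastfail' keeps b3, b4 and b5 from ever starting. A conceptual sketch of that subscribe/notify relationship (not buildbot's Dependent implementation; all names below are invented for illustration):

    # Conceptual sketch only: an upstream notifies its dependents when, and
    # only when, its whole build set succeeded.
    class Upstream:
        def __init__(self, builder_names):
            self.builder_names = builder_names
            self.subscribers = []

        def subscribe(self, dependent):
            self.subscribers.append(dependent)

        def build_set_finished(self, all_succeeded):
            # a fully successful upstream run triggers the dependents;
            # any failure suppresses them entirely
            if all_succeeded:
                for dep in self.subscribers:
                    dep.fire()

    class Dependent:
        def __init__(self, name, upstream, builder_names):
            self.name = name
            self.builder_names = builder_names
            self.fired = False
            upstream.subscribe(self)

        def fire(self):
            self.fired = True

    up = Upstream(['slowpass', 'fastfail'])
    down = Dependent('downstream2', up, ['b3', 'b4'])
    up.build_set_finished(all_succeeded=False)   # the 'fastfail' case
    assert not down.fired
    up.build_set_finished(all_succeeded=True)    # the all-green case
    assert down.fired
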
Added: vendor/buildbot/current/buildbot/test/test_locks.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_locks.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_locks.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,426 @@
+# -*- test-case-name: buildbot.test.test_locks -*-
+
+import random
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+
+from buildbot import master
+from buildbot.steps import dummy
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.test.runutils import RunMixin
+from buildbot.twcompat import maybeWait
+from buildbot import locks
+
+def claimHarder(lock, owner):
+    """Return a Deferred that will fire when the lock is claimed. Keep trying
+    until we succeed."""
+    if lock.isAvailable():
+        #print "claimHarder(%s): claiming" % owner
+        lock.claim(owner)
+        return defer.succeed(lock)
+    #print "claimHarder(%s): waiting" % owner
+    d = lock.waitUntilMaybeAvailable(owner)
+    d.addCallback(claimHarder, owner)
+    return d
+
+def hold(lock, owner, mode="now"):
+    if mode == "now":
+        lock.release(owner)
+    elif mode == "very soon":
+        reactor.callLater(0, lock.release, owner)
+    elif mode == "soon":
+        reactor.callLater(0.1, lock.release, owner)
+
+
+class Unit(unittest.TestCase):
+    def testNow(self):
+        l = locks.BaseLock("name")
+        self.failUnless(l.isAvailable())
+        l.claim("owner1")
+        self.failIf(l.isAvailable())
+        l.release("owner1")
+        self.failUnless(l.isAvailable())
+
+    def testLater(self):
+        lock = locks.BaseLock("name")
+        d = claimHarder(lock, "owner1")
+        d.addCallback(lambda lock: lock.release("owner1"))
+        return maybeWait(d)
+
+    def testCompetition(self):
+        lock = locks.BaseLock("name")
+        d = claimHarder(lock, "owner1")
+        d.addCallback(self._claim1)
+        return maybeWait(d)
+    def _claim1(self, lock):
+        # we should have claimed it by now
+        self.failIf(lock.isAvailable())
+        # now set up two competing owners. We don't know which will get the
+        # lock first.
+        d2 = claimHarder(lock, "owner2")
+        d2.addCallback(hold, "owner2", "now")
+        d3 = claimHarder(lock, "owner3")
+        d3.addCallback(hold, "owner3", "soon")
+        dl = defer.DeferredList([d2,d3])
+        dl.addCallback(self._cleanup, lock)
+        # and release the lock in a moment
+        reactor.callLater(0.1, lock.release, "owner1")
+        return dl
+
+    def _cleanup(self, res, lock):
+        d = claimHarder(lock, "cleanup")
+        d.addCallback(lambda lock: lock.release("cleanup"))
+        return d
+
+    def testRandom(self):
+        lock = locks.BaseLock("name")
+        dl = []
+        for i in range(100):
+            owner = "owner%d" % i
+            mode = random.choice(["now", "very soon", "soon"])
+            d = claimHarder(lock, owner)
+            d.addCallback(hold, owner, mode)
+            dl.append(d)
+        d = defer.DeferredList(dl)
+        d.addCallback(self._cleanup, lock)
+        return maybeWait(d)
+
+class Multi(unittest.TestCase):
+    def testNow(self):
+        lock = locks.BaseLock("name", 2)
+        self.failUnless(lock.isAvailable())
+        lock.claim("owner1")
+        self.failUnless(lock.isAvailable())
+        lock.claim("owner2")
+        self.failIf(lock.isAvailable())
+        lock.release("owner1")
+        self.failUnless(lock.isAvailable())
+        lock.release("owner2")
+        self.failUnless(lock.isAvailable())
+
+    def testLater(self):
+        lock = locks.BaseLock("name", 2)
+        lock.claim("owner1")
+        lock.claim("owner2")
+        d = claimHarder(lock, "owner3")
+        d.addCallback(lambda lock: lock.release("owner3"))
+        lock.release("owner2")
+        lock.release("owner1")
+        return maybeWait(d)
+
+    def _cleanup(self, res, lock, count):
+        dl = []
+        for i in range(count):
+            d = claimHarder(lock, "cleanup%d" % i)
+            dl.append(d)
+        d2 = defer.DeferredList(dl)
+        # once all locks are claimed, we know that any previous owners have
+        # been flushed out
+        def _release(res):
+            for i in range(count):
+                lock.release("cleanup%d" % i)
+        d2.addCallback(_release)
+        return d2
+
+    def testRandom(self):
+        COUNT = 5
+        lock = locks.BaseLock("name", COUNT)
+        dl = []
+        for i in range(100):
+            owner = "owner%d" % i
+            mode = random.choice(["now", "very soon", "soon"])
+            d = claimHarder(lock, owner)
+            def _check(lock):
+                self.failIf(len(lock.owners) > COUNT)
+                return lock
+            d.addCallback(_check)
+            d.addCallback(hold, owner, mode)
+            dl.append(d)
+        d = defer.DeferredList(dl)
+        d.addCallback(self._cleanup, lock, COUNT)
+        return maybeWait(d)
+
+class Dummy:
+    pass
+
+def slave(slavename):
+    slavebuilder = Dummy()
+    slavebuilder.slave = Dummy()
+    slavebuilder.slave.slavename = slavename
+    return slavebuilder
+
+class MakeRealLock(unittest.TestCase):
+
+    def make(self, lockid):
+        return lockid.lockClass(lockid)
+
+    def testMaster(self):
+        mid1 = locks.MasterLock("name1")
+        mid2 = locks.MasterLock("name1")
+        mid3 = locks.MasterLock("name3")
+        mid4 = locks.MasterLock("name1", 3)
+        self.failUnlessEqual(mid1, mid2)
+        self.failIfEqual(mid1, mid3)
+        # they should all be hashable
+        d = {mid1: 1, mid2: 2, mid3: 3, mid4: 4}
+
+        l1 = self.make(mid1)
+        self.failUnlessEqual(l1.name, "name1")
+        self.failUnlessEqual(l1.maxCount, 1)
+        self.failUnlessIdentical(l1.getLock(slave("slave1")), l1)
+        l4 = self.make(mid4)
+        self.failUnlessEqual(l4.name, "name1")
+        self.failUnlessEqual(l4.maxCount, 3)
+        self.failUnlessIdentical(l4.getLock(slave("slave1")), l4)
+
+    def testSlave(self):
+        sid1 = locks.SlaveLock("name1")
+        sid2 = locks.SlaveLock("name1")
+        sid3 = locks.SlaveLock("name3")
+        sid4 = locks.SlaveLock("name1", maxCount=3)
+        mcfs = {"bigslave": 4, "smallslave": 1}
+        sid5 = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs)
+        mcfs2 = {"bigslave": 4, "smallslave": 1}
+        sid5a = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs2)
+        mcfs3 = {"bigslave": 1, "smallslave": 99}
+        sid5b = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs3)
+        self.failUnlessEqual(sid1, sid2)
+        self.failIfEqual(sid1, sid3)
+        self.failIfEqual(sid1, sid4)
+        self.failIfEqual(sid1, sid5)
+        self.failUnlessEqual(sid5, sid5a)
+        self.failIfEqual(sid5a, sid5b)
+        # they should all be hashable
+        d = {sid1: 1, sid2: 2, sid3: 3, sid4: 4, sid5: 5, sid5a: 6, sid5b: 7}
+
+        l1 = self.make(sid1)
+        self.failUnlessEqual(l1.name, "name1")
+        self.failUnlessEqual(l1.maxCount, 1)
+        l1s1 = l1.getLock(slave("slave1"))
+        self.failIfIdentical(l1s1, l1)
+
+        l4 = self.make(sid4)
+        self.failUnlessEqual(l4.maxCount, 3)
+        l4s1 = l4.getLock(slave("slave1"))
+        self.failUnlessEqual(l4s1.maxCount, 3)
+
+        l5 = self.make(sid5)
+        l5s1 = l5.getLock(slave("bigslave"))
+        l5s2 = l5.getLock(slave("smallslave"))
+        l5s3 = l5.getLock(slave("unnamedslave"))
+        self.failUnlessEqual(l5s1.maxCount, 4)
+        self.failUnlessEqual(l5s2.maxCount, 1)
+        self.failUnlessEqual(l5s3.maxCount, 3)
+
+class GetLock(unittest.TestCase):
+    def testGet(self):
+        # the master.cfg file contains "lock ids", which are instances of
+        # MasterLock and SlaveLock but which are not actually Locks per se.
+        # When the build starts, these markers are turned into RealMasterLock
+        # and RealSlaveLock instances. This ensures that any builds running
+        # on slaves that were unaffected by the config change are still
+        # referring to the same Lock instance as new builds by builders that
+        # *were* affected by the change. There have been bugs in the past in
+        # which this didn't happen, and the Locks were bypassed because half
+        # the builders were using one incarnation of the lock while the other
+        # half were using a separate (but equal) incarnation.
+        #
+        # Changing the lock id in any way should cause it to be replaced in
+        # the BotMaster. This will result in a couple of funky artifacts:
+        # builds in progress might pay attention to a different lock, so we
+        # might bypass the locking for the duration of a couple builds.
+        # There's also the problem of old Locks lingering around in
+        # BotMaster.locks, but they're small and shouldn't really cause a
+        # problem.
+
+        b = master.BotMaster()
+        l1 = locks.MasterLock("one")
+        l1a = locks.MasterLock("one")
+        l2 = locks.MasterLock("one", maxCount=4)
+
+        rl1 = b.getLockByID(l1)
+        rl2 = b.getLockByID(l1a)
+        self.failUnlessIdentical(rl1, rl2)
+        rl3 = b.getLockByID(l2)
+        self.failIfIdentical(rl1, rl3)
+
+        s1 = locks.SlaveLock("one")
+        s1a = locks.SlaveLock("one")
+        s2 = locks.SlaveLock("one", maxCount=4)
+        s3 = locks.SlaveLock("one", maxCount=4,
+                             maxCountForSlave={"a":1, "b":2})
+        s3a = locks.SlaveLock("one", maxCount=4,
+                              maxCountForSlave={"a":1, "b":2})
+        s4 = locks.SlaveLock("one", maxCount=4,
+                             maxCountForSlave={"a":4, "b":4})
+
+        rl1 = b.getLockByID(s1)
+        rl2 = b.getLockByID(s1a)
+        self.failUnlessIdentical(rl1, rl2)
+        rl3 = b.getLockByID(s2)
+        self.failIfIdentical(rl1, rl3)
+        rl4 = b.getLockByID(s3)
+        self.failIfIdentical(rl1, rl4)
+        self.failIfIdentical(rl3, rl4)
+        rl5 = b.getLockByID(s3a)
+        self.failUnlessIdentical(rl4, rl5)
+        rl6 = b.getLockByID(s4)
+        self.failIfIdentical(rl5, rl6)
+
+
+
+class LockStep(dummy.Dummy):
+    def start(self):
+        number = self.build.requests[0].number
+        self.build.requests[0].events.append(("start", number))
+        dummy.Dummy.start(self)
+    def done(self):
+        number = self.build.requests[0].number
+        self.build.requests[0].events.append(("done", number))
+        dummy.Dummy.done(self)
+
+config_1 = """
+from buildbot import locks
+from buildbot.process import factory
+s = factory.s
+from buildbot.test.test_locks import LockStep
+
+BuildmasterConfig = c = {}
+c['bots'] = [('bot1', 'sekrit'), ('bot2', 'sekrit')]
+c['sources'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 0
+
+first_lock = locks.SlaveLock('first')
+second_lock = locks.MasterLock('second')
+f1 = factory.BuildFactory([s(LockStep, timeout=2, locks=[first_lock])])
+f2 = factory.BuildFactory([s(LockStep, timeout=3, locks=[second_lock])])
+f3 = factory.BuildFactory([s(LockStep, timeout=2, locks=[])])
+
+b1a = {'name': 'full1a', 'slavename': 'bot1', 'builddir': '1a', 'factory': f1}
+b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1b', 'factory': f1}
+b1c = {'name': 'full1c', 'slavename': 'bot1', 'builddir': '1c', 'factory': f3,
+       'locks': [first_lock, second_lock]}
+b1d = {'name': 'full1d', 'slavename': 'bot1', 'builddir': '1d', 'factory': f2}
+b2a = {'name': 'full2a', 'slavename': 'bot2', 'builddir': '2a', 'factory': f1}
+b2b = {'name': 'full2b', 'slavename': 'bot2', 'builddir': '2b', 'factory': f3,
+       'locks': [second_lock]}
+c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
+"""
+
+config_1a = config_1 + \
+"""
+b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1B', 'factory': f1}
+c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
+"""
+
+
+class Locks(RunMixin, unittest.TestCase):
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.req1 = req1 = BuildRequest("forced build", SourceStamp())
+        req1.number = 1
+        self.req2 = req2 = BuildRequest("forced build", SourceStamp())
+        req2.number = 2
+        self.req3 = req3 = BuildRequest("forced build", SourceStamp())
+        req3.number = 3
+        req1.events = req2.events = req3.events = self.events = []
+        d = self.master.loadConfig(config_1)
+        d.addCallback(lambda res: self.master.startService())
+        d.addCallback(lambda res: self.connectSlaves(["bot1", "bot2"],
+                                                     ["full1a", "full1b",
+                                                      "full1c", "full1d",
+                                                      "full2a", "full2b"]))
+        return maybeWait(d)
+
+    def testLock1(self):
+        self.control.getBuilder("full1a").requestBuild(self.req1)
+        self.control.getBuilder("full1b").requestBuild(self.req2)
+        d = defer.DeferredList([self.req1.waitUntilFinished(),
+                                self.req2.waitUntilFinished()])
+        d.addCallback(self._testLock1_1)
+        return maybeWait(d)
+
+    def _testLock1_1(self, res):
+        # full1a should complete its step before full1b starts it
+        self.failUnlessEqual(self.events,
+                             [("start", 1), ("done", 1),
+                              ("start", 2), ("done", 2)])
+
+    def testLock1a(self):
+        # just like testLock1, but we reload the config file first, with a
+        # change that causes full1b to be changed. This tickles a design bug
+        # in which full1a and full1b wind up with distinct Lock instances.
+        d = self.master.loadConfig(config_1a)
+        d.addCallback(self._testLock1a_1)
+        return maybeWait(d)
+    def _testLock1a_1(self, res):
+        self.control.getBuilder("full1a").requestBuild(self.req1)
+        self.control.getBuilder("full1b").requestBuild(self.req2)
+        d = defer.DeferredList([self.req1.waitUntilFinished(),
+                                self.req2.waitUntilFinished()])
+        d.addCallback(self._testLock1a_2)
+        return d
+
+    def _testLock1a_2(self, res):
+        # full1a should complete its step before full1b starts it
+        self.failUnlessEqual(self.events,
+                             [("start", 1), ("done", 1),
+                              ("start", 2), ("done", 2)])
+
+    def testLock2(self):
+        # two builds run on separate slaves with slave-scoped locks should
+        # not interfere
+        self.control.getBuilder("full1a").requestBuild(self.req1)
+        self.control.getBuilder("full2a").requestBuild(self.req2)
+        d = defer.DeferredList([self.req1.waitUntilFinished(),
+                                self.req2.waitUntilFinished()])
+        d.addCallback(self._testLock2_1)
+        return maybeWait(d)
+
+    def _testLock2_1(self, res):
+        # full2a should start its step before full1a finishes it. They run on
+        # different slaves, however, so they might start in either order.
+        self.failUnless(self.events[:2] == [("start", 1), ("start", 2)] or
+                        self.events[:2] == [("start", 2), ("start", 1)])
+
+    def testLock3(self):
+        # two builds run on separate slaves with master-scoped locks should
+        # not overlap
+        self.control.getBuilder("full1c").requestBuild(self.req1)
+        self.control.getBuilder("full2b").requestBuild(self.req2)
+        d = defer.DeferredList([self.req1.waitUntilFinished(),
+                                self.req2.waitUntilFinished()])
+        d.addCallback(self._testLock3_1)
+        return maybeWait(d)
+
+    def _testLock3_1(self, res):
+        # full2b should not start until after full1c finishes. The builds run
+        # on different slaves, so we can't really predict which will start
+        # first. The important thing is that they don't overlap.
+        self.failUnless(self.events == [("start", 1), ("done", 1),
+                                        ("start", 2), ("done", 2)]
+                        or self.events == [("start", 2), ("done", 2),
+                                           ("start", 1), ("done", 1)]
+                        )
+
+    def testLock4(self):
+        self.control.getBuilder("full1a").requestBuild(self.req1)
+        self.control.getBuilder("full1c").requestBuild(self.req2)
+        self.control.getBuilder("full1d").requestBuild(self.req3)
+        d = defer.DeferredList([self.req1.waitUntilFinished(),
+                                self.req2.waitUntilFinished(),
+                                self.req3.waitUntilFinished()])
+        d.addCallback(self._testLock4_1)
+        return maybeWait(d)
+
+    def _testLock4_1(self, res):
+        # full1a starts, then full1d starts (because they do not interfere).
+        # Once both are done, full1c can run.
+        self.failUnlessEqual(self.events,
+                             [("start", 1), ("start", 3),
+                              ("done", 1), ("done", 3),
+                              ("start", 2), ("done", 2)])
+

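The long comment in GetLock.testGet describes how the lock identifiers from master.cfg are mapped onto shared "real" lock objects, so that a config reload does not quietly split one lock into two separate incarnations. A rough sketch of such an id-to-lock registry (this is not BotMaster.getLockByID; the registry class and the tuple-shaped lock ids are invented for illustration):

    # Illustrative sketch: lock ids compare and hash by value, so an identical
    # id coming from a reloaded config maps onto the same real lock, while a
    # changed maxCount yields a fresh one.
    class RealLock:
        def __init__(self, name, maxCount):
            self.name = name
            self.maxCount = maxCount

    class LockRegistry:
        def __init__(self):
            self._locks = {}           # lock id -> shared RealLock

        def get_lock_by_id(self, lockid):
            if lockid not in self._locks:
                name, maxCount = lockid
                self._locks[lockid] = RealLock(name, maxCount)
            return self._locks[lockid]

    registry = LockRegistry()
    a = registry.get_lock_by_id(("one", 1))
    b = registry.get_lock_by_id(("one", 1))
    c = registry.get_lock_by_id(("one", 4))
    assert a is b and a is not c
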
Added: vendor/buildbot/current/buildbot/test/test_maildir.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_maildir.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_maildir.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,79 @@
+# -*- test-case-name: buildbot.test.test_maildir -*-
+
+from twisted.trial import unittest
+import os, shutil
+from buildbot.changes.mail import FCMaildirSource
+from twisted.internet import reactor
+from twisted.python import util
+
+class MaildirTest(unittest.TestCase):
+    def setUp(self):
+        print "creating empty maildir"
+        self.maildir = "test-maildir"
+        if os.path.isdir(self.maildir):
+            shutil.rmtree(self.maildir)
+            print "removing stale maildir"
+        os.mkdir(self.maildir)
+        os.mkdir(os.path.join(self.maildir, "cur"))
+        os.mkdir(os.path.join(self.maildir, "new"))
+        os.mkdir(os.path.join(self.maildir, "tmp"))
+        self.source = None
+        self.done = 0
+
+    def tearDown(self):
+        print "removing old maildir"
+        shutil.rmtree(self.maildir)
+        if self.source:
+            self.source.stopService()
+
+    def addChange(self, c):
+        # NOTE: this assumes every message results in a Change, which isn't
+        # true for msg8-prefix
+        print "got change"
+        self.changes.append(c)
+
+    def deliverMail(self, msg):
+        print "delivering", msg
+        newdir = os.path.join(self.maildir, "new")
+        # to do this right, use safecat
+        shutil.copy(msg, newdir)
+
+    def do_timeout(self):
+        self.done = 1
+
+    def testMaildir(self):
+        self.changes = []
+        s = self.source = FCMaildirSource(self.maildir)
+        s.parent = self
+        s.startService()
+        testfiles_dir = util.sibpath(__file__, "mail")
+        testfiles = [msg for msg in os.listdir(testfiles_dir)
+                     if msg.startswith("msg")]
+        testfiles.sort()
+        count = len(testfiles)
+        for i in range(count):
+            msg = testfiles[i]
+            reactor.callLater(2*i, self.deliverMail,
+                              os.path.join(testfiles_dir, msg))
+        t = reactor.callLater(2*i + 15, self.do_timeout)
+        while not (self.done or len(self.changes) == count):
+            reactor.iterate(0.1)
+        s.stopService()
+        if self.done:
+            return self.fail("timeout: messages weren't received on time")
+        t.cancel()
+        # TODO: verify the messages, should use code from test_mailparse but
+        # I'm not sure how to factor the verification routines out in a
+        # useful fashion
+        #for i in range(count):
+        #    msg, check = test_messages[i]
+        #    check(self, self.changes[i])
+        
+
+if __name__ == '__main__':
+    suite = unittest.TestSuite()
+    suite.addTestClass(MaildirTest)
+    import sys
+    reporter = unittest.TextReporter(sys.stdout)
+    suite.run(reporter)
+    

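deliverMail() above copies each message straight into new/ and notes that doing it properly would use safecat. For reference, a sketch of the conventional two-step maildir delivery that comment alludes to (the helper name and the unique-filename scheme are made up for this example):

    # Sketch of safecat-style delivery: write into tmp/ first, then rename
    # into new/, so readers never see a partially written message.
    import os, shutil, socket, time

    def deliver_atomically(maildir, msgpath):
        unique = "%d.%d.%s" % (time.time(), os.getpid(), socket.gethostname())
        tmpfile = os.path.join(maildir, "tmp", unique)
        newfile = os.path.join(maildir, "new", unique)
        shutil.copy(msgpath, tmpfile)   # readers ignore tmp/ while the copy is in flight
        os.rename(tmpfile, newfile)     # rename is atomic within one filesystem
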
Added: vendor/buildbot/current/buildbot/test/test_mailparse.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_mailparse.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_mailparse.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,247 @@
+# -*- test-case-name: buildbot.test.test_mailparse -*-
+
+from twisted.trial import unittest
+from twisted.python import util
+from buildbot.changes.mail import parseFreshCVSMail, parseSyncmail
+
+class Test1(unittest.TestCase):
+
+    def get(self, msg):
+        msg = util.sibpath(__file__, msg)
+        return parseFreshCVSMail(None, open(msg, "r"))
+
+    def testMsg1(self):
+        c = self.get("mail/msg1")
+        self.assertEqual(c.who, "moshez")
+        self.assertEqual(c.files, ["Twisted/debian/python-twisted.menu.in"])
+        self.assertEqual(c.comments, "Instance massenger, apparently\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg2(self):
+        c = self.get("mail/msg2")
+        self.assertEqual(c.who, "itamarst")
+        self.assertEqual(c.files, ["Twisted/twisted/web/woven/form.py",
+                                   "Twisted/twisted/python/formmethod.py"])
+        self.assertEqual(c.comments,
+                         "submit formmethod now subclass of Choice\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg3(self):
+        # same as msg2 but missing the ViewCVS section
+        c = self.get("mail/msg3")
+        self.assertEqual(c.who, "itamarst")
+        self.assertEqual(c.files, ["Twisted/twisted/web/woven/form.py",
+                                   "Twisted/twisted/python/formmethod.py"])
+        self.assertEqual(c.comments,
+                         "submit formmethod now subclass of Choice\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg4(self):
+        # same as msg3 but also missing CVS patch section
+        c = self.get("mail/msg4")
+        self.assertEqual(c.who, "itamarst")
+        self.assertEqual(c.files, ["Twisted/twisted/web/woven/form.py",
+                                   "Twisted/twisted/python/formmethod.py"])
+        self.assertEqual(c.comments,
+                         "submit formmethod now subclass of Choice\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg5(self):
+        # creates a directory
+        c = self.get("mail/msg5")
+        self.assertEqual(c.who, "etrepum")
+        self.assertEqual(c.files, ["Twisted/doc/examples/cocoaDemo"])
+        self.assertEqual(c.comments,
+                         "Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository\n")
+        self.assertEqual(c.isdir, 1)
+
+    def testMsg6(self):
+        # adds files
+        c = self.get("mail/msg6")
+        self.assertEqual(c.who, "etrepum")
+        self.assertEqual(c.files, [
+            "Twisted/doc/examples/cocoaDemo/MyAppDelegate.py",
+            "Twisted/doc/examples/cocoaDemo/__main__.py",
+            "Twisted/doc/examples/cocoaDemo/bin-python-main.m",
+            "Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
+            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
+            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
+            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
+            "Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"])
+        self.assertEqual(c.comments,
+                         "Cocoa (OS X) clone of the QT demo, using polling reactor\n\nRequires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project.  The reactor is iterated periodically by a repeating NSTimer.\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg7(self):
+        # deletes files
+        c = self.get("mail/msg7")
+        self.assertEqual(c.who, "etrepum")
+        self.assertEqual(c.files, [
+            "Twisted/doc/examples/cocoaDemo/MyAppDelegate.py",
+            "Twisted/doc/examples/cocoaDemo/__main__.py",
+            "Twisted/doc/examples/cocoaDemo/bin-python-main.m",
+            "Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
+            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
+            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
+            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
+            "Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"])
+        self.assertEqual(c.comments,
+                         "Directories break debian build script, waiting for reasonable fix\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg8(self):
+        # files outside Twisted/
+        c = self.get("mail/msg8")
+        self.assertEqual(c.who, "acapnotic")
+        self.assertEqual(c.files, [ "CVSROOT/freshCfg" ])
+        self.assertEqual(c.comments, "it doesn't work with invalid syntax\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg9(self):
+        # also creates a directory
+        c = self.get("mail/msg9")
+        self.assertEqual(c.who, "exarkun")
+        self.assertEqual(c.files, ["Twisted/sandbox/exarkun/persist-plugin"])
+        self.assertEqual(c.comments,
+                         "Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository\n")
+        self.assertEqual(c.isdir, 1)
+
+
+class Test2(unittest.TestCase):
+    def get(self, msg):
+        msg = util.sibpath(__file__, msg)
+        return parseFreshCVSMail(None, open(msg, "r"), prefix="Twisted")
+
+    def testMsg1p(self):
+        c = self.get("mail/msg1")
+        self.assertEqual(c.who, "moshez")
+        self.assertEqual(c.files, ["debian/python-twisted.menu.in"])
+        self.assertEqual(c.comments, "Instance massenger, apparently\n")
+
+    def testMsg2p(self):
+        c = self.get("mail/msg2")
+        self.assertEqual(c.who, "itamarst")
+        self.assertEqual(c.files, ["twisted/web/woven/form.py",
+                                   "twisted/python/formmethod.py"])
+        self.assertEqual(c.comments,
+                         "submit formmethod now subclass of Choice\n")
+
+    def testMsg3p(self):
+        # same as msg2 but missing the ViewCVS section
+        c = self.get("mail/msg3")
+        self.assertEqual(c.who, "itamarst")
+        self.assertEqual(c.files, ["twisted/web/woven/form.py",
+                                   "twisted/python/formmethod.py"])
+        self.assertEqual(c.comments,
+                         "submit formmethod now subclass of Choice\n")
+
+    def testMsg4p(self):
+        # same as msg3 but also missing CVS patch section
+        c = self.get("mail/msg4")
+        self.assertEqual(c.who, "itamarst")
+        self.assertEqual(c.files, ["twisted/web/woven/form.py",
+                                   "twisted/python/formmethod.py"])
+        self.assertEqual(c.comments,
+                         "submit formmethod now subclass of Choice\n")
+
+    def testMsg5p(self):
+        # creates a directory
+        c = self.get("mail/msg5")
+        self.assertEqual(c.who, "etrepum")
+        self.assertEqual(c.files, ["doc/examples/cocoaDemo"])
+        self.assertEqual(c.comments,
+                         "Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository\n")
+        self.assertEqual(c.isdir, 1)
+
+    def testMsg6p(self):
+        # adds files
+        c = self.get("mail/msg6")
+        self.assertEqual(c.who, "etrepum")
+        self.assertEqual(c.files, [
+            "doc/examples/cocoaDemo/MyAppDelegate.py",
+            "doc/examples/cocoaDemo/__main__.py",
+            "doc/examples/cocoaDemo/bin-python-main.m",
+            "doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
+            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
+            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
+            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
+            "doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"])
+        self.assertEqual(c.comments,
+                         "Cocoa (OS X) clone of the QT demo, using polling reactor\n\nRequires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project.  The reactor is iterated periodically by a repeating NSTimer.\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg7p(self):
+        # deletes files
+        c = self.get("mail/msg7")
+        self.assertEqual(c.who, "etrepum")
+        self.assertEqual(c.files, [
+            "doc/examples/cocoaDemo/MyAppDelegate.py",
+            "doc/examples/cocoaDemo/__main__.py",
+            "doc/examples/cocoaDemo/bin-python-main.m",
+            "doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
+            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
+            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
+            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
+            "doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"])
+        self.assertEqual(c.comments,
+                         "Directories break debian build script, waiting for reasonable fix\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsg8p(self):
+        # files outside Twisted/
+        c = self.get("mail/msg8")
+        self.assertEqual(c, None)
+
+
+class Test3(unittest.TestCase):
+    def get(self, msg):
+        msg = util.sibpath(__file__, msg)
+        return parseSyncmail(None, open(msg, "r"), prefix="buildbot")
+
+    def getNoPrefix(self, msg):
+        msg = util.sibpath(__file__, msg)
+        return parseSyncmail(None, open(msg, "r"))
+
+    def testMsgS1(self):
+        c = self.get("mail/syncmail.1")
+        self.failUnless(c is not None)
+        self.assertEqual(c.who, "warner")
+        self.assertEqual(c.files, ["buildbot/changes/freshcvsmail.py"])
+        self.assertEqual(c.comments,
+                         "remove leftover code, leave a temporary compatibility import. Note! Start\nimporting FCMaildirSource from changes.mail instead of changes.freshcvsmail\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsgS2(self):
+        c = self.get("mail/syncmail.2")
+        self.assertEqual(c.who, "warner")
+        self.assertEqual(c.files, ["ChangeLog"])
+        self.assertEqual(c.comments, "\t* NEWS: started adding new features\n")
+        self.assertEqual(c.isdir, 0)
+
+    def testMsgS3(self):
+        c = self.get("mail/syncmail.3")
+        self.failUnless(c == None)
+
+    def testMsgS4(self):
+        c = self.get("mail/syncmail.4")
+        self.assertEqual(c.who, "warner")
+        self.assertEqual(c.files, ["test/mail/syncmail.1",
+                                   "test/mail/syncmail.2",
+                                   "test/mail/syncmail.3"
+                                   ])
+        self.assertEqual(c.comments, "test cases for syncmail parser\n")
+        self.assertEqual(c.isdir, 0)
+        self.assertEqual(c.branch, None)
+
+    # tests a tag
+    def testMsgS5(self):
+        c = self.getNoPrefix("mail/syncmail.5")
+        self.failUnless(c)
+        self.assertEqual(c.who, "thomas")
+        self.assertEqual(c.files, ['test1/MANIFEST',
+                                   'test1/Makefile.am',
+                                   'test1/autogen.sh',
+                                   'test1/configure.in' 
+                                   ])
+        self.assertEqual(c.branch, "BRANCH-DEVEL")
+        self.assertEqual(c.isdir, 0)

Added: vendor/buildbot/current/buildbot/test/test_p4poller.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_p4poller.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_p4poller.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,217 @@
+import time
+
+from twisted.python import failure
+from twisted.internet import defer
+from twisted.trial import unittest
+
+from buildbot.twcompat import maybeWait
+from buildbot.changes.changes import Change
+from buildbot.changes.p4poller import P4Source, get_simple_split
+
+first_p4changes = \
+"""Change 1 on 2006/04/13 by slamb at testclient 'first rev'
+"""
+
+second_p4changes = \
+"""Change 3 on 2006/04/13 by bob at testclient 'short desc truncated'
+Change 2 on 2006/04/13 by slamb at testclient 'bar'
+"""
+
+third_p4changes = \
+"""Change 5 on 2006/04/13 by mpatel at testclient 'first rev'
+"""
+
+change_4_log = \
+"""Change 4 by mpatel at testclient on 2006/04/13 21:55:39
+
+	short desc truncated because this is a long description.
+"""
+change_3_log = \
+"""Change 3 by bob at testclient on 2006/04/13 21:51:39
+
+	short desc truncated because this is a long description.
+"""
+
+change_2_log = \
+"""Change 2 by slamb at testclient on 2006/04/13 21:46:23
+
+	creation
+"""
+
+p4change = {
+    '3': change_3_log +
+"""Affected files ...
+
+... //depot/myproject/branch_b/branch_b_file#1 add
+... //depot/myproject/branch_b/whatbranch#1 branch
+... //depot/myproject/branch_c/whatbranch#1 branch
+""",
+    '2': change_2_log +
+"""Affected files ...
+
+... //depot/myproject/trunk/whatbranch#1 add
+... //depot/otherproject/trunk/something#1 add
+""",
+    '5': change_4_log +
+"""Affected files ...
+
+... //depot/myproject/branch_b/branch_b_file#1 add
+... //depot/myproject/branch_b#75 edit
+... //depot/myproject/branch_c/branch_c_file#1 add
+""",
+}
+
+
+class MockP4Source(P4Source):
+    """Test P4Source which doesn't actually invoke p4."""
+    invocation = 0
+
+    def __init__(self, p4changes, p4change, *args, **kwargs):
+        P4Source.__init__(self, *args, **kwargs)
+        self.p4changes = p4changes
+        self.p4change = p4change
+
+    def _get_changes(self):
+        assert self.working
+        result = self.p4changes[self.invocation]
+        self.invocation += 1
+        return defer.succeed(result)
+
+    def _get_describe(self, dummy, num):
+        assert self.working
+        return defer.succeed(self.p4change[num])
+
+class TestP4Poller(unittest.TestCase):
+    def setUp(self):
+        self.changes = []
+        self.addChange = self.changes.append
+
+    def failUnlessIn(self, substr, string):
+        # this is for compatibility with python2.2
+        if isinstance(string, str):
+            self.failUnless(string.find(substr) != -1)
+        else:
+            self.assertIn(substr, string)
+
+    def testCheck(self):
+        """successful checks"""
+        self.t = MockP4Source(p4changes=[first_p4changes, second_p4changes],
+                              p4change=p4change,
+                              p4port=None, p4user=None,
+                              p4base='//depot/myproject/',
+                              split_file=lambda x: x.split('/', 1))
+        self.t.parent = self
+
+        # The first time, it just learns the change to start at.
+        self.assert_(self.t.last_change is None)
+        self.assert_(not self.t.working)
+        return maybeWait(self.t.checkp4().addCallback(self._testCheck2))
+
+    def _testCheck2(self, res):
+        self.assertEquals(self.changes, [])
+        self.assertEquals(self.t.last_change, '1')
+
+        # Subsequent times, it returns Change objects for new changes.
+        return self.t.checkp4().addCallback(self._testCheck3)
+
+    def _testCheck3(self, res):
+        self.assertEquals(len(self.changes), 3)
+        self.assertEquals(self.t.last_change, '3')
+        self.assert_(not self.t.working)
+
+        # They're supposed to go oldest to newest, so this one must be first.
+        self.assertEquals(self.changes[0].asText(),
+            Change(who='slamb',
+                   files=['whatbranch'],
+                   comments=change_2_log,
+                   revision='2',
+                   when=self.makeTime("2006/04/13 21:46:23"),
+                   branch='trunk').asText())
+
+        # These two can happen in either order, since they're from the same
+        # Perforce change.
+        self.failUnlessIn(
+            Change(who='bob',
+                   files=['branch_b_file',
+                          'whatbranch'],
+                   comments=change_3_log,
+                   revision='3',
+                   when=self.makeTime("2006/04/13 21:51:39"),
+                   branch='branch_b').asText(),
+            [c.asText() for c in self.changes])
+        self.failUnlessIn(
+            Change(who='bob',
+                   files=['whatbranch'],
+                   comments=change_3_log,
+                   revision='3',
+                   when=self.makeTime("2006/04/13 21:51:39"),
+                   branch='branch_c').asText(),
+            [c.asText() for c in self.changes])
+
+    def makeTime(self, timestring):
+        datefmt = '%Y/%m/%d %H:%M:%S'
+        when = time.mktime(time.strptime(timestring, datefmt))
+        return when
+
+    def testFailedChanges(self):
+        """'p4 changes' failure is properly reported"""
+        self.t = MockP4Source(p4changes=['Perforce client error:\n...'],
+                              p4change={},
+                              p4port=None, p4user=None)
+        self.t.parent = self
+        d = self.t.checkp4()
+        d.addBoth(self._testFailedChanges2)
+        return maybeWait(d)
+
+    def _testFailedChanges2(self, f):
+        self.assert_(isinstance(f, failure.Failure))
+        self.failUnlessIn('Perforce client error', str(f))
+        self.assert_(not self.t.working)
+
+    def testFailedDescribe(self):
+        """'p4 describe' failure is properly reported"""
+        c = dict(p4change)
+        c['3'] = 'Perforce client error:\n...'
+        self.t = MockP4Source(p4changes=[first_p4changes, second_p4changes],
+                              p4change=c, p4port=None, p4user=None)
+        self.t.parent = self
+        d = self.t.checkp4()
+        d.addCallback(self._testFailedDescribe2)
+        return maybeWait(d)
+
+    def _testFailedDescribe2(self, res):
+        # first time finds nothing; check again.
+        return self.t.checkp4().addBoth(self._testFailedDescribe3)
+
+    def _testFailedDescribe3(self, f):
+        self.assert_(isinstance(f, failure.Failure))
+        self.failUnlessIn('Perforce client error', str(f))
+        self.assert_(not self.t.working)
+        self.assertEquals(self.t.last_change, '2')
+
+    def testAlreadyWorking(self):
+        """don't launch a new poll while old is still going"""
+        self.t = P4Source()
+        self.t.working = True
+        self.assert_(self.t.last_change is None)
+        d = self.t.checkp4()
+        d.addCallback(self._testAlreadyWorking2)
+
+    def _testAlreadyWorking2(self, res):
+        self.assert_(self.t.last_change is None)
+
+    def testSplitFile(self):
+        """Make sure split file works on branch only changes"""
+        self.t = MockP4Source(p4changes=[third_p4changes],
+                              p4change=p4change,
+                              p4port=None, p4user=None,
+                              p4base='//depot/myproject/',
+                              split_file=get_simple_split)
+        self.t.parent = self
+        self.t.last_change = 50
+        d = self.t.checkp4()
+        d.addCallback(self._testSplitFile)
+
+    def _testSplitFile(self, res):
+        self.assertEquals(len(self.changes), 2)
+        self.assertEquals(self.t.last_change, '5')

Added: vendor/buildbot/current/buildbot/test/test_properties.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_properties.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_properties.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,167 @@
+# -*- test-case-name: buildbot.test.test_properties -*-
+
+import os
+
+from twisted.trial import unittest
+
+from buildbot.twcompat import maybeWait
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process import base
+from buildbot.steps.shell import ShellCommand, WithProperties
+from buildbot.status import builder
+from buildbot.slave.commands import rmdirRecursive
+from buildbot.test.runutils import RunMixin
+
+class MyBuildStep(ShellCommand):
+    def _interpolateProperties(self, command):
+        command = ["tar", "czf",
+                   "build-%s.tar.gz" % self.getProperty("revision"),
+                   "source"]
+        return ShellCommand._interpolateProperties(self, command)
+
+
+class FakeBuild:
+    pass
+class FakeBuilder:
+    statusbag = None
+    name = "fakebuilder"
+class FakeSlave:
+    slavename = "bot12"
+class FakeSlaveBuilder:
+    slave = FakeSlave()
+    def getSlaveCommandVersion(self, command, oldversion=None):
+        return "1.10"
+
+class Interpolate(unittest.TestCase):
+    def setUp(self):
+        self.builder = FakeBuilder()
+        self.builder_status = builder.BuilderStatus("fakebuilder")
+        self.builder_status.basedir = "test_properties"
+        self.builder_status.nextBuildNumber = 5
+        rmdirRecursive(self.builder_status.basedir)
+        os.mkdir(self.builder_status.basedir)
+        self.build_status = self.builder_status.newBuild()
+        req = base.BuildRequest("reason", SourceStamp(branch="branch2",
+                                                      revision=1234))
+        self.build = base.Build([req])
+        self.build.setBuilder(self.builder)
+        self.build.setupStatus(self.build_status)
+        self.build.setupSlaveBuilder(FakeSlaveBuilder())
+
+    def testWithProperties(self):
+        self.build.setProperty("revision", 47)
+        self.failUnlessEqual(self.build_status.getProperty("revision"), 47)
+        c = ShellCommand(workdir=dir, build=self.build,
+                         command=["tar", "czf",
+                                  WithProperties("build-%s.tar.gz",
+                                                 "revision"),
+                                  "source"])
+        cmd = c._interpolateProperties(c.command)
+        self.failUnlessEqual(cmd,
+                             ["tar", "czf", "build-47.tar.gz", "source"])
+
+    def testWithPropertiesDict(self):
+        self.build.setProperty("other", "foo")
+        self.build.setProperty("missing", None)
+        c = ShellCommand(workdir=dir, build=self.build,
+                         command=["tar", "czf",
+                                  WithProperties("build-%(other)s.tar.gz"),
+                                  "source"])
+        cmd = c._interpolateProperties(c.command)
+        self.failUnlessEqual(cmd,
+                             ["tar", "czf", "build-foo.tar.gz", "source"])
+
+    def testWithPropertiesEmpty(self):
+        self.build.setProperty("empty", None)
+        c = ShellCommand(workdir=dir, build=self.build,
+                         command=["tar", "czf",
+                                  WithProperties("build-%(empty)s.tar.gz"),
+                                  "source"])
+        cmd = c._interpolateProperties(c.command)
+        self.failUnlessEqual(cmd,
+                             ["tar", "czf", "build-.tar.gz", "source"])
+
+    def testCustomBuildStep(self):
+        c = MyBuildStep(workdir=dir, build=self.build)
+        cmd = c._interpolateProperties(c.command)
+        self.failUnlessEqual(cmd,
+                             ["tar", "czf", "build-1234.tar.gz", "source"])
+
+    def testSourceStamp(self):
+        c = ShellCommand(workdir=dir, build=self.build,
+                         command=["touch",
+                                  WithProperties("%s-dir", "branch"),
+                                  WithProperties("%s-rev", "revision"),
+                                  ])
+        cmd = c._interpolateProperties(c.command)
+        self.failUnlessEqual(cmd,
+                             ["touch", "branch2-dir", "1234-rev"])
+
+    def testSlaveName(self):
+        c = ShellCommand(workdir=dir, build=self.build,
+                         command=["touch",
+                                  WithProperties("%s-slave", "slavename"),
+                                  ])
+        cmd = c._interpolateProperties(c.command)
+        self.failUnlessEqual(cmd,
+                             ["touch", "bot12-slave"])
+
+    def testBuildNumber(self):
+        c = ShellCommand(workdir=dir, build=self.build,
+                         command=["touch",
+                                  WithProperties("build-%d", "buildnumber"),
+                                  WithProperties("builder-%s", "buildername"),
+                                  ])
+        cmd = c._interpolateProperties(c.command)
+        self.failUnlessEqual(cmd,
+                             ["touch", "build-5", "builder-fakebuilder"])
+
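+# Summary of the interpolation forms exercised above (comment added for
+# clarity): WithProperties("build-%s.tar.gz", "revision") fills positional
+# %s slots from the named properties given as extra arguments, while
+# WithProperties("build-%(other)s.tar.gz") uses dict-style %(name)s lookups;
+# a property set to None interpolates as an empty string, as
+# testWithPropertiesEmpty shows.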
+
+run_config = """
+from buildbot.process import factory
+from buildbot.steps.shell import ShellCommand, WithProperties
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['bots'] = [('bot1', 'sekrit')]
+c['sources'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 0
+
+# Note: when run against twisted-1.3.0, this locks up about 5% of the time. I
+# suspect that a command with no output that finishes quickly triggers a race
+# condition in 1.3.0's process-reaping code. The 'touch' process becomes a
+# zombie and the step never completes. To keep this from messing up the unit
+# tests too badly, this step runs with a reduced timeout.
+
+f1 = factory.BuildFactory([s(ShellCommand,
+                             flunkOnFailure=True,
+                             command=['touch',
+                                      WithProperties('%s-slave', 'slavename'),
+                                      ],
+                             workdir='.',
+                             timeout=10,
+                             )])
+
+b1 = {'name': 'full1', 'slavename': 'bot1', 'builddir': 'bd1', 'factory': f1}
+c['builders'] = [b1]
+
+"""
+
+class Run(RunMixin, unittest.TestCase):
+    def testInterpolate(self):
+        # run an actual build with a step that interpolates a build property
+        d = self.master.loadConfig(run_config)
+        d.addCallback(lambda res: self.master.startService())
+        d.addCallback(lambda res: self.connectOneSlave("bot1"))
+        d.addCallback(lambda res: self.requestBuild("full1"))
+        d.addCallback(self.failUnlessBuildSucceeded)
+        def _check_touch(res):
+            f = os.path.join("slavebase-bot1", "bd1", "bot1-slave")
+            self.failUnless(os.path.exists(f))
+            return res
+        d.addCallback(_check_touch)
+        return maybeWait(d)
+
+
+# we test got_revision in test_vc

Added: vendor/buildbot/current/buildbot/test/test_run.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_run.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_run.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,514 @@
+# -*- test-case-name: buildbot.test.test_run -*-
+
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+import os
+
+from buildbot import master, interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.changes import changes
+from buildbot.status import builder
+from buildbot.process.base import BuildRequest
+from buildbot.twcompat import maybeWait
+
+from buildbot.test.runutils import RunMixin, rmtree
+
+config_base = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+s = factory.s
+
+f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
+
+f2 = factory.BuildFactory([
+    s(dummy.Dummy, timeout=1),
+    s(dummy.RemoteDummy, timeout=2),
+    ])
+
+BuildmasterConfig = c = {}
+c['bots'] = [['bot1', 'sekrit']]
+c['sources'] = []
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick', 'slavename':'bot1',
+                      'builddir': 'quickdir', 'factory': f1})
+c['slavePortnum'] = 0
+"""
+
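+# The configs below all build on config_base (directly or via each other),
+# which already provides the minimal master.cfg keys used by these tests:
+# 'bots', 'sources', 'schedulers', 'builders' and 'slavePortnum'.
+# (Descriptive comment added for clarity.)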
+config_run = config_base + """
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('quick', None, 120, ['quick'])]
+"""
+
+config_2 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+                  'builddir': 'dummy1', 'factory': f2},
+                 {'name': 'testdummy', 'slavename': 'bot1',
+                  'builddir': 'dummy2', 'factory': f2, 'category': 'test'}]
+"""
+
+config_3 = config_2 + """
+c['builders'].append({'name': 'adummy', 'slavename': 'bot1',
+                      'builddir': 'adummy3', 'factory': f2})
+c['builders'].append({'name': 'bdummy', 'slavename': 'bot1',
+                      'builddir': 'adummy4', 'factory': f2,
+                      'category': 'test'})
+"""
+
+config_4 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+                  'builddir': 'dummy', 'factory': f2}]
+"""
+
+config_4_newbasedir = config_4 + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+                  'builddir': 'dummy2', 'factory': f2}]
+"""
+
+config_4_newbuilder = config_4_newbasedir + """
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+                      'builddir': 'dummy23', 'factory': f2})
+"""
+
+class Run(unittest.TestCase):
+    def rmtree(self, d):
+        rmtree(d)
+
+    def testMaster(self):
+        self.rmtree("basedir")
+        os.mkdir("basedir")
+        m = master.BuildMaster("basedir")
+        m.loadConfig(config_run)
+        m.readConfig = True
+        m.startService()
+        cm = m.change_svc
+        c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+        cm.addChange(c)
+        # verify that the Scheduler is now waiting
+        s = m.allSchedulers()[0]
+        self.failUnless(s.timer)
+        # halting the service will also stop the timer
+        d = defer.maybeDeferred(m.stopService)
+        return maybeWait(d)
+
+class Ping(RunMixin, unittest.TestCase):
+    def testPing(self):
+        self.master.loadConfig(config_2)
+        self.master.readConfig = True
+        self.master.startService()
+
+        d = self.connectSlave()
+        d.addCallback(self._testPing_1)
+        return maybeWait(d)
+
+    def _testPing_1(self, res):
+        d = interfaces.IControl(self.master).getBuilder("dummy").ping(1)
+        d.addCallback(self._testPing_2)
+        return d
+
+    def _testPing_2(self, res):
+        pass
+
+class BuilderNames(unittest.TestCase):
+
+    def testGetBuilderNames(self):
+        os.mkdir("bnames")
+        m = master.BuildMaster("bnames")
+        s = m.getStatus()
+
+        m.loadConfig(config_3)
+        m.readConfig = True
+
+        self.failUnlessEqual(s.getBuilderNames(),
+                             ["dummy", "testdummy", "adummy", "bdummy"])
+        self.failUnlessEqual(s.getBuilderNames(categories=['test']),
+                             ["testdummy", "bdummy"])
+
+class Disconnect(RunMixin, unittest.TestCase):
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        
+        # verify that disconnecting the slave during a build properly
+        # terminates the build
+        m = self.master
+        s = self.status
+        c = self.control
+
+        m.loadConfig(config_2)
+        m.readConfig = True
+        m.startService()
+
+        self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
+        self.s1 = s1 = s.getBuilder("dummy")
+        self.failUnlessEqual(s1.getName(), "dummy")
+        self.failUnlessEqual(s1.getState(), ("offline", []))
+        self.failUnlessEqual(s1.getCurrentBuilds(), [])
+        self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+        self.failUnlessEqual(s1.getBuild(-1), None)
+
+        d = self.connectSlave()
+        d.addCallback(self._disconnectSetup_1)
+        return maybeWait(d)
+
+    def _disconnectSetup_1(self, res):
+        self.failUnlessEqual(self.s1.getState(), ("idle", []))
+
+
+    def verifyDisconnect(self, bs):
+        self.failUnless(bs.isFinished())
+
+        step1 = bs.getSteps()[0]
+        self.failUnlessEqual(step1.getText(), ["delay", "interrupted"])
+        self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)
+
+        self.failUnlessEqual(bs.getResults(), builder.FAILURE)
+
+    def verifyDisconnect2(self, bs):
+        self.failUnless(bs.isFinished())
+
+        step1 = bs.getSteps()[1]
+        self.failUnlessEqual(step1.getText(), ["remote", "delay", "2 secs",
+                                               "failed", "slave", "lost"])
+        self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)
+
+        self.failUnlessEqual(bs.getResults(), builder.FAILURE)
+
+    def submitBuild(self):
+        ss = SourceStamp()
+        br = BuildRequest("forced build", ss, "dummy")
+        self.control.getBuilder("dummy").requestBuild(br)
+        d = defer.Deferred()
+        def _started(bc):
+            br.unsubscribe(_started)
+            d.callback(bc)
+        br.subscribe(_started)
+        return d
+
+    def testIdle2(self):
+        # now suppose the slave goes missing
+        self.slaves['bot1'].bf.continueTrying = 0
+        self.disappearSlave()
+
+        # forcing a build will work: the build will detect that the slave is
+        # no longer available and the request will be re-queued. Wait a few
+        # seconds, then check that the request is still in the
+        # 'waiting for a slave' queue.
+        self.control.getBuilder("dummy").original.START_BUILD_TIMEOUT = 1
+        req = BuildRequest("forced build", SourceStamp())
+        self.failUnlessEqual(req.startCount, 0)
+        self.control.getBuilder("dummy").requestBuild(req)
+        # this should ping the slave, which doesn't respond, and then give up
+        # after a second. The BuildRequest will be re-queued, and its
+        # .startCount will be incremented.
+        d = defer.Deferred()
+        d.addCallback(self._testIdle2_1, req)
+        reactor.callLater(3, d.callback, None)
+        return maybeWait(d, 5)
+    testIdle2.timeout = 5
+
+    def _testIdle2_1(self, res, req):
+        self.failUnlessEqual(req.startCount, 1)
+        cancelled = req.cancel()
+        self.failUnless(cancelled)
+
+
+    def testBuild1(self):
+        # this next sequence is timing-dependent. The dummy build takes at
+        # least 3 seconds to complete, and this batch of commands must
+        # complete within that time.
+        #
+        d = self.submitBuild()
+        d.addCallback(self._testBuild1_1)
+        return maybeWait(d)
+
+    def _testBuild1_1(self, bc):
+        bs = bc.getStatus()
+        # now kill the slave before it gets to start the first step
+        d = self.shutdownAllSlaves() # dies before it gets started
+        d.addCallback(self._testBuild1_2, bs)
+        return d  # TODO: this used to have a 5-second timeout
+
+    def _testBuild1_2(self, res, bs):
+        # now examine the just-stopped build and make sure it is really
+        # stopped. This is checking for bugs in which the slave-detach gets
+        # missed or causes an exception which prevents the build from being
+        # marked as "finished due to an error".
+        d = bs.waitUntilFinished()
+        d2 = self.master.botmaster.waitUntilBuilderDetached("dummy")
+        dl = defer.DeferredList([d, d2])
+        dl.addCallback(self._testBuild1_3, bs)
+        return dl # TODO: this had a 5-second timeout too
+
+    def _testBuild1_3(self, res, bs):
+        self.failUnlessEqual(self.s1.getState()[0], "offline")
+        self.verifyDisconnect(bs)
+
+
+    def testBuild2(self):
+        # this next sequence is timing-dependent
+        d = self.submitBuild()
+        d.addCallback(self._testBuild2_1)
+        return maybeWait(d, 30)
+    testBuild2.timeout = 30
+
+    def _testBuild2_1(self, bc):
+        bs = bc.getStatus()
+        # shutdown the slave while it's running the first step
+        reactor.callLater(0.5, self.shutdownAllSlaves)
+
+        d = bs.waitUntilFinished()
+        d.addCallback(self._testBuild2_2, bs)
+        return d
+
+    def _testBuild2_2(self, res, bs):
+        # we get here when the build has finished. The builder is still being
+        # torn down, however, so spin for another second to allow the
+        # callLater(0) in Builder.detached to fire.
+        d = defer.Deferred()
+        reactor.callLater(1, d.callback, None)
+        d.addCallback(self._testBuild2_3, bs)
+        return d
+
+    def _testBuild2_3(self, res, bs):
+        self.failUnlessEqual(self.s1.getState()[0], "offline")
+        self.verifyDisconnect(bs)
+
+
+    def testBuild3(self):
+        # this next sequence is timing-dependent
+        d = self.submitBuild()
+        d.addCallback(self._testBuild3_1)
+        return maybeWait(d, 30)
+    testBuild3.timeout = 30
+
+    def _testBuild3_1(self, bc):
+        bs = bc.getStatus()
+        # kill the slave while it's running the first step
+        reactor.callLater(0.5, self.killSlave)
+        d = bs.waitUntilFinished()
+        d.addCallback(self._testBuild3_2, bs)
+        return d
+
+    def _testBuild3_2(self, res, bs):
+        # the builder is still being torn down, so give it another second
+        d = defer.Deferred()
+        reactor.callLater(1, d.callback, None)
+        d.addCallback(self._testBuild3_3, bs)
+        return d
+
+    def _testBuild3_3(self, res, bs):
+        self.failUnlessEqual(self.s1.getState()[0], "offline")
+        self.verifyDisconnect(bs)
+
+
+    def testBuild4(self):
+        # this next sequence is timing-dependent
+        d = self.submitBuild()
+        d.addCallback(self._testBuild4_1)
+        return maybeWait(d, 30)
+    testBuild4.timeout = 30
+
+    def _testBuild4_1(self, bc):
+        bs = bc.getStatus()
+        # kill the slave while it's running the second (remote) step
+        reactor.callLater(1.5, self.killSlave)
+        d = bs.waitUntilFinished()
+        d.addCallback(self._testBuild4_2, bs)
+        return d
+
+    def _testBuild4_2(self, res, bs):
+        # at this point, the slave is in the process of being removed, so it
+        # could either be 'idle' or 'offline'. I think there is a
+        # reactor.callLater(0) standing between here and the offline state.
+        #reactor.iterate() # TODO: remove the need for this
+
+        self.failUnlessEqual(self.s1.getState()[0], "offline")
+        self.verifyDisconnect2(bs)
+
+
+    def testInterrupt(self):
+        # this next sequence is timing-dependent
+        d = self.submitBuild()
+        d.addCallback(self._testInterrupt_1)
+        return maybeWait(d, 30)
+    testInterrupt.timeout = 30
+
+    def _testInterrupt_1(self, bc):
+        bs = bc.getStatus()
+        # halt the build while it's running the first step
+        reactor.callLater(0.5, bc.stopBuild, "bang go splat")
+        d = bs.waitUntilFinished()
+        d.addCallback(self._testInterrupt_2, bs)
+        return d
+
+    def _testInterrupt_2(self, res, bs):
+        self.verifyDisconnect(bs)
+
+
+    def testDisappear(self):
+        bc = self.control.getBuilder("dummy")
+
+        # ping should succeed
+        d = bc.ping(1)
+        d.addCallback(self._testDisappear_1, bc)
+        return maybeWait(d)
+
+    def _testDisappear_1(self, res, bc):
+        self.failUnlessEqual(res, True)
+
+        # now, before any build is run, make the slave disappear
+        self.slaves['bot1'].bf.continueTrying = 0
+        self.disappearSlave()
+
+        # at this point, a ping to the slave should timeout
+        d = bc.ping(1)
+        d.addCallback(self._testDisappear_2)
+        return d
+    def _testDisappear_2(self, res):
+        self.failUnlessEqual(res, False)
+
+    def testDuplicate(self):
+        bc = self.control.getBuilder("dummy")
+        bs = self.status.getBuilder("dummy")
+        ss = bs.getSlaves()[0]
+
+        self.failUnless(ss.isConnected())
+        self.failUnlessEqual(ss.getAdmin(), "one")
+
+        # now, before any build is run, make the first slave disappear
+        self.slaves['bot1'].bf.continueTrying = 0
+        self.disappearSlave()
+
+        d = self.master.botmaster.waitUntilBuilderDetached("dummy")
+        # now let the new slave take over
+        self.connectSlave2()
+        d.addCallback(self._testDuplicate_1, ss)
+        return maybeWait(d, 2)
+    testDuplicate.timeout = 5
+
+    def _testDuplicate_1(self, res, ss):
+        d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+        d.addCallback(self._testDuplicate_2, ss)
+        return d
+
+    def _testDuplicate_2(self, res, ss):
+        self.failUnless(ss.isConnected())
+        self.failUnlessEqual(ss.getAdmin(), "two")
+
+
+class Disconnect2(RunMixin, unittest.TestCase):
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        # verify that disconnecting the slave during a build properly
+        # terminates the build
+        m = self.master
+        s = self.status
+        c = self.control
+
+        m.loadConfig(config_2)
+        m.readConfig = True
+        m.startService()
+
+        self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
+        self.s1 = s1 = s.getBuilder("dummy")
+        self.failUnlessEqual(s1.getName(), "dummy")
+        self.failUnlessEqual(s1.getState(), ("offline", []))
+        self.failUnlessEqual(s1.getCurrentBuilds(), [])
+        self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+        self.failUnlessEqual(s1.getBuild(-1), None)
+
+        d = self.connectSlaveFastTimeout()
+        d.addCallback(self._setup_disconnect2_1)
+        return maybeWait(d)
+
+    def _setup_disconnect2_1(self, res):
+        self.failUnlessEqual(self.s1.getState(), ("idle", []))
+
+
+    def testSlaveTimeout(self):
+        # now suppose the slave goes missing. We want to find out when it
+        # creates a new Broker, so we reach inside and mark it with the
+        # well-known sigil of impending messy death.
+        bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+        broker = bd.remote.broker
+        broker.redshirt = 1
+
+        # make sure the keepalives will keep the connection up
+        d = defer.Deferred()
+        reactor.callLater(5, d.callback, None)
+        d.addCallback(self._testSlaveTimeout_1)
+        return maybeWait(d, 20)
+    testSlaveTimeout.timeout = 20
+
+    def _testSlaveTimeout_1(self, res):
+        bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+        if not bd.remote or not hasattr(bd.remote.broker, "redshirt"):
+            self.fail("slave disconnected when it shouldn't have")
+
+        d = self.master.botmaster.waitUntilBuilderDetached("dummy")
+        # whoops! how careless of me.
+        self.disappearSlave()
+        # the slave will realize the connection is lost within 2 seconds, and
+        # reconnect.
+        d.addCallback(self._testSlaveTimeout_2)
+        return d
+
+    def _testSlaveTimeout_2(self, res):
+        # the ReconnectingPBClientFactory will attempt a reconnect in two
+        # seconds.
+        d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+        d.addCallback(self._testSlaveTimeout_3)
+        return d
+
+    def _testSlaveTimeout_3(self, res):
+        # make sure it is a new connection (i.e. a new Broker)
+        bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+        self.failUnless(bd.remote, "hey, slave isn't really connected")
+        self.failIf(hasattr(bd.remote.broker, "redshirt"),
+                    "hey, slave's Broker is still marked for death")
+
+
+class Basedir(RunMixin, unittest.TestCase):
+    def testChangeBuilddir(self):
+        m = self.master
+        m.loadConfig(config_4)
+        m.readConfig = True
+        m.startService()
+        
+        d = self.connectSlave()
+        d.addCallback(self._testChangeBuilddir_1)
+        return maybeWait(d)
+
+    def _testChangeBuilddir_1(self, res):
+        self.bot = bot = self.slaves['bot1'].bot
+        self.builder = builder = bot.builders.get("dummy")
+        self.failUnless(builder)
+        self.failUnlessEqual(builder.builddir, "dummy")
+        self.failUnlessEqual(builder.basedir,
+                             os.path.join("slavebase-bot1", "dummy"))
+
+        d = self.master.loadConfig(config_4_newbasedir)
+        d.addCallback(self._testChangeBuilddir_2)
+        return d
+
+    def _testChangeBuilddir_2(self, res):
+        bot = self.bot
+        # this does NOT cause the builder to be replaced
+        builder = bot.builders.get("dummy")
+        self.failUnless(builder)
+        self.failUnlessIdentical(self.builder, builder)
+        # the basedir should be updated
+        self.failUnlessEqual(builder.builddir, "dummy2")
+        self.failUnlessEqual(builder.basedir,
+                             os.path.join("slavebase-bot1", "dummy2"))
+
+        # add a new builder, which causes the basedir list to be reloaded
+        d = self.master.loadConfig(config_4_newbuilder)
+        return d
+
+# TODO: test everything, from Change submission to Scheduler to Build to
+# Status. Use all the status types. Specifically I want to catch recurrences
+# of the bug where I forgot to make Waterfall inherit from StatusReceiver
+# such that buildSetSubmitted failed.
+

Added: vendor/buildbot/current/buildbot/test/test_runner.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_runner.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_runner.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,299 @@
+
+# this file tests the 'buildbot' command, with its various sub-commands
+
+from twisted.trial import unittest
+from twisted.python import usage
+import os, shutil, shlex
+
+from buildbot.scripts import runner, tryclient
+
+class Options(unittest.TestCase):
+    optionsFile = "SDFsfsFSdfsfsFSD"
+
+    def make(self, d, key):
+        # we use a wacky filename here in case the test code discovers the
+        # user's real ~/.buildbot/ directory
+        os.makedirs(os.sep.join(d + [".buildbot"]))
+        f = open(os.sep.join(d + [".buildbot", self.optionsFile]), "w")
+        f.write("key = '%s'\n" % key)
+        f.close()
+
+    def check(self, d, key):
+        basedir = os.sep.join(d)
+        options = runner.loadOptions(self.optionsFile, here=basedir,
+                                     home=self.home)
+        if key is None:
+            self.failIf(options.has_key('key'))
+        else:
+            self.failUnlessEqual(options['key'], key)
+
+    def testFindOptions(self):
+        self.make(["home", "dir1", "dir2", "dir3"], "one")
+        self.make(["home", "dir1", "dir2"], "two")
+        self.make(["home"], "home")
+        self.home = os.path.abspath("home")
+
+        self.check(["home", "dir1", "dir2", "dir3"], "one")
+        self.check(["home", "dir1", "dir2"], "two")
+        self.check(["home", "dir1"], "home")
+
+        self.home = os.path.abspath("nothome")
+        os.makedirs(os.sep.join(["nothome", "dir1"]))
+        self.check(["nothome", "dir1"], None)
+
+    def doForce(self, args, expected):
+        o = runner.ForceOptions()
+        o.parseOptions(args)
+        self.failUnlessEqual(o.keys(), expected.keys())
+        for k in o.keys():
+            self.failUnlessEqual(o[k], expected[k],
+                                 "[%s] got %s instead of %s" % (k, o[k],
+                                                                expected[k]))
+
+    def testForceOptions(self):
+        if not hasattr(shlex, "split"):
+            raise unittest.SkipTest("need python>=2.3 for shlex.split")
+
+        exp = {"builder": "b1", "reason": "reason",
+               "branch": None, "revision": None}
+        self.doForce(shlex.split("b1 reason"), exp)
+        self.doForce(shlex.split("b1 'reason'"), exp)
+        self.failUnlessRaises(usage.UsageError, self.doForce,
+                              shlex.split("--builder b1 'reason'"), exp)
+        self.doForce(shlex.split("--builder b1 --reason reason"), exp)
+        self.doForce(shlex.split("--builder b1 --reason 'reason'"), exp)
+        self.doForce(shlex.split("--builder b1 --reason \"reason\""), exp)
+        
+        exp['reason'] = "longer reason"
+        self.doForce(shlex.split("b1 'longer reason'"), exp)
+        self.doForce(shlex.split("b1 longer reason"), exp)
+        self.doForce(shlex.split("--reason 'longer reason' b1"), exp)
+        
+
+class Create(unittest.TestCase):
+    def failUnlessIn(self, substring, string, msg=None):
+        # trial provides a version of this that requires python-2.3 to test
+        # strings.
+        self.failUnless(string.find(substring) != -1, msg)
+    def failUnlessExists(self, filename):
+        self.failUnless(os.path.exists(filename), "%s should exist" % filename)
+    def failIfExists(self, filename):
+        self.failIf(os.path.exists(filename), "%s should not exist" % filename)
+
+    def testMaster(self):
+        basedir = "test_runner.master"
+        options = runner.MasterOptions()
+        options.parseOptions(["-q", basedir])
+        cwd = os.getcwd()
+        runner.createMaster(options)
+        os.chdir(cwd)
+
+        tac = os.path.join(basedir, "buildbot.tac")
+        self.failUnless(os.path.exists(tac))
+        tacfile = open(tac,"rt").read()
+        self.failUnlessIn("basedir", tacfile)
+        self.failUnlessIn("configfile = r'master.cfg'", tacfile)
+        self.failUnlessIn("BuildMaster(basedir, configfile)", tacfile)
+
+        cfg = os.path.join(basedir, "master.cfg")
+        self.failIfExists(cfg)
+        samplecfg = os.path.join(basedir, "master.cfg.sample")
+        self.failUnlessExists(samplecfg)
+        cfgfile = open(samplecfg,"rt").read()
+        self.failUnlessIn("This is a sample buildmaster config file", cfgfile)
+
+        makefile = os.path.join(basedir, "Makefile.sample")
+        self.failUnlessExists(makefile)
+
+        # now verify that running it a second time (with the same options)
+        # does the right thing: nothing changes
+        runner.createMaster(options)
+        os.chdir(cwd)
+
+        self.failIfExists(os.path.join(basedir, "buildbot.tac.new"))
+        self.failUnlessExists(os.path.join(basedir, "master.cfg.sample"))
+
+        oldtac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+
+        # mutate Makefile.sample, since it should be rewritten
+        f = open(os.path.join(basedir, "Makefile.sample"), "rt")
+        oldmake = f.read()
+        f = open(os.path.join(basedir, "Makefile.sample"), "wt")
+        f.write(oldmake)
+        f.write("# additional line added\n")
+        f.close()
+
+        # also mutate master.cfg.sample
+        f = open(os.path.join(basedir, "master.cfg.sample"), "rt")
+        oldsamplecfg = f.read()
+        f = open(os.path.join(basedir, "master.cfg.sample"), "wt")
+        f.write(oldsamplecfg)
+        f.write("# additional line added\n")
+        f.close()
+
+        # now run it again (with different options)
+        options = runner.MasterOptions()
+        options.parseOptions(["-q", "--config", "other.cfg", basedir])
+        runner.createMaster(options)
+        os.chdir(cwd)
+
+        tac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+        self.failUnlessEqual(tac, oldtac, "shouldn't change existing .tac")
+        self.failUnlessExists(os.path.join(basedir, "buildbot.tac.new"))
+
+        make = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+        self.failUnlessEqual(make, oldmake, "*should* rewrite Makefile.sample")
+
+        samplecfg = open(os.path.join(basedir, "master.cfg.sample"),
+                         "rt").read()
+        self.failUnlessEqual(samplecfg, oldsamplecfg,
+                             "*should* rewrite master.cfg.sample")
+
+
+    def testSlave(self):
+        basedir = "test_runner.slave"
+        options = runner.SlaveOptions()
+        options.parseOptions(["-q", basedir, "buildmaster:1234",
+                              "botname", "passwd"])
+        cwd = os.getcwd()
+        runner.createSlave(options)
+        os.chdir(cwd)
+
+        tac = os.path.join(basedir, "buildbot.tac")
+        self.failUnless(os.path.exists(tac))
+        tacfile = open(tac,"rt").read()
+        self.failUnlessIn("basedir", tacfile)
+        self.failUnlessIn("host = 'buildmaster'", tacfile)
+        self.failUnlessIn("port = 1234", tacfile)
+        self.failUnlessIn("slavename = 'botname'", tacfile)
+        self.failUnlessIn("passwd = 'passwd'", tacfile)
+        self.failUnlessIn("keepalive = 600", tacfile)
+        self.failUnlessIn("BuildSlave(host, port, slavename", tacfile)
+
+        makefile = os.path.join(basedir, "Makefile.sample")
+        self.failUnlessExists(makefile)
+
+        self.failUnlessExists(os.path.join(basedir, "info", "admin"))
+        self.failUnlessExists(os.path.join(basedir, "info", "host"))
+        # edit one to make sure the later install doesn't change it
+        f = open(os.path.join(basedir, "info", "admin"), "wt")
+        f.write("updated at buildbot.example.org\n")
+        f.close()
+
+        # now verify that running it a second time (with the same options)
+        # does the right thing: nothing changes
+        runner.createSlave(options)
+        os.chdir(cwd)
+
+        self.failIfExists(os.path.join(basedir, "buildbot.tac.new"))
+        admin = open(os.path.join(basedir, "info", "admin"), "rt").read()
+        self.failUnlessEqual(admin, "updated at buildbot.example.org\n")
+
+
+        # mutate Makefile.sample, since it should be rewritten
+        oldmake = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+        f = open(os.path.join(basedir, "Makefile.sample"), "wt")
+        f.write(oldmake)
+        f.write("# additional line added\n")
+        f.close()
+        oldtac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+
+        # now run it again (with different options)
+        options = runner.SlaveOptions()
+        options.parseOptions(["-q", "--keepalive", "30",
+                              basedir, "buildmaster:9999",
+                              "newbotname", "passwd"])
+        runner.createSlave(options)
+        os.chdir(cwd)
+
+        tac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+        self.failUnlessEqual(tac, oldtac, "shouldn't change existing .tac")
+        self.failUnlessExists(os.path.join(basedir, "buildbot.tac.new"))
+        tacfile = open(os.path.join(basedir, "buildbot.tac.new"),"rt").read()
+        self.failUnlessIn("basedir", tacfile)
+        self.failUnlessIn("host = 'buildmaster'", tacfile)
+        self.failUnlessIn("port = 9999", tacfile)
+        self.failUnlessIn("slavename = 'newbotname'", tacfile)
+        self.failUnlessIn("passwd = 'passwd'", tacfile)
+        self.failUnlessIn("keepalive = 30", tacfile)
+        self.failUnlessIn("BuildSlave(host, port, slavename", tacfile)
+
+        make = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+        self.failUnlessEqual(make, oldmake, "*should* rewrite Makefile.sample")
+
+class Try(unittest.TestCase):
+    # test some aspects of the 'buildbot try' command
+    def makeOptions(self, contents):
+        if os.path.exists(".buildbot"):
+            shutil.rmtree(".buildbot")
+        os.mkdir(".buildbot")
+        open(os.path.join(".buildbot", "options"), "w").write(contents)
+
+    def testGetopt1(self):
+        opts = "try_connect = 'ssh'\n" + "try_builders = ['a']\n"
+        self.makeOptions(opts)
+        config = runner.TryOptions()
+        config.parseOptions([])
+        t = tryclient.Try(config)
+        self.failUnlessEqual(t.connect, "ssh")
+        self.failUnlessEqual(t.builderNames, ['a'])
+
+    def testGetopt2(self):
+        opts = ""
+        self.makeOptions(opts)
+        config = runner.TryOptions()
+        config.parseOptions(['--connect=ssh', '--builder', 'a'])
+        t = tryclient.Try(config)
+        self.failUnlessEqual(t.connect, "ssh")
+        self.failUnlessEqual(t.builderNames, ['a'])
+
+    def testGetopt3(self):
+        opts = ""
+        self.makeOptions(opts)
+        config = runner.TryOptions()
+        config.parseOptions(['--connect=ssh',
+                             '--builder', 'a', '--builder=b'])
+        t = tryclient.Try(config)
+        self.failUnlessEqual(t.connect, "ssh")
+        self.failUnlessEqual(t.builderNames, ['a', 'b'])
+
+    def testGetopt4(self):
+        opts = "try_connect = 'ssh'\n" + "try_builders = ['a']\n"
+        self.makeOptions(opts)
+        config = runner.TryOptions()
+        config.parseOptions(['--builder=b'])
+        t = tryclient.Try(config)
+        self.failUnlessEqual(t.connect, "ssh")
+        self.failUnlessEqual(t.builderNames, ['b'])
+
+    def testGetTopdir(self):
+        os.mkdir("gettopdir")
+        os.mkdir(os.path.join("gettopdir", "foo"))
+        os.mkdir(os.path.join("gettopdir", "foo", "bar"))
+        open(os.path.join("gettopdir", "1"),"w").write("1")
+        open(os.path.join("gettopdir", "foo", "2"),"w").write("2")
+        open(os.path.join("gettopdir", "foo", "bar", "3"),"w").write("3")
+
+        target = os.path.abspath("gettopdir")
+        t = tryclient.getTopdir("1", "gettopdir")
+        self.failUnlessEqual(os.path.abspath(t), target)
+        t = tryclient.getTopdir("1", os.path.join("gettopdir", "foo"))
+        self.failUnlessEqual(os.path.abspath(t), target)
+        t = tryclient.getTopdir("1", os.path.join("gettopdir", "foo", "bar"))
+        self.failUnlessEqual(os.path.abspath(t), target)
+
+        target = os.path.abspath(os.path.join("gettopdir", "foo"))
+        t = tryclient.getTopdir("2", os.path.join("gettopdir", "foo"))
+        self.failUnlessEqual(os.path.abspath(t), target)
+        t = tryclient.getTopdir("2", os.path.join("gettopdir", "foo", "bar"))
+        self.failUnlessEqual(os.path.abspath(t), target)
+
+        target = os.path.abspath(os.path.join("gettopdir", "foo", "bar"))
+        t = tryclient.getTopdir("3", os.path.join("gettopdir", "foo", "bar"))
+        self.failUnlessEqual(os.path.abspath(t), target)
+
+        nonexistent = "nonexistent\n29fis3kq\tBAR"
+        # hopefully there won't be a real file with that name between here
+        # and the filesystem root.
+        self.failUnlessRaises(ValueError, tryclient.getTopdir, nonexistent)
+

Added: vendor/buildbot/current/buildbot/test/test_scheduler.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_scheduler.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_scheduler.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,314 @@
+# -*- test-case-name: buildbot.test.test_scheduler -*-
+
+import os, time
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+from twisted.application import service
+from twisted.spread import pb
+
+from buildbot import scheduler, sourcestamp, buildset, status
+from buildbot.twcompat import maybeWait
+from buildbot.changes.changes import Change
+from buildbot.scripts import tryclient
+
+
+class FakeMaster(service.MultiService):
+    d = None
+    def submitBuildSet(self, bs):
+        self.sets.append(bs)
+        if self.d:
+            reactor.callLater(0, self.d.callback, bs)
+            self.d = None
+        return pb.Referenceable() # makes the cleanup work correctly
+
+class Scheduling(unittest.TestCase):
+    def setUp(self):
+        self.master = master = FakeMaster()
+        master.sets = []
+        master.startService()
+
+    def tearDown(self):
+        d = self.master.stopService()
+        return maybeWait(d)
+
+    def addScheduler(self, s):
+        s.setServiceParent(self.master)
+
+    def testPeriodic1(self):
+        self.addScheduler(scheduler.Periodic("quickly", ["a","b"], 2))
+        d = defer.Deferred()
+        reactor.callLater(5, d.callback, None)
+        d.addCallback(self._testPeriodic1_1)
+        return maybeWait(d)
+    def _testPeriodic1_1(self, res):
+        self.failUnless(len(self.master.sets) > 1)
+        s1 = self.master.sets[0]
+        self.failUnlessEqual(s1.builderNames, ["a","b"])
+        self.failUnlessEqual(s1.reason, "The Periodic scheduler named 'quickly' triggered this build")
+
+    def testNightly(self):
+        # now == 15-Nov-2005, 00:05:36 AM . By using mktime, this is
+        # converted into the local timezone, which happens to match what
+        # Nightly is going to do anyway.
+        MIN=60; HOUR=60*MIN; DAY=24*3600
+        now = time.mktime((2005, 11, 15, 0, 5, 36, 1, 319, 0))
+
+        s = scheduler.Nightly('nightly', ["a"], hour=3)
+        t = s.calculateNextRunTimeFrom(now)
+        self.failUnlessEqual(int(t-now), 2*HOUR+54*MIN+24)
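+        # Worked example (comment added for clarity): from 00:05:36 to the
+        # next run at 03:00:00 is 2h 54m 24s, i.e. 2*HOUR + 54*MIN + 24
+        # == 10464 seconds; minute is assumed to default to 0 here, which
+        # the expected value implies.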
+
+        s = scheduler.Nightly('nightly', ["a"], minute=[3,8,54])
+        t = s.calculateNextRunTimeFrom(now)
+        self.failUnlessEqual(int(t-now), 2*MIN+24)
+
+        s = scheduler.Nightly('nightly', ["a"],
+                              dayOfMonth=16, hour=1, minute=6)
+        t = s.calculateNextRunTimeFrom(now)
+        self.failUnlessEqual(int(t-now), DAY+HOUR+24)
+
+        s = scheduler.Nightly('nightly', ["a"],
+                              dayOfMonth=16, hour=1, minute=3)
+        t = s.calculateNextRunTimeFrom(now)
+        self.failUnlessEqual(int(t-now), DAY+57*MIN+24)
+
+        s = scheduler.Nightly('nightly', ["a"],
+                              dayOfMonth=15, hour=1, minute=3)
+        t = s.calculateNextRunTimeFrom(now)
+        self.failUnlessEqual(int(t-now), 57*MIN+24)
+
+        s = scheduler.Nightly('nightly', ["a"],
+                              dayOfMonth=15, hour=0, minute=3)
+        t = s.calculateNextRunTimeFrom(now)
+        self.failUnlessEqual(int(t-now), 30*DAY-3*MIN+24)
+
+
+    def isImportant(self, change):
+        if "important" in change.files:
+            return True
+        return False
+
+    def testBranch(self):
+        s = scheduler.Scheduler("b1", "branch1", 2, ["a","b"],
+                                fileIsImportant=self.isImportant)
+        self.addScheduler(s)
+
+        c0 = Change("carol", ["important"], "other branch", branch="other")
+        s.addChange(c0)
+        self.failIf(s.timer)
+        self.failIf(s.importantChanges)
+
+        c1 = Change("alice", ["important", "not important"], "some changes",
+                    branch="branch1")
+        s.addChange(c1)
+        c2 = Change("bob", ["not important", "boring"], "some more changes",
+                    branch="branch1")
+        s.addChange(c2)
+        c3 = Change("carol", ["important", "dull"], "even more changes",
+                    branch="branch1")
+        s.addChange(c3)
+        
+        self.failUnlessEqual(s.importantChanges, [c1,c3])
+        self.failUnlessEqual(s.unimportantChanges, [c2])
+        self.failUnless(s.timer)
+
+        d = defer.Deferred()
+        reactor.callLater(4, d.callback, None)
+        d.addCallback(self._testBranch_1)
+        return maybeWait(d)
+    def _testBranch_1(self, res):
+        self.failUnlessEqual(len(self.master.sets), 1)
+        s = self.master.sets[0].source
+        self.failUnlessEqual(s.branch, "branch1")
+        self.failUnlessEqual(s.revision, None)
+        self.failUnlessEqual(len(s.changes), 3)
+        self.failUnlessEqual(s.patch, None)
+
+
+    def testAnyBranch(self):
+        s = scheduler.AnyBranchScheduler("b1", None, 1, ["a","b"],
+                                         fileIsImportant=self.isImportant)
+        self.addScheduler(s)
+
+        c1 = Change("alice", ["important", "not important"], "some changes",
+                    branch="branch1")
+        s.addChange(c1)
+        c2 = Change("bob", ["not important", "boring"], "some more changes",
+                    branch="branch1")
+        s.addChange(c2)
+        c3 = Change("carol", ["important", "dull"], "even more changes",
+                    branch="branch1")
+        s.addChange(c3)
+
+        c4 = Change("carol", ["important"], "other branch", branch="branch2")
+        s.addChange(c4)
+
+        c5 = Change("carol", ["important"], "default branch", branch=None)
+        s.addChange(c5)
+
+        d = defer.Deferred()
+        reactor.callLater(2, d.callback, None)
+        d.addCallback(self._testAnyBranch_1)
+        return maybeWait(d)
+    def _testAnyBranch_1(self, res):
+        self.failUnlessEqual(len(self.master.sets), 3)
+        self.master.sets.sort(lambda a,b: cmp(a.source.branch,
+                                              b.source.branch))
+
+        s1 = self.master.sets[0].source
+        self.failUnlessEqual(s1.branch, None)
+        self.failUnlessEqual(s1.revision, None)
+        self.failUnlessEqual(len(s1.changes), 1)
+        self.failUnlessEqual(s1.patch, None)
+
+        s2 = self.master.sets[1].source
+        self.failUnlessEqual(s2.branch, "branch1")
+        self.failUnlessEqual(s2.revision, None)
+        self.failUnlessEqual(len(s2.changes), 3)
+        self.failUnlessEqual(s2.patch, None)
+
+        s3 = self.master.sets[2].source
+        self.failUnlessEqual(s3.branch, "branch2")
+        self.failUnlessEqual(s3.revision, None)
+        self.failUnlessEqual(len(s3.changes), 1)
+        self.failUnlessEqual(s3.patch, None)
+
+    def testAnyBranch2(self):
+        # like testAnyBranch but without fileIsImportant
+        s = scheduler.AnyBranchScheduler("b1", None, 2, ["a","b"])
+        self.addScheduler(s)
+        c1 = Change("alice", ["important", "not important"], "some changes",
+                    branch="branch1")
+        s.addChange(c1)
+        c2 = Change("bob", ["not important", "boring"], "some more changes",
+                    branch="branch1")
+        s.addChange(c2)
+        c3 = Change("carol", ["important", "dull"], "even more changes",
+                    branch="branch1")
+        s.addChange(c3)
+
+        c4 = Change("carol", ["important"], "other branch", branch="branch2")
+        s.addChange(c4)
+
+        d = defer.Deferred()
+        reactor.callLater(2, d.callback, None)
+        d.addCallback(self._testAnyBranch2_1)
+        return maybeWait(d)
+    def _testAnyBranch2_1(self, res):
+        self.failUnlessEqual(len(self.master.sets), 2)
+        self.master.sets.sort(lambda a,b: cmp(a.source.branch,
+                                              b.source.branch))
+        s1 = self.master.sets[0].source
+        self.failUnlessEqual(s1.branch, "branch1")
+        self.failUnlessEqual(s1.revision, None)
+        self.failUnlessEqual(len(s1.changes), 3)
+        self.failUnlessEqual(s1.patch, None)
+
+        s2 = self.master.sets[1].source
+        self.failUnlessEqual(s2.branch, "branch2")
+        self.failUnlessEqual(s2.revision, None)
+        self.failUnlessEqual(len(s2.changes), 1)
+        self.failUnlessEqual(s2.patch, None)
+
+
+    def createMaildir(self, jobdir):
+        os.mkdir(jobdir)
+        os.mkdir(os.path.join(jobdir, "new"))
+        os.mkdir(os.path.join(jobdir, "cur"))
+        os.mkdir(os.path.join(jobdir, "tmp"))
+
+    jobcounter = 1
+    def pushJob(self, jobdir, job):
+        while 1:
+            filename = "job_%d" % self.jobcounter
+            self.jobcounter += 1
+            if os.path.exists(os.path.join(jobdir, "new", filename)):
+                continue
+            if os.path.exists(os.path.join(jobdir, "tmp", filename)):
+                continue
+            if os.path.exists(os.path.join(jobdir, "cur", filename)):
+                continue
+            break
+        f = open(os.path.join(jobdir, "tmp", filename), "w")
+        f.write(job)
+        f.close()
+        os.rename(os.path.join(jobdir, "tmp", filename),
+                  os.path.join(jobdir, "new", filename))
+
+    def testTryJobdir(self):
+        self.master.basedir = "try_jobdir"
+        os.mkdir(self.master.basedir)
+        jobdir = "jobdir1"
+        jobdir_abs = os.path.join(self.master.basedir, jobdir)
+        self.createMaildir(jobdir_abs)
+        s = scheduler.Try_Jobdir("try1", ["a", "b"], jobdir)
+        self.addScheduler(s)
+        self.failIf(self.master.sets)
+        job1 = tryclient.createJobfile("buildsetID",
+                                       "branch1", "123", 1, "diff",
+                                       ["a", "b"])
+        self.master.d = d = defer.Deferred()
+        self.pushJob(jobdir_abs, job1)
+        d.addCallback(self._testTryJobdir_1)
+        # N.B.: if we don't have DNotify, we poll every 10 seconds, so don't
+        # set a .timeout here shorter than that. TODO: make it possible to
+        # set the polling interval, so we can make it shorter.
+        return maybeWait(d, 5)
+
+    def _testTryJobdir_1(self, bs):
+        self.failUnlessEqual(bs.builderNames, ["a", "b"])
+        self.failUnlessEqual(bs.source.branch, "branch1")
+        self.failUnlessEqual(bs.source.revision, "123")
+        self.failUnlessEqual(bs.source.patch, (1, "diff"))
+
+
+    def testTryUserpass(self):
+        up = [("alice","pw1"), ("bob","pw2")]
+        s = scheduler.Try_Userpass("try2", ["a", "b"], 0, userpass=up)
+        self.addScheduler(s)
+        port = s.getPort()
+        config = {'connect': 'pb',
+                  'username': 'alice',
+                  'passwd': 'pw1',
+                  'master': "localhost:%d" % port,
+                  'builders': ["a", "b"],
+                  }
+        t = tryclient.Try(config)
+        ss = sourcestamp.SourceStamp("branch1", "123", (1, "diff"))
+        t.sourcestamp = ss
+        d2 = self.master.d = defer.Deferred()
+        d = t.deliverJob()
+        d.addCallback(self._testTryUserpass_1, t, d2)
+        return maybeWait(d, 5)
+    testTryUserpass.timeout = 5
+    def _testTryUserpass_1(self, res, t, d2):
+        # at this point, the Try object should have a RemoteReference to the
+        # status object. The FakeMaster returns a stub.
+        self.failUnless(t.buildsetStatus)
+        d2.addCallback(self._testTryUserpass_2, t)
+        return d2
+    def _testTryUserpass_2(self, bs, t):
+        # this should be the BuildSet submitted by the TryScheduler
+        self.failUnlessEqual(bs.builderNames, ["a", "b"])
+        self.failUnlessEqual(bs.source.branch, "branch1")
+        self.failUnlessEqual(bs.source.revision, "123")
+        self.failUnlessEqual(bs.source.patch, (1, "diff"))
+
+        t.cleanup()
+
+        # twisted-2.0.1 (but not later versions) seems to require a reactor
+        # iteration before stopListening actually works. TODO: investigate
+        # this.
+        d = defer.Deferred()
+        reactor.callLater(0, d.callback, None)
+        return d
+    
+    def testGetBuildSets(self):
+        # validate IStatus.getBuildSets
+        s = status.builder.Status(None, ".")
+        bs1 = buildset.BuildSet(["a","b"], sourcestamp.SourceStamp(),
+                                reason="one", bsid="1")
+        s.buildsetSubmitted(bs1.status)
+        self.failUnlessEqual(s.getBuildSets(), [bs1.status])
+        bs1.status.notifyFinishedWatchers()
+        self.failUnlessEqual(s.getBuildSets(), [])

Added: vendor/buildbot/current/buildbot/test/test_shell.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_shell.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_shell.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,139 @@
+
+
+# test step.ShellCommand and the slave-side commands.ShellCommand
+
+import sys, time, os
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+from twisted.python import util
+from buildbot.slave.commands import SlaveShellCommand
+from buildbot.twcompat import maybeWait
+from buildbot.test.runutils import SlaveCommandTestBase
+
+class SlaveSide(SlaveCommandTestBase, unittest.TestCase):
+    def testOne(self):
+        self.setUpBuilder("test_shell.testOne")
+        emitcmd = util.sibpath(__file__, "emit.py")
+        args = {
+            'command': [sys.executable, emitcmd, "0"],
+            'workdir': ".",
+            }
+        d = self.startCommand(SlaveShellCommand, args)
+        d.addCallback(self.collectUpdates)
+        def _check(logs):
+            self.failUnlessEqual(logs['stdout'], "this is stdout\n")
+            self.failUnlessEqual(logs['stderr'], "this is stderr\n")
+        d.addCallback(_check)
+        return maybeWait(d)
+
+    # TODO: move test_slavecommand.Shell and .ShellPTY over here
+
+    def _generateText(self, filename):
+        lines = []
+        for i in range(3):
+            lines.append("this is %s %d\n" % (filename, i))
+        return "".join(lines)
+
+    def testLogFiles_0(self):
+        return self._testLogFiles(0)
+
+    def testLogFiles_1(self):
+        return self._testLogFiles(1)
+
+    def testLogFiles_2(self):
+        return self._testLogFiles(2)
+
+    def testLogFiles_3(self):
+        return self._testLogFiles(3)
+
+    def _testLogFiles(self, mode):
+        basedir = "test_shell.testLogFiles"
+        self.setUpBuilder(basedir)
+        # emitlogs.py writes two lines to stdout and two logfiles, one second
+        # apart. Then it waits for us to write something to stdin, then it
+        # writes one more line.
+
+        if mode != 3:
+            # we write something to the log file first, to exercise the logic
+            # that distinguishes between the old file and the one modified
+            # by the ShellCommand. We set the timestamp back 5 seconds so
+            # that timestamps can be used to distinguish old from new.
+            log2file = os.path.join(basedir, "log2.out")
+            f = open(log2file, "w")
+            f.write("dummy text\n")
+            f.close()
+            earlier = time.time() - 5
+            os.utime(log2file, (earlier, earlier))
+
+        if mode == 3:
+            # mode=3 doesn't create the old logfiles in the first place, but
+            # then behaves like mode=1 (where the command pauses before
+            # creating them).
+            mode = 1
+
+        # mode=1 will cause emitlogs.py to delete the old logfiles first, and
+        # then wait two seconds before creating the new files. mode=0 does
+        # not do this.
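+        # Summarizing the checks below: modes 0 and 1 should both deliver the
+        # full log2/log3 contents, mode=2 is expected to produce no log2/log3
+        # updates at all, and mode=3 starts without the stale files but
+        # otherwise behaves like mode=1.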
+        args = {
+            'command': [sys.executable,
+                        util.sibpath(__file__, "emitlogs.py"),
+                        "%s" % mode],
+            'workdir': ".",
+            'logfiles': {"log2": "log2.out",
+                         "log3": "log3.out"},
+            'keep_stdin_open': True,
+            }
+        finishd = self.startCommand(SlaveShellCommand, args)
+        # The first batch of lines is written immediately. The second is
+        # written after a pause of one second. We poll once per second until
+        # we see both batches.
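+        # _check_and_wait (below) re-arms itself with reactor.callLater(1, ...)
+        # and gives up by raising defer.TimeoutError once self._check_timeout
+        # polls have elapsed; when both batches are visible it pokes the
+        # child's stdin so emitlogs.py can write its final line.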
+
+        self._check_timeout = 10
+        d = self._check_and_wait()
+        def _wait_for_finish(res, finishd):
+            return finishd
+        d.addCallback(_wait_for_finish, finishd)
+        d.addCallback(self.collectUpdates)
+        def _check(logs):
+            self.failUnlessEqual(logs['stdout'], self._generateText("stdout"))
+            if mode == 2:
+                self.failIf(('log','log2') in logs)
+                self.failIf(('log','log3') in logs)
+            else:
+                self.failUnlessEqual(logs[('log','log2')],
+                                     self._generateText("log2"))
+                self.failUnlessEqual(logs[('log','log3')],
+                                     self._generateText("log3"))
+        d.addCallback(_check)
+        d.addBoth(self._maybePrintError)
+        return maybeWait(d)
+
+    def _check_and_wait(self, res=None):
+        self._check_timeout -= 1
+        if self._check_timeout <= 0:
+            raise defer.TimeoutError("gave up on command")
+        logs = self.collectUpdates()
+        if logs.get('stdout') == "this is stdout 0\nthis is stdout 1\n":
+            # the emitlogs.py process is now waiting for something to arrive
+            # on stdin
+            self.cmd.command.pp.transport.write("poke\n")
+            return
+        if not self.cmd.running:
+            self.fail("command finished too early")
+        spin = defer.Deferred()
+        spin.addCallback(self._check_and_wait)
+        reactor.callLater(1, spin.callback, None)
+        return spin
+
+    def _maybePrintError(self, res):
+        rc = self.findRC()
+        if rc != 0:
+            print "Command ended with rc=%s" % rc
+            print "STDERR:"
+            self.printStderr()
+        return res
+
+    # MAYBE TODO: a command which appends to an existing logfile should
+    # result in only the new text being sent up to the master. I need to
+    # think about this more first.
+

Added: vendor/buildbot/current/buildbot/test/test_slavecommand.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_slavecommand.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_slavecommand.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,254 @@
+# -*- test-case-name: buildbot.test.test_slavecommand -*-
+
+from twisted.trial import unittest
+from twisted.internet import reactor, interfaces
+from twisted.python import runtime, failure, util
+from buildbot.twcompat import maybeWait
+
+import os, sys
+
+from buildbot.slave import commands
+SlaveShellCommand = commands.SlaveShellCommand
+
+from buildbot.test.runutils import SignalMixin, FakeSlaveBuilder
+
+# test slavecommand.py by running the various commands with a fake
+# SlaveBuilder object that logs the calls to sendUpdate()
+
+
+
+class ShellBase(SignalMixin):
+
+    def setUp(self):
+        self.basedir = "test_slavecommand"
+        if not os.path.isdir(self.basedir):
+            os.mkdir(self.basedir)
+        self.subdir = os.path.join(self.basedir, "subdir")
+        if not os.path.isdir(self.subdir):
+            os.mkdir(self.subdir)
+        self.builder = FakeSlaveBuilder(self.usePTY, self.basedir)
+        self.emitcmd = util.sibpath(__file__, "emit.py")
+        self.subemitcmd = os.path.join(util.sibpath(__file__, "subdir"),
+                                       "emit.py")
+        self.sleepcmd = util.sibpath(__file__, "sleep.py")
+
+    def failUnlessIn(self, substring, string):
+        self.failUnless(string.find(substring) != -1,
+                        "'%s' not in '%s'" % (substring, string))
+
+    def getfile(self, which):
+        got = ""
+        for r in self.builder.updates:
+            if r.has_key(which):
+                got += r[which]
+        return got
+
+    def checkOutput(self, expected):
+        """
+        @type  expected: list of (streamname, contents) tuples
+        @param expected: the expected output
+        """
+        expected_linesep = os.linesep
+        if self.usePTY:
+            # PTYs change the line ending. I'm not sure why.
+            expected_linesep = "\r\n"
+        expected = [(stream, contents.replace("\n", expected_linesep, 1000))
+                    for (stream, contents) in expected]
+        if self.usePTY:
+            # PTYs merge stdout+stderr into a single stream
+            expected = [('stdout', contents)
+                        for (stream, contents) in expected]
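+        # For illustration: under a PTY an expected list like
+        #   [('stdout', "out\n"), ('stderr', "err\n")]
+        # has by this point been rewritten to
+        #   [('stdout', "out\r\n"), ('stdout', "err\r\n")]
+        # so the per-stream merge below only compares against the single
+        # 'stdout' stream that the PTY produces.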
+        # now merge everything into one string per stream
+        streams = {}
+        for (stream, contents) in expected:
+            streams[stream] = streams.get(stream, "") + contents
+        for (stream, contents) in streams.items():
+            got = self.getfile(stream)
+            self.assertEquals(got, contents)
+
+    def getrc(self):
+        self.failUnless(self.builder.updates[-1].has_key('rc'))
+        got = self.builder.updates[-1]['rc']
+        return got
+    def checkrc(self, expected):
+        got = self.getrc()
+        self.assertEquals(got, expected)
+        
+    def testShell1(self):
+        targetfile = os.path.join(self.basedir, "log1.out")
+        if os.path.exists(targetfile):
+            os.unlink(targetfile)
+        cmd = "%s %s 0" % (sys.executable, self.emitcmd)
+        args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        expected = [('stdout', "this is stdout\n"),
+                    ('stderr', "this is stderr\n")]
+        d.addCallback(self._checkPass, expected, 0)
+        def _check_targetfile(res):
+            self.failUnless(os.path.exists(targetfile))
+        d.addCallback(_check_targetfile)
+        return maybeWait(d)
+
+    def _checkPass(self, res, expected, rc):
+        self.checkOutput(expected)
+        self.checkrc(rc)
+
+    def testShell2(self):
+        cmd = [sys.executable, self.emitcmd, "0"]
+        args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        expected = [('stdout', "this is stdout\n"),
+                    ('stderr', "this is stderr\n")]
+        d.addCallback(self._checkPass, expected, 0)
+        return maybeWait(d)
+
+    def testShellRC(self):
+        cmd = [sys.executable, self.emitcmd, "1"]
+        args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        expected = [('stdout', "this is stdout\n"),
+                    ('stderr', "this is stderr\n")]
+        d.addCallback(self._checkPass, expected, 1)
+        return maybeWait(d)
+
+    def testShellEnv(self):
+        cmd = "%s %s 0" % (sys.executable, self.emitcmd)
+        args = {'command': cmd, 'workdir': '.',
+                'env': {'EMIT_TEST': "envtest"}, 'timeout': 60}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        expected = [('stdout', "this is stdout\n"),
+                    ('stderr', "this is stderr\n"),
+                    ('stdout', "EMIT_TEST: envtest\n"),
+                    ]
+        d.addCallback(self._checkPass, expected, 0)
+        return maybeWait(d)
+
+    def testShellSubdir(self):
+        targetfile = os.path.join(self.basedir, "subdir", "log1.out")
+        if os.path.exists(targetfile):
+            os.unlink(targetfile)
+        cmd = "%s %s 0" % (sys.executable, self.subemitcmd)
+        args = {'command': cmd, 'workdir': "subdir", 'timeout': 60}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        expected = [('stdout', "this is stdout in subdir\n"),
+                    ('stderr', "this is stderr\n")]
+        d.addCallback(self._checkPass, expected, 0)
+        def _check_targetfile(res):
+            self.failUnless(os.path.exists(targetfile))
+        d.addCallback(_check_targetfile)
+        return maybeWait(d)
+
+    def testShellMissingCommand(self):
+        args = {'command': "/bin/EndWorldHungerAndMakePigsFly",
+                'workdir': '.', 'timeout': 10,
+                'env': {"LC_ALL": "C"},
+                }
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        d.addCallback(self._testShellMissingCommand_1)
+        return maybeWait(d)
+    def _testShellMissingCommand_1(self, res):
+        self.failIfEqual(self.getrc(), 0)
+        # we used to check the error message to make sure it said something
+        # about a missing command, but there are a variety of shells out
+        # there, and they emit messages in a variety of languages, so we
+        # stopped trying.
+
+    def testTimeout(self):
+        args = {'command': [sys.executable, self.sleepcmd, "10"],
+                'workdir': '.', 'timeout': 2}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        d.addCallback(self._testTimeout_1)
+        return maybeWait(d)
+    def _testTimeout_1(self, res):
+        self.failIfEqual(self.getrc(), 0)
+        got = self.getfile('header')
+        self.failUnlessIn("command timed out: 2 seconds without output", got)
+        if runtime.platformType == "posix":
+            # the "killing pid" message is not present in windows
+            self.failUnlessIn("killing pid", got)
+        # but the process *ought* to be killed somehow
+        self.failUnlessIn("process killed by signal", got)
+        #print got
+    if runtime.platformType != 'posix':
+        testTimeout.todo = "timeout doesn't appear to work under windows"
+
+    def testInterrupt1(self):
+        args = {'command': [sys.executable, self.sleepcmd, "10"],
+                'workdir': '.', 'timeout': 20}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        reactor.callLater(1, c.interrupt)
+        d.addCallback(self._testInterrupt1_1)
+        return maybeWait(d)
+    def _testInterrupt1_1(self, res):
+        self.failIfEqual(self.getrc(), 0)
+        got = self.getfile('header')
+        self.failUnlessIn("command interrupted", got)
+        if runtime.platformType == "posix":
+            self.failUnlessIn("process killed by signal", got)
+    if runtime.platformType != 'posix':
+        testInterrupt1.todo = "interrupt doesn't appear to work under windows"
+
+
+    # todo: twisted-specific command tests
+
+class Shell(ShellBase, unittest.TestCase):
+    usePTY = False
+
+    def testInterrupt2(self):
+        # test the backup timeout. This doesn't work under a PTY, because the
+        # transport.loseConnection we do in the timeout handler actually
+        # *does* kill the process.
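+        # Expected sequence: interrupt() fires after one second, the normal
+        # kill is defeated (KILL=None below), so once BACKUP_TIMEOUT expires
+        # the command should give up and errback with commands.TimeoutError,
+        # which _testInterrupt2_1 checks for.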
+        args = {'command': [sys.executable, self.sleepcmd, "5"],
+                'workdir': '.', 'timeout': 20}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        c.command.BACKUP_TIMEOUT = 1
+        # make it unable to kill the child, by changing the signal it uses
+        # from SIGKILL to the do-nothing signal 0.
+        c.command.KILL = None
+        reactor.callLater(1, c.interrupt)
+        d.addBoth(self._testInterrupt2_1)
+        return maybeWait(d)
+    def _testInterrupt2_1(self, res):
+        # the slave should raise a TimeoutError exception. In a normal build
+        # process (i.e. one that uses step.RemoteShellCommand), this
+        # exception will be handed to the Step, which will acquire an ERROR
+        # status. In our test environment, it isn't such a big deal.
+        self.failUnless(isinstance(res, failure.Failure),
+                        "res is not a Failure: %s" % (res,))
+        self.failUnless(res.check(commands.TimeoutError))
+        self.checkrc(-1)
+        return
+        # the command is still actually running. Start another command, to
+        # make sure that a) the old command's output doesn't interfere with
+        # the new one, and b) the old command's actual termination doesn't
+        # break anything
+        args = {'command': [sys.executable, self.sleepcmd, "5"],
+                'workdir': '.', 'timeout': 20}
+        c = SlaveShellCommand(self.builder, None, args)
+        d = c.start()
+        d.addCallback(self._testInterrupt2_2)
+        return d
+    def _testInterrupt2_2(self, res):
+        self.checkrc(0)
+        # N.B.: under windows, the trial process hangs out for another few
+        # seconds. I assume that the win32eventreactor is waiting for one of
+        # the lingering child processes to really finish.
+
+haveProcess = interfaces.IReactorProcess(reactor, None)
+if runtime.platformType == 'posix':
+    # test with PTYs also
+    class ShellPTY(ShellBase, unittest.TestCase):
+        usePTY = True
+    if not haveProcess:
+        ShellPTY.skip = "this reactor doesn't support IReactorProcess"
+if not haveProcess:
+    Shell.skip = "this reactor doesn't support IReactorProcess"

Added: vendor/buildbot/current/buildbot/test/test_slaves.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_slaves.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_slaves.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,433 @@
+# -*- test-case-name: buildbot.test.test_slaves -*-
+
+from twisted.trial import unittest
+from buildbot.twcompat import maybeWait
+from twisted.internet import defer, reactor
+from twisted.python import log
+
+from buildbot.test.runutils import RunMixin
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.status.builder import SUCCESS
+from buildbot.slave import bot
+
+config_1 = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['bots'] = [('bot1', 'sekrit'), ('bot2', 'sekrit'), ('bot3', 'sekrit')]
+c['sources'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['schedulers'] = []
+
+f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)])
+
+c['builders'] = [
+    {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
+     'builddir': 'b1', 'factory': f1},
+    ]
+"""
+
+config_2 = config_1 + """
+
+c['builders'] = [
+    {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
+     'builddir': 'b1', 'factory': f2},
+    ]
+
+"""
+
+class Slave(RunMixin, unittest.TestCase):
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(config_1)
+        self.master.startService()
+        d = self.connectSlave(["b1"])
+        d.addCallback(lambda res: self.connectSlave(["b1"], "bot2"))
+        return maybeWait(d)
+
+    def doBuild(self, buildername):
+        br = BuildRequest("forced", SourceStamp())
+        d = br.waitUntilFinished()
+        self.control.getBuilder(buildername).requestBuild(br)
+        return d
+
+    def testSequence(self):
+        # make sure both slaves appear in the list.
+        attached_slaves = [c for c in self.master.botmaster.slaves.values()
+                           if c.slave]
+        self.failUnlessEqual(len(attached_slaves), 2)
+        b = self.master.botmaster.builders["b1"]
+        self.failUnlessEqual(len(b.slaves), 2)
+
+        # since the current scheduling algorithm is simple and does not
+        # rotate or attempt any sort of load-balancing, two builds in
+        # sequence should both use the first slave. This may change later if
+        # we move to a more sophisticated scheme.
+
+        d = self.doBuild("b1")
+        d.addCallback(self._testSequence_1)
+        return maybeWait(d)
+    def _testSequence_1(self, res):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+
+        d = self.doBuild("b1")
+        d.addCallback(self._testSequence_2)
+        return d
+    def _testSequence_2(self, res):
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+
+
+    def testSimultaneous(self):
+        # make sure we can actually run two builds at the same time
+        d1 = self.doBuild("b1")
+        d2 = self.doBuild("b1")
+        d1.addCallback(self._testSimultaneous_1, d2)
+        return maybeWait(d1)
+    def _testSimultaneous_1(self, res, d2):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+        d2.addCallback(self._testSimultaneous_2)
+        return d2
+    def _testSimultaneous_2(self, res):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot2")
+
+    def testFallback1(self):
+        # detach the first slave, verify that a build is run using the second
+        # slave instead
+        d = self.shutdownSlave("bot1", "b1")
+        d.addCallback(self._testFallback1_1)
+        return maybeWait(d)
+    def _testFallback1_1(self, res):
+        attached_slaves = [c for c in self.master.botmaster.slaves.values()
+                           if c.slave]
+        self.failUnlessEqual(len(attached_slaves), 1)
+        self.failUnlessEqual(len(self.master.botmaster.builders["b1"].slaves),
+                             1)
+        d = self.doBuild("b1")
+        d.addCallback(self._testFallback1_2)
+        return d
+    def _testFallback1_2(self, res):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot2")
+
+    def testFallback2(self):
+        # Disable the first slave, so that a slaveping will timeout. Then
+        # start a build, and verify that the non-failing (second) one is
+        # claimed for the build, and that the failing one is removed from the
+        # list.
+
+        # reduce the ping time so we'll failover faster
+        self.master.botmaster.builders["b1"].START_BUILD_TIMEOUT = 1
+        self.disappearSlave("bot1", "b1")
+        d = self.doBuild("b1")
+        d.addCallback(self._testFallback2_1)
+        return maybeWait(d)
+    def _testFallback2_1(self, res):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot2")
+        b1slaves = self.master.botmaster.builders["b1"].slaves
+        self.failUnlessEqual(len(b1slaves), 1)
+        self.failUnlessEqual(b1slaves[0].slave.slavename, "bot2")
+
+
+    def notFinished(self, brs):
+        # utility method
+        builds = brs.getBuilds()
+        self.failIf(len(builds) > 1)
+        if builds:
+            self.failIf(builds[0].isFinished())
+
+    def testDontClaimPingingSlave(self):
+        # have two slaves connect for the same builder. Do something to the
+        # first one so that slavepings are delayed (but do not fail
+        # outright).
+        timers = []
+        self.slaves['bot1'].debugOpts["stallPings"] = (10, timers)
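+        # (stallPings is a slave-side debug option; presumably it delays ping
+        # responses by 10 seconds and records the delaying timer in `timers`,
+        # which _testDontClaimPingingSlave_2 later resets to let the ping
+        # complete.)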
+        br = BuildRequest("forced", SourceStamp())
+        d1 = br.waitUntilFinished()
+        self.control.getBuilder("b1").requestBuild(br)
+        s1 = br.status # this is a BuildRequestStatus
+        # give it a chance to start pinging
+        d2 = defer.Deferred()
+        d2.addCallback(self._testDontClaimPingingSlave_1, d1, s1, timers)
+        reactor.callLater(1, d2.callback, None)
+        return maybeWait(d2)
+    def _testDontClaimPingingSlave_1(self, res, d1, s1, timers):
+        # now the first build is running (waiting on the ping), so start the
+        # second build. This should claim the second slave, not the first,
+        # because the first is busy doing the ping.
+        self.notFinished(s1)
+        d3 = self.doBuild("b1")
+        d3.addCallback(self._testDontClaimPingingSlave_2, d1, s1, timers)
+        return d3
+    def _testDontClaimPingingSlave_2(self, res, d1, s1, timers):
+        self.failUnlessEqual(res.getSlavename(), "bot2")
+        self.notFinished(s1)
+        # now let the ping complete
+        self.failUnlessEqual(len(timers), 1)
+        timers[0].reset(0)
+        d1.addCallback(self._testDontClaimPingingSlave_3)
+        return d1
+    def _testDontClaimPingingSlave_3(self, res):
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+
+config_3 = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['bots'] = [('bot1', 'sekrit')]
+c['sources'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['schedulers'] = []
+
+f1 = factory.BuildFactory([s(dummy.Wait, handle='one')])
+f2 = factory.BuildFactory([s(dummy.Wait, handle='two')])
+f3 = factory.BuildFactory([s(dummy.Wait, handle='three')])
+
+c['builders'] = [
+    {'name': 'b1', 'slavenames': ['bot1'],
+     'builddir': 'b1', 'factory': f1},
+    ]
+"""
+
+config_4 = config_3 + """
+c['builders'] = [
+    {'name': 'b1', 'slavenames': ['bot1'],
+     'builddir': 'b1', 'factory': f2},
+    ]
+"""
+
+config_5 = config_3 + """
+c['builders'] = [
+    {'name': 'b1', 'slavenames': ['bot1'],
+     'builddir': 'b1', 'factory': f3},
+    ]
+"""
+
+from buildbot.slave.commands import waitCommandRegistry
+
+class Reconfig(RunMixin, unittest.TestCase):
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(config_3)
+        self.master.startService()
+        d = self.connectSlave(["b1"])
+        return maybeWait(d)
+
+    def _one_started(self):
+        log.msg("testReconfig._one_started")
+        self.build1_started = True
+        self.d1.callback(None)
+        return self.d2
+
+    def _two_started(self):
+        log.msg("testReconfig._two_started")
+        self.build2_started = True
+        self.d3.callback(None)
+        return self.d4
+
+    def _three_started(self):
+        log.msg("testReconfig._three_started")
+        self.build3_started = True
+        self.d5.callback(None)
+        return self.d6
+
+    def testReconfig(self):
+        # reconfiguring a Builder should not interrupt any running Builds. No
+        # queued BuildRequests should be lost. The next Build started should
+        # use the new process.
+        slave1 = self.slaves['bot1']
+        bot1 = slave1.getServiceNamed('bot')
+        sb1 = bot1.builders['b1']
+        self.failUnless(isinstance(sb1, bot.SlaveBuilder))
+        self.failUnless(sb1.running)
+        b1 = self.master.botmaster.builders['b1']
+        self.orig_b1 = b1
+
+        self.d1 = d1 = defer.Deferred()
+        self.d2 = d2 = defer.Deferred()
+        self.d3, self.d4 = defer.Deferred(), defer.Deferred()
+        self.d5, self.d6 = defer.Deferred(), defer.Deferred()
+        self.build1_started = False
+        self.build2_started = False
+        self.build3_started = False
+        waitCommandRegistry[("one","build1")] = self._one_started
+        waitCommandRegistry[("two","build2")] = self._two_started
+        waitCommandRegistry[("three","build3")] = self._three_started
+
+        # use different branches to make sure these cannot be merged
+        br1 = BuildRequest("build1", SourceStamp(branch="1"))
+        b1.submitBuildRequest(br1)
+        br2 = BuildRequest("build2", SourceStamp(branch="2"))
+        b1.submitBuildRequest(br2)
+        br3 = BuildRequest("build3", SourceStamp(branch="3"))
+        b1.submitBuildRequest(br3)
+        self.requests = (br1, br2, br3)
+        # all three are now in the queue
+
+        # wait until the first one has started
+        d1.addCallback(self._testReconfig_2)
+        return d1
+
+    def _testReconfig_2(self, res):
+        log.msg("_testReconfig_2")
+        # confirm that it is building
+        brs = self.requests[0].status.getBuilds()
+        self.failUnlessEqual(len(brs), 1)
+        self.build1 = brs[0]
+        self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait")
+        # br1 is building, br2 and br3 are in the queue (in that order). Now
+        # we reconfigure the Builder.
+        self.failUnless(self.build1_started)
+        d = self.master.loadConfig(config_4)
+        d.addCallback(self._testReconfig_3)
+        return d
+
+    def _testReconfig_3(self, res):
+        log.msg("_testReconfig_3")
+        # now check to see that br1 is still building, and that br2 and br3
+        # are in the queue of the new builder
+        b1 = self.master.botmaster.builders['b1']
+        self.failIfIdentical(b1, self.orig_b1)
+        self.failIf(self.build1.isFinished())
+        self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait")
+        self.failUnlessEqual(len(b1.buildable), 2)
+        self.failUnless(self.requests[1] in b1.buildable)
+        self.failUnless(self.requests[2] in b1.buildable)
+
+        # allow br1 to finish, and make sure its status is delivered normally
+        d = self.requests[0].waitUntilFinished()
+        d.addCallback(self._testReconfig_4)
+        self.d2.callback(None)
+        return d
+
+    def _testReconfig_4(self, bs):
+        log.msg("_testReconfig_4")
+        self.failUnlessEqual(bs.getReason(), "build1")
+        self.failUnless(bs.isFinished())
+        self.failUnlessEqual(bs.getResults(), SUCCESS)
+
+        # at this point, the first build has finished, and there is a pending
+        # call to start the second build. Once that pending call fires, there
+        # is a network roundtrip before the 'wait' RemoteCommand is delivered
+        # to the slave. We need to wait for both events to happen before we
+        # can check to make sure it is using the correct process. Just wait a
+        # full second.
+        d = defer.Deferred()
+        d.addCallback(self._testReconfig_5)
+        reactor.callLater(1, d.callback, None)
+        return d
+
+    def _testReconfig_5(self, res):
+        log.msg("_testReconfig_5")
+        # at this point the next build ought to be running
+        b1 = self.master.botmaster.builders['b1']
+        self.failUnlessEqual(len(b1.buildable), 1)
+        self.failUnless(self.requests[2] in b1.buildable)
+        self.failUnlessEqual(len(b1.building), 1)
+        # and it ought to be using the new process
+        self.failUnless(self.build2_started)
+
+        # now, while the second build is running, change the config multiple
+        # times.
+
+        d = self.master.loadConfig(config_3)
+        d.addCallback(lambda res: self.master.loadConfig(config_4))
+        d.addCallback(lambda res: self.master.loadConfig(config_5))
+        def _done(res):
+            # then once that's done, allow the second build to finish and
+            # wait for it to complete
+            da = self.requests[1].waitUntilFinished()
+            self.d4.callback(None)
+            return da
+        d.addCallback(_done)
+        def _done2(res):
+            # and once *that*'s done, wait another second to let the third
+            # build start
+            db = defer.Deferred()
+            reactor.callLater(1, db.callback, None)
+            return db
+        d.addCallback(_done2)
+        d.addCallback(self._testReconfig_6)
+        return d
+
+    def _testReconfig_6(self, res):
+        log.msg("_testReconfig_6")
+        # now check to see that the third build is running
+        self.failUnless(self.build3_started)
+
+        # we're done
+
+
+
+class Slave2(RunMixin, unittest.TestCase):
+
+    revision = 0
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(config_1)
+        self.master.startService()
+
+    def doBuild(self, buildername, reason="forced"):
+        # we need to prevent these builds from being merged, so we create
+        # each of them with a different revision specifier. The revision is
+        # ignored because our build process does not have a source checkout
+        # step.
+        self.revision += 1
+        br = BuildRequest(reason, SourceStamp(revision=self.revision))
+        d = br.waitUntilFinished()
+        self.control.getBuilder(buildername).requestBuild(br)
+        return d
+
+    def testFirstComeFirstServed(self):
+        # submit three builds, then connect a slave which fails the
+        # slaveping. The first build will claim the slave, do the slaveping,
+        # give up, and re-queue the build. Verify that the build gets
+        # re-queued in front of all other builds. This may be tricky, because
+        # the other builds may attempt to claim the just-failed slave.
+
+        d1 = self.doBuild("b1", "first")
+        d2 = self.doBuild("b1", "second")
+        #buildable = self.master.botmaster.builders["b1"].buildable
+        #print [b.reason for b in buildable]
+
+        # specifically, I want the poor build to get precedence over any
+        # others that were waiting. To test this, we need more builds than
+        # slaves.
+
+        # now connect a broken slave. The first build starts as soon as it
+        # connects, so by the time we get to our _1 method, the ill-fated
+        # build has already started.
+        d = self.connectSlave(["b1"], opts={"failPingOnce": True})
+        d.addCallback(self._testFirstComeFirstServed_1, d1, d2)
+        return maybeWait(d)
+    def _testFirstComeFirstServed_1(self, res, d1, d2):
+        # the master has sent the slaveping. When this is received, it will
+        # fail, causing the master to hang up on the slave. When it
+        # reconnects, it should find the first build at the front of the
+        # queue. If we simply wait for both builds to complete, then look at
+        # the status logs, we should see that the builds ran in the correct
+        # order.
+
+        d = defer.DeferredList([d1,d2])
+        d.addCallback(self._testFirstComeFirstServed_2)
+        return d
+    def _testFirstComeFirstServed_2(self, res):
+        b = self.status.getBuilder("b1")
+        builds = b.getBuild(0), b.getBuild(1)
+        reasons = [build.getReason() for build in builds]
+        self.failUnlessEqual(reasons, ["first", "second"])
+

Added: vendor/buildbot/current/buildbot/test/test_status.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_status.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_status.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,978 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+import email, os
+
+from twisted.internet import defer, reactor
+from twisted.trial import unittest
+
+from buildbot import interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.twcompat import implements, providedBy, maybeWait
+from buildbot.status import builder, base
+
+mail = None
+try:
+    from buildbot.status import mail
+except ImportError:
+    pass
+from buildbot.status import progress, client # NEEDS COVERAGE
+from buildbot.test.runutils import RunMixin
+
+class MyStep:
+    build = None
+    def getName(self):
+        return "step"
+
+class MyLogFileProducer(builder.LogFileProducer):
+    # The reactor.callLater(0) in LogFileProducer.resumeProducing is a bit of
+    # a nuisance from a testing point of view. This subclass adds a Deferred
+    # to that call so we can find out when it is complete.
+    def resumeProducing(self):
+        d = defer.Deferred()
+        reactor.callLater(0, self._resumeProducing, d)
+        return d
+    def _resumeProducing(self, d):
+        builder.LogFileProducer._resumeProducing(self)
+        reactor.callLater(0, d.callback, None)
+
+class MyLog(builder.LogFile):
+    def __init__(self, basedir, name, text=None, step=None):
+        self.fakeBuilderBasedir = basedir
+        if not step:
+            step = MyStep()
+        builder.LogFile.__init__(self, step, name, name)
+        if text:
+            self.addStdout(text)
+            self.finish()
+    def getFilename(self):
+        return os.path.join(self.fakeBuilderBasedir, self.name)
+
+    def subscribeConsumer(self, consumer):
+        p = MyLogFileProducer(self, consumer)
+        d = p.resumeProducing()
+        return d
+
+class MyHTMLLog(builder.HTMLLogFile):
+    def __init__(self, basedir, name, html):
+        step = MyStep()
+        builder.HTMLLogFile.__init__(self, step, name, name, html)
+
+class MyLogSubscriber:
+    def __init__(self):
+        self.chunks = []
+    def logChunk(self, build, step, log, channel, text):
+        self.chunks.append((channel, text))
+
+class MyLogConsumer:
+    def __init__(self, limit=None):
+        self.chunks = []
+        self.finished = False
+        self.limit = limit
+    def registerProducer(self, producer, streaming):
+        self.producer = producer
+        self.streaming = streaming
+    def unregisterProducer(self):
+        self.producer = None
+    def writeChunk(self, chunk):
+        self.chunks.append(chunk)
+        if self.limit:
+            self.limit -= 1
+            if self.limit == 0:
+                self.producer.pauseProducing()
+    def finish(self):
+        self.finished = True
+
+if mail:
+    class MyMailer(mail.MailNotifier):
+        def sendMessage(self, m, recipients):
+            self.parent.messages.append((m, recipients))
+
+class MyStatus:
+    def getBuildbotURL(self):
+        return self.url
+    def getURLForThing(self, thing):
+        return None
+
+class MyBuilder(builder.BuilderStatus):
+    nextBuildNumber = 0
+
+class MyBuild(builder.BuildStatus):
+    testlogs = []
+    def __init__(self, parent, number, results):
+        builder.BuildStatus.__init__(self, parent, number)
+        self.results = results
+        self.source = SourceStamp(revision="1.14")
+        self.reason = "build triggered by changes"
+        self.finished = True
+    def getLogs(self):
+        return self.testlogs
+
+class MyLookup:
+    if implements:
+        implements(interfaces.IEmailLookup)
+    else:
+        __implements__ = interfaces.IEmailLookup,
+
+    def getAddress(self, user):
+        d = defer.Deferred()
+        # With me now is Mr Thomas Walters of West Hartlepool who is totally
+        # invisible.
+        if user == "Thomas_Walters":
+            d.callback(None)
+        else:
+            d.callback(user + "@" + "dev.com")
+        return d
+
+class Mail(unittest.TestCase):
+
+    def setUp(self):
+        self.builder = MyBuilder("builder1")
+
+    def stall(self, res, timeout):
+        d = defer.Deferred()
+        reactor.callLater(timeout, d.callback, res)
+        return d
+
+    def makeBuild(self, number, results):
+        return MyBuild(self.builder, number, results)
+
+    def failUnlessIn(self, substring, string):
+        self.failUnless(string.find(substring) != -1)
+
+    def getBuildbotURL(self):
+        return "BUILDBOT_URL"
+
+    def getURLForThing(self, thing):
+        return None
+
+    def testBuild1(self):
+        mailer = MyMailer(fromaddr="buildbot@example.com",
+                          extraRecipients=["recip@example.com",
+                                           "recip2@example.com"],
+                          lookup=mail.Domain("dev.com"))
+        mailer.parent = self
+        mailer.status = self
+        self.messages = []
+
+        b1 = self.makeBuild(3, builder.SUCCESS)
+        b1.blamelist = ["bob"]
+
+        mailer.buildFinished("builder1", b1, b1.results)
+        self.failUnless(len(self.messages) == 1)
+        m,r = self.messages.pop()
+        t = m.as_string()
+        self.failUnlessIn("To: bob at dev.com, recip2 at example.com, "
+                          "recip at example.com\n", t)
+        self.failUnlessIn("From: buildbot at example.com\n", t)
+        self.failUnlessIn("Subject: buildbot success in builder1\n", t)
+        self.failUnlessIn("Date: ", t)
+        self.failUnlessIn("Build succeeded!\n", t)
+        self.failUnlessIn("Buildbot URL: BUILDBOT_URL\n", t)
+
+    def testBuild2(self):
+        mailer = MyMailer(fromaddr="buildbot@example.com",
+                          extraRecipients=["recip@example.com",
+                                           "recip2@example.com"],
+                          lookup="dev.com",
+                          sendToInterestedUsers=False)
+        mailer.parent = self
+        mailer.status = self
+        self.messages = []
+
+        b1 = self.makeBuild(3, builder.SUCCESS)
+        b1.blamelist = ["bob"]
+
+        mailer.buildFinished("builder1", b1, b1.results)
+        self.failUnless(len(self.messages) == 1)
+        m,r = self.messages.pop()
+        t = m.as_string()
+        self.failUnlessIn("To: recip2 at example.com, "
+                          "recip at example.com\n", t)
+        self.failUnlessIn("From: buildbot at example.com\n", t)
+        self.failUnlessIn("Subject: buildbot success in builder1\n", t)
+        self.failUnlessIn("Build succeeded!\n", t)
+        self.failUnlessIn("Buildbot URL: BUILDBOT_URL\n", t)
+
+    def testBuildStatusCategory(self):
+        # a status client only interested in a category should only receive
+        # from that category
+        mailer = MyMailer(fromaddr="buildbot@example.com",
+                          extraRecipients=["recip@example.com",
+                                           "recip2@example.com"],
+                          lookup="dev.com",
+                          sendToInterestedUsers=False,
+                          categories=["debug"])
+
+        mailer.parent = self
+        mailer.status = self
+        self.messages = []
+
+        b1 = self.makeBuild(3, builder.SUCCESS)
+        b1.blamelist = ["bob"]
+
+        mailer.buildFinished("builder1", b1, b1.results)
+        self.failIf(self.messages)
+
+    def testBuilderCategory(self):
+        # a builder in a certain category should notify status clients that
+        # did not list categories, or categories including this one
+        mailer1 = MyMailer(fromaddr="buildbot@example.com",
+                           extraRecipients=["recip@example.com",
+                                            "recip2@example.com"],
+                           lookup="dev.com",
+                           sendToInterestedUsers=False)
+        mailer2 = MyMailer(fromaddr="buildbot@example.com",
+                           extraRecipients=["recip@example.com",
+                                            "recip2@example.com"],
+                           lookup="dev.com",
+                           sendToInterestedUsers=False,
+                           categories=["active"])
+        mailer3 = MyMailer(fromaddr="buildbot@example.com",
+                           extraRecipients=["recip@example.com",
+                                            "recip2@example.com"],
+                           lookup="dev.com",
+                           sendToInterestedUsers=False,
+                           categories=["active", "debug"])
+
+        builderd = MyBuilder("builder2", "debug")
+
+        mailer1.parent = self
+        mailer1.status = self
+        mailer2.parent = self
+        mailer2.status = self
+        mailer3.parent = self
+        mailer3.status = self
+        self.messages = []
+
+        t = mailer1.builderAdded("builder2", builderd)
+        self.assertEqual(len(mailer1.watched), 1)
+        self.assertEqual(t, mailer1)
+        t = mailer2.builderAdded("builder2", builderd)
+        self.assertEqual(len(mailer2.watched), 0)
+        self.assertEqual(t, None)
+        t = mailer3.builderAdded("builder2", builderd)
+        self.assertEqual(len(mailer3.watched), 1)
+        self.assertEqual(t, mailer3)
+
+        b2 = MyBuild(builderd, 3, builder.SUCCESS)
+        b2.blamelist = ["bob"]
+
+        mailer1.buildFinished("builder2", b2, b2.results)
+        self.failUnlessEqual(len(self.messages), 1)
+        self.messages = []
+        mailer2.buildFinished("builder2", b2, b2.results)
+        self.failUnlessEqual(len(self.messages), 0)
+        self.messages = []
+        mailer3.buildFinished("builder2", b2, b2.results)
+        self.failUnlessEqual(len(self.messages), 1)
+
+    def testFailure(self):
+        mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
+                          extraRecipients=["recip@example.com",
+                                           "recip2@example.com"],
+                          lookup=MyLookup())
+        mailer.parent = self
+        mailer.status = self
+        self.messages = []
+
+        b1 = self.makeBuild(3, builder.SUCCESS)
+        b1.blamelist = ["dev1", "dev2"]
+        b2 = self.makeBuild(4, builder.FAILURE)
+        b2.setText(["snarkleack", "polarization", "failed"])
+        b2.blamelist = ["dev3", "dev3", "dev3", "dev4",
+                        "Thomas_Walters"]
+        mailer.buildFinished("builder1", b1, b1.results)
+        self.failIf(self.messages)
+        mailer.buildFinished("builder1", b2, b2.results)
+        self.failUnless(len(self.messages) == 1)
+        m,r = self.messages.pop()
+        t = m.as_string()
+        self.failUnlessIn("To: dev3 at dev.com, dev4 at dev.com, "
+                          "recip2 at example.com, recip at example.com\n", t)
+        self.failUnlessIn("From: buildbot at example.com\n", t)
+        self.failUnlessIn("Subject: buildbot failure in builder1\n", t)
+        self.failUnlessIn("The Buildbot has detected a new failure", t)
+        self.failUnlessIn("BUILD FAILED: snarkleack polarization failed\n", t)
+        self.failUnlessEqual(r, ["dev3 at dev.com", "dev4 at dev.com",
+                                 "recip2 at example.com", "recip at example.com"])
+
+    def testLogs(self):
+        basedir = "test_status_logs"
+        os.mkdir(basedir)
+        mailer = MyMailer(fromaddr="buildbot@example.com", addLogs=True,
+                          extraRecipients=["recip@example.com",
+                                           "recip2@example.com"])
+        mailer.parent = self
+        mailer.status = self
+        self.messages = []
+
+        b1 = self.makeBuild(3, builder.WARNINGS)
+        b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+                       MyLog(basedir,
+                             'test', "Test log here\nTest 4 failed\n"),
+                   ]
+        b1.text = ["unusual", "gnarzzler", "output"]
+        mailer.buildFinished("builder1", b1, b1.results)
+        self.failUnless(len(self.messages) == 1)
+        m,r = self.messages.pop()
+        t = m.as_string()
+        self.failUnlessIn("Subject: buildbot warnings in builder1\n", t)
+        m2 = email.message_from_string(t)
+        p = m2.get_payload()
+        self.failUnlessEqual(len(p), 3)
+
+        self.failUnlessIn("Build Had Warnings: unusual gnarzzler output\n",
+                          p[0].get_payload())
+
+        self.failUnlessEqual(p[1].get_filename(), "step.compile")
+        self.failUnlessEqual(p[1].get_payload(), "Compile log here\n")
+        
+        self.failUnlessEqual(p[2].get_filename(), "step.test")
+        self.failUnlessIn("Test log here\n", p[2].get_payload())
+
+    def testMail(self):
+        basedir = "test_status_mail"
+        os.mkdir(basedir)
+        dest = os.environ.get("BUILDBOT_TEST_MAIL")
+        if not dest:
+            raise unittest.SkipTest("define BUILDBOT_TEST_MAIL=dest to run this")
+        mailer = mail.MailNotifier(fromaddr="buildbot@example.com",
+                                   addLogs=True,
+                                   extraRecipients=[dest])
+        s = MyStatus()
+        s.url = "project URL"
+        mailer.status = s
+
+        b1 = self.makeBuild(3, builder.SUCCESS)
+        b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+                       MyLog(basedir,
+                             'test', "Test log here\nTest 4 failed\n"),
+                   ]
+
+        print "sending mail to", dest
+        d = mailer.buildFinished("builder1", b1, b1.results)
+        # When this fires, the mail has been sent, but the SMTP connection is
+        # still up (because smtp.sendmail relies upon the server to hang up).
+        # Spin for a moment to avoid the "unclean reactor" warning that Trial
+        # gives us if we finish before the socket is disconnected. Really,
+        # sendmail() ought to hang up the connection once it is finished:
+        # otherwise a malicious SMTP server could make us consume lots of
+        # memory.
+        d.addCallback(self.stall, 0.1)
+        return maybeWait(d)
+
+if not mail:
+    Mail.skip = "the Twisted Mail package is not installed"
+
+class Progress(unittest.TestCase):
+    def testWavg(self):
+        bp = progress.BuildProgress([])
+        e = progress.Expectations(bp)
+        # wavg(old, current)
+        self.failUnlessEqual(e.wavg(None, None), None)
+        self.failUnlessEqual(e.wavg(None, 3), 3)
+        self.failUnlessEqual(e.wavg(3, None), 3)
+        self.failUnlessEqual(e.wavg(3, 4), 3.5)
+        e.decay = 0.1
+        self.failUnlessEqual(e.wavg(3, 4), 3.1)
+
+
+class Results(unittest.TestCase):
+
+    def testAddResults(self):
+        b = builder.BuildStatus(builder.BuilderStatus("test"), 12)
+        testname = ("buildbot", "test", "test_status", "Results",
+                    "testAddResults")
+        r1 = builder.TestResult(name=testname,
+                                results=builder.SUCCESS,
+                                text=["passed"],
+                                logs={'output': ""},
+                                )
+        b.addTestResult(r1)
+
+        res = b.getTestResults()
+        self.failUnlessEqual(res.keys(), [testname])
+        t = res[testname]
+        self.failUnless(providedBy(t, interfaces.ITestResult))
+        self.failUnlessEqual(t.getName(), testname)
+        self.failUnlessEqual(t.getResults(), builder.SUCCESS)
+        self.failUnlessEqual(t.getText(), ["passed"])
+        self.failUnlessEqual(t.getLogs(), {'output': ""})
+
+class Log(unittest.TestCase):
+    def setUpClass(self):
+        self.basedir = "status_log_add"
+        os.mkdir(self.basedir)
+
+    def testAdd(self):
+        l = MyLog(self.basedir, "compile", step=13)
+        self.failUnlessEqual(l.getName(), "compile")
+        self.failUnlessEqual(l.getStep(), 13)
+        l.addHeader("HEADER\n")
+        l.addStdout("Some text\n")
+        l.addStderr("Some error\n")
+        l.addStdout("Some more text\n")
+        self.failIf(l.isFinished())
+        l.finish()
+        self.failUnless(l.isFinished())
+        self.failUnlessEqual(l.getText(),
+                             "Some text\nSome error\nSome more text\n")
+        self.failUnlessEqual(l.getTextWithHeaders(),
+                             "HEADER\n" +
+                             "Some text\nSome error\nSome more text\n")
+        self.failUnlessEqual(len(list(l.getChunks())), 4)
+
+        self.failUnless(l.hasContents())
+        os.unlink(l.getFilename())
+        self.failIf(l.hasContents())
+
+    def TODO_testDuplicate(self):
+        # create multiple logs for the same step with the same logname, make
+        # sure their on-disk filenames are suitably uniquified. This
+        # functionality actually lives in BuildStepStatus and BuildStatus, so
+        # this test must involve more than just the MyLog class.
+
+        # naive approach, doesn't work
+        l1 = MyLog(self.basedir, "duplicate")
+        l1.addStdout("Some text\n")
+        l1.finish()
+        l2 = MyLog(self.basedir, "duplicate")
+        l2.addStdout("Some more text\n")
+        l2.finish()
+        self.failIfEqual(l1.getFilename(), l2.getFilename())
+
+    def testMerge1(self):
+        l = MyLog(self.basedir, "merge1")
+        l.addHeader("HEADER\n")
+        l.addStdout("Some text\n")
+        l.addStdout("Some more text\n")
+        l.addStdout("more\n")
+        l.finish()
+        self.failUnlessEqual(l.getText(),
+                             "Some text\nSome more text\nmore\n")
+        self.failUnlessEqual(l.getTextWithHeaders(),
+                             "HEADER\n" +
+                             "Some text\nSome more text\nmore\n")
+        self.failUnlessEqual(len(list(l.getChunks())), 2)
+
+    def testMerge2(self):
+        l = MyLog(self.basedir, "merge2")
+        l.addHeader("HEADER\n")
+        for i in xrange(1000):
+            l.addStdout("aaaa")
+        for i in xrange(30):
+            l.addStderr("bbbb")
+        for i in xrange(10):
+            l.addStdout("cc")
+        target = 1000*"aaaa" + 30 * "bbbb" + 10 * "cc"
+        self.failUnlessEqual(len(l.getText()), len(target))
+        self.failUnlessEqual(l.getText(), target)
+        l.finish()
+        self.failUnlessEqual(len(l.getText()), len(target))
+        self.failUnlessEqual(l.getText(), target)
+        self.failUnlessEqual(len(list(l.getChunks())), 4)
+
+    def testMerge3(self):
+        l = MyLog(self.basedir, "merge3")
+        l.chunkSize = 100
+        l.addHeader("HEADER\n")
+        for i in xrange(8):
+            l.addStdout(10*"a")
+        for i in xrange(8):
+            l.addStdout(10*"a")
+        self.failUnlessEqual(list(l.getChunks()),
+                             [(builder.HEADER, "HEADER\n"),
+                              (builder.STDOUT, 100*"a"),
+                              (builder.STDOUT, 60*"a")])
+        l.finish()
+        self.failUnlessEqual(l.getText(), 160*"a")
+
+    def testReadlines(self):
+        l = MyLog(self.basedir, "chunks")
+        l.addHeader("HEADER\n") # should be ignored
+        l.addStdout("Some text\n")
+        l.addStdout("Some More Text\nAnd Some More\n")
+        l.addStderr("Some Stderr\n")
+        l.addStdout("Last line\n")
+        l.finish()
+        alllines = list(l.readlines())
+        self.failUnlessEqual(len(alllines), 4)
+        self.failUnlessEqual(alllines[0], "Some text\n")
+        self.failUnlessEqual(alllines[2], "And Some More\n")
+        self.failUnlessEqual(alllines[3], "Last line\n")
+        stderr = list(l.readlines(interfaces.LOG_CHANNEL_STDERR))
+        self.failUnlessEqual(len(stderr), 1)
+        self.failUnlessEqual(stderr[0], "Some Stderr\n")
+        lines = l.readlines()
+        if False: # TODO: l.readlines() is not yet an iterator
+            # verify that it really is an iterator
+            line0 = lines.next()
+            self.failUnlessEqual(line0, "Some text\n")
+            line1 = lines.next()
+            line2 = lines.next()
+            self.failUnlessEqual(line2, "And Some More\n")
+
+
+    def testChunks(self):
+        l = MyLog(self.basedir, "chunks")
+        c1 = l.getChunks()
+        l.addHeader("HEADER\n")
+        l.addStdout("Some text\n")
+        self.failUnlessEqual("".join(l.getChunks(onlyText=True)),
+                             "HEADER\nSome text\n")
+        c2 = l.getChunks()
+
+        l.addStdout("Some more text\n")
+        self.failUnlessEqual("".join(l.getChunks(onlyText=True)),
+                             "HEADER\nSome text\nSome more text\n")
+        c3 = l.getChunks()
+        
+        l.addStdout("more\n")
+        l.finish()
+
+        self.failUnlessEqual(list(c1), [])
+        self.failUnlessEqual(list(c2), [(builder.HEADER, "HEADER\n"),
+                                        (builder.STDOUT, "Some text\n")])
+        self.failUnlessEqual(list(c3), [(builder.HEADER, "HEADER\n"),
+                                        (builder.STDOUT,
+                                         "Some text\nSome more text\n")])
+        
+        self.failUnlessEqual(l.getText(),
+                             "Some text\nSome more text\nmore\n")
+        self.failUnlessEqual(l.getTextWithHeaders(),
+                             "HEADER\n" +
+                             "Some text\nSome more text\nmore\n")
+        self.failUnlessEqual(len(list(l.getChunks())), 2)
+
+    def testUpgrade(self):
+        l = MyLog(self.basedir, "upgrade")
+        l.addHeader("HEADER\n")
+        l.addStdout("Some text\n")
+        l.addStdout("Some more text\n")
+        l.addStdout("more\n")
+        l.finish()
+        self.failUnless(l.hasContents())
+        # now doctor it to look like a 0.6.4-era non-upgraded logfile
+        l.entries = list(l.getChunks())
+        del l.filename
+        os.unlink(l.getFilename())
+        # now make sure we can upgrade it
+        l.upgrade("upgrade")
+        self.failUnlessEqual(l.getText(),
+                             "Some text\nSome more text\nmore\n")
+        self.failUnlessEqual(len(list(l.getChunks())), 2)
+        self.failIf(l.entries)
+
+        # now, do it again, but make it look like an upgraded 0.6.4 logfile
+        # (i.e. l.filename is missing, but the contents are there on disk)
+        l.entries = list(l.getChunks())
+        del l.filename
+        l.upgrade("upgrade")
+        self.failUnlessEqual(l.getText(),
+                             "Some text\nSome more text\nmore\n")
+        self.failUnlessEqual(len(list(l.getChunks())), 2)
+        self.failIf(l.entries)
+        self.failUnless(l.hasContents())
+
+    def testHTMLUpgrade(self):
+        l = MyHTMLLog(self.basedir, "upgrade", "log contents")
+        l.upgrade("filename")
+
+    def testSubscribe(self):
+        l1 = MyLog(self.basedir, "subscribe1")
+        l1.finish()
+        self.failUnless(l1.isFinished())
+
+        s = MyLogSubscriber()
+        l1.subscribe(s, True)
+        l1.unsubscribe(s)
+        self.failIf(s.chunks)
+
+        s = MyLogSubscriber()
+        l1.subscribe(s, False)
+        l1.unsubscribe(s)
+        self.failIf(s.chunks)
+
+        finished = []
+        l2 = MyLog(self.basedir, "subscribe2")
+        l2.waitUntilFinished().addCallback(finished.append)
+        l2.addHeader("HEADER\n")
+        s1 = MyLogSubscriber()
+        l2.subscribe(s1, True)
+        s2 = MyLogSubscriber()
+        l2.subscribe(s2, False)
+        self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n")])
+        self.failUnlessEqual(s2.chunks, [])
+
+        l2.addStdout("Some text\n")
+        self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n"),
+                                         (builder.STDOUT, "Some text\n")])
+        self.failUnlessEqual(s2.chunks, [(builder.STDOUT, "Some text\n")])
+        l2.unsubscribe(s1)
+        
+        l2.addStdout("Some more text\n")
+        self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n"),
+                                         (builder.STDOUT, "Some text\n")])
+        self.failUnlessEqual(s2.chunks, [(builder.STDOUT, "Some text\n"),
+                                         (builder.STDOUT, "Some more text\n"),
+                                         ])
+        self.failIf(finished)
+        l2.finish()
+        self.failUnlessEqual(finished, [l2])
+
+    def testConsumer(self):
+        l1 = MyLog(self.basedir, "consumer1")
+        l1.finish()
+        self.failUnless(l1.isFinished())
+
+        s = MyLogConsumer()
+        d = l1.subscribeConsumer(s)
+        d.addCallback(self._testConsumer_1, s)
+        return maybeWait(d, 5)
+    def _testConsumer_1(self, res, s):
+        self.failIf(s.chunks)
+        self.failUnless(s.finished)
+        self.failIf(s.producer) # producer should be registered and removed
+
+        l2 = MyLog(self.basedir, "consumer2")
+        l2.addHeader("HEADER\n")
+        l2.finish()
+        self.failUnless(l2.isFinished())
+
+        s = MyLogConsumer()
+        d = l2.subscribeConsumer(s)
+        d.addCallback(self._testConsumer_2, s)
+        return d
+    def _testConsumer_2(self, res, s):
+        self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n")])
+        self.failUnless(s.finished)
+        self.failIf(s.producer) # producer should be registered and removed
+
+
+        l2 = MyLog(self.basedir, "consumer3")
+        l2.chunkSize = 1000
+        l2.addHeader("HEADER\n")
+        l2.addStdout(800*"a")
+        l2.addStdout(800*"a") # should now have two chunks on disk, 1000+600
+        l2.addStdout(800*"b") # HEADER,1000+600*a on disk, 800*a in memory
+        l2.addStdout(800*"b") # HEADER,1000+600*a,1000+600*b on disk
+        l2.addStdout(200*"c") # HEADER,1000+600*a,1000+600*b on disk,
+                              # 200*c in memory
+        
+        s = MyLogConsumer(limit=1)
+        d = l2.subscribeConsumer(s)
+        d.addCallback(self._testConsumer_3, l2, s)
+        return d
+    def _testConsumer_3(self, res, l2, s):
+        self.failUnless(s.streaming)
+        self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n")])
+        s.limit = 1
+        d = s.producer.resumeProducing()
+        d.addCallback(self._testConsumer_4, l2, s)
+        return d
+    def _testConsumer_4(self, res, l2, s):
+        self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+                                        (builder.STDOUT, 1000*"a"),
+                                        ])
+        s.limit = None
+        d = s.producer.resumeProducing()
+        d.addCallback(self._testConsumer_5, l2, s)
+        return d
+    def _testConsumer_5(self, res, l2, s):
+        self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+                                        (builder.STDOUT, 1000*"a"),
+                                        (builder.STDOUT, 600*"a"),
+                                        (builder.STDOUT, 1000*"b"),
+                                        (builder.STDOUT, 600*"b"),
+                                        (builder.STDOUT, 200*"c")])
+        l2.addStdout(1000*"c") # HEADER,1600*a,1600*b,1200*c on disk
+        self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+                                        (builder.STDOUT, 1000*"a"),
+                                        (builder.STDOUT, 600*"a"),
+                                        (builder.STDOUT, 1000*"b"),
+                                        (builder.STDOUT, 600*"b"),
+                                        (builder.STDOUT, 200*"c"),
+                                        (builder.STDOUT, 1000*"c")])
+        l2.finish()
+        self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+                                        (builder.STDOUT, 1000*"a"),
+                                        (builder.STDOUT, 600*"a"),
+                                        (builder.STDOUT, 1000*"b"),
+                                        (builder.STDOUT, 600*"b"),
+                                        (builder.STDOUT, 200*"c"),
+                                        (builder.STDOUT, 1000*"c")])
+        self.failIf(s.producer)
+        self.failUnless(s.finished)
+
+    def testLargeSummary(self):
+        bigtext = "a" * 200000 # exceed the NetstringReceiver 100KB limit
+        l = MyLog(self.basedir, "large", bigtext)
+        s = MyLogConsumer()
+        d = l.subscribeConsumer(s)
+        def _check(res):
+            for ctype,chunk in s.chunks:
+                self.failUnless(len(chunk) < 100000)
+            merged = "".join([c[1] for c in s.chunks])
+            self.failUnless(merged == bigtext)
+        d.addCallback(_check)
+        # when this fails, it fails with a timeout, and an exception is
+        # sent to log.err(). The AttributeError is raised in
+        # NetstringReceiver.dataReceived, which calls
+        # self.transport.loseConnection() in response to the
+        # NetstringParseError even though self.transport is None
+        return maybeWait(d, 5)
+    testLargeSummary.timeout = 5
+
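+# base buildmaster config text for these tests: one slave ('bot1'), a 'quick'
+# builder using QuickBuildFactory, and a two-step dummy factory (f2) that
+# config_2 below uses for its 'dummy' and 'testdummy' builders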
+config_base = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+s = factory.s
+
+f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
+
+f2 = factory.BuildFactory([
+    s(dummy.Dummy, timeout=1),
+    s(dummy.RemoteDummy, timeout=2),
+    ])
+
+BuildmasterConfig = c = {}
+c['bots'] = [['bot1', 'sekrit']]
+c['sources'] = []
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick', 'slavename':'bot1',
+                      'builddir': 'quickdir', 'factory': f1})
+c['slavePortnum'] = 0
+"""
+
+config_2 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+                  'builddir': 'dummy1', 'factory': f2},
+                 {'name': 'testdummy', 'slavename': 'bot1',
+                  'builddir': 'dummy2', 'factory': f2, 'category': 'test'}]
+"""
+
+class STarget(base.StatusReceiver):
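+    # Records every status callback it receives as a tuple in self.events.
+    # 'mode' is a list of strings: "builder", "build", and "step" control how
+    # far down the status hierarchy this target subscribes (by returning
+    # itself from the corresponding callbacks), and "eta" makes buildStarted
+    # record the build's ETA.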
+    debug = False
+
+    def __init__(self, mode):
+        self.mode = mode
+        self.events = []
+    def announce(self):
+        if self.debug:
+            print self.events[-1]
+
+    def builderAdded(self, name, builder):
+        self.events.append(("builderAdded", name, builder))
+        self.announce()
+        if "builder" in self.mode:
+            return self
+    def builderChangedState(self, name, state):
+        self.events.append(("builderChangedState", name, state))
+        self.announce()
+    def buildStarted(self, name, build):
+        self.events.append(("buildStarted", name, build))
+        self.announce()
+        if "eta" in self.mode:
+            self.eta_build = build.getETA()
+        if "build" in self.mode:
+            return self
+    def buildETAUpdate(self, build, ETA):
+        self.events.append(("buildETAUpdate", build, ETA))
+        self.announce()
+    def stepStarted(self, build, step):
+        self.events.append(("stepStarted", build, step))
+        self.announce()
+        if 0 and "eta" in self.mode:
+            print "TIMES", step.getTimes()
+            print "ETA", step.getETA()
+            print "EXP", step.getExpectations()
+        if "step" in self.mode:
+            return self
+    def stepETAUpdate(self, build, step, ETA, expectations):
+        self.events.append(("stepETAUpdate", build, step, ETA, expectations))
+        self.announce()
+    def logStarted(self, build, step, log):
+        self.events.append(("logStarted", build, step, log))
+        self.announce()
+    def logFinished(self, build, step, log):
+        self.events.append(("logFinished", build, step, log))
+        self.announce()
+    def stepFinished(self, build, step, results):
+        self.events.append(("stepFinished", build, step, results))
+        if 0 and "eta" in self.mode:
+            print "post-EXP", step.getExpectations()
+        self.announce()
+    def buildFinished(self, name, build, results):
+        self.events.append(("buildFinished", name, build, results))
+        self.announce()
+    def builderRemoved(self, name):
+        self.events.append(("builderRemoved", name))
+        self.announce()
+
+class Subscription(RunMixin, unittest.TestCase):
+    # verify that StatusTargets can subscribe/unsubscribe properly
+
+    def testSlave(self):
+        m = self.master
+        s = m.getStatus()
+        self.t1 = t1 = STarget(["builder"])
+        #t1.debug = True; print
+        s.subscribe(t1)
+        self.failUnlessEqual(len(t1.events), 0)
+
+        self.t3 = t3 = STarget(["builder", "build", "step"])
+        s.subscribe(t3)
+
+        m.loadConfig(config_2)
+        m.readConfig = True
+        m.startService()
+
+        self.failUnlessEqual(len(t1.events), 4)
+        self.failUnlessEqual(t1.events[0][0:2], ("builderAdded", "dummy"))
+        self.failUnlessEqual(t1.events[1],
+                             ("builderChangedState", "dummy", "offline"))
+        self.failUnlessEqual(t1.events[2][0:2], ("builderAdded", "testdummy"))
+        self.failUnlessEqual(t1.events[3],
+                             ("builderChangedState", "testdummy", "offline"))
+        t1.events = []
+
+        self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
+        self.failUnlessEqual(s.getBuilderNames(categories=['test']),
+                             ["testdummy"])
+        self.s1 = s1 = s.getBuilder("dummy")
+        self.failUnlessEqual(s1.getName(), "dummy")
+        self.failUnlessEqual(s1.getState(), ("offline", []))
+        self.failUnlessEqual(s1.getCurrentBuilds(), [])
+        self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+        self.failUnlessEqual(s1.getBuild(-1), None)
+        #self.failUnlessEqual(s1.getEvent(-1), foo("created"))
+
+        # status targets should, upon being subscribed, immediately get a
+        # list of all current builders matching their category
+        self.t2 = t2 = STarget([])
+        s.subscribe(t2)
+        self.failUnlessEqual(len(t2.events), 2)
+        self.failUnlessEqual(t2.events[0][0:2], ("builderAdded", "dummy"))
+        self.failUnlessEqual(t2.events[1][0:2], ("builderAdded", "testdummy"))
+
+        d = self.connectSlave(builders=["dummy", "testdummy"])
+        d.addCallback(self._testSlave_1, t1)
+        return maybeWait(d)
+
+    def _testSlave_1(self, res, t1):
+        self.failUnlessEqual(len(t1.events), 2)
+        self.failUnlessEqual(t1.events[0],
+                             ("builderChangedState", "dummy", "idle"))
+        self.failUnlessEqual(t1.events[1],
+                             ("builderChangedState", "testdummy", "idle"))
+        t1.events = []
+
+        c = interfaces.IControl(self.master)
+        req = BuildRequest("forced build for testing", SourceStamp())
+        c.getBuilder("dummy").requestBuild(req)
+        d = req.waitUntilFinished()
+        d2 = self.master.botmaster.waitUntilBuilderIdle("dummy")
+        dl = defer.DeferredList([d, d2])
+        dl.addCallback(self._testSlave_2)
+        return dl
+
+    def _testSlave_2(self, res):
+        # t1 subscribes at the builder level, so it sees builds start and
+        # finish but nothing lower-level (no steps or logs)
+        ev = self.t1.events
+        self.failUnlessEqual(len(ev), 4)
+        self.failUnlessEqual(ev[0][0:3],
+                             ("builderChangedState", "dummy", "building"))
+        self.failUnlessEqual(ev[1][0], "buildStarted")
+        self.failUnlessEqual(ev[2][0:2]+ev[2][3:4],
+                             ("buildFinished", "dummy", builder.SUCCESS))
+        self.failUnlessEqual(ev[3][0:3],
+                             ("builderChangedState", "dummy", "idle"))
+
+        self.failUnlessEqual([ev[0] for ev in self.t3.events],
+                             ["builderAdded",
+                              "builderChangedState", # offline
+                              "builderAdded",
+                              "builderChangedState", # idle
+                              "builderChangedState", # offline
+                              "builderChangedState", # idle
+                              "builderChangedState", # building
+                              "buildStarted",
+                              "stepStarted", "stepETAUpdate", "stepFinished",
+                              "stepStarted", "stepETAUpdate",
+                              "logStarted", "logFinished", "stepFinished",
+                              "buildFinished",
+                              "builderChangedState", # idle
+                              ])
+
+        b = self.s1.getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getBuilder().getName(), "dummy")
+        self.failUnlessEqual(b.getNumber(), 0)
+        self.failUnlessEqual(b.getSourceStamp(), (None, None, None))
+        self.failUnlessEqual(b.getReason(), "forced build for testing")
+        self.failUnlessEqual(b.getChanges(), [])
+        self.failUnlessEqual(b.getResponsibleUsers(), [])
+        self.failUnless(b.isFinished())
+        self.failUnlessEqual(b.getText(), ['build', 'successful'])
+        self.failUnlessEqual(b.getColor(), "green")
+        self.failUnlessEqual(b.getResults(), builder.SUCCESS)
+
+        steps = b.getSteps()
+        self.failUnlessEqual(len(steps), 2)
+
+        eta = 0
+        st1 = steps[0]
+        self.failUnlessEqual(st1.getName(), "dummy")
+        self.failUnless(st1.isFinished())
+        self.failUnlessEqual(st1.getText(), ["delay", "1 secs"])
+        start,finish = st1.getTimes()
+        self.failUnless(0.5 < (finish-start) < 10)
+        self.failUnlessEqual(st1.getExpectations(), [])
+        self.failUnlessEqual(st1.getLogs(), [])
+        eta += finish-start
+
+        st2 = steps[1]
+        self.failUnlessEqual(st2.getName(), "remote dummy")
+        self.failUnless(st2.isFinished())
+        self.failUnlessEqual(st2.getText(),
+                             ["remote", "delay", "2 secs"])
+        start,finish = st2.getTimes()
+        self.failUnless(1.5 < (finish-start) < 10)
+        eta += finish-start
+        self.failUnlessEqual(st2.getExpectations(), [('output', 38, None)])
+        logs = st2.getLogs()
+        self.failUnlessEqual(len(logs), 1)
+        self.failUnlessEqual(logs[0].getName(), "stdio")
+        self.failUnlessEqual(logs[0].getText(), "data")
+
+        self.eta = eta
+        # now we run it a second time, and we should have an ETA
+
+        self.t4 = t4 = STarget(["builder", "build", "eta"])
+        self.master.getStatus().subscribe(t4)
+        c = interfaces.IControl(self.master)
+        req = BuildRequest("forced build for testing", SourceStamp())
+        c.getBuilder("dummy").requestBuild(req)
+        d = req.waitUntilFinished()
+        d2 = self.master.botmaster.waitUntilBuilderIdle("dummy")
+        dl = defer.DeferredList([d, d2])
+        dl.addCallback(self._testSlave_3)
+        return dl
+
+    def _testSlave_3(self, res):
+        t4 = self.t4
+        eta = self.eta
+        self.failUnless(eta-1 < t4.eta_build < eta+1, # should be 3 seconds
+                        "t4.eta_build was %g, not in (%g,%g)"
+                        % (t4.eta_build, eta-1, eta+1))
+    
+
+class Client(unittest.TestCase):
+    def testAdaptation(self):
+        b = builder.BuilderStatus("bname")
+        b2 = client.makeRemote(b)
+        self.failUnless(isinstance(b2, client.RemoteBuilder))
+        b3 = client.makeRemote(None)
+        self.failUnless(b3 is None)

Added: vendor/buildbot/current/buildbot/test/test_steps.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_steps.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_steps.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,501 @@
+# -*- test-case-name: buildbot.test.test_steps -*-
+
+# create the BuildStep with a fake .remote instance that logs the
+# .callRemote invocations and compares them against the expected calls. Then
+# the test harness should send statusUpdate() messages in with assorted
+# data, eventually calling remote_complete(). Then we can verify that the
+# Step's rc was correct, and that the status it was supposed to return
+# matches.
+
+# sometimes, .callRemote should raise an exception because of a stale
+# reference; sometimes it should errback with an UnknownCommand failure, or
+# some other failure.
+
+# todo: test batched updates, by invoking remote_update(updates) instead of
+# statusUpdate(update). Also involves interrupted builds.
+
+import os, time
+
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process import buildstep, base, factory
+from buildbot.steps import shell, source, python
+from buildbot.status import builder
+from buildbot.status.builder import SUCCESS, FAILURE
+from buildbot.test.runutils import RunMixin, rmtree
+from buildbot.test.runutils import makeBuildStep, StepTester
+from buildbot.twcompat import maybeWait
+from buildbot.slave import commands, registry
+
+
+class MyShellCommand(shell.ShellCommand):
+    started = False
+    def runCommand(self, c):
+        self.started = True
+        self.rc = c
+        return shell.ShellCommand.runCommand(self, c)
+
+class FakeBuild:
+    pass
+class FakeBuilder:
+    statusbag = None
+    name = "fakebuilder"
+class FakeSlaveBuilder:
+    def getSlaveCommandVersion(self, command, oldversion=None):
+        return "1.10"
+
+class FakeRemote:
+    def __init__(self):
+        self.events = []
+        self.remoteCalls = 0
+        #self.callRemoteNotifier = None
+    def callRemote(self, methname, *args):
+        event = ["callRemote", methname, args]
+        self.events.append(event)
+##         if self.callRemoteNotifier:
+##             reactor.callLater(0, self.callRemoteNotifier, event)
+        self.remoteCalls += 1
+        self.deferred = defer.Deferred()
+        return self.deferred
+    def notifyOnDisconnect(self, callback):
+        pass
+    def dontNotifyOnDisconnect(self, callback):
+        pass
+
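+# A minimal sketch (not part of the original module): a fake remote whose
+# callRemote fails with a stale-reference error, illustrating the "stale
+# reference" scenario mentioned in the comment at the top of this file.
+# The name FlakyRemote is invented here; DeadReferenceError is the exception
+# twisted.spread.pb raises for calls on a dead RemoteReference.
+class FlakyRemote(FakeRemote):
+    def callRemote(self, methname, *args):
+        from twisted.spread.pb import DeadReferenceError
+        # record the attempt the same way FakeRemote does, then fail
+        self.events.append(["callRemote", methname, args])
+        raise DeadReferenceError("calling stale broker")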
+
+class BuildStep(unittest.TestCase):
+
+    def setUp(self):
+        rmtree("test_steps")
+        self.builder = FakeBuilder()
+        self.builder_status = builder.BuilderStatus("fakebuilder")
+        self.builder_status.basedir = "test_steps"
+        self.builder_status.nextBuildNumber = 0
+        os.mkdir(self.builder_status.basedir)
+        self.build_status = self.builder_status.newBuild()
+        req = base.BuildRequest("reason", SourceStamp())
+        self.build = base.Build([req])
+        self.build.build_status = self.build_status # fake it
+        self.build.builder = self.builder
+        self.build.slavebuilder = FakeSlaveBuilder()
+        self.remote = FakeRemote()
+        self.finished = 0
+
+    def callback(self, results):
+        self.failed = 0
+        self.failure = None
+        self.results = results
+        self.finished = 1
+    def errback(self, failure):
+        self.failed = 1
+        self.failure = failure
+        self.results = None
+        self.finished = 1
+
+    def testShellCommand1(self):
+        cmd = "argle bargle"
+        dir = "murkle"
+        expectedEvents = []
+        buildstep.RemoteCommand.commandCounter[0] = 3
+        c = MyShellCommand(workdir=dir, command=cmd, build=self.build,
+                           timeout=10)
+        self.assertEqual(self.remote.events, expectedEvents)
+        c.step_status = self.build_status.addStepWithName("myshellcommand")
+        d = c.startStep(self.remote)
+        self.failUnless(c.started)
+        rc = c.rc
+        d.addCallbacks(self.callback, self.errback)
+        timeout = time.time() + 10
+        while self.remote.remoteCalls == 0:
+            if time.time() > timeout:
+                self.fail("timeout")
+            reactor.iterate(0.01)
+        expectedEvents.append(["callRemote", "startCommand",
+                               (rc, "3",
+                               "shell",
+                                {'command': "argle bargle",
+                                 'workdir': "murkle",
+                                 'want_stdout': 1,
+                                 'want_stderr': 1,
+                                 'logfiles': {},
+                                 'timeout': 10,
+                                 'env': None}) ] )
+        self.assertEqual(self.remote.events, expectedEvents)
+
+        # we could do self.remote.deferred.errback(UnknownCommand) here. We
+        # could also do .callback(), but generally the master end silently
+        # ignores the slave's ack
+
+        logs = c.step_status.getLogs()
+        for log in logs:
+            if log.getName() == "log":
+                break
+
+        rc.remoteUpdate({'header':
+                         "command 'argle bargle' in dir 'murkle'\n\n"})
+        rc.remoteUpdate({'stdout': "foo\n"})
+        self.assertEqual(log.getText(), "foo\n")
+        self.assertEqual(log.getTextWithHeaders(),
+                         "command 'argle bargle' in dir 'murkle'\n\n"
+                         "foo\n")
+        rc.remoteUpdate({'stderr': "bar\n"})
+        self.assertEqual(log.getText(), "foo\nbar\n")
+        self.assertEqual(log.getTextWithHeaders(),
+                         "command 'argle bargle' in dir 'murkle'\n\n"
+                         "foo\nbar\n")
+        rc.remoteUpdate({'rc': 0})
+        self.assertEqual(rc.rc, 0)
+        
+        rc.remote_complete()
+        # that should fire the Deferred
+        timeout = time.time() + 10
+        while not self.finished:
+            if time.time() > timeout:
+                self.fail("timeout")
+            reactor.iterate(0.01)
+        self.assertEqual(self.failed, 0)
+        self.assertEqual(self.results, 0)
+
+
+class MyObserver(buildstep.LogObserver):
+    out = ""
+    def outReceived(self, data):
+        self.out = self.out + data
+
+class Steps(unittest.TestCase):
+    def testMultipleStepInstances(self):
+        steps = [
+            (source.CVS, {'cvsroot': "root", 'cvsmodule': "module"}),
+            (shell.Configure, {'command': "./configure"}),
+            (shell.Compile, {'command': "make"}),
+            (shell.Compile, {'command': "make more"}),
+            (shell.Compile, {'command': "make evenmore"}),
+            (shell.Test, {'command': "make test"}),
+            (shell.Test, {'command': "make testharder"}),
+            ]
+        f = factory.ConfigurableBuildFactory(steps)
+        req = base.BuildRequest("reason", SourceStamp())
+        b = f.newBuild([req])
+        #for s in b.steps: print s.name
+
+    # test the various methods available to buildsteps
+
+    def test_getProperty(self):
+        s = makeBuildStep("test_steps.Steps.test_getProperty")
+        bs = s.step_status.getBuild()
+
+        s.setProperty("prop1", "value1")
+        s.setProperty("prop2", "value2")
+        self.failUnlessEqual(s.getProperty("prop1"), "value1")
+        self.failUnlessEqual(bs.getProperty("prop1"), "value1")
+        self.failUnlessEqual(s.getProperty("prop2"), "value2")
+        self.failUnlessEqual(bs.getProperty("prop2"), "value2")
+        s.setProperty("prop1", "value1a")
+        self.failUnlessEqual(s.getProperty("prop1"), "value1a")
+        self.failUnlessEqual(bs.getProperty("prop1"), "value1a")
+
+
+    def test_addURL(self):
+        s = makeBuildStep("test_steps.Steps.test_addURL")
+        s.addURL("coverage", "http://coverage.example.org/target")
+        s.addURL("icon", "http://coverage.example.org/icon.png")
+        bs = s.step_status
+        links = bs.getURLs()
+        expected = {"coverage": "http://coverage.example.org/target",
+                    "icon": "http://coverage.example.org/icon.png",
+                    }
+        self.failUnlessEqual(links, expected)
+
+    def test_addLog(self):
+        s = makeBuildStep("test_steps.Steps.test_addLog")
+        l = s.addLog("newlog")
+        l.addStdout("some stdout here")
+        l.finish()
+        bs = s.step_status
+        logs = bs.getLogs()
+        self.failUnlessEqual(len(logs), 1)
+        l1 = logs[0]
+        self.failUnlessEqual(l1.getText(), "some stdout here")
+
+    def test_addHTMLLog(self):
+        s = makeBuildStep("test_steps.Steps.test_addHTMLLog")
+        l = s.addHTMLLog("newlog", "some html here")
+        bs = s.step_status
+        logs = bs.getLogs()
+        self.failUnlessEqual(len(logs), 1)
+        l1 = logs[0]
+        self.failUnless(isinstance(l1, builder.HTMLLogFile))
+        self.failUnlessEqual(l1.getText(), "some html here")
+
+    def test_addCompleteLog(self):
+        s = makeBuildStep("test_steps.Steps.test_addCompleteLog")
+        l = s.addCompleteLog("newlog", "some stdout here")
+        bs = s.step_status
+        logs = bs.getLogs()
+        self.failUnlessEqual(len(logs), 1)
+        l1 = logs[0]
+        self.failUnlessEqual(l1.getText(), "some stdout here")
+
+    def test_addLogObserver(self):
+        s = makeBuildStep("test_steps.Steps.test_addLogObserver")
+        bss = s.step_status
+        o1,o2,o3 = MyObserver(), MyObserver(), MyObserver()
+
+        # add the log before the observer
+        l1 = s.addLog("one")
+        l1.addStdout("onestuff")
+        s.addLogObserver("one", o1)
+        self.failUnlessEqual(o1.out, "onestuff")
+        l1.addStdout(" morestuff")
+        self.failUnlessEqual(o1.out, "onestuff morestuff")
+
+        # add the observer before the log
+        s.addLogObserver("two", o2)
+        l2 = s.addLog("two")
+        l2.addStdout("twostuff")
+        self.failUnlessEqual(o2.out, "twostuff")
+
+    # test more stuff about ShellCommands
+
+    def test_description(self):
+        s = makeBuildStep("test_steps.Steps.test_description.1",
+                          step_class=shell.ShellCommand,
+                          workdir="dummy",
+                          description=["list", "of", "strings"],
+                          descriptionDone=["another", "list"])
+        self.failUnlessEqual(s.description, ["list", "of", "strings"])
+        self.failUnlessEqual(s.descriptionDone, ["another", "list"])
+
+        s = makeBuildStep("test_steps.Steps.test_description.2",
+                          step_class=shell.ShellCommand,
+                          workdir="dummy",
+                          description="single string",
+                          descriptionDone="another string")
+        self.failUnlessEqual(s.description, ["single string"])
+        self.failUnlessEqual(s.descriptionDone, ["another string"])
+
+class VersionCheckingStep(buildstep.BuildStep):
+    def start(self):
+        # give our test a chance to run. It is non-trivial for a buildstep to
+        # claw its way back out to the test case which is currently running.
+        master = self.build.builder.botmaster.parent
+        checker = master._checker
+        checker(self)
+        # then complete
+        self.finished(buildstep.SUCCESS)
+
+version_config = """
+from buildbot.process import factory
+from buildbot.test.test_steps import VersionCheckingStep
+BuildmasterConfig = c = {}
+f1 = factory.BuildFactory([
+    factory.s(VersionCheckingStep),
+    ])
+c['bots'] = [['bot1', 'sekrit']]
+c['sources'] = []
+c['schedulers'] = []
+c['builders'] = [{'name':'quick', 'slavename':'bot1',
+                  'builddir': 'quickdir', 'factory': f1}]
+c['slavePortnum'] = 0
+"""
+
+class SlaveVersion(RunMixin, unittest.TestCase):
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(version_config)
+        self.master.startService()
+        d = self.connectSlave(["quick"])
+        return maybeWait(d)
+
+    def doBuild(self, buildername):
+        br = base.BuildRequest("forced", SourceStamp())
+        d = br.waitUntilFinished()
+        self.control.getBuilder(buildername).requestBuild(br)
+        return d
+
+
+    def checkCompare(self, s):
+        cver = commands.command_version
+        v = s.slaveVersion("svn", None)
+        # this ensures that we are getting the version correctly
+        self.failUnlessEqual(s.slaveVersion("svn", None), cver)
+        # and that non-existent commands do not provide a version
+        self.failUnlessEqual(s.slaveVersion("NOSUCHCOMMAND"), None)
+        # TODO: verify that a <=0.5.0 buildslave (which does not implement
+        # remote_getCommands) handles oldversion= properly. This requires a
+        # mutant slave which does not offer that method.
+        #self.failUnlessEqual(s.slaveVersion("NOSUCHCOMMAND", "old"), "old")
+
+        # now check the comparison functions
+        self.failIf(s.slaveVersionIsOlderThan("svn", cver))
+        self.failIf(s.slaveVersionIsOlderThan("svn", "1.1"))
+        self.failUnless(s.slaveVersionIsOlderThan("svn", cver + ".1"))
+
+        self.failUnlessEqual(s.getSlaveName(), "bot1")
+
+    def testCompare(self):
+        self.master._checker = self.checkCompare
+        d = self.doBuild("quick")
+        return maybeWait(d)
+
+
+class ReorgCompatibility(unittest.TestCase):
+    def testCompat(self):
+        from buildbot.process.step import LogObserver, LogLineObserver
+        from buildbot.process.step import RemoteShellCommand
+        from buildbot.process.step import BuildStep, LoggingBuildStep
+        from buildbot.process.step import ShellCommand, WithProperties
+        from buildbot.process.step import TreeSize
+        from buildbot.process.step import Configure
+        from buildbot.process.step import Compile
+        from buildbot.process.step import Test
+        from buildbot.process.step import CVS
+        from buildbot.process.step import SVN
+        from buildbot.process.step import Darcs
+        from buildbot.process.step import Git
+        from buildbot.process.step import Arch
+        from buildbot.process.step import Bazaar
+        from buildbot.process.step import Mercurial
+        from buildbot.process.step import P4
+        from buildbot.process.step import P4Sync
+        from buildbot.process.step import Dummy
+        from buildbot.process.step import FailingDummy
+        from buildbot.process.step import RemoteDummy
+
+        # now trick pyflakes into thinking we care
+        unused = [LogObserver, LogLineObserver, RemoteShellCommand,
+                  BuildStep, LoggingBuildStep, ShellCommand, WithProperties,
+                  TreeSize, Configure, Compile, Test, CVS, SVN, Darcs,
+                  Git, Arch, Bazaar, Mercurial, P4, P4Sync,
+                  Dummy, FailingDummy, RemoteDummy]
+
+
+class _SimpleBuildStep(buildstep.BuildStep):
+    def start(self):
+        args = {"arg1": "value"}
+        cmd = buildstep.RemoteCommand("simple", args)
+        d = self.runCommand(cmd)
+        d.addCallback(lambda res: self.finished(SUCCESS))
+
+class _SimpleCommand(commands.Command):
+    def start(self):
+        self.builder.flag = True
+        self.builder.flag_args = self.args
+        return defer.succeed(None)
+
+class CheckStepTester(StepTester, unittest.TestCase):
+    def testSimple(self):
+        self.slavebase = "testSimple.slave"
+        self.masterbase = "testSimple.master"
+        sb = self.makeSlaveBuilder()
+        sb.flag = False
+        registry.registerSlaveCommand("simple", _SimpleCommand, "1")
+        step = self.makeStep(_SimpleBuildStep)
+        d = self.runStep(step)
+        def _checkSimple(results):
+            self.failUnless(sb.flag)
+            self.failUnlessEqual(sb.flag_args, {"arg1": "value"})
+        d.addCallback(_checkSimple)
+        return maybeWait(d)
+
+class Python(StepTester, unittest.TestCase):
+    def testPyFlakes1(self):
+        self.masterbase = "Python.testPyFlakes1"
+        step = self.makeStep(python.PyFlakes)
+        output = \
+"""pyflakes buildbot
+buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
+buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
+buildbot/clients/debug.py:9: 'gnome' imported but unused
+buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
+buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
+buildbot/scripts/imaginary.py:12: undefined name 'size'
+buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
+"""
+        log = step.addLog("stdio")
+        log.addStdout(output)
+        log.finish()
+        step.createSummary(log)
+        desc = step.descriptionDone
+        self.failUnless("unused=2" in desc)
+        self.failUnless("undefined=1" in desc)
+        self.failUnless("redefs=3" in desc)
+        self.failUnless("import*=1" in desc)
+        self.failIf("misc=" in desc)
+
+        self.failUnlessEqual(step.getProperty("pyflakes-unused"), 2)
+        self.failUnlessEqual(step.getProperty("pyflakes-undefined"), 1)
+        self.failUnlessEqual(step.getProperty("pyflakes-redefs"), 3)
+        self.failUnlessEqual(step.getProperty("pyflakes-import*"), 1)
+        self.failUnlessEqual(step.getProperty("pyflakes-misc"), 0)
+        self.failUnlessEqual(step.getProperty("pyflakes-total"), 7)
+
+        logs = {}
+        for log in step.step_status.getLogs():
+            logs[log.getName()] = log
+
+        for name in ["unused", "undefined", "redefs", "import*"]:
+            self.failUnless(name in logs)
+        self.failIf("misc" in logs)
+        lines = logs["unused"].readlines()
+        self.failUnlessEqual(len(lines), 2)
+        self.failUnlessEqual(lines[0], "buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused\n")
+
+        cmd = buildstep.RemoteCommand(None, {})
+        cmd.rc = 0
+        results = step.evaluateCommand(cmd)
+        self.failUnlessEqual(results, FAILURE) # because of the 'undefined'
+
+    def testPyFlakes2(self):
+        self.masterbase = "Python.testPyFlakes2"
+        step = self.makeStep(python.PyFlakes)
+        output = \
+"""pyflakes buildbot
+some more text here that should be ignored
+buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
+buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
+buildbot/clients/debug.py:9: 'gnome' imported but unused
+buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
+buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
+buildbot/scripts/imaginary.py:12: undefined name 'size'
+could not compile 'blah/blah.py':3:
+pretend there was an invalid line here
+buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
+"""
+        log = step.addLog("stdio")
+        log.addStdout(output)
+        log.finish()
+        step.createSummary(log)
+        desc = step.descriptionDone
+        self.failUnless("unused=2" in desc)
+        self.failUnless("undefined=1" in desc)
+        self.failUnless("redefs=3" in desc)
+        self.failUnless("import*=1" in desc)
+        self.failUnless("misc=2" in desc)
+
+
+    def testPyFlakes3(self):
+        self.masterbase = "Python.testPyFlakes3"
+        step = self.makeStep(python.PyFlakes)
+        output = \
+"""buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
+buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
+buildbot/clients/debug.py:9: 'gnome' imported but unused
+buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
+buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
+buildbot/scripts/imaginary.py:12: undefined name 'size'
+buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
+"""
+        log = step.addLog("stdio")
+        log.addStdout(output)
+        log.finish()
+        step.createSummary(log)
+        desc = step.descriptionDone
+        self.failUnless("unused=2" in desc)
+        self.failUnless("undefined=1" in desc)
+        self.failUnless("redefs=3" in desc)
+        self.failUnless("import*=1" in desc)
+        self.failIf("misc" in desc)
+

Added: vendor/buildbot/current/buildbot/test/test_svnpoller.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_svnpoller.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_svnpoller.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,429 @@
+# -*- test-case-name: buildbot.test.test_svnpoller -*-
+
+import time
+from twisted.internet import defer
+from twisted.trial import unittest
+from buildbot.changes.svnpoller import SVNPoller
+
+# this is the output of "svn info --xml
+# svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
+prefix_output = """\
+<?xml version="1.0"?>
+<info>
+<entry
+   kind="dir"
+   path="trunk"
+   revision="18354">
+<url>svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk</url>
+<repository>
+<root>svn+ssh://svn.twistedmatrix.com/svn/Twisted</root>
+<uuid>bbbe8e31-12d6-0310-92fd-ac37d47ddeeb</uuid>
+</repository>
+<commit
+   revision="18352">
+<author>jml</author>
+<date>2006-10-01T02:37:34.063255Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+# and this is "svn info --xml svn://svn.twistedmatrix.com/svn/Twisted". I
+# think this is kind of a degenerate case; it might even be a form of error.
+prefix_output_2 = """\
+<?xml version="1.0"?>
+<info>
+</info>
+"""
+
+# this is the svn info output for a local repository, svn info --xml
+# file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository
+prefix_output_3 = """\
+<?xml version="1.0"?>
+<info>
+<entry
+   kind="dir"
+   path="SVN-Repository"
+   revision="3">
+<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</url>
+<repository>
+<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
+</repository>
+<commit
+   revision="3">
+<author>warner</author>
+<date>2006-10-01T07:37:04.182499Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+# % svn info --xml file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk
+
+prefix_output_4 = """\
+<?xml version="1.0"?>
+<info>
+<entry
+   kind="dir"
+   path="trunk"
+   revision="3">
+<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk</url>
+<repository>
+<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
+</repository>
+<commit
+   revision="1">
+<author>warner</author>
+<date>2006-10-01T07:37:02.286440Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+
+
+class ComputePrefix(unittest.TestCase):
+    def test1(self):
+        base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
+        s = SVNPoller(base + "/")
+        self.failUnlessEqual(s.svnurl, base) # verify slash-stripping
+        prefix = s.determine_prefix(prefix_output)
+        self.failUnlessEqual(prefix, "trunk")
+        self.failUnlessEqual(s._prefix, prefix)
+
+    def test2(self):
+        base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted"
+        s = SVNPoller(base)
+        self.failUnlessEqual(s.svnurl, base)
+        prefix = s.determine_prefix(prefix_output_2)
+        self.failUnlessEqual(prefix, "")
+
+    def test3(self):
+        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository"
+        s = SVNPoller(base)
+        self.failUnlessEqual(s.svnurl, base)
+        prefix = s.determine_prefix(prefix_output_3)
+        self.failUnlessEqual(prefix, "")
+
+    def test4(self):
+        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk"
+        s = SVNPoller(base)
+        self.failUnlessEqual(s.svnurl, base)
+        prefix = s.determine_prefix(prefix_output_4)
+        self.failUnlessEqual(prefix, "sample/trunk")
+
+# output from svn log on .../SVN-Repository/sample
+# (so it includes trunk and branches)
+sample_base = "file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+sample_logentries = [None] * 4
+
+sample_logentries[3] = """\
+<logentry
+   revision="4">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+<paths>
+<path
+   action="M">/sample/trunk/version.c</path>
+</paths>
+<msg>revised_to_2</msg>
+</logentry>
+"""
+
+sample_logentries[2] = """\
+<logentry
+   revision="3">
+<author>warner</author>
+<date>2006-10-01T19:35:10.215692Z</date>
+<paths>
+<path
+   action="M">/sample/branch/main.c</path>
+</paths>
+<msg>commit_on_branch</msg>
+</logentry>
+"""
+
+sample_logentries[1] = """\
+<logentry
+   revision="2">
+<author>warner</author>
+<date>2006-10-01T19:35:09.154973Z</date>
+<paths>
+<path
+   copyfrom-path="/sample/trunk"
+   copyfrom-rev="1"
+   action="A">/sample/branch</path>
+</paths>
+<msg>make_branch</msg>
+</logentry>
+"""
+
+sample_logentries[0] = """\
+<logentry
+   revision="1">
+<author>warner</author>
+<date>2006-10-01T19:35:08.642045Z</date>
+<paths>
+<path
+   action="A">/sample</path>
+<path
+   action="A">/sample/trunk</path>
+<path
+   action="A">/sample/trunk/subdir/subdir.c</path>
+<path
+   action="A">/sample/trunk/main.c</path>
+<path
+   action="A">/sample/trunk/version.c</path>
+<path
+   action="A">/sample/trunk/subdir</path>
+</paths>
+<msg>sample_project_files</msg>
+</logentry>
+"""
+
+sample_info_output = """\
+<?xml version="1.0"?>
+<info>
+<entry
+   kind="dir"
+   path="sample"
+   revision="4">
+<url>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample</url>
+<repository>
+<root>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>4f94adfc-c41e-0410-92d5-fbf86b7c7689</uuid>
+</repository>
+<commit
+   revision="4">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+
+changes_output_template = """\
+<?xml version="1.0"?>
+<log>
+%s</log>
+"""
+
+def make_changes_output(maxrevision):
+    # return what 'svn log' would have produced just after the given revision
+    # was committed
+    logs = sample_logentries[0:maxrevision]
+    assert len(logs) == maxrevision
+    logs.reverse()
+    output = changes_output_template % ("".join(logs))
+    return output
+
+def split_file(path):
+    pieces = path.split("/")
+    if pieces[0] == "branch":
+        return "branch", "/".join(pieces[1:])
+    if pieces[0] == "trunk":
+        return None, "/".join(pieces[1:])
+    raise RuntimeError("there shouldn't be any files like %s" % path)
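+# for example, split_file("trunk/main.c") returns (None, "main.c") and
+# split_file("branch/main.c") returns ("branch", "main.c"); any other
+# top-level directory name raises RuntimeError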
+
+class MySVNPoller(SVNPoller):
+    def __init__(self, *args, **kwargs):
+        SVNPoller.__init__(self, *args, **kwargs)
+        self.pending_commands = []
+        self.finished_changes = []
+
+    def getProcessOutput(self, args):
+        d = defer.Deferred()
+        self.pending_commands.append((args, d))
+        return d
+
+    def submit_changes(self, changes):
+        self.finished_changes.extend(changes)
+
+class ComputeChanges(unittest.TestCase):
+    def test1(self):
+        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+        s = SVNPoller(base)
+        s._prefix = "sample"
+        output = make_changes_output(4)
+        doc = s.parse_logs(output)
+
+        newlast, logentries = s._filter_new_logentries(doc, 4)
+        self.failUnlessEqual(newlast, 4)
+        self.failUnlessEqual(len(logentries), 0)
+
+        newlast, logentries = s._filter_new_logentries(doc, 3)
+        self.failUnlessEqual(newlast, 4)
+        self.failUnlessEqual(len(logentries), 1)
+
+        newlast, logentries = s._filter_new_logentries(doc, 1)
+        self.failUnlessEqual(newlast, 4)
+        self.failUnlessEqual(len(logentries), 3)
+
+        newlast, logentries = s._filter_new_logentries(doc, None)
+        self.failUnlessEqual(newlast, 4)
+        self.failUnlessEqual(len(logentries), 0)
+
+    def testChanges(self):
+        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+        s = SVNPoller(base, split_file=split_file)
+        s._prefix = "sample"
+        doc = s.parse_logs(make_changes_output(3))
+        newlast, logentries = s._filter_new_logentries(doc, 1)
+        # so we see revisions 2 and 3 as being new
+        self.failUnlessEqual(newlast, 3)
+        changes = s.create_changes(logentries)
+        self.failUnlessEqual(len(changes), 2)
+        self.failUnlessEqual(changes[0].branch, "branch")
+        self.failUnlessEqual(changes[0].revision, 2)
+        self.failUnlessEqual(changes[1].branch, "branch")
+        self.failUnlessEqual(changes[1].files, ["main.c"])
+        self.failUnlessEqual(changes[1].revision, 3)
+
+        # and now pull in r4
+        doc = s.parse_logs(make_changes_output(4))
+        newlast, logentries = s._filter_new_logentries(doc, newlast)
+        self.failUnlessEqual(newlast, 4)
+        # so we see revision 4 as being new
+        changes = s.create_changes(logentries)
+        self.failUnlessEqual(len(changes), 1)
+        self.failUnlessEqual(changes[0].branch, None)
+        self.failUnlessEqual(changes[0].revision, 4)
+        self.failUnlessEqual(changes[0].files, ["version.c"])
+
+    def testFirstTime(self):
+        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+        s = SVNPoller(base, split_file=split_file)
+        s._prefix = "sample"
+        doc = s.parse_logs(make_changes_output(4))
+        logentries = s.get_new_logentries(doc)
+        # SVNPoller ignores all changes that happened before it was started
+        self.failUnlessEqual(len(logentries), 0)
+        self.failUnlessEqual(s.last_change, 4)
+
+class Misc(unittest.TestCase):
+    def testAlreadyWorking(self):
+        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+        s = MySVNPoller(base)
+        d = s.checksvn()
+        # the SVNPoller is now waiting for its getProcessOutput to finish
+        self.failUnlessEqual(s.overrun_counter, 0)
+        d2 = s.checksvn()
+        self.failUnlessEqual(s.overrun_counter, 1)
+        self.failUnlessEqual(len(s.pending_commands), 1)
+
+    def testGetRoot(self):
+        base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
+        s = MySVNPoller(base)
+        d = s.checksvn()
+        # the SVNPoller is now waiting for its getProcessOutput to finish
+        self.failUnlessEqual(len(s.pending_commands), 1)
+        self.failUnlessEqual(s.pending_commands[0][0],
+                             ["info", "--xml", "--non-interactive", base])
+
+def makeTime(timestring):
+    datefmt = '%Y/%m/%d %H:%M:%S'
+    when = time.mktime(time.strptime(timestring, datefmt))
+    return when
+
+
+class Everything(unittest.TestCase):
+    def test1(self):
+        s = MySVNPoller(sample_base, split_file=split_file)
+        d = s.checksvn()
+        # the SVNPoller is now waiting for its getProcessOutput to finish
+        self.failUnlessEqual(len(s.pending_commands), 1)
+        self.failUnlessEqual(s.pending_commands[0][0],
+                             ["info", "--xml", "--non-interactive",
+                              sample_base])
+        d = s.pending_commands[0][1]
+        s.pending_commands.pop(0)
+        d.callback(sample_info_output)
+        # now it should be waiting for the 'svn log' command
+        self.failUnlessEqual(len(s.pending_commands), 1)
+        self.failUnlessEqual(s.pending_commands[0][0],
+                             ["log", "--xml", "--verbose", "--non-interactive",
+                              "--limit=100", sample_base])
+        d = s.pending_commands[0][1]
+        s.pending_commands.pop(0)
+        d.callback(make_changes_output(1))
+        # the command ignores the first batch of changes
+        self.failUnlessEqual(len(s.finished_changes), 0)
+        self.failUnlessEqual(s.last_change, 1)
+
+        # now fire it again, nothing changing
+        d = s.checksvn()
+        self.failUnlessEqual(s.pending_commands[0][0],
+                             ["log", "--xml", "--verbose", "--non-interactive",
+                              "--limit=100", sample_base])
+        d = s.pending_commands[0][1]
+        s.pending_commands.pop(0)
+        d.callback(make_changes_output(1))
+        # nothing has changed
+        self.failUnlessEqual(len(s.finished_changes), 0)
+        self.failUnlessEqual(s.last_change, 1)
+
+        # and again, with r2 this time
+        d = s.checksvn()
+        self.failUnlessEqual(s.pending_commands[0][0],
+                             ["log", "--xml", "--verbose", "--non-interactive",
+                              "--limit=100", sample_base])
+        d = s.pending_commands[0][1]
+        s.pending_commands.pop(0)
+        d.callback(make_changes_output(2))
+        # r2 should appear
+        self.failUnlessEqual(len(s.finished_changes), 1)
+        self.failUnlessEqual(s.last_change, 2)
+
+        c = s.finished_changes[0]
+        self.failUnlessEqual(c.branch, "branch")
+        self.failUnlessEqual(c.revision, 2)
+        self.failUnlessEqual(c.files, [''])
+        # TODO: this is what creating the branch looks like: a Change with a
+        # zero-length file. We should decide if we want filenames like this
+        # in the Change (and make sure nobody else gets confused by it) or if
+        # we want to strip them out.
+        self.failUnlessEqual(c.comments, "make_branch")
+
+        # and again at r2, so nothing should change
+        d = s.checksvn()
+        self.failUnlessEqual(s.pending_commands[0][0],
+                             ["log", "--xml", "--verbose", "--non-interactive",
+                              "--limit=100", sample_base])
+        d = s.pending_commands[0][1]
+        s.pending_commands.pop(0)
+        d.callback(make_changes_output(2))
+        # nothing has changed
+        self.failUnlessEqual(len(s.finished_changes), 1)
+        self.failUnlessEqual(s.last_change, 2)
+
+        # and again with both r3 and r4 appearing together
+        d = s.checksvn()
+        self.failUnlessEqual(s.pending_commands[0][0],
+                             ["log", "--xml", "--verbose", "--non-interactive",
+                              "--limit=100", sample_base])
+        d = s.pending_commands[0][1]
+        s.pending_commands.pop(0)
+        d.callback(make_changes_output(4))
+        self.failUnlessEqual(len(s.finished_changes), 3)
+        self.failUnlessEqual(s.last_change, 4)
+
+        c3 = s.finished_changes[1]
+        self.failUnlessEqual(c3.branch, "branch")
+        self.failUnlessEqual(c3.revision, 3)
+        self.failUnlessEqual(c3.files, ["main.c"])
+        self.failUnlessEqual(c3.comments, "commit_on_branch")
+
+        c4 = s.finished_changes[2]
+        self.failUnlessEqual(c4.branch, None)
+        self.failUnlessEqual(c4.revision, 4)
+        self.failUnlessEqual(c4.files, ["version.c"])
+        self.failUnlessEqual(c4.comments, "revised_to_2")
+        self.failUnless(abs(c4.when - time.time()) < 60)
+
+
+# TODO:
+#  get coverage of split_file returning None
+#  point at a live SVN server for a little while

Added: vendor/buildbot/current/buildbot/test/test_transfer.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_transfer.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_transfer.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,299 @@
+# -*- test-case-name: buildbot.test.test_transfer -*-
+
+import os
+from stat import ST_MODE
+from twisted.trial import unittest
+from buildbot.twcompat import maybeWait
+from buildbot.steps.transfer import FileUpload, FileDownload
+from buildbot.test.runutils import StepTester
+from buildbot.status.builder import SUCCESS, FAILURE
+
+
+# these steps pass a pb.Referenceable inside their arguments, so we have to
+# catch and wrap them. If the LocalAsRemote wrapper were a proper membrane,
+# we wouldn't have to do this.
+
+class Upload(StepTester, unittest.TestCase):
+
+    def filterArgs(self, args):
+        if "writer" in args:
+            args["writer"] = self.wrap(args["writer"])
+        return args
+
+    def testSuccess(self):
+        self.slavebase = "Upload.testSuccess.slave"
+        self.masterbase = "Upload.testSuccess.master"
+        sb = self.makeSlaveBuilder()
+        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+                              "build"))
+        # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+        # files will appear there. Under trial, we're chdir'ed into
+        # _trial_temp instead, so use a different masterdest= to keep the
+        # uploaded file in a test-local directory
+        masterdest = os.path.join(self.masterbase, "dest.text")
+        step = self.makeStep(FileUpload,
+                             slavesrc="source.txt",
+                             masterdest=masterdest)
+        slavesrc = os.path.join(self.slavebase,
+                                self.slavebuilderbase,
+                                "build",
+                                "source.txt")
+        contents = "this is the source file\n" * 1000
+        open(slavesrc, "w").write(contents)
+        f = open(masterdest, "w")
+        f.write("overwrite me\n")
+        f.close()
+
+        d = self.runStep(step)
+        def _checkUpload(results):
+            step_status = step.step_status
+            #l = step_status.getLogs()
+            #if l:
+            #    logtext = l[0].getText()
+            #    print logtext
+            self.failUnlessEqual(results, SUCCESS)
+            self.failUnless(os.path.exists(masterdest))
+            masterdest_contents = open(masterdest, "r").read()
+            self.failUnlessEqual(masterdest_contents, contents)
+        d.addCallback(_checkUpload)
+        return maybeWait(d)
+
+    def testMaxsize(self):
+        self.slavebase = "Upload.testMaxsize.slave"
+        self.masterbase = "Upload.testMaxsize.master"
+        sb = self.makeSlaveBuilder()
+        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+                              "build"))
+        masterdest = os.path.join(self.masterbase, "dest2.text")
+        step = self.makeStep(FileUpload,
+                             slavesrc="source.txt",
+                             masterdest=masterdest,
+                             maxsize=12345)
+        slavesrc = os.path.join(self.slavebase,
+                                self.slavebuilderbase,
+                                "build",
+                                "source.txt")
+        contents = "this is the source file\n" * 1000
+        open(slavesrc, "w").write(contents)
+        f = open(masterdest, "w")
+        f.write("overwrite me\n")
+        f.close()
+
+        d = self.runStep(step)
+        def _checkUpload(results):
+            step_status = step.step_status
+            #l = step_status.getLogs()
+            #if l:
+            #    logtext = l[0].getText()
+            #    print logtext
+            self.failUnlessEqual(results, FAILURE)
+            self.failUnless(os.path.exists(masterdest))
+            masterdest_contents = open(masterdest, "r").read()
+            self.failUnlessEqual(len(masterdest_contents), 12345)
+            self.failUnlessEqual(masterdest_contents, contents[:12345])
+        d.addCallback(_checkUpload)
+        return maybeWait(d)
+
+    def testMode(self):
+        self.slavebase = "Upload.testMode.slave"
+        self.masterbase = "Upload.testMode.master"
+        sb = self.makeSlaveBuilder()
+        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+                              "build"))
+        masterdest = os.path.join(self.masterbase, "dest3.text")
+        step = self.makeStep(FileUpload,
+                             slavesrc="source.txt",
+                             masterdest=masterdest,
+                             mode=0755)
+        slavesrc = os.path.join(self.slavebase,
+                                self.slavebuilderbase,
+                                "build",
+                                "source.txt")
+        contents = "this is the source file\n"
+        open(slavesrc, "w").write(contents)
+        f = open(masterdest, "w")
+        f.write("overwrite me\n")
+        f.close()
+
+        d = self.runStep(step)
+        def _checkUpload(results):
+            step_status = step.step_status
+            #l = step_status.getLogs()
+            #if l:
+            #    logtext = l[0].getText()
+            #    print logtext
+            self.failUnlessEqual(results, SUCCESS)
+            self.failUnless(os.path.exists(masterdest))
+            masterdest_contents = open(masterdest, "r").read()
+            self.failUnlessEqual(masterdest_contents, contents)
+            # mask (bitwise AND) with 0777 to ignore the setuid/setgid/sticky bits
+            dest_mode = os.stat(masterdest)[ST_MODE] & 0777
+            self.failUnlessEqual(dest_mode, 0755,
+                                 "target mode was %o, we wanted %o" %
+                                 (dest_mode, 0755))
+        d.addCallback(_checkUpload)
+        return maybeWait(d)
+
+    def testMissingFile(self):
+        self.slavebase = "Upload.testMissingFile.slave"
+        self.masterbase = "Upload.testMissingFile.master"
+        sb = self.makeSlaveBuilder()
+        step = self.makeStep(FileUpload,
+                             slavesrc="MISSING.txt",
+                             masterdest="dest.txt")
+        masterdest = os.path.join(self.masterbase, "dest4.txt")
+
+        d = self.runStep(step)
+        def _checkUpload(results):
+            step_status = step.step_status
+            self.failUnlessEqual(results, FAILURE)
+            self.failIf(os.path.exists(masterdest))
+            l = step_status.getLogs()
+            logtext = l[0].getText().strip()
+            self.failUnless(logtext.startswith("Cannot open file"))
+            self.failUnless(logtext.endswith("for upload"))
+        d.addCallback(_checkUpload)
+        return maybeWait(d)
+
+
+
+class Download(StepTester, unittest.TestCase):
+
+    def filterArgs(self, args):
+        if "reader" in args:
+            args["reader"] = self.wrap(args["reader"])
+        return args
+
+    def testSuccess(self):
+        self.slavebase = "Download.testSuccess.slave"
+        self.masterbase = "Download.testSuccess.master"
+        sb = self.makeSlaveBuilder()
+        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+                              "build"))
+        mastersrc = os.path.join(self.masterbase, "source.text")
+        slavedest = os.path.join(self.slavebase,
+                                 self.slavebuilderbase,
+                                 "build",
+                                 "dest.txt")
+        step = self.makeStep(FileDownload,
+                             mastersrc=mastersrc,
+                             slavedest="dest.txt")
+        contents = "this is the source file\n" * 1000  # 24kb, so two blocks
+        open(mastersrc, "w").write(contents)
+        f = open(slavedest, "w")
+        f.write("overwrite me\n")
+        f.close()
+
+        d = self.runStep(step)
+        def _checkDownload(results):
+            step_status = step.step_status
+            self.failUnlessEqual(results, SUCCESS)
+            self.failUnless(os.path.exists(slavedest))
+            slavedest_contents = open(slavedest, "r").read()
+            self.failUnlessEqual(slavedest_contents, contents)
+        d.addCallback(_checkDownload)
+        return maybeWait(d)
+
+    def testMaxsize(self):
+        self.slavebase = "Download.testMaxsize.slave"
+        self.masterbase = "Download.testMaxsize.master"
+        sb = self.makeSlaveBuilder()
+        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+                              "build"))
+        mastersrc = os.path.join(self.masterbase, "source.text")
+        slavedest = os.path.join(self.slavebase,
+                                 self.slavebuilderbase,
+                                 "build",
+                                 "dest.txt")
+        step = self.makeStep(FileDownload,
+                             mastersrc=mastersrc,
+                             slavedest="dest.txt",
+                             maxsize=12345)
+        contents = "this is the source file\n" * 1000  # 24kb, so two blocks
+        open(mastersrc, "w").write(contents)
+        f = open(slavedest, "w")
+        f.write("overwrite me\n")
+        f.close()
+
+        d = self.runStep(step)
+        def _checkDownload(results):
+            step_status = step.step_status
+            # the file should be truncated, and the step a FAILURE
+            self.failUnlessEqual(results, FAILURE)
+            self.failUnless(os.path.exists(slavedest))
+            slavedest_contents = open(slavedest, "r").read()
+            self.failUnlessEqual(len(slavedest_contents), 12345)
+            self.failUnlessEqual(slavedest_contents, contents[:12345])
+        d.addCallback(_checkDownload)
+        return maybeWait(d)
+
+    def testMode(self):
+        self.slavebase = "Download.testMode.slave"
+        self.masterbase = "Download.testMode.master"
+        sb = self.makeSlaveBuilder()
+        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+                              "build"))
+        mastersrc = os.path.join(self.masterbase, "source.text")
+        slavedest = os.path.join(self.slavebase,
+                                 self.slavebuilderbase,
+                                 "build",
+                                 "dest.txt")
+        step = self.makeStep(FileDownload,
+                             mastersrc=mastersrc,
+                             slavedest="dest.txt",
+                             mode=0755)
+        contents = "this is the source file\n"
+        open(mastersrc, "w").write(contents)
+        f = open(slavedest, "w")
+        f.write("overwrite me\n")
+        f.close()
+
+        d = self.runStep(step)
+        def _checkDownload(results):
+            step_status = step.step_status
+            self.failUnlessEqual(results, SUCCESS)
+            self.failUnless(os.path.exists(slavedest))
+            slavedest_contents = open(slavedest, "r").read()
+            self.failUnlessEqual(slavedest_contents, contents)
+            # mask (bitwise AND) with 0777 to ignore the setuid/setgid/sticky bits
+            dest_mode = os.stat(slavedest)[ST_MODE] & 0777
+            self.failUnlessEqual(dest_mode, 0755,
+                                 "target mode was %o, we wanted %o" %
+                                 (dest_mode, 0755))
+        d.addCallback(_checkDownload)
+        return maybeWait(d)
+
+    def testMissingFile(self):
+        self.slavebase = "Download.testMissingFile.slave"
+        self.masterbase = "Download.testMissingFile.master"
+        sb = self.makeSlaveBuilder()
+        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+                              "build"))
+        mastersrc = os.path.join(self.masterbase, "MISSING.text")
+        slavedest = os.path.join(self.slavebase,
+                                 self.slavebuilderbase,
+                                 "build",
+                                 "dest.txt")
+        step = self.makeStep(FileDownload,
+                             mastersrc=mastersrc,
+                             slavedest="dest.txt")
+
+        d = self.runStep(step)
+        def _checkDownload(results):
+            step_status = step.step_status
+            self.failUnlessEqual(results, FAILURE)
+            self.failIf(os.path.exists(slavedest))
+            l = step_status.getLogs()
+            logtext = l[0].getText().strip()
+            self.failUnless(logtext.endswith(" not available at master"))
+        d.addCallback(_checkDownload)
+        return maybeWait(d)
+
+
+# TODO:
+#  test relative paths, ~/paths
+#   need to implement expanduser() for slave-side
+#  test error message when master-side file is in a missing directory
+#  remove workdir= default?
+

Added: vendor/buildbot/current/buildbot/test/test_twisted.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_twisted.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_twisted.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,219 @@
+# -*- test-case-name: buildbot.test.test_twisted -*-
+
+from twisted.trial import unittest
+
+from buildbot import interfaces
+from buildbot.steps.python_twisted import countFailedTests
+from buildbot.steps.python_twisted import Trial, TrialTestCaseCounter
+from buildbot.status import builder
+
+noisy = 0
+if noisy:
+    from twisted.python.log import startLogging
+    import sys
+    startLogging(sys.stdout)
+
+out1 = """
+-------------------------------------------------------------------------------
+Ran 13 tests in 1.047s
+
+OK
+"""
+
+out2 = """
+-------------------------------------------------------------------------------
+Ran 12 tests in 1.040s
+
+FAILED (failures=1)
+"""
+
+out3 = """
+ NotImplementedError
+-------------------------------------------------------------------------------
+Ran 13 tests in 1.042s
+
+FAILED (failures=1, errors=1)
+"""
+
+out4 = """
+unparseable
+"""
+
+out5 = """
+   File "/usr/home/warner/stuff/python/twisted/Twisted-CVS/twisted/test/test_defer.py", line 79, in testTwoCallbacks
+    self.fail("just because")
+   File "/usr/home/warner/stuff/python/twisted/Twisted-CVS/twisted/trial/unittest.py", line 21, in fail
+    raise AssertionError, message
+ AssertionError: just because
+unparseable
+"""
+
+out6 = """
+===============================================================================
+SKIPPED: testProtocolLocalhost (twisted.flow.test.test_flow.FlowTest)
+-------------------------------------------------------------------------------
+XXX freezes, fixme
+===============================================================================
+SKIPPED: testIPv6 (twisted.names.test.test_names.HostsTestCase)
+-------------------------------------------------------------------------------
+IPv6 support is not in our hosts resolver yet
+===============================================================================
+EXPECTED FAILURE: testSlots (twisted.test.test_rebuild.NewStyleTestCase)
+-------------------------------------------------------------------------------
+Traceback (most recent call last):
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 240, in _runPhase
+    stage(*args, **kwargs)
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 262, in _main
+    self.runner(self.method)
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/runner.py", line 95, in runTest
+    method()
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/test/test_rebuild.py", line 130, in testSlots
+    rebuild.updateInstance(self.m.SlottedClass())
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/python/rebuild.py", line 114, in updateInstance
+    self.__class__ = latestClass(self.__class__)
+TypeError: __class__ assignment: 'SlottedClass' object layout differs from 'SlottedClass'
+===============================================================================
+FAILURE: testBatchFile (twisted.conch.test.test_sftp.TestOurServerBatchFile)
+-------------------------------------------------------------------------------
+Traceback (most recent call last):
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 240, in _runPhase
+    stage(*args, **kwargs)
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 262, in _main
+    self.runner(self.method)
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/runner.py", line 95, in runTest
+    method()
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/conch/test/test_sftp.py", line 450, in testBatchFile
+    self.failUnlessEqual(res[1:-2], ['testDirectory', 'testRemoveFile', 'testRenameFile', 'testfile1'])
+  File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 115, in failUnlessEqual
+    raise FailTest, (msg or '%r != %r' % (first, second))
+FailTest: [] != ['testDirectory', 'testRemoveFile', 'testRenameFile', 'testfile1']
+-------------------------------------------------------------------------------
+Ran 1454 tests in 911.579s
+
+FAILED (failures=2, skips=49, expectedFailures=9)
+Exception exceptions.AttributeError: "'NoneType' object has no attribute 'StringIO'" in <bound method RemoteReference.__del__ of <twisted.spread.pb.RemoteReference instance at 0x27036c0>> ignored
+"""
+
+class MyTrial(Trial):
+    def addTestResult(self, testname, results, text, logs):
+        self.results.append((testname, results, text, logs))
+    def addCompleteLog(self, name, log):
+        pass
+
+class MyLogFile:
+    def __init__(self, text):
+        self.text = text
+    def getText(self):
+        return self.text
+
+
+class Count(unittest.TestCase):
+
+    def count(self, total, failures=0, errors=0,
+              expectedFailures=0, unexpectedSuccesses=0, skips=0):
+        d = {
+            'total': total,
+            'failures': failures,
+            'errors': errors,
+            'expectedFailures': expectedFailures,
+            'unexpectedSuccesses': unexpectedSuccesses,
+            'skips': skips,
+            }
+        return d
+
+    def testCountFailedTests(self):
+        count = countFailedTests(out1)
+        self.assertEquals(count, self.count(total=13))
+        count = countFailedTests(out2)
+        self.assertEquals(count, self.count(total=12, failures=1))
+        count = countFailedTests(out3)
+        self.assertEquals(count, self.count(total=13, failures=1, errors=1))
+        count = countFailedTests(out4)
+        self.assertEquals(count, self.count(total=None))
+        count = countFailedTests(out5)
+        self.assertEquals(count, self.count(total=None))
+
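+# Editor's sketch (not part of the original Buildbot file): testCountFailedTests
+# above feeds the trial outputs (out1..out5) to countFailedTests().  The regex
+# below only illustrates that kind of summary-line parsing; it is not
+# Buildbot's actual implementation.
+import re as _example_re
+_example_summary_re = _example_re.compile(r"^Ran (\d+) tests in [\d.]+s$",
+                                          _example_re.M)
+def _example_count_total(output):
+    m = _example_summary_re.search(output)
+    if m:
+        return int(m.group(1))
+    return None # unparseable, like out4 and out5
+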
+class Counter(unittest.TestCase):
+
+    def setProgress(self, metric, value):
+        self.progress = (metric, value)
+
+    def testCounter(self):
+        self.progress = (None,None)
+        c = TrialTestCaseCounter()
+        c.setStep(self)
+        STDOUT = interfaces.LOG_CHANNEL_STDOUT
+        def add(text):
+            c.logChunk(None, None, None, STDOUT, text)
+        add("\n\n")
+        self.failUnlessEqual(self.progress, (None,None))
+        add("bogus line\n")
+        self.failUnlessEqual(self.progress, (None,None))
+        add("buildbot.test.test_config.ConfigTest.testBots ... [OK]\n")
+        self.failUnlessEqual(self.progress, ("tests", 1))
+        add("buildbot.test.test_config.ConfigTest.tes")
+        self.failUnlessEqual(self.progress, ("tests", 1))
+        add("tBuilders ... [OK]\n")
+        self.failUnlessEqual(self.progress, ("tests", 2))
+        # confirm that alternative line delimiters work too; ptys seem to
+        # emit \r\n rather than \n
+        add("buildbot.test.test_config.ConfigTest.testIRC ... [OK]\r\n")
+        self.failUnlessEqual(self.progress, ("tests", 3))
+        add("===============================================================================\n")
+        self.failUnlessEqual(self.progress, ("tests", 3))
+        add("buildbot.test.test_config.IOnlyLookLikeA.testLine ... [OK]\n")
+        self.failUnlessEqual(self.progress, ("tests", 3))
+
+
+
+class Parse(unittest.TestCase):
+    def failUnlessIn(self, substr, string):
+        self.failUnless(string.find(substr) != -1)
+
+    def testParse(self):
+        t = MyTrial(build=None, workdir=".", testpath=None, testChanges=True)
+        t.results = []
+        log = MyLogFile(out6)
+        t.createSummary(log)
+
+        self.failUnlessEqual(len(t.results), 4)
+        r1, r2, r3, r4 = t.results
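+        # (editor's note, not in the original file) r1..r4 correspond, in
+        # order, to the two SKIPPED sections, the EXPECTED FAILURE section,
+        # and the FAILURE section of out6 above.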
+        testname, results, text, logs = r1
+        self.failUnlessEqual(testname,
+                             ("twisted", "flow", "test", "test_flow",
+                              "FlowTest", "testProtocolLocalhost"))
+        self.failUnlessEqual(results, builder.SKIPPED)
+        self.failUnlessEqual(text, ['skipped'])
+        self.failUnlessIn("XXX freezes, fixme", logs)
+        self.failUnless(logs.startswith("SKIPPED:"))
+        self.failUnless(logs.endswith("fixme\n"))
+
+        testname, results, text, logs = r2
+        self.failUnlessEqual(testname,
+                             ("twisted", "names", "test", "test_names",
+                              "HostsTestCase", "testIPv6"))
+        self.failUnlessEqual(results, builder.SKIPPED)
+        self.failUnlessEqual(text, ['skipped'])
+        self.failUnless(logs.startswith("SKIPPED: testIPv6"))
+        self.failUnless(logs.endswith("IPv6 support is not in our hosts resolver yet\n"))
+
+        testname, results, text, logs = r3
+        self.failUnlessEqual(testname,
+                             ("twisted", "test", "test_rebuild",
+                              "NewStyleTestCase", "testSlots"))
+        self.failUnlessEqual(results, builder.SUCCESS)
+        self.failUnlessEqual(text, ['expected', 'failure'])
+        self.failUnless(logs.startswith("EXPECTED FAILURE: "))
+        self.failUnlessIn("\nTraceback ", logs)
+        self.failUnless(logs.endswith("layout differs from 'SlottedClass'\n"))
+
+        testname, results, text, logs = r4
+        self.failUnlessEqual(testname,
+                             ("twisted", "conch", "test", "test_sftp",
+                              "TestOurServerBatchFile", "testBatchFile"))
+        self.failUnlessEqual(results, builder.FAILURE)
+        self.failUnlessEqual(text, ['failure'])
+        self.failUnless(logs.startswith("FAILURE: "))
+        self.failUnlessIn("Traceback ", logs)
+        self.failUnless(logs.endswith("'testRenameFile', 'testfile1']\n"))
+

Added: vendor/buildbot/current/buildbot/test/test_util.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_util.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_util.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,26 @@
+# -*- test-case-name: buildbot.test.test_util -*-
+
+from twisted.trial import unittest
+
+from buildbot import util
+
+
+class Foo(util.ComparableMixin):
+    compare_attrs = ["a", "b"]
+
+    def __init__(self, a, b, c):
+        self.a, self.b, self.c = a,b,c
+
+
+class Bar(Foo, util.ComparableMixin):
+    compare_attrs = ["b", "c"]
+
+class Compare(unittest.TestCase):
+    def testCompare(self):
+        f1 = Foo(1, 2, 3)
+        f2 = Foo(1, 2, 4)
+        f3 = Foo(1, 3, 4)
+        b1 = Bar(1, 2, 3)
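+        # (editor's note, not in the original file) 'c' is not listed in
+        # Foo.compare_attrs, so f1 and f2 compare equal even though their c
+        # attributes differ; f3 differs in 'b', which is compared.  b1 is a
+        # different class with different compare_attrs, so it should not
+        # compare equal to f1.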
+        self.failUnless(f1 == f2)
+        self.failIf(f1 == f3)
+        self.failIf(f1 == b1)

Added: vendor/buildbot/current/buildbot/test/test_vc.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_vc.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_vc.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,2391 @@
+# -*- test-case-name: buildbot.test.test_vc -*-
+
+from __future__ import generators
+
+import sys, os, time, re
+from email.Utils import mktime_tz, parsedate_tz
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor, utils, protocol, error
+from twisted.python import failure
+
+#defer.Deferred.debug = True
+
+from twisted.python import log
+#log.startLogging(sys.stderr)
+
+from buildbot import master, interfaces
+from buildbot.slave import bot, commands
+from buildbot.slave.commands import rmdirRecursive
+from buildbot.status.builder import SUCCESS, FAILURE
+from buildbot.process import base
+from buildbot.steps import source
+from buildbot.changes import changes
+from buildbot.sourcestamp import SourceStamp
+from buildbot.twcompat import maybeWait, which
+from buildbot.scripts import tryclient
+from buildbot.test.runutils import SignalMixin
+
+#step.LoggedRemoteCommand.debug = True
+
+# buildbot.twcompat will patch these into t.i.defer if necessary
+from twisted.internet.defer import waitForDeferred, deferredGenerator
+
+# Most of these tests (all but SourceStamp) depend upon having a set of
+# repositories from which we can perform checkouts. These repositories are
+# created by the setUp method at the start of each test class. In earlier
+# versions these repositories were created offline and distributed with a
+# separate tarball named 'buildbot-test-vc-1.tar.gz'. This is no longer
+# necessary.
+
+# CVS requires a local file repository. Providing remote access is beyond
+# the feasible abilities of this test program (needs pserver or ssh).
+
+# SVN requires a local file repository. To provide remote access over HTTP
+# requires an apache server with DAV support and mod_svn, way beyond what we
+# can test from here.
+
+# Arch and Darcs both allow remote (read-only) operation with any web
+# server. We test both local file access and HTTP access (by spawning a
+# small web server to provide access to the repository files while the test
+# is running).
+
+# Perforce starts the daemon running on localhost. Unfortunately, it must
+# use a predetermined Internet-domain port number, unless we want to go
+# all-out: bind the listen socket ourselves and pretend to be inetd.
+
+try:
+    import cStringIO
+    StringIO = cStringIO
+except ImportError:
+    import StringIO
+
+class _PutEverythingGetter(protocol.ProcessProtocol):
+    def __init__(self, deferred, stdin):
+        self.deferred = deferred
+        self.outBuf = StringIO.StringIO()
+        self.errBuf = StringIO.StringIO()
+        self.outReceived = self.outBuf.write
+        self.errReceived = self.errBuf.write
+        self.stdin = stdin
+
+    def connectionMade(self):
+        if self.stdin is not None:
+            self.transport.write(self.stdin)
+            self.transport.closeStdin()
+
+    def processEnded(self, reason):
+        out = self.outBuf.getvalue()
+        err = self.errBuf.getvalue()
+        e = reason.value
+        code = e.exitCode
+        if e.signal:
+            self.deferred.errback((out, err, e.signal))
+        else:
+            self.deferred.callback((out, err, code))
+
+def myGetProcessOutputAndValue(executable, args=(), env={}, path='.',
+                               reactor=None, stdin=None):
+    """Like twisted.internet.utils.getProcessOutputAndValue but takes
+    stdin, too."""
+    if reactor is None:
+        from twisted.internet import reactor
+    d = defer.Deferred()
+    p = _PutEverythingGetter(d, stdin)
+    reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
+    return d
+
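+# Editor's sketch (not part of the original file): how myGetProcessOutputAndValue
+# is typically driven.  The '/bin/cat' path is illustrative only, and this
+# helper is never called by the tests.
+def _example_run_cat():
+    d = myGetProcessOutputAndValue("/bin/cat", stdin="hello\n")
+    def show((out, err, code)):
+        print "stdout=%r stderr=%r rc=%d" % (out, err, code)
+    d.addCallback(show)
+    return d
+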
+config_vc = """
+from buildbot.process import factory
+from buildbot.steps import source
+s = factory.s
+
+f1 = factory.BuildFactory([
+    %s,
+    ])
+c = {}
+c['bots'] = [['bot1', 'sekrit']]
+c['sources'] = []
+c['schedulers'] = []
+c['builders'] = [{'name': 'vc', 'slavename': 'bot1',
+                  'builddir': 'vc-dir', 'factory': f1}]
+c['slavePortnum'] = 0
+BuildmasterConfig = c
+"""
+
+p0_diff = r"""
+Index: subdir/subdir.c
+===================================================================
+RCS file: /home/warner/stuff/Projects/BuildBot/code-arch/_trial_temp/test_vc/repositories/CVS-Repository/sample/subdir/subdir.c,v
+retrieving revision 1.1.1.1
+diff -u -r1.1.1.1 subdir.c
+--- subdir/subdir.c	14 Aug 2005 01:32:49 -0000	1.1.1.1
++++ subdir/subdir.c	14 Aug 2005 01:36:15 -0000
+@@ -4,6 +4,6 @@
+ int
+ main(int argc, const char *argv[])
+ {
+-    printf("Hello subdir.\n");
++    printf("Hello patched subdir.\n");
+     return 0;
+ }
+"""
+
+# this patch does not include the filename headers, so it is
+# patchlevel-neutral
+TRY_PATCH = '''
+@@ -5,6 +5,6 @@
+ int
+ main(int argc, const char *argv[])
+ {
+-    printf("Hello subdir.\\n");
++    printf("Hello try.\\n");
+     return 0;
+ }
+'''
+
+MAIN_C = '''
+// this is main.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+    printf("Hello world.\\n");
+    return 0;
+}
+'''
+
+BRANCH_C = '''
+// this is main.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+    printf("Hello branch.\\n");
+    return 0;
+}
+'''
+
+VERSION_C = '''
+// this is version.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+    printf("Hello world, version=%d\\n");
+    return 0;
+}
+'''
+
+SUBDIR_C = '''
+// this is subdir/subdir.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+    printf("Hello subdir.\\n");
+    return 0;
+}
+'''
+
+TRY_C = '''
+// this is subdir/subdir.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+    printf("Hello try.\\n");
+    return 0;
+}
+'''
+
+def qw(s):
+    return s.split()
+
+class VCS_Helper:
+    # this is a helper class which keeps track of whether each VC system is
+    # available, and whether the repository for each has been created. There
+    # is one instance of this class, at module level, shared between all test
+    # cases.
+
+    def __init__(self):
+        self._helpers = {}
+        self._isCapable = {}
+        self._excuses = {}
+        self._repoReady = {}
+
+    def registerVC(self, name, helper):
+        self._helpers[name] = helper
+        self._repoReady[name] = False
+
+    def skipIfNotCapable(self, name):
+        """Either return None, or raise SkipTest"""
+        d = self.capable(name)
+        def _maybeSkip(res):
+            if not res[0]:
+                raise unittest.SkipTest(res[1])
+        d.addCallback(_maybeSkip)
+        return d
+
+    def capable(self, name):
+        """Return a Deferred that fires with (True,None) if this host offers
+        the given VC tool, or (False,excuse) if it does not (and therefore
+        the tests should be skipped)."""
+
+        if self._isCapable.has_key(name):
+            if self._isCapable[name]:
+                return defer.succeed((True,None))
+            else:
+                return defer.succeed((False, self._excuses[name]))
+        d = defer.maybeDeferred(self._helpers[name].capable)
+        def _capable(res):
+            if res[0]:
+                self._isCapable[name] = True
+            else:
+                self._excuses[name] = res[1]
+            return res
+        d.addCallback(_capable)
+        return d
+
+    def getHelper(self, name):
+        return self._helpers[name]
+
+    def createRepository(self, name):
+        """Return a Deferred that fires when the repository is set up."""
+        if self._repoReady[name]:
+            return defer.succeed(True)
+        d = self._helpers[name].createRepository()
+        def _ready(res):
+            self._repoReady[name] = True
+        d.addCallback(_ready)
+        return d
+
+VCS = VCS_Helper()
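+
+# Editor's sketch (not part of the original file): the shape of a helper's
+# capable() check, matching the (True, None) / (False, excuse) contract
+# described in VCS_Helper.capable above.  The 'cvs' tool name is illustrative
+# only; the real helpers also record the path of the located binary.
+def _example_capable():
+    paths = which("cvs")
+    if not paths:
+        return (False, "CVS is not installed")
+    return (True, None)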
+
+
+# the overall plan here:
+#
+# Each VC system is tested separately, all using the same source tree defined
+# in the 'files' dictionary above. Each VC system gets its own TestCase
+# subclass. The first test case that is run will create the repository during
+# setUp(), making two branches: 'trunk' and 'branch'. The trunk gets a copy
+# of all the files in 'files'. The variant of good.c is committed on the
+# branch.
+#
+# then testCheckout is run, which does a number of checkout/clobber/update
+# builds. These all use trunk r1. It then runs self.fix(), which modifies
+# 'fixable.c', then performs another build and makes sure the tree has been
+# updated.
+#
+# testBranch uses trunk-r1 and branch-r1, making sure that we clobber the
+# tree properly when we switch between them
+#
+# testPatch does a trunk-r1 checkout and applies a patch.
+#
+# testTryGetPatch performs a trunk-r1 checkout, modifies some files, then
+# verifies that tryclient.getSourceStamp figures out the base revision and
+# what got changed.
+
+
+# vc_create makes a repository at r1 with three files: main.c, version.c, and
+# subdir/foo.c . It also creates a branch from r1 (called b1) in which main.c
+# says "hello branch" instead of "hello world". self.trunk[] contains
+# revision stamps for everything on the trunk, and self.branch[] does the
+# same for the branch.
+
+# vc_revise() checks out a tree at HEAD, changes version.c, then checks it
+# back in. The new version stamp is appended to self.trunk[]. The tree is
+# removed afterwards.
+
+# vc_try_checkout(workdir, rev) checks out a tree at REV, then changes
+# subdir/subdir.c to say 'Hello try'
+# vc_try_finish(workdir) removes the tree and cleans up any VC state
+# necessary (like deleting the Arch archive entry).
+
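+# Editor's sketch (not part of the original file): the per-VC-system methods
+# described in the comments above, written out as a bare interface.  The
+# concrete helper classes provide the real implementations; the bodies here
+# are placeholders only.
+class _ExampleHelperInterface:
+    def vc_create(self):
+        raise NotImplementedError # build the repository at r1, plus a branch
+    def vc_revise(self):
+        raise NotImplementedError # commit a new version.c, extend self.trunk
+    def vc_try_checkout(self, workdir, rev, branch=None):
+        raise NotImplementedError # check out REV, then modify subdir/subdir.c
+    def vc_try_finish(self, workdir):
+        raise NotImplementedError # remove the tree, clean up any VC state
+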
+
+class BaseHelper:
+    def __init__(self):
+        self.trunk = []
+        self.branch = []
+        self.allrevs = []
+
+    def capable(self):
+        # this is also responsible for setting self.vcexe
+        raise NotImplementedError
+
+    def createBasedir(self):
+        # you must call this from createRepository
+        self.repbase = os.path.abspath(os.path.join("test_vc",
+                                                    "repositories"))
+        if not os.path.isdir(self.repbase):
+            os.makedirs(self.repbase)
+
+    def createRepository(self):
+        # this will only be called once per process
+        raise NotImplementedError
+
+    def populate(self, basedir):
+        os.makedirs(basedir)
+        os.makedirs(os.path.join(basedir, "subdir"))
+        open(os.path.join(basedir, "main.c"), "w").write(MAIN_C)
+        self.version = 1
+        version_c = VERSION_C % self.version
+        open(os.path.join(basedir, "version.c"), "w").write(version_c)
+        open(os.path.join(basedir, "main.c"), "w").write(MAIN_C)
+        open(os.path.join(basedir, "subdir", "subdir.c"), "w").write(SUBDIR_C)
+
+    def populate_branch(self, basedir):
+        open(os.path.join(basedir, "main.c"), "w").write(BRANCH_C)
+
+    def addTrunkRev(self, rev):
+        self.trunk.append(rev)
+        self.allrevs.append(rev)
+    def addBranchRev(self, rev):
+        self.branch.append(rev)
+        self.allrevs.append(rev)
+
+    def runCommand(self, basedir, command, failureIsOk=False, stdin=None):
+        # all commands passed to do() should be strings or lists. If they are
+        # strings, none of the arguments may have spaces. This makes the
+        # commands less verbose at the expense of restricting what they can
+        # specify.
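+        # (editor's example, not in the original) "cvs -d /repo init" works
+        # fine as a string, but an argument containing spaces needs the list
+        # form, e.g. ["svn", "commit", "-m", "a log message with spaces"].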
+        if type(command) not in (list, tuple):
+            command = command.split(" ")
+        DEBUG = False
+        if DEBUG:
+            print "do %s" % command
+            print " in basedir %s" % basedir
+            if stdin:
+                print " STDIN:\n", stdin, "\n--STDIN DONE"
+        env = os.environ.copy()
+        env['LC_ALL'] = "C"
+        d = myGetProcessOutputAndValue(command[0], command[1:],
+                                       env=env, path=basedir,
+                                       stdin=stdin)
+        def check((out, err, code)):
+            if DEBUG:
+                print
+                print "command was: %s" % command
+                if out: print "out: %s" % out
+                if err: print "err: %s" % err
+                print "code: %s" % code
+            if code != 0 and not failureIsOk:
+                log.msg("command %s finished with exit code %d" %
+                        (command, code))
+                log.msg(" and stdout %s" % (out,))
+                log.msg(" and stderr %s" % (err,))
+                raise RuntimeError("command %s finished with exit code %d"
+                                   % (command, code)
+                                   + ": see logs for stdout")
+            return out
+        d.addCallback(check)
+        return d
+
+    def do(self, basedir, command, failureIsOk=False, stdin=None):
+        d = self.runCommand(basedir, command, failureIsOk=failureIsOk,
+                            stdin=stdin)
+        return waitForDeferred(d)
+
+    def dovc(self, basedir, command, failureIsOk=False, stdin=None):
+        """Like do(), but the VC binary will be prepended to COMMAND."""
+        if isinstance(command, (str, unicode)):
+            command = self.vcexe + " " + command
+        else:
+            # command is a list
+            command = [self.vcexe] + command
+        return self.do(basedir, command, failureIsOk, stdin)
+
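+# Editor's note (not part of the original file): do() wraps its Deferred in
+# waitForDeferred, so the concrete helpers are written in the old
+# deferredGenerator style.  A hedged sketch of the calling pattern, with an
+# illustrative command and a hypothetical 'self.repo' attribute:
+#
+#     def createRepository(self):
+#         self.createBasedir()
+#         w = self.dovc(self.repbase, "init %s" % self.repo)
+#         yield w; w.getResult()
+#     createRepository = deferredGenerator(createRepository)
+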
+class VCBase(SignalMixin):
+    metadir = None
+    createdRepository = False
+    master = None
+    slave = None
+    helper = None
+    httpServer = None
+    httpPort = None
+    skip = None
+    has_got_revision = False
+    has_got_revision_branches_are_merged = False # for SVN
+
+    def failUnlessIn(self, substring, string, msg=None):
+        # trial provides a version of this, but it requires python-2.3 to
+        # work on plain strings.
+        if msg is None:
+            msg = ("did not see the expected substring '%s' in string '%s'" %
+                   (substring, string))
+        self.failUnless(string.find(substring) != -1, msg)
+
+    def setUp(self):
+        d = VCS.skipIfNotCapable(self.vc_name)
+        d.addCallback(self._setUp1)
+        return maybeWait(d)
+
+    def _setUp1(self, res):
+        self.helper = VCS.getHelper(self.vc_name)
+
+        if os.path.exists("basedir"):
+            rmdirRecursive("basedir")
+        os.mkdir("basedir")
+        self.master = master.BuildMaster("basedir")
+        self.slavebase = os.path.abspath("slavebase")
+        if os.path.exists(self.slavebase):
+            rmdirRecursive(self.slavebase)
+        os.mkdir("slavebase")
+
+        d = VCS.createRepository(self.vc_name)
+        return d
+
+    def connectSlave(self):
+        port = self.master.slavePort._port.getHost().port
+        slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
+                               self.slavebase, keepalive=0, usePTY=1)
+        self.slave = slave
+        slave.startService()
+        d = self.master.botmaster.waitUntilBuilderAttached("vc")
+        return d
+
+    def loadConfig(self, config):
+        # reloading the config file causes a new 'listDirs' command to be
+        # sent to the slave. To synchronize on this properly, it is easiest
+        # to stop and restart the slave.
+        d = defer.succeed(None)
+        if self.slave:
+            d = self.master.botmaster.waitUntilBuilderDetached("vc")
+            self.slave.stopService()
+        d.addCallback(lambda res: self.master.loadConfig(config))
+        d.addCallback(lambda res: self.connectSlave())
+        return d
+
+    def serveHTTP(self):
+        # launch an HTTP server to serve the repository files
+        from twisted.web import static, server
+        from twisted.internet import reactor
+        self.root = static.File(self.helper.repbase)
+        self.site = server.Site(self.root)
+        self.httpServer = reactor.listenTCP(0, self.site)
+        self.httpPort = self.httpServer.getHost().port
+
+    def doBuild(self, shouldSucceed=True, ss=None):
+        c = interfaces.IControl(self.master)
+
+        if ss is None:
+            ss = SourceStamp()
+        #print "doBuild(ss: b=%s rev=%s)" % (ss.branch, ss.revision)
+        req = base.BuildRequest("test_vc forced build", ss)
+        d = req.waitUntilFinished()
+        c.getBuilder("vc").requestBuild(req)
+        d.addCallback(self._doBuild_1, shouldSucceed)
+        return d
+    def _doBuild_1(self, bs, shouldSucceed):
+        r = bs.getResults()
+        if r != SUCCESS and shouldSucceed:
+            print
+            print
+            if not bs.isFinished():
+                print "Hey, build wasn't even finished!"
+            print "Build did not succeed:", r, bs.getText()
+            for s in bs.getSteps():
+                for l in s.getLogs():
+                    print "--- START step %s / log %s ---" % (s.getName(),
+                                                              l.getName())
+                    print l.getTextWithHeaders()
+                    print "--- STOP ---"
+                    print
+            self.fail("build did not succeed")
+        return bs
+
+    def printLogs(self, bs):
+        for s in bs.getSteps():
+            for l in s.getLogs():
+                print "--- START step %s / log %s ---" % (s.getName(),
+                                                          l.getName())
+                print l.getTextWithHeaders()
+                print "--- STOP ---"
+                print
+
+    def touch(self, d, f):
+        open(os.path.join(d,f),"w").close()
+    def shouldExist(self, *args):
+        target = os.path.join(*args)
+        self.failUnless(os.path.exists(target),
+                        "expected to find %s but didn't" % target)
+    def shouldNotExist(self, *args):
+        target = os.path.join(*args)
+        self.failIf(os.path.exists(target),
+                    "expected to NOT find %s, but did" % target)
+    def shouldContain(self, d, f, contents):
+        c = open(os.path.join(d, f), "r").read()
+        self.failUnlessIn(contents, c)
+
+    def checkGotRevision(self, bs, expected):
+        if self.has_got_revision:
+            self.failUnlessEqual(bs.getProperty("got_revision"), expected)
+
+    def checkGotRevisionIsLatest(self, bs):
+        expected = self.helper.trunk[-1]
+        if self.has_got_revision_branches_are_merged:
+            expected = self.helper.allrevs[-1]
+        self.checkGotRevision(bs, expected)
+
+    def do_vctest(self, testRetry=True):
+        vctype = self.vctype
+        args = self.helper.vcargs
+        m = self.master
+        self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+        self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+        # woo double-substitution
+        s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+        for k,v in args.items():
+            s += ", %s=%s" % (k, repr(v))
+        s += ")"
+        config = config_vc % s
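+        # (editor's note, not in the original file) the double-substitution
+        # works in two stages.  With illustrative values:
+        #   s              -> "s(SVN, timeout=200, workdir='build', mode='%s', ...)"
+        #   config_vc % s  -> a complete master.cfg text, still containing mode='%s'
+        #   config % mode  -> the checkout mode filled in below
+        #                     ('clobber', 'update', 'copy', 'export')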
+
+        m.loadConfig(config % 'clobber')
+        m.readConfig = True
+        m.startService()
+
+        d = self.connectSlave()
+        d.addCallback(lambda res: log.msg("testing clobber"))
+        d.addCallback(self._do_vctest_clobber)
+        d.addCallback(lambda res: log.msg("doing update"))
+        d.addCallback(lambda res: self.loadConfig(config % 'update'))
+        d.addCallback(lambda res: log.msg("testing update"))
+        d.addCallback(self._do_vctest_update)
+        if testRetry:
+            d.addCallback(lambda res: log.msg("testing update retry"))
+            d.addCallback(self._do_vctest_update_retry)
+        d.addCallback(lambda res: log.msg("doing copy"))
+        d.addCallback(lambda res: self.loadConfig(config % 'copy'))
+        d.addCallback(lambda res: log.msg("testing copy"))
+        d.addCallback(self._do_vctest_copy)
+        if self.metadir:
+            d.addCallback(lambda res: log.msg("doing export"))
+            d.addCallback(lambda res: self.loadConfig(config % 'export'))
+            d.addCallback(lambda res: log.msg("testing export"))
+            d.addCallback(self._do_vctest_export)
+        return d
+
+    def _do_vctest_clobber(self, res):
+        d = self.doBuild() # initial checkout
+        d.addCallback(self._do_vctest_clobber_1)
+        return d
+    def _do_vctest_clobber_1(self, bs):
+        self.shouldExist(self.workdir, "main.c")
+        self.shouldExist(self.workdir, "version.c")
+        self.shouldExist(self.workdir, "subdir", "subdir.c")
+        if self.metadir:
+            self.shouldExist(self.workdir, self.metadir)
+        self.failUnlessEqual(bs.getProperty("revision"), None)
+        self.failUnlessEqual(bs.getProperty("branch"), None)
+        self.checkGotRevisionIsLatest(bs)
+
+        self.touch(self.workdir, "newfile")
+        self.shouldExist(self.workdir, "newfile")
+        d = self.doBuild() # rebuild clobbers workdir
+        d.addCallback(self._do_vctest_clobber_2)
+        return d
+    def _do_vctest_clobber_2(self, res):
+        self.shouldNotExist(self.workdir, "newfile")
+
+    def _do_vctest_update(self, res):
+        log.msg("_do_vctest_update")
+        d = self.doBuild() # rebuild with update
+        d.addCallback(self._do_vctest_update_1)
+        return d
+    def _do_vctest_update_1(self, bs):
+        log.msg("_do_vctest_update_1")
+        self.shouldExist(self.workdir, "main.c")
+        self.shouldExist(self.workdir, "version.c")
+        self.shouldContain(self.workdir, "version.c",
+                           "version=%d" % self.helper.version)
+        if self.metadir:
+            self.shouldExist(self.workdir, self.metadir)
+        self.failUnlessEqual(bs.getProperty("revision"), None)
+        self.checkGotRevisionIsLatest(bs)
+
+        self.touch(self.workdir, "newfile")
+        d = self.doBuild() # update rebuild leaves new files
+        d.addCallback(self._do_vctest_update_2)
+        return d
+    def _do_vctest_update_2(self, bs):
+        log.msg("_do_vctest_update_2")
+        self.shouldExist(self.workdir, "main.c")
+        self.shouldExist(self.workdir, "version.c")
+        self.touch(self.workdir, "newfile")
+        # now make a change to the repository and make sure we pick it up
+        d = self.helper.vc_revise()
+        d.addCallback(lambda res: self.doBuild())
+        d.addCallback(self._do_vctest_update_3)
+        return d
+    def _do_vctest_update_3(self, bs):
+        log.msg("_do_vctest_update_3")
+        self.shouldExist(self.workdir, "main.c")
+        self.shouldExist(self.workdir, "version.c")
+        self.shouldContain(self.workdir, "version.c",
+                           "version=%d" % self.helper.version)
+        self.shouldExist(self.workdir, "newfile")
+        self.failUnlessEqual(bs.getProperty("revision"), None)
+        self.checkGotRevisionIsLatest(bs)
+
+        # now "update" to an older revision
+        d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[-2]))
+        d.addCallback(self._do_vctest_update_4)
+        return d
+    def _do_vctest_update_4(self, bs):
+        log.msg("_do_vctest_update_4")
+        self.shouldExist(self.workdir, "main.c")
+        self.shouldExist(self.workdir, "version.c")
+        self.shouldContain(self.workdir, "version.c",
+                           "version=%d" % (self.helper.version-1))
+        self.failUnlessEqual(bs.getProperty("revision"),
+                             self.helper.trunk[-2])
+        self.checkGotRevision(bs, self.helper.trunk[-2])
+
+        # now update to the newer revision
+        d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[-1]))
+        d.addCallback(self._do_vctest_update_5)
+        return d
+    def _do_vctest_update_5(self, bs):
+        log.msg("_do_vctest_update_5")
+        self.shouldExist(self.workdir, "main.c")
+        self.shouldExist(self.workdir, "version.c")
+        self.shouldContain(self.workdir, "version.c",
+                           "version=%d" % self.helper.version)
+        self.failUnlessEqual(bs.getProperty("revision"),
+                             self.helper.trunk[-1])
+        self.checkGotRevision(bs, self.helper.trunk[-1])
+
+
+    def _do_vctest_update_retry(self, res):
+        # certain local changes will prevent an update from working. The
+        # most common is to replace a file with a directory, or vice
+        # versa. The slave code should spot the failure and do a
+        # clobber/retry.
+        os.unlink(os.path.join(self.workdir, "main.c"))
+        os.mkdir(os.path.join(self.workdir, "main.c"))
+        self.touch(os.path.join(self.workdir, "main.c"), "foo")
+        self.touch(self.workdir, "newfile")
+
+        d = self.doBuild() # update, but must clobber to handle the error
+        d.addCallback(self._do_vctest_update_retry_1)
+        return d
+    def _do_vctest_update_retry_1(self, bs):
+        # SVN-1.4.0 doesn't seem to have any problem with the
+        # file-turned-directory issue (although older versions did). So don't
+        # actually check that the tree was clobbered. As long as the update
+        # succeeded (checked by doBuild), that should be good enough.
+        #self.shouldNotExist(self.workdir, "newfile")
+        pass
+
+    def _do_vctest_copy(self, res):
+        d = self.doBuild() # copy rebuild clobbers new files
+        d.addCallback(self._do_vctest_copy_1)
+        return d
+    def _do_vctest_copy_1(self, bs):
+        if self.metadir:
+            self.shouldExist(self.workdir, self.metadir)
+        self.shouldNotExist(self.workdir, "newfile")
+        self.touch(self.workdir, "newfile")
+        self.touch(self.vcdir, "newvcfile")
+        self.failUnlessEqual(bs.getProperty("revision"), None)
+        self.checkGotRevisionIsLatest(bs)
+
+        d = self.doBuild() # copy rebuild clobbers new files
+        d.addCallback(self._do_vctest_copy_2)
+        return d
+    def _do_vctest_copy_2(self, bs):
+        if self.metadir:
+            self.shouldExist(self.workdir, self.metadir)
+        self.shouldNotExist(self.workdir, "newfile")
+        self.shouldExist(self.vcdir, "newvcfile")
+        self.shouldExist(self.workdir, "newvcfile")
+        self.failUnlessEqual(bs.getProperty("revision"), None)
+        self.checkGotRevisionIsLatest(bs)
+        self.touch(self.workdir, "newfile")
+
+    def _do_vctest_export(self, res):
+        d = self.doBuild() # export rebuild clobbers new files
+        d.addCallback(self._do_vctest_export_1)
+        return d
+    def _do_vctest_export_1(self, bs):
+        self.shouldNotExist(self.workdir, self.metadir)
+        self.shouldNotExist(self.workdir, "newfile")
+        self.failUnlessEqual(bs.getProperty("revision"), None)
+        #self.checkGotRevisionIsLatest(bs)
+        # VC 'export' is not required to have a got_revision
+        self.touch(self.workdir, "newfile")
+
+        d = self.doBuild() # export rebuild clobbers new files
+        d.addCallback(self._do_vctest_export_2)
+        return d
+    def _do_vctest_export_2(self, bs):
+        self.shouldNotExist(self.workdir, self.metadir)
+        self.shouldNotExist(self.workdir, "newfile")
+        self.failUnlessEqual(bs.getProperty("revision"), None)
+        #self.checkGotRevisionIsLatest(bs)
+        # VC 'export' is not required to have a got_revision
+
+    def do_patch(self):
+        vctype = self.vctype
+        args = self.helper.vcargs
+        m = self.master
+        self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+        self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+        s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+        for k,v in args.items():
+            s += ", %s=%s" % (k, repr(v))
+        s += ")"
+        self.config = config_vc % s
+
+        m.loadConfig(self.config % "clobber")
+        m.readConfig = True
+        m.startService()
+
+        ss = SourceStamp(revision=self.helper.trunk[-1], patch=(0, p0_diff))
+
+        d = self.connectSlave()
+        d.addCallback(lambda res: self.doBuild(ss=ss))
+        d.addCallback(self._doPatch_1)
+        return d
+    def _doPatch_1(self, bs):
+        self.shouldContain(self.workdir, "version.c",
+                           "version=%d" % self.helper.version)
+        # make sure the file actually got patched
+        subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+                                "subdir", "subdir.c")
+        data = open(subdir_c, "r").read()
+        self.failUnlessIn("Hello patched subdir.\\n", data)
+        self.failUnlessEqual(bs.getProperty("revision"),
+                             self.helper.trunk[-1])
+        self.checkGotRevision(bs, self.helper.trunk[-1])
+
+        # make sure that a rebuild does not use the leftover patched workdir
+        d = self.master.loadConfig(self.config % "update")
+        d.addCallback(lambda res: self.doBuild(ss=None))
+        d.addCallback(self._doPatch_2)
+        return d
+    def _doPatch_2(self, bs):
+        # make sure the file is back to its original
+        subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+                                "subdir", "subdir.c")
+        data = open(subdir_c, "r").read()
+        self.failUnlessIn("Hello subdir.\\n", data)
+        self.failUnlessEqual(bs.getProperty("revision"), None)
+        self.checkGotRevisionIsLatest(bs)
+
+        # now make sure we can patch an older revision. We need at least two
+        # revisions here, so we might have to create one first
+        if len(self.helper.trunk) < 2:
+            d = self.helper.vc_revise()
+            d.addCallback(self._doPatch_3)
+            return d
+        return self._doPatch_3()
+
+    def _doPatch_3(self, res=None):
+        ss = SourceStamp(revision=self.helper.trunk[-2], patch=(0, p0_diff))
+        d = self.doBuild(ss=ss)
+        d.addCallback(self._doPatch_4)
+        return d
+    def _doPatch_4(self, bs):
+        self.shouldContain(self.workdir, "version.c",
+                           "version=%d" % (self.helper.version-1))
+        # and make sure the file actually got patched
+        subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+                                "subdir", "subdir.c")
+        data = open(subdir_c, "r").read()
+        self.failUnlessIn("Hello patched subdir.\\n", data)
+        self.failUnlessEqual(bs.getProperty("revision"),
+                             self.helper.trunk[-2])
+        self.checkGotRevision(bs, self.helper.trunk[-2])
+
+        # now check that we can patch a branch
+        ss = SourceStamp(branch=self.helper.branchname,
+                         revision=self.helper.branch[-1],
+                         patch=(0, p0_diff))
+        d = self.doBuild(ss=ss)
+        d.addCallback(self._doPatch_5)
+        return d
+    def _doPatch_5(self, bs):
+        self.shouldContain(self.workdir, "version.c",
+                           "version=%d" % 1)
+        self.shouldContain(self.workdir, "main.c", "Hello branch.")
+        subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+                                "subdir", "subdir.c")
+        data = open(subdir_c, "r").read()
+        self.failUnlessIn("Hello patched subdir.\\n", data)
+        self.failUnlessEqual(bs.getProperty("revision"),
+                             self.helper.branch[-1])
+        self.failUnlessEqual(bs.getProperty("branch"), self.helper.branchname)
+        self.checkGotRevision(bs, self.helper.branch[-1])
+
+
+    def do_vctest_once(self, shouldSucceed):
+        m = self.master
+        vctype = self.vctype
+        args = self.helper.vcargs
+        vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+        workdir = os.path.join(self.slavebase, "vc-dir", "build")
+        # woo double-substitution
+        s = "s(%s, timeout=200, workdir='build', mode='clobber'" % (vctype,)
+        for k,v in args.items():
+            s += ", %s=%s" % (k, repr(v))
+        s += ")"
+        config = config_vc % s
+
+        m.loadConfig(config)
+        m.readConfig = True
+        m.startService()
+
+        self.connectSlave()
+        d = self.doBuild(shouldSucceed) # initial checkout
+        return d
+
+    def do_branch(self):
+        log.msg("do_branch")
+        vctype = self.vctype
+        args = self.helper.vcargs
+        m = self.master
+        self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+        self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+        s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+        for k,v in args.items():
+            s += ", %s=%s" % (k, repr(v))
+        s += ")"
+        self.config = config_vc % s
+
+        m.loadConfig(self.config % "update")
+        m.readConfig = True
+        m.startService()
+
+        # first we do a build of the trunk
+        d = self.connectSlave()
+        d.addCallback(lambda res: self.doBuild(ss=SourceStamp()))
+        d.addCallback(self._doBranch_1)
+        return d
+    def _doBranch_1(self, bs):
+        log.msg("_doBranch_1")
+        # make sure the checkout was of the trunk
+        main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+        data = open(main_c, "r").read()
+        self.failUnlessIn("Hello world.", data)
+
+        # now do a checkout on the branch. The change in branch name should
+        # trigger a clobber.
+        self.touch(self.workdir, "newfile")
+        d = self.doBuild(ss=SourceStamp(branch=self.helper.branchname))
+        d.addCallback(self._doBranch_2)
+        return d
+    def _doBranch_2(self, bs):
+        log.msg("_doBranch_2")
+        # make sure it was on the branch
+        main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+        data = open(main_c, "r").read()
+        self.failUnlessIn("Hello branch.", data)
+        # and make sure the tree was clobbered
+        self.shouldNotExist(self.workdir, "newfile")
+
+        # doing another build on the same branch should not clobber the tree
+        self.touch(self.workdir, "newbranchfile")
+        d = self.doBuild(ss=SourceStamp(branch=self.helper.branchname))
+        d.addCallback(self._doBranch_3)
+        return d
+    def _doBranch_3(self, bs):
+        log.msg("_doBranch_3")
+        # make sure it is still on the branch
+        main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+        data = open(main_c, "r").read()
+        self.failUnlessIn("Hello branch.", data)
+        # and make sure the tree was not clobbered
+        self.shouldExist(self.workdir, "newbranchfile")
+
+        # now make sure that a non-branch checkout clobbers the tree
+        d = self.doBuild(ss=SourceStamp())
+        d.addCallback(self._doBranch_4)
+        return d
+    def _doBranch_4(self, bs):
+        log.msg("_doBranch_4")
+        # make sure it was on the trunk
+        main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+        data = open(main_c, "r").read()
+        self.failUnlessIn("Hello world.", data)
+        self.shouldNotExist(self.workdir, "newbranchfile")
+
+    def do_getpatch(self, doBranch=True):
+        log.msg("do_getpatch")
+        # prepare a buildslave to do checkouts
+        vctype = self.vctype
+        args = self.helper.vcargs
+        m = self.master
+        self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+        self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+        # woo double-substitution
+        s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+        for k,v in args.items():
+            s += ", %s=%s" % (k, repr(v))
+        s += ")"
+        config = config_vc % s
+
+        m.loadConfig(config % 'clobber')
+        m.readConfig = True
+        m.startService()
+
+        d = self.connectSlave()
+
+        # then set up the "developer's tree". first we modify a tree from the
+        # head of the trunk
+        tmpdir = "try_workdir"
+        self.trydir = os.path.join(self.helper.repbase, tmpdir)
+        rmdirRecursive(self.trydir)
+        d.addCallback(self.do_getpatch_trunkhead)
+        d.addCallback(self.do_getpatch_trunkold)
+        if doBranch:
+            d.addCallback(self.do_getpatch_branch)
+        d.addCallback(self.do_getpatch_finish)
+        return d
+
+    def do_getpatch_finish(self, res):
+        log.msg("do_getpatch_finish")
+        self.helper.vc_try_finish(self.trydir)
+        return res
+
+    def try_shouldMatch(self, filename):
+        devfilename = os.path.join(self.trydir, filename)
+        devfile = open(devfilename, "r").read()
+        slavefilename = os.path.join(self.workdir, filename)
+        slavefile = open(slavefilename, "r").read()
+        self.failUnlessEqual(devfile, slavefile,
+                             ("slavefile (%s) contains '%s'. "
+                              "developer's file (%s) contains '%s'. "
+                              "These ought to match") %
+                             (slavefilename, slavefile,
+                              devfilename, devfile))
+
+    def do_getpatch_trunkhead(self, res):
+        log.msg("do_getpatch_trunkhead")
+        d = self.helper.vc_try_checkout(self.trydir, self.helper.trunk[-1])
+        d.addCallback(self._do_getpatch_trunkhead_1)
+        return d
+    def _do_getpatch_trunkhead_1(self, res):
+        log.msg("_do_getpatch_trunkhead_1")
+        d = tryclient.getSourceStamp(self.vctype_try, self.trydir, None)
+        d.addCallback(self._do_getpatch_trunkhead_2)
+        return d
+    def _do_getpatch_trunkhead_2(self, ss):
+        log.msg("_do_getpatch_trunkhead_2")
+        d = self.doBuild(ss=ss)
+        d.addCallback(self._do_getpatch_trunkhead_3)
+        return d
+    def _do_getpatch_trunkhead_3(self, res):
+        log.msg("_do_getpatch_trunkhead_3")
+        # verify that the resulting buildslave tree matches the developer's
+        self.try_shouldMatch("main.c")
+        self.try_shouldMatch("version.c")
+        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
+    def do_getpatch_trunkold(self, res):
+        log.msg("do_getpatch_trunkold")
+        # now try a tree from an older revision. We need at least two
+        # revisions here, so we might have to create one first
+        if len(self.helper.trunk) < 2:
+            d = self.helper.vc_revise()
+            d.addCallback(self._do_getpatch_trunkold_1)
+            return d
+        return self._do_getpatch_trunkold_1()
+    def _do_getpatch_trunkold_1(self, res=None):
+        log.msg("_do_getpatch_trunkold_1")
+        d = self.helper.vc_try_checkout(self.trydir, self.helper.trunk[-2])
+        d.addCallback(self._do_getpatch_trunkold_2)
+        return d
+    def _do_getpatch_trunkold_2(self, res):
+        log.msg("_do_getpatch_trunkold_2")
+        d = tryclient.getSourceStamp(self.vctype_try, self.trydir, None)
+        d.addCallback(self._do_getpatch_trunkold_3)
+        return d
+    def _do_getpatch_trunkold_3(self, ss):
+        log.msg("_do_getpatch_trunkold_3")
+        d = self.doBuild(ss=ss)
+        d.addCallback(self._do_getpatch_trunkold_4)
+        return d
+    def _do_getpatch_trunkold_4(self, res):
+        log.msg("_do_getpatch_trunkold_4")
+        # verify that the resulting buildslave tree matches the developer's
+        self.try_shouldMatch("main.c")
+        self.try_shouldMatch("version.c")
+        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
+    def do_getpatch_branch(self, res):
+        log.msg("do_getpatch_branch")
+        # now try a tree from a branch
+        d = self.helper.vc_try_checkout(self.trydir, self.helper.branch[-1],
+                                        self.helper.branchname)
+        d.addCallback(self._do_getpatch_branch_1)
+        return d
+    def _do_getpatch_branch_1(self, res):
+        log.msg("_do_getpatch_branch_1")
+        d = tryclient.getSourceStamp(self.vctype_try, self.trydir,
+                                     self.helper.try_branchname)
+        d.addCallback(self._do_getpatch_branch_2)
+        return d
+    def _do_getpatch_branch_2(self, ss):
+        log.msg("_do_getpatch_branch_2")
+        d = self.doBuild(ss=ss)
+        d.addCallback(self._do_getpatch_branch_3)
+        return d
+    def _do_getpatch_branch_3(self, res):
+        log.msg("_do_getpatch_branch_3")
+        # verify that the resulting buildslave tree matches the developer's
+        self.try_shouldMatch("main.c")
+        self.try_shouldMatch("version.c")
+        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
+
+    def dumpPatch(self, patch):
+        # this exists to help me figure out the right 'patchlevel' value that
+        # should be returned by tryclient.getSourceStamp
+        n = self.mktemp()
+        open(n,"w").write(patch)
+        d = self.runCommand(".", ["lsdiff", n])
+        def p(res): print "lsdiff:", res.strip().split("\n")
+        d.addCallback(p)
+        return d
+
+
+    def tearDown(self):
+        d = defer.succeed(None)
+        if self.slave:
+            d2 = self.master.botmaster.waitUntilBuilderDetached("vc")
+            d.addCallback(lambda res: self.slave.stopService())
+            d.addCallback(lambda res: d2)
+        if self.master:
+            d.addCallback(lambda res: self.master.stopService())
+        if self.httpServer:
+            d.addCallback(lambda res: self.httpServer.stopListening())
+            def stopHTTPTimer():
+                try:
+                    from twisted.web import http # Twisted-2.0
+                except ImportError:
+                    from twisted.protocols import http # Twisted-1.3
+                http._logDateTimeStop() # shut down the internal timer. DUMB!
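+                # (otherwise trial's post-test reactor check complains about
+                # the leftover DelayedCall)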
+            d.addCallback(lambda res: stopHTTPTimer())
+        d.addCallback(lambda res: self.tearDown2())
+        return maybeWait(d)
+
+    def tearDown2(self):
+        pass
+
+class CVSHelper(BaseHelper):
+    branchname = "branch"
+    try_branchname = "branch"
+
+    def capable(self):
+        cvspaths = which('cvs')
+        if not cvspaths:
+            return (False, "CVS is not installed")
+        # cvs-1.10 (as shipped with OS-X 10.3 "Panther") is too old for this
+        # test. There is a situation where we check out a tree, make a
+        # change, then commit it back, and CVS refuses to believe that we're
+        # operating in a CVS tree. I tested cvs-1.12.9 and it works ok. OS-X
+        # 10.4 "Tiger" comes with cvs-1.11, but I haven't tested that yet.
+        # For now, skip the tests if we've got 1.10.
+        log.msg("running %s --version.." % (cvspaths[0],))
+        d = utils.getProcessOutput(cvspaths[0], ["--version"],
+                                   env=os.environ)
+        d.addCallback(self._capable, cvspaths[0])
+        return d
+
+    def _capable(self, v, vcexe):
+        m = re.search(r'\(CVS\) ([\d\.]+) ', v)
+        if not m:
+            log.msg("couldn't identify CVS version number in output:")
+            log.msg("'''%s'''" % v)
+            log.msg("skipping tests")
+            return (False, "Found CVS but couldn't identify its version")
+        ver = m.group(1)
+        log.msg("found CVS version '%s'" % ver)
+        if ver == "1.10":
+            return (False, "Found CVS, but it is too old")
+        self.vcexe = vcexe
+        return (True, None)
+
+    def getdate(self):
+        # this timestamp is eventually passed to CVS in a -D argument, and
+        # strftime's %z specifier doesn't seem to work reliably (I get +0000
+        # where I should get +0700 under linux sometimes, and windows seems
+        # to want to put a verbose 'Eastern Standard Time' in there), so
+        # leave off the timezone specifier and treat this as localtime. A
+        # valid alternative would be to use a hard-coded +0000 and
+        # time.gmtime().
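+        # (the result looks like "2004-09-08 09:04:00")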
+        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
+    def createRepository(self):
+        self.createBasedir()
+        self.cvsrep = cvsrep = os.path.join(self.repbase, "CVS-Repository")
+        tmp = os.path.join(self.repbase, "cvstmp")
+
+        w = self.dovc(self.repbase, "-d %s init" % cvsrep)
+        yield w; w.getResult() # we must getResult() to raise any exceptions
+
+        self.populate(tmp)
+        cmd = ("-d %s import" % cvsrep +
+               " -m sample_project_files sample vendortag start")
+        w = self.dovc(tmp, cmd)
+        yield w; w.getResult()
+        rmdirRecursive(tmp)
+        # take a timestamp as the first revision number
+        time.sleep(2)
+        self.addTrunkRev(self.getdate())
+        time.sleep(2)
+
+        w = self.dovc(self.repbase,
+                      "-d %s checkout -d cvstmp sample" % self.cvsrep)
+        yield w; w.getResult()
+
+        w = self.dovc(tmp, "tag -b %s" % self.branchname)
+        yield w; w.getResult()
+        self.populate_branch(tmp)
+        w = self.dovc(tmp,
+                      "commit -m commit_on_branch -r %s" % self.branchname)
+        yield w; w.getResult()
+        rmdirRecursive(tmp)
+        time.sleep(2)
+        self.addBranchRev(self.getdate())
+        time.sleep(2)
+        self.vcargs = { 'cvsroot': self.cvsrep, 'cvsmodule': "sample" }
+    createRepository = deferredGenerator(createRepository)
+
+
+    def vc_revise(self):
+        tmp = os.path.join(self.repbase, "cvstmp")
+
+        w = self.dovc(self.repbase,
+                      "-d %s checkout -d cvstmp sample" % self.cvsrep)
+        yield w; w.getResult()
+        self.version += 1
+        version_c = VERSION_C % self.version
+        open(os.path.join(tmp, "version.c"), "w").write(version_c)
+        w = self.dovc(tmp,
+                      "commit -m revised_to_%d version.c" % self.version)
+        yield w; w.getResult()
+        rmdirRecursive(tmp)
+        time.sleep(2)
+        self.addTrunkRev(self.getdate())
+        time.sleep(2)
+    vc_revise = deferredGenerator(vc_revise)
+
+    def vc_try_checkout(self, workdir, rev, branch=None):
+        # 'workdir' is an absolute path
+        assert os.path.abspath(workdir) == workdir
+        cmd = [self.vcexe, "-d", self.cvsrep, "checkout",
+               "-d", workdir,
+               "-D", rev]
+        if branch is not None:
+            cmd.append("-r")
+            cmd.append(branch)
+        cmd.append("sample")
+        w = self.do(self.repbase, cmd)
+        yield w; w.getResult()
+        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+    vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+    def vc_try_finish(self, workdir):
+        rmdirRecursive(workdir)
+
+class CVS(VCBase, unittest.TestCase):
+    vc_name = "cvs"
+
+    metadir = "CVS"
+    vctype = "source.CVS"
+    vctype_try = "cvs"
+    # CVS gives us got_revision, but it is based entirely upon the local
+    # clock, which means it is unlikely to match the timestamp taken earlier.
+    # This might be enough for common use, but won't be good enough for our
+    # tests to accept, so pretend it doesn't have got_revision at all.
+    has_got_revision = False
+
+    def testCheckout(self):
+        d = self.do_vctest()
+        return maybeWait(d)
+
+    def testPatch(self):
+        d = self.do_patch()
+        return maybeWait(d)
+
+    def testCheckoutBranch(self):
+        d = self.do_branch()
+        return maybeWait(d)
+        
+    def testTry(self):
+        d = self.do_getpatch(doBranch=False)
+        return maybeWait(d)
+
+VCS.registerVC(CVS.vc_name, CVSHelper())
+
+
+class SVNHelper(BaseHelper):
+    branchname = "sample/branch"
+    try_branchname = "sample/branch"
+
+    def capable(self):
+        svnpaths = which('svn')
+        svnadminpaths = which('svnadmin')
+        if not svnpaths:
+            return (False, "SVN is not installed")
+        if not svnadminpaths:
+            return (False, "svnadmin is not installed")
+        # we need svn to be compiled with the ra_local access
+        # module
+        log.msg("running svn --version..")
+        env = os.environ.copy()
+        env['LC_ALL'] = "C"
+        d = utils.getProcessOutput(svnpaths[0], ["--version"],
+                                   env=env)
+        d.addCallback(self._capable, svnpaths[0], svnadminpaths[0])
+        return d
+
+    def _capable(self, v, vcexe, svnadmin):
+        if v.find("handles 'file' schem") != -1:
+            # older versions say 'schema', 1.2.0 and beyond say 'scheme'
+            self.vcexe = vcexe
+            self.svnadmin = svnadmin
+            return (True, None)
+        excuse = ("%s found but it does not support 'file:' " +
+                  "schema, skipping svn tests") % vcexe
+        log.msg(excuse)
+        return (False, excuse)
+
+    def createRepository(self):
+        self.createBasedir()
+        self.svnrep = os.path.join(self.repbase,
+                                   "SVN-Repository").replace('\\','/')
+        tmp = os.path.join(self.repbase, "svntmp")
+        if sys.platform == 'win32':
+            # On Windows, paths do not start with a /, hence the extra slash
+            # in file:///
+            self.svnurl = "file:///%s" % self.svnrep
+        else:
+            self.svnurl = "file://%s" % self.svnrep
+        self.svnurl_trunk = self.svnurl + "/sample/trunk"
+        self.svnurl_branch = self.svnurl + "/sample/branch"
+
+        w = self.do(self.repbase, self.svnadmin+" create %s" % self.svnrep)
+        yield w; w.getResult()
+
+        self.populate(tmp)
+        w = self.dovc(tmp,
+                      "import -m sample_project_files %s" %
+                      self.svnurl_trunk)
+        yield w; out = w.getResult()
+        rmdirRecursive(tmp)
+        m = re.search(r'Committed revision (\d+)\.', out)
+        assert m.group(1) == "1" # first revision is always "1"
+        self.addTrunkRev(int(m.group(1)))
+
+        w = self.dovc(self.repbase,
+                      "checkout %s svntmp" % self.svnurl_trunk)
+        yield w; w.getResult()
+
+        w = self.dovc(tmp, "cp -m make_branch %s %s" % (self.svnurl_trunk,
+                                                        self.svnurl_branch))
+        yield w; w.getResult()
+        w = self.dovc(tmp, "switch %s" % self.svnurl_branch)
+        yield w; w.getResult()
+        self.populate_branch(tmp)
+        w = self.dovc(tmp, "commit -m commit_on_branch")
+        yield w; out = w.getResult()
+        rmdirRecursive(tmp)
+        m = re.search(r'Committed revision (\d+)\.', out)
+        self.addBranchRev(int(m.group(1)))
+    createRepository = deferredGenerator(createRepository)
+
+    def vc_revise(self):
+        tmp = os.path.join(self.repbase, "svntmp")
+        rmdirRecursive(tmp)
+        log.msg("vc_revise" +  self.svnurl_trunk)
+        w = self.dovc(self.repbase,
+                      "checkout %s svntmp" % self.svnurl_trunk)
+        yield w; w.getResult()
+        self.version += 1
+        version_c = VERSION_C % self.version
+        open(os.path.join(tmp, "version.c"), "w").write(version_c)
+        w = self.dovc(tmp, "commit -m revised_to_%d" % self.version)
+        yield w; out = w.getResult()
+        m = re.search(r'Committed revision (\d+)\.', out)
+        self.addTrunkRev(int(m.group(1)))
+        rmdirRecursive(tmp)
+    vc_revise = deferredGenerator(vc_revise)
+
+    def vc_try_checkout(self, workdir, rev, branch=None):
+        assert os.path.abspath(workdir) == workdir
+        if os.path.exists(workdir):
+            rmdirRecursive(workdir)
+        if not branch:
+            svnurl = self.svnurl_trunk
+        else:
+            # N.B.: this is *not* os.path.join: SVN URLs use slashes
+            # regardless of the host operating system's filepath separator
+            svnurl = self.svnurl + "/" + branch
+        w = self.dovc(self.repbase,
+                      "checkout %s %s" % (svnurl, workdir))
+        yield w; w.getResult()
+        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+    vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+    def vc_try_finish(self, workdir):
+        rmdirRecursive(workdir)
+
+
+class SVN(VCBase, unittest.TestCase):
+    vc_name = "svn"
+
+    metadir = ".svn"
+    vctype = "source.SVN"
+    vctype_try = "svn"
+    has_got_revision = True
+    has_got_revision_branches_are_merged = True
+
+    def testCheckout(self):
+        # we verify this one with the svnurl style of vcargs. We test the
+        # baseURL/defaultBranch style in testPatch and testCheckoutBranch.
+        self.helper.vcargs = { 'svnurl': self.helper.svnurl_trunk }
+        d = self.do_vctest()
+        return maybeWait(d)
+
+    def testPatch(self):
+        self.helper.vcargs = { 'baseURL': self.helper.svnurl + "/",
+                               'defaultBranch': "sample/trunk",
+                               }
+        d = self.do_patch()
+        return maybeWait(d)
+
+    def testCheckoutBranch(self):
+        self.helper.vcargs = { 'baseURL': self.helper.svnurl + "/",
+                               'defaultBranch': "sample/trunk",
+                               }
+        d = self.do_branch()
+        return maybeWait(d)
+
+    def testTry(self):
+        # extract the base revision and patch from a modified tree, use it to
+        # create the same contents on the buildslave
+        self.helper.vcargs = { 'baseURL': self.helper.svnurl + "/",
+                               'defaultBranch': "sample/trunk",
+                               }
+        d = self.do_getpatch()
+        return maybeWait(d)
+
+VCS.registerVC(SVN.vc_name, SVNHelper())
+
+
+class P4Helper(BaseHelper):
+    branchname = "branch"
+    p4port = 'localhost:1666'
+    pid = None
+    base_descr = 'Change: new\nDescription: asdf\nFiles:\n'
+
+    def capable(self):
+        p4paths = which('p4')
+        p4dpaths = which('p4d')
+        if not p4paths:
+            return (False, "p4 is not installed")
+        if not p4dpaths:
+            return (False, "p4d is not installed")
+        self.vcexe = p4paths[0]
+        self.p4dexe = p4dpaths[0]
+        return (True, None)
+
+    class _P4DProtocol(protocol.ProcessProtocol):
+        def __init__(self):
+            self.started = defer.Deferred()
+            self.ended = defer.Deferred()
+
+        def outReceived(self, data):
+            # When it says starting, it has bound to the socket.
+            if self.started:
+                if data.startswith('Perforce Server starting...'):
+                    self.started.callback(None)
+                else:
+                    print "p4d said %r" % data
+                    try:
+                        raise Exception('p4d said %r' % data)
+                    except:
+                        self.started.errback(failure.Failure())
+                self.started = None
+
+        def errReceived(self, data):
+            print "p4d stderr: %s" % data
+
+        def processEnded(self, status_object):
+            if status_object.check(error.ProcessDone):
+                self.ended.callback(None)
+            else:
+                self.ended.errback(status_object)
+
+    def _start_p4d(self):
+        proto = self._P4DProtocol()
+        reactor.spawnProcess(proto, self.p4dexe, ['p4d', '-p', self.p4port],
+                             env=os.environ, path=self.p4rep)
+        return proto.started, proto.ended
+
+    def dop4(self, basedir, command, failureIsOk=False, stdin=None):
+        # p4 looks at $PWD instead of getcwd(), which causes confusion when
+        # we spawn commands without an intervening shell (sh -c). We can
+        # override this with a -d argument.
+        command = "-p %s -d %s %s" % (self.p4port, basedir, command)
+        return self.dovc(basedir, command, failureIsOk, stdin)
+
+    def createRepository(self):
+        # this is only called once per VC system, so start p4d here.
+
+        self.createBasedir()
+        tmp = os.path.join(self.repbase, "p4tmp")
+        self.p4rep = os.path.join(self.repbase, 'P4-Repository')
+        os.mkdir(self.p4rep)
+
+        # Launch p4d.
+        started, self.p4d_shutdown = self._start_p4d()
+        w = waitForDeferred(started)
+        yield w; w.getResult()
+
+        # Create client spec.
+        os.mkdir(tmp)
+        clispec = 'Client: creator\n'
+        clispec += 'Root: %s\n' % tmp
+        clispec += 'View:\n'
+        clispec += '\t//depot/... //creator/...\n'
+        w = self.dop4(tmp, 'client -i', stdin=clispec)
+        yield w; w.getResult()
+
+        # Create first rev (trunk).
+        self.populate(os.path.join(tmp, 'trunk'))
+        files = ['main.c', 'version.c', 'subdir/subdir.c']
+        w = self.dop4(tmp, "-c creator add "
+                      + " ".join(['trunk/%s' % f for f in files]))
+        yield w; w.getResult()
+        descr = self.base_descr
+        for file in files:
+            descr += '\t//depot/trunk/%s\n' % file
+        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
+        yield w; out = w.getResult()
+        m = re.search(r'Change (\d+) submitted.', out)
+        assert m.group(1) == '1'
+        self.addTrunkRev(m.group(1))
+
+        # Create second rev (branch).
+        w = self.dop4(tmp, '-c creator integrate '
+                      + '//depot/trunk/... //depot/branch/...')
+        yield w; w.getResult()
+        w = self.dop4(tmp, "-c creator edit branch/main.c")
+        yield w; w.getResult()
+        self.populate_branch(os.path.join(tmp, 'branch'))
+        descr = self.base_descr
+        for file in files:
+            descr += '\t//depot/branch/%s\n' % file
+        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
+        yield w; out = w.getResult()
+        m = re.search(r'Change (\d+) submitted.', out)
+        self.addBranchRev(m.group(1))
+    createRepository = deferredGenerator(createRepository)
+
+    def vc_revise(self):
+        tmp = os.path.join(self.repbase, "p4tmp")
+        self.version += 1
+        version_c = VERSION_C % self.version
+        w = self.dop4(tmp, '-c creator edit trunk/version.c')
+        yield w; w.getResult()
+        open(os.path.join(tmp, "trunk/version.c"), "w").write(version_c)
+        descr = self.base_descr + '\t//depot/trunk/version.c\n'
+        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
+        yield w; out = w.getResult()
+        m = re.search(r'Change (\d+) submitted.', out)
+        self.addTrunkRev(m.group(1))
+    vc_revise = deferredGenerator(vc_revise)
+
+    def shutdown_p4d(self):
+        d = self.runCommand(self.repbase, '%s -p %s admin stop'
+                            % (self.vcexe, self.p4port))
+        return d.addCallback(lambda _: self.p4d_shutdown)
+
+class P4(VCBase, unittest.TestCase):
+    metadir = None
+    vctype = "source.P4"
+    vc_name = "p4"
+
+    def tearDownClass(self):
+        if self.helper:
+            return maybeWait(self.helper.shutdown_p4d())
+
+    def testCheckout(self):
+        self.helper.vcargs = { 'p4port': self.helper.p4port,
+                               'p4base': '//depot/',
+                               'defaultBranch': 'trunk' }
+        d = self.do_vctest(testRetry=False)
+        # TODO: like arch and darcs, sync does nothing when server is not
+        # changed.
+        return maybeWait(d)
+
+    def testCheckoutBranch(self):
+        self.helper.vcargs = { 'p4port': self.helper.p4port,
+                               'p4base': '//depot/',
+                               'defaultBranch': 'trunk' }
+        d = self.do_branch()
+        return maybeWait(d)
+
+    def testPatch(self):
+        self.helper.vcargs = { 'p4port': self.helper.p4port,
+                               'p4base': '//depot/',
+                               'defaultBranch': 'trunk' }
+        d = self.do_patch()
+        return maybeWait(d)
+
+VCS.registerVC(P4.vc_name, P4Helper())
+
+
+class DarcsHelper(BaseHelper):
+    branchname = "branch"
+    try_branchname = "branch"
+
+    def capable(self):
+        darcspaths = which('darcs')
+        if not darcspaths:
+            return (False, "Darcs is not installed")
+        self.vcexe = darcspaths[0]
+        return (True, None)
+
+    def createRepository(self):
+        self.createBasedir()
+        self.darcs_base = os.path.join(self.repbase, "Darcs-Repository")
+        self.rep_trunk = os.path.join(self.darcs_base, "trunk")
+        self.rep_branch = os.path.join(self.darcs_base, "branch")
+        tmp = os.path.join(self.repbase, "darcstmp")
+
+        os.makedirs(self.rep_trunk)
+        w = self.dovc(self.rep_trunk, ["initialize"])
+        yield w; w.getResult()
+        os.makedirs(self.rep_branch)
+        w = self.dovc(self.rep_branch, ["initialize"])
+        yield w; w.getResult()
+
+        self.populate(tmp)
+        w = self.dovc(tmp, qw("initialize"))
+        yield w; w.getResult()
+        w = self.dovc(tmp, qw("add -r ."))
+        yield w; w.getResult()
+        w = self.dovc(tmp, qw("record -a -m initial_import --skip-long-comment -A test at buildbot.sf.net"))
+        yield w; w.getResult()
+        w = self.dovc(tmp, ["push", "-a", self.rep_trunk])
+        yield w; w.getResult()
+        w = self.dovc(tmp, qw("changes --context"))
+        yield w; out = w.getResult()
+        self.addTrunkRev(out)
+
+        self.populate_branch(tmp)
+        w = self.dovc(tmp, qw("record -a --ignore-times -m commit_on_branch --skip-long-comment -A test at buildbot.sf.net"))
+        yield w; w.getResult()
+        w = self.dovc(tmp, ["push", "-a", self.rep_branch])
+        yield w; w.getResult()
+        w = self.dovc(tmp, qw("changes --context"))
+        yield w; out = w.getResult()
+        self.addBranchRev(out)
+        rmdirRecursive(tmp)
+    createRepository = deferredGenerator(createRepository)
+
+    def vc_revise(self):
+        tmp = os.path.join(self.repbase, "darcstmp")
+        os.makedirs(tmp)
+        w = self.dovc(tmp, qw("initialize"))
+        yield w; w.getResult()
+        w = self.dovc(tmp, ["pull", "-a", self.rep_trunk])
+        yield w; w.getResult()
+
+        self.version += 1
+        version_c = VERSION_C % self.version
+        open(os.path.join(tmp, "version.c"), "w").write(version_c)
+        w = self.dovc(tmp, qw("record -a --ignore-times -m revised_to_%d --skip-long-comment -A test at buildbot.sf.net" % self.version))
+        yield w; w.getResult()
+        w = self.dovc(tmp, ["push", "-a", self.rep_trunk])
+        yield w; w.getResult()
+        w = self.dovc(tmp, qw("changes --context"))
+        yield w; out = w.getResult()
+        self.addTrunkRev(out)
+        rmdirRecursive(tmp)
+    vc_revise = deferredGenerator(vc_revise)
+
+    def vc_try_checkout(self, workdir, rev, branch=None):
+        assert os.path.abspath(workdir) == workdir
+        if os.path.exists(workdir):
+            rmdirRecursive(workdir)
+        os.makedirs(workdir)
+        w = self.dovc(workdir, qw("initialize"))
+        yield w; w.getResult()
+        if not branch:
+            rep = self.rep_trunk
+        else:
+            rep = os.path.join(self.darcs_base, branch)
+        w = self.dovc(workdir, ["pull", "-a", rep])
+        yield w; w.getResult()
+        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+    vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+    def vc_try_finish(self, workdir):
+        rmdirRecursive(workdir)
+
+
+class Darcs(VCBase, unittest.TestCase):
+    vc_name = "darcs"
+
+    # Darcs has a metadir="_darcs", but it does not have an 'export'
+    # mode
+    metadir = None
+    vctype = "source.Darcs"
+    vctype_try = "darcs"
+    has_got_revision = True
+
+    def testCheckout(self):
+        self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
+        d = self.do_vctest(testRetry=False)
+
+        # TODO: testRetry has the same problem with Darcs as it does for
+        # Arch
+        return maybeWait(d)
+
+    def testPatch(self):
+        self.helper.vcargs = { 'baseURL': self.helper.darcs_base + "/",
+                               'defaultBranch': "trunk" }
+        d = self.do_patch()
+        return maybeWait(d)
+
+    def testCheckoutBranch(self):
+        self.helper.vcargs = { 'baseURL': self.helper.darcs_base + "/",
+                               'defaultBranch': "trunk" }
+        d = self.do_branch()
+        return maybeWait(d)
+
+    def testCheckoutHTTP(self):
+        self.serveHTTP()
+        repourl = "http://localhost:%d/Darcs-Repository/trunk" % self.httpPort
+        self.helper.vcargs =  { 'repourl': repourl }
+        d = self.do_vctest(testRetry=False)
+        return maybeWait(d)
+        
+    def testTry(self):
+        self.helper.vcargs = { 'baseURL': self.helper.darcs_base + "/",
+                               'defaultBranch': "trunk" }
+        d = self.do_getpatch()
+        return maybeWait(d)
+
+VCS.registerVC(Darcs.vc_name, DarcsHelper())
+
+
+class ArchCommon:
+    def registerRepository(self, coordinates):
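+        # "tla archives <name>" reports whether the archive is already
+        # registered; if it is, "register-archive -d" drops the old
+        # registration before we register the new coordinates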
+        a = self.archname
+        w = self.dovc(self.repbase, "archives %s" % a)
+        yield w; out = w.getResult()
+        if out:
+            w = self.dovc(self.repbase, "register-archive -d %s" % a)
+            yield w; w.getResult()
+        w = self.dovc(self.repbase, "register-archive %s" % coordinates)
+        yield w; w.getResult()
+    registerRepository = deferredGenerator(registerRepository)
+
+    def unregisterRepository(self):
+        a = self.archname
+        w = self.dovc(self.repbase, "archives %s" % a)
+        yield w; out = w.getResult()
+        if out:
+            w = self.dovc(self.repbase, "register-archive -d %s" % a)
+            yield w; out = w.getResult()
+    unregisterRepository = deferredGenerator(unregisterRepository)
+
+class TlaHelper(BaseHelper, ArchCommon):
+    defaultbranch = "testvc--mainline--1"
+    branchname = "testvc--branch--1"
+    try_branchname = None # TlaExtractor can figure it out by itself
+    archcmd = "tla"
+
+    def capable(self):
+        tlapaths = which('tla')
+        if not tlapaths:
+            return (False, "Arch (tla) is not installed")
+        self.vcexe = tlapaths[0]
+        return (True, None)
+
+    def do_get(self, basedir, archive, branch, newdir):
+        # the 'get' syntax is different between tla and baz. baz, while
+        # claiming to honor an --archive argument, in fact ignores it. The
+        # correct invocation is 'baz get archive/revision newdir'.
+        if self.archcmd == "tla":
+            w = self.dovc(basedir,
+                          "get -A %s %s %s" % (archive, branch, newdir))
+        else:
+            w = self.dovc(basedir,
+                          "get %s/%s %s" % (archive, branch, newdir))
+        return w
+
+    def createRepository(self):
+        self.createBasedir()
+        # first check to see if bazaar is around, since we'll need to know
+        # later
+        d = VCS.capable(Bazaar.vc_name)
+        d.addCallback(self._createRepository_1)
+        return d
+
+    def _createRepository_1(self, res):
+        has_baz = res[0]
+
+        # pick a hopefully unique string for the archive name, in the form
+        # test-%d@buildbot.sf.net--testvc, since otherwise multiple copies of
+        # the unit tests run in the same user account will collide (since the
+        # archive names are kept in the per-user ~/.arch-params/ directory).
+        pid = os.getpid()
+        self.archname = "test-%s-%d at buildbot.sf.net--testvc" % (self.archcmd,
+                                                                pid)
+        trunk = self.defaultbranch
+        branch = self.branchname
+
+        repword = self.archcmd.capitalize()
+        self.archrep = os.path.join(self.repbase, "%s-Repository" % repword)
+        tmp = os.path.join(self.repbase, "archtmp")
+        a = self.archname
+
+        self.populate(tmp)
+
+        w = self.dovc(tmp, "my-id", failureIsOk=True)
+        yield w; res = w.getResult()
+        if not res:
+            # tla will fail a lot of operations if you have not set an ID
+            w = self.do(tmp, [self.vcexe, "my-id",
+                              "Buildbot Test Suite <test at buildbot.sf.net>"])
+            yield w; w.getResult()
+
+        if has_baz:
+            # bazaar keeps a cache of revisions, but this test creates a new
+            # archive each time it is run, so the cache causes errors.
+            # Disable the cache to avoid these problems. This will be
+            # slightly annoying for people who run the buildbot tests under
+            # the same UID as one which uses baz on a regular basis, but
+            # bazaar doesn't give us a way to disable the cache just for this
+            # one archive.
+            cmd = "%s cache-config --disable" % VCS.getHelper('bazaar').vcexe
+            w = self.do(tmp, cmd)
+            yield w; w.getResult()
+
+        w = waitForDeferred(self.unregisterRepository())
+        yield w; w.getResult()
+
+        # these commands can be run in any directory
+        w = self.dovc(tmp, "make-archive -l %s %s" % (a, self.archrep))
+        yield w; w.getResult()
+        if self.archcmd == "tla":
+            w = self.dovc(tmp, "archive-setup -A %s %s" % (a, trunk))
+            yield w; w.getResult()
+            w = self.dovc(tmp, "archive-setup -A %s %s" % (a, branch))
+            yield w; w.getResult()
+        else:
+            # baz does not require an 'archive-setup' step
+            pass
+
+        # these commands must be run in the directory that is to be imported
+        w = self.dovc(tmp, "init-tree --nested %s/%s" % (a, trunk))
+        yield w; w.getResult()
+        files = " ".join(["main.c", "version.c", "subdir",
+                          os.path.join("subdir", "subdir.c")])
+        w = self.dovc(tmp, "add-id %s" % files)
+        yield w; w.getResult()
+
+        w = self.dovc(tmp, "import %s/%s" % (a, trunk))
+        yield w; out = w.getResult()
+        self.addTrunkRev("base-0")
+
+        # create the branch
+        if self.archcmd == "tla":
+            branchstart = "%s--base-0" % trunk
+            w = self.dovc(tmp, "tag -A %s %s %s" % (a, branchstart, branch))
+            yield w; w.getResult()
+        else:
+            w = self.dovc(tmp, "branch %s" % branch)
+            yield w; w.getResult()
+
+        rmdirRecursive(tmp)
+
+        # check out the branch
+        w = self.do_get(self.repbase, a, branch, "archtmp")
+        yield w; w.getResult()
+        # and edit the file
+        self.populate_branch(tmp)
+        logfile = "++log.%s--%s" % (branch, a)
+        logmsg = "Summary: commit on branch\nKeywords:\n\n"
+        open(os.path.join(tmp, logfile), "w").write(logmsg)
+        w = self.dovc(tmp, "commit")
+        yield w; out = w.getResult()
+        m = re.search(r'committed %s/%s--([\S]+)' % (a, branch),
+                      out)
+        assert (m.group(1) == "base-0" or m.group(1).startswith("patch-"))
+        self.addBranchRev(m.group(1))
+
+        w = waitForDeferred(self.unregisterRepository())
+        yield w; w.getResult()
+        rmdirRecursive(tmp)
+
+        # we unregister the repository each time, because we might have
+        # changed the coordinates (since we switch from a file: URL to an
+        # http: URL for various tests). The buildslave code doesn't forcibly
+        # unregister the archive, so we have to do it here.
+        w = waitForDeferred(self.unregisterRepository())
+        yield w; w.getResult()
+
+    _createRepository_1 = deferredGenerator(_createRepository_1)
+
+    def vc_revise(self):
+        # the fix needs to be done in a workspace that is linked to a
+        # read-write version of the archive (i.e., using file-based
+        # coordinates instead of HTTP ones), so we re-register the repository
+        # before we begin. We unregister it when we're done to make sure the
+        # build will re-register the correct one for whichever test is
+        # currently being run.
+
+        # The exception: source.Bazaar really doesn't like it when the archive
+        # gets unregistered behind its back. The slave tries to do a 'baz
+        # replay' in a tree with an archive that is no longer recognized, and
+        # baz aborts with a botched invariant exception. This causes
+        # mode=update to fall back to clobber+get, which flunks one of the
+        # tests (the 'newfile' check in _do_vctest_update_3 fails)
+
+        # to avoid this, we take heroic steps here to leave the archive
+        # registration in the same state as we found it.
+
+        tmp = os.path.join(self.repbase, "archtmp")
+        a = self.archname
+
+        w = self.dovc(self.repbase, "archives %s" % a)
+        yield w; out = w.getResult()
+        assert out
+        lines = out.split("\n")
+        coordinates = lines[1].strip()
+
+        # now register the read-write location
+        w = waitForDeferred(self.registerRepository(self.archrep))
+        yield w; w.getResult()
+
+        trunk = self.defaultbranch
+
+        w = self.do_get(self.repbase, a, trunk, "archtmp")
+        yield w; w.getResult()
+
+        # tla appears to use timestamps to determine which files have
+        # changed, so wait long enough for the new file to have a different
+        # timestamp
+        time.sleep(2)
+        self.version += 1
+        version_c = VERSION_C % self.version
+        open(os.path.join(tmp, "version.c"), "w").write(version_c)
+
+        logfile = "++log.%s--%s" % (trunk, a)
+        logmsg = "Summary: revised_to_%d\nKeywords:\n\n" % self.version
+        open(os.path.join(tmp, logfile), "w").write(logmsg)
+        w = self.dovc(tmp, "commit")
+        yield w; out = w.getResult()
+        m = re.search(r'committed %s/%s--([\S]+)' % (a, trunk),
+                      out)
+        assert (m.group(1) == "base-0" or m.group(1).startswith("patch-"))
+        self.addTrunkRev(m.group(1))
+
+        # now re-register the original coordinates
+        w = waitForDeferred(self.registerRepository(coordinates))
+        yield w; w.getResult()
+        rmdirRecursive(tmp)
+    vc_revise = deferredGenerator(vc_revise)
+
+    def vc_try_checkout(self, workdir, rev, branch=None):
+        assert os.path.abspath(workdir) == workdir
+        if os.path.exists(workdir):
+            rmdirRecursive(workdir)
+
+        a = self.archname
+
+        # register the read-write location, if it wasn't already registered
+        w = waitForDeferred(self.registerRepository(self.archrep))
+        yield w; w.getResult()
+
+        w = self.do_get(self.repbase, a, "testvc--mainline--1", workdir)
+        yield w; w.getResult()
+
+        # tla compares timestamps to detect changed files, so wait for the
+        # clock to advance before writing the new file
+        time.sleep(2)
+        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+    vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+    def vc_try_finish(self, workdir):
+        rmdirRecursive(workdir)
+
+class Arch(VCBase, unittest.TestCase):
+    vc_name = "tla"
+
+    metadir = None
+    # Arch has a metadir="{arch}", but it does not have an 'export' mode.
+    vctype = "source.Arch"
+    vctype_try = "tla"
+    has_got_revision = True
+
+    def testCheckout(self):
+        # these are the coordinates of the read-write archive used by all the
+        # non-HTTP tests. testCheckoutHTTP overrides these.
+        self.helper.vcargs = {'url': self.helper.archrep,
+                              'version': self.helper.defaultbranch }
+        d = self.do_vctest(testRetry=False)
+        # the current testRetry=True logic doesn't have the desired effect:
+        # "update" is a no-op because arch knows that the repository hasn't
+        # changed. Other VC systems will re-checkout missing files on
+        # update, arch just leaves the tree untouched. TODO: come up with
+        # some better test logic, probably involving a copy of the
+        # repository that has a few changes checked in.
+
+        return maybeWait(d)
+
+    def testCheckoutHTTP(self):
+        self.serveHTTP()
+        url = "http://localhost:%d/Tla-Repository" % self.httpPort
+        self.helper.vcargs = { 'url': url,
+                               'version': "testvc--mainline--1" }
+        d = self.do_vctest(testRetry=False)
+        return maybeWait(d)
+
+    def testPatch(self):
+        self.helper.vcargs = {'url': self.helper.archrep,
+                              'version': self.helper.defaultbranch }
+        d = self.do_patch()
+        return maybeWait(d)
+
+    def testCheckoutBranch(self):
+        self.helper.vcargs = {'url': self.helper.archrep,
+                              'version': self.helper.defaultbranch }
+        d = self.do_branch()
+        return maybeWait(d)
+
+    def testTry(self):
+        self.helper.vcargs = {'url': self.helper.archrep,
+                              'version': self.helper.defaultbranch }
+        d = self.do_getpatch()
+        return maybeWait(d)
+
+VCS.registerVC(Arch.vc_name, TlaHelper())
+
+
+class BazaarHelper(TlaHelper):
+    archcmd = "baz"
+
+    def capable(self):
+        bazpaths = which('baz')
+        if not bazpaths:
+            return (False, "Arch (baz) is not installed")
+        self.vcexe = bazpaths[0]
+        return (True, None)
+
+    def setUp2(self, res):
+        # we unregister the repository each time, because we might have
+        # changed the coordinates (since we switch from a file: URL to an
+        # http: URL for various tests). The buildslave code doesn't forcibly
+        # unregister the archive, so we have to do it here.
+        d = self.unregisterRepository()
+        return d
+
+
+class Bazaar(Arch):
+    vc_name = "bazaar"
+
+    vctype = "source.Bazaar"
+    vctype_try = "baz"
+    has_got_revision = True
+
+    fixtimer = None
+
+    def testCheckout(self):
+        self.helper.vcargs = {'url': self.helper.archrep,
+                              # Baz adds the required 'archive' argument
+                              'archive': self.helper.archname,
+                              'version': self.helper.defaultbranch,
+                              }
+        d = self.do_vctest(testRetry=False)
+        # the current testRetry=True logic doesn't have the desired effect:
+        # "update" is a no-op because arch knows that the repository hasn't
+        # changed. Other VC systems will re-checkout missing files on
+        # update, arch just leaves the tree untouched. TODO: come up with
+        # some better test logic, probably involving a copy of the
+        # repository that has a few changes checked in.
+
+        return maybeWait(d)
+
+    def testCheckoutHTTP(self):
+        self.serveHTTP()
+        url = "http://localhost:%d/Baz-Repository" % self.httpPort
+        self.helper.vcargs = { 'url': url,
+                               'archive': self.helper.archname,
+                               'version': self.helper.defaultbranch,
+                               }
+        d = self.do_vctest(testRetry=False)
+        return maybeWait(d)
+
+    def testPatch(self):
+        self.helper.vcargs = {'url': self.helper.archrep,
+                              # Baz adds the required 'archive' argument
+                              'archive': self.helper.archname,
+                              'version': self.helper.defaultbranch,
+                              }
+        d = self.do_patch()
+        return maybeWait(d)
+
+    def testCheckoutBranch(self):
+        self.helper.vcargs = {'url': self.helper.archrep,
+                              # Baz adds the required 'archive' argument
+                              'archive': self.helper.archname,
+                              'version': self.helper.defaultbranch,
+                              }
+        d = self.do_branch()
+        return maybeWait(d)
+
+    def testTry(self):
+        self.helper.vcargs = {'url': self.helper.archrep,
+                              # Baz adds the required 'archive' argument
+                              'archive': self.helper.archname,
+                              'version': self.helper.defaultbranch,
+                              }
+        d = self.do_getpatch()
+        return maybeWait(d)
+
+    def fixRepository(self):
+        self.fixtimer = None
+        self.site.resource = self.root
+
+    def testRetry(self):
+        # we want to verify that source.Source(retry=) works, and the easiest
+        # way to make VC updates break (temporarily) is to break the HTTP
+        # server that's providing the repository. Anything else pretty much
+        # requires mutating the (read-only) BUILDBOT_TEST_VC repository, or
+        # modifying the buildslave's checkout command while it's running.
+
+        # this test takes a while to run, so don't bother doing it with
+        # anything other than baz
+
+        self.serveHTTP()
+
+        # break the repository server
+        from twisted.web import static
+        self.site.resource = static.Data("Sorry, repository is offline",
+                                         "text/plain")
+        # and arrange to fix it again in 5 seconds, while the test is
+        # running.
+        self.fixtimer = reactor.callLater(5, self.fixRepository)
+        
+        url = "http://localhost:%d/Baz-Repository" % self.httpPort
+        self.helper.vcargs = { 'url': url,
+                               'archive': self.helper.archname,
+                               'version': self.helper.defaultbranch,
+                               'retry': (5.0, 4),
+                               }
+        d = self.do_vctest_once(True)
+        d.addCallback(self._testRetry_1)
+        return maybeWait(d)
+    def _testRetry_1(self, bs):
+        # make sure there was mention of the retry attempt in the logs
+        l = bs.getLogs()[0]
+        self.failUnlessIn("unable to access URL", l.getText(),
+                          "funny, VC operation didn't fail at least once")
+        self.failUnlessIn("update failed, trying 4 more times after 5 seconds",
+                          l.getTextWithHeaders(),
+                          "funny, VC operation wasn't reattempted")
+
+    def testRetryFails(self):
+        # make sure that the build eventually gives up on a repository which
+        # is completely unavailable
+
+        self.serveHTTP()
+
+        # break the repository server, and leave it broken
+        from twisted.web import static
+        self.site.resource = static.Data("Sorry, repository is offline",
+                                         "text/plain")
+
+        url = "http://localhost:%d/Baz-Repository" % self.httpPort
+        self.helper.vcargs = {'url': url,
+                              'archive': self.helper.archname,
+                              'version': self.helper.defaultbranch,
+                              'retry': (0.5, 3),
+                              }
+        d = self.do_vctest_once(False)
+        d.addCallback(self._testRetryFails_1)
+        return maybeWait(d)
+    def _testRetryFails_1(self, bs):
+        self.failUnlessEqual(bs.getResults(), FAILURE)
+
+    def tearDown2(self):
+        if self.fixtimer:
+            self.fixtimer.cancel()
+        # tell tla to get rid of the leftover archive this test leaves in the
+        # user's 'tla archives' listing. The name of this archive is provided
+        # by the repository tarball, so the following command must use the
+        # same name. We could use archive= to set it explicitly, but if you
+        # change it from the default, then 'tla update' won't work.
+        d = self.helper.unregisterRepository()
+        return d
+
+VCS.registerVC(Bazaar.vc_name, BazaarHelper())
+
+class MercurialHelper(BaseHelper):
+    branchname = "branch"
+    try_branchname = "branch"
+
+    def capable(self):
+        hgpaths = which("hg")
+        if not hgpaths:
+            return (False, "Mercurial is not installed")
+        self.vcexe = hgpaths[0]
+        return (True, None)
+
+    def extract_id(self, output):
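+        # "hg identify" prints the short changeset hash first (e.g.
+        # "b6c2e5f6a7d8 tip"); grab just that leading hash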
+        m = re.search(r'^(\w+)', output)
+        return m.group(0)
+
+    def createRepository(self):
+        self.createBasedir()
+        self.hg_base = os.path.join(self.repbase, "Mercurial-Repository")
+        self.rep_trunk = os.path.join(self.hg_base, "trunk")
+        self.rep_branch = os.path.join(self.hg_base, "branch")
+        tmp = os.path.join(self.hg_base, "hgtmp")
+
+        os.makedirs(self.rep_trunk)
+        w = self.dovc(self.rep_trunk, "init")
+        yield w; w.getResult()
+        os.makedirs(self.rep_branch)
+        w = self.dovc(self.rep_branch, "init")
+        yield w; w.getResult()
+
+        self.populate(tmp)
+        w = self.dovc(tmp, "init")
+        yield w; w.getResult()
+        w = self.dovc(tmp, "add")
+        yield w; w.getResult()
+        w = self.dovc(tmp, "commit -m initial_import")
+        yield w; w.getResult()
+        w = self.dovc(tmp, "push %s" % self.rep_trunk)
+        # note that hg-push does not actually update the working directory
+        yield w; w.getResult()
+        w = self.dovc(tmp, "identify")
+        yield w; out = w.getResult()
+        self.addTrunkRev(self.extract_id(out))
+
+        self.populate_branch(tmp)
+        w = self.dovc(tmp, "commit -m commit_on_branch")
+        yield w; w.getResult()
+        w = self.dovc(tmp, "push %s" % self.rep_branch)
+        yield w; w.getResult()
+        w = self.dovc(tmp, "identify")
+        yield w; out = w.getResult()
+        self.addBranchRev(self.extract_id(out))
+        rmdirRecursive(tmp)
+    createRepository = deferredGenerator(createRepository)
+
+    def vc_revise(self):
+        tmp = os.path.join(self.hg_base, "hgtmp2")
+        w = self.dovc(self.hg_base, "clone %s %s" % (self.rep_trunk, tmp))
+        yield w; w.getResult()
+
+        self.version += 1
+        version_c = VERSION_C % self.version
+        version_c_filename = os.path.join(tmp, "version.c")
+        open(version_c_filename, "w").write(version_c)
+        # hg uses timestamps to distinguish files which have changed, so we
+        # force the mtime forward a little bit
+        future = time.time() + 2*self.version
+        os.utime(version_c_filename, (future, future))
+        w = self.dovc(tmp, "commit -m revised_to_%d" % self.version)
+        yield w; w.getResult()
+        w = self.dovc(tmp, "push %s" % self.rep_trunk)
+        yield w; w.getResult()
+        w = self.dovc(tmp, "identify")
+        yield w; out = w.getResult()
+        self.addTrunkRev(self.extract_id(out))
+        rmdirRecursive(tmp)
+    vc_revise = deferredGenerator(vc_revise)
+
+    def vc_try_checkout(self, workdir, rev, branch=None):
+        assert os.path.abspath(workdir) == workdir
+        if os.path.exists(workdir):
+            rmdirRecursive(workdir)
+        if branch:
+            src = self.rep_branch
+        else:
+            src = self.rep_trunk
+        w = self.dovc(self.hg_base, "clone %s %s" % (src, workdir))
+        yield w; w.getResult()
+        try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
+        open(try_c_filename, "w").write(TRY_C)
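+        # as in vc_revise, push the mtime forward so hg's timestamp-based
+        # change detection notices the rewritten subdir.c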
+        future = time.time() + 2*self.version
+        os.utime(try_c_filename, (future, future))
+    vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+    def vc_try_finish(self, workdir):
+        rmdirRecursive(workdir)
+
+
+class Mercurial(VCBase, unittest.TestCase):
+    vc_name = "hg"
+
+    # Mercurial has a metadir=".hg", but it does not have an 'export' mode.
+    metadir = None
+    vctype = "source.Mercurial"
+    vctype_try = "hg"
+    has_got_revision = True
+
+    def testCheckout(self):
+        self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
+        d = self.do_vctest(testRetry=False)
+
+        # TODO: testRetry has the same problem with Mercurial as it does for
+        # Arch
+        return maybeWait(d)
+
+    def testPatch(self):
+        self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+                               'defaultBranch': "trunk" }
+        d = self.do_patch()
+        return maybeWait(d)
+
+    def testCheckoutBranch(self):
+        self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+                               'defaultBranch': "trunk" }
+        d = self.do_branch()
+        return maybeWait(d)
+
+    def testCheckoutHTTP(self):
+        self.serveHTTP()
+        repourl = "http://localhost:%d/Mercurial-Repository/trunk/.hg" % self.httpPort
+        self.helper.vcargs =  { 'repourl': repourl }
+        d = self.do_vctest(testRetry=False)
+        return maybeWait(d)
+    # TODO: The easiest way to publish hg over HTTP is by running 'hg serve'
+    # as a child process while the test is running. (you can also use a CGI
+    # script, which sounds difficult, or you can publish the files directly,
+    # which isn't well documented).
+    testCheckoutHTTP.skip = "not yet implemented, use 'hg serve'"
+
+    def testTry(self):
+        self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+                               'defaultBranch': "trunk" }
+        d = self.do_getpatch()
+        return maybeWait(d)
+
+VCS.registerVC(Mercurial.vc_name, MercurialHelper())
+
+
+class Sources(unittest.TestCase):
+    # TODO: this needs a serious rethink
+    def makeChange(self, when=None, revision=None):
+        if when:
+            when = mktime_tz(parsedate_tz(when))
+        return changes.Change("fred", [], "", when=when, revision=revision)
+
+    def testCVS1(self):
+        r = base.BuildRequest("forced build", SourceStamp())
+        b = base.Build([r])
+        s = source.CVS(cvsroot=None, cvsmodule=None, workdir=None, build=b)
+        self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), None)
+
+    def testCVS2(self):
+        c = []
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+        r = base.BuildRequest("forced", SourceStamp(changes=c))
+        submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+        r.submittedAt = mktime_tz(parsedate_tz(submitted))
+        b = base.Build([r])
+        s = source.CVS(cvsroot=None, cvsmodule=None, workdir=None, build=b)
+        self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+                             "Wed, 08 Sep 2004 16:03:00 -0000")
+
+    def testCVS3(self):
+        c = []
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+        r = base.BuildRequest("forced", SourceStamp(changes=c))
+        submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+        r.submittedAt = mktime_tz(parsedate_tz(submitted))
+        b = base.Build([r])
+        s = source.CVS(cvsroot=None, cvsmodule=None, workdir=None, build=b,
+                       checkoutDelay=10)
+        self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+                             "Wed, 08 Sep 2004 16:02:10 -0000")
+
+    def testCVS4(self):
+        c = []
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+        r1 = base.BuildRequest("forced", SourceStamp(changes=c))
+        submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+        r1.submittedAt = mktime_tz(parsedate_tz(submitted))
+
+        c = []
+        c.append(self.makeChange("Wed, 08 Sep 2004 09:05:00 -0700"))
+        r2 = base.BuildRequest("forced", SourceStamp(changes=c))
+        submitted = "Wed, 08 Sep 2004 09:07:00 -0700"
+        r2.submittedAt = mktime_tz(parsedate_tz(submitted))
+
+        b = base.Build([r1, r2])
+        s = source.CVS(cvsroot=None, cvsmodule=None, workdir=None, build=b)
+        self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+                             "Wed, 08 Sep 2004 16:06:00 -0000")
+
+    def testSVN1(self):
+        r = base.BuildRequest("forced", SourceStamp())
+        b = base.Build([r])
+        s = source.SVN(svnurl="dummy", workdir=None, build=b)
+        self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), None)
+
+    def testSVN2(self):
+        c = []
+        c.append(self.makeChange(revision=4))
+        c.append(self.makeChange(revision=10))
+        c.append(self.makeChange(revision=67))
+        r = base.BuildRequest("forced", SourceStamp(changes=c))
+        b = base.Build([r])
+        s = source.SVN(svnurl="dummy", workdir=None, build=b)
+        self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), 67)
+
+class Patch(VCBase, unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def testPatch(self):
+        # invoke 'patch' all by itself, to see if it works the way we think
+        # it should. This is intended to ferret out some windows test
+        # failures.
+        helper = BaseHelper()
+        self.workdir = os.path.join("test_vc", "testPatch")
+        helper.populate(self.workdir)
+        patch = which("patch")[0]
+
+        command = [patch, "-p0"]
+        class FakeBuilder:
+            usePTY = False
+            def sendUpdate(self, status):
+                pass
+        c = commands.ShellCommand(FakeBuilder(), command, self.workdir,
+                                  sendRC=False, initialStdin=p0_diff)
+        d = c.start()
+        d.addCallback(self._testPatch_1)
+        return maybeWait(d)
+
+    def _testPatch_1(self, res):
+        # make sure the file actually got patched
+        subdir_c = os.path.join(self.workdir, "subdir", "subdir.c")
+        data = open(subdir_c, "r").read()
+        self.failUnlessIn("Hello patched subdir.\\n", data)

Added: vendor/buildbot/current/buildbot/test/test_web.py
===================================================================
--- vendor/buildbot/current/buildbot/test/test_web.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/test/test_web.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,514 @@
+# -*- test-case-name: buildbot.test.test_web -*-
+
+import os, time, shutil
+from twisted.python import components
+
+from twisted.trial import unittest
+from buildbot.test.runutils import RunMixin
+
+from twisted.internet import reactor, defer, protocol
+from twisted.internet.interfaces import IReactorUNIX
+from twisted.web import client
+
+from buildbot import master, interfaces, sourcestamp
+from buildbot.twcompat import providedBy, maybeWait
+from buildbot.status import html, builder
+from buildbot.changes.changes import Change
+from buildbot.process import base
+from buildbot.process.buildstep import BuildStep
+from buildbot.test.runutils import setupBuildStepStatus
+
+class ConfiguredMaster(master.BuildMaster):
+    """This BuildMaster variant has a static config file, provided as a
+    string when it is created."""
+
+    def __init__(self, basedir, config):
+        self.config = config
+        master.BuildMaster.__init__(self, basedir)
+        
+    def loadTheConfigFile(self):
+        self.loadConfig(self.config)
+
+components.registerAdapter(master.Control, ConfiguredMaster,
+                           interfaces.IControl)
+
+
+base_config = """
+from buildbot.status import html
+BuildmasterConfig = c = {
+    'bots': [],
+    'sources': [],
+    'schedulers': [],
+    'builders': [],
+    'slavePortnum': 0,
+    }
+"""
+
+
+
+class DistribUNIX:
+    def __init__(self, unixpath):
+        from twisted.web import server, resource, distrib
+        root = resource.Resource()
+        self.r = r = distrib.ResourceSubscription("unix", unixpath)
+        root.putChild('remote', r)
+        self.p = p = reactor.listenTCP(0, server.Site(root))
+        self.portnum = p.getHost().port
+    def shutdown(self):
+        d = defer.maybeDeferred(self.p.stopListening)
+        return d
+
+class DistribTCP:
+    def __init__(self, port):
+        from twisted.web import server, resource, distrib
+        root = resource.Resource()
+        self.r = r = distrib.ResourceSubscription("localhost", port)
+        root.putChild('remote', r)
+        self.p = p = reactor.listenTCP(0, server.Site(root))
+        self.portnum = p.getHost().port
+    def shutdown(self):
+        d = defer.maybeDeferred(self.p.stopListening)
+        d.addCallback(self._shutdown_1)
+        return d
+    def _shutdown_1(self, res):
+        return self.r.publisher.broker.transport.loseConnection()
+
+class SlowReader(protocol.Protocol):
+    didPause = False
+    count = 0
+    data = ""
+    def __init__(self, req):
+        self.req = req
+        self.d = defer.Deferred()
+    def connectionMade(self):
+        self.transport.write(self.req)
+    def dataReceived(self, data):
+        self.data += data
+        self.count += len(data)
+        if not self.didPause and self.count > 10*1000:
+            self.didPause = True
+            self.transport.pauseProducing()
+            reactor.callLater(2, self.resume)
+    def resume(self):
+        self.transport.resumeProducing()
+    def connectionLost(self, why):
+        self.d.callback(None)
+
+class CFactory(protocol.ClientFactory):
+    def __init__(self, p):
+        self.p = p
+    def buildProtocol(self, addr):
+        self.p.factory = self
+        return self.p
+
+def stopHTTPLog():
+    # grr.
+    try:
+        from twisted.web import http # Twisted-2.0
+    except ImportError:
+        from twisted.protocols import http # Twisted-1.3
+    http._logDateTimeStop()
+
+class BaseWeb:
+    master = None
+
+    def failUnlessIn(self, substr, string):
+        self.failUnless(string.find(substr) != -1)
+
+    def tearDown(self):
+        stopHTTPLog()
+        if self.master:
+            d = self.master.stopService()
+            return maybeWait(d)
+
+    def find_waterfall(self, master):
+        return filter(lambda child: isinstance(child, html.Waterfall),
+                      list(master))
+
+class Ports(BaseWeb, unittest.TestCase):
+
+    def test_webPortnum(self):
+        # run a regular web server on a TCP socket
+        config = base_config + "c['status'] = [html.Waterfall(http_port=0)]\n"
+        os.mkdir("test_web1")
+        self.master = m = ConfiguredMaster("test_web1", config)
+        m.startService()
+        # hack to find out what randomly-assigned port it is listening on
+        port = list(self.find_waterfall(m)[0])[0]._port.getHost().port
+
+        d = client.getPage("http://localhost:%d/" % port)
+        d.addCallback(self._test_webPortnum_1)
+        return maybeWait(d)
+    test_webPortnum.timeout = 10
+    def _test_webPortnum_1(self, page):
+        #print page
+        self.failUnless(page)
+
+    def test_webPathname(self):
+        # running a t.web.distrib server over a UNIX socket
+        if not providedBy(reactor, IReactorUNIX):
+            raise unittest.SkipTest("UNIX sockets not supported here")
+        config = (base_config +
+                  "c['status'] = [html.Waterfall(distrib_port='.web-pb')]\n")
+        os.mkdir("test_web2")
+        self.master = m = ConfiguredMaster("test_web2", config)
+        m.startService()
+            
+        p = DistribUNIX("test_web2/.web-pb")
+
+        d = client.getPage("http://localhost:%d/remote/" % p.portnum)
+        d.addCallback(self._test_webPathname_1, p)
+        return maybeWait(d)
+    test_webPathname.timeout = 10
+    def _test_webPathname_1(self, page, p):
+        #print page
+        self.failUnless(page)
+        return p.shutdown()
+
+
+    def test_webPathname_port(self):
+        # running a t.web.distrib server over TCP
+        config = (base_config +
+                  "c['status'] = [html.Waterfall(distrib_port=0)]\n")
+        os.mkdir("test_web3")
+        self.master = m = ConfiguredMaster("test_web3", config)
+        m.startService()
+        dport = list(self.find_waterfall(m)[0])[0]._port.getHost().port
+
+        p = DistribTCP(dport)
+
+        d = client.getPage("http://localhost:%d/remote/" % p.portnum)
+        d.addCallback(self._test_webPathname_port_1, p)
+        return maybeWait(d)
+    test_webPathname_port.timeout = 10
+    def _test_webPathname_port_1(self, page, p):
+        self.failUnlessIn("BuildBot", page)
+        return p.shutdown()
+
+
+class Waterfall(BaseWeb, unittest.TestCase):
+    def test_waterfall(self):
+        os.mkdir("test_web4")
+        os.mkdir("my-maildir"); os.mkdir("my-maildir/new")
+        self.robots_txt = os.path.abspath(os.path.join("test_web4",
+                                                       "robots.txt"))
+        self.robots_txt_contents = "User-agent: *\nDisallow: /\n"
+        f = open(self.robots_txt, "w")
+        f.write(self.robots_txt_contents)
+        f.close()
+        # this is the right way to configure the Waterfall status
+        config1 = base_config + """
+from buildbot.changes import mail
+c['sources'] = [mail.SyncmailMaildirSource('my-maildir')]
+c['status'] = [html.Waterfall(http_port=0, robots_txt=%s)]
+""" % repr(self.robots_txt)
+
+        self.master = m = ConfiguredMaster("test_web4", config1)
+        m.startService()
+        # hack to find out what randomly-assigned port it is listening on
+        port = list(self.find_waterfall(m)[0])[0]._port.getHost().port
+        self.port = port
+        # insert an event
+        m.change_svc.addChange(Change("user", ["foo.c"], "comments"))
+
+        d = client.getPage("http://localhost:%d/" % port)
+        d.addCallback(self._test_waterfall_1)
+        return maybeWait(d)
+    test_waterfall.timeout = 10
+    def _test_waterfall_1(self, page):
+        self.failUnless(page)
+        self.failUnlessIn("current activity", page)
+        self.failUnlessIn("<html", page)
+        TZ = time.tzname[time.daylight]
+        self.failUnlessIn("time (%s)" % TZ, page)
+
+        # phase=0 is really for debugging the waterfall layout
+        d = client.getPage("http://localhost:%d/?phase=0" % self.port)
+        d.addCallback(self._test_waterfall_2)
+        return d
+    def _test_waterfall_2(self, page):
+        self.failUnless(page)
+        self.failUnlessIn("<html", page)
+
+        d = client.getPage("http://localhost:%d/favicon.ico" % self.port)
+        d.addCallback(self._test_waterfall_3)
+        return d
+    def _test_waterfall_3(self, icon):
+        expected = open(html.buildbot_icon,"rb").read()
+        self.failUnless(icon == expected)
+
+        d = client.getPage("http://localhost:%d/changes" % self.port)
+        d.addCallback(self._test_waterfall_4)
+        return d
+    def _test_waterfall_4(self, changes):
+        self.failUnlessIn("<li>Syncmail mailing list in maildir " +
+                          "my-maildir</li>", changes)
+
+        d = client.getPage("http://localhost:%d/robots.txt" % self.port)
+        d.addCallback(self._test_waterfall_5)
+        return d
+    def _test_waterfall_5(self, robotstxt):
+        self.failUnless(robotstxt == self.robots_txt_contents)
+
+class WaterfallSteps(unittest.TestCase):
+
+    # failUnlessSubstring copied from twisted-2.1.0, because this helps us
+    # maintain compatibility with python2.2.
+    def failUnlessSubstring(self, substring, astring, msg=None):
+        """a python2.2 friendly test to assert that substring is found in
+        astring parameters follow the semantics of failUnlessIn
+        """
+        if astring.find(substring) == -1:
+            raise self.failureException(msg or "%r not found in %r"
+                                        % (substring, astring))
+        return substring
+    assertSubstring = failUnlessSubstring
+
+    def test_urls(self):
+        s = setupBuildStepStatus("test_web.test_urls")
+        s.addURL("coverage", "http://coverage.example.org/target")
+        s.addURL("icon", "http://coverage.example.org/icon.png")
+        box = html.IBox(s).getBox()
+        td = box.td()
+        e1 = '[<a href="http://coverage.example.org/target" class="BuildStep external">coverage</a>]'
+        self.failUnlessSubstring(e1, td)
+        e2 = '[<a href="http://coverage.example.org/icon.png" class="BuildStep external">icon</a>]'
+        self.failUnlessSubstring(e2, td)
+
+
+
+geturl_config = """
+from buildbot.status import html
+from buildbot.changes import mail
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.scheduler import Scheduler
+from buildbot.changes.base import ChangeSource
+s = factory.s
+
+class DiscardScheduler(Scheduler):
+    def addChange(self, change):
+        pass
+class DummyChangeSource(ChangeSource):
+    pass
+
+BuildmasterConfig = c = {}
+c['bots'] = [('bot1', 'sekrit'), ('bot2', 'sekrit')]
+c['sources'] = [DummyChangeSource()]
+c['schedulers'] = [DiscardScheduler('discard', None, 60, ['b1'])]
+c['slavePortnum'] = 0
+c['status'] = [html.Waterfall(http_port=0)]
+
+f = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+
+c['builders'] = [
+    {'name': 'b1', 'slavenames': ['bot1','bot2'],
+     'builddir': 'b1', 'factory': f},
+    ]
+c['buildbotURL'] = 'http://dummy.example.org:8010/'
+
+"""
+
+class GetURL(RunMixin, unittest.TestCase):
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(geturl_config)
+        self.master.startService()
+        d = self.connectSlave(["b1"])
+        return maybeWait(d)
+
+    def tearDown(self):
+        stopHTTPLog()
+        return RunMixin.tearDown(self)
+
+    def doBuild(self, buildername):
+        br = base.BuildRequest("forced", sourcestamp.SourceStamp())
+        d = br.waitUntilFinished()
+        self.control.getBuilder(buildername).requestBuild(br)
+        return d
+
+    def assertNoURL(self, target):
+        self.failUnlessIdentical(self.status.getURLForThing(target), None)
+
+    def assertURLEqual(self, target, expected):
+        got = self.status.getURLForThing(target)
+        full_expected = "http://dummy.example.org:8010/" + expected
+        self.failUnlessEqual(got, full_expected)
+
+    def testMissingBase(self):
+        noweb_config1 = geturl_config + "del c['buildbotURL']\n"
+        d = self.master.loadConfig(noweb_config1)
+        d.addCallback(self._testMissingBase_1)
+        return maybeWait(d)
+    def _testMissingBase_1(self, res):
+        s = self.status
+        self.assertNoURL(s)
+        builder = s.getBuilder("b1")
+        self.assertNoURL(builder)
+
+    def testBase(self):
+        s = self.status
+        self.assertURLEqual(s, "")
+        builder = s.getBuilder("b1")
+        self.assertURLEqual(builder, "b1")
+
+    def testChange(self):
+        s = self.status
+        c = Change("user", ["foo.c"], "comments")
+        self.master.change_svc.addChange(c)
+        # TODO: something more like s.getChanges(), requires IChange and
+        # an accessor in IStatus. The HTML page exists already, though
+        self.assertURLEqual(c, "changes/1")
+
+    def testBuild(self):
+        # first we do some stuff so we'll have things to look at.
+        s = self.status
+        d = self.doBuild("b1")
+        # maybe check IBuildSetStatus here?
+        d.addCallback(self._testBuild_1)
+        return maybeWait(d)
+
+    def _testBuild_1(self, res):
+        s = self.status
+        builder = s.getBuilder("b1")
+        build = builder.getLastFinishedBuild()
+        self.assertURLEqual(build, "b1/builds/0")
+        # no page for builder.getEvent(-1)
+        step = build.getSteps()[0]
+        self.assertURLEqual(step, "b1/builds/0/step-remote%20dummy")
+        # maybe page for build.getTestResults?
+        self.assertURLEqual(step.getLogs()[0],
+                            "b1/builds/0/step-remote%20dummy/0")
+
+
+
+class Logfile(BaseWeb, RunMixin, unittest.TestCase):
+    def setUp(self):
+        config = """
+from buildbot.status import html
+from buildbot.process.factory import BasicBuildFactory
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+BuildmasterConfig = {
+    'bots': [('bot1', 'passwd1')],
+    'sources': [],
+    'schedulers': [],
+    'builders': [{'name': 'builder1', 'slavename': 'bot1',
+                  'builddir':'workdir', 'factory':f1}],
+    'slavePortnum': 0,
+    'status': [html.Waterfall(http_port=0)],
+    }
+"""
+        if os.path.exists("test_logfile"):
+            shutil.rmtree("test_logfile")
+        os.mkdir("test_logfile")
+        self.master = m = ConfiguredMaster("test_logfile", config)
+        m.startService()
+        # hack to find out what randomly-assigned port it is listening on
+        port = list(self.find_waterfall(m)[0])[0]._port.getHost().port
+        self.port = port
+        # insert an event
+
+        req = base.BuildRequest("reason", sourcestamp.SourceStamp())
+        build1 = base.Build([req])
+        bs = m.status.getBuilder("builder1").newBuild()
+        bs.setReason("reason")
+        bs.buildStarted(build1)
+
+        step1 = BuildStep(build=build1, name="setup")
+        bss = bs.addStepWithName("setup")
+        step1.setStepStatus(bss)
+        bss.stepStarted()
+
+        log1 = step1.addLog("output")
+        log1.addStdout("some stdout\n")
+        log1.finish()
+
+        log2 = step1.addHTMLLog("error", "<html>ouch</html>")
+
+        log3 = step1.addLog("big")
+        log3.addStdout("big log\n")
+        for i in range(1000):
+            log3.addStdout("a" * 500)
+            log3.addStderr("b" * 500)
+        log3.finish()
+
+        log4 = step1.addCompleteLog("bigcomplete",
+                                    "big2 log\n" + "a" * 1*1000*1000)
+
+        step1.step_status.stepFinished(builder.SUCCESS)
+        bs.buildFinished()
+
+    def getLogURL(self, stepname, lognum):
+        logurl = "http://localhost:%d/builder1/builds/0/step-%s/%d" \
+                 % (self.port, stepname, lognum)
+        return logurl
+
+    def test_logfile1(self):
+        d = client.getPage("http://localhost:%d/" % self.port)
+        d.addCallback(self._test_logfile1_1)
+        return maybeWait(d)
+    test_logfile1.timeout = 20
+    def _test_logfile1_1(self, page):
+        self.failUnless(page)
+
+    def test_logfile2(self):
+        logurl = self.getLogURL("setup", 0)
+        d = client.getPage(logurl)
+        d.addCallback(self._test_logfile2_1)
+        return maybeWait(d)
+    def _test_logfile2_1(self, logbody):
+        self.failUnless(logbody)
+
+    def test_logfile3(self):
+        logurl = self.getLogURL("setup", 0)
+        d = client.getPage(logurl + "/text")
+        d.addCallback(self._test_logfile3_1)
+        return maybeWait(d)
+    def _test_logfile3_1(self, logtext):
+        self.failUnlessEqual(logtext, "some stdout\n")
+
+    def test_logfile4(self):
+        logurl = self.getLogURL("setup", 1)
+        d = client.getPage(logurl)
+        d.addCallback(self._test_logfile4_1)
+        return maybeWait(d)
+    def _test_logfile4_1(self, logbody):
+        self.failUnlessEqual(logbody, "<html>ouch</html>")
+
+    def test_logfile5(self):
+        # this is log3, which is about 1MB in size, made up of alternating
+        # stdout/stderr chunks. buildbot-0.6.6, when run against
+        # twisted-1.3.0, fails to resume sending chunks after the client
+        # stalls for a few seconds, because of a recursive doWrite() call
+        # that was fixed in twisted-2.0.0
+        p = SlowReader("GET /builder1/builds/0/step-setup/2 HTTP/1.0\r\n\r\n")
+        f = CFactory(p)
+        c = reactor.connectTCP("localhost", self.port, f)
+        d = p.d
+        d.addCallback(self._test_logfile5_1, p)
+        return maybeWait(d, 10)
+    test_logfile5.timeout = 10
+    def _test_logfile5_1(self, res, p):
+        self.failUnlessIn("big log", p.data)
+        self.failUnlessIn("a"*100, p.data)
+        self.failUnless(p.count > 1*1000*1000)
+
+    def test_logfile6(self):
+        # this is log4, which is about 1MB in size, one big chunk.
+        # buildbot-0.6.6 dies as the NetstringReceiver barfs on the
+        # saved logfile, because it was using one big chunk and exceeding
+        # NetstringReceiver.MAX_LENGTH
+        p = SlowReader("GET /builder1/builds/0/step-setup/3 HTTP/1.0\r\n\r\n")
+        f = CFactory(p)
+        c = reactor.connectTCP("localhost", self.port, f)
+        d = p.d
+        d.addCallback(self._test_logfile6_1, p)
+        return maybeWait(d, 10)
+    test_logfile6.timeout = 10
+    def _test_logfile6_1(self, res, p):
+        self.failUnlessIn("big2 log", p.data)
+        self.failUnlessIn("a"*100, p.data)
+        self.failUnless(p.count > 1*1000*1000)
+
+

Added: vendor/buildbot/current/buildbot/twcompat.py
===================================================================
--- vendor/buildbot/current/buildbot/twcompat.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/twcompat.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,287 @@
+
+if 0:
+    print "hey python-mode, stop thinking I want 8-char indentation"
+
+"""
+utilities to be compatible with both Twisted-1.3 and 2.0
+
+implements: use it like the following.
+
+from buildbot.twcompat import implements
+class Foo:
+    if implements:
+        implements(IFoo)
+    else:
+        __implements__ = IFoo,
+        
+Interface:
+    from buildbot.twcompat import Interface
+    class IFoo(Interface): pass
+
+providedBy:
+    from buildbot.twcompat import providedBy
+    assert providedBy(obj, IFoo)
+"""
+
+import os
+
+from twisted.copyright import version
+from twisted.python import components
+
+# does our Twisted use zope.interface?
+implements = None
+if hasattr(components, "interface"):
+    # yes
+    from zope.interface import implements
+    from zope.interface import Interface
+    def providedBy(obj, iface):
+        return iface.providedBy(obj)
+else:
+    # nope
+    from twisted.python.components import Interface
+    providedBy = components.implements
+
+# are we using a version of Trial that allows setUp/testFoo/tearDown to
+# return Deferreds?
+oldtrial = version.startswith("1.3")
+
+# use this at the end of setUp/testFoo/tearDown methods
+def maybeWait(d, timeout="none"):
+    from twisted.python import failure
+    from twisted.trial import unittest
+    if oldtrial:
+        # this is required for oldtrial (twisted-1.3.0) compatibility. When we
+        # move to retrial (twisted-2.0.0), replace these with a simple 'return
+        # d'.
+        try:
+            if timeout == "none":
+                unittest.deferredResult(d)
+            else:
+                unittest.deferredResult(d, timeout)
+        except failure.Failure, f:
+            if f.check(unittest.SkipTest):
+                raise f.value
+            raise
+        return None
+    return d
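+
+# A minimal usage sketch (illustrative only; the test method and helper
+# names are hypothetical): a Trial test builds its Deferred chain and
+# returns maybeWait(d) so the same code runs under both old and new Trial.
+#
+#     def testSomething(self):
+#         d = self.startSomethingReturningADeferred()
+#         d.addCallback(self._checkResult)
+#         return maybeWait(d)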
+
+# waitForDeferred and getProcessOutputAndValue are twisted-2.0 things. If
+# we're running under 1.3, patch them into place. These versions are copied
+# from twisted somewhat after 2.0.1.
+
+from twisted.internet import defer
+if not hasattr(defer, 'waitForDeferred'):
+    Deferred = defer.Deferred
+    class waitForDeferred:
+        """
+        API Stability: semi-stable
+
+        Maintainer: U{Christopher Armstrong<mailto:radix at twistedmatrix.com>}
+
+        waitForDeferred and deferredGenerator help you write
+        Deferred-using code that looks like it's blocking (but isn't
+        really), with the help of generators.
+
+        There are two important functions involved: waitForDeferred, and
+        deferredGenerator.
+
+            def thingummy():
+                thing = waitForDeferred(makeSomeRequestResultingInDeferred())
+                yield thing
+                thing = thing.getResult()
+                print thing #the result! hoorj!
+            thingummy = deferredGenerator(thingummy)
+
+        waitForDeferred returns something that you should immediately yield;
+        when your generator is resumed, calling thing.getResult() will either
+        give you the result of the Deferred if it was a success, or raise an
+        exception if it was a failure.
+
+        deferredGenerator takes one of these waitForDeferred-using
+        generator functions and converts it into a function that returns a
+        Deferred. The result of the Deferred will be the last
+        value that your generator yielded (remember that 'return result' won't
+        work; use 'yield result; return' in place of that).
+
+        Note that not yielding anything from your generator will make the
+        Deferred result in None. Yielding a Deferred from your generator
+        is also an error condition; always yield waitForDeferred(d)
+        instead.
+
+        The Deferred returned from your deferred generator may also
+        errback if your generator raised an exception.
+
+            def thingummy():
+                thing = waitForDeferred(makeSomeRequestResultingInDeferred())
+                yield thing
+                thing = thing.getResult()
+                if thing == 'I love Twisted':
+                    # will become the result of the Deferred
+                    yield 'TWISTED IS GREAT!'
+                    return
+                else:
+                    # will trigger an errback
+                    raise Exception('DESTROY ALL LIFE')
+            thingummy = deferredGenerator(thingummy)
+
+        Put succinctly, these functions connect deferred-using code with this
+        'fake blocking' style in both directions: waitForDeferred converts from
+        a Deferred to the 'blocking' style, and deferredGenerator converts from
+        the 'blocking' style to a Deferred.
+        """
+        def __init__(self, d):
+            if not isinstance(d, Deferred):
+                raise TypeError("You must give waitForDeferred a Deferred. You gave it %r." % (d,))
+            self.d = d
+
+        def getResult(self):
+            if hasattr(self, 'failure'):
+                self.failure.raiseException()
+            return self.result
+
+    def _deferGenerator(g, deferred=None, result=None):
+        """
+        See L{waitForDeferred}.
+        """
+        while 1:
+            if deferred is None:
+                deferred = defer.Deferred()
+            try:
+                result = g.next()
+            except StopIteration:
+                deferred.callback(result)
+                return deferred
+            except:
+                deferred.errback()
+                return deferred
+
+            # Deferred.callback(Deferred) raises an error; we catch this case
+            # early here and give a nicer error message to the user in case
+            # they yield a Deferred. Perhaps eventually these semantics may
+            # change.
+            if isinstance(result, defer.Deferred):
+                return defer.fail(TypeError("Yield waitForDeferred(d), not d!"))
+
+            if isinstance(result, waitForDeferred):
+                waiting=[True, None]
+                # Pass vars in so they don't get changed going around the loop
+                def gotResult(r, waiting=waiting, result=result):
+                    result.result = r
+                    if waiting[0]:
+                        waiting[0] = False
+                        waiting[1] = r
+                    else:
+                        _deferGenerator(g, deferred, r)
+                def gotError(f, waiting=waiting, result=result):
+                    result.failure = f
+                    if waiting[0]:
+                        waiting[0] = False
+                        waiting[1] = f
+                    else:
+                        _deferGenerator(g, deferred, f)
+                result.d.addCallbacks(gotResult, gotError)
+                if waiting[0]:
+                    # Haven't called back yet, set flag so that we get reinvoked
+                    # and return from the loop
+                    waiting[0] = False
+                    return deferred
+                else:
+                    result = waiting[1]
+
+    def func_metamerge(f, g):
+        """
+        Merge function metadata from f -> g and return g
+        """
+        try:
+            g.__doc__ = f.__doc__
+            g.__dict__.update(f.__dict__)
+            g.__name__ = f.__name__
+        except (TypeError, AttributeError):
+            pass
+        return g
+
+    def deferredGenerator(f):
+        """
+        See L{waitForDeferred}.
+        """
+        def unwindGenerator(*args, **kwargs):
+            return _deferGenerator(f(*args, **kwargs))
+        return func_metamerge(f, unwindGenerator)
+
+    defer.waitForDeferred = waitForDeferred
+    defer.deferredGenerator = deferredGenerator
+
+from twisted.internet import utils
+if not hasattr(utils, "getProcessOutputAndValue"):
+    from twisted.internet import reactor, protocol
+    _callProtocolWithDeferred = utils._callProtocolWithDeferred
+    try:
+        import cStringIO
+        StringIO = cStringIO
+    except ImportError:
+        import StringIO
+
+    class _EverythingGetter(protocol.ProcessProtocol):
+
+        def __init__(self, deferred):
+            self.deferred = deferred
+            self.outBuf = StringIO.StringIO()
+            self.errBuf = StringIO.StringIO()
+            self.outReceived = self.outBuf.write
+            self.errReceived = self.errBuf.write
+
+        def processEnded(self, reason):
+            out = self.outBuf.getvalue()
+            err = self.errBuf.getvalue()
+            e = reason.value
+            code = e.exitCode
+            if e.signal:
+                self.deferred.errback((out, err, e.signal))
+            else:
+                self.deferred.callback((out, err, code))
+
+    def getProcessOutputAndValue(executable, args=(), env={}, path='.', 
+                                 reactor=reactor):
+        """Spawn a process and returns a Deferred that will be called back
+        with its output (from stdout and stderr) and it's exit code as (out,
+        err, code) If a signal is raised, the Deferred will errback with the
+        stdout and stderr up to that point, along with the signal, as (out,
+        err, signalNum)
+        """
+        return _callProtocolWithDeferred(_EverythingGetter,
+                                         executable, args, env, path,
+                                         reactor)
+    utils.getProcessOutputAndValue = getProcessOutputAndValue
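+
+    # Hedged usage sketch (hypothetical command and callback, not part of
+    # the original file): the Deferred fires with an (out, err, code) tuple.
+    #
+    #     def report((out, err, code)):
+    #         print "rc=%d stdout=%r" % (code, out)
+    #     d = utils.getProcessOutputAndValue("/bin/echo", ["hello"])
+    #     d.addCallback(report)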
+
+
+# copied from Twisted circa 2.2.0
+def _which(name, flags=os.X_OK):
+    """Search PATH for executable files with the given name.
+
+    @type name: C{str}
+    @param name: The name for which to search.
+
+    @type flags: C{int}
+    @param flags: Arguments to L{os.access}.
+
+    @rtype: C{list}
+    @return: A list of the full paths to files found, in the
+    order in which they were found.
+    """
+    result = []
+    exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
+    for p in os.environ['PATH'].split(os.pathsep):
+        p = os.path.join(p, name)
+        if os.access(p, flags):
+            result.append(p)
+        for e in exts:
+            pext = p + e
+            if os.access(pext, flags):
+                result.append(pext)
+    return result
+
+which = _which
+try:
+    from twisted.python.procutils import which
+except ImportError:
+    pass

Added: vendor/buildbot/current/buildbot/util.py
===================================================================
--- vendor/buildbot/current/buildbot/util.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/buildbot/util.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,70 @@
+# -*- test-case-name: buildbot.test.test_util -*-
+
+from twisted.internet.defer import Deferred
+from twisted.spread import pb
+import time
+
+def now():
+    #return int(time.time())
+    return time.time()
+
+def earlier(old, new):
+    # minimum of two things, but "None" counts as +infinity
+    if old:
+        if new < old:
+            return new
+        return old
+    return new
+
+def later(old, new):
+    # maximum of two things, but "None" counts as -infinity
+    if old:
+        if new > old:
+            return new
+        return old
+    return new
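+
+# For example (purely illustrative values): earlier(None, 5) == 5,
+# earlier(3, 5) == 3, later(None, 5) == 5, and later(3, 5) == 5.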
+
+class CancelableDeferred(Deferred):
+    """I am a version of Deferred that can be canceled by calling my
+    .cancel() method. After being canceled, no callbacks or errbacks will be
+    executed.
+    """
+    def __init__(self):
+        Deferred.__init__(self)
+        self.canceled = 0
+    def cancel(self):
+        self.canceled = 1
+    def _runCallbacks(self):
+        if self.canceled:
+            self.callbacks = []
+            return
+        Deferred._runCallbacks(self)
+
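+# Illustrative sketch (the callback name is hypothetical, not part of this
+# module): once cancel() has been called, queued callbacks are discarded
+# even if the deferred later fires.
+#
+#     d = CancelableDeferred()
+#     d.addCallback(lambda res: announceBuild(res))
+#     d.cancel()   # announceBuild() will never be invoked
+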
+def ignoreStaleRefs(failure):
+    """d.addErrback(util.ignoreStaleRefs)"""
+    r = failure.trap(pb.DeadReferenceError, pb.PBConnectionLost)
+    return None
+
+class _None:
+    pass
+
+class ComparableMixin:
+    """Specify a list of attributes that are 'important'. These will be used
+    for all comparison operations."""
+
+    compare_attrs = []
+
+    def __hash__(self):
+        alist = [self.__class__] + \
+                [getattr(self, name, _None) for name in self.compare_attrs]
+        return hash(tuple(alist))
+
+    def __cmp__(self, them):
+        if cmp(type(self), type(them)):
+            return cmp(type(self), type(them))
+        if cmp(self.__class__, them.__class__):
+            return cmp(self.__class__, them.__class__)
+        assert self.compare_attrs == them.compare_attrs
+        self_list= [getattr(self, name, _None) for name in self.compare_attrs]
+        them_list= [getattr(them, name, _None) for name in self.compare_attrs]
+        return cmp(self_list, them_list)
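+
+# Illustrative sketch (hypothetical subclass, not part of this module): a
+# subclass lists its significant attributes in compare_attrs, and the mixin
+# derives __hash__ and __cmp__ from exactly those attributes.
+#
+#     class RepoSpec(ComparableMixin):
+#         compare_attrs = ["url", "branch"]
+#         def __init__(self, url, branch=None):
+#             self.url = url
+#             self.branch = branch
+#
+#     RepoSpec("svn://host/repo", "trunk") == RepoSpec("svn://host/repo",
+#                                                      "trunk")   # True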

Added: vendor/buildbot/current/contrib/README.txt
===================================================================
--- vendor/buildbot/current/contrib/README.txt	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/README.txt	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,41 @@
+Utility scripts, things contributed by users but not strictly a part of
+buildbot:
+
+debugclient.py (and debug.*): debugging gui for buildbot
+
+fakechange.py: connect to a running bb and submit a fake change to trigger
+               builders
+
+run_maxq.py: a builder-helper for running maxq under buildbot
+
+svn_buildbot.py: a script run from a subversion post-commit hook; it sends
+                 the svn changes to the buildmaster (requires python 2.3)
+
+svnpoller.py: this script is intended to be run from a cronjob, and uses 'svn
+              log' to poll a (possibly remote) SVN repository for changes.
+              For each change it finds, it runs 'buildbot sendchange' to
+              deliver them to a waiting PBChangeSource on a (possibly remote)
+              buildmaster. Modify the svnurl to point at your own SVN
+              repository, and of course the user running the script must have
+              read permissions to that repository. It keeps track of the last
+              revision in a file; change 'fname' to set the location of this
+              state file. Modify the --master argument to the 'buildbot
+              sendchange' command to point at your buildmaster. Contributed
+              by John Pye. Note that if there are multiple changes within a
+              single polling interval, this will miss all but the last one.
+
+svn_watcher.py: adapted from svnpoller.py by Niklaus Giger to add options and
+                run under windows. Runs as a standalone script (it loops
+                internally rather than expecting to run from a cronjob),
+                polls an SVN repository every 10 minutes. It expects the
+                svnurl and buildmaster location as command-line arguments.
+
+viewcvspoll.py: a standalone script which loops every 60 seconds and polls a
+                (local?) MySQL database (presumably maintained by ViewCVS?)
+                for information about new CVS changes, then delivers them
+                over PB to a remote buildmaster's PBChangeSource. Contributed
+                by Stephen Kennedy.
+
+CSS/*.css: alternative HTML stylesheets to make the Waterfall display look
+           prettier. Copy them somewhere, then pass the filename to the
+           css= argument of the Waterfall() constructor.

Added: vendor/buildbot/current/contrib/arch_buildbot.py
===================================================================
--- vendor/buildbot/current/contrib/arch_buildbot.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/arch_buildbot.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,73 @@
+#! /usr/bin/python
+
+# this script is meant to run as an Arch post-commit hook (and also as a
+# pre-commit hook), using the "arch-meta-hook" framework. See
+# http://wiki.gnuarch.org/NdimMetaHook for details. The pre-commit hook
+# creates a list of files (and log comments), while the post-commit hook
+# actually notifies the buildmaster.
+
+# this script doesn't handle partial commits quite right: it will tell the
+# buildmaster that everything changed, not just the filenames you give to
+# 'tla commit'.
+
+import os, commands, cStringIO
+from buildbot.scripts import runner
+
+# Just modify the appropriate values below and then put this file in two
+# places: ~/.arch-params/hooks/ARCHIVE/=precommit/90buildbot.py and
+# ~/.arch-params/hooks/ARCHIVE/=commit/10buildbot.py
+
+master = "localhost:9989"
+username = "myloginname"
+
+# Remember that for this to work, your buildmaster's master.cfg needs to have
+# a c['sources'] list which includes a pb.PBChangeSource instance.
+
+os.chdir(os.getenv("ARCH_TREE_ROOT"))
+filelist = ",,bb-files"
+commentfile = ",,bb-comments"
+
+if os.getenv("ARCH_HOOK_ACTION") == "precommit":
+    files = []
+    out = commands.getoutput("tla changes")
+    for line in cStringIO.StringIO(out).readlines():
+        if line[0] in "AMD": # add, modify, delete
+            files.append(line[3:])
+    if files:
+        f = open(filelist, "w")
+        f.write("".join(files))
+        f.close()
+    # comments
+    logfiles = [f for f in os.listdir(".") if f.startswith("++log.")]
+    if len(logfiles) > 1:
+        print ("Warning, multiple ++log.* files found, getting comments "
+               "from the first one")
+    if logfiles:
+        open(commentfile, "w").write(open(logfiles[0], "r").read())
+
+elif os.getenv("ARCH_HOOK_ACTION") == "commit":
+    revision = os.getenv("ARCH_REVISION")
+
+    files = []
+    if os.path.exists(filelist):
+        f = open(filelist, "r")
+        for line in f.readlines():
+            files.append(line.rstrip())
+    if not files:
+        # buildbot insists upon having at least one modified file (otherwise
+        # the prefix-stripping mechanism will ignore the change)
+        files = ["dummy"]
+
+    if os.path.exists(commentfile):
+        comments = open(commentfile, "r").read()
+    else:
+        comments = "commit from arch"
+
+    c = {'master': master, 'username': username,
+         'revision': revision, 'comments': comments, 'files': files}
+    runner.sendchange(c, True)
+
+    if os.path.exists(filelist):
+        os.unlink(filelist)
+    if os.path.exists(commentfile):
+        os.unlink(commentfile)


Property changes on: vendor/buildbot/current/contrib/arch_buildbot.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/darcs_buildbot.py
===================================================================
--- vendor/buildbot/current/contrib/darcs_buildbot.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/darcs_buildbot.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,86 @@
+#! /usr/bin/python
+
+# This is a script which delivers Change events from Darcs to the buildmaster
+# each time a patch is pushed into a repository. Add it to the 'apply' hook
+# on your canonical "central" repository, by putting something like the
+# following in the _darcs/prefs/defaults file of that repository:
+#
+#  apply posthook /PATH/TO/darcs_buildbot.py BUILDMASTER:PORT
+#  apply run-posthook
+#
+# (the second command is necessary to avoid the usual "do you really want to
+# run this hook" prompt. Note that you cannot have multiple 'apply posthook'
+# lines.)
+#
+# Note that both Buildbot and Darcs must be installed on the repository
+# machine. You will also need the Python/XML distribution installed (the
+# "python2.3-xml" package under debian).
+
+import os, sys, commands
+from StringIO import StringIO
+from buildbot.scripts import runner
+import xml
+from xml.dom import minidom
+
+MASTER = sys.argv[1]
+
+out = commands.getoutput("darcs changes --last=1 --xml-output --summary")
+#out = commands.getoutput("darcs changes -p 'project @ 2006-05-21 19:07:27 by warner' --xml-output --summary")
+try:
+    doc = minidom.parseString(out)
+except xml.parsers.expat.ExpatError, e:
+    print "failed to parse XML"
+    print str(e)
+    print "purported XML is:"
+    print "--BEGIN--"
+    print out
+    print "--END--"
+    sys.exit(1)
+
+c = doc.getElementsByTagName("changelog")[0]
+p = c.getElementsByTagName("patch")[0]
+
+def getText(node):
+    return "".join([cn.data
+                    for cn in node.childNodes
+                    if cn.nodeType == cn.TEXT_NODE])
+def getTextFromChild(parent, childtype):
+    children = parent.getElementsByTagName(childtype)
+    if not children:
+        return ""
+    return getText(children[0])
+
+
+author = p.getAttribute("author")
+revision = p.getAttribute("hash")
+comments = getTextFromChild(p, "name") + "\n" + getTextFromChild(p, "comment")
+
+summary = c.getElementsByTagName("summary")[0]
+files = []
+for filenode in summary.childNodes:
+    if filenode.nodeName in ("add_file", "modify_file", "remove_file"):
+        filename = getText(filenode).strip()
+        files.append(filename)
+
+# note that these are all unicode. Because PB can't handle unicode, we encode
+# them into ascii, which will blow up early if there's anything we can't get
+# to the far side. When we move to something that *can* handle unicode (like
+# newpb), remove this.
+author = author.encode("ascii")
+comments = comments.encode("ascii")
+files = [f.encode("ascii") for f in files]
+revision = revision.encode("ascii")
+
+change = {
+    'master': MASTER,
+    # note: this is more likely to be a full email address, which would make
+    # the left-hand "Changes" column kind of wide. The buildmaster should
+    # probably be improved to display an abbreviation of the username.
+    'username': author,
+    'revision': revision,
+    'comments': comments,
+    'files': files,
+    }
+
+runner.sendchange(change, True)
+


Property changes on: vendor/buildbot/current/contrib/darcs_buildbot.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/fakechange.py
===================================================================
--- vendor/buildbot/current/contrib/fakechange.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/fakechange.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,76 @@
+#! /usr/bin/python
+
+"""
+This is an example of how to use the remote ChangeMaster interface, which is
+a port that allows a remote program to inject Changes into the buildmaster.
+
+The buildmaster can either pull changes in from external sources (see
+buildbot.changes.changes.ChangeMaster.addSource for an example), or those
+changes can be pushed in from outside. This script shows how to do the
+pushing.
+
+Changes are just dictionaries with three keys:
+
+ 'who': a simple string with a username. Responsibility for this change will
+ be assigned to the named user (if something goes wrong with the build, they
+ will be blamed for it).
+
+ 'files': a list of strings, each with a filename relative to the top of the
+ source tree.
+
+ 'comments': a (multiline) string with checkin comments.
+
+Each call to .addChange injects a single Change object: each Change
+represents multiple files, all changed by the same person, and all with the
+same checkin comments.
+
+The port that this script connects to is the same 'slavePort' that the
+buildslaves and other debug tools use. The ChangeMaster service will only be
+available on that port if 'change' is in the list of services passed to
+buildbot.master.makeApp (this service is turned ON by default).
+"""
+
+import sys
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.internet import reactor
+from twisted.python import log
+import commands, random, os.path
+
+def done(*args):
+    reactor.stop()
+
+users = ('zaphod', 'arthur', 'trillian', 'marvin', 'sbfast')
+dirs = ('src', 'doc', 'tests')
+sources = ('foo.c', 'bar.c', 'baz.c', 'Makefile')
+docs = ('Makefile', 'index.html', 'manual.texinfo')
+
+def makeFilename():
+    d = random.choice(dirs)
+    if d in ('src', 'tests'):
+        f = random.choice(sources)
+    else:
+        f = random.choice(docs)
+    return os.path.join(d, f)
+        
+
+def send_change(remote):
+    who = random.choice(users)
+    if len(sys.argv) > 1:
+        files = sys.argv[1:]
+    else:
+        files = [makeFilename()]
+    comments = commands.getoutput("fortune")
+    change = {'who': who, 'files': files, 'comments': comments}
+    d = remote.callRemote('addChange', change)
+    d.addCallback(done)
+    print "%s: %s" % (who, " ".join(files))
+
+
+f = pb.PBClientFactory()
+d = f.login(credentials.UsernamePassword("change", "changepw"))
+reactor.connectTCP("localhost", 8007, f)
+err = lambda f: (log.err(), reactor.stop())
+d.addCallback(send_change).addErrback(err)
+
+reactor.run()


Property changes on: vendor/buildbot/current/contrib/fakechange.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/hg_buildbot.py
===================================================================
--- vendor/buildbot/current/contrib/hg_buildbot.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/hg_buildbot.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,57 @@
+#! /usr/bin/python
+
+# This is a script which delivers Change events from Mercurial to the
+# buildmaster each time a changeset is pushed into a repository. Add it to
+# the 'incoming' commit hook on your canonical "central" repository, by
+# putting something like the following in the .hg/hgrc file of that
+# repository:
+#
+#  [hooks]
+#  incoming.buildbot = /PATH/TO/hg_buildbot.py BUILDMASTER:PORT
+#
+# Note that both Buildbot and Mercurial must be installed on the repository
+# machine.
+
+import os, sys, commands
+from StringIO import StringIO
+from buildbot.scripts import runner
+
+MASTER = sys.argv[1]
+
+CHANGESET_ID = os.environ["HG_NODE"]
+
+# TODO: consider doing 'import mercurial.hg' and extract this information
+# using the native python
+out = commands.getoutput("hg -v log -r %s" % CHANGESET_ID)
+# TODO: or maybe use --template instead of trying hard to parse everything
+#out = commands.getoutput("hg --template SOMETHING log -r %s" % CHANGESET_ID)
+
+s = StringIO(out)
+while True:
+    line = s.readline()
+    if not line:
+        break
+    if line.startswith("user:"):
+        user = line[line.find(":")+1:].strip()
+    elif line.startswith("files:"):
+        files = line[line.find(":")+1:].strip().split()
+    elif line.startswith("description:"):
+        comments = "".join(s.readlines())
+        if comments[-1] == "\n":
+            # this removes the additional newline that hg emits
+            comments = comments[:-1]
+        break
+
+change = {
+    'master': MASTER,
+    # note: this is more likely to be a full email address, which would make
+    # the left-hand "Changes" column kind of wide. The buildmaster should
+    # probably be improved to display an abbreviation of the username.
+    'username': user,
+    'revision': CHANGESET_ID,
+    'comments': comments,
+    'files': files,
+    }
+
+runner.sendchange(change, True)
+


Property changes on: vendor/buildbot/current/contrib/hg_buildbot.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/run_maxq.py
===================================================================
--- vendor/buildbot/current/contrib/run_maxq.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/run_maxq.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,47 @@
+#!/usr/bin/env jython
+
+import sys, glob
+
+testdir = sys.argv[1]
+
+orderfiles = glob.glob(testdir + '/*.tests')
+
+# wee. just be glad I didn't make this one gigantic nested listcomp.
+# anyway, this builds a once-nested list of files to test.
+
+#open!
+files = [open(fn) for fn in orderfiles]
+
+#create prelim list of lists of files!
+files = [f.readlines() for f in files]
+
+#shwack newlines and filter out empties!
+files = [filter(None, [fn.strip() for fn in fs]) for fs in files]
+
+#prefix with testdir
+files = [[testdir + '/' + fn.strip() for fn in fs] for fs in files]
+
+print "Will run these tests:", files
+
+i = 0
+
+for testlist in files:
+
+    print "==========================="
+    print "running tests from testlist", orderfiles[i]
+    print "---------------------------"
+    i = i + 1
+    
+    for test in testlist:
+        print "running test", test
+
+        try:
+            execfile(test, globals().copy())
+
+        except:
+            ei = sys.exc_info()
+            print "TEST FAILURE:", ei[1]
+
+        else:
+            print "SUCCESS"
+


Property changes on: vendor/buildbot/current/contrib/run_maxq.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/svn_buildbot.py
===================================================================
--- vendor/buildbot/current/contrib/svn_buildbot.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/svn_buildbot.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+
+# this requires python >=2.3 for the 'sets' module.
+
+# The sets.py from python-2.3 appears to work fine under python2.2 . To
+# install this script on a host with only python2.2, copy
+# /usr/lib/python2.3/sets.py from a newer python into somewhere on your
+# PYTHONPATH, then edit the #! line above to invoke python2.2
+
+# python2.1 is right out
+
+# If you run this program as part of your SVN post-commit hooks, it will
+# deliver Change notices to a buildmaster that is running a PBChangeSource
+# instance.
+
+# edit your svn-repository/hooks/post-commit file, and add lines that look
+# like this:
+
+'''
+# set up PYTHONPATH to contain Twisted/buildbot perhaps, if not already
+# installed site-wide
+. ~/.environment
+
+/path/to/svn_buildbot.py --repository "$REPOS" --revision "$REV" --bbserver localhost --bbport 9989
+'''
+
+import commands, sys, os
+import re
+import sets
+
+# We have hackish "-d" handling here rather than in the Options
+# subclass below because a common error will be to not have twisted in
+# PYTHONPATH; we want to be able to print that error to the log if
+# debug mode is on, so we set it up before the imports.
+
+DEBUG = None
+
+if '-d' in sys.argv:
+    i = sys.argv.index('-d')
+    DEBUG = sys.argv[i+1]
+    del sys.argv[i]
+    del sys.argv[i]
+
+if DEBUG:
+    f = open(DEBUG, 'a')
+    sys.stderr = f
+    sys.stdout = f
+
+from twisted.internet import defer, reactor
+from twisted.python import usage
+from twisted.spread import pb
+from twisted.cred import credentials
+
+class Options(usage.Options):
+    optParameters = [
+        ['repository', 'r', None,
+         "The repository that was changed."],
+        ['revision', 'v', None,
+         "The revision that we want to examine (default: latest)"],
+        ['bbserver', 's', 'localhost',
+         "The hostname of the server that buildbot is running on"],
+        ['bbport', 'p', 8007,
+         "The port that buildbot is listening on"],
+        ['include', 'f', None,
+         '''\
+Search the list of changed files for this regular expression, and if there is
+at least one match, notify buildbot; otherwise buildbot will not do a build.
+You may provide more than one -f argument to try multiple
+patterns.  If no filter is given, buildbot will always be notified.'''],
+        ['filter', 'f', None, "Same as --include.  (Deprecated)"],
+        ['exclude', 'F', None,
+         '''\
+The inverse of --filter.  Changed files matching this expression will never  
+be considered for a build.  
+You may provide more than one -F argument to try multiple
+patterns.  Excludes override includes, that is, patterns that match both an
+include and an exclude will be excluded.'''],
+        ]
+    optFlags = [
+        ['dryrun', 'n', "Do not actually send changes"],
+        ]
+
+    def __init__(self):
+        usage.Options.__init__(self)
+        self._includes = []
+        self._excludes = []
+        self['includes'] = None
+        self['excludes'] = None
+
+    def opt_include(self, arg):
+        self._includes.append('.*%s.*' % (arg,))
+    opt_filter = opt_include
+
+    def opt_exclude(self, arg):
+        self._excludes.append('.*%s.*' % (arg,))
+
+    def postOptions(self):
+        if self['repository'] is None:
+            raise usage.error("You must pass --repository")
+        if self._includes:
+            self['includes'] = '(%s)' % ('|'.join(self._includes),)
+        if self._excludes:
+            self['excludes'] = '(%s)' % ('|'.join(self._excludes),)
+
+def split_file_dummy(changed_file):
+    """Split the repository-relative filename into a tuple of (branchname,
+    branch_relative_filename). If you have no branches, this should just
+    return (None, changed_file).
+    """
+    return (None, changed_file)
+
+# this version handles repository layouts that look like:
+#  trunk/files..                  -> trunk
+#  branches/branch1/files..       -> branches/branch1
+#  branches/branch2/files..       -> branches/branch2
+#
+def split_file_branches(changed_file):
+    pieces = changed_file.split(os.sep)
+    if pieces[0] == 'branches':
+        return (os.path.join(*pieces[:2]),
+                os.path.join(*pieces[2:]))
+    if pieces[0] == 'trunk':
+        return (pieces[0], os.path.join(*pieces[1:]))
+    ## there are other siblings of 'trunk' and 'branches'. Pretend they are
+    ## all just funny-named branches, and let the Schedulers ignore them.
+    #return (pieces[0], os.path.join(*pieces[1:]))
+
+    raise RuntimeError("cannot determine branch for '%s'" % changed_file)
+
+split_file = split_file_dummy
+
+
+class ChangeSender:
+
+    def getChanges(self, opts):
+        """Generate and stash a list of Change dictionaries, ready to be sent
+        to the buildmaster's PBChangeSource."""
+
+        # first we extract information about the files that were changed
+        repo = opts['repository']
+        print "Repo:", repo
+        rev_arg = ''
+        if opts['revision']:
+            rev_arg = '-r %s' % (opts['revision'],)
+        changed = commands.getoutput('svnlook changed %s "%s"' % (rev_arg,
+                                                                  repo)
+                                     ).split('\n')
+        # the first 4 columns can contain status information
+        changed = [x[4:] for x in changed]
+
+        message = commands.getoutput('svnlook log %s "%s"' % (rev_arg, repo))
+        who = commands.getoutput('svnlook author %s "%s"' % (rev_arg, repo))
+        revision = opts.get('revision')
+        if revision is not None:
+            revision = int(revision)
+
+        # see if we even need to notify buildbot by looking at filters first
+        changestring = '\n'.join(changed)
+        fltpat = opts['includes']
+        if fltpat:
+            included = sets.Set(re.findall(fltpat, changestring))
+        else:
+            included = sets.Set(changed)
+
+        expat = opts['excludes']
+        if expat:
+            excluded = sets.Set(re.findall(expat, changestring))
+        else:
+            excluded = sets.Set([])
+        if len(included.difference(excluded)) == 0:
+            print changestring
+            print """\
+    Buildbot was not interested, no changes matched any of these filters:\n %s
+    or all the changes matched these exclusions:\n %s\
+    """ % (fltpat, expat)
+            sys.exit(0)
+
+        # now see which branches are involved
+        files_per_branch = {}
+        for f in changed:
+            branch, filename = split_file(f)
+            if files_per_branch.has_key(branch):
+                files_per_branch[branch].append(filename)
+            else:
+                files_per_branch[branch] = [filename]
+
+        # now create the Change dictionaries
+        changes = []
+        for branch in files_per_branch.keys():
+            d = {'who': who,
+                 'branch': branch,
+                 'files': files_per_branch[branch],
+                 'comments': message,
+                 'revision': revision}
+            changes.append(d)
+
+        return changes
+
+    def sendChanges(self, opts, changes):
+        pbcf = pb.PBClientFactory()
+        reactor.connectTCP(opts['bbserver'], int(opts['bbport']), pbcf)
+        d = pbcf.login(credentials.UsernamePassword('change', 'changepw'))
+        d.addCallback(self.sendAllChanges, changes)
+        return d
+
+    def sendAllChanges(self, remote, changes):
+        dl = [remote.callRemote('addChange', change)
+              for change in changes]
+        return defer.DeferredList(dl)
+
+    def run(self):
+        opts = Options()
+        try:
+            opts.parseOptions()
+        except usage.error, ue:
+            print opts
+            print "%s: %s" % (sys.argv[0], ue)
+            sys.exit()
+
+        changes = self.getChanges(opts)
+        if opts['dryrun']:
+            for i,c in enumerate(changes):
+                print "CHANGE #%d" % (i+1)
+                keys = c.keys()
+                keys.sort()
+                for k in keys:
+                    print "[%10s]: %s" % (k, c[k])
+            print "*NOT* sending any changes"
+            return
+
+        d = self.sendChanges(opts, changes)
+
+        def quit(*why):
+            print "quitting! because", why
+            reactor.stop()
+
+        def failed(f):
+            print "FAILURE"
+            print f
+            reactor.stop()
+
+        d.addCallback(quit, "SUCCESS")
+        d.addErrback(failed)
+        reactor.callLater(60, quit, "TIMEOUT")
+        reactor.run()
+
+if __name__ == '__main__':
+    s = ChangeSender()
+    s.run()
+
+


Property changes on: vendor/buildbot/current/contrib/svn_buildbot.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/svn_watcher.py
===================================================================
--- vendor/buildbot/current/contrib/svn_watcher.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/svn_watcher.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+
+# This is a program which will poll a (remote) SVN repository, looking for
+# new revisions. It then uses the 'buildbot sendchange' command to deliver
+# information about the Change to a (remote) buildmaster. It can be run from
+# a cron job on a periodic basis, or can be told (with the 'watch' option) to
+# automatically repeat its check every 10 minutes.
+
+# This script does not store any state information, so to avoid spurious
+# changes you must use the 'watch' option and let it run forever.
+
+# You will need to provide it with the location of the buildmaster's
+# PBChangeSource port (in the form hostname:portnum), and the svnurl of the
+# repository to watch.
+
+
+# 15.03.06 by John Pye
+# 29.03.06 by Niklaus Giger, added support to run under windows, added invocation option
+import subprocess
+import xml.dom.minidom
+import sys
+import time
+import os
+if sys.platform == 'win32':
+    import win32pipe
+
+def getoutput(cmd):
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+    return p.stdout.read()
+
+def checkChanges(repo, master, verbose=False, oldRevision=-1):
+    cmd = ["svn", "log", "--non-interactive", "--xml", "--verbose",
+           "--limit=1", repo]
+    if verbose == True:
+        print "Getting last revision of repository: " + repo
+
+    if sys.platform == 'win32':
+        f = win32pipe.popen(cmd)
+        xml1 = ''.join(f.readlines())
+        f.close()
+    else:
+        xml1 = getoutput(cmd)
+
+    if verbose == True:
+        print "XML\n-----------\n"+xml1+"\n\n"
+
+    doc = xml.dom.minidom.parseString(xml1)
+    el = doc.getElementsByTagName("logentry")[0]
+    revision = el.getAttribute("revision")
+    author = "".join([t.data for t in
+    el.getElementsByTagName("author")[0].childNodes])
+    comments = "".join([t.data for t in
+    el.getElementsByTagName("msg")[0].childNodes])
+    
+    pathlist = el.getElementsByTagName("paths")[0]
+    paths = []
+    for p in pathlist.getElementsByTagName("path"):
+            paths.append("".join([t.data for t in p.childNodes]))
+
+    if verbose == True:
+        print "PATHS"
+        print paths
+
+    if  revision != oldRevision:
+        cmd = ["buildbot", "sendchange", "--master=%s"%master,
+               "--revision=%s"%revision, "--username=%s"%author,
+               "--comments=%s"%comments]
+        cmd += paths
+
+        if verbose == True:
+            print cmd
+
+        if sys.platform == 'win32':
+            f = win32pipe.popen(cmd)
+            print time.strftime("%H.%M.%S ") + "Revision "+revision+ ": "+ ''.join(f.readlines())
+            f.close()
+        else:
+            xml1 = getoutput(cmd)
+    else:
+        print time.strftime("%H.%M.%S ") + "nothing has changed since revision "+revision
+
+    return revision
+
+if __name__ == '__main__':
+    if len(sys.argv) == 4 and sys.argv[3] == 'watch':
+        oldRevision = -1
+        print "Watching for changes in repo "+  sys.argv[1] + " master " +  sys.argv[2] 
+        while 1:
+            oldRevision = checkChanges(sys.argv[1],  sys.argv[2], False, oldRevision)
+            time.sleep(10*60) # Check the repository every 10 minutes
+
+    elif len(sys.argv) == 3:
+        checkChanges(sys.argv[1],  sys.argv[2], True )
+    else:
+        print os.path.basename(sys.argv[0]) + ":  http://host/path/to/repo master:port [watch]"
+


Property changes on: vendor/buildbot/current/contrib/svn_watcher.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/svnpoller.py
===================================================================
--- vendor/buildbot/current/contrib/svnpoller.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/svnpoller.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+"""
+ svn.py
+ Script for BuildBot to monitor a remote Subversion repository.
+ Copyright (C) 2006 John Pye
+"""
+# This script is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+
+import commands
+import xml.dom.minidom
+import ConfigParser
+import os.path
+import codecs
+
+# change these settings to match your project
+svnurl = "https://pse.cheme.cmu.edu/svn/ascend/code/trunk"
+statefilename = "~/changemonitor/config.ini"
+buildmaster = "buildbot.example.org:9989" # connects to a PBChangeSource
+
+xml1 = commands.getoutput("svn log --non-interactive --verbose --xml --limit=1 " + svnurl)
+#print "XML\n-----------\n"+xml1+"\n\n"
+
+try:
+	doc = xml.dom.minidom.parseString(xml1)
+	el = doc.getElementsByTagName("logentry")[0]
+	revision = el.getAttribute("revision")
+	author = "".join([t.data for t in el.getElementsByTagName("author")[0].childNodes])
+	comments = "".join([t.data for t in el.getElementsByTagName("msg")[0].childNodes])
+
+	pathlist = el.getElementsByTagName("paths")[0]
+	paths = []
+	for p in pathlist.getElementsByTagName("path"):
+		paths.append("".join([t.data for t in p.childNodes]))
+	#print "PATHS"
+	#print paths
+except xml.parsers.expat.ExpatError, e:
+	print "FAILED TO PARSE 'svn log' XML:"
+	print str(e)
+	print "----"
+	print "RECEIVED TEXT:"
+	print xml1
+	import sys
+	sys.exit(1)
+
+fname = statefilename
+fname = os.path.expanduser(fname)
+ini = ConfigParser.SafeConfigParser()
+
+try:
+	ini.read(fname)
+except:
+	print "Creating changemonitor config.ini:",fname
+	ini.add_section("CurrentRevision")
+	ini.set("CurrentRevision", "changeset", "-1")  # set() takes (section, option, value)
+
+try:
+	lastrevision = ini.get("CurrentRevision","changeset")
+except ConfigParser.NoOptionError:
+	print "NO OPTION FOUND"
+	lastrevision = -1
+except ConfigParser.NoSectionError:
+	print "NO SECTION FOUND"
+	lastrevision = -1
+
+if lastrevision != revision:
+
+	#comments = codecs.encodings.unicode_escape.encode(comments)
+	cmd = "buildbot sendchange --master="+buildmaster+" --branch=trunk --revision=\""+revision+"\" --username=\""+author+"\" --comments=\""+comments+"\" "+" ".join(paths)
+
+	#print cmd
+	res = commands.getoutput(cmd)
+
+	print "SUBMITTING NEW REVISION",revision
+	if not ini.has_section("CurrentRevision"):
+		ini.add_section("CurrentRevision")
+	try:
+		ini.set("CurrentRevision","changeset",revision)
+		f = open(fname,"w")
+		ini.write(f)
+		#print "WROTE CHANGES TO",fname
+	except:
+		print "FAILED TO RECORD INI FILE"


Property changes on: vendor/buildbot/current/contrib/svnpoller.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/viewcvspoll.py
===================================================================
--- vendor/buildbot/current/contrib/viewcvspoll.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/viewcvspoll.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,85 @@
+#! /usr/bin/python
+
+"""Based on the fakechanges.py contrib script"""
+
+import sys
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.internet import reactor, task
+from twisted.python import log
+import commands, random, os.path, time, MySQLdb
+
+class ViewCvsPoller:
+
+    def __init__(self):
+        def _load_rc():
+            import user
+            ret = {}
+            for line in open(os.path.join(user.home,".cvsblamerc")).readlines():
+                if line.find("=") != -1:
+                    key, val = line.split("=")
+                    ret[key.strip()] = val.strip()
+            return ret
+        # maybe add your own keys here db=xxx, user=xxx, passwd=xxx
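+        # A sketch of what ~/.cvsblamerc might contain (values hypothetical):
+        #   db=cvs
+        #   user=cvsreader
+        #   passwd=secret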
+        self.cvsdb = MySQLdb.connect("cvs", **_load_rc())
+        #self.last_checkin = "2005-05-11" # for testing
+        self.last_checkin = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
+
+    def get_changes(self):
+        changes = []
+
+        def empty_change():
+            return {'who': None, 'files': [], 'comments': None }
+        change = empty_change()
+
+        cursor = self.cvsdb.cursor()
+        cursor.execute("""SELECT whoid, descid, fileid, dirid, branchid, ci_when
+            FROM checkins WHERE ci_when>='%s'""" % self.last_checkin)
+        last_checkin = None
+        for whoid, descid, fileid, dirid, branchid, ci_when in cursor.fetchall():
+            if branchid != 1: # only head
+                continue
+            cursor.execute("""SELECT who from people where id=%s""" % whoid)
+            who = cursor.fetchone()[0]
+            cursor.execute("""SELECT description from descs where id=%s""" % descid)
+            desc = cursor.fetchone()[0]
+            cursor.execute("""SELECT file from files where id=%s""" % fileid)
+            filename = cursor.fetchone()[0]
+            cursor.execute("""SELECT dir from dirs where id=%s""" % dirid)
+            dirname = cursor.fetchone()[0]
+            if who == change["who"] and desc == change["comments"]:
+                change["files"].append( "%s/%s" % (dirname, filename) )
+            elif change["who"]:
+                changes.append(change)
+                change = empty_change()
+            else:
+                change["who"] = who
+                change["files"].append( "%s/%s" % (dirname, filename) )
+                change["comments"] = desc
+            if last_checkin == None or ci_when > last_checkin:
+                last_checkin = ci_when
+        if last_checkin:
+            self.last_checkin = last_checkin
+        return changes
+
+poller = ViewCvsPoller()
+
+def error(*args):
+    log.err()
+    reactor.stop()
+
+def poll_changes(remote):
+    print "GET CHANGES SINCE", poller.last_checkin,
+    changes = poller.get_changes()
+    for change in changes:
+        print change["who"], "\n *", "\n * ".join(change["files"])
+        remote.callRemote('addChange', change).addErrback(error)
+    print
+    reactor.callLater(60, poll_changes, remote)
+
+factory = pb.PBClientFactory()
+reactor.connectTCP("localhost", 9999, factory )
+deferred = factory.login(credentials.UsernamePassword("change", "changepw"))
+deferred.addCallback(poll_changes).addErrback(error)
+
+reactor.run()


Property changes on: vendor/buildbot/current/contrib/viewcvspoll.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/windows/buildbot.bat
===================================================================
--- vendor/buildbot/current/contrib/windows/buildbot.bat	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/windows/buildbot.bat	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,2 @@
+@python C:\Python23\Scripts\buildbot %*
+

Added: vendor/buildbot/current/contrib/windows/buildbot2.bat
===================================================================
--- vendor/buildbot/current/contrib/windows/buildbot2.bat	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/windows/buildbot2.bat	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,98 @@
+@echo off
+rem This is a Windows helper batch file for Buildbot
+rem NOTE: You will need Windows NT5/XP to use some of the syntax here.
+
+rem Please note you must have Twisted Matrix installed to use this build system
+rem Details: http://twistedmatrix.com/ (version 1.3.0 or later, preferably 2.0+)
+
+rem NOTE: the --reactor=win32 argument is needed because of Twisted:
+rem its default reactor is select-based (i.e. posix) (why?!)
+
+rem Keep environmental settings local to this file
+setlocal
+
+rem Change the following settings to suit your environment
+
+rem This is where you want Buildbot installed
+set BB_DIR=z:\Tools\PythonLibs
+
+rem Assuming you have TortoiseCVS installed [for CVS.exe].
+set CVS_EXE="c:\Program Files\TortoiseCVS\cvs.exe"
+
+rem Trial: --spew will give LOADS of information. Use -o for verbose.
+set TRIAL=python C:\Python23\scripts\trial.py -o --reactor=win32
+set BUILDBOT_TEST_VC=c:\temp
+
+if "%1"=="helper" (
+	goto print_help
+)
+
+if "%1"=="bbinstall" (
+	rem You will only need to run this when you install Buildbot
+	echo BB: Install BuildBot at the location you set in the config:
+	echo BB: BB_DIR= %BB_DIR%
+	echo BB: You must be in the buildbot-x.y.z directory to run this:
+	python setup.py install --prefix %BB_DIR% --install-lib %BB_DIR%
+	goto end
+)
+
+if "%1"=="cvsco" (
+	echo BB: Getting Buildbot from Sourceforge CVS [if CVS in path].
+	if "%2"=="" (
+		echo BB ERROR: Please give a root path for the check out, eg. z:\temp
+		goto end
+	)
+
+	cd %2
+	echo BB: Hit return as there is no password
+	%CVS_EXE% -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot login
+	%CVS_EXE% -z3 -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot co -P buildbot
+	goto end
+)
+
+if "%1"=="cvsup" (
+	echo BB: Updating Buildbot from Sourceforge CVS [if CVS in path].
+	echo BB: Make sure you have the project checked out in local VCS.
+	
+	rem we only want buildbot code, the rest is from the install
+	cd %BB_DIR%
+	echo BB: Hit return as there is no password
+	%CVS_EXE% -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot login
+	%CVS_EXE% -z3 -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot up -P -d buildbot buildbot/buildbot
+	goto end
+)
+
+if "%1"=="test" (
+	rem Trial is a testing framework supplied by the Twisted Matrix package.
+	rem It installs itself in the Python installation directory in a "scripts" folder,
+	rem e.g. c:\python23\scripts
+	rem This is just a convenience function because that directory is not in our path.
+		
+	if "%2" NEQ "" (
+		echo BB: TEST: buildbot.test.%2
+		%TRIAL% -m buildbot.test.%2
+	) else (
+		echo BB: Running ALL buildbot tests...
+		%TRIAL% buildbot.test
+	)
+	goto end
+)
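+rem Example: "buildbot2.bat test test_vc" runs buildbot.test.test_vc through
+rem Trial (the module name here is illustrative).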
+
+rem Okay, nothing that we recognised to pass to buildbot
+echo BB: Running buildbot...
+python -c "from buildbot.scripts import runner; runner.run()" %*
+goto end
+
+:print_help
+echo Buildbot helper script commands:
+echo	helper		This help message
+echo	test		Test buildbot is set up correctly
+echo Maintenance:
+echo	bbinstall	Install Buildbot from package
+echo	cvsup		Update from cvs
+echo	cvsco [dir]	Check buildbot out from cvs into [dir]
+
+:end
+rem End environment scope
+endlocal
+

Added: vendor/buildbot/current/contrib/windows/buildbot_service.py
===================================================================
--- vendor/buildbot/current/contrib/windows/buildbot_service.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/windows/buildbot_service.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,508 @@
+# Runs the build-bot as a Windows service.
+# To use:
+# * Install and configure buildbot as per normal (ie, running
+#  'setup.py install' from the source directory).
+#
+# * Configure any number of build-bot directories (slaves or masters), as
+#   per the buildbot instructions.  Test these directories normally by
+#   using the (possibly modified) "buildbot.bat" file and ensure everything
+#   is working as expected.
+#
+# * Install the buildbot service.  Execute the command:
+#   % python buildbot_service.py
+#   To see installation options.  You probably want to specify:
+#   + --username and --password options to specify the user to run the
+#     service as.
+#   + --startup auto to have the service start at boot time.
+#
+#   For example:
+#   % python buildbot_service.py --user mark --password secret \
+#     --startup auto install
+#   Alternatively, you could execute:
+#   % python buildbot_service.py install
+#   to install the service with default options, then use Control Panel
+#   to configure it.
+#
+# * Start the service specifying the name of all buildbot directories as
+#   service args.  This can be done one of 2 ways:
+#   - Execute the command:
+#     % python buildbot_service.py start "dir_name1" "dir_name2"
+#   or:
+#   - Start Control Panel->Administrative Tools->Services
+#   - Locate the previously installed buildbot service.
+#   - Open the "properties" for the service.
+#   - Enter the directory names into the "Start Parameters" textbox.  The
+#     directory names must be fully qualified, and surrounded in quotes if
+#     they include spaces.
+#   - Press the "Start" button.
+#   Note that the service will automatically use the previously specified
+#   directories if no arguments are specified. This means the directories
+#   need only be specified when the directories to use have changed (and
+#   therefore also the first time buildbot is configured)
+#
+# * The service should now be running.  You should check the Windows
+#   event log.  If all goes well, you should see some information messages
+#   telling you the buildbot has successfully started.
+#
+# * If you change the buildbot configuration, you must restart the service.
+#   There is currently no way to ask a running buildbot to reload the
+#   config.  You can restart by executing:
+#   % python buildbot_service.py restart
+#
+# Troubleshooting:
+# * Check the Windows event log for any errors.
+# * Check the "twistd.log" file in your buildbot directories - once each
+#   bot has been started it just writes to this log as normal.
+# * Try executing:
+#   % python buildbot_service.py debug
+#   This will execute the buildbot service in "debug" mode, and allow you to
+#   see all messages etc generated. If the service works in debug mode but
+#   not as a real service, the error probably relates to the environment or
+#   permissions of the user configured to run the service (debug mode runs as
+#   the currently logged in user, not the service user)
+# * Ensure you have the latest pywin32 build available, at least version 206.
+
+# Written by Mark Hammond, 2006.
+
+import sys, os, threading
+
+import pywintypes
+import winerror, win32con
+import win32api, win32event, win32file, win32pipe, win32process, win32security
+import win32service, win32serviceutil, servicemanager
+
+# Are we running in a py2exe environment?
+is_frozen = hasattr(sys, "frozen")
+
+# Taken from the Zope service support - each "child" is run as a sub-process
+# (trying to run multiple twisted apps in the same process is likely to screw
+# stdout redirection etc).
+# Note that unlike the Zope service, we do *not* attempt to detect a failed
+# client and perform restarts - buildbot itself does a good job
+# at reconnecting, and Windows itself provides restart semantics should
+# everything go pear-shaped.
+
+# We execute a new thread that captures the tail of the output from our child
+# process. If the child fails, it is written to the event log.
+# This process is unconditional, and the output is never written to disk
+# (except obviously via the event log entry)
+# Size of the blocks we read from the child process's output.
+CHILDCAPTURE_BLOCK_SIZE = 80
+# The number of BLOCKSIZE blocks we keep as process output.
+CHILDCAPTURE_MAX_BLOCKS = 200
+
+class BBService(win32serviceutil.ServiceFramework):    
+    _svc_name_ = 'BuildBot'
+    _svc_display_name_ = _svc_name_
+    _svc_description_ = 'Manages local buildbot slaves and masters - ' \
+                        'see http://buildbot.sourceforge.net'
+
+    def __init__(self, args):
+        win32serviceutil.ServiceFramework.__init__(self, args)
+
+        # Create an event which we will use to wait on. The "service stop" 
+        # request will set this event.
+        # * We must make it inheritable so we can pass it to the child 
+        #   process via the cmd-line
+        # * Must be manual reset so each child process and our service
+        #   all get woken from a single set of the event.
+        sa = win32security.SECURITY_ATTRIBUTES()
+        sa.bInheritHandle = True
+        self.hWaitStop = win32event.CreateEvent(sa, True, False, None)
+
+        self.args = args
+        self.dirs = None
+        self.runner_prefix = None
+
+        # Patch up the service messages file in a frozen exe.
+        # (We use the py2exe option that magically bundles the .pyd files
+        # into the .zip file - so servicemanager.pyd doesn't exist.)
+        if is_frozen and servicemanager.RunningAsService():
+            msg_file = os.path.join(os.path.dirname(sys.executable),
+                                    "buildbot.msg")
+            if os.path.isfile(msg_file):
+                servicemanager.Initialize("BuildBot", msg_file)
+            else:
+                self.warning("Strange - '%s' does not exist" % (msg_file,))
+
+    def _checkConfig(self):
+        # Locate our child process runner (but only when run from source)
+        if not is_frozen:
+            # Running from source
+            python_exe = os.path.join(sys.prefix, "python.exe")
+            if not os.path.isfile(python_exe):
+                # for people who build Python itself from source.
+                python_exe = os.path.join(sys.prefix, "PCBuild", "python.exe")
+            if not os.path.isfile(python_exe):
+                self.error("Can not find python.exe to spawn subprocess")
+                return False
+
+            me = __file__
+            if me.endswith(".pyc") or me.endswith(".pyo"):
+                me = me[:-1]
+
+            self.runner_prefix = '"%s" "%s"' % (python_exe, me)
+        else:
+            # Running from a py2exe built executable - our child process is
+            # us (but with the funky cmdline args!)
+            self.runner_prefix = '"' + sys.executable + '"'
+
+        # Now our arg processing - this may be better handled by a
+        # twisted/buildbot style config file - but as of time of writing,
+        # MarkH is clueless about such things!
+
+        # Note that the "arguments" you type into Control Panel for the
+        # service do *not* persist - they apply only when you click "start"
+        # on the service. When started by Windows, args are never presented.
+        # Thus, it is the responsibility of the service to persist any args.
+        
+        # so, when args are presented, we save them as a "custom option". If
+        # they are not presented, we load them from the option.
+        self.dirs = []
+        if len(self.args) > 1:
+            dir_string = os.pathsep.join(self.args[1:])
+            save_dirs = True
+        else:
+            dir_string = win32serviceutil.GetServiceCustomOption(self,
+                                                            "directories")
+            save_dirs = False
+
+        if not dir_string:
+            self.error("You must specify the buildbot directories as "
+                       "parameters to the service.\nStopping the service.")
+            return False
+
+        dirs = dir_string.split(os.pathsep)
+        for d in dirs:
+            d = os.path.abspath(d)
+            sentinal = os.path.join(d, "buildbot.tac")
+            if os.path.isfile(sentinal):
+                self.dirs.append(d)
+            else:
+                msg = "Directory '%s' is not a buildbot dir - ignoring" \
+                      % (d,)
+                self.warning(msg)
+        if not self.dirs:
+            self.error("No valid buildbot directories were specified.\n"
+                       "Stopping the service.")
+            return False
+        if save_dirs:
+            dir_string = os.pathsep.join(self.dirs).encode("mbcs")
+            win32serviceutil.SetServiceCustomOption(self, "directories",
+                                                    dir_string)
+        return True
+
+    def SvcStop(self):
+        # Tell the SCM we are starting the stop process.
+        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+        # Set the stop event - the main loop takes care of termination.
+        win32event.SetEvent(self.hWaitStop)
+
+    # SvcStop only gets triggered when the user explicitly stops (or restarts)
+    # the service.  To shut the service down cleanly when Windows is shutting
+    # down, we also need to hook SvcShutdown.
+    SvcShutdown = SvcStop
+
+    def SvcDoRun(self):
+        if not self._checkConfig():
+            # stopped status set by caller.
+            return
+
+        self.logmsg(servicemanager.PYS_SERVICE_STARTED)
+
+        child_infos = []
+
+        for bbdir in self.dirs:
+            self.info("Starting BuildBot in directory '%s'" % (bbdir,))
+            hstop = self.hWaitStop
+
+            cmd = '%s --spawn %d start %s' % (self.runner_prefix, hstop, bbdir)
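+            # e.g. '"C:\Python23\python.exe" "C:\buildbot\buildbot_service.py"
+            #       --spawn 644 start c:\buildbot\master' (values illustrative)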
+            #print "cmd is", cmd
+            h, t, output = self.createProcess(cmd)
+            child_infos.append((bbdir, h, t, output))
+
+        while child_infos:
+            handles = [self.hWaitStop] + [i[1] for i in child_infos]
+
+            rc = win32event.WaitForMultipleObjects(handles,
+                                                   0, # bWaitAll
+                                                   win32event.INFINITE)
+            if rc == win32event.WAIT_OBJECT_0:
+                # user sent a stop service request
+                break
+            else:
+                # A child process died.  For now, just log the output
+                # and forget the process.
+                index = rc - win32event.WAIT_OBJECT_0 - 1
+                bbdir, dead_handle, dead_thread, output_blocks = \
+                                                        child_infos[index]
+                status = win32process.GetExitCodeProcess(dead_handle)
+                output = "".join(output_blocks)
+                if not output:
+                    output = "The child process generated no output. " \
+                             "Please check the twistd.log file in the " \
+                             "indicated directory."
+
+                self.warning("BuildBot for directory %r terminated with "
+                             "exit code %d.\n%s" % (bbdir, status, output))
+
+                del child_infos[index]
+
+                if not child_infos:
+                    self.warning("All BuildBot child processes have "
+                                 "terminated.  Service stopping.")
+
+        # Either no child processes left, or stop event set.
+        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+
+        # The child processes should have also seen our stop signal
+        # so wait for them to terminate.
+        for bbdir, h, t, output in child_infos:
+            for i in range(10): # 30 seconds to shutdown...
+                self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+                rc = win32event.WaitForSingleObject(h, 3000)
+                if rc == win32event.WAIT_OBJECT_0:
+                    break
+            # Process terminated - no need to try harder.
+            if rc == win32event.WAIT_OBJECT_0:
+                break
+
+            self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+            # If necessary, kill it
+            if win32process.GetExitCodeProcess(h)==win32con.STILL_ACTIVE:
+                self.warning("BuildBot process at %r failed to terminate - "
+                             "killing it" % (bbdir,))
+                win32api.TerminateProcess(h, 3)
+            self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+
+            # Wait for the redirect thread - it should have died as the remote
+            # process terminated.
+            # As we are shutting down, we do the join with a little more care,
+            # reporting progress as we wait (even though we never will <wink>)
+            for i in range(5):
+                t.join(1)
+                self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+                if not t.isAlive():
+                    break
+            else:
+                self.warning("Redirect thread did not stop!")
+
+        # All done.
+        self.logmsg(servicemanager.PYS_SERVICE_STOPPED)
+
+    #
+    # Error reporting/logging functions.
+    #
+    def logmsg(self, event):
+        # log a service event using servicemanager.LogMsg
+        try:
+            servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
+                                  event,
+                                  (self._svc_name_,
+                                   " (%s)" % self._svc_display_name_))
+        except win32api.error, details:
+            # Failed to write a log entry - most likely problem is
+            # that the event log is full.  We don't want this to kill us
+            try:
+                print "FAILED to write INFO event", event, ":", details
+            except IOError:
+                # No valid stdout!  Ignore it.
+                pass
+
+    def _dolog(self, func, msg):
+        try:
+            func(msg)
+        except win32api.error, details:
+            # Failed to write a log entry - most likely problem is
+            # that the event log is full.  We don't want this to kill us
+            try:
+                print "FAILED to write event log entry:", details
+                print msg
+            except IOError:
+                pass
+
+    def info(self, s):
+        self._dolog(servicemanager.LogInfoMsg, s)
+
+    def warning(self, s):
+        self._dolog(servicemanager.LogWarningMsg, s)
+
+    def error(self, s):
+        self._dolog(servicemanager.LogErrorMsg, s)
+    
+    # Functions that spawn a child process, redirecting any output.
+    # Although buildbot itself does this, it is very handy to debug issues
+    # such as ImportErrors that happen before buildbot has redirected.
+    def createProcess(self, cmd):
+        hInputRead, hInputWriteTemp = self.newPipe()
+        hOutReadTemp, hOutWrite = self.newPipe()
+        pid = win32api.GetCurrentProcess()
+        # This one is duplicated as inheritable.
+        hErrWrite = win32api.DuplicateHandle(pid, hOutWrite, pid, 0, 1,
+                                       win32con.DUPLICATE_SAME_ACCESS)
+
+        # These are non-inheritable duplicates.
+        hOutRead = self.dup(hOutReadTemp)
+        hInputWrite = self.dup(hInputWriteTemp)
+        # dup() closed hOutReadTemp, hInputWriteTemp
+
+        si = win32process.STARTUPINFO()
+        si.hStdInput = hInputRead
+        si.hStdOutput = hOutWrite
+        si.hStdError = hErrWrite
+        si.dwFlags = win32process.STARTF_USESTDHANDLES | \
+                     win32process.STARTF_USESHOWWINDOW
+        si.wShowWindow = win32con.SW_HIDE
+
+        # pass True to allow handles to be inherited.  Inheritance is
+        # problematic in general, but should work in the controlled
+        # circumstances of a service process.
+        create_flags = win32process.CREATE_NEW_CONSOLE
+        # info is (hProcess, hThread, pid, tid)
+        info = win32process.CreateProcess(None, cmd, None, None, True,
+                                          create_flags, None, None, si)
+        # (NOTE: these really aren't necessary for Python - they are closed
+        # as soon as they are collected)
+        hOutWrite.Close()
+        hErrWrite.Close()
+        hInputRead.Close()
+        # We don't use stdin
+        hInputWrite.Close()
+
+        # start a thread collecting output
+        blocks = []
+        t = threading.Thread(target=self.redirectCaptureThread,
+                             args = (hOutRead,blocks))
+        t.start()
+        return info[0], t, blocks
+
+    def redirectCaptureThread(self, handle, captured_blocks):
+        # One of these running per child process we are watching.  It
+        # handles both stdout and stderr on a single handle. The read data is
+        # never referenced until the thread dies - so no need for locks
+        # around self.captured_blocks.
+        #self.info("Redirect thread starting")
+        while 1:
+            try:
+                ec, data = win32file.ReadFile(handle, CHILDCAPTURE_BLOCK_SIZE)
+            except pywintypes.error, err:
+                # ERROR_BROKEN_PIPE means the child process closed the
+                # handle - ie, it terminated.
+                if err[0] != winerror.ERROR_BROKEN_PIPE:
+                    self.warning("Error reading output from process: %s" % err)
+                break
+            captured_blocks.append(data)
+            del captured_blocks[CHILDCAPTURE_MAX_BLOCKS:]
+        handle.Close()
+        #self.info("Redirect capture thread terminating")
+
+    def newPipe(self):
+        sa = win32security.SECURITY_ATTRIBUTES()
+        sa.bInheritHandle = True
+        return win32pipe.CreatePipe(sa, 0)
+
+    def dup(self, pipe):
+        # create a duplicate handle that is not inherited, so that
+        # it can be closed in the parent.  close the original pipe in
+        # the process.
+        pid = win32api.GetCurrentProcess()
+        dup = win32api.DuplicateHandle(pid, pipe, pid, 0, 0,
+                                       win32con.DUPLICATE_SAME_ACCESS)
+        pipe.Close()
+        return dup
+
+# Service registration and startup
+def RegisterWithFirewall(exe_name, description):
+    # Register our executable as an exception with Windows Firewall.
+    # taken from http://msdn.microsoft.com/library/default.asp?url=/library/en-us/ics/ics/wf_adding_an_application.asp
+    from win32com.client import Dispatch
+    #  Set constants
+    NET_FW_PROFILE_DOMAIN = 0
+    NET_FW_PROFILE_STANDARD = 1
+    
+    # Scope
+    NET_FW_SCOPE_ALL = 0
+    
+    # IP Version - ANY is the only allowable setting for now
+    NET_FW_IP_VERSION_ANY = 2
+    
+    fwMgr = Dispatch("HNetCfg.FwMgr")
+    
+    # Get the current profile for the local firewall policy.
+    profile = fwMgr.LocalPolicy.CurrentProfile
+    
+    app = Dispatch("HNetCfg.FwAuthorizedApplication")
+    
+    app.ProcessImageFileName = exe_name
+    app.Name = description
+    app.Scope = NET_FW_SCOPE_ALL
+    # Use either Scope or RemoteAddresses, but not both
+    #app.RemoteAddresses = "*"
+    app.IpVersion = NET_FW_IP_VERSION_ANY
+    app.Enabled = True
+    
+    # Use this line if you want to add the app, but disabled.
+    #app.Enabled = False
+    
+    profile.AuthorizedApplications.Add(app)
+
+# A custom install function.
+def CustomInstall(opts):
+    # Register this process with the Windows Firewall
+    import pythoncom
+    try:
+        RegisterWithFirewall(sys.executable, "BuildBot")
+    except pythoncom.com_error, why:
+        print "FAILED to register with the Windows firewall"
+        print why
+
+#
+# Magic code to allow shutdown.  Note that this code is executed in
+# the *child* process, by way of the service process executing us with
+# special cmdline args (which includes the service stop handle!)
+def _RunChild():
+    del sys.argv[1] # The --spawn arg.
+    # Create a new thread that just waits for the event to be signalled.
+    t = threading.Thread(target=_WaitForShutdown, 
+                         args = (int(sys.argv[1]),)
+                         )
+    del sys.argv[1] # The stop handle
+    # This child process will be sent a console handler notification as
+    # users log off, or as the system shuts down.  We want to ignore these
+    # signals as the service parent is responsible for our shutdown.
+    def ConsoleHandler(what):
+        # We can ignore *everything* - ctrl+c will never be sent as this
+        # process is never attached to a console the user can press the
+        # key in!
+        return True
+    win32api.SetConsoleCtrlHandler(ConsoleHandler, True)
+    t.setDaemon(True) # we don't want to wait for this to stop!
+    t.start()
+    if hasattr(sys, "frozen"):
+        # py2exe sets env vars that can confuse our child process - remove them
+        del os.environ["PYTHONPATH"]
+
+    # Start the buildbot app
+    from buildbot.scripts import runner
+    runner.run()
+    print "Service child process terminating normally."
+
+def _WaitForShutdown(h):
+    win32event.WaitForSingleObject(h, win32event.INFINITE)
+    print "Shutdown requested"
+
+    from twisted.internet import reactor
+    reactor.callLater(0, reactor.stop)
+
+# This function is also called by the py2exe startup code.
+def HandleCommandLine():
+    if len(sys.argv)>1 and sys.argv[1] == "--spawn":
+        # Special command-line created by the service to execute the
+        # child-process.
+        # First arg is the handle to wait on
+        _RunChild()
+    else:
+        win32serviceutil.HandleCommandLine(BBService,
+                                           customOptionHandler=CustomInstall)
+
+if __name__ == '__main__':
+    HandleCommandLine()


Property changes on: vendor/buildbot/current/contrib/windows/buildbot_service.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/contrib/windows/setup.py
===================================================================
--- vendor/buildbot/current/contrib/windows/setup.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/contrib/windows/setup.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,79 @@
+# setup.py
+# A distutils setup script to create py2exe binaries for buildbot.
+# Both a service and standard executable are created.
+# Usage:
+# % setup.py py2exe
+
+import sys, os, tempfile, shutil
+from os.path import dirname, join, abspath, exists, splitext
+
+this_dir = abspath(dirname(__file__))
+bb_root_dir = abspath(join(this_dir, "..", ".."))
+
+from distutils.core import setup
+import py2exe
+
+includes = []
+
+# We try and bundle *all* modules in the following packages:
+for package in ["buildbot.changes", "buildbot.process", "buildbot.status"]:
+    __import__(package)
+    p = sys.modules[package]
+    for fname in os.listdir(p.__path__[0]):
+        base, ext = splitext(fname)
+        if not fname.startswith("_") and ext == ".py":
+            includes.append(p.__name__ + "." + base)
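+# "includes" now holds dotted module names such as
+# "buildbot.changes.svnpoller" (example only).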
+
+# Other misc modules dynamically imported, so missed by py2exe
+includes.extend("""
+            buildbot.scheduler
+            buildbot.slave.bot
+            buildbot.master
+            twisted.internet.win32eventreactor
+            twisted.web.resource""".split())
+
+# Turn the list into the comma-separated string py2exe requires
+includes = ",".join(includes)
+
+py2exe_options = {"bundle_files": 1,
+                  "includes": includes,
+                 }
+
+# Each "target" executable we create
+buildbot_target = {
+    "script": join(bb_root_dir, "bin", "buildbot")
+}
+# Due to the way py2exe works, we need to rebuild the service code as a
+# normal console process - this will be executed by the service itself.
+
+service_target = {
+    "modules": ["buildbot_service"],
+    "cmdline_style": "custom",
+}
+
+# We use the py2exe "bundle" option, so servicemanager.pyd
+# (which has the message resources) does not exist.  Take a copy
+# of it with a "friendlier" name.  The service runtime arranges for this
+# to be used.
+import servicemanager
+
+msg_file = join(tempfile.gettempdir(), "buildbot.msg")
+shutil.copy(servicemanager.__file__, msg_file)
+
+data_files = [
+    ["", [msg_file]],
+    ["", [join(bb_root_dir, "buildbot", "status", "classic.css")]],
+    ["", [join(bb_root_dir, "buildbot", "buildbot.png")]],
+]
+
+try:
+    setup(name="buildbot",
+          # The buildbot script as a normal executable
+          console=[buildbot_target],
+          service=[service_target],
+          options={'py2exe': py2exe_options},
+          data_files = data_files,
+          zipfile = "buildbot.library", # 'library.zip' invites trouble :)
+    )
+finally:
+    os.unlink(msg_file)


Property changes on: vendor/buildbot/current/contrib/windows/setup.py
___________________________________________________________________
Name: svn:executable
   + 

Added: vendor/buildbot/current/docs/buildbot.html
===================================================================
--- vendor/buildbot/current/docs/buildbot.html	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/docs/buildbot.html	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,7012 @@
+<html lang="en">
+<head>
+<title>BuildBot Manual 0.7.5</title>
+<meta http-equiv="Content-Type" content="text/html">
+<meta name="description" content="BuildBot Manual 0.7.5">
+<meta name="generator" content="makeinfo 4.8">
+<link title="Top" rel="top" href="#Top">
+<link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+<!--
+This is the BuildBot manual.
+
+Copyright (C) 2005,2006 Brian Warner
+
+Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty
+provided the copyright notice and this notice are preserved.-->
+<meta http-equiv="Content-Style-Type" content="text/css">
+<style type="text/css"><!--
+  pre.display { font-family:inherit }
+  pre.format  { font-family:inherit }
+  pre.smalldisplay { font-family:inherit; font-size:smaller }
+  pre.smallformat  { font-family:inherit; font-size:smaller }
+  pre.smallexample { font-size:smaller }
+  pre.smalllisp    { font-size:smaller }
+  span.sc    { font-variant:small-caps }
+  span.roman { font-family:serif; font-weight:normal; } 
+  span.sansserif { font-family:sans-serif; font-weight:normal; } 
+--></style>
+</head>
+<body>
+<h1 class="settitle">BuildBot Manual 0.7.5</h1>
+   <div class="contents">
+<h2>Table of Contents</h2>
+<ul>
+<li><a name="toc_Top" href="#Top">BuildBot</a>
+<li><a name="toc_Introduction" href="#Introduction">1 Introduction</a>
+<ul>
+<li><a href="#History-and-Philosophy">1.1 History and Philosophy</a>
+<li><a href="#System-Architecture">1.2 System Architecture</a>
+<ul>
+<li><a href="#BuildSlave-Connections">1.2.1 BuildSlave Connections</a>
+<li><a href="#Buildmaster-Architecture">1.2.2 Buildmaster Architecture</a>
+<li><a href="#Status-Delivery-Architecture">1.2.3 Status Delivery Architecture</a>
+</li></ul>
+<li><a href="#Control-Flow">1.3 Control Flow</a>
+</li></ul>
+<li><a name="toc_Installation" href="#Installation">2 Installation</a>
+<ul>
+<li><a href="#Requirements">2.1 Requirements</a>
+<li><a href="#Installing-the-code">2.2 Installing the code</a>
+<li><a href="#Creating-a-buildmaster">2.3 Creating a buildmaster</a>
+<li><a href="#Creating-a-buildslave">2.4 Creating a buildslave</a>
+<ul>
+<li><a href="#Buildslave-Options">2.4.1 Buildslave Options</a>
+</li></ul>
+<li><a href="#Launching-the-daemons">2.5 Launching the daemons</a>
+<li><a href="#Logfiles">2.6 Logfiles</a>
+<li><a href="#Shutdown">2.7 Shutdown</a>
+<li><a href="#Maintenance">2.8 Maintenance</a>
+<li><a href="#Troubleshooting">2.9 Troubleshooting</a>
+<ul>
+<li><a href="#Starting-the-buildslave">2.9.1 Starting the buildslave</a>
+<li><a href="#Connecting-to-the-buildmaster">2.9.2 Connecting to the buildmaster</a>
+<li><a href="#Forcing-Builds">2.9.3 Forcing Builds</a>
+</li></ul>
+</li></ul>
+<li><a name="toc_Concepts" href="#Concepts">3 Concepts</a>
+<ul>
+<li><a href="#Version-Control-Systems">3.1 Version Control Systems</a>
+<ul>
+<li><a href="#Generalizing-VC-Systems">3.1.1 Generalizing VC Systems</a>
+<li><a href="#Source-Tree-Specifications">3.1.2 Source Tree Specifications</a>
+<li><a href="#How-Different-VC-Systems-Specify-Sources">3.1.3 How Different VC Systems Specify Sources</a>
+<li><a href="#Attributes-of-Changes">3.1.4 Attributes of Changes</a>
+</li></ul>
+<li><a href="#Schedulers">3.2 Schedulers</a>
+<li><a href="#BuildSet">3.3 BuildSet</a>
+<li><a href="#BuildRequest">3.4 BuildRequest</a>
+<li><a href="#Builder">3.5 Builder</a>
+<li><a href="#Users">3.6 Users</a>
+<ul>
+<li><a href="#Doing-Things-With-Users">3.6.1 Doing Things With Users</a>
+<li><a href="#Email-Addresses">3.6.2 Email Addresses</a>
+<li><a href="#IRC-Nicknames">3.6.3 IRC Nicknames</a>
+<li><a href="#Live-Status-Clients">3.6.4 Live Status Clients</a>
+</li></ul>
+</li></ul>
+<li><a name="toc_Configuration" href="#Configuration">4 Configuration</a>
+<ul>
+<li><a href="#Config-File-Format">4.1 Config File Format</a>
+<li><a href="#Loading-the-Config-File">4.2 Loading the Config File</a>
+<li><a href="#Defining-the-Project">4.3 Defining the Project</a>
+<li><a href="#Listing-Change-Sources-and-Schedulers">4.4 Listing Change Sources and Schedulers</a>
+<ul>
+<li><a href="#Scheduler-Types">4.4.1 Scheduler Types</a>
+<li><a href="#Build-Dependencies">4.4.2 Build Dependencies</a>
+</li></ul>
+<li><a href="#Setting-the-slaveport">4.5 Setting the slaveport</a>
+<li><a href="#Buildslave-Specifiers">4.6 Buildslave Specifiers</a>
+<li><a href="#Defining-Builders">4.7 Defining Builders</a>
+<li><a href="#Defining-Status-Targets">4.8 Defining Status Targets</a>
+<li><a href="#Debug-options">4.9 Debug options</a>
+</li></ul>
+<li><a name="toc_Getting-Source-Code-Changes" href="#Getting-Source-Code-Changes">5 Getting Source Code Changes</a>
+<ul>
+<li><a href="#Change-Sources">5.1 Change Sources</a>
+<ul>
+<li><a href="#Choosing-ChangeSources">5.1.1 Choosing ChangeSources</a>
+<li><a href="#CVSToys-_002d-PBService">5.1.2 CVSToys - PBService</a>
+<li><a href="#CVSToys-_002d-mail-notification">5.1.3 CVSToys - mail notification</a>
+<li><a href="#Other-mail-notification-ChangeSources">5.1.4 Other mail notification ChangeSources</a>
+<li><a href="#PBChangeSource">5.1.5 PBChangeSource</a>
+<li><a href="#P4Source">5.1.6 P4Source</a>
+<li><a href="#BonsaiPoller">5.1.7 BonsaiPoller</a>
+<li><a href="#SVNPoller">5.1.8 SVNPoller</a>
+</li></ul>
+</li></ul>
+<li><a name="toc_Build-Process" href="#Build-Process">6 Build Process</a>
+<ul>
+<li><a href="#Build-Steps">6.1 Build Steps</a>
+<ul>
+<li><a href="#Common-Parameters">6.1.1 Common Parameters</a>
+<li><a href="#Source-Checkout">6.1.2 Source Checkout</a>
+<ul>
+<li><a href="#CVS">6.1.2.1 CVS</a>
+<li><a href="#SVN">6.1.2.2 SVN</a>
+<li><a href="#Darcs">6.1.2.3 Darcs</a>
+<li><a href="#Mercurial">6.1.2.4 Mercurial</a>
+<li><a href="#Arch">6.1.2.5 Arch</a>
+<li><a href="#Bazaar">6.1.2.6 Bazaar</a>
+<li><a href="#P4">6.1.2.7 P4</a>
+</li></ul>
+<li><a href="#ShellCommand">6.1.3 ShellCommand</a>
+<li><a href="#Simple-ShellCommand-Subclasses">6.1.4 Simple ShellCommand Subclasses</a>
+<ul>
+<li><a href="#Configure">6.1.4.1 Configure</a>
+<li><a href="#Compile">6.1.4.2 Compile</a>
+<li><a href="#Test">6.1.4.3 Test</a>
+<li><a href="#Build-Properties">6.1.4.4 Build Properties</a>
+</li></ul>
+<li><a href="#Python-BuildSteps">6.1.5 Python BuildSteps</a>
+<ul>
+<li><a href="#BuildEPYDoc">6.1.5.1 BuildEPYDoc</a>
+<li><a href="#PyFlakes">6.1.5.2 PyFlakes</a>
+</li></ul>
+<li><a href="#Transferring-Files">6.1.6 Transferring Files</a>
+<li><a href="#Writing-New-BuildSteps">6.1.7 Writing New BuildSteps</a>
+<ul>
+<li><a href="#BuildStep-LogFiles">6.1.7.1 BuildStep LogFiles</a>
+<li><a href="#Adding-LogObservers">6.1.7.2 Adding LogObservers</a>
+<li><a href="#BuildStep-URLs">6.1.7.3 BuildStep URLs</a>
+</li></ul>
+</li></ul>
+<li><a href="#Interlocks">6.2 Interlocks</a>
+<li><a href="#Build-Factories">6.3 Build Factories</a>
+<ul>
+<li><a href="#BuildStep-Objects">6.3.1 BuildStep Objects</a>
+<li><a href="#BuildFactory">6.3.2 BuildFactory</a>
+<ul>
+<li><a href="#BuildFactory-Attributes">6.3.2.1 BuildFactory Attributes</a>
+<li><a href="#Quick-builds">6.3.2.2 Quick builds</a>
+</li></ul>
+<li><a href="#Process_002dSpecific-build-factories">6.3.3 Process-Specific build factories</a>
+<ul>
+<li><a href="#GNUAutoconf">6.3.3.1 GNUAutoconf</a>
+<li><a href="#CPAN">6.3.3.2 CPAN</a>
+<li><a href="#Python-distutils">6.3.3.3 Python distutils</a>
+<li><a href="#Python_002fTwisted_002ftrial-projects">6.3.3.4 Python/Twisted/trial projects</a>
+</li></ul>
+</li></ul>
+</li></ul>
+<li><a name="toc_Status-Delivery" href="#Status-Delivery">7 Status Delivery</a>
+<ul>
+<li><a href="#HTML-Waterfall">7.1 HTML Waterfall</a>
+<li><a href="#IRC-Bot">7.2 IRC Bot</a>
+<li><a href="#PBListener">7.3 PBListener</a>
+<li><a href="#Writing-New-Status-Plugins">7.4 Writing New Status Plugins</a>
+</li></ul>
+<li><a name="toc_Command_002dline-tool" href="#Command_002dline-tool">8 Command-line tool</a>
+<ul>
+<li><a href="#Administrator-Tools">8.1 Administrator Tools</a>
+<li><a href="#Developer-Tools">8.2 Developer Tools</a>
+<ul>
+<li><a href="#statuslog">8.2.1 statuslog</a>
+<li><a href="#statusgui">8.2.2 statusgui</a>
+<li><a href="#try">8.2.3 try</a>
+</li></ul>
+<li><a href="#Other-Tools">8.3 Other Tools</a>
+<ul>
+<li><a href="#sendchange">8.3.1 sendchange</a>
+<li><a href="#debugclient">8.3.2 debugclient</a>
+</li></ul>
+<li><a href="#_002ebuildbot-config-directory">8.4 .buildbot config directory</a>
+</li></ul>
+<li><a name="toc_Resources" href="#Resources">9 Resources</a>
+<li><a name="toc_Developer_0027s-Appendix" href="#Developer_0027s-Appendix">Developer's Appendix</a>
+<li><a name="toc_Index-of-Useful-Classes" href="#Index-of-Useful-Classes">Index of Useful Classes</a>
+<li><a name="toc_Index-of-master_002ecfg-keys" href="#Index-of-master_002ecfg-keys">Index of master.cfg keys</a>
+<li><a name="toc_Index" href="#Index">Index</a>
+</li></ul>
+</div>
+
+
+
+<div class="node">
+<p><hr>
+<a name="Top"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Introduction">Introduction</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#dir">(dir)</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#dir">(dir)</a>
+
+</div>
+
+<h2 class="unnumbered">BuildBot</h2>
+
+<p>This is the BuildBot manual.
+
+   <p>Copyright (C) 2005,2006 Brian Warner
+
+   <p>Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty
+provided the copyright notice and this notice are preserved.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Introduction">Introduction</a>:                 What the BuildBot does. 
+<li><a accesskey="2" href="#Installation">Installation</a>:                 Creating a buildmaster and buildslaves,
+                                running them. 
+<li><a accesskey="3" href="#Concepts">Concepts</a>:                     What goes on in the buildbot's little mind. 
+<li><a accesskey="4" href="#Configuration">Configuration</a>:                Controlling the buildbot. 
+<li><a accesskey="5" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>:   Discovering when to run a build. 
+<li><a accesskey="6" href="#Build-Process">Build Process</a>:                Controlling how each build is run. 
+<li><a accesskey="7" href="#Status-Delivery">Status Delivery</a>:              Telling the world about the build's results. 
+<li><a accesskey="8" href="#Command_002dline-tool">Command-line tool</a>
+<li><a accesskey="9" href="#Resources">Resources</a>:                    Getting help. 
+<li><a href="#Developer_0027s-Appendix">Developer's Appendix</a>
+<li><a href="#Index-of-Useful-Classes">Index of Useful Classes</a>
+<li><a href="#Index-of-master_002ecfg-keys">Index of master.cfg keys</a>
+<li><a href="#Index">Index</a>:                        Complete index.
+
+</li></ul>
+<p>--- The Detailed Node Listing ---
+
+<p>Introduction
+
+</p>
+<ul class="menu">
+<li><a href="#History-and-Philosophy">History and Philosophy</a>
+<li><a href="#System-Architecture">System Architecture</a>
+<li><a href="#Control-Flow">Control Flow</a>
+
+</li></ul>
+<p>System Architecture
+
+</p>
+<ul class="menu">
+<li><a href="#BuildSlave-Connections">BuildSlave Connections</a>
+<li><a href="#Buildmaster-Architecture">Buildmaster Architecture</a>
+<li><a href="#Status-Delivery-Architecture">Status Delivery Architecture</a>
+
+</li></ul>
+<p>Installation
+
+</p>
+<ul class="menu">
+<li><a href="#Requirements">Requirements</a>
+<li><a href="#Installing-the-code">Installing the code</a>
+<li><a href="#Creating-a-buildmaster">Creating a buildmaster</a>
+<li><a href="#Creating-a-buildslave">Creating a buildslave</a>
+<li><a href="#Launching-the-daemons">Launching the daemons</a>
+<li><a href="#Logfiles">Logfiles</a>
+<li><a href="#Shutdown">Shutdown</a>
+<li><a href="#Maintenance">Maintenance</a>
+<li><a href="#Troubleshooting">Troubleshooting</a>
+
+</li></ul>
+<p>Creating a buildslave
+
+</p>
+<ul class="menu">
+<li><a href="#Buildslave-Options">Buildslave Options</a>
+
+</li></ul>
+<p>Troubleshooting
+
+</p>
+<ul class="menu">
+<li><a href="#Starting-the-buildslave">Starting the buildslave</a>
+<li><a href="#Connecting-to-the-buildmaster">Connecting to the buildmaster</a>
+<li><a href="#Forcing-Builds">Forcing Builds</a>
+
+</li></ul>
+<p>Concepts
+
+</p>
+<ul class="menu">
+<li><a href="#Version-Control-Systems">Version Control Systems</a>
+<li><a href="#Schedulers">Schedulers</a>
+<li><a href="#BuildSet">BuildSet</a>
+<li><a href="#BuildRequest">BuildRequest</a>
+<li><a href="#Builder">Builder</a>
+<li><a href="#Users">Users</a>
+
+</li></ul>
+<p>Version Control Systems
+
+</p>
+<ul class="menu">
+<li><a href="#Generalizing-VC-Systems">Generalizing VC Systems</a>
+<li><a href="#Source-Tree-Specifications">Source Tree Specifications</a>
+<li><a href="#How-Different-VC-Systems-Specify-Sources">How Different VC Systems Specify Sources</a>
+<li><a href="#Attributes-of-Changes">Attributes of Changes</a>
+
+</li></ul>
+<p>Users
+
+</p>
+<ul class="menu">
+<li><a href="#Doing-Things-With-Users">Doing Things With Users</a>
+<li><a href="#Email-Addresses">Email Addresses</a>
+<li><a href="#IRC-Nicknames">IRC Nicknames</a>
+<li><a href="#Live-Status-Clients">Live Status Clients</a>
+
+</li></ul>
+<p>Configuration
+
+</p>
+<ul class="menu">
+<li><a href="#Config-File-Format">Config File Format</a>
+<li><a href="#Loading-the-Config-File">Loading the Config File</a>
+<li><a href="#Defining-the-Project">Defining the Project</a>
+<li><a href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a>
+<li><a href="#Setting-the-slaveport">Setting the slaveport</a>
+<li><a href="#Buildslave-Specifiers">Buildslave Specifiers</a>
+<li><a href="#Defining-Builders">Defining Builders</a>
+<li><a href="#Defining-Status-Targets">Defining Status Targets</a>
+<li><a href="#Debug-options">Debug options</a>
+
+</li></ul>
+<p>Listing Change Sources and Schedulers
+
+</p>
+<ul class="menu">
+<li><a href="#Scheduler-Types">Scheduler Types</a>
+<li><a href="#Build-Dependencies">Build Dependencies</a>
+
+</li></ul>
+<p>Getting Source Code Changes
+
+</p>
+<ul class="menu">
+<li><a href="#Change-Sources">Change Sources</a>
+
+</li></ul>
+<p>Change Sources
+
+</p>
+<ul class="menu">
+<li><a href="#Choosing-ChangeSources">Choosing ChangeSources</a>
+<li><a href="#CVSToys-_002d-PBService">CVSToys - PBService</a>
+<li><a href="#CVSToys-_002d-mail-notification">CVSToys - mail notification</a>
+<li><a href="#Other-mail-notification-ChangeSources">Other mail notification ChangeSources</a>
+<li><a href="#PBChangeSource">PBChangeSource</a>
+<li><a href="#P4Source">P4Source</a>
+<li><a href="#BonsaiPoller">BonsaiPoller</a>
+<li><a href="#SVNPoller">SVNPoller</a>
+
+</li></ul>
+<p>Build Process
+
+</p>
+<ul class="menu">
+<li><a href="#Build-Steps">Build Steps</a>
+<li><a href="#Interlocks">Interlocks</a>
+<li><a href="#Build-Factories">Build Factories</a>
+
+</li></ul>
+<p>Build Steps
+
+</p>
+<ul class="menu">
+<li><a href="#Common-Parameters">Common Parameters</a>
+<li><a href="#Source-Checkout">Source Checkout</a>
+<li><a href="#ShellCommand">ShellCommand</a>
+<li><a href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+<li><a href="#Python-BuildSteps">Python BuildSteps</a>
+<li><a href="#Transferring-Files">Transferring Files</a>
+<li><a href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</li></ul>
+<p>Source Checkout
+
+</p>
+<ul class="menu">
+<li><a href="#CVS">CVS</a>
+<li><a href="#SVN">SVN</a>
+<li><a href="#Darcs">Darcs</a>
+<li><a href="#Mercurial">Mercurial</a>
+<li><a href="#Arch">Arch</a>
+<li><a href="#Bazaar">Bazaar</a>
+<li><a href="#P4">P4</a>
+
+</li></ul>
+<p>Simple ShellCommand Subclasses
+
+</p>
+<ul class="menu">
+<li><a href="#Configure">Configure</a>
+<li><a href="#Compile">Compile</a>
+<li><a href="#Test">Test</a>
+<li><a href="#Build-Properties">Build Properties</a>
+
+</li></ul>
+<p>Python BuildSteps
+
+</p>
+<ul class="menu">
+<li><a href="#BuildEPYDoc">BuildEPYDoc</a>
+<li><a href="#PyFlakes">PyFlakes</a>
+
+</li></ul>
+<p>Writing New BuildSteps
+
+</p>
+<ul class="menu">
+<li><a href="#BuildStep-LogFiles">BuildStep LogFiles</a>
+<li><a href="#Adding-LogObservers">Adding LogObservers</a>
+<li><a href="#BuildStep-URLs">BuildStep URLs</a>
+
+</li></ul>
+<p>Build Factories
+
+</p>
+<ul class="menu">
+<li><a href="#BuildStep-Objects">BuildStep Objects</a>
+<li><a href="#BuildFactory">BuildFactory</a>
+<li><a href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</li></ul>
+<p>BuildStep Objects
+
+</p>
+<ul class="menu">
+<li><a href="#BuildFactory-Attributes">BuildFactory Attributes</a>
+<li><a href="#Quick-builds">Quick builds</a>
+
+</li></ul>
+<p>BuildFactory
+
+</p>
+<ul class="menu">
+<li><a href="#BuildFactory-Attributes">BuildFactory Attributes</a>
+<li><a href="#Quick-builds">Quick builds</a>
+
+</li></ul>
+<p>Process-Specific build factories
+
+</p>
+<ul class="menu">
+<li><a href="#GNUAutoconf">GNUAutoconf</a>
+<li><a href="#CPAN">CPAN</a>
+<li><a href="#Python-distutils">Python distutils</a>
+<li><a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a>
+
+</li></ul>
+<p>Status Delivery
+
+</p>
+<ul class="menu">
+<li><a href="#HTML-Waterfall">HTML Waterfall</a>
+<li><a href="#IRC-Bot">IRC Bot</a>
+<li><a href="#PBListener">PBListener</a>
+<li><a href="#Writing-New-Status-Plugins">Writing New Status Plugins</a>
+
+</li></ul>
+<p>Command-line tool
+
+</p>
+<ul class="menu">
+<li><a href="#Administrator-Tools">Administrator Tools</a>
+<li><a href="#Developer-Tools">Developer Tools</a>
+<li><a href="#Other-Tools">Other Tools</a>
+<li><a href="#g_t_002ebuildbot-config-directory">.buildbot config directory</a>
+
+</li></ul>
+<p>Developer Tools
+
+</p>
+<ul class="menu">
+<li><a href="#statuslog">statuslog</a>
+<li><a href="#statusgui">statusgui</a>
+<li><a href="#try">try</a>
+
+</li></ul>
+<p>Other Tools
+
+</p>
+<ul class="menu">
+<li><a href="#sendchange">sendchange</a>
+<li><a href="#debugclient">debugclient</a>
+
+   </ul>
+
+<div class="node">
+<p><hr>
+<a name="Introduction"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Installation">Installation</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Top">Top</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">1 Introduction</h2>
+
+<p><a name="index-introduction-1"></a>
+The BuildBot is a system to automate the compile/test cycle required by most
+software projects to validate code changes. By automatically rebuilding and
+testing the tree each time something has changed, build problems are
+pinpointed quickly, before other developers are inconvenienced by the
+failure. The guilty developer can be identified and harassed without human
+intervention. By running the builds on a variety of platforms, developers
+who do not have the facilities to test their changes everywhere before
+checkin will at least know shortly afterwards whether they have broken the
+build or not. Warning counts, lint checks, image size, compile time, and
+other build parameters can be tracked over time, are more visible, and
+are therefore easier to improve.
+
+   <p>The overall goal is to reduce tree breakage and provide a platform to
+run tests or code-quality checks that are too annoying or pedantic for
+any human to waste their time with. Developers get immediate (and
+potentially public) feedback about their changes, encouraging them to
+be more careful about testing before checkin.
+
+   <p>Features:
+
+     <ul>
+<li>run builds on a variety of slave platforms
+<li>arbitrary build process: handles projects using C, Python, whatever
+<li>minimal host requirements: python and Twisted
+<li>slaves can be behind a firewall if they can still do checkout
+<li>status delivery through web page, email, IRC, other protocols
+<li>track builds in progress, provide estimated completion time
+<li>flexible configuration by subclassing generic build process classes
+<li>debug tools to force a new build, submit fake Changes, query slave status
+<li>released under the GPL
+</ul>
+
+<ul class="menu">
+<li><a accesskey="1" href="#History-and-Philosophy">History and Philosophy</a>
+<li><a accesskey="2" href="#System-Architecture">System Architecture</a>
+<li><a accesskey="3" href="#Control-Flow">Control Flow</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="History-and-Philosophy"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#System-Architecture">System Architecture</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Introduction">Introduction</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Introduction">Introduction</a>
+
+</div>
+
+<h3 class="section">1.1 History and Philosophy</h3>
+
+<p><a name="index-Philosophy-of-operation-2"></a>
+The Buildbot was inspired by a similar project built for a development
+team writing a cross-platform embedded system. The various components
+of the project were supposed to compile and run on several flavors of
+unix (linux, solaris, BSD), but individual developers had their own
+preferences and tended to stick to a single platform. From time to
+time, incompatibilities would sneak in (some unix platforms want to
+use <code>string.h</code>, some prefer <code>strings.h</code>), and then the tree
+would compile for some developers but not others. The buildbot was
+written to automate the human process of walking into the office,
+updating a tree, compiling (and discovering the breakage), finding the
+developer at fault, and complaining to them about the problem they had
+introduced. With multiple platforms it was difficult for developers to
+do the right thing (compile their potential change on all platforms);
+the buildbot offered a way to help.
+
+   <p>Another problem was when programmers would change the behavior of a
+library without warning its users, or change internal aspects that
+other code was (unfortunately) depending upon. Adding unit tests to
+the codebase helps here: if an application's unit tests pass despite
+changes in the libraries it uses, you can have more confidence that
+the library changes haven't broken anything. Many developers
+complained that the unit tests were inconvenient or took too long to
+run: having the buildbot run them reduces the developer's workload to
+a minimum.
+
+   <p>In general, having more visibility into the project is always good,
+and automation makes it easier for developers to do the right thing. 
+When everyone can see the status of the project, developers are
+encouraged to keep the tree in good working order. Unit tests that
+aren't run on a regular basis tend to suffer from bitrot just like
+code does: exercising them on a regular basis helps to keep them
+functioning and useful.
+
+   <p>The current version of the Buildbot is additionally targeted at
+distributed free-software projects, where resources and platforms are
+only available when provided by interested volunteers. The buildslaves
+are designed to require an absolute minimum of configuration, reducing
+the effort a potential volunteer needs to expend to be able to
+contribute a new test environment to the project. The goal is that
+anyone who wants a given project to run on their favorite platform
+should be able to offer that project a buildslave, running on that
+platform, where they can verify that their portability code works,
+and keeps working.
+
+<div class="node">
+<p><hr>
+<a name="System-Architecture"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Control-Flow">Control Flow</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#History-and-Philosophy">History and Philosophy</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Introduction">Introduction</a>
+
+</div>
+
+<!-- node-name,  next,  previous,  up -->
+<h3 class="section">1.2 System Architecture</h3>
+
+<p>The Buildbot consists of a single <code>buildmaster</code> and one or more
+<code>buildslaves</code>, connected in a star topology. The buildmaster
+makes all decisions about what, when, and how to build. It sends
+commands to be run on the build slaves, which simply execute the
+commands and return the results. (Certain steps involve more local
+decision making, where the overhead of sending a lot of commands back
+and forth would be inappropriate, but in general the buildmaster is
+responsible for everything.)
+
+   <p>The buildmaster is usually fed <code>Changes</code> by some sort of version
+control system (see <a href="#Change-Sources">Change Sources</a>), which may cause builds to be
+run. As the builds are performed, various status messages are
+produced, which are then sent to any registered Status Targets
+(see <a href="#Status-Delivery">Status Delivery</a>).
+
+<!-- @image{FILENAME, WIDTH, HEIGHT, ALTTEXT, EXTENSION} -->
+<div class="block-image"><img src="images/overview.png" alt="Overview Diagram"></div>
+
+   <p>The buildmaster is configured and maintained by the &ldquo;buildmaster
+admin&rdquo;, who is generally the project team member responsible for
+build process issues. Each buildslave is maintained by a &ldquo;buildslave
+admin&rdquo;, who does not need to be quite as involved. Generally slaves are
+run by anyone who has an interest in seeing the project work well on
+their favorite platform.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildSlave-Connections">BuildSlave Connections</a>
+<li><a accesskey="2" href="#Buildmaster-Architecture">Buildmaster Architecture</a>
+<li><a accesskey="3" href="#Status-Delivery-Architecture">Status Delivery Architecture</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildSlave-Connections"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Buildmaster-Architecture">Buildmaster Architecture</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#System-Architecture">System Architecture</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#System-Architecture">System Architecture</a>
+
+</div>
+
+<h4 class="subsection">1.2.1 BuildSlave Connections</h4>
+
+<p>The buildslaves are typically run on a variety of separate machines,
+at least one per platform of interest. These machines connect to the
+buildmaster over a TCP connection to a publicly-visible port. As a
+result, the buildslaves can live behind a NAT box or similar
+firewalls, as long as they can get to the buildmaster. The TCP connections
+are initiated by the buildslave and accepted by the buildmaster, but
+commands and results travel both ways within this connection. The
+buildmaster is always in charge, so all commands travel exclusively
+from the buildmaster to the buildslave.
+
+   <p>To perform builds, the buildslaves must typically obtain source code
+from a CVS/SVN/etc repository. Therefore they must also be able to
+reach the repository. The buildmaster provides instructions for
+performing builds, but does not provide the source code itself.
+
+   <div class="block-image"><img src="images/slaves.png" alt="BuildSlave Connections"></div>
+
+<div class="node">
+<p><hr>
+<a name="Buildmaster-Architecture"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Status-Delivery-Architecture">Status Delivery Architecture</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildSlave-Connections">BuildSlave Connections</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#System-Architecture">System Architecture</a>
+
+</div>
+
+<h4 class="subsection">1.2.2 Buildmaster Architecture</h4>
+
+<p>The Buildmaster consists of several pieces:
+
+   <div class="block-image"><img src="images/master.png" alt="BuildMaster Architecture"></div>
+
+     <ul>
+<li>Change Sources, which create a Change object each time something is
+modified in the VC repository. Most ChangeSources listen for messages
+from a hook script of some sort. Some sources actively poll the
+repository on a regular basis. All Changes are fed to the Schedulers.
+
+     <li>Schedulers, which decide when builds should be performed. They collect
+Changes into BuildRequests, which are then queued for delivery to
+Builders until a buildslave is available.
+
+     <li>Builders, which control exactly <em>how</em> each build is performed
+(with a series of BuildSteps, configured in a BuildFactory). Each
+Build is run on a single buildslave.
+
+     <li>Status plugins, which deliver information about the build results
+through protocols like HTTP, mail, and IRC.
+
+   </ul>
+
+   <div class="block-image"><img src="images/slavebuilder.png" alt="SlaveBuilders"></div>
+
+   <p>Each Builder is configured with a list of BuildSlaves that it will use
+for its builds. These buildslaves are expected to behave identically:
+the only reason to use multiple BuildSlaves for a single Builder is to
+provide a measure of load-balancing.
+
+   <p>Within a single BuildSlave, each Builder creates its own SlaveBuilder
+instance. These SlaveBuilders operate independently from each other. 
+Each gets its own base directory to work in. It is quite common to
+have many Builders sharing the same buildslave. For example, there
+might be two buildslaves: one for i386, and a second for PowerPC. 
+There may then be a pair of Builders that do a full compile/test run,
+one for each architecture, and a lone Builder that creates snapshot
+source tarballs if the full builders complete successfully. The full
+builders would each run on a single buildslave, whereas the tarball
+creation step might run on either buildslave (since the platform
+doesn't matter when creating source tarballs). In this case, the
+mapping would look like:
+
+<pre class="example">     Builder(full-i386)  -&gt;  BuildSlaves(slave-i386)
+     Builder(full-ppc)   -&gt;  BuildSlaves(slave-ppc)
+     Builder(source-tarball) -&gt; BuildSlaves(slave-i386, slave-ppc)
+</pre>
+   <p>and each BuildSlave would have two SlaveBuilders inside it, one for a
+full builder, and a second for the source-tarball builder.
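+
+   <p>Expressed in <samp><span class="file">master.cfg</span></samp> terms, that mapping might be
+written roughly like this (a sketch only: the exact builder keys are described
+in the Configuration chapter, and the <code>f_full</code> and <code>f_tarball</code>
+BuildFactories are assumed to be defined elsewhere):
+
+<pre class="example">     c['builders'] = [
+         {'name': 'full-i386', 'slavename': 'slave-i386',
+          'builddir': 'full-i386', 'factory': f_full},
+         {'name': 'full-ppc', 'slavename': 'slave-ppc',
+          'builddir': 'full-ppc', 'factory': f_full},
+         # the tarball builder may run on either slave
+         {'name': 'source-tarball', 'slavenames': ['slave-i386', 'slave-ppc'],
+          'builddir': 'source-tarball', 'factory': f_tarball},
+     ]
+</pre>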
+
+   <p>Once a SlaveBuilder is available, the Builder pulls one or more
+BuildRequests off its incoming queue. (It may pull more than one if it
+determines that it can merge the requests together; for example, there
+may be multiple requests to build the current HEAD revision). These
+requests are merged into a single Build instance, which includes the
+SourceStamp that describes what exact version of the source code
+should be used for the build. The Build is then assigned to a
+SlaveBuilder and the build begins.
+
+<div class="node">
+<p><hr>
+<a name="Status-Delivery-Architecture"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Buildmaster-Architecture">Buildmaster Architecture</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#System-Architecture">System Architecture</a>
+
+</div>
+
+<h4 class="subsection">1.2.3 Status Delivery Architecture</h4>
+
+<p>The buildmaster maintains a central Status object, to which various
+status plugins are connected. Through this Status object, a full
+hierarchy of build status objects can be obtained.
+
+   <div class="block-image"><img src="images/status.png" alt="Status Delivery"></div>
+
+   <p>The configuration file controls which status plugins are active. Each
+status plugin gets a reference to the top-level Status object. From
+there they can request information on each Builder, Build, Step, and
+LogFile. This query-on-demand interface is used by the html.Waterfall
+plugin to create the main status page each time a web browser hits the
+main URL.
+
+   <p>The status plugins can also subscribe to hear about new Builds as they
+occur: this is used by the MailNotifier to create new email messages
+for each recently-completed Build.
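+
+   <p>To give a flavor of how this looks in practice, a buildmaster's
+configuration file might activate a Waterfall page and a MailNotifier
+roughly like this (the port number and addresses are only placeholders;
+the individual status targets are described in the Status Delivery
+chapter):
+
+<pre class="example">     from buildbot.status import html, mail
+
+     c['status'] = []
+     # query-on-demand waterfall page, served on an illustrative port
+     c['status'].append(html.Waterfall(http_port=8010))
+     # email about builds whose results have just gotten worse
+     c['status'].append(mail.MailNotifier(fromaddr="buildbot@example.org",
+                                          mode="problem"))
+</pre>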
+
+   <p>The Status object records the status of old builds on disk in the
+buildmaster's base directory. This allows it to return information
+about historical builds.
+
+   <p>There are also status objects that correspond to Schedulers and
+BuildSlaves. These allow status plugins to report information about
+upcoming builds, and the online/offline status of each buildslave.
+
+<div class="node">
+<p><hr>
+<a name="Control-Flow"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#System-Architecture">System Architecture</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Introduction">Introduction</a>
+
+</div>
+
+<!-- node-name,  next,  previous,  up -->
+<h3 class="section">1.3 Control Flow</h3>
+
+<p>A day in the life of the buildbot:
+
+     <ul>
+<li>A developer commits some source code changes to the repository. A hook
+script or commit trigger of some sort sends information about this
+change to the buildmaster through one of its configured Change
+Sources. This notification might arrive via email, or over a network
+connection (either initiated by the buildmaster as it &ldquo;subscribes&rdquo;
+to changes, or by the commit trigger as it pushes Changes towards the
+buildmaster). The Change contains information about who made the
+change, what files were modified, which revision contains the change,
+and any checkin comments.
+
+     <li>The buildmaster distributes this change to all of its configured
+Schedulers. Any &ldquo;important&rdquo; changes cause the &ldquo;tree-stable-timer&rdquo;
+to be started, and the Change is added to a list of those that will go
+into a new Build. When the timer expires, a Build is started on each
+of a set of configured Builders, all compiling/testing the same source
+code. Unless configured otherwise, all Builds run in parallel on the
+various buildslaves.
+
+     <li>The Build consists of a series of Steps. Each Step causes some number
+of commands to be invoked on the remote buildslave associated with
+that Builder. The first step is almost always to perform a checkout of
+the appropriate revision from the same VC system that produced the
+Change. The rest generally perform a compile and run unit tests. As
+each Step runs, the buildslave reports back command output and return
+status to the buildmaster.
+
+     <li>As the Build runs, status messages like &ldquo;Build Started&rdquo;, &ldquo;Step
+Started&rdquo;, &ldquo;Build Finished&rdquo;, etc, are published to a collection of
+Status Targets. One of these targets is usually the HTML &ldquo;Waterfall&rdquo;
+display, which shows a chronological list of events, and summarizes
+the results of the most recent build at the top of each column. 
+Developers can periodically check this page to see how their changes
+have fared. If they see red, they know that they've made a mistake and
+need to fix it. If they see green, they know that they've done their
+duty and don't need to worry about their change breaking anything.
+
+     <li>If a MailNotifier status target is active, the completion of a build
+will cause email to be sent to any developers whose Changes were
+incorporated into this Build. The MailNotifier can be configured to
+only send mail upon failing builds, or for builds which have just
+transitioned from passing to failing. Other status targets can provide
+similar real-time notification via different communication channels,
+like IRC.
+
+   </ul>
+
+<div class="node">
+<p><hr>
+<a name="Installation"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Concepts">Concepts</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Introduction">Introduction</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">2 Installation</h2>
+
+<ul class="menu">
+<li><a accesskey="1" href="#Requirements">Requirements</a>
+<li><a accesskey="2" href="#Installing-the-code">Installing the code</a>
+<li><a accesskey="3" href="#Creating-a-buildmaster">Creating a buildmaster</a>
+<li><a accesskey="4" href="#Creating-a-buildslave">Creating a buildslave</a>
+<li><a accesskey="5" href="#Launching-the-daemons">Launching the daemons</a>
+<li><a accesskey="6" href="#Logfiles">Logfiles</a>
+<li><a accesskey="7" href="#Shutdown">Shutdown</a>
+<li><a accesskey="8" href="#Maintenance">Maintenance</a>
+<li><a accesskey="9" href="#Troubleshooting">Troubleshooting</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Requirements"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Installing-the-code">Installing the code</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Installation">Installation</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.1 Requirements</h3>
+
+<p>At a bare minimum, you'll need the following (for both the buildmaster
+and a buildslave):
+
+     <ul>
+<li>Python: http://www.python.org
+
+     <p>Buildbot requires python-2.2 or later, and is primarily developed
+against python-2.3. The buildmaster uses generators, a feature which
+is not available in python-2.1, and both master and slave require a
+version of Twisted which only works with python-2.2 or later. Certain
+features (like the inclusion of build logs in status emails) require
+python-2.2.2 or later. The IRC &ldquo;force build&rdquo; command requires
+python-2.3 (for the shlex.split function).
+
+     <li>Twisted: http://twistedmatrix.com
+
+     <p>Both the buildmaster and the buildslaves require Twisted-1.3.0 or
+later. Buildbot has been developed mainly against Twisted-2.0.1 and has
+been tested against Twisted-2.1.0 (the most recent as of this
+writing); it might even work on versions as old as Twisted-1.1.0, but
+as always the most recent version is recommended.
+
+     <p>Twisted-1.3.0 and earlier were released as a single monolithic
+package. When you run Buildbot against Twisted-2.0.0 or later (which
+are split into a number of smaller subpackages), you'll need at least
+"Twisted" (the core package), and you'll also want TwistedMail,
+TwistedWeb, and TwistedWords (for sending email, serving a web status
+page, and delivering build status via IRC, respectively). 
+</ul>
+
+   <p>Certain other packages may be useful on the system running the
+buildmaster:
+
+     <ul>
+<li>CVSToys: http://purl.net/net/CVSToys
+
+     <p>If your buildmaster uses FreshCVSSource to receive change notification
+from a cvstoys daemon, it will require CVSToys to be installed (tested
+with CVSToys-1.0.10). If it doesn't use that source (i.e. if you
+only use a mail-parsing change source, or the SVN notification
+script), you will not need CVSToys.
+
+   </ul>
+
+   <p>And of course, your project's build process will impose additional
+requirements on the buildslaves. These hosts must have all the tools
+necessary to compile and test your project's source code.
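+
+   <p>As a quick (and entirely optional) sanity check, you can confirm the
+Python and Twisted requirements from an interactive python session on
+each host; the following is only a sketch of such a check:
+
+<pre class="example">     import sys
+     print sys.version               # want 2.2 or later, ideally 2.3+
+
+     import twisted.copyright
+     print twisted.copyright.version # want 1.3.0 or later
+
+     # with Twisted-2.x, also confirm the optional subpackages are present
+     import twisted.mail, twisted.web, twisted.words
+</pre>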
+
+<div class="node">
+<p><hr>
+<a name="Installing-the-code"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Creating-a-buildmaster">Creating a buildmaster</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Requirements">Requirements</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.2 Installing the code</h3>
+
+<p><a name="index-installation-3"></a>
+The Buildbot is installed using the standard python <code>distutils</code>
+module. After unpacking the tarball, the process is:
+
+<pre class="example">     python setup.py build
+     python setup.py install
+</pre>
+   <p>where the install step may need to be done as root. This will put the
+bulk of the code somewhere like
+<samp><span class="file">/usr/lib/python2.3/site-packages/buildbot</span></samp>. It will also install the
+<code>buildbot</code> command-line tool in <samp><span class="file">/usr/bin/buildbot</span></samp>.
+
+   <p>To test this, shift to a different directory (like /tmp), and run:
+
+<pre class="example">     buildbot --version
+</pre>
+   <p>If it shows you the versions of Buildbot and Twisted, the install went
+ok. If it says <code>no such command</code> or it gets an <code>ImportError</code>
+when it tries to load the libraries, then something went wrong. 
+<code>pydoc buildbot</code> is another useful diagnostic tool.
+
+   <p>Windows users will find these files in other places. You will need to
+make sure that python can find the libraries, and will probably find
+it convenient to have <code>buildbot</code> on your PATH.
+
+   <p>If you wish, you can run the buildbot unit test suite like this:
+
+<pre class="example">     PYTHONPATH=. trial buildbot.test
+</pre>
+   <p>This should run up to 192 tests, depending upon what VC tools you have
+installed. On my desktop machine it takes about five minutes to
+complete. Nothing should fail; a few might be skipped. If any of the
+tests fail, you should stop and investigate the cause before
+continuing the installation process, as it will probably be easier to
+track down the bug early.
+
+   <p>If you cannot or do not wish to install the buildbot into a site-wide
+location like <samp><span class="file">/usr</span></samp> or <samp><span class="file">/usr/local</span></samp>, you can also install
+it into the account's home directory. Run the install command like
+this:
+
+<pre class="example">     python setup.py install --home=~
+</pre>
+   <p>That will populate <samp><span class="file">~/lib/python</span></samp> and create
+<samp><span class="file">~/bin/buildbot</span></samp>. Make sure this lib directory is on your
+<code>PYTHONPATH</code>.
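+
+   <p>A quick way to confirm that python is really picking up the
+home-directory installation is to import the package and check where it
+was loaded from (this is just a diagnostic, equivalent in spirit to
+<code>buildbot --version</code>):
+
+<pre class="example">     # run with PYTHONPATH pointing at ~/lib/python
+     import buildbot
+     print buildbot.version, buildbot.__file__
+</pre>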
+
+<div class="node">
+<p><hr>
+<a name="Creating-a-buildmaster"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Creating-a-buildslave">Creating a buildslave</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Installing-the-code">Installing the code</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.3 Creating a buildmaster</h3>
+
+<p>As you learned earlier (see <a href="#System-Architecture">System Architecture</a>), the buildmaster
+runs on a central host (usually one that is publicly visible, so
+everybody can check on the status of the project), and controls all
+aspects of the buildbot system. Let us call this host
+<code>buildbot.example.org</code>.
+
+   <p>You may wish to create a separate user account for the buildmaster,
+perhaps named <code>buildmaster</code>. This can help keep your personal
+configuration distinct from that of the buildmaster and is useful if
+you have to use a mail-based notification system (see <a href="#Change-Sources">Change Sources</a>). However, the Buildbot will work just fine with your regular
+user account.
+
+   <p>You need to choose a directory for the buildmaster, called the
+<code>basedir</code>. This directory will be owned by the buildmaster, which
+will use configuration files therein, and create status files as it
+runs. <samp><span class="file">~/Buildbot</span></samp> is a likely value. If you run multiple
+buildmasters in the same account, or if you run both masters and
+slaves, you may want a more distinctive name like
+<samp><span class="file">~/Buildbot/master/gnomovision</span></samp> or
+<samp><span class="file">~/Buildmasters/fooproject</span></samp>. If you are using a separate user
+account, this might just be <samp><span class="file">~buildmaster/masters/fooproject</span></samp>.
+
+   <p>Once you've picked a directory, use the <samp><span class="command">buildbot
+create-master</span></samp> command to create the directory and populate it with
+startup files:
+
+<pre class="example">     buildbot create-master <var>basedir</var>
+</pre>
+   <p>You will need to create a configuration file (see <a href="#Configuration">Configuration</a>)
+before starting the buildmaster. Most of the rest of this manual is
+dedicated to explaining how to do this. A sample configuration file is
+placed in the working directory, named <samp><span class="file">master.cfg.sample</span></samp>, which
+can be copied to <samp><span class="file">master.cfg</span></samp> and edited to suit your purposes.
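+
+   <p>To give an early feel for what goes into it, a heavily abbreviated
+<samp><span class="file">master.cfg</span></samp> might look roughly like the following sketch
+(every key shown here is explained in the Configuration chapter; the
+names, password, and port numbers are only placeholders):
+
+<pre class="example">     # master.cfg -- a minimal sketch, not a complete working configuration
+     c = BuildmasterConfig = {}
+
+     c['bots'] = [('bot1name', 'bot1passwd')]  # buildslave name/password pairs
+     c['slavePortnum'] = 9989                  # port the buildslaves connect to
+
+     c['sources'] = []      # ChangeSources, see Getting Source Code Changes
+     c['schedulers'] = []   # Schedulers
+     c['builders'] = []     # Builders
+     c['status'] = []       # status targets, see Status Delivery
+
+     c['projectName'] = 'Example Project'
+     c['buildbotURL'] = 'http://buildbot.example.org:8010/'
+</pre>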
+
+   <p>(Internal details: This command creates a file named
+<samp><span class="file">buildbot.tac</span></samp> that contains all the state necessary to create
+the buildmaster. Twisted has a tool called <code>twistd</code> which can use
+this .tac file to create and launch a buildmaster instance. twistd
+takes care of logging and daemonization (running the program in the
+background). <samp><span class="file">/usr/bin/buildbot</span></samp> is a front end which runs twistd
+for you.)
+
+   <p>In addition to <samp><span class="file">buildbot.tac</span></samp>, a small <samp><span class="file">Makefile.sample</span></samp> is
+installed. This can be used as the basis for customized daemon startup;
+see <a href="#Launching-the-daemons">Launching the daemons</a>.
+
+<div class="node">
+<p><hr>
+<a name="Creating-a-buildslave"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Launching-the-daemons">Launching the daemons</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Creating-a-buildmaster">Creating a buildmaster</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.4 Creating a buildslave</h3>
+
+<p>Typically, you will be adding a buildslave to an existing buildmaster,
+to provide additional architecture coverage. The buildbot
+administrator will give you several pieces of information necessary to
+connect to the buildmaster. You should also be somewhat familiar with
+the project being tested, so you can troubleshoot build problems
+locally.
+
+   <p>The buildbot exists to make sure that the project's stated &ldquo;how to
+build it&rdquo; process actually works. To this end, the buildslave should
+run in an environment just like that of your regular developers. 
+Typically the project build process is documented somewhere
+(<samp><span class="file">README</span></samp>, <samp><span class="file">INSTALL</span></samp>, etc), in a document that should
+mention all library dependencies and contain a basic set of build
+instructions. This document will be useful as you configure the host
+and account in which the buildslave runs.
+
+   <p>Here's a good checklist for setting up a buildslave:
+
+     <ol type=1 start=1>
+<li>Set up the account
+
+     <p>It is recommended (although not mandatory) to set up a separate user
+account for the buildslave. This account is frequently named
+<code>buildbot</code> or <code>buildslave</code>. This serves to isolate your
+personal working environment from that of the slave, and helps to
+minimize the security threat posed by letting possibly-unknown
+contributors run arbitrary code on your system. The account should
+have a minimum of fancy init scripts.
+
+     <li>Install the buildbot code
+
+     <p>Follow the instructions given earlier (see <a href="#Installing-the-code">Installing the code</a>). 
+If you use a separate buildslave account, and you didn't install the
+buildbot code to a shared location, then you will need to install it
+with <code>--home=~</code> for each account that needs it.
+
+     <li>Set up the host
+
+     <p>Make sure the host can actually reach the buildmaster. Usually the
+buildmaster is running a status webserver on the same machine, so
+simply point your web browser at it and see if you can get there. 
+Install whatever additional packages or libraries the project's
+INSTALL document advises. (Or not: if your buildslave is supposed to
+make sure that building without optional libraries still works, then
+don't install those libraries.)
+
+     <p>Again, these libraries don't necessarily have to be installed to a
+site-wide shared location, but they must be available to your build
+process. Accomplishing this is usually very specific to the build
+process, so installing them to <samp><span class="file">/usr</span></samp> or <samp><span class="file">/usr/local</span></samp> is
+usually the best approach.
+
+     <li>Test the build process
+
+     <p>Follow the instructions in the INSTALL document, in the buildslave's
+account. Perform a full CVS (or whatever) checkout, configure, make,
+run tests, etc. Confirm that the build works without manual fussing. 
+If it doesn't work when you do it by hand, it is unlikely to work
+when the buildbot attempts to do it in an automated fashion.
+
+     <li>Choose a base directory
+
+     <p>This should be somewhere in the buildslave's account, typically named
+after the project which is being tested. The buildslave will not touch
+any file outside of this directory. Something like <samp><span class="file">~/Buildbot</span></samp>
+or <samp><span class="file">~/Buildslaves/fooproject</span></samp> is appropriate.
+
+     <li>Get the buildmaster host/port, botname, and password
+
+     <p>When the buildbot admin configures the buildmaster to accept and use
+your buildslave, they will provide you with the following pieces of
+information:
+
+          <ul>
+<li>your buildslave's name
+<li>the password assigned to your buildslave
+<li>the hostname and port number of the buildmaster, e.g. buildbot.example.org:8007
+</ul>
+
+     <li>Create the buildslave
+
+     <p>Now run the 'buildbot' command as follows:
+
+     <pre class="example">          buildbot create-slave <var>BASEDIR</var> <var>MASTERHOST</var>:<var>PORT</var> <var>SLAVENAME</var> <var>PASSWORD</var>
+     </pre>
+     <p>This will create the base directory and a collection of files inside,
+including the <samp><span class="file">buildbot.tac</span></samp> file that contains all the
+information you passed to the <code>buildbot</code> command.
+
+     <li>Fill in the hostinfo files
+
+     <p>When it first connects, the buildslave will send a few files up to the
+buildmaster which describe the host that it is running on. These files
+are presented on the web status display so that developers have more
+information to reproduce any test failures that are witnessed by the
+buildbot. There are sample files in the <samp><span class="file">info</span></samp> subdirectory of
+the buildbot's base directory. You should edit these to correctly
+describe you and your host.
+
+     <p><samp><span class="file">BASEDIR/info/admin</span></samp> should contain your name and email address. 
+This is the &ldquo;buildslave admin address&rdquo;, and will be visible from the
+build status page (so you may wish to munge it a bit if
+address-harvesting spambots are a concern).
+
+     <p><samp><span class="file">BASEDIR/info/host</span></samp> should be filled with a brief description of
+the host: OS, version, memory size, CPU speed, versions of relevant
+libraries installed, and finally the version of the buildbot code
+which is running the buildslave.
+
+     <p>If you run many buildslaves, you may want to create a single
+<samp><span class="file">~buildslave/info</span></samp> file and share it among all the buildslaves
+with symlinks.
+
+        </ol>
+
+<ul class="menu">
+<li><a accesskey="1" href="#Buildslave-Options">Buildslave Options</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Buildslave-Options"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Creating-a-buildslave">Creating a buildslave</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Creating-a-buildslave">Creating a buildslave</a>
+
+</div>
+
+<h4 class="subsection">2.4.1 Buildslave Options</h4>
+
+<p>There are a handful of options you might want to use when creating the
+buildslave with the <samp><span class="command">buildbot create-slave &lt;options&gt; DIR &lt;params&gt;</span></samp>
+command. You can type <samp><span class="command">buildbot create-slave --help</span></samp> for a summary. 
+To use these, just include them on the <samp><span class="command">buildbot create-slave</span></samp>
+command line, like this:
+
+<pre class="example">     buildbot create-slave --umask=022 ~/buildslave buildmaster.example.org:42012 myslavename mypasswd
+</pre>
+     <dl>
+<dt><code>--usepty</code><dd>This is a boolean flag that tells the buildslave whether to launch
+child processes in a PTY (the default) or with regular pipes. The
+advantage of using a PTY is that &ldquo;grandchild&rdquo; processes are more
+likely to be cleaned up if the build is interrupted or times out
+(since it enables the use of a &ldquo;process group&rdquo; in which all child
+processes will be placed). The disadvantages: some forms of Unix have
+problems with PTYs, some of your unit tests may behave differently
+when run under a PTY (generally those which check to see if they are
+being run interactively), and PTYs will merge the stdout and stderr
+streams into a single output stream (which means the red-vs-black
+coloring in the logfiles will be lost). If you encounter problems, you
+can add <code>--usepty=0</code> to disable the use of PTYs. Note that
+Windows buildslaves never use PTYs.
+
+     <br><dt><code>--umask</code><dd>This is a string (generally an octal representation of an integer)
+which will cause the buildslave process' &ldquo;umask&rdquo; value to be set
+shortly after initialization. The &ldquo;twistd&rdquo; daemonization utility
+forces the umask to 077 at startup (which means that all files created
+by the buildslave or its child processes will be unreadable by any
+user other than the buildslave account). If you want build products to
+be readable by other accounts, you can add <code>--umask=022</code> to tell
+the buildslave to fix the umask after twistd clobbers it. If you want
+build products to be <em>writable</em> by other accounts too, use
+<code>--umask=000</code>, but this is likely to be a security problem.
+
+     <br><dt><code>--keepalive</code><dd>This is a number that indicates how frequently &ldquo;keepalive&rdquo; messages
+should be sent from the buildslave to the buildmaster, expressed in
+seconds. The default (600) causes a message to be sent to the
+buildmaster at least once every 10 minutes. To set this to a lower
+value, use e.g. <code>--keepalive=120</code>.
+
+     <p>If the buildslave is behind a NAT box or stateful firewall, these
+messages may help to keep the connection alive: some NAT boxes tend to
+forget about a connection if it has not been used in a while. When
+this happens, the buildmaster will think that the buildslave has
+disappeared, and builds will time out. Meanwhile the buildslave will
+not realize that anything is wrong.
+
+   </dl>
+
+<div class="node">
+<p><hr>
+<a name="Launching-the-daemons"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Logfiles">Logfiles</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Creating-a-buildslave">Creating a buildslave</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.5 Launching the daemons</h3>
+
+<p>Both the buildmaster and the buildslave run as daemon programs. To
+launch them, pass the working directory to the <code>buildbot</code>
+command:
+
+<pre class="example">     buildbot start <var>BASEDIR</var>
+</pre>
+   <p>This command will start the daemon and then return, so normally it
+will not produce any output. To verify that the programs are indeed
+running, look for a pair of files named <samp><span class="file">twistd.log</span></samp> and
+<samp><span class="file">twistd.pid</span></samp> that should be created in the working directory. 
+<samp><span class="file">twistd.pid</span></samp> contains the process ID of the newly-spawned daemon.
+
+   <p>When the buildslave connects to the buildmaster, new directories will
+start appearing in its base directory. The buildmaster tells the slave
+to create a directory for each Builder which will be using that slave. 
+All build operations are performed within these directories: CVS
+checkouts, compiles, and tests.
+
+   <p>Once you get everything running, you will want to arrange for the
+buildbot daemons to be started at boot time. One way is to use
+<code>cron</code>, by putting them in a @reboot crontab entry<a rel="footnote" href="#fn-1" name="fnd-1"><sup>1</sup></a>:
+
+<pre class="example">     @reboot buildbot start <var>BASEDIR</var>
+</pre>
+   <p>When you run <samp><span class="command">crontab</span></samp> to set this up, remember to do it as
+the buildmaster or buildslave account! If you add this to your crontab
+when running as your regular account (or worse yet, root), then the
+daemon will run as the wrong user, quite possibly as one with more
+authority than you intended to provide.
+
+   <p>It is important to remember that the environment provided to cron jobs
+and init scripts can be quite different from your normal runtime. 
+There may be fewer environment variables specified, and the PATH may
+be shorter than usual. It is a good idea to test out this method of
+launching the buildslave by using a cron job with a time in the near
+future, with the same command, and then check <samp><span class="file">twistd.log</span></samp> to
+make sure the slave actually started correctly. Common problems here
+are for <samp><span class="file">/usr/local</span></samp> or <samp><span class="file">~/bin</span></samp> to not be on your
+<code>PATH</code>, or for <code>PYTHONPATH</code> to not be set correctly. 
+Sometimes <code>HOME</code> is messed up too.
+
+   <p>To modify the way the daemons are started (perhaps you want to set
+some environment variables first, or perform some cleanup each time),
+you can create a file named <samp><span class="file">Makefile.buildbot</span></samp> in the base
+directory. When the <samp><span class="file">buildbot</span></samp> front-end tool is told to
+<samp><span class="command">start</span></samp> the daemon, and it sees this file (and
+<samp><span class="file">/usr/bin/make</span></samp> exists), it will do <samp><span class="command">make -f
+Makefile.buildbot start</span></samp> instead of its usual action (which involves
+running <samp><span class="command">twistd</span></samp>). When the buildmaster or buildslave is
+installed, a <samp><span class="file">Makefile.sample</span></samp> is created which implements the
+same behavior as the <samp><span class="file">buildbot</span></samp> tool uses, so if you want to
+customize the process, just copy <samp><span class="file">Makefile.sample</span></samp> to
+<samp><span class="file">Makefile.buildbot</span></samp> and edit it as necessary.
+
+<div class="node">
+<p><hr>
+<a name="Logfiles"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Shutdown">Shutdown</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Launching-the-daemons">Launching the daemons</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.6 Logfiles</h3>
+
+<p><a name="index-logfiles-4"></a>
+While a buildbot daemon runs, it emits text to a logfile, named
+<samp><span class="file">twistd.log</span></samp>. A command like <code>tail -f twistd.log</code> is useful
+to watch the command output as it runs.
+
+   <p>The buildmaster will announce any errors with its configuration file
+in the logfile, so it is a good idea to look at the log at startup
+time to check for any problems. Most buildmaster activities will cause
+lines to be added to the log.
+
+<div class="node">
+<p><hr>
+<a name="Shutdown"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Maintenance">Maintenance</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Logfiles">Logfiles</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.7 Shutdown</h3>
+
+<p>To stop a buildmaster or buildslave manually, use:
+
+<pre class="example">     buildbot stop <var>BASEDIR</var>
+</pre>
+   <p>This simply looks for the <samp><span class="file">twistd.pid</span></samp> file and kills whatever
+process is identified within.
+
+   <p>At system shutdown, all processes are sent a <code>SIGKILL</code>. Since
+<code>SIGKILL</code> cannot be caught, the buildmaster and buildslave are simply
+terminated; being stopped this way does them no harm.
+
+   <p>The buildmaster will respond to a <code>SIGHUP</code> by re-reading its
+config file. The following shortcut is available:
+
+<pre class="example">     buildbot reconfig <var>BASEDIR</var>
+</pre>
+   <p>When you update the Buildbot code to a new release, you will need to
+restart the buildmaster and/or buildslave before it can take advantage
+of the new code. You can do a <code>buildbot stop </code><var>BASEDIR</var> and
+<code>buildbot start </code><var>BASEDIR</var> in quick succession, or you can
+use the <code>restart</code> shortcut, which does both steps for you:
+
+<pre class="example">     buildbot restart <var>BASEDIR</var>
+</pre>
+   <div class="node">
+<p><hr>
+<a name="Maintenance"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Troubleshooting">Troubleshooting</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Shutdown">Shutdown</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.8 Maintenance</h3>
+
+<p>It is a good idea to check the buildmaster's status page every once in
+a while, to see if your buildslave is still online. Eventually the
+buildbot will probably be enhanced to send you email (via the
+<samp><span class="file">info/admin</span></samp> email address) when the slave has been offline for
+more than a few hours.
+
+   <p>If you find you can no longer provide a buildslave to the project, please
+let the project admins know, so they can put out a call for a
+replacement.
+
+   <p>The Buildbot records status and logs output continually, each time a
+build is performed. The status tends to be small, but the build logs
+can become quite large. Each build and each log are recorded in separate
+files, arranged hierarchically under the buildmaster's base directory. 
+To prevent these files from growing without bound, you should
+periodically delete old build logs. A simple cron job to delete
+anything older than, say, two weeks should do the job. The only trick
+is to leave the <samp><span class="file">buildbot.tac</span></samp> and other support files alone;
+<code>find</code>'s <code>-mindepth</code> argument helps by skipping everything in the
+top directory. You can use something like the following:
+
+<pre class="example">     @weekly cd BASEDIR &amp;&amp; find . -mindepth 2 -type f -mtime +14 -exec rm {} \;
+     @weekly cd BASEDIR &amp;&amp; find twistd.log* -mtime +14 -exec rm {} \;
+</pre>
+   <div class="node">
+<p><hr>
+<a name="Troubleshooting"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Maintenance">Maintenance</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.9 Troubleshooting</h3>
+
+<p>Here are a few hints on diagnosing common problems.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Starting-the-buildslave">Starting the buildslave</a>
+<li><a accesskey="2" href="#Connecting-to-the-buildmaster">Connecting to the buildmaster</a>
+<li><a accesskey="3" href="#Forcing-Builds">Forcing Builds</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Starting-the-buildslave"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Connecting-to-the-buildmaster">Connecting to the buildmaster</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Troubleshooting">Troubleshooting</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Troubleshooting">Troubleshooting</a>
+
+</div>
+
+<h4 class="subsection">2.9.1 Starting the buildslave</h4>
+
+<p>Cron jobs are typically run with a minimal shell (<samp><span class="file">/bin/sh</span></samp>, not
+<samp><span class="file">/bin/bash</span></samp>), and tilde expansion is not always performed in such
+commands. You may want to use explicit paths, because the <code>PATH</code>
+is usually quite short and doesn't include anything set by your
+shell's startup scripts (<samp><span class="file">.profile</span></samp>, <samp><span class="file">.bashrc</span></samp>, etc). If
+you've installed buildbot (or other python libraries) to an unusual
+location, you may need to add a <code>PYTHONPATH</code> specification (note
+that python will do tilde-expansion on <code>PYTHONPATH</code> elements by
+itself). Sometimes it is safer to fully specify everything:
+
+<pre class="example">     @reboot PYTHONPATH=~/lib/python /usr/local/bin/buildbot start /usr/home/buildbot/basedir
+</pre>
+   <p>Take the time to get the @reboot job set up. Otherwise, things will work
+fine for a while, but the first power outage or system reboot you have will
+stop the buildslave with nothing but the cries of sorrowful developers to
+remind you that it has gone away.
+
+<div class="node">
+<p><hr>
+<a name="Connecting-to-the-buildmaster"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Forcing-Builds">Forcing Builds</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Starting-the-buildslave">Starting the buildslave</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Troubleshooting">Troubleshooting</a>
+
+</div>
+
+<h4 class="subsection">2.9.2 Connecting to the buildmaster</h4>
+
+<p>If the buildslave cannot connect to the buildmaster, the reason should
+be described in the <samp><span class="file">twistd.log</span></samp> logfile. Some common problems
+are an incorrect master hostname or port number, or a mistyped bot
+name or password. If the buildslave loses the connection to the
+master, it is supposed to attempt to reconnect with an
+exponentially-increasing backoff. Each attempt (and the time of the
+next attempt) will be logged. If you get impatient, just manually stop
+and re-start the buildslave.
+
+   <p>When the buildmaster is restarted, all slaves will be disconnected,
+and will attempt to reconnect as usual. The reconnect time will depend
+upon how long the buildmaster is offline (i.e. how far up the
+exponential backoff curve the slaves have travelled). Again,
+<code>buildbot stop </code><var>BASEDIR</var><code>; buildbot start </code><var>BASEDIR</var> will
+speed up the process.
+
+<div class="node">
+<p><hr>
+<a name="Forcing-Builds"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Connecting-to-the-buildmaster">Connecting to the buildmaster</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Troubleshooting">Troubleshooting</a>
+
+</div>
+
+<h4 class="subsection">2.9.3 Forcing Builds</h4>
+
+<p>From the buildmaster's main status web page, you can force a build to
+be run on your build slave. Figure out which column is for a builder
+that runs on your slave, click on that builder's name, and the page
+that comes up will have a &ldquo;Force Build&rdquo; button. Fill in the form,
+hit the button, and a moment later you should see your slave's
+<samp><span class="file">twistd.log</span></samp> filling with commands being run. Using <code>pstree</code>
+or <code>top</code> should also reveal the cvs/make/gcc/etc processes being
+run by the buildslave. Note that the same web page should also show
+the <samp><span class="file">admin</span></samp> and <samp><span class="file">host</span></samp> information files that you configured
+earlier.
+
+<div class="node">
+<p><hr>
+<a name="Concepts"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Configuration">Configuration</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Installation">Installation</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">3 Concepts</h2>
+
+<p>This chapter defines some of the basic concepts that the Buildbot
+uses. You'll need to understand how the Buildbot sees the world to
+configure it properly.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Version-Control-Systems">Version Control Systems</a>
+<li><a accesskey="2" href="#Schedulers">Schedulers</a>
+<li><a accesskey="3" href="#BuildSet">BuildSet</a>
+<li><a accesskey="4" href="#BuildRequest">BuildRequest</a>
+<li><a accesskey="5" href="#Builder">Builder</a>
+<li><a accesskey="6" href="#Users">Users</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Version-Control-Systems"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Schedulers">Schedulers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Concepts">Concepts</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.1 Version Control Systems</h3>
+
+<p><a name="index-Version-Control-5"></a>
+The source trees that the Buildbot builds come from a Version Control System of some kind. 
+CVS and Subversion are two popular ones, but the Buildbot supports
+others. All VC systems have some notion of an upstream
+<code>repository</code> which acts as a server<a rel="footnote" href="#fn-2" name="fnd-2"><sup>2</sup></a>, from which clients
+can obtain source trees according to various parameters. The VC
+repository provides source trees of various projects, for different
+branches, and from various points in time. The first thing we have to
+do is to specify which source tree we want to get.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Generalizing-VC-Systems">Generalizing VC Systems</a>
+<li><a accesskey="2" href="#Source-Tree-Specifications">Source Tree Specifications</a>
+<li><a accesskey="3" href="#How-Different-VC-Systems-Specify-Sources">How Different VC Systems Specify Sources</a>
+<li><a accesskey="4" href="#Attributes-of-Changes">Attributes of Changes</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Generalizing-VC-Systems"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Source-Tree-Specifications">Source Tree Specifications</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Version-Control-Systems">Version Control Systems</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Version-Control-Systems">Version Control Systems</a>
+
+</div>
+
+<h4 class="subsection">3.1.1 Generalizing VC Systems</h4>
+
+<p>For the purposes of the Buildbot, we will try to generalize all VC
+systems as having repositories that each provide sources for a variety
+of projects. Each project is defined as a directory tree with source
+files. The individual files may each have revisions, but we ignore
+that and treat the project as a whole as having a set of revisions. 
+Each time someone commits a change to the project, a new revision
+becomes available. These revisions can be described by a tuple with
+two items: the first is a branch tag, and the second is some kind of
+timestamp or revision stamp. Complex projects may have multiple branch
+tags, but there is always a default branch. The timestamp may be an
+actual timestamp (such as the -D option to CVS), or it may be a
+monotonically-increasing transaction number (such as the change number
+used by SVN and P4, or the revision number used by Arch, or a labeled
+tag used in CVS)<a rel="footnote" href="#fn-3" name="fnd-3"><sup>3</sup></a>. The SHA1 revision ID used by Monotone and
+Mercurial is also a kind of revision stamp, in that it specifies a
+unique copy of the source tree, as does a Darcs &ldquo;context&rdquo; file.
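+
+   <p>Purely as an illustration of this generalization (these tuples are not
+a Buildbot API, just a way of thinking about revisions), a few such
+branch/revision-stamp pairs might be written as:
+
+<pre class="example">     ("HEAD",  "2006-08-16 09:00:00")   # CVS: branch tag plus timestamp
+     (None,    607)                     # SVN: default branch, revision number
+     ("trunk", "30bf0d43...")           # Mercurial: branch plus changeset ID
+</pre>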
+
+   <p>When we aren't intending to make any changes to the sources we check out
+(at least not any that need to be committed back upstream), there are two
+basic ways to use a VC system:
+
+     <ul>
+<li>Retrieve a specific set of source revisions: some tag or key is used
+to index this set, which is fixed and cannot be changed by subsequent
+developers committing new changes to the tree. Releases are built from
+tagged revisions like this, so that they can be rebuilt again later
+(probably with controlled modifications). 
+<li>Retrieve the latest sources along a specific branch: some tag is used
+to indicate which branch is to be used, but within that constraint we want
+to get the latest revisions. 
+</ul>
+
+   <p>Build personnel or CM staff typically use the first approach: the
+build that results is (ideally) completely specified by the two
+parameters given to the VC system: repository and revision tag. This
+gives QA and end-users something concrete to point at when reporting
+bugs. Release engineers are also reportedly fond of shipping code that
+can be traced back to a concise revision tag of some sort.
+
+   <p>Developers are more likely to use the second approach: each morning
+the developer does an update to pull in the changes committed by the
+team over the last day. These builds are not easy to fully specify: it
+depends upon exactly when you did a checkout, and upon what local
+changes the developer has in their tree. Developers do not normally
+tag each build they produce, because there is usually significant
+overhead involved in creating these tags. Recreating the trees used by
+one of these builds can be a challenge. Some VC systems may provide
+implicit tags (like a revision number), while others may allow the use
+of timestamps to mean &ldquo;the state of the tree at time X&rdquo; as opposed
+to a tree-state that has been explicitly marked.
+
+   <p>The Buildbot is designed to help developers, so it usually works in
+terms of <em>the latest</em> sources as opposed to specific tagged
+revisions. However, it would really prefer to build from reproducible
+source trees, so implicit revisions are used whenever possible.
+
+<div class="node">
+<p><hr>
+<a name="Source-Tree-Specifications"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#How-Different-VC-Systems-Specify-Sources">How Different VC Systems Specify Sources</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Generalizing-VC-Systems">Generalizing VC Systems</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Version-Control-Systems">Version Control Systems</a>
+
+</div>
+
+<h4 class="subsection">3.1.2 Source Tree Specifications</h4>
+
+<p>So for the Buildbot's purposes we treat each VC system as a server
+which can take a list of specifications as input and produce a source
+tree as output. Some of these specifications are static: they are
+attributes of the builder and do not change over time. Others are more
+variable: each build will have a different value. The repository is
+changed over time by a sequence of Changes, each of which represents a
+single developer making changes to some set of files. These Changes
+are cumulative<a rel="footnote" href="#fn-4" name="fnd-4"><sup>4</sup></a>.
+
+   <p>For normal builds, the Buildbot wants to get well-defined source trees
+that contain specific Changes, and exclude other Changes that may have
+occurred after the desired ones. We assume that the Changes arrive at
+the buildbot (through one of the mechanisms described in <a href="#Change-Sources">Change Sources</a>) in the same order in which they are committed to the
+repository. The Buildbot waits for the tree to become &ldquo;stable&rdquo;
+before initiating a build, for two reasons. The first is that
+developers frequently make multiple related commits in quick
+succession, even when the VC system provides ways to make atomic
+transactions involving multiple files at the same time. Running a
+build in the middle of these sets of changes would use an inconsistent
+set of source files, and is likely to fail (and is certain to be less
+useful than a build which uses the full set of changes). The
+tree-stable-timer is intended to avoid these useless builds that
+include some of the developer's changes but not all. The second reason
+is that some VC systems (e.g. CVS) do not provide repository-wide
+transaction numbers, so that timestamps are the only way to refer to
+a specific repository state. These timestamps may be somewhat
+ambiguous, due to processing and notification delays. By waiting until
+the tree has been stable for, say, 10 minutes, we can choose a
+timestamp from the middle of that period to use for our source
+checkout, and then be reasonably sure that any clock-skew errors will
+not cause the build to be performed on an inconsistent set of source
+files.
+
+   <p>The Schedulers always use the tree-stable-timer, with a timeout that
+is configured to reflect a reasonable tradeoff between build latency
+and change frequency. When the VC system provides coherent
+repository-wide revision markers (such as Subversion's revision
+numbers, or in fact anything other than CVS's timestamps), the
+resulting Build is simply performed against a source tree defined by
+that revision marker. When the VC system does not provide this, a
+timestamp from the middle of the tree-stable period is used to
+generate the source tree<a rel="footnote" href="#fn-5" name="fnd-5"><sup>5</sup></a>.
+
+<div class="node">
+<p><hr>
+<a name="How-Different-VC-Systems-Specify-Sources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Attributes-of-Changes">Attributes of Changes</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Source-Tree-Specifications">Source Tree Specifications</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Version-Control-Systems">Version Control Systems</a>
+
+</div>
+
+<h4 class="subsection">3.1.3 How Different VC Systems Specify Sources</h4>
+
+<p>For CVS, the static specifications are <code>repository</code> and
+<code>module</code>. In addition to those, each build uses a timestamp (or
+omits the timestamp to mean <code>the latest</code>) and <code>branch tag</code>
+(which defaults to HEAD). These parameters collectively specify a set
+of sources from which a build may be performed.
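+
+   <p>Purely as an illustration of this split, the parameters for a
+CVS-based builder amount to a couple of static values and a couple of
+per-build values (the variable names below are illustrative, not the
+actual step arguments, which are documented later in this manual):
+
+<pre class="example">     # static: attributes of the builder, fixed in the config file
+     cvsroot   = ":pserver:anonymous@cvs.example.org:/cvsroot/project"
+     cvsmodule = "project"
+     # per-build: chosen for each individual build
+     branch    = "HEAD"       # the branch tag, HEAD by default
+     timestamp = None         # None means "the latest"
+</pre>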
+
+   <p><a href="http://subversion.tigris.org">Subversion</a> combines the
+repository, module, and branch into a single <code>Subversion URL</code>
+parameter. Within that scope, source checkouts can be specified by a
+numeric <code>revision number</code> (a repository-wide
+monotonically-increasing marker, such that each transaction that
+changes the repository is indexed by a different revision number), or
+a revision timestamp. When branches are used, the repository and
+module form a static <code>baseURL</code>, while each build has a
+<code>revision number</code> and a <code>branch</code> (which defaults to a
+statically-specified <code>defaultBranch</code>). The <code>baseURL</code> and
+<code>branch</code> are simply concatenated together to derive the
+<code>svnurl</code> to use for the checkout.
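+
+   <p>As a rough sketch (the function and variable names here are
+illustrative, not Buildbot internals), the concatenation amounts to
+nothing more than joining two strings:
+
+<pre class="example">     # rough sketch; the names here are illustrative, not Buildbot internals
+     baseURL = "svn+ssh://svn.example.org/repos/project/"
+     defaultBranch = "trunk"
+     
+     def svnurl_for(branch=None):
+         # each build may override the branch; otherwise the default is used
+         return baseURL + (branch or defaultBranch)
+     
+     # svnurl_for() -> ".../repos/project/trunk"
+     # svnurl_for("branches/warner-newfeature") -> ".../repos/branches/warner-newfeature"
+</pre>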
+
+   <p><a href="http://www.perforce.com/">Perforce</a> is similar. The server
+is specified through a <code>P4PORT</code> parameter. Module and branch
+are specified in a single depot path, and revisions are
+depot-wide. When branches are used, the <code>p4base</code> and
+<code>defaultBranch</code> are concatenated together to produce the depot
+path.
+
+   <p><a href="http://wiki.gnuarch.org/">Arch</a> and
+<a href="http://bazaar.canonical.com/">Bazaar</a> specify a repository by
+URL, as well as a <code>version</code> which is kind of like a branch name. 
+Arch uses the word <code>archive</code> to represent the repository. Arch
+lets you push changes from one archive to another, removing the strict
+centralization required by CVS and SVN. It retains the distinction
+between repository and working directory that most other VC systems
+use. For complex multi-module directory structures, Arch has a
+built-in <code>build config</code> layer, with which the checkout process takes
+two steps. First, an initial bootstrap checkout is performed to
+retrieve a set of build-config files. Second, one of these files is
+used to figure out which archives/modules should be used to populate
+subdirectories of the initial checkout.
+
+   <p>Builders which use Arch and Bazaar therefore have a static archive
+<code>url</code>, and a default &ldquo;branch&rdquo; (which is a string that specifies
+a complete category&ndash;branch&ndash;version triple). Each build can have its
+own branch (the category&ndash;branch&ndash;version string) to override the
+default, as well as a revision number (which is turned into a
+&ndash;patch-NN suffix when performing the checkout).
+
+   <p><a href="http://abridgegame.org/darcs/">Darcs</a> doesn't really have the
+notion of a single master repository. Nor does it really have
+branches. In Darcs, each working directory is also a repository, and
+there are operations to push and pull patches from one of these
+<code>repositories</code> to another. For the Buildbot's purposes, all you
+need to do is specify the URL of a repository that you want to build
+from. The build slave will then pull the latest patches from that
+repository and build them. Multiple branches are implemented by using
+multiple repositories (possibly living on the same server).
+
+   <p>Builders which use Darcs therefore have a static <code>repourl</code> which
+specifies the location of the repository. If branches are being used,
+the source Step is instead configured with a <code>baseURL</code> and a
+<code>defaultBranch</code>, and the two strings are simply concatenated
+together to obtain the repository's URL. Each build then has a
+specific branch which replaces <code>defaultBranch</code>, or just uses the
+default one. Instead of a revision number, each build can have a
+&ldquo;context&rdquo;, which is a string that records all the patches that are
+present in a given tree (this is the output of <samp><span class="command">darcs changes
+--context</span></samp>, and is considerably less concise than, e.g. Subversion's
+revision number, but the patch-reordering flexibility of Darcs makes
+it impossible to provide a shorter useful specification).
+
+   <p><a href="http://selenic.com/mercurial">Mercurial</a> is like Darcs, in that
+each branch is stored in a separate repository. The <code>repourl</code>,
+<code>baseURL</code>, and <code>defaultBranch</code> arguments are all handled the
+same way as with Darcs. The &ldquo;revision&rdquo;, however, is the hash
+identifier returned by <samp><span class="command">hg identify</span></samp>.
+
+<div class="node">
+<p><hr>
+<a name="Attributes-of-Changes"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#How-Different-VC-Systems-Specify-Sources">How Different VC Systems Specify Sources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Version-Control-Systems">Version Control Systems</a>
+
+</div>
+
+<h4 class="subsection">3.1.4 Attributes of Changes</h4>
+
+<h3 class="heading">Who</h3>
+
+<p>Each Change has a <code>who</code> attribute, which specifies which
+developer is responsible for the change. This is a string which comes
+from a namespace controlled by the VC repository. Frequently this
+means it is a username on the host which runs the repository, but not
+all VC systems require this (Arch, for example, uses a fully-qualified
+<code>Arch ID</code>, which looks like an email address, as does Darcs). 
+Each StatusNotifier will map the <code>who</code> attribute into something
+appropriate for their particular means of communication: an email
+address, an IRC handle, etc.
+
+<h3 class="heading">Files</h3>
+
+<p>It also has a list of <code>files</code>, which are just the tree-relative
+filenames of any files that were added, deleted, or modified for this
+Change. These filenames are used by the <code>fileIsImportant</code>
+function (in the Scheduler) to decide whether it is worth triggering a
+new build or not, e.g. the function could use
+<code>filename.endswith(".c")</code> to only run a build if a C file were
+checked in. Certain BuildSteps can also use the list of changed files
+to run a more targeted series of tests, e.g. the
+<code>python_twisted.Trial</code> step can run just the unit tests that
+provide coverage for the modified .py files instead of running the
+full test suite.
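+
+   <p>A minimal sketch of such a function (illustrative only; the
+corresponding Scheduler argument is <code>fileIsImportant</code>, described
+later in this manual) might be:
+
+<pre class="example">     # illustrative sketch of a fileIsImportant-style predicate: only changes
+     # that touch C sources or headers are worth a new build
+     def isImportant(change):
+         for filename in change.files:
+             if filename.endswith(".c") or filename.endswith(".h"):
+                 return True
+         return False
+</pre>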
+
+<h3 class="heading">Comments</h3>
+
+<p>The Change also has a <code>comments</code> attribute, which is a string
+containing any checkin comments.
+
+<h3 class="heading">Revision</h3>
+
+<p>Each Change can have a <code>revision</code> attribute, which describes how
+to get a tree with a specific state: a tree which includes this Change
+(and all that came before it) but none that come after it. If this
+information is unavailable, the <code>.revision</code> attribute will be
+<code>None</code>. These revisions are provided by the ChangeSource, and
+consumed by the <code>computeSourceRevision</code> method in the appropriate
+<code>step.Source</code> class.
+
+     <dl>
+<dt>`<samp><span class="samp">CVS</span></samp>'<dd><code>revision</code> is an int, seconds since the epoch
+<br><dt>`<samp><span class="samp">SVN</span></samp>'<dd><code>revision</code> is an int, a transaction number (r%d)
+<br><dt>`<samp><span class="samp">Darcs</span></samp>'<dd><code>revision</code> is a large string, the output of <code>darcs changes --context</code>
+<br><dt>`<samp><span class="samp">Mercurial</span></samp>'<dd><code>revision</code> is a short string (a hash ID), the output of <code>hg identify</code>
+<br><dt>`<samp><span class="samp">Arch/Bazaar</span></samp>'<dd><code>revision</code> is the full revision ID (ending in &ndash;patch-%d)
+<br><dt>`<samp><span class="samp">P4</span></samp>'<dd><code>revision</code> is an int, the transaction number
+</dl>
+
+<h3 class="heading">Branches</h3>
+
+<p>The Change might also have a <code>branch</code> attribute. This indicates
+that all of the Change's files are in the same named branch. The
+Schedulers get to decide whether the branch should be built or not.
+
+   <p>For VC systems like CVS, Arch, and Monotone, the <code>branch</code> name is
+unrelated to the filename (that is, the branch name and the filename
+inhabit unrelated namespaces). For SVN, branches are expressed as
+subdirectories of the repository, so the file's &ldquo;svnurl&rdquo; is a
+combination of some base URL, the branch name, and the filename within
+the branch. (In a sense, the branch name and the filename inhabit the
+same namespace). Darcs branches are subdirectories of a base URL just
+like SVN. Mercurial branches are the same as Darcs.
+
+     <dl>
+<dt>`<samp><span class="samp">CVS</span></samp>'<dd>branch='warner-newfeature', files=['src/foo.c']
+<br><dt>`<samp><span class="samp">SVN</span></samp>'<dd>branch='branches/warner-newfeature', files=['src/foo.c']
+<br><dt>`<samp><span class="samp">Darcs</span></samp>'<dd>branch='warner-newfeature', files=['src/foo.c']
+<br><dt>`<samp><span class="samp">Mercurial</span></samp>'<dd>branch='warner-newfeature', files=['src/foo.c']
+<br><dt>`<samp><span class="samp">Arch/Bazaar</span></samp>'<dd>branch='buildbot&ndash;usebranches&ndash;0', files=['buildbot/master.py']
+</dl>
+
+<h3 class="heading">Links</h3>
+
+<!-- TODO: who is using 'links'? how is it being used? -->
+<p>Finally, the Change might have a <code>links</code> list, which is intended
+to provide a list of URLs to a <em>viewcvs</em>-style web page that
+provides more detail for this Change, perhaps including the full file
+diffs.
+
+<div class="node">
+<p><hr>
+<a name="Schedulers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildSet">BuildSet</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Version-Control-Systems">Version Control Systems</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.2 Schedulers</h3>
+
+<p><a name="index-Scheduler-6"></a>
+Each Buildmaster has a set of <code>Scheduler</code> objects, each of which
+gets a copy of every incoming Change. The Schedulers are responsible
+for deciding when Builds should be run. Some Buildbot installations
+might have a single Scheduler, while others may have several, each for
+a different purpose.
+
+   <p>For example, a &ldquo;quick&rdquo; scheduler might exist to give immediate
+feedback to developers, hoping to catch obvious problems in the code
+that can be detected quickly. These typically do not run the full test
+suite, nor do they run on a wide variety of platforms. They also
+usually do a VC update rather than performing a brand-new checkout
+each time. You could have a &ldquo;quick&rdquo; scheduler which uses a 30-second
+timeout and feeds a single &ldquo;quick&rdquo; Builder that uses a VC
+<code>mode='update'</code> setting.
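+
+   <p>A minimal sketch of such an arrangement (the configuration syntax is
+covered in the Configuration chapter; the Builder itself is omitted
+here):
+
+<pre class="example">     # minimal sketch: a 30-second tree-stable-timer feeding one quick Builder
+     from buildbot import scheduler
+     quick = scheduler.Scheduler("quick", None, 30, ["quick-linux"])
+     c['schedulers'] = [quick]
+     # the "quick-linux" Builder would use a source step with mode='update'
+</pre>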
+
+   <p>A separate &ldquo;full&rdquo; scheduler would run more comprehensive tests a
+little while later, to catch more subtle problems. This scheduler
+would have a longer tree-stable-timer, maybe 30 minutes, and would
+feed multiple Builders (with a <code>mode=</code> of <code>'copy'</code>,
+<code>'clobber'</code>, or <code>'export'</code>).
+
+   <p>The <code>tree-stable-timer</code> and <code>fileIsImportant</code> decisions are
+made by the Scheduler. Dependencies are also implemented here. 
+Periodic builds (those which are run every N seconds rather than after
+new Changes arrive) are triggered by a special <code>Periodic</code>
+Scheduler subclass. The default Scheduler class can also be told to
+watch for specific branches, ignoring Changes on other branches. This
+may be useful if you have a trunk and a few release branches which
+should be tracked, but you don't want to have the Buildbot pay
+attention to several dozen private user branches.
+
+   <p>Some Schedulers may trigger builds for reasons other than recent
+Changes. For example, a Scheduler subclass could connect to a
+remote buildmaster and watch for builds of a library to succeed before
+triggering a local build that uses that library.
+
+   <p>Each Scheduler creates and submits <code>BuildSet</code> objects to the
+<code>BuildMaster</code>, which is then responsible for making sure the
+individual <code>BuildRequests</code> are delivered to the target
+<code>Builders</code>.
+
+   <p><code>Scheduler</code> instances are activated by placing them in the
+<code>c['schedulers']</code> list in the buildmaster config file. Each
+Scheduler has a unique name.
+
+<div class="node">
+<p><hr>
+<a name="BuildSet"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildRequest">BuildRequest</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Schedulers">Schedulers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.3 BuildSet</h3>
+
+<p><a name="index-BuildSet-7"></a>
+A <code>BuildSet</code> is the name given to a set of Builds that all
+compile/test the same version of the tree on multiple Builders. In
+general, all these component Builds will perform the same sequence of
+Steps, using the same source code, but on different platforms or
+against a different set of libraries.
+
+   <p>The <code>BuildSet</code> is tracked as a single unit, which fails if any of
+the component Builds have failed, and therefore can succeed only if
+<em>all</em> of the component Builds have succeeded. There are two kinds
+of status notification messages that can be emitted for a BuildSet:
+the <code>firstFailure</code> type (which fires as soon as we know the
+BuildSet will fail), and the <code>Finished</code> type (which fires once
+the BuildSet has completely finished, regardless of whether the
+overall set passed or failed).
+
+   <p>A <code>BuildSet</code> is created with a <em>source stamp</em> tuple of
+(branch, revision, changes, patch), some of which may be None, and a
+list of Builders on which it is to be run. They are then given to the
+BuildMaster, which is responsible for creating a separate
+<code>BuildRequest</code> for each Builder.
+
+   <p>There are a couple of different likely values for the
+<code>SourceStamp</code>:
+
+     <dl>
+<dt><code>(revision=None, changes=[CHANGES], patch=None)</code><dd>This is a <code>SourceStamp</code> used when a series of Changes have
+triggered a build. The VC step will attempt to check out a tree that
+contains CHANGES (and any changes that occurred before CHANGES, but
+not any that occurred after them).
+
+     <br><dt><code>(revision=None, changes=None, patch=None)</code><dd>This builds the most recent code on the default branch. This is the
+sort of <code>SourceStamp</code> that would be used on a Build that was
+triggered by a user request, or a Periodic scheduler. It is also
+possible to configure the VC Source Step to always check out the
+latest sources rather than paying attention to the Changes in the
+SourceStamp, which will result in the same behavior as this.
+
+     <br><dt><code>(branch=BRANCH, revision=None, changes=None, patch=None)</code><dd>This builds the most recent code on the given BRANCH. Again, this is
+generally triggered by a user request or Periodic build.
+
+     <br><dt><code>(revision=REV, changes=None, patch=(LEVEL, DIFF))</code><dd>This checks out the tree at the given revision REV, then applies a
+patch (using <code>patch -pLEVEL &lt;DIFF</code>). The <a href="#try">try</a> feature uses
+this kind of <code>SourceStamp</code>. If <code>patch</code> is None, the patching
+step is bypassed.
+
+   </dl>
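+
+   <p>Purely for illustration, these variants can be written out as the
+(branch, revision, changes, patch) tuples described above (the
+variable names and values below are made up):
+
+<pre class="example">     # illustration only: the common source-stamp shapes, written as the
+     # (branch, revision, changes, patch) tuples described above
+     changes = []                  # would hold the triggering Change objects
+     diff_text = "(a unified diff, as a string)"
+     
+     latest_default = (None, None, None, None)             # newest code, default branch
+     latest_branch  = ("branches/1.0", None, None, None)   # newest code, given branch
+     from_changes   = (None, None, changes, None)          # triggered by recent Changes
+     rev_plus_patch = (None, 1234, None, (1, diff_text))   # revision 1234 plus a patch
+</pre>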
+
+   <p>The buildmaster is responsible for turning the <code>BuildSet</code> into a
+set of <code>BuildRequest</code> objects and queueing them on the
+appropriate Builders.
+
+<div class="node">
+<p><hr>
+<a name="BuildRequest"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Builder">Builder</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildSet">BuildSet</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.4 BuildRequest</h3>
+
+<p><a name="index-BuildRequest-8"></a>
+A <code>BuildRequest</code> is a request to build a specific set of sources
+on a single specific Builder. Each Builder runs the
+<code>BuildRequest</code> as soon as it can (i.e. when an associated
+buildslave becomes free).
+
+   <p>The <code>BuildRequest</code> contains the <code>SourceStamp</code> specification. 
+The actual process of running the build (the series of Steps that will
+be executed) is implemented by the <code>Build</code> object. In the future
+this might be changed, to have the <code>Build</code> define <em>what</em>
+gets built, and a separate <code>BuildProcess</code> (provided by the
+Builder) to define <em>how</em> it gets built.
+
+   <p>The <code>BuildRequest</code> may be mergeable with other compatible
+<code>BuildRequest</code>s. Builds that are triggered by incoming Changes
+will generally be mergeable. Builds that are triggered by user
+requests are generally not, unless they are multiple requests to build
+the <em>latest sources</em> of the same branch.
+
+<div class="node">
+<p><hr>
+<a name="Builder"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Users">Users</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildRequest">BuildRequest</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.5 Builder</h3>
+
+<p><a name="index-Builder-9"></a>
+The <code>Builder</code> is a long-lived object which controls all Builds of
+a given type. Each one is created when the config file is first
+parsed, and lives forever (or rather until it is removed from the
+config file). It mediates the connections to the buildslaves that do
+all the work, and is responsible for creating the <code>Build</code> objects
+that decide <em>how</em> a build is performed (i.e., which steps are
+executed in what order).
+
+   <p>Each <code>Builder</code> gets a unique name, and the path name of a
+directory where it gets to do all its work (there is a
+buildmaster-side directory for keeping status information, as well as
+a buildslave-side directory where the actual checkout/compile/test
+commands are executed). It also gets a <code>BuildFactory</code>, which is
+responsible for creating new <code>Build</code> instances: because the
+<code>Build</code> instance is what actually performs each build, choosing
+the <code>BuildFactory</code> is the way to specify what happens each time a
+build is done.
+
+   <p>Each <code>Builder</code> is associated with one or more <code>BuildSlaves</code>. 
+A <code>Builder</code> which is used to perform OS-X builds (as opposed to
+Linux or Solaris builds) should naturally be associated with an
+OS-X-based buildslave.
+
+<div class="node">
+<p><hr>
+<a name="Users"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Builder">Builder</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.6 Users</h3>
+
+<p><a name="index-Users-10"></a>
+Buildbot has a somewhat limited awareness of <em>users</em>. It assumes
+the world consists of a set of developers, each of whom can be
+described by a couple of simple attributes. These developers make
+changes to the source code, causing builds which may succeed or fail.
+
+   <p>Each developer is primarily known through the source control system. Each
+Change object that arrives is tagged with a <code>who</code> field that
+typically gives the account name (on the repository machine) of the user
+responsible for that change. This string is the primary key by which the
+User is known, and is displayed on the HTML status pages and in each Build's
+&ldquo;blamelist&rdquo;.
+
+   <p>To do more with the User than just refer to them, this username needs to
+be mapped into an address of some sort. The responsibility for this mapping
+is left up to the status module which needs the address. The core code knows
+nothing about email addresses or IRC nicknames, just user names.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Doing-Things-With-Users">Doing Things With Users</a>
+<li><a accesskey="2" href="#Email-Addresses">Email Addresses</a>
+<li><a accesskey="3" href="#IRC-Nicknames">IRC Nicknames</a>
+<li><a accesskey="4" href="#Live-Status-Clients">Live Status Clients</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Doing-Things-With-Users"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Email-Addresses">Email Addresses</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Users">Users</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Users">Users</a>
+
+</div>
+
+<h4 class="subsection">3.6.1 Doing Things With Users</h4>
+
+<p>Each Change has a single User who is responsible for that Change. Most
+Builds have a set of Changes: the Build represents the first time these
+Changes have been built and tested by the Buildbot. The build has a
+&ldquo;blamelist&rdquo; that consists of a simple union of the Users responsible
+for all the Build's Changes.
+
+   <p>The Build provides (through the IBuildStatus interface) a list of Users
+who are &ldquo;involved&rdquo; in the build. For now this is equal to the
+blamelist, but in the future it will be expanded to include a &ldquo;build
+sheriff&rdquo; (a person who is &ldquo;on duty&rdquo; at that time and responsible for
+watching over all builds that occur during their shift), as well as
+per-module owners who simply want to keep watch over their domain (chosen by
+subdirectory or a regexp matched against the filenames pulled out of the
+Changes). The Involved Users are those who probably have an interest in the
+results of any given build.
+
+   <p>In the future, Buildbot will acquire the concept of &ldquo;Problems&rdquo;,
+which last longer than builds and have beginnings and ends. For example, a
+test case which passed in one build and then failed in the next is a
+Problem. The Problem lasts until the test case starts passing again, at
+which point the Problem is said to be &ldquo;resolved&rdquo;.
+
+   <p>If there appears to be a code change that went into the tree at the
+same time as the test started failing, that Change is marked as being
+responsible for the Problem, and the user who made the change is added
+to the Problem's &ldquo;Guilty&rdquo; list. In addition to this user, there may
+be others who share responsibility for the Problem (module owners,
+sponsoring developers). In addition to the Responsible Users, there
+may be a set of Interested Users, who take an interest in the fate of
+the Problem.
+
+   <p>Problems therefore have sets of Users who may want to be kept aware of
+the condition of the problem as it changes over time. If configured, the
+Buildbot can pester everyone on the Responsible list with increasing
+harshness until the problem is resolved, with the most harshness reserved
+for the Guilty parties themselves. The Interested Users may merely be told
+when the problem starts and stops, as they are not actually responsible for
+fixing anything.
+
+<div class="node">
+<p><hr>
+<a name="Email-Addresses"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#IRC-Nicknames">IRC Nicknames</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Doing-Things-With-Users">Doing Things With Users</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Users">Users</a>
+
+</div>
+
+<h4 class="subsection">3.6.2 Email Addresses</h4>
+
+<p>The <code>buildbot.status.mail.MailNotifier</code> class provides a
+status target which can send email about the results of each build. It
+accepts a static list of email addresses to which each message should be
+delivered, but it can also be configured to send mail to the Build's
+Interested Users. To do this, it needs a way to convert User names into
+email addresses.
+
+   <p>For many VC systems, the User Name is actually an account name on the
+system which hosts the repository. As such, turning the name into an
+email address is a simple matter of appending
+&ldquo;@repositoryhost.com&rdquo;. Some projects use other kinds of mappings
+(for example the preferred email address may be at &ldquo;project.org&rdquo;
+despite the repository host being named &ldquo;cvs.project.org&rdquo;), and some
+VC systems have full separation between the concept of a user and that
+of an account on the repository host (like Perforce). Some systems
+(like Arch) put a full contact email address in every change.
+
+   <p>To convert these names to addresses, the MailNotifier uses an EmailLookup
+object. This provides a .getAddress method which accepts a name and
+(eventually) returns an address. The default <code>MailNotifier</code>
+module provides an EmailLookup which simply appends a static string,
+configurable when the notifier is created. To create more complex behaviors
+(perhaps using an LDAP lookup, or using &ldquo;finger&rdquo; on a central host to
+determine a preferred address for the developer), provide a different object
+as the <code>lookup</code> argument.
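+
+   <p>A minimal sketch of such a lookup object (the class, the
+<code>IEmailLookup</code> declaration, and the addresses below are illustrative
+assumptions; the only documented requirement is the <code>getAddress</code>
+method described above):
+
+<pre class="example">     from zope.interface import implements
+     from buildbot import interfaces
+     from buildbot.status import mail
+     
+     class MapLookup:
+         # hypothetical lookup: a few explicit overrides, with a fallback of
+         # appending a domain name to the User name
+         implements(interfaces.IEmailLookup)
+         def __init__(self, domain, overrides):
+             self.domain = domain
+             self.overrides = overrides
+         def getAddress(self, name):
+             return self.overrides.get(name, "%s@%s" % (name, self.domain))
+     
+     m = mail.MailNotifier(fromaddr="buildbot@localhost",
+                           lookup=MapLookup("project.org",
+                                            {"warner": "warner@project.org"}))
+</pre>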
+
+   <p>In the future, when the Problem mechanism has been set up, the Buildbot
+will need to send mail to arbitrary Users. It will do this by locating a
+MailNotifier-like object among all the buildmaster's status targets, and
+asking it to send messages to various Users. This means the User-to-address
+mapping only has to be set up once, in your MailNotifier, and every email
+message the buildbot emits will take advantage of it.
+
+<div class="node">
+<p><hr>
+<a name="IRC-Nicknames"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Live-Status-Clients">Live Status Clients</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Email-Addresses">Email Addresses</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Users">Users</a>
+
+</div>
+
+<h4 class="subsection">3.6.3 IRC Nicknames</h4>
+
+<p>Like MailNotifier, the <code>buildbot.status.words.IRC</code> class
+provides a status target which can announce the results of each build. It
+also provides an interactive interface by responding to online queries
+posted in the channel or sent as private messages.
+
+   <p>In the future, the buildbot can be configured to map User names to IRC
+nicknames, to watch for the recent presence of these nicknames, and to
+deliver build status messages to the interested parties. Like
+<code>MailNotifier</code> does for email addresses, the <code>IRC</code> object
+will have an <code>IRCLookup</code> which is responsible for nicknames. The
+mapping can be set up statically, or it can be updated by online users
+themselves (by claiming a username with some kind of &ldquo;buildbot: i am
+user warner&rdquo; command).
+
+   <p>Once the mapping is established, the rest of the buildbot can ask the
+<code>IRC</code> object to send messages to various users. It can report on
+the likelihood that the user saw the given message (based upon how long the
+user has been inactive on the channel), which might prompt the Problem
+Hassler logic to send them an email message instead.
+
+<div class="node">
+<p><hr>
+<a name="Live-Status-Clients"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#IRC-Nicknames">IRC Nicknames</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Users">Users</a>
+
+</div>
+
+<h4 class="subsection">3.6.4 Live Status Clients</h4>
+
+<p>The Buildbot also offers a PB-based status client interface which can
+display real-time build status in a GUI panel on the developer's desktop. 
+This interface is normally anonymous, but it could be configured to let the
+buildmaster know <em>which</em> developer is using the status client. The
+status client could then be used as a message-delivery service, providing an
+alternative way to deliver low-latency high-interruption messages to the
+developer (like &ldquo;hey, you broke the build&rdquo;).
+
+<div class="node">
+<p><hr>
+<a name="Configuration"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Concepts">Concepts</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">4 Configuration</h2>
+
+<p><a name="index-Configuration-11"></a>
+The buildbot's behavior is defined by the &ldquo;config file&rdquo;, which
+normally lives in the <samp><span class="file">master.cfg</span></samp> file in the buildmaster's base
+directory (but this can be changed with an option to the
+<code>buildbot create-master</code> command). This file completely specifies
+which Builders are to be run, which slaves they should use, how
+Changes should be tracked, and where the status information is to be
+sent. The buildmaster's <samp><span class="file">buildbot.tac</span></samp> file names the base
+directory; everything else comes from the config file.
+
+   <p>A sample config file was installed for you when you created the
+buildmaster, but you will need to edit it before your buildbot will do
+anything useful.
+
+   <p>This chapter gives an overview of the format of this file and the
+various sections in it. You will need to read the later chapters to
+understand how to fill in each section properly.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Config-File-Format">Config File Format</a>
+<li><a accesskey="2" href="#Loading-the-Config-File">Loading the Config File</a>
+<li><a accesskey="3" href="#Defining-the-Project">Defining the Project</a>
+<li><a accesskey="4" href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a>
+<li><a accesskey="5" href="#Setting-the-slaveport">Setting the slaveport</a>
+<li><a accesskey="6" href="#Buildslave-Specifiers">Buildslave Specifiers</a>
+<li><a accesskey="7" href="#Defining-Builders">Defining Builders</a>
+<li><a accesskey="8" href="#Defining-Status-Targets">Defining Status Targets</a>
+<li><a accesskey="9" href="#Debug-options">Debug options</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Config-File-Format"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Loading-the-Config-File">Loading the Config File</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Configuration">Configuration</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.1 Config File Format</h3>
+
+<p>The config file is, fundamentally, just a piece of Python code which
+defines a dictionary named <code>BuildmasterConfig</code>, with a number of
+keys that are treated specially. You don't need to know Python to do
+basic configuration, though; you can just copy the syntax of the
+sample file. If you <em>are</em> comfortable writing Python code,
+however, you can use all the power of a full programming language to
+achieve more complicated configurations.
+
+   <p>The <code>BuildmasterConfig</code> name is the only one which matters: all
+other names defined during the execution of the file are discarded. 
+When parsing the config file, the Buildmaster generally compares the
+old configuration with the new one and performs the minimum set of
+actions necessary to bring the buildbot up to date: Builders which are
+not changed are left untouched, and Builders which are modified get to
+keep their old event history.
+
+   <p>Basic Python syntax: comments start with a hash character (&ldquo;#&rdquo;),
+tuples are defined with <code>(parenthesis, pairs)</code>, lists are
+defined with <code>[square, brackets]</code>, and tuples and lists are mostly
+interchangeable. Dictionaries (data structures which map &ldquo;keys&rdquo; to
+&ldquo;values&rdquo;) are defined with curly braces: <code>{'key1': 'value1',
+'key2': 'value2'} </code>. Function calls (and object instantiation) can use
+named parameters, like <code>w = html.Waterfall(http_port=8010)</code>.
+
+   <p>The config file starts with a series of <code>import</code> statements,
+which make various kinds of Steps and Status targets available for
+later use. The main <code>BuildmasterConfig</code> dictionary is created,
+then it is populated with a variety of keys. These keys are broken
+roughly into the following sections, each of which is documented in
+the rest of this chapter:
+
+     <ul>
+<li>Project Definitions
+<li>Change Sources / Schedulers
+<li>Slaveport
+<li>Buildslave Configuration
+<li>Builders / Interlocks
+<li>Status Targets
+<li>Debug options
+</ul>
+
+   <p>The config file can use a few names which are placed into its namespace:
+
+     <dl>
+<dt><code>basedir</code><dd>the base directory for the buildmaster. This string has not been
+expanded, so it may start with a tilde. It needs to be expanded before
+use. The config file is located in
+<code>os.path.expanduser(os.path.join(basedir, 'master.cfg'))</code>
+
+   </dl>
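+
+   <p>For example (a sketch; the <samp><span class="file">slaves.txt</span></samp> file is hypothetical),
+<code>basedir</code> can be used to locate auxiliary files kept next to
+<samp><span class="file">master.cfg</span></samp>:
+
+<pre class="example">     import os.path
+     # sketch: "slaves.txt" is a hypothetical file in the buildmaster's base
+     # directory, holding one "name password" pair per line
+     slavefile = os.path.expanduser(os.path.join(basedir, "slaves.txt"))
+     c['bots'] = [tuple(line.split())
+                  for line in open(slavefile) if line.strip()]
+</pre>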
+
+<div class="node">
+<p><hr>
+<a name="Loading-the-Config-File"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Defining-the-Project">Defining the Project</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Config-File-Format">Config File Format</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.2 Loading the Config File</h3>
+
+<p>The config file is only read at specific points in time. It is first
+read when the buildmaster is launched. Once it is running, there are
+various ways to ask it to reload the config file. If you are on the
+system hosting the buildmaster, you can send a <code>SIGHUP</code> signal to
+it: the <samp><span class="command">buildbot</span></samp> tool has a shortcut for this:
+
+<pre class="example">     buildbot reconfig <var>BASEDIR</var>
+</pre>
+   <p>This command will show you all of the lines from <samp><span class="file">twistd.log</span></samp>
+that relate to the reconfiguration. If there are any problems during
+the config-file reload, they will be displayed in these lines.
+
+   <p>The debug tool (<code>buildbot debugclient --master HOST:PORT</code>) has a
+&ldquo;Reload .cfg&rdquo; button which will also trigger a reload. In the
+future, there will be other ways to accomplish this step (probably a
+password-protected button on the web page, as well as a privileged IRC
+command).
+
+   <p>When reloading the config file, the buildmaster will endeavor to
+change as little as possible about the running system. For example,
+although old status targets may be shut down and new ones started up,
+any status targets that were not changed since the last time the
+config file was read will be left running and untouched. Likewise any
+Builders which have not been changed will be left running. If a
+Builder is modified (say, the build process is changed) while a Build
+is currently running, that Build will keep running with the old
+process until it completes. Any previously queued Builds (or Builds
+which get queued after the reconfig) will use the new process.
+
+<div class="node">
+<p><hr>
+<a name="Defining-the-Project"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Loading-the-Config-File">Loading the Config File</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.3 Defining the Project</h3>
+
+<p>There are a couple of basic settings that you use to tell the buildbot
+what project it is working on. This information is used by status
+reporters to let users find out more about the codebase being
+exercised by this particular Buildbot installation.
+
+<pre class="example">     c['projectName'] = "Buildbot"
+     c['projectURL'] = "http://buildbot.sourceforge.net/"
+     c['buildbotURL'] = "http://localhost:8010/"
+</pre>
+   <p><a name="index-c_005b_0027projectName_0027_005d-12"></a><code>projectName</code> is a short string will be used to describe the
+project that this buildbot is working on. For example, it is used as
+the title of the waterfall HTML page.
+
+   <p><a name="index-c_005b_0027projectURL_0027_005d-13"></a><code>projectURL</code> is a string that gives a URL for the project as a
+whole. HTML status displays will show <code>projectName</code> as a link to
+<code>projectURL</code>, to provide a link from buildbot HTML pages to your
+project's home page.
+
+   <p><a name="index-c_005b_0027buildbotURL_0027_005d-14"></a>The <code>buildbotURL</code> string should point to the location where the
+buildbot's internal web server (usually the <code>html.Waterfall</code>
+page) is visible. This typically uses the port number set when you
+create the <code>Waterfall</code> object: the buildbot needs your help to
+figure out a suitable externally-visible host name.
+
+   <p>When status notices are sent to users (either by email or over IRC),
+<code>buildbotURL</code> will be used to create a URL to the specific build
+or problem that they are being notified about. It will also be made
+available to queriers (over IRC) who want to find out where to get
+more information about this buildbot.
+
+<div class="node">
+<p><hr>
+<a name="Listing-Change-Sources-and-Schedulers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Setting-the-slaveport">Setting the slaveport</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Defining-the-Project">Defining the Project</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.4 Listing Change Sources and Schedulers</h3>
+
+<p><a name="index-c_005b_0027sources_0027_005d-15"></a>The <code>c['sources']</code> key is a list of ChangeSource
+instances<a rel="footnote" href="#fn-6" name="fnd-6"><sup>6</sup></a>. 
+This defines how the buildmaster learns about source code changes. 
+More information about what goes here is available in <a href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>.
+
+<pre class="example">     import buildbot.changes.pb
+     c['sources'] = [buildbot.changes.pb.PBChangeSource()]
+</pre>
+   <p><a name="index-c_005b_0027schedulers_0027_005d-16"></a><code>c['schedulers']</code> is a list of Scheduler instances, each of which
+causes builds to be started on a particular set of Builders. The two
+basic Scheduler classes you are likely to start with are
+<code>Scheduler</code> and <code>Periodic</code>, but you can write a customized
+subclass to implement more complicated build scheduling.
+
+   <p>The docstring for <code>buildbot.scheduler.Scheduler</code> is the best
+place to see all the options that can be used. Type <code>pydoc
+buildbot.scheduler.Scheduler</code> to see it, or look in
+<samp><span class="file">buildbot/scheduler.py</span></samp> directly.
+
+   <p>The basic Scheduler takes four arguments:
+
+     <dl>
+<dt><code>name</code><dd>Each Scheduler must have a unique name. This is only used in status
+displays.
+
+     <br><dt><code>branch</code><dd>This Scheduler will pay attention to a single branch, ignoring Changes
+that occur on other branches. Setting <code>branch</code> equal to the
+special value of <code>None</code> means it should only pay attention to the
+default branch. Note that <code>None</code> is a keyword, not a string, so
+you want to use <code>None</code> and not <code>"None"</code>.
+
+     <br><dt><code>treeStableTimer</code><dd>The Scheduler will wait for this many seconds before starting the
+build. If new changes are made during this interval, the timer will be
+restarted; in effect, the build will be started only after a change
+has been followed by this many seconds of inactivity.
+
+     <br><dt><code>builderNames</code><dd>When the tree-stable-timer finally expires, builds will be started on
+these Builders. Each Builder gets a unique name: these strings must
+match.
+
+   </dl>
+
+<pre class="example">     from buildbot import scheduler
+     quick = scheduler.Scheduler("quick", None, 60,
+                                 ["quick-linux", "quick-netbsd"])
+     full = scheduler.Scheduler("full", None, 5*60,
+                                ["full-linux", "full-netbsd", "full-OSX"])
+     nightly = scheduler.Periodic("nightly", ["full-solaris"], 24*60*60)
+     c['schedulers'] = [quick, full, nightly]
+</pre>
+   <p>In this example, the two &ldquo;quick&rdquo; builds are triggered 60 seconds
+after the tree has been changed. The &ldquo;full&rdquo; builds do not run quite
+so quickly (they wait 5 minutes), so hopefully if the quick builds
+fail due to a missing file or really simple typo, the developer can
+discover and fix the problem before the full builds are started. Both
+Schedulers only pay attention to the default branch: any changes on
+other branches are ignored by these Schedulers. Each Scheduler
+triggers a different set of Builders, referenced by name.
+
+   <p>The third Scheduler in this example just runs the full solaris build
+once per day. (Note that this Scheduler only lets you control the time
+between builds, not the absolute time-of-day of each Build, so this
+could easily wind up as a &ldquo;daily&rdquo; or &ldquo;every afternoon&rdquo; scheduler
+depending upon when it was first activated.)
+
+<ul class="menu">
+<li><a accesskey="1" href="#Scheduler-Types">Scheduler Types</a>
+<li><a accesskey="2" href="#Build-Dependencies">Build Dependencies</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Scheduler-Types"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Build-Dependencies">Build Dependencies</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.4.1 Scheduler Types</h4>
+
+<p><a name="index-buildbot_002escheduler_002eScheduler-17"></a><a name="index-buildbot_002escheduler_002eAnyBranchScheduler-18"></a><a name="index-buildbot_002escheduler_002ePeriodic-19"></a><a name="index-buildbot_002escheduler_002eNightly-20"></a>
+Here is a brief catalog of the available Scheduler types. All these
+Schedulers are classes in <code>buildbot.scheduler</code>, and the
+docstrings there are the best source of documentation on the arguments
+taken by each one.
+
+     <dl>
+<dt><code>Scheduler</code><dd>This is the default Scheduler class. It follows exactly one branch,
+and starts a configurable tree-stable-timer after each change on that
+branch. When the timer expires, it starts a build on some set of
+Builders. The Scheduler accepts a <code>fileIsImportant</code> function
+which can be used to ignore some Changes if they do not affect any
+&ldquo;important&rdquo; files.
+
+     <br><dt><code>AnyBranchScheduler</code><dd>This scheduler uses a tree-stable-timer like the default one, but
+follows multiple branches at once. Each branch gets a separate timer.
+
+     <br><dt><code>Dependent</code><dd>This scheduler watches an &ldquo;upstream&rdquo; Builder. When that Builder
+successfully builds a particular set of Changes, it triggers builds of
+the same code on a configured set of &ldquo;downstream&rdquo; builders. The next
+section (see <a href="#Build-Dependencies">Build Dependencies</a>) describes this scheduler in more
+detail.
+
+     <br><dt><code>Periodic</code><dd>This simple scheduler just triggers a build every N seconds.
+
+     <br><dt><code>Nightly</code><dd>This is a highly configurable periodic build scheduler, which triggers a
+build at particular times of day, week, month, or year. The
+configuration syntax is very similar to the well-known <code>crontab</code>
+format, in which you provide values for minute, hour, day, and month
+(some of which can be wildcards), and a build is triggered whenever
+the current time matches the given constraints. This can run a build
+every night, every morning, every weekend, alternate Thursdays, on
+your boss's birthday, etc.
+
+     <br><dt><code>Try_Jobdir / Try_Userpass</code><dd>This scheduler allows developers to use the <code>buildbot try</code>
+command to trigger builds of code they have not yet committed. See
+<a href="#try">try</a> for complete details.
+
+   </dl>
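+
+   <p>A brief sketch of two of these types in use (the argument names and
+order shown are an approximation; consult the <code>buildbot.scheduler</code>
+docstrings for the authoritative signatures):
+
+<pre class="example">     from buildbot import scheduler
+     # hedged sketch; see the buildbot.scheduler docstrings for the exact
+     # arguments accepted by each class
+     nightly = scheduler.Nightly("nightly", ["full-solaris"],
+                                 hour=3, minute=0)       # every day at 03:00
+     all_branches = scheduler.AnyBranchScheduler("all-branches",
+                                                 None,   # follow every branch
+                                                 5*60,
+                                                 ["full-linux"])
+     c['schedulers'] = [nightly, all_branches]
+</pre>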
+
+<div class="node">
+<p><hr>
+<a name="Build-Dependencies"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Scheduler-Types">Scheduler Types</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.4.2 Build Dependencies</h4>
+
+<p><a name="index-Dependent-21"></a><a name="index-Dependencies-22"></a><a name="index-buildbot_002escheduler_002eDependent-23"></a>
+It is common to wind up with one kind of build which should only be
+performed if the same source code was successfully handled by some
+other kind of build first. An example might be a packaging step: you
+might only want to produce .deb or RPM packages from a tree that was
+known to compile successfully and pass all unit tests. You could put
+the packaging step in the same Build as the compile and testing steps,
+but there might be other reasons to not do this (in particular you
+might have several Builders worth of compiles/tests, but only wish to
+do the packaging once). Another example is if you want to skip the
+&ldquo;full&rdquo; builds after a failing &ldquo;quick&rdquo; build of the same source
+code. Or, if one Build creates a product (like a compiled library)
+that is used by some other Builder, you'd want to make sure the
+consuming Build is run <em>after</em> the producing one.
+
+   <p>You can use <code>Dependencies</code> to express this relationship to the
+Buildbot. There is a special kind of Scheduler named
+<code>scheduler.Dependent</code> that will watch an &ldquo;upstream&rdquo; Scheduler
+for builds to complete successfully (on all of its Builders). Each
+time that happens, the same source code (i.e. the same
+<code>SourceStamp</code>) will be used to start a new set of builds, on a
+different set of Builders. This &ldquo;downstream&rdquo; scheduler doesn't pay
+attention to Changes at all, it only pays attention to the upstream
+scheduler.
+
+   <p>If the SourceStamp fails on any of the Builders in the upstream set,
+the downstream builds will not fire.
+
+<pre class="example">     from buildbot import scheduler
+     tests = scheduler.Scheduler("tests", None, 5*60,
+                                 ["full-linux", "full-netbsd", "full-OSX"])
+     package = scheduler.Dependent("package",
+                                   tests, # upstream scheduler
+                                   ["make-tarball", "make-deb", "make-rpm"])
+     c['schedulers'] = [tests, package]
+</pre>
+   <p>Note that <code>Dependent</code>'s upstream scheduler argument is given as a
+<code>Scheduler</code> <em>instance</em>, not a name. This makes it impossible
+to create circular dependencies in the config file.
+
+<div class="node">
+<p><hr>
+<a name="Setting-the-slaveport"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Buildslave-Specifiers">Buildslave Specifiers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.5 Setting the slaveport</h3>
+
+<p><a name="index-c_005b_0027slavePortnum_0027_005d-24"></a>
+The buildmaster will listen on a TCP port of your choosing for
+connections from buildslaves. It can also use this port for
+connections from remote Change Sources, status clients, and debug
+tools. This port should be visible to the outside world, and you'll
+need to tell your buildslave admins about your choice.
+
+   <p>It does not matter which port you pick, as long as it is externally
+visible; however, you should probably use something larger than 1024,
+since most operating systems don't allow non-root processes to bind to
+low-numbered ports. If your buildmaster is behind a firewall or a NAT
+box of some sort, you may have to configure your firewall to permit
+inbound connections to this port.
+
+<pre class="example">     c['slavePortnum'] = 10000
+</pre>
+   <p><code>c['slavePortnum']</code> is a <em>strports</em> specification string,
+defined in the <code>twisted.application.strports</code> module (try
+<samp><span class="command">pydoc twisted.application.strports</span></samp> to get documentation on
+the format). This means that you can have the buildmaster listen on a
+localhost-only port by doing:
+
+<pre class="example">     c['slavePortnum'] = "tcp:10000:interface=127.0.0.1"
+</pre>
+   <p>This might be useful if you only run buildslaves on the same machine,
+and they are all configured to contact the buildmaster at
+<code>localhost:10000</code>.
+
+<div class="node">
+<p><hr>
+<a name="Buildslave-Specifiers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Defining-Builders">Defining Builders</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Setting-the-slaveport">Setting the slaveport</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.6 Buildslave Specifiers</h3>
+
+<p><a name="index-c_005b_0027bots_0027_005d-25"></a>
+The <code>c['bots']</code> key is a list of known buildslaves. Each
+buildslave is defined by a tuple of (slavename, slavepassword). These
+are the same two values that need to be provided to the buildslave
+administrator when they create the buildslave.
+
+<pre class="example">     c['bots'] = [('bot-solaris', 'solarispasswd'),
+                  ('bot-bsd', 'bsdpasswd'),
+                 ]
+</pre>
+   <p>The slavenames must be unique, of course. The password exists to
+prevent evildoers from interfering with the buildbot by inserting
+their own (broken) buildslaves into the system and thus displacing the
+real ones.
+
+   <p>Buildslaves with an unrecognized slavename or a non-matching password
+will be rejected when they attempt to connect, and a message
+describing the problem will be put in the log file (see <a href="#Logfiles">Logfiles</a>).
+
+<div class="node">
+<p><hr>
+<a name="Defining-Builders"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Defining-Status-Targets">Defining Status Targets</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Buildslave-Specifiers">Buildslave Specifiers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.7 Defining Builders</h3>
+
+<p><a name="index-c_005b_0027builders_0027_005d-26"></a>
+The <code>c['builders']</code> key is a list of dictionaries which specify
+the Builders. The Buildmaster runs a collection of Builders, each of
+which handles a single type of build (e.g. full versus quick), on a
+single build slave. A Buildbot which makes sure that the latest code
+(&ldquo;HEAD&rdquo;) compiles correctly across four separate architectures will
+have four Builders, each performing the same build but on different
+slaves (one per platform).
+
+   <p>Each Builder gets a separate column in the waterfall display. In
+general, each Builder runs independently (although various kinds of
+interlocks can cause one Builder to have an effect on another).
+
+   <p>Each Builder specification dictionary has several required keys:
+
+     <dl>
+<dt><code>name</code><dd>This specifies the Builder's name, which is used in status
+reports.
+
+     <br><dt><code>slavename</code><dd>This specifies which buildslave will be used by this Builder. 
+<code>slavename</code> must appear in the <code>c['bots']</code> list. Each
+buildslave can accommodate multiple Builders.
+
+     <br><dt><code>slavenames</code><dd>If you provide <code>slavenames</code> instead of <code>slavename</code>, you can
+give a list of buildslaves which are capable of running this Builder. 
+If multiple buildslaves are available for any given Builder, you will
+have some measure of redundancy: in case one slave goes offline, the
+others can still keep the Builder working. In addition, multiple
+buildslaves will allow multiple simultaneous builds for the same
+Builder, which might be useful if you have a lot of forced or &ldquo;try&rdquo;
+builds taking place.
+
+     <p>If you use this feature, it is important to make sure that the
+buildslaves are all, in fact, capable of running the given build. The
+slave hosts should be configured similarly; otherwise you will spend a
+lot of time trying (unsuccessfully) to reproduce a failure that only
+occurs on some of the buildslaves and not the others. Different
+platforms, operating systems, versions of major programs or libraries,
+all these things mean you should use separate Builders.
+
+     <br><dt><code>builddir</code><dd>This specifies the name of a subdirectory (under the base directory)
+in which everything related to this builder will be placed. On the
+buildmaster, this holds build status information. On the buildslave,
+this is where checkouts, compiles, and tests are run.
+
+     <br><dt><code>factory</code><dd>This is a <code>buildbot.process.factory.BuildFactory</code> instance which
+controls how the build is performed. Full details appear in their own
+chapter, See <a href="#Build-Process">Build Process</a>. Parameters like the location of the CVS
+repository and the compile-time options used for the build are
+generally provided as arguments to the factory's constructor.
+
+   </dl>
+
+   <p>Other optional keys may be set on each Builder:
+
+     <dl>
+<dt><code>category</code><dd>If provided, this is a string that identifies a category for the
+builder to be a part of. Status clients can limit themselves to a
+subset of the available categories. A common use for this is to add
+new builders to your setup (for a new module, or for a new buildslave)
+that do not work correctly yet, and integrate them gradually with
+the active builders. You can put these new builders in a test
+category, make your main status clients ignore them, and have only
+private status clients pick them up. As soon as they work, you can
+move them over to the active category.
+
+   </dl>
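+
+   <p>Putting these keys together, a single entry in <code>c['builders']</code>
+might look like the following sketch (the empty <code>BuildFactory</code> is
+only a placeholder; see <a href="#Build-Process">Build Process</a> for real factories):
+
+<pre class="example">     from buildbot.process import factory
+     # sketch: a minimal builder dictionary using the keys described above;
+     # the empty BuildFactory is a placeholder with no steps
+     f = factory.BuildFactory()
+     b1 = {'name': "full-linux",
+           'slavename': "bot-linux",
+           'builddir': "full-linux",
+           'factory': f,
+           'category': "stable",    # optional
+           }
+     c['builders'] = [b1]
+</pre>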
+
+<div class="node">
+<p><hr>
+<a name="Defining-Status-Targets"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Debug-options">Debug options</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Defining-Builders">Defining Builders</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.8 Defining Status Targets</h3>
+
+<p>The Buildmaster has a variety of ways to present build status to
+various users. Each such delivery method is a &ldquo;Status Target&rdquo; object
+in the configuration's <code>status</code> list. To add status targets, you
+just append more objects to this list:
+
+   <p><a name="index-c_005b_0027status_0027_005d-27"></a>
+<pre class="example">     c['status'] = []
+     
+     from buildbot.status import html
+     c['status'].append(html.Waterfall(http_port=8010))
+     
+     from buildbot.status import mail
+     m = mail.MailNotifier(fromaddr="buildbot@localhost",
+                           extraRecipients=["builds at lists.example.com"],
+                           sendToInterestedUsers=False)
+     c['status'].append(m)
+     
+     from buildbot.status import words
+     c['status'].append(words.IRC(host="irc.example.com", nick="bb",
+                                  channels=["#example"]))
+</pre>
+   <p>Status delivery has its own chapter, <a href="#Status-Delivery">Status Delivery</a>, in which
+all the built-in status targets are documented.
+
+<div class="node">
+<p><hr>
+<a name="Debug-options"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Defining-Status-Targets">Defining Status Targets</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.9 Debug options</h3>
+
+<p><a name="index-c_005b_0027debugPassword_0027_005d-28"></a>If you set <code>c['debugPassword']</code>, then you can connect to the
+buildmaster with the diagnostic tool launched by <code>buildbot
+debugclient MASTER:PORT</code>. From this tool, you can reload the config
+file, manually force builds, and inject changes, which may be useful
+for testing your buildmaster without actually committing changes to
+your repository (or before you have the Change Sources set up). The
+debug tool uses the same port number as the slaves do:
+<code>c['slavePortnum']</code>, and is authenticated with this password.
+
+<pre class="example">     c['debugPassword'] = "debugpassword"
+</pre>
+   <p><a name="index-c_005b_0027manhole_0027_005d-29"></a>If you set <code>c['manhole']</code> to an instance of one of the classes in
+<code>buildbot.manhole</code>, you can telnet or ssh into the buildmaster
+and get an interactive Python shell, which may be useful for debugging
+buildbot internals. It is probably only useful for buildbot
+developers. It exposes full access to the buildmaster's account
+(including the ability to modify and delete files), so it should not
+be enabled with a weak or easily guessable password.
+
+   <p>There are three separate <code>Manhole</code> classes. Two of them use SSH,
+one uses unencrypted telnet. Two of them use a username+password
+combination to grant access, one of them uses an SSH-style
+<samp><span class="file">authorized_keys</span></samp> file which contains a list of ssh public keys.
+
+     <dl>
+<dt><code>manhole.AuthorizedKeysManhole</code><dd>You construct this with the name of a file that contains one SSH
+public key per line, just like <samp><span class="file">~/.ssh/authorized_keys</span></samp>. If you
+provide a non-absolute filename, it will be interpreted relative to
+the buildmaster's base directory.
+
+     <br><dt><code>manhole.PasswordManhole</code><dd>This one accepts SSH connections but asks for a username and password
+when authenticating. It accepts only one such pair.
+
+     <br><dt><code>manhole.TelnetManhole</code><dd>This accepts regular unencrypted telnet connections, and asks for a
+username/password pair before providing access. Because this
+username/password is transmitted in the clear, and because Manhole
+access to the buildmaster is equivalent to granting full shell
+privileges to both the buildmaster and all the buildslaves (and to all
+accounts which then run code produced by the buildslaves), it is
+highly recommended that you use one of the SSH manholes instead.
+
+   </dl>
+
+<pre class="example">     # some examples:
+     from buildbot import manhole
+     c['manhole'] = manhole.AuthorizedKeysManhole(1234, "authorized_keys")
+     c['manhole'] = manhole.PasswordManhole(1234, "alice", "mysecretpassword")
+     c['manhole'] = manhole.TelnetManhole(1234, "bob", "snoop_my_password_please")
+</pre>
+   <p>The <code>Manhole</code> instance can be configured to listen on a specific
+port. You may wish to have this listening port bind to the loopback
+interface (sometimes known as &ldquo;lo0&rdquo;, &ldquo;localhost&rdquo;, or 127.0.0.1) to
+restrict access to clients which are running on the same host.
+
+<pre class="example">     from buildbot.manhole import PasswordManhole
+     c['manhole'] = PasswordManhole("tcp:9999:interface=127.0.0.1","admin","passwd")
+</pre>
+   <p>To have the <code>Manhole</code> listen on all interfaces, use
+<code>"tcp:9999"</code> or simply 9999. This port specification uses
+<code>twisted.application.strports</code>, so you can make it listen on SSL
+or even UNIX-domain sockets if you want.
+
+   <p>Note that using any Manhole requires that the TwistedConch package be
+installed, and that you be using Twisted version 2.0 or later.
+
+   <p>The buildmaster's SSH server will use a different host key than the
+normal sshd running on a typical unix host. This will cause the ssh
+client to complain about a &ldquo;host key mismatch&rdquo;, because it does not
+realize there are two separate servers running on the same host. To
+avoid this, use a clause like the following in your <samp><span class="file">.ssh/config</span></samp>
+file:
+
+<pre class="example">     Host remotehost-buildbot
+      HostName remotehost
+      HostKeyAlias remotehost-buildbot
+      Port 9999
+      # use 'user' if you use PasswordManhole and your name is not 'admin'.
+      # if you use AuthorizedKeysManhole, this probably doesn't matter.
+      User admin
+</pre>
+   <div class="node">
+<p><hr>
+<a name="Getting-Source-Code-Changes"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Build-Process">Build Process</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Configuration">Configuration</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">5 Getting Source Code Changes</h2>
+
+<p>The most common way to use the Buildbot is centered around the idea of
+<code>Source Trees</code>: a directory tree filled with source code of some form
+which can be compiled and/or tested. Some projects use languages that don't
+involve any compilation step: nevertheless there may be a <code>build</code> phase
+where files are copied or rearranged into a form that is suitable for
+installation. Some projects do not have unit tests, and the Buildbot is
+merely helping to make sure that the sources can compile correctly. But in
+all of these cases, the thing-being-tested is a single source tree.
+
+   <p>A Version Control System maintains a source tree, and tells the
+buildmaster when it changes. The first step of each Build is typically
+to acquire a copy of some version of this tree.
+
+   <p>This chapter describes how the Buildbot learns about what Changes have
+occurred. For more information on VC systems and Changes, see
+<a href="#Version-Control-Systems">Version Control Systems</a>.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Change-Sources">Change Sources</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Change-Sources"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.1 Change Sources</h3>
+
+<!-- TODO: rework this, the one-buildmaster-one-tree thing isn't quite -->
+<!-- so narrow-minded anymore -->
+<p>Each Buildmaster watches a single source tree. Changes can be provided
+by a variety of ChangeSource types; however, any given project will
+typically have only a single ChangeSource active. This section
+provides a description of all available ChangeSource types and
+explains how to set up each of them.
+
+   <p>There are a variety of ChangeSources available, some of which are
+meant to be used in conjunction with other tools to deliver Change
+events from the VC repository to the buildmaster.
+
+     <ul>
+<li>CVSToys
+This ChangeSource opens a TCP connection from the buildmaster to a
+waiting FreshCVS daemon that lives on the repository machine, and
+subscribes to hear about Changes.
+
+     <li>MaildirSource
+This one watches a local maildir-format inbox for email sent out by
+the repository when a change is made. When a message arrives, it is
+parsed to create the Change object. A variety of parsing functions are
+available to accommodate different email-sending tools.
+
+     <li>PBChangeSource
+This ChangeSource listens on a local TCP socket for inbound
+connections from a separate tool. Usually, this tool would be run on
+the VC repository machine in a commit hook. It is expected to connect
+to the TCP socket and send a Change message over the network
+connection. The <samp><span class="command">buildbot sendchange</span></samp> command is one example
+of a tool that knows how to send these messages, so you can write a
+commit script for your VC system that calls it to deliver the Change. 
+There are other tools in the contrib/ directory that use the same
+protocol.
+
+   </ul>
+
+   <p>As a quick guide, here is a list of VC systems and the ChangeSources
+that might be useful with them. All of these ChangeSources are in the
+<code>buildbot.changes</code> module.
+
+     <dl>
+<dt><code>CVS</code><dd>
+          <ul>
+<li>freshcvs.FreshCVSSource (connected via TCP to the freshcvs daemon)
+<li>mail.FCMaildirSource (watching for email sent by a freshcvs daemon)
+<li>mail.BonsaiMaildirSource (watching for email sent by Bonsai)
+<li>mail.SyncmailMaildirSource (watching for email sent by syncmail)
+<li>pb.PBChangeSource (listening for connections from <code>buildbot
+sendchange</code> run in a loginfo script)
+<li>pb.PBChangeSource (listening for connections from a long-running
+<code>contrib/viewcvspoll.py</code> polling process which examines the ViewCVS
+database directly)
+</ul>
+
+     <br><dt><code>SVN</code><dd>
+          <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/svn_buildbot.py</code> run in a postcommit script)
+<li>pb.PBChangeSource (listening for connections from a long-running
+<code>contrib/svn_watcher.py</code> or <code>contrib/svnpoller.py</code> polling
+process)
+<li>svnpoller.SVNPoller (polling the SVN repository)
+</ul>
+
+     <br><dt><code>Darcs</code><dd>
+          <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/darcs_buildbot.py</code> run in a commit script)
+</ul>
+
+     <br><dt><code>Mercurial</code><dd>
+          <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/hg_buildbot.py</code> run in an 'incoming' hook)
+</ul>
+
+     <br><dt><code>Arch/Bazaar</code><dd>
+          <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/arch_buildbot.py</code> run in a commit hook)
+</ul>
+
+   </dl>
+
+   <p>All VC systems can be driven by a PBChangeSource and the
+<code>buildbot sendchange</code> tool run from some form of commit script. 
+If you write an email parsing function, they can also all be driven by
+a suitable <code>MaildirSource</code>.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Choosing-ChangeSources">Choosing ChangeSources</a>
+<li><a accesskey="2" href="#CVSToys-_002d-PBService">CVSToys - PBService</a>
+<li><a accesskey="3" href="#CVSToys-_002d-mail-notification">CVSToys - mail notification</a>
+<li><a accesskey="4" href="#Other-mail-notification-ChangeSources">Other mail notification ChangeSources</a>
+<li><a accesskey="5" href="#PBChangeSource">PBChangeSource</a>
+<li><a accesskey="6" href="#P4Source">P4Source</a>
+<li><a accesskey="7" href="#BonsaiPoller">BonsaiPoller</a>
+<li><a accesskey="8" href="#SVNPoller">SVNPoller</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Choosing-ChangeSources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#CVSToys-_002d-PBService">CVSToys - PBService</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Change-Sources">Change Sources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources">Change Sources</a>
+
+</div>
+
+<h4 class="subsection">5.1.1 Choosing ChangeSources</h4>
+
+<p>The <code>master.cfg</code> configuration file has a dictionary key named
+<code>BuildmasterConfig['sources']</code>, which holds a list of
+<code>IChangeSource</code> objects. The config file will typically create an
+object from one of the classes described below and stuff it into the
+list.
+
+<pre class="example">     s = FreshCVSSourceNewcred(host="host", port=4519,
+                               user="alice", passwd="secret",
+                               prefix="Twisted")
+     BuildmasterConfig['sources'] = [s]
+</pre>
+   <p>Each source tree has a nominal <code>top</code>. Each Change has a list of
+filenames, which are all relative to this top location. The
+ChangeSource is responsible for doing whatever is necessary to
+accomplish this. Most sources have a <code>prefix</code> argument: a partial
+pathname which is stripped from the front of all filenames provided to
+that <code>ChangeSource</code>. Files which are outside this sub-tree are
+ignored by the changesource: it does not generate Changes for those
+files.
+
+<div class="node">
+<p><hr>
+<a name="CVSToys---PBService"></a>
+<a name="CVSToys-_002d-PBService"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#CVSToys-_002d-mail-notification">CVSToys - mail notification</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Choosing-ChangeSources">Choosing ChangeSources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources">Change Sources</a>
+
+</div>
+
+<h4 class="subsection">5.1.2 CVSToys - PBService</h4>
+
+<p><a name="index-buildbot_002echanges_002efreshcvs_002eFreshCVSSource-30"></a>
+The <a href="http://purl.net/net/CVSToys">CVSToys</a> package provides a
+server which runs on the machine that hosts the CVS repository it
+watches. It has a variety of ways to distribute commit notifications,
+and offers a flexible regexp-based way to filter out uninteresting
+changes. One of the notification options is named <code>PBService</code> and
+works by listening on a TCP port for clients. These clients subscribe
+to hear about commit notifications.
+
+   <p>The buildmaster has a CVSToys-compatible <code>PBService</code> client built
+in. There are two versions of it, one for old versions of CVSToys
+(1.0.9 and earlier) which used the <code>oldcred</code> authentication
+framework, and one for newer versions (1.0.10 and later) which use
+<code>newcred</code>. Both are classes in the
+<code>buildbot.changes.freshcvs</code> package.
+
+   <p><code>FreshCVSSourceNewcred</code> objects are created with the following
+parameters:
+
+     <dl>
+<dt>`<samp><code>host</code><span class="samp"> and </span><code>port</code></samp>'<dd>these specify where the CVSToys server can be reached
+
+     <br><dt>`<samp><code>user</code><span class="samp"> and </span><code>passwd</code></samp>'<dd>these specify the login information for the CVSToys server
+(<code>freshcvs</code>). These must match the server's values, which are
+defined in the <code>freshCfg</code> configuration file (which lives in the
+CVSROOT directory of the repository).
+
+     <br><dt>`<samp><code>prefix</code></samp>'<dd>this is the prefix to be found and stripped from filenames delivered
+by the CVSToys server. Most projects live in sub-directories of the
+main repository, as siblings of the CVSROOT sub-directory, so
+typically this prefix is set to that top sub-directory name.
+
+   </dl>
+
+<h3 class="heading">Example</h3>
+
+<p>To set up the freshCVS server, add a statement like the following to
+your <samp><span class="file">freshCfg</span></samp> file:
+
+<pre class="example">     pb = ConfigurationSet([
+         (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
+         ])
+</pre>
+   <p>This will announce all changes to a client which connects to port 4519
+using a username of 'foo' and a password of 'bar'.
+
+   <p>Then add a clause like this to your buildmaster's <samp><span class="file">master.cfg</span></samp>:
+
+<pre class="example">     BuildmasterConfig['sources'] = [FreshCVSSource("cvs.example.com", 4519,
+                                     "foo", "bar",
+                                     prefix="glib/")]
+</pre>
+   <p>where "cvs.example.com" is the host that is running the FreshCVS daemon, and
+"glib" is the top-level directory (relative to the repository's root) where
+all your source code lives. Most sites keep one or more projects in the
+same repository (along with CVSROOT/ to hold admin files like loginfo and
+freshCfg); the prefix= argument tells the buildmaster to ignore everything
+outside that directory, and to strip that common prefix from all pathnames
+it handles.
+
+<div class="node">
+<p><hr>
+<a name="CVSToys---mail-notification"></a>
+<a name="CVSToys-_002d-mail-notification"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Other-mail-notification-ChangeSources">Other mail notification ChangeSources</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#CVSToys-_002d-PBService">CVSToys - PBService</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources">Change Sources</a>
+
+</div>
+
+<h4 class="subsection">5.1.3 CVSToys - mail notification</h4>
+
+<p><a name="index-buildbot_002echanges_002email_002eFCMaildirSource-31"></a>
+CVSToys also provides a <code>MailNotification</code> action which will send
+email to a list of recipients for each commit. This tends to work
+better than using <code>/bin/mail</code> from within the CVSROOT/loginfo
+file directly, as CVSToys will batch together all files changed during
+the same CVS invocation, and can provide more information (like
+creating a ViewCVS URL for each file changed).
+
+   <p>The Buildbot's <code>FCMaildirSource</code> is a ChangeSource which knows
+how to parse these CVSToys messages and turn them into Change objects. 
+It watches a Maildir for new messages. The usual installation
+process looks like this:
+
+     <ol type=1 start=1>
+<li>Create a mailing list, <code>projectname-commits</code>. 
+<li>In CVSToys' freshCfg file, use a <code>MailNotification</code> action to
+send commit mail to this mailing list. 
+<li>Subscribe the buildbot user to the mailing list. 
+<li>Configure your .qmail or .forward file to deliver these messages into
+a maildir. 
+<li>In the Buildbot's master.cfg file, use a <code>FCMaildirSource</code> to
+watch the maildir for commit messages.
+        </ol>
+
+   <p>The <code>FCMaildirSource</code> is created with two parameters: the
+directory name of the maildir root, and the prefix to strip.
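+
+   <p>As a sketch of step 5 above, the <samp><span class="file">master.cfg</span></samp>
+clause might look like the following (the maildir path and the prefix
+are placeholders which depend on your mail delivery setup and
+repository layout):
+
+<pre class="example">     from buildbot.changes.mail import FCMaildirSource
+     c['sources'].append(FCMaildirSource("/home/buildbot/Maildir", "projectname/"))
+</pre>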
+
+<div class="node">
+<p><hr>
+<a name="Other-mail-notification-ChangeSources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#PBChangeSource">PBChangeSource</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#CVSToys-_002d-mail-notification">CVSToys - mail notification</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources">Change Sources</a>
+
+</div>
+
+<h4 class="subsection">5.1.4 Other mail notification ChangeSources</h4>
+
+<p><a name="index-buildbot_002echanges_002email_002eSyncmailMaildirSource-32"></a><a name="index-buildbot_002echanges_002email_002eBonsaiMaildirSource-33"></a>
+There are other types of maildir-watching ChangeSources, which only
+differ in the function used to parse the message body.
+
+   <p><code>SyncmailMaildirSource</code> knows how to parse the message format
+used in mail sent by Syncmail.
+
+   <p><code>BonsaiMaildirSource</code> parses messages sent out by Bonsai.
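+
+   <p>Since they differ only in the parsing function, they are created the
+same way as <code>FCMaildirSource</code>: with the maildir root and the
+prefix to strip. A minimal sketch (with a placeholder maildir path and
+prefix) might be:
+
+<pre class="example">     from buildbot.changes.mail import SyncmailMaildirSource
+     c['sources'].append(SyncmailMaildirSource("/home/buildbot/Maildir", "trunk/"))
+</pre>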
+
+<div class="node">
+<p><hr>
+<a name="PBChangeSource"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#P4Source">P4Source</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Other-mail-notification-ChangeSources">Other mail notification ChangeSources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources">Change Sources</a>
+
+</div>
+
+<h4 class="subsection">5.1.5 PBChangeSource</h4>
+
+<p><a name="index-buildbot_002echanges_002epb_002ePBChangeSource-34"></a>
+The last kind of ChangeSource actually listens on a TCP port for
+clients to connect and push change notices <em>into</em> the
+Buildmaster. This is used by the built-in <code>buildbot sendchange</code>
+notification tool, as well as the VC-specific
+<samp><span class="file">contrib/svn_buildbot.py</span></samp> and <samp><span class="file">contrib/arch_buildbot.py</span></samp>
+tools. These tools are run by the repository (in a commit hook
+script), and connect to the buildmaster directly each time a file is
+committed. This is also useful for creating new kinds of change sources
+that work on a <code>push</code> model instead of some kind of subscription
+scheme, for example a script which is run out of an email .forward
+file.
+
+   <p>This ChangeSource can be configured to listen on its own TCP port, or
+it can share the port that the buildmaster is already using for the
+buildslaves to connect. (This is possible because the
+<code>PBChangeSource</code> uses the same protocol as the buildslaves, and
+they can be distinguished by the <code>username</code> attribute used when
+the initial connection is established). It might be useful to have it
+listen on a different port if, for example, you wanted to establish
+different firewall rules for that port. You could allow only the SVN
+repository machine access to the <code>PBChangeSource</code> port, while
+allowing only the buildslave machines access to the slave port. Or you
+could just expose one port and run everything over it. <em>Note:
+this feature is not yet implemented, the PBChangeSource will always
+share the slave port and will always have a </em><code>user</code><em> name of
+</em><code>change</code><em>, and a passwd of </em><code>changepw</code><em>. These limitations will
+be removed in the future.</em>.
+
+   <p>The <code>PBChangeSource</code> is created with the following arguments. All
+are optional.
+
+     <dl>
+<dt>`<samp><code>port</code></samp>'<dd>which port to listen on. If <code>None</code> (which is the default), it
+shares the port used for buildslave connections. <em>Not
+Implemented, always set to </em><code>None</code>.
+
+     <br><dt>`<samp><code>user</code><span class="samp"> and </span><code>passwd</code></samp>'<dd>The user/passwd account information that the client program must use
+to connect. Defaults to <code>change</code> and <code>changepw</code>. <em>Not
+Implemented, </em><code>user</code><em> is currently always set to </em><code>change</code><em>,
+</em><code>passwd</code><em> is always set to </em><code>changepw</code>.
+
+     <br><dt>`<samp><code>prefix</code></samp>'<dd>The prefix to be found and stripped from filenames delivered over the
+connection. Any filenames which do not start with this prefix will be
+removed. If all the filenames in a given Change are removed, that
+whole Change will be dropped. This string should probably end with a
+directory separator.
+
+     <p>This is useful for changes coming from version control systems that
+represent branches as parent directories within the repository (like
+SVN and Perforce). Use a prefix of 'trunk/' or
+'project/branches/foobranch/' to only follow one branch and to get
+correct tree-relative filenames. Without a prefix, the PBChangeSource
+will probably deliver Changes with filenames like <samp><span class="file">trunk/foo.c</span></samp>
+instead of just <samp><span class="file">foo.c</span></samp>. Of course this also depends upon the
+tool sending the Changes in (like <samp><span class="command">buildbot sendchange</span></samp>) and
+what filenames it is delivering: that tool may be filtering and
+stripping prefixes at the sending end.
+
+   </dl>
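+
+   <p>For example, a <code>PBChangeSource</code> which shares the slave port
+and strips a leading <code>trunk/</code> from incoming filenames could be
+configured as follows (the prefix is, of course, specific to your
+repository layout):
+
+<pre class="example">     from buildbot.changes.pb import PBChangeSource
+     c['sources'].append(PBChangeSource(prefix="trunk/"))
+</pre>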
+
+<div class="node">
+<p><hr>
+<a name="P4Source"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BonsaiPoller">BonsaiPoller</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#PBChangeSource">PBChangeSource</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources">Change Sources</a>
+
+</div>
+
+<h4 class="subsection">5.1.6 P4Source</h4>
+
+<p><a name="index-buildbot_002echanges_002ep4poller_002eP4Source-35"></a>
+The <code>P4Source</code> periodically polls a <a href="http://www.perforce.com/">Perforce</a> depot for changes. It accepts the following arguments:
+
+     <dl>
+<dt>`<samp><code>p4base</code></samp>'<dd>The base depot path to watch, without the trailing '/...'.
+
+     <br><dt>`<samp><code>p4port</code></samp>'<dd>The Perforce server to connect to (as host:port).
+
+     <br><dt>`<samp><code>p4user</code></samp>'<dd>The Perforce user.
+
+     <br><dt>`<samp><code>p4passwd</code></samp>'<dd>The Perforce password.
+
+     <br><dt>`<samp><code>split_file</code></samp>'<dd>A function that maps a pathname, without the leading <code>p4base</code>, to a
+(branch, filename) tuple. The default just returns (None, branchfile),
+which effectively disables branch support. You should supply a function
+which understands your repository structure.
+
+     <br><dt>`<samp><code>pollinterval</code></samp>'<dd>How often to poll, in seconds. Defaults to 600 (10 minutes).
+
+     <br><dt>`<samp><code>histmax</code></samp>'<dd>The maximum number of changes to inspect at a time. If more than this
+number occur since the last poll, older changes will be silently
+ignored. 
+</dl>
+
+<h3 class="heading">Example</h3>
+
+<p>This configuration uses the <code>P4PORT</code>, <code>P4USER</code>, and <code>P4PASSWD</code>
+specified in the buildmaster's environment. It watches a project in which the
+branch name is simply the next path component, and the filename is
+everything after that.
+
+<pre class="example">     import buildbot.changes.p4poller
+     c['sources'].append(p4poller.P4Source(
+             p4base='//depot/project/',
+             split_file=lambda branchfile: branchfile.split('/',1)
+     ))
+</pre>
+   <div class="node">
+<p><hr>
+<a name="BonsaiPoller"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#SVNPoller">SVNPoller</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#P4Source">P4Source</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources">Change Sources</a>
+
+</div>
+
+<h4 class="subsection">5.1.7 BonsaiPoller</h4>
+
+<p><a name="index-buildbot_002echanges_002ebonsaipoller_002eBonsaiPoller-36"></a>
+The <code>BonsaiPoller</code> periodically polls a Bonsai server. This is a
+CGI script accessed through a web server that provides information
+about a CVS tree, for example the Mozilla bonsai server at
+<a href="http://bonsai.mozilla.org">http://bonsai.mozilla.org</a>. Bonsai servers are usable by both
+humans and machines. In this case, the buildbot's change source forms
+a query which asks about any files in the specified branch which have
+changed since the last query.
+
+   <p>Please take a look at the BonsaiPoller docstring for details about the
+arguments it accepts.
+
+<div class="node">
+<p><hr>
+<a name="SVNPoller"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BonsaiPoller">BonsaiPoller</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources">Change Sources</a>
+
+</div>
+
+<h4 class="subsection">5.1.8 SVNPoller</h4>
+
+<p><a name="index-buildbot_002echanges_002esvnpoller_002eSVNPoller-37"></a>
+The <code>buildbot.changes.svnpoller.SVNPoller</code> is a ChangeSource
+which periodically polls a <a href="http://subversion.tigris.org/">Subversion</a> repository for new revisions, by running the <code>svn
+log</code> command in a subshell. It can watch a single branch or multiple
+branches.
+
+   <p><code>SVNPoller</code> accepts the following arguments:
+
+     <dl>
+<dt><code>svnurl</code><dd>The base URL path to watch, like
+<code>svn://svn.twistedmatrix.com/svn/Twisted/trunk</code>, or
+<code>http://divmod.org/svn/Divmod/</code>, or even
+<code>file:///home/svn/Repository/ProjectA/branches/1.5/</code>. This must
+include the access scheme, the location of the repository (both the
+hostname for remote ones, and any additional directory names necessary
+to get to the repository), and the sub-path within the repository's
+virtual filesystem for the project and branch of interest.
+
+     <p>The <code>SVNPoller</code> will only pay attention to files inside the
+subdirectory specified by the complete svnurl.
+
+     <br><dt><code>split_file</code><dd>A function to convert pathnames into (branch, relative_pathname)
+tuples. Use this to explain your repository's branch-naming policy to
+<code>SVNPoller</code>. This function must accept a single string and return
+a two-entry tuple. There are a few utility functions in
+<code>buildbot.changes.svnpoller</code> that can be used as a
+<code>split_file</code> function, see below for details.
+
+     <p>The default value always returns (None, path), which indicates that
+all files are on the trunk.
+
+     <p>Subclasses of <code>SVNPoller</code> can override the <code>split_file</code>
+method instead of using the <code>split_file=</code> argument.
+
+     <br><dt><code>svnuser</code><dd>An optional string parameter. If set, the <code>--user</code> argument will
+be added to all <code>svn</code> commands. Use this if you have to
+authenticate to the svn server before you can do <code>svn info</code> or
+<code>svn log</code> commands.
+
+     <br><dt><code>svnpasswd</code><dd>Like <code>svnuser</code>, this will cause a <code>--password</code> argument to
+be passed to all svn commands.
+
+     <br><dt><code>pollinterval</code><dd>How often to poll, in seconds. Defaults to 600 (checking once every 10
+minutes). Lower this if you want the buildbot to notice changes
+faster, raise it if you want to reduce the network and CPU load on
+your svn server. Please be considerate of public SVN repositories by
+using a large interval when polling them.
+
+     <br><dt><code>histmax</code><dd>The maximum number of changes to inspect at a time. Every POLLINTERVAL
+seconds, the <code>SVNPoller</code> asks for the last HISTMAX changes and
+looks through them for any it does not already know about. If
+more than HISTMAX revisions have been committed since the last poll,
+older changes will be silently ignored. Larger values of histmax will
+cause more time and memory to be consumed on each poll attempt. 
+<code>histmax</code> defaults to 100.
+
+     <br><dt><code>svnbin</code><dd>This controls the <code>svn</code> executable to use. If subversion is
+installed in a weird place on your system (outside of the
+buildmaster's <code>$PATH</code>), use this to tell <code>SVNPoller</code> where
+to find it. The default value of &ldquo;svn&rdquo; will almost always be
+sufficient.
+
+   </dl>
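+
+   <p>For instance, a poller which checks the repository every five minutes
+and authenticates to the svn server might be created like this (the URL
+and credentials are placeholders):
+
+<pre class="example">     from buildbot.changes.svnpoller import SVNPoller
+     s = SVNPoller("svn://svn.example.org/MyProject/trunk",
+                   svnuser="buildbot", svnpasswd="secret",
+                   pollinterval=5*60)
+     c['sources'].append(s)
+</pre>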
+
+<h3 class="heading">Branches</h3>
+
+<p>Each source file that is tracked by a Subversion repository has a
+fully-qualified SVN URL in the following form:
+(REPOURL)(PROJECT-plus-BRANCH)(FILEPATH). When you create the
+<code>SVNPoller</code>, you give it a <code>svnurl</code> value that includes all
+of the REPOURL and possibly some portion of the PROJECT-plus-BRANCH
+string. The <code>SVNPoller</code> is responsible for producing Changes that
+contain a branch name and a FILEPATH (which is relative to the top of
+a checked-out tree). The details of how these strings are split up
+depend upon how your repository names its branches.
+
+<h4 class="subheading">PROJECT/BRANCHNAME/FILEPATH repositories</h4>
+
+<p>One common layout is to have all the various projects that share a
+repository get a single top-level directory each. Then under a given
+project's directory, you get two subdirectories, one named &ldquo;trunk&rdquo;
+and another named &ldquo;branches&rdquo;. Under &ldquo;branches&rdquo; you have a bunch of
+other directories, one per branch, with names like &ldquo;1.5.x&rdquo; and
+&ldquo;testing&rdquo;. It is also common to see directories like &ldquo;tags&rdquo; and
+&ldquo;releases&rdquo; next to &ldquo;branches&rdquo; and &ldquo;trunk&rdquo;.
+
+   <p>For example, the Twisted project has a subversion server on
+&ldquo;svn.twistedmatrix.com&rdquo; that hosts several sub-projects. The
+repository is available through a SCHEME of &ldquo;svn:&rdquo;. The primary
+sub-project is Twisted, of course, with a repository root of
+&ldquo;svn://svn.twistedmatrix.com/svn/Twisted&rdquo;. Another sub-project is
+Informant, with a root of
+&ldquo;svn://svn.twistedmatrix.com/svn/Informant&rdquo;, etc. Inside any
+checked-out Twisted tree, there is a file named bin/trial (which is
+used to run unit test suites).
+
+   <p>The trunk for Twisted is in
+&ldquo;svn://svn.twistedmatrix.com/svn/Twisted/trunk&rdquo;, and the
+fully-qualified SVN URL for the trunk version of <code>trial</code> would be
+&ldquo;svn://svn.twistedmatrix.com/svn/Twisted/trunk/bin/trial&rdquo;. The same
+SVNURL for that file on a branch named &ldquo;1.5.x&rdquo; would be
+&ldquo;svn://svn.twistedmatrix.com/svn/Twisted/branches/1.5.x/bin/trial&rdquo;.
+
+   <p>To set up a <code>SVNPoller</code> that watches the Twisted trunk (and
+nothing else), we would use the following:
+
+<pre class="example">     from buildbot.changes.svnpoller import SVNPoller
+     s = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted/trunk")
+     c['sources'].append(s)
+</pre>
+   <p>In this case, every Change that our <code>SVNPoller</code> produces will
+have <code>.branch=None</code>, to indicate that the Change is on the trunk. 
+No other sub-projects or branches will be tracked.
+
+   <p>If we want our ChangeSource to follow multiple branches, we have to do
+two things. First we have to change our <code>svnurl=</code> argument to
+watch more than just &ldquo;.../Twisted/trunk&rdquo;. We will set it to
+&ldquo;.../Twisted&rdquo; so that we'll see both the trunk and all the branches. 
+Second, we have to tell <code>SVNPoller</code> how to split the
+(PROJECT-plus-BRANCH)(FILEPATH) strings it gets from the repository
+out into (BRANCH) and (FILEPATH) pairs.
+
+   <p>We do the latter by providing a &ldquo;split_file&rdquo; function. This function
+is responsible for splitting something like
+&ldquo;branches/1.5.x/bin/trial&rdquo; into <code>branch</code>=&ldquo;branches/1.5.x&rdquo; and
+<code>filepath</code>=&ldquo;bin/trial&rdquo;. This function is always given a string
+that names a file relative to the subdirectory pointed to by the
+<code>SVNPoller</code>'s <code>svnurl=</code> argument. It is expected to return a
+(BRANCHNAME, FILEPATH) tuple (in which FILEPATH is relative to the
+branch indicated), or None to indicate that the file is outside any
+project of interest.
+
+   <p>(note that we want to see &ldquo;branches/1.5.x&rdquo; rather than just
+&ldquo;1.5.x&rdquo; because when we perform the SVN checkout, we will probably
+append the branch name to the baseURL, which requires that we keep the
+&ldquo;branches&rdquo; component in there. Other VC schemes use a different
+approach towards branches and may not require this artifact.)
+
+   <p>If your repository uses this same PROJECT/BRANCH/FILEPATH naming
+scheme, the following function will work:
+
+<pre class="example">     def split_file_branches(path):
+         pieces = path.split('/')
+         if pieces[0] == 'trunk':
+             return (None, '/'.join(pieces[1:]))
+         elif pieces[0] == 'branches':
+             return ('/'.join(pieces[0:2]),
+                     '/'.join(pieces[2:]))
+         else:
+             return None
+</pre>
+   <p>This function is provided as
+<code>buildbot.changes.svnpoller.split_file_branches</code> for your
+convenience. So to have our Twisted-watching <code>SVNPoller</code> follow
+multiple branches, we would use this:
+
+<pre class="example">     from buildbot.changes.svnpoller import SVNPoller, split_file_branches
+     s = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted",
+                   split_file=split_file_branches)
+     c['sources'].append(s)
+</pre>
+   <p>Changes for all sorts of branches (with names like &ldquo;branches/1.5.x&rdquo;,
+and None to indicate the trunk) will be delivered to the Schedulers. 
+Each Scheduler is then free to use or ignore each branch as it sees
+fit.
+
+<h4 class="subheading">BRANCHNAME/PROJECT/FILEPATH repositories</h4>
+
+<p>Another common way to organize a Subversion repository is to put the
+branch name at the top, and the projects underneath. This is
+especially frequent when there are a number of related sub-projects
+that all get released in a group.
+
+   <p>For example, Divmod.org hosts a project named &ldquo;Nevow&rdquo; as well as one
+named &ldquo;Quotient&rdquo;. In a checked-out Nevow tree there is a directory
+named &ldquo;formless&rdquo; that contains a python source file named
+&ldquo;webform.py&rdquo;. This repository is accessible via webdav (and thus
+uses an &ldquo;http:&rdquo; scheme) through the divmod.org hostname. There are
+many branches in this repository, and they use a
+(BRANCHNAME)/(PROJECT) naming policy.
+
+   <p>The fully-qualified SVN URL for the trunk version of webform.py is
+<code>http://divmod.org/svn/Divmod/trunk/Nevow/formless/webform.py</code>. 
+You can do an <code>svn co</code> with that URL and get a copy of the latest
+version. The 1.5.x branch version of this file would have a URL of
+<code>http://divmod.org/svn/Divmod/branches/1.5.x/Nevow/formless/webform.py</code>. 
+The whole Nevow trunk would be checked out with
+<code>http://divmod.org/svn/Divmod/trunk/Nevow</code>, while the Quotient
+trunk would be checked out using
+<code>http://divmod.org/svn/Divmod/trunk/Quotient</code>.
+
+   <p>Now suppose we want to have an <code>SVNPoller</code> that only cares about
+the Nevow trunk. This case looks just like the PROJECT/BRANCH layout
+described earlier:
+
+<pre class="example">     from buildbot.changes.svnpoller import SVNPoller
+     s = SVNPoller("http://divmod.org/svn/Divmod/trunk/Nevow")
+     c['sources'].append(s)
+</pre>
+   <p>But what happens when we want to track multiple Nevow branches? We
+have to point our <code>svnurl=</code> high enough to see all those
+branches, but we also don't want to include Quotient changes (since
+we're only building Nevow). To accomplish this, we must rely upon the
+<code>split_file</code> function to help us tell the difference between
+files that belong to Nevow and those that belong to Quotient, as well
+as figuring out which branch each one is on.
+
+<pre class="example">     from buildbot.changes.svnpoller import SVNPoller
+     s = SVNPoller("http://divmod.org/svn/Divmod",
+                   split_file=my_file_splitter)
+     c['sources'].append(s)
+</pre>
+   <p>The <code>my_file_splitter</code> function will be called with
+repository-relative pathnames like:
+
+     <dl>
+<dt><code>trunk/Nevow/formless/webform.py</code><dd>This is a Nevow file, on the trunk. We want the Change that includes this
+to see a filename of <code>formless/webform.py</code>, and a branch of None.
+
+     <br><dt><code>branches/1.5.x/Nevow/formless/webform.py</code><dd>This is a Nevow file, on a branch. We want to get
+branch=&ldquo;branches/1.5.x&rdquo; and filename=&ldquo;formless/webform.py&rdquo;.
+
+     <br><dt><code>trunk/Quotient/setup.py</code><dd>This is a Quotient file, so we want to ignore it by having
+<code>my_file_splitter</code> return None.
+
+     <br><dt><code>branches/1.5.x/Quotient/setup.py</code><dd>This is also a Quotient file, which should be ignored. 
+</dl>
+
+   <p>The following definition for <code>my_file_splitter</code> will do the job:
+
+<pre class="example">     def my_file_splitter(path):
+         pieces = path.split('/')
+         if pieces[0] == 'trunk':
+             branch = None
+             pieces.pop(0) # remove 'trunk'
+         elif pieces[0] == 'branches':
+             pieces.pop(0) # remove 'branches'
+             # grab branch name
+             branch = 'branches/' + pieces.pop(0)
+         else:
+             return None # something weird
+         projectname = pieces.pop(0)
+         if projectname != 'Nevow':
+             return None # wrong project
+         return (branch, '/'.join(pieces))
+</pre>
+   <div class="node">
+<p><hr>
+<a name="Build-Process"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Status-Delivery">Status Delivery</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">6 Build Process</h2>
+
+<p>A <code>Build</code> object is responsible for actually performing a build. 
+It gets access to a remote <code>SlaveBuilder</code> where it may run
+commands, and a <code>BuildStatus</code> object where it must emit status
+events. The <code>Build</code> is created by the Builder's
+<code>BuildFactory</code>.
+
+   <p>The default <code>Build</code> class is made up of a fixed sequence of
+<code>BuildSteps</code>, executed one after another until all are complete
+(or one of them indicates that the build should be halted early). The
+default <code>BuildFactory</code> creates instances of this <code>Build</code>
+class with a list of <code>BuildSteps</code>, so the basic way to configure
+the build is to provide a list of <code>BuildSteps</code> to your
+<code>BuildFactory</code>.
+
+   <p>More complicated <code>Build</code> subclasses can make other decisions:
+execute some steps only if certain files were changed, or if certain
+previous steps passed or failed. The base class has been written to
+allow users to express basic control flow without writing code, but
+you can always subclass and customize to achieve more specialized
+behavior.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Build-Steps">Build Steps</a>
+<li><a accesskey="2" href="#Interlocks">Interlocks</a>
+<li><a accesskey="3" href="#Build-Factories">Build Factories</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Build-Steps"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Interlocks">Interlocks</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Process">Build Process</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Process">Build Process</a>
+
+</div>
+
+<h3 class="section">6.1 Build Steps</h3>
+
+<p><code>BuildStep</code>s are usually specified in the buildmaster's
+configuration file, in a list of &ldquo;step specifications&rdquo; that is used
+to create the <code>BuildFactory</code>. These &ldquo;step specifications&rdquo; are
+not actual steps, but rather a tuple of the <code>BuildStep</code> subclass
+to be created and a dictionary of arguments. (the actual
+<code>BuildStep</code> instances are not created until the Build is started,
+so that each Build gets an independent copy of each BuildStep). The
+preferred way to create these step specifications is with the
+<code>BuildFactory</code>'s <code>addStep</code> method:
+
+<pre class="example">     from buildbot.steps import source, shell
+     from buildbot.process import factory
+     
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, svnurl="http://svn.example.org/Trunk/")
+     f.addStep(shell.ShellCommand, command=["make", "all"])
+     f.addStep(shell.ShellCommand, command=["make", "test"])
+</pre>
+   <p>The rest of this section lists all the standard BuildStep objects
+available for use in a Build, and the parameters which can be used to
+control each.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Common-Parameters">Common Parameters</a>
+<li><a accesskey="2" href="#Source-Checkout">Source Checkout</a>
+<li><a accesskey="3" href="#ShellCommand">ShellCommand</a>
+<li><a accesskey="4" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+<li><a accesskey="5" href="#Python-BuildSteps">Python BuildSteps</a>
+<li><a accesskey="6" href="#Transferring-Files">Transferring Files</a>
+<li><a accesskey="7" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Common-Parameters"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Source-Checkout">Source Checkout</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Steps">Build Steps</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.1 Common Parameters</h4>
+
+<p>The standard <code>Build</code> runs a series of <code>BuildStep</code>s in order,
+only stopping when it runs out of steps or if one of them requests
+that the build be halted. It collects status information from each one
+to create an overall build status (of SUCCESS, WARNINGS, or FAILURE).
+
+   <p>All BuildSteps accept some common parameters. Some of these control
+how their individual status affects the overall build. Others are used
+to specify which <code>Locks</code> (see <a href="#Interlocks">Interlocks</a>) should be
+acquired before allowing the step to run.
+
+   <p>Arguments common to all <code>BuildStep</code> subclasses:
+
+     <dl>
+<dt><code>name</code><dd>the name used to describe the step on the status display. It is also
+used to give a name to any LogFiles created by this step.
+
+     <br><dt><code>haltOnFailure</code><dd>if True, a FAILURE of this build step will cause the build to halt
+immediately with an overall result of FAILURE.
+
+     <br><dt><code>flunkOnWarnings</code><dd>when True, a WARNINGS or FAILURE of this build step will mark the
+overall build as FAILURE. The remaining steps will still be executed.
+
+     <br><dt><code>flunkOnFailure</code><dd>when True, a FAILURE of this build step will mark the overall build as
+a FAILURE. The remaining steps will still be executed.
+
+     <br><dt><code>warnOnWarnings</code><dd>when True, a WARNINGS or FAILURE of this build step will mark the
+overall build as having WARNINGS. The remaining steps will still be
+executed.
+
+     <br><dt><code>warnOnFailure</code><dd>when True, a FAILURE of this build step will mark the overall build as
+having WARNINGS. The remaining steps will still be executed.
+
+     <br><dt><code>locks</code><dd>a list of Locks (instances of <code>buildbot.locks.SlaveLock</code> or
+<code>buildbot.locks.MasterLock</code>) that should be acquired before
+starting this Step. The Locks will be released when the step is
+complete. Note that this is a list of actual Lock instances, not
+names. Also note that all Locks must have unique names.
+
+   </dl>
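+
+   <p>For example, a test step which should not abort the build but should
+mark the overall build as failed could be added like this (assuming
+<code>f</code> is the <code>BuildFactory</code> being populated, and the
+command is just a placeholder):
+
+<pre class="example">     from buildbot.steps import shell
+     f.addStep(shell.ShellCommand, name="check",
+               command=["make", "check"],
+               haltOnFailure=False, flunkOnFailure=True)
+</pre>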
+
+<div class="node">
+<p><hr>
+<a name="Source-Checkout"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#ShellCommand">ShellCommand</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Common-Parameters">Common Parameters</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.2 Source Checkout</h4>
+
+<p>The first step of any build is typically to acquire the source code
+from which the build will be performed. There are several classes to
+handle this, one for each of the different source control systems that
+Buildbot knows about. For a description of how Buildbot treats source
+control in general, see <a href="#Version-Control-Systems">Version Control Systems</a>.
+
+   <p>All source checkout steps accept some common parameters to control how
+they get the sources and where they should be placed. The remaining
+per-VC-system parameters are mostly to specify where exactly the
+sources are coming from.
+
+     <dl>
+<dt><code>mode</code><dd>
+a string describing the kind of VC operation that is desired. Defaults
+to <code>update</code>.
+
+          <dl>
+<dt><code>update</code><dd>specifies that the CVS checkout/update should be performed directly
+into the workdir. Each build is performed in the same directory,
+allowing for incremental builds. This minimizes disk space, bandwidth,
+and CPU time. However, it may encounter problems if the build process
+does not handle dependencies properly (sometimes you must do a &ldquo;clean
+build&rdquo; to make sure everything gets compiled), or if source files are
+deleted but generated files can influence test behavior (e.g. python's
+.pyc files), or when source directories are deleted but generated
+files prevent CVS from removing them. Builds ought to be correct
+regardless of whether they are done &ldquo;from scratch&rdquo; or incrementally,
+but it is useful to test both kinds: this mode exercises the
+incremental-build style.
+
+          <br><dt><code>copy</code><dd>specifies that the CVS workspace should be maintained in a separate
+directory (called the 'copydir'), using checkout or update as
+necessary. For each build, a new workdir is created with a copy of the
+source tree (rm -rf workdir; cp -r copydir workdir). This doubles the
+disk space required, but keeps the bandwidth low (update instead of a
+full checkout). A full 'clean' build is performed each time. This
+avoids any generated-file build problems, but is still occasionally
+vulnerable to CVS problems such as a repository being manually
+rearranged, causing CVS errors on update which are not an issue with a
+full checkout.
+
+          <!-- TODO: something is screwy about this, revisit. Is it the source -->
+<!-- directory or the working directory that is deleted each time? -->
+<br><dt><code>clobber</code><dd>specifies that the working directory should be deleted each time,
+necessitating a full checkout for each build. This ensures a clean
+build from a complete checkout, avoiding any of the problems described
+above. This mode exercises the &ldquo;from-scratch&rdquo; build style.
+
+          <br><dt><code>export</code><dd>this is like <code>clobber</code>, except that the 'cvs export' command is
+used to create the working directory. This command removes all CVS
+metadata files (the CVS/ directories) from the tree, which is
+sometimes useful for creating source tarballs (to avoid including the
+metadata in the tar file). 
+</dl>
+
+     <br><dt><code>workdir</code><dd>like all Steps, this indicates the directory where the build will take
+place. Source Steps are special in that they perform some operations
+outside of the workdir (like creating the workdir itself).
+
+     <br><dt><code>alwaysUseLatest</code><dd>if True, bypass the usual &ldquo;update to the last Change&rdquo; behavior, and
+always update to the latest changes instead.
+
+     <br><dt><code>retry</code><dd>If set, this specifies a tuple of <code>(delay, repeats)</code> which means
+that when a full VC checkout fails, it should be retried up to
+<var>repeats</var> times, waiting <var>delay</var> seconds between attempts. If
+you don't provide this, it defaults to <code>None</code>, which means VC
+operations should not be retried. This is provided to make life easier
+for buildslaves which are stuck behind poor network connections.
+
+   </dl>
+
+   <p>My habit as a developer is to do a <code>cvs update</code> and <code>make</code> each
+morning. Problems can occur, either because of bad code being checked in, or
+by incomplete dependencies causing a partial rebuild to fail where a
+complete from-scratch build might succeed. A quick Builder which emulates
+this incremental-build behavior would use the <code>mode='update'</code>
+setting.
+
+   <p>On the other hand, other kinds of dependency problems can cause a clean
+build to fail where a partial build might succeed. This frequently results
+from a link step that depends upon an object file that was removed from a
+later version of the tree: in the partial tree, the object file is still
+around (even though the Makefiles no longer know how to create it).
+
+   <p>&ldquo;Official&rdquo; builds (traceable builds performed from a known set of
+source revisions) are always done as clean builds, to make sure they are
+not influenced by any uncontrolled factors (like leftover files from a
+previous build). A &ldquo;full&rdquo; Builder which behaves this way would want
+to use the <code>mode='clobber'</code> setting.
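+
+   <p>Using the <code>SVN</code> step (described below) as an example, the two
+styles might be configured in two different Builders' factories roughly
+like this (the factory names and repository URL are placeholders):
+
+<pre class="example">     from buildbot.steps import source
+     # quick, incremental builds
+     f_quick.addStep(source.SVN, mode='update',
+                     svnurl="http://svn.example.com/repos/trunk")
+     # "official", from-scratch builds
+     f_full.addStep(source.SVN, mode='clobber',
+                    svnurl="http://svn.example.com/repos/trunk")
+</pre>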
+
+   <p>Each VC system has a corresponding source checkout class: their
+arguments are described on the following pages.
+
+<ul class="menu">
+<li><a accesskey="1" href="#CVS">CVS</a>
+<li><a accesskey="2" href="#SVN">SVN</a>
+<li><a accesskey="3" href="#Darcs">Darcs</a>
+<li><a accesskey="4" href="#Mercurial">Mercurial</a>
+<li><a accesskey="5" href="#Arch">Arch</a>
+<li><a accesskey="6" href="#Bazaar">Bazaar</a>
+<li><a accesskey="7" href="#P4">P4</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="CVS"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#SVN">SVN</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Source-Checkout">Source Checkout</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.2.1 CVS</h5>
+
+<p><a name="index-CVS-Checkout-38"></a><a name="index-buildbot_002esteps_002esource_002eCVS-39"></a>
+
+   <p>The <code>CVS</code> build step performs a <a href="http://www.nongnu.org/cvs/">CVS</a> checkout or update. It takes the following arguments:
+
+     <dl>
+<dt><code>cvsroot</code><dd>(required): specify the CVSROOT value, which points to a CVS
+repository, probably on a remote machine. For example, the cvsroot
+value you would use to get a copy of the Buildbot source code is
+<code>:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot</code>.
+
+     <br><dt><code>cvsmodule</code><dd>(required): specify the cvs <code>module</code>, which is generally a
+subdirectory of the CVSROOT. The cvsmodule for the Buildbot source
+code is <code>buildbot</code>.
+
+     <br><dt><code>branch</code><dd>a string which will be used in a <code>-r</code> argument. This is most
+useful for specifying a branch to work on. Defaults to <code>HEAD</code>.
+
+     <br><dt><code>global_options</code><dd>a list of flags to be put before the verb in the CVS command.
+
+     <br><dt><code>checkoutDelay</code><dd>if set, the number of seconds to put between the timestamp of the last
+known Change and the value used for the <code>-D</code> option. Defaults to
+half of the parent Build's treeStableTimer.
+
+   </dl>
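+
+   <p>For example, a <code>CVS</code> checkout of the Buildbot source code
+itself, using the cvsroot and cvsmodule values mentioned above, might be
+added like this (assuming <code>f</code> is the <code>BuildFactory</code>
+being populated):
+
+<pre class="example">     from buildbot.steps import source
+     f.addStep(source.CVS,
+               cvsroot=":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot",
+               cvsmodule="buildbot",
+               mode="update")
+</pre>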
+
+<div class="node">
+<p><hr>
+<a name="SVN"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Darcs">Darcs</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#CVS">CVS</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.2.2 SVN</h5>
+
+<p><a name="index-SVN-Checkout-40"></a><a name="index-buildbot_002esteps_002esource_002eSVN-41"></a>
+
+   <p>The <code>SVN</code> build step performs a
+<a href="http://subversion.tigris.org">Subversion</a> checkout or update. 
+There are two basic ways of setting up the checkout step, depending
+upon whether you are using multiple branches or not.
+
+   <p>If all of your builds use the same branch, then you should create the
+<code>SVN</code> step with the <code>svnurl</code> argument:
+
+     <dl>
+<dt><code>svnurl</code><dd>(required): this specifies the <code>URL</code> argument that will be given
+to the <code>svn checkout</code> command. It dictates both where the
+repository is located and which sub-tree should be extracted. In this
+respect, it is like a combination of the CVS <code>cvsroot</code> and
+<code>cvsmodule</code> arguments. For example, if you are using a remote
+Subversion repository which is accessible through HTTP at a URL of
+<code>http://svn.example.com/repos</code>, and you wanted to check out the
+<code>trunk/calc</code> sub-tree, you would use
+<code>svnurl="http://svn.example.com/repos/trunk/calc"</code> as an argument
+to your <code>SVN</code> step. 
+</dl>
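+
+   <p>Continuing that example, the single-branch checkout step would be
+added to the factory roughly like this (assuming <code>f</code> is the
+<code>BuildFactory</code> being populated):
+
+<pre class="example">     from buildbot.steps import source
+     f.addStep(source.SVN, mode="update",
+               svnurl="http://svn.example.com/repos/trunk/calc")
+</pre>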
+
+   <p>If, on the other hand, you are building from multiple branches, then
+you should create the <code>SVN</code> step with the <code>baseURL</code> and
+<code>defaultBranch</code> arguments instead:
+
+     <dl>
+<dt><code>baseURL</code><dd>(required): this specifies the base repository URL, to which a branch
+name will be appended. It should probably end in a slash.
+
+     <br><dt><code>defaultBranch</code><dd>this specifies the name of the branch to use when a Build does not
+provide one of its own. This will be appended to <code>baseURL</code> to
+create the string that will be passed to the <code>svn checkout</code>
+command. 
+</dl>
+
+   <p>If you are using branches, you must also make sure your
+<code>ChangeSource</code> will report the correct branch names.
+
+<h3 class="heading">branch example</h3>
+
+<p>Let's suppose that the &ldquo;MyProject&rdquo; repository uses branches for the
+trunk, for various users' individual development efforts, and for
+several new features that will require some amount of work (involving
+multiple developers) before they are ready to merge onto the trunk. 
+Such a repository might be organized as follows:
+
+<pre class="example">     svn://svn.example.org/MyProject/trunk
+     svn://svn.example.org/MyProject/branches/User1/foo
+     svn://svn.example.org/MyProject/branches/User1/bar
+     svn://svn.example.org/MyProject/branches/User2/baz
+     svn://svn.example.org/MyProject/features/newthing
+     svn://svn.example.org/MyProject/features/otherthing
+</pre>
+   <p>Further assume that we want the Buildbot to run tests against the
+trunk and against all the feature branches (i.e., do a
+checkout/compile/build of branch X when a file has been changed on
+branch X, when X is in the set [trunk, features/newthing,
+features/otherthing]). We do not want the Buildbot to automatically
+build any of the user branches, but it should be willing to build a
+user branch when explicitly requested (most likely by the user who
+owns that branch).
+
+   <p>There are three things that need to be set up to accommodate this
+system. The first is a ChangeSource that is capable of identifying the
+branch which owns any given file. This depends upon a user-supplied
+function, in an external program that runs in the SVN commit hook and
+connects to the buildmaster's <code>PBChangeSource</code> over a TCP
+connection. (You can use the &ldquo;<code>buildbot sendchange</code>&rdquo; utility
+for this purpose, but you will still need an external program to
+decide what value should be passed to the <code>--branch=</code> argument.) 
+For example, a change to a file with the SVN url of
+&ldquo;svn://svn.example.org/MyProject/features/newthing/src/foo.c&rdquo; should
+be broken down into a Change instance with
+<code>branch='features/newthing'</code> and <code>file='src/foo.c'</code>.
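+
+   <p>The exact splitting rule depends on your repository layout. As a
+rough sketch (a hypothetical helper for the layout above, not part of
+Buildbot), the commit-hook script might split a repository-relative
+path like this:
+
+<pre class="example">     def split_svn_path(path):
+         # "trunk/src/foo.c"              -> ("trunk", "src/foo.c")
+         # "features/newthing/src/foo.c"  -> ("features/newthing", "src/foo.c")
+         # "branches/User1/foo/setup.py"  -> ("branches/User1/foo", "setup.py")
+         pieces = path.split("/")
+         if pieces[0] == "trunk":
+             return "trunk", "/".join(pieces[1:])
+         if pieces[0] == "features":
+             return "/".join(pieces[:2]), "/".join(pieces[2:])
+         return "/".join(pieces[:3]), "/".join(pieces[3:])
+</pre>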
+
+   <p>The second piece is an <code>AnyBranchScheduler</code> which will pay
+attention to the desired branches. It will not pay attention to the
+user branches, so it will not automatically start builds in response
+to changes there. The AnyBranchScheduler class requires you to
+explicitly list all the branches you want it to use, but it would not
+be difficult to write a subclass which used
+<code>branch.startswith('features/')</code> to remove the need for this
+explicit list. Or, if you want to build user branches too, you can use
+AnyBranchScheduler with <code>branches=None</code> to indicate that you want
+it to pay attention to all branches.
+
+   <p>The third piece is an <code>SVN</code> checkout step that is configured to
+handle the branches correctly, with a <code>baseURL</code> value that
+matches the way the ChangeSource splits each file's URL into base,
+branch, and file.
+
+<pre class="example">     from buildbot.changes.pb import PBChangeSource
+     from buildbot.scheduler import AnyBranchScheduler
+     from buildbot.process import factory
+     from buildbot.steps import source, shell
+     
+     c['sources'] = [PBChangeSource()]
+     s1 = AnyBranchScheduler('main',
+                             ['trunk', 'features/newthing', 'features/otherthing'],
+                             10*60, ['test-i386', 'test-ppc'])
+     c['schedulers'] = [s1]
+     
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, mode='update',
+               baseURL='svn://svn.example.org/MyProject/',
+               defaultBranch='trunk')
+     f.addStep(shell.Compile, command="make all")
+     f.addStep(shell.Test, command="make test")
+     
+     c['builders'] = [
+       {'name':'test-i386', 'slavename':'bot-i386', 'builddir':'test-i386',
+                            'factory':f },
+       {'name':'test-ppc', 'slavename':'bot-ppc', 'builddir':'test-ppc',
+                           'factory':f },
+      ]
+</pre>
+   <p>In this example, when a change arrives with a <code>branch</code> attribute
+of &ldquo;trunk&rdquo;, the resulting build will have an SVN step that
+concatenates &ldquo;svn://svn.example.org/MyProject/&rdquo; (the baseURL) with
+&ldquo;trunk&rdquo; (the branch name) to get the correct svn command. If the
+&ldquo;newthing&rdquo; branch has a change to &ldquo;src/foo.c&rdquo;, then the SVN step
+will concatenate &ldquo;svn://svn.example.org/MyProject/&rdquo; with
+&ldquo;features/newthing&rdquo; to get the svnurl for checkout.
+
+<div class="node">
+<p><hr>
+<a name="Darcs"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Mercurial">Mercurial</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#SVN">SVN</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.2.3 Darcs</h5>
+
+<p><a name="index-Darcs-Checkout-42"></a><a name="index-buildbot_002esteps_002esource_002eDarcs-43"></a>
+
+   <p>The <code>Darcs</code> build step performs a
+<a href="http://abridgegame.org/darcs/">Darcs</a> checkout or update.
+
+   <p>Like the <a href="#SVN">SVN</a> step, this one can either be configured to always check
+out a specific tree, or set up to pull from a particular branch that
+gets specified separately for each build. Also like SVN, the
+repository URL given to Darcs is created by concatenating a
+<code>baseURL</code> with the branch name, and if no particular branch is
+requested, it uses a <code>defaultBranch</code>. The only difference in
+usage is that each potential Darcs repository URL must point to a
+fully-fledged repository, whereas SVN URLs usually point to sub-trees
+of the main Subversion repository. In other words, doing an SVN
+checkout of <code>baseURL</code> is legal, but silly, since you'd probably
+wind up with a copy of every single branch in the whole repository. 
+Doing a Darcs checkout of <code>baseURL</code> is just plain wrong, since
+the parent directory of a collection of Darcs repositories is not
+itself a valid repository.
+
+   <p>The Darcs step takes the following arguments:
+
+     <dl>
+<dt><code>repourl</code><dd>(required unless <code>baseURL</code> is provided): the URL at which the
+Darcs source repository is available.
+
+     <br><dt><code>baseURL</code><dd>(required unless <code>repourl</code> is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+     <br><dt><code>defaultBranch</code><dd>(allowed if and only if <code>baseURL</code> is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to <code>baseURL</code> to create the string that
+will be passed to the <code>darcs get</code> command. 
+</dl>
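+
+   <p>A minimal single-repository sketch (hypothetical repository URL,
+<code>BuildFactory</code> named <code>f</code>) might look like:
+
+<pre class="example">     from buildbot.steps import source
+     
+     f.addStep(source.Darcs, mode='update',
+               repourl="http://darcs.example.org/MyProject/trunk/")
+</pre>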
+
+<div class="node">
+<p><hr>
+<a name="Mercurial"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Arch">Arch</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Darcs">Darcs</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.2.4 Mercurial</h5>
+
+<p><a name="index-Mercurial-Checkout-44"></a><a name="index-buildbot_002esteps_002esource_002eMercurial-45"></a>
+
+   <p>The <code>Mercurial</code> build step performs a
+<a href="http://selenic.com/mercurial">Mercurial</a> (aka &ldquo;hg&rdquo;) checkout
+or update.
+
+   <p>Branches are handled just as they are for the <a href="#Darcs">Darcs</a> step.
+
+   <p>The Mercurial step takes the following arguments:
+
+     <dl>
+<dt><code>repourl</code><dd>(required unless <code>baseURL</code> is provided): the URL at which the
+Mercurial source repository is available.
+
+     <br><dt><code>baseURL</code><dd>(required unless <code>repourl</code> is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+     <br><dt><code>defaultBranch</code><dd>(allowed if and only if <code>baseURL</code> is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to <code>baseURL</code> to create the string that
+will be passed to the <code>hg clone</code> command. 
+</dl>
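+
+   <p>Again, a minimal sketch (hypothetical repository URL,
+<code>BuildFactory</code> named <code>f</code>):
+
+<pre class="example">     from buildbot.steps import source
+     
+     f.addStep(source.Mercurial, mode='update',
+               repourl="http://hg.example.org/MyProject/")
+</pre>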
+
+<div class="node">
+<p><hr>
+<a name="Arch"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Bazaar">Bazaar</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Mercurial">Mercurial</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.2.5 Arch</h5>
+
+<p><a name="index-Arch-Checkout-46"></a><a name="index-buildbot_002esteps_002esource_002eArch-47"></a>
+
+   <p>The <code>Arch</code> build step performs an <a href="http://gnuarch.org/">Arch</a> checkout or update using the <code>tla</code> client. It takes the
+following arguments:
+
+     <dl>
+<dt><code>url</code><dd>(required): this specifies the URL at which the Arch source archive is
+available.
+
+     <br><dt><code>version</code><dd>(required): this specifies which &ldquo;development line&rdquo; (like a branch)
+should be used. This provides the default branch name, but individual
+builds may specify a different one.
+
+     <br><dt><code>archive</code><dd>(optional): Each repository knows its own archive name. If this
+parameter is provided, it must match the repository's archive name. 
+The parameter is accepted for compatibility with the <code>Bazaar</code>
+step, below.
+
+   </dl>
+
+<div class="node">
+<p><hr>
+<a name="Bazaar"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#P4">P4</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Arch">Arch</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.2.6 Bazaar</h5>
+
+<p><a name="index-Bazaar-Checkout-48"></a><a name="index-buildbot_002esteps_002esource_002eBazaar-49"></a>
+
+   <p><code>Bazaar</code> is an alternate implementation of the Arch VC system,
+which uses a client named <code>baz</code>. The checkout semantics are just
+different enough from <code>tla</code> that there is a separate BuildStep for
+it.
+
+   <p>It takes exactly the same arguments as <code>Arch</code>, except that the
+<code>archive=</code> parameter is required. (baz does not emit the archive
+name when you do <code>baz register-archive</code>, so we must provide it
+ourselves).
+
+<div class="node">
+<p><hr>
+<a name="P4"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Bazaar">Bazaar</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.2.7 P4</h5>
+
+<p><a name="index-Perforce-Update-50"></a><a name="index-buildbot_002esteps_002esource_002eP4-51"></a><!-- TODO @bsindex buildbot.steps.source.P4Sync -->
+
+   <p>The <code>P4</code> build step creates a <a href="http://www.perforce.com/">Perforce</a> client specification and performs an update.
+
+     <dl>
+<dt><code>p4base</code><dd>A view into the Perforce depot without branch name or trailing "...". 
+Typically "//depot/proj/". 
+<br><dt><code>defaultBranch</code><dd>A branch name to append on build requests if none is specified. 
+Typically "trunk". 
+<br><dt><code>p4port</code><dd>(optional): the host:port string describing how to get to the P4 Depot
+(repository), used as the -p argument for all p4 commands. 
+<br><dt><code>p4user</code><dd>(optional): the Perforce user, used as the -u argument to all p4
+commands. 
+<br><dt><code>p4passwd</code><dd>(optional): the Perforce password, used as the -P argument to all p4
+commands. 
+<br><dt><code>p4extra_views</code><dd>(optional): a list of (depotpath, clientpath) tuples containing extra
+views to be mapped into the client specification. Both will have
+"/..." appended automatically. The client name and source directory
+will be prepended to the client path. 
+<br><dt><code>p4client</code><dd>(optional): The name of the client to use. In mode='copy' and
+mode='update', it's particularly important that a unique name is used
+for each checkout directory to avoid incorrect synchronization. For
+this reason, Python percent substitution will be performed on this value
+to replace %(slave)s with the slave name and %(builder)s with the
+builder name. The default is "buildbot_%(slave)s_%(builder)s". 
+</dl>
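+
+   <p>A minimal sketch (hypothetical depot path, port, and user) might
+look like:
+
+<pre class="example">     from buildbot.steps import source
+     
+     f.addStep(source.P4,
+               p4base="//depot/proj/", defaultBranch="trunk",
+               p4port="perforce.example.com:1666", p4user="buildbot")
+</pre>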
+
+<div class="node">
+<p><hr>
+<a name="ShellCommand"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Source-Checkout">Source Checkout</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.3 ShellCommand</h4>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eShellCommand-52"></a><!-- TODO @bsindex buildbot.steps.shell.TreeSize -->
+
+   <p>This is a useful base class for just about everything you might want
+to do during a build (except for the initial source checkout). It runs
+a single command in a child shell on the buildslave. All stdout/stderr
+is recorded into a LogFile. The step finishes with a status of FAILURE
+if the command's exit code is non-zero, otherwise it has a status of
+SUCCESS.
+
+   <p>The preferred way to specify the command is with a list of argv strings,
+since this allows for spaces in filenames and avoids doing any fragile
+shell-escaping. You can also specify the command with a single string, in
+which case the string is given to '/bin/sh -c COMMAND' for parsing.
+
+   <p>All ShellCommands are run by default in the &ldquo;workdir&rdquo;, which
+defaults to the &ldquo;<samp><span class="file">build</span></samp>&rdquo; subdirectory of the slave builder's
+base directory. The absolute path of the workdir will thus be the
+slave's basedir (set as an option to <code>buildbot create-slave</code>,
+see <a href="#Creating-a-buildslave">Creating a buildslave</a>) plus the builder's basedir (set in the
+builder's <code>builddir</code> key in master.cfg) plus the workdir
+itself (a class-level attribute of the BuildFactory, defaults to
+&ldquo;<samp><span class="file">build</span></samp>&rdquo;).
+
+   <p><code>ShellCommand</code> arguments:
+
+     <dl>
+<dt><code>command</code><dd>a list of strings (preferred) or single string (discouraged) which
+specifies the command to be run. A list of strings is preferred
+because it can be used directly as an argv array. Using a single
+string (with embedded spaces) requires the buildslave to pass the
+string to /bin/sh for interpretation, which raises all sorts of
+difficult questions about how to escape or interpret shell
+metacharacters.
+
+     <br><dt><code>env</code><dd>a dictionary of environment strings which will be added to the child
+command's environment. For example, to run tests with a different i18n
+language setting, you might use
+
+     <pre class="example">          f.addStep(ShellCommand, command=["make", "test"],
+                    env={'LANG': 'fr_FR'})
+     </pre>
+     <p>These variable settings will override any existing ones in the
+buildslave's environment. The exception is PYTHONPATH, which is merged
+with (actually prepended to) any existing $PYTHONPATH setting. The
+value is treated as a list of directories to prepend, and a single
+string is treated like a one-item list. For example, to prepend both
+<samp><span class="file">/usr/local/lib/python2.3</span></samp> and <samp><span class="file">/home/buildbot/lib/python</span></samp>
+to any existing $PYTHONPATH setting, you would do something like the
+following:
+
+     <pre class="example">          f.addStep(ShellCommand, command=["make", "test"],
+                    env={'PYTHONPATH': ["/usr/local/lib/python2.3",
+                                        "/home/buildbot/lib/python"] })
+     </pre>
+     <br><dt><code>want_stdout</code><dd>if False, stdout from the child process is discarded rather than being
+sent to the buildmaster for inclusion in the step's LogFile.
+
+     <br><dt><code>want_stderr</code><dd>like <code>want_stdout</code> but for stderr. Note that commands run through
+a PTY do not have separate stdout/stderr streams: both are merged into
+stdout.
+
+     <br><dt><code>logfiles</code><dd>Sometimes commands will log interesting data to a local file, rather
+than emitting everything to stdout or stderr. For example, Twisted's
+&ldquo;trial&rdquo; command (which runs unit tests) only presents summary
+information to stdout, and puts the rest into a file named
+<samp><span class="file">_trial_temp/test.log</span></samp>. It is often useful to watch these files
+as the command runs, rather than using <samp><span class="command">/bin/cat</span></samp> to dump
+their contents afterwards.
+
+     <p>The <code>logfiles=</code> argument allows you to collect data from these
+secondary logfiles in near-real-time, as the step is running. It
+accepts a dictionary which maps from a local Log name (which is how
+the log data is presented in the build results) to a remote filename
+(interpreted relative to the build's working directory). Each named
+file will be polled on a regular basis (every couple of seconds) as
+the build runs, and any new text will be sent over to the buildmaster.
+
+     <pre class="example">          f.addStep(ShellCommand, command=["make", "test"],
+                    logfiles={"triallog": "_trial_temp/test.log"})
+     </pre>
+     <br><dt><code>timeout</code><dd>if the command fails to produce any output for this many seconds, it
+is assumed to be locked up and will be killed.
+
+     <br><dt><code>description</code><dd>This will be used to describe the command (on the Waterfall display)
+while the command is still running. It should be a single
+imperfect-tense verb, like &ldquo;compiling&rdquo; or &ldquo;testing&rdquo;. The preferred
+form is a list of short strings, which allows the HTML Waterfall
+display to create narrower columns by emitting a &lt;br&gt; tag between each
+word. You may also provide a single string.
+
+     <br><dt><code>descriptionDone</code><dd>This will be used to describe the command once it has finished. A
+simple noun like &ldquo;compile&rdquo; or &ldquo;tests&rdquo; should be used. Like
+<code>description</code>, this may either be a list of short strings or a
+single string.
+
+     <p>If neither <code>description</code> nor <code>descriptionDone</code> are set, the
+actual command arguments will be used to construct the description. 
+This may be a bit too wide to fit comfortably on the Waterfall
+display.
+
+     <pre class="example">          f.addStep(ShellCommand, command=["make", "test"],
+                    description=["testing"],
+                    descriptionDone=["tests"])
+     </pre>
+     </dl>
+
+<div class="node">
+<p><hr>
+<a name="Simple-ShellCommand-Subclasses"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Python-BuildSteps">Python BuildSteps</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#ShellCommand">ShellCommand</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.4 Simple ShellCommand Subclasses</h4>
+
+<p>Several subclasses of ShellCommand are provided as starting points for
+common build steps. These are all very simple: they just override a few
+parameters so you don't have to specify them yourself, making the master.cfg
+file less verbose.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Configure">Configure</a>
+<li><a accesskey="2" href="#Compile">Compile</a>
+<li><a accesskey="3" href="#Test">Test</a>
+<li><a accesskey="4" href="#Build-Properties">Build Properties</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Configure"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Compile">Compile</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.4.1 Configure</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eConfigure-53"></a>
+This is intended to handle the <code>./configure</code> step from
+autoconf-style projects, or the <code>perl Makefile.PL</code> step from perl
+MakeMaker.pm-style modules. The default command is <code>./configure</code>
+but you can change this by providing a <code>command=</code> parameter.
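+
+   <p>A minimal sketch (assuming a <code>BuildFactory</code> named
+<code>f</code>; the extra configure flag is just a placeholder):
+
+<pre class="example">     from buildbot.steps.shell import Configure
+     
+     f.addStep(Configure)
+     # or, to override the default command:
+     f.addStep(Configure, command=["./configure", "--prefix=/tmp/inst"])
+</pre>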
+
+<div class="node">
+<p><hr>
+<a name="Compile"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Test">Test</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Configure">Configure</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.4.2 Compile</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eCompile-54"></a>
+This is meant to handle compiling or building a project written in C. The
+default command is <code>make all</code>. When the compile is finished, the
+log file is scanned for GCC error/warning messages and a summary log is
+created with any problems that were seen (TODO: the summary is not yet
+created).
+
+<div class="node">
+<p><hr>
+<a name="Test"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Build-Properties">Build Properties</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Compile">Compile</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.4.3 Test</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eTest-55"></a>
+This is meant to handle unit tests. The default command is <code>make
+test</code>, and the <code>warnOnFailure</code> flag is set.
+
+<div class="node">
+<p><hr>
+<a name="Build-Properties"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Test">Test</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.4.4 Build Properties</h5>
+
+<p><a name="index-build-properties-56"></a>
+Each build has a set of &ldquo;Build Properties&rdquo;, which can be used by its
+BuildSteps to modify their actions. For example, the SVN revision
+number of the source code being built is available as a build
+property, and a ShellCommand step could incorporate this number into a
+command which creates a numbered release tarball.
+
+   <p>Some build properties are set when the build starts, such as the
+SourceStamp information. Other properties can be set by BuildSteps as
+they run, for example the various Source steps will set the
+<code>got_revision</code> property to the source revision that was actually
+checked out (which can be useful when the SourceStamp in use merely
+requested the &ldquo;latest revision&rdquo;: <code>got_revision</code> will tell you
+what was actually built).
+
+   <p>In custom BuildSteps, you can get and set the build properties with
+the <code>getProperty</code>/<code>setProperty</code> methods. Each takes a string
+for the name of the property, and returns or accepts an
+arbitrary<a rel="footnote" href="#fn-7" name="fnd-7"><sup>7</sup></a> object. For example:
+
+<pre class="example">     class MakeTarball(ShellCommand):
+         def start(self):
+             self.setCommand(["tar", "czf",
+                              "build-%s.tar.gz" % self.getProperty("revision"),
+                              "source"])
+             ShellCommand.start(self)
+</pre>
+   <p><a name="index-WithProperties-57"></a>
+You can use build properties in ShellCommands by using the
+<code>WithProperties</code> wrapper when setting the arguments of the
+ShellCommand. This interpolates the named build properties into the
+generated shell command.
+
+<pre class="example">     from buildbot.steps.shell import ShellCommand, WithProperties
+     
+     f.addStep(ShellCommand,
+               command=["tar", "czf",
+                        WithProperties("build-%s.tar.gz", "revision"),
+                        "source"])
+</pre>
+   <p>If this BuildStep were used in a tree obtained from Subversion, it
+would create a tarball with a name like <samp><span class="file">build-1234.tar.gz</span></samp>.
+
+   <p>The <code>WithProperties</code> function does <code>printf</code>-style string
+interpolation, using strings obtained by calling
+<code>build.getProperty(propname)</code>. Note that for every <code>%s</code> (or
+<code>%d</code>, etc), you must have exactly one additional argument to
+indicate which build property you want to insert.
+
+   <p>You can also use python dictionary-style string interpolation by using
+the <code>%(propname)s</code> syntax. In this form, the property name goes
+in the parentheses, and WithProperties takes <em>no</em> additional
+arguments:
+
+<pre class="example">     f.addStep(ShellCommand,
+               command=["tar", "czf",
+                        WithProperties("build-%(revision)s.tar.gz"),
+                        "source"])
+</pre>
+   <p>Don't forget the extra &ldquo;s&rdquo; after the closing parenthesis! This is
+the cause of many confusing errors. Also note that you can only use
+WithProperties in the list form of the command= definition. You cannot
+currently use it in the (discouraged) <code>command="stuff"</code>
+single-string form. However, you can use something like
+<code>command=["/bin/sh", "-c", "stuff", WithProperties("stuff")]</code> to
+use both shell expansion and WithProperties interpolation.
+
+   <p>Note that, like python, you can either do positional-argument
+interpolation <em>or</em> keyword-argument interpolation, not both. Thus
+you cannot use a string like
+<code>WithProperties("foo-%(revision)s-%s", "branch")</code>.
+
+   <p>At the moment, the only way to set build properties is by writing a
+custom BuildStep.
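+
+   <p>A minimal sketch of such a step (the class, script, and property
+names here are purely illustrative) might parse its command's output in
+<code>createSummary</code> and record a value for later use:
+
+<pre class="example">     from buildbot.steps.shell import ShellCommand
+     
+     class VersionedBuild(ShellCommand):
+         command = ["./get-version.sh"]
+     
+         def createSummary(self, log):
+             # store the script's one-line output as a build property
+             version = log.getText().strip()
+             self.setProperty("tarball-version", version)
+</pre>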
+
+<h3 class="heading">Common Build Properties</h3>
+
+<p>The following build properties are set when the build is started, and
+are available to all steps.
+
+     <dl>
+<dt><code>branch</code><dd>
+This comes from the build's SourceStamp, and describes which branch is
+being checked out. This will be <code>None</code> (which interpolates into
+<code>WithProperties</code> as an empty string) if the build is on the
+default branch, which is generally the trunk. Otherwise it will be a
+string like &ldquo;branches/beta1.4&rdquo;. The exact syntax depends upon the VC
+system being used.
+
+     <br><dt><code>revision</code><dd>
+This also comes from the SourceStamp, and is the revision of the
+source code tree that was requested from the VC system. When a build
+is requested of a specific revision (as is generally the case when the
+build is triggered by Changes), this will contain the revision
+specification. The syntax depends upon the VC system in use: for SVN
+it is an integer, for Mercurial it is a short string, for Darcs it is
+a rather large string, etc.
+
+     <p>If the &ldquo;force build&rdquo; button was pressed, the revision will be
+<code>None</code>, which means to use the most recent revision available. 
+This is a &ldquo;trunk build&rdquo;. This will be interpolated as an empty
+string.
+
+     <br><dt><code>got_revision</code><dd>
+This is set when a Source step checks out the source tree, and
+provides the revision that was actually obtained from the VC system. 
+In general this should be the same as <code>revision</code>, except for
+trunk builds, where <code>got_revision</code> indicates what revision was
+current when the checkout was performed. This can be used to rebuild
+the same source code later.
+
+     <p>Note that for some VC systems (Darcs in particular), the revision is a
+large string containing newlines, and is not suitable for
+interpolation into a filename.
+
+     <br><dt><code>buildername</code><dd>
+This is a string that indicates which Builder the build was a part of. 
+The combination of buildername and buildnumber uniquely identify a
+build.
+
+     <br><dt><code>buildnumber</code><dd>
+Each build gets a number, scoped to the Builder (so the first build
+performed on any given Builder will have a build number of 0). This
+integer property contains the build's number.
+
+     <br><dt><code>slavename</code><dd>
+This is a string which identifies which buildslave the build is
+running on.
+
+   </dl>
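+
+   <p>For example, these properties can be interpolated into a
+ShellCommand with <code>WithProperties</code> (a sketch; the destination
+host and filename are hypothetical):
+
+<pre class="example">     from buildbot.steps.shell import ShellCommand, WithProperties
+     
+     f.addStep(ShellCommand,
+               command=["scp", "results.tar.gz",
+                        WithProperties("archive.example.org:results-%(buildername)s-%(buildnumber)s.tar.gz")])
+</pre>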
+
+<div class="node">
+<p><hr>
+<a name="Python-BuildSteps"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Transferring-Files">Transferring Files</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.5 Python BuildSteps</h4>
+
+<p>Here are some BuildSteps that are specifically useful for projects
+implemented in Python.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildEPYDoc">BuildEPYDoc</a>
+<li><a accesskey="2" href="#PyFlakes">PyFlakes</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildEPYDoc"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#PyFlakes">PyFlakes</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Python-BuildSteps">Python BuildSteps</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Python-BuildSteps">Python BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.5.1 BuildEPYDoc</h5>
+
+<p><a name="index-buildbot_002esteps_002epython_002eBuildEPYDoc-58"></a>
+<a href="http://epydoc.sourceforge.net/">epydoc</a> is a tool for generating
+API documentation for Python modules from their docstrings. It reads
+all the .py files from your source tree, processes the docstrings
+therein, and creates a large tree of .html files (or a single .pdf
+file).
+
+   <p>The <code>buildbot.steps.python.BuildEPYDoc</code> step will run
+<samp><span class="command">epydoc</span></samp> to produce this API documentation, and will count the
+errors and warnings from its output.
+
+   <p>You must supply the command line to be used. The default is
+<samp><span class="command">make epydocs</span></samp>, which assumes that your project has a Makefile
+with an &ldquo;epydocs&rdquo; target. You might wish to use something like
+<samp><span class="command">epydoc -o apiref source/PKGNAME</span></samp> instead. You might also want
+to add <samp><span class="command">--pdf</span></samp> to generate a PDF file instead of a large tree
+of HTML files.
+
+   <p>The API docs are generated in-place in the build tree (under the
+workdir, in the subdirectory controlled by the &ldquo;-o&rdquo; argument). To
+make them useful, you will probably have to copy them to somewhere
+they can be read. A command like <samp><span class="command">rsync -ad apiref/
+dev.example.com:~public_html/current-apiref/</span></samp> might be useful. You
+might instead want to bundle them into a tarball and publish it in the
+same place where the generated install tarball is placed.
+
+<pre class="example">     from buildbot.steps.python import BuildEPYDoc
+     
+     ...
+     f.addStep(BuildEPYDoc, command=["epydoc", "-o", "apiref", "source/mypkg"])
+</pre>
+   <div class="node">
+<p><hr>
+<a name="PyFlakes"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildEPYDoc">BuildEPYDoc</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Python-BuildSteps">Python BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.5.2 PyFlakes</h5>
+
+<p><a name="index-buildbot_002esteps_002epython_002ePyFlakes-59"></a>
+<a href="http://divmod.org/trac/wiki/DivmodPyflakes">PyFlakes</a> is a tool
+to perform basic static analysis of Python code to look for simple
+errors, like missing imports and references of undefined names. It is
+like a fast and simple form of the C &ldquo;lint&rdquo; program. Other tools
+(like pychecker) provide more detailed results but take longer to run.
+
+   <p>The <code>buildbot.steps.python.PyFlakes</code> step will run pyflakes and
+count the various kinds of errors and warnings it detects.
+
+   <p>You must supply the command line to be used. The default is
+<samp><span class="command">make pyflakes</span></samp>, which assumes you have a top-level Makefile
+with a &ldquo;pyflakes&rdquo; target. You might want to use something like
+<samp><span class="command">pyflakes .</span></samp> or <samp><span class="command">pyflakes src</span></samp>.
+
+<pre class="example">     from buildbot.steps.python import PyFlakes
+     
+     ...
+     f.addStep(PyFlakes, command=["pyflakes", "src"])
+</pre>
+   <div class="node">
+<p><hr>
+<a name="Transferring-Files"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Python-BuildSteps">Python BuildSteps</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.6 Transferring Files</h4>
+
+<p><a name="index-File-Transfer-60"></a><a name="index-buildbot_002esteps_002etransfer_002eFileUpload-61"></a><a name="index-buildbot_002esteps_002etransfer_002eFileDownload-62"></a>
+Most of the work involved in a build will take place on the
+buildslave. But occasionally it is useful to do some work on the
+buildmaster side. The most basic way to involve the buildmaster is
+simply to move a file from the slave to the master, or vice versa. 
+There are a pair of BuildSteps named <code>FileUpload</code> and
+<code>FileDownload</code> to provide this functionality. <code>FileUpload</code>
+moves a file <em>up to</em> the master, while <code>FileDownload</code> moves
+a file <em>down from</em> the master.
+
+   <p>As an example, let's assume that there is a step which produces an
+HTML file within the source tree that contains some sort of generated
+project documentation. We want to move this file to the buildmaster,
+into a <samp><span class="file">~/public_html</span></samp> directory, so it can be visible to
+developers. This file will wind up in the slave-side working directory
+under the name <samp><span class="file">docs/reference.html</span></samp>. We want to put it into the
+master-side <samp><span class="file">~/public_html/ref.html</span></samp>.
+
+<pre class="example">     from buildbot.steps.shell import ShellCommand
+     from buildbot.steps.transfer import FileUpload
+     
+     f.addStep(ShellCommand, command=["make", "docs"])
+     f.addStep(FileUpload,
+               slavesrc="docs/reference.html",
+               masterdest="~/public_html/ref.html")
+</pre>
+   <p>The <code>masterdest=</code> argument will be passed to os.path.expanduser,
+so things like &ldquo;~&rdquo; will be expanded properly. Non-absolute paths
+will be interpreted relative to the buildmaster's base directory. 
+Likewise, the <code>slavesrc=</code> argument will be expanded and
+interpreted relative to the builder's working directory.
+
+   <p>To move a file from the master to the slave, use the
+<code>FileDownload</code> command. For example, let's assume that some step
+requires a configuration file that, for whatever reason, could not be
+recorded in the source code repository or generated on the buildslave
+side:
+
+<pre class="example">     from buildbot.steps.shell import ShellCommand
+     from buildbot.steps.transfer import FileDownload
+     
+     f.addStep(FileDownload,
+               mastersrc="~/todays_build_config.txt",
+               slavedest="build_config.txt")
+     f.addStep(ShellCommand, command=["make", "config"])
+</pre>
+   <p>Like <code>FileUpload</code>, the <code>mastersrc=</code> argument is interpreted
+relative to the buildmaster's base directory, and the
+<code>slavedest=</code> argument is relative to the builder's working
+directory. If the buildslave is running in <samp><span class="file">~buildslave</span></samp>, and the
+builder's &ldquo;builddir&rdquo; is something like <samp><span class="file">tests-i386</span></samp>, then the
+workdir is going to be <samp><span class="file">~buildslave/tests-i386/build</span></samp>, and a
+<code>slavedest=</code> of <samp><span class="file">foo/bar.html</span></samp> will get put in
+<samp><span class="file">~buildslave/tests-i386/build/foo/bar.html</span></samp>. Remember that
+neither of these commands will create missing directories for you.
+
+<h4 class="subheading">Other Parameters</h4>
+
+<p>The <code>maxsize=</code> argument lets you set a maximum size for the file
+to be transferred. This may help to avoid surprises: transferring a
+100MB coredump when you were expecting to move a 10kB status file
+might take an awfully long time. The <code>blocksize=</code> argument
+controls how the file is sent over the network: larger blocksizes are
+slightly more efficient but also consume more memory on each end, and
+there is a hard-coded limit of about 640kB.
+
+   <p>The <code>mode=</code> argument allows you to control the access permissions
+of the target file, traditionally expressed as an octal integer. The
+most common value is probably 0755, which sets the &ldquo;x&rdquo; executable
+bit on the file (useful for shell scripts and the like). The default
+value for <code>mode=</code> is None, which means the permission bits will
+default to whatever the umask of the writing process is. The default
+umask tends to be fairly restrictive, but at least on the buildslave
+you can make it less restrictive with a <code>--umask</code> command-line option at
+creation time (see <a href="#Buildslave-Options">Buildslave Options</a>).
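+
+   <p>A sketch combining these options with the earlier
+<code>FileUpload</code> example:
+
+<pre class="example">     f.addStep(FileUpload,
+               slavesrc="docs/reference.html",
+               masterdest="~/public_html/ref.html",
+               maxsize=10*1024*1024, blocksize=64*1024, mode=0644)
+</pre>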
+
+<div class="node">
+<p><hr>
+<a name="Writing-New-BuildSteps"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Transferring-Files">Transferring Files</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.7 Writing New BuildSteps</h4>
+
+<p>While it is a good idea to keep your build process self-contained in
+the source code tree, sometimes it is convenient to put more
+intelligence into your Buildbot configuration. One way to do this is
+to write a custom BuildStep. Once written, this Step can be used in
+the <samp><span class="file">master.cfg</span></samp> file.
+
+   <p>The best reason for writing a custom BuildStep is to better parse the
+results of the command being run. For example, a BuildStep that knows
+about JUnit could look at the logfiles to determine which tests had
+been run, how many passed and how many failed, and then report more
+detailed information than a simple <code>rc==0</code> -based &ldquo;good/bad&rdquo;
+decision.
+
+   <p>TODO: add more description of BuildSteps.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildStep-LogFiles">BuildStep LogFiles</a>
+<li><a accesskey="2" href="#Adding-LogObservers">Adding LogObservers</a>
+<li><a accesskey="3" href="#BuildStep-URLs">BuildStep URLs</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildStep-LogFiles"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Adding-LogObservers">Adding LogObservers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.7.1 BuildStep LogFiles</h5>
+
+<p>Each BuildStep has a collection of &ldquo;logfiles&rdquo;. Each one has a short
+name, like &ldquo;stdio&rdquo; or &ldquo;warnings&rdquo;. Each LogFile contains an
+arbitrary amount of text, usually the contents of some output file
+generated during a build or test step, or a record of everything that
+was printed to stdout/stderr during the execution of some command.
+
+   <p>These LogFiles are stored to disk, so they can be retrieved later.
+
+   <p>Each can contain multiple &ldquo;channels&rdquo;, generally limited to three
+basic ones: stdout, stderr, and &ldquo;headers&rdquo;. For example, when a
+ShellCommand runs, it writes a few lines to the &ldquo;headers&rdquo; channel to
+indicate the exact argv strings being run, which directory the command
+is being executed in, and the contents of the current environment
+variables. Then, as the command runs, it adds a lot of &ldquo;stdout&rdquo; and
+&ldquo;stderr&rdquo; messages. When the command finishes, a final &ldquo;header&rdquo;
+line is added with the exit code of the process.
+
+   <p>Status display plugins can format these different channels in
+different ways. For example, the web page shows LogFiles as text/html,
+with header lines in blue text, stdout in black, and stderr in red. A
+different URL is available which provides a text/plain format, in
+which stdout and stderr are collapsed together, and header lines are
+stripped completely. This latter option makes it easy to save the
+results to a file and run <samp><span class="command">grep</span></samp> or whatever against the
+output.
+
+   <p>Each BuildStep contains a mapping (implemented in a python dictionary)
+from LogFile name to the actual LogFile objects. Status plugins can
+get a list of LogFiles and display them, for example, as a list of
+HREF links that, when clicked, provide the full contents of the LogFile.
+
+<h3 class="heading">Using LogFiles in custom BuildSteps</h3>
+
+<p>The most common way for a custom BuildStep to use a LogFile is to
+summarize the results of a ShellCommand (after the command has
+finished running). For example, a compile step with thousands of lines
+of output might want to create a summary of just the warning messages. 
+If you were doing this from a shell, you would use something like:
+
+<pre class="example">     grep "warning:" output.log &gt;warnings.log
+</pre>
+   <p>In a custom BuildStep, you could instead create a &ldquo;warnings&rdquo; LogFile
+that contained the same text. To do this, you would add code to your
+<code>createSummary</code> method that pulls lines from the main output log
+and creates a new LogFile with the results:
+
+<pre class="example">         def createSummary(self, log):
+             warnings = []
+             for line in log.readlines():
+                 if "warning:" in line:
+                     warnings.append(line)
+             self.addCompleteLog('warnings', "".join(warnings))
+</pre>
+   <p>This example uses the <code>addCompleteLog</code> method, which creates a
+new LogFile, puts some text in it, and then &ldquo;closes&rdquo; it, meaning
+that no further contents will be added. This LogFile will appear in
+the HTML display under an HREF with the name &ldquo;warnings&rdquo;, since that
+is the name of the LogFile.
+
+   <p>You can also use <code>addHTMLLog</code> to create a complete (closed)
+LogFile that contains HTML instead of plain text. The normal LogFile
+will be HTML-escaped if presented through a web page, but the HTML
+LogFile will not. At the moment this is only used to present a pretty
+HTML representation of an otherwise ugly exception traceback when
+something goes badly wrong during the BuildStep.
+
+   <p>In contrast, you might want to create a new LogFile at the beginning
+of the step, and add text to it as the command runs. You can create
+the LogFile and attach it to the build by calling <code>addLog</code>, which
+returns the LogFile object. You then add text to this LogFile by
+calling methods like <code>addStdout</code> and <code>addHeader</code>. When you
+are done, you must call the <code>finish</code> method so the LogFile can be
+closed. It may be useful to create and populate a LogFile like this
+from a LogObserver method (see <a href="#Adding-LogObservers">Adding LogObservers</a>).
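+
+   <p>A minimal sketch of this incremental style (the method and log
+names are illustrative only):
+
+<pre class="example">         # inside a custom BuildStep
+         def recordProgress(self):
+             log = self.addLog("progress")        # create and attach the LogFile
+             log.addHeader("starting the slow part\n")
+             log.addStdout("step 1 done\n")
+             log.finish()                         # no further additions allowed
+</pre>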
+
+   <p>The <code>logfiles=</code> argument to <code>ShellCommand</code>
+(see <a href="#ShellCommand">ShellCommand</a>) creates new LogFiles and fills them in realtime
+by asking the buildslave to watch an actual file on disk. The
+buildslave will look for additions in the target file and report them
+back to the BuildStep. These additions will be added to the LogFile by
+calling <code>addStdout</code>. These secondary LogFiles can be used as the
+source of a LogObserver just like the normal &ldquo;stdio&rdquo; LogFile.
+
+<div class="node">
+<p><hr>
+<a name="Adding-LogObservers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildStep-URLs">BuildStep URLs</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildStep-LogFiles">BuildStep LogFiles</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.7.2 Adding LogObservers</h5>
+
+<p><a name="index-LogObserver-63"></a><a name="index-LogLineObserver-64"></a>
+Most shell commands emit messages to stdout or stderr as they operate,
+especially if you ask them nicely with a <code>--verbose</code> flag of some
+sort. They may also write text to a log file while they run. Your
+BuildStep can watch this output as it arrives, to keep track of how
+much progress the command has made. You can get a better measure of
+progress by counting the number of source files compiled or test cases
+run than by merely tracking the number of bytes that have been written
+to stdout. This improves the accuracy and the smoothness of the ETA
+display.
+
+   <p>To accomplish this, you will need to attach a <code>LogObserver</code> to
+one of the log channels, most commonly to the &ldquo;stdio&rdquo; channel but
+perhaps to another one which tracks a log file. This observer is given
+all text as it is emitted from the command, and has the opportunity to
+parse that output incrementally. Once the observer has decided that
+some event has occurred (like a source file being compiled), it can
+use the <code>setProgress</code> method to tell the BuildStep about the
+progress that this event represents.
+
+   <p>There are a number of pre-built <code>LogObserver</code> classes that you
+can choose from (defined in <code>buildbot.process.buildstep</code>), and of
+course you can subclass them to add further customization. The
+<code>LogLineObserver</code> class handles the grunt work of buffering and
+scanning for end-of-line delimiters, allowing your parser to operate
+on complete stdout/stderr lines.
+
+   <p>For example, let's take a look at the <code>TrialTestCaseCounter</code>,
+which is used by the Trial step to count test cases as they are run. 
+As Trial executes, it emits lines like the following:
+
+<pre class="example">     buildbot.test.test_config.ConfigTest.testDebugPassword ... [OK]
+     buildbot.test.test_config.ConfigTest.testEmpty ... [OK]
+     buildbot.test.test_config.ConfigTest.testIRC ... [FAIL]
+     buildbot.test.test_config.ConfigTest.testLocks ... [OK]
+</pre>
+   <p>When the tests are finished, trial emits a long line of &ldquo;======&rdquo; and
+then some lines which summarize the tests that failed. We want to
+avoid parsing these trailing lines, because their format is less
+well-defined than the &ldquo;[OK]&rdquo; lines.
+
+   <p>The parser class looks like this:
+
+<pre class="example">     import re
+     
+     from buildbot.process.buildstep import LogLineObserver
+     
+     class TrialTestCaseCounter(LogLineObserver):
+         _line_re = re.compile(r'^([\w\.]+) \.\.\. \[([^\]]+)\]$')
+         numTests = 0
+         finished = False
+     
+         def outLineReceived(self, line):
+             if self.finished:
+                 return
+             if line.startswith("=" * 40):
+                 self.finished = True
+                 return
+     
+             m = self._line_re.search(line.strip())
+             if m:
+                 testname, result = m.groups()
+                 self.numTests += 1
+                 self.step.setProgress('tests', self.numTests)
+</pre>
+   <p>This parser only pays attention to stdout, since that's where trial
+writes the progress lines. It has a mode flag named <code>finished</code> to
+ignore everything after the &ldquo;====&rdquo; marker, and a scary-looking
+regular expression to match each line while hopefully ignoring other
+messages that might get displayed as the test runs.
+
+   <p>Each time it identifies a test has been completed, it increments its
+counter and delivers the new progress value to the step with
+<code>self.step.setProgress</code>. This class is specifically measuring
+progress along the &ldquo;tests&rdquo; metric, in units of test cases (as
+opposed to other kinds of progress like the &ldquo;output&rdquo; metric, which
+measures in units of bytes). The Progress-tracking code uses each
+progress metric separately to come up with an overall completion
+percentage and an ETA value.
+
+   <p>To connect this parser into the <code>Trial</code> BuildStep,
+<code>Trial.__init__</code> ends with the following clause:
+
+<pre class="example">             # this counter will feed Progress along the 'test cases' metric
+             counter = TrialTestCaseCounter()
+             self.addLogObserver('stdio', counter)
+</pre>
+   <p>This creates a TrialTestCaseCounter and tells the step that the
+counter wants to watch the &ldquo;stdio&rdquo; log. The observer is
+automatically given a reference to the step in its <code>.step</code>
+attribute.
+
+<h4 class="subheading">A Somewhat Whimsical Example</h4>
+
+<p>Let's say that we've got some snazzy new unit-test framework called
+Framboozle. It's the hottest thing since sliced bread. It slices, it
+dices, it runs unit tests like there's no tomorrow. Plus if your unit
+tests fail, you can use its name for a Web 2.1 startup company, make
+millions of dollars, and hire engineers to fix the bugs for you, while
+you spend your afternoons lazily hang-gliding along a scenic pacific
+beach, blissfully unconcerned about the state of your
+tests.<a rel="footnote" href="#fn-8" name="fnd-8"><sup>8</sup></a>
+
+   <p>To run a Framboozle-enabled test suite, you just run the 'framboozler'
+command from the top of your source code tree. The 'framboozler'
+command emits a bunch of stuff to stdout, but the most interesting bit
+is that it emits the line "FNURRRGH!" every time it finishes running a
+test case<a rel="footnote" href="#fn-9" name="fnd-9"><sup>9</sup></a>. You'd like to have a test-case counting LogObserver that
+watches for these lines and counts them, because counting them will
+help the buildbot more accurately calculate how long the build will
+take, and this will let you know exactly how long you can sneak out of
+the office for your hang-gliding lessons without anyone noticing that
+you're gone.
+
+   <p>This will involve writing a new BuildStep (probably named
+"Framboozle") which inherits from ShellCommand. The BuildStep class
+definition itself will look something like this:
+
+<pre class="example">     # START
+     from buildbot.steps.shell import ShellCommand
+     from buildbot.process.buildstep import LogLineObserver
+     
+     class FNURRRGHCounter(LogLineObserver):
+         numTests = 0
+         def outLineReceived(self, line):
+             if "FNURRRGH!" in line:
+                 self.numTests += 1
+                 self.step.setProgress('tests', self.numTests)
+     
+     class Framboozle(ShellCommand):
+         command = ["framboozler"]
+     
+         def __init__(self, **kwargs):
+             ShellCommand.__init__(self, **kwargs)   # always upcall!
+             counter = FNURRRGHCounter()
+             self.addLogObserver('stdio', counter)
+     # FINISH
+</pre>
+   <p>So that's the code that we want to wind up using. How do we actually
+deploy it?
+
+   <p>You have a couple of different options.
+
+   <p>Option 1: The simplest technique is to simply put this text
+(everything from START to FINISH) in your master.cfg file, somewhere
+before the BuildFactory definition where you actually use it in a
+clause like:
+
+<pre class="example">     f = BuildFactory()
+     f.addStep(SVN, svnurl="stuff")
+     f.addStep(Framboozle)
+</pre>
+   <p>Remember that master.cfg is secretly just a python program with one
+job: populating the BuildmasterConfig dictionary. And python programs
+are allowed to define as many classes as they like. So you can define
+classes and use them in the same file, just as long as the class is
+defined before some other code tries to use it.
+
+   <p>This is easy, and it keeps the point of definition very close to the
+point of use, and whoever replaces you after that unfortunate
+hang-gliding accident will appreciate being able to easily figure out
+what the heck this stupid "Framboozle" step is doing anyways. The
+downside is that every time you reload the config file, the Framboozle
+class will get redefined, which means that the buildmaster will think
+that you've reconfigured all the Builders that use it, even though
+nothing changed. Bleh.
+
+   <p>Option 2: Instead, we can put this code in a separate file, and import
+it into the master.cfg file just like we would the normal buildsteps
+like ShellCommand and SVN.
+
+   <p>Create a directory named ~/lib/python, put everything from START to
+FINISH in ~/lib/python/framboozle.py, and run your buildmaster using:
+
+<pre class="example">      PYTHONPATH=~/lib/python buildbot start MASTERDIR
+</pre>
+   <p>or use the <samp><span class="file">Makefile.buildbot</span></samp> to control the way
+<samp><span class="command">buildbot start</span></samp> works. Or add something like this to
+something like your ~/.bashrc or ~/.bash_profile or ~/.cshrc:
+
+<pre class="example">      export PYTHONPATH=~/lib/python
+</pre>
+   <p>Once we've done this, our master.cfg can look like:
+
+<pre class="example">     from framboozle import Framboozle
+     f = BuildFactory()
+     f.addStep(SVN, svnurl="stuff")
+     f.addStep(Framboozle)
+</pre>
+   <p>or:
+
+<pre class="example">     import framboozle
+     f = BuildFactory()
+     f.addStep(SVN, svnurl="stuff")
+     f.addStep(framboozle.Framboozle)
+</pre>
+   <p>(check out the python docs for details about how "import" and "from A
+import B" work).
+
+   <p>What we've done here is to tell python that every time it handles an
+"import" statement for some named module, it should look in our
+~/lib/python/ for that module before it looks anywhere else. After our
+directories, it will try in a bunch of standard directories too
+(including the one where buildbot is installed). By setting the
+PYTHONPATH environment variable, you can add directories to the front
+of this search list.
+
+   <p>Python knows that once it "import"s a file, it doesn't need to
+re-import it again. This means that reconfiguring the buildmaster
+(with "buildbot reconfig", for example) won't make it think the
+Framboozle class has changed every time, so the Builders that use it
+will not be spuriously restarted. On the other hand, you either have
+to start your buildmaster in a slightly weird way, or you have to
+modify your environment to set the PYTHONPATH variable.
+
+   <p>Option 3: Install this code into a standard python library directory
+
+   <p>Find out what your python's standard include path is by asking it:
+
+<pre class="example">     80:warner at luther% python
+     Python 2.4.4c0 (#2, Oct  2 2006, 00:57:46)
+     [GCC 4.1.2 20060928 (prerelease) (Debian 4.1.1-15)] on linux2
+     Type "help", "copyright", "credits" or "license" for more information.
+     &gt;&gt;&gt; import sys
+     &gt;&gt;&gt; print sys.path
+     ['', '/usr/lib/python24.zip', '/usr/lib/python2.4', '/usr/lib/python2.4/plat-linux2', '/usr/lib/python2.4/lib-tk', '/usr/lib/python2.4/lib-dynload', '/usr/local/lib/python2.4/site-packages', '/usr/lib/python2.4/site-packages', '/usr/lib/python2.4/site-packages/Numeric', '/var/lib/python-support/python2.4', '/usr/lib/site-python']
+     &gt;&gt;&gt;
+</pre>
+   <p>In this case, putting the code into
+/usr/local/lib/python2.4/site-packages/framboozle.py would work just
+fine. We can use the same master.cfg "import framboozle" statement as
+in Option 2. By putting it in a standard include directory (instead of
+the decidedly non-standard ~/lib/python), we don't even have to set
+PYTHONPATH to anything special. The downside is that you probably have
+to be root to write to one of those standard include directories.
+
+   <p>Option 4: Submit the code for inclusion in the Buildbot distribution
+
+   <p>Contribute the code in an Enhancement Request on SourceForge, via
+http://buildbot.sf.net . Lobby, convince, coerce, bribe, badger,
+harass, threaten, or otherwise encourage the author to accept the
+patch. This lets you do something like:
+
+<pre class="example">     from buildbot.steps import framboozle
+     f = BuildFactory()
+     f.addStep(SVN, svnurl="stuff")
+     f.addStep(framboozle.Framboozle)
+</pre>
+   <p>And then you don't even have to install framboozle.py anywhere on your
+system, since it will ship with Buildbot. You don't have to be root,
+you don't have to set PYTHONPATH. But you do have to make a good case
+for Framboozle being worth going into the main distribution, you'll
+probably have to provide docs and some unit test cases, you'll need to
+figure out what kind of beer the author likes, and then you'll have to
+wait until the next release. But in some environments, all this is
+easier than getting root on your buildmaster box, so the tradeoffs may
+actually be worth it.
+
+   <p>Putting the code in master.cfg (1) makes it available to that
+buildmaster instance. Putting it in a file in a personal library
+directory (2) makes it available for any buildmasters you might be
+running. Putting it in a file in a system-wide shared library
+directory (3) makes it available for any buildmasters that anyone on
+that system might be running. Getting it into the buildbot's upstream
+repository (4) makes it available for any buildmasters that anyone in
+the world might be running. It's all a matter of how widely you want
+to deploy that new class.
+
+<div class="node">
+<p><hr>
+<a name="BuildStep-URLs"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Adding-LogObservers">Adding LogObservers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.7.3 BuildStep URLs</h5>
+
+<p><a name="index-links-65"></a><a name="index-BuildStep-URLs-66"></a><a name="index-addURL-67"></a>
+Each BuildStep has a collection of &ldquo;links&rdquo;. Like its collection of
+LogFiles, each link has a name and a target URL. The web status page
+creates HREFs for each link in the same box as it does for LogFiles,
+except that the target of the link is the external URL instead of an
+internal link to a page that shows the contents of the LogFile.
+
+   <p>These external links can be used to point at build information hosted
+on other servers. For example, the test process might produce an
+intricate description of which tests passed and failed, or some sort
+of code coverage data in HTML form, or a PNG or GIF image with a graph
+of memory usage over time. The external link can provide an easy way
+for users to navigate from the buildbot's status page to these
+external web sites or file servers. Note that the step itself is
+responsible for ensuring that there will be a document available at
+the given URL (perhaps by using <samp><span class="command">scp</span></samp> to copy the HTML output
+to a <samp><span class="file">~/public_html/</span></samp> directory on a remote web server). Calling
+<code>addURL</code> does not magically populate a web server.
+
+   <p>To set one of these links, the BuildStep should call the <code>addURL</code>
+method with the name of the link and the target URL. Multiple URLs can
+be set.
+
+   <p>In this example, we assume that the <samp><span class="command">make test</span></samp> command causes
+a collection of HTML files to be created and put somewhere on the
+coverage.example.org web server, in a filename that incorporates the
+build number.
+
+<pre class="example">     class TestWithCodeCoverage(BuildStep):
+         command = ["make", "test",
+                    WithProperties("buildnum=%s" % "buildnumber")]
+     
+         def createSummary(self, log):
+             buildnumber = self.getProperty("buildnumber")
+             url = "http://coverage.example.org/builds/%s.html" % buildnumber
+             self.addURL("coverage", url)
+</pre>
+   <p>You might also want to extract the URL from some special message
+output by the build process itself:
+
+<pre class="example">     class TestWithCodeCoverage(BuildStep):
+         command = ["make", "test",
+                    WithProperties("buildnum=%s" % "buildnumber")]
+     
+         def createSummary(self, log):
+             output = StringIO(log.getText())
+             for line in output.readlines():
+                 if line.startswith("coverage-url:"):
+                     url = line[len("coverage-url:"):].strip()
+                     self.addURL("coverage", url)
+                     return
+</pre>
+   <p>Note that a build process which emits both stdout and stderr might
+cause this line to be split or interleaved between other lines. It
+might be necessary to restrict the getText() call to only stdout with
+something like this:
+
+<pre class="example">             output = StringIO("".join([c[1]
+                                        for c in log.getChunks()
+                                        if c[0] == LOG_CHANNEL_STDOUT]))
+</pre>
+   <p>Of course if the build is run under a PTY, then stdout and stderr will
+be merged before the buildbot ever sees them, so such interleaving
+will be unavoidable.
+
+<div class="node">
+<p><hr>
+<a name="Interlocks"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Build-Factories">Build Factories</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Steps">Build Steps</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Process">Build Process</a>
+
+</div>
+
+<h3 class="section">6.2 Interlocks</h3>
+
+<p><a name="index-locks-68"></a><a name="index-buildbot_002elocks_002eMasterLock-69"></a><a name="index-buildbot_002elocks_002eSlaveLock-70"></a>
+For various reasons, you may want to prevent certain Steps (or perhaps
+entire Builds) from running simultaneously. Limited CPU speed or
+network bandwidth to the VC server, problems with simultaneous access
+to a database server used by unit tests, or multiple Builds which
+access shared state may all require some kind of interlock to prevent
+corruption, confusion, or resource overload. These resources might
+require completely exclusive access, or it might be sufficient to
+establish a limit of two or three simultaneous builds.
+
+   <p><code>Locks</code> are the mechanism used to express these kinds of
+constraints on when Builds or Steps can be run. There are two kinds of
+<code>Locks</code>, each with their own scope: <code>MasterLock</code> instances
+are scoped to the buildbot as a whole, while <code>SlaveLock</code>s are
+scoped to a single buildslave. This means that each buildslave has a
+separate copy of each <code>SlaveLock</code>, which could enforce a
+one-Build-at-a-time limit for each machine, but still allow as many
+simultaneous builds as there are machines.
+
+   <p>Each <code>Lock</code> is created with a unique name. Each lock gets a count
+of how many owners it may have: how many processes can claim it at the
+same time. This limit defaults to one, and is controllable through the
+<code>maxCount</code> argument. On <code>SlaveLock</code>s you can set the owner
+count on a per-slave basis by providing a dictionary (that maps from
+slavename to maximum owner count) to its <code>maxCountForSlave</code>
+argument. Any buildslaves that aren't mentioned in
+<code>maxCountForSlave</code> get their owner count from <code>maxCount</code>.
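+
+   <p>For example, a minimal sketch (the lock and slave names here are
+hypothetical) of a buildmaster-wide lock with a larger owner count, and
+a per-slave lock with a per-slave limit:
+
+<pre class="example">     from buildbot import locks
+     
+     # up to three simultaneous owners anywhere on this buildmaster
+     api_lock = locks.MasterLock("api", maxCount=3)
+     # one build at a time by default, but two at a time on 'bot-fast'
+     compile_lock = locks.SlaveLock("compile", maxCount=1,
+                                    maxCountForSlave={'bot-fast': 2})
+</pre>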
+
+   <p>To use a lock, simply include it in the <code>locks=</code> argument of the
+<code>BuildStep</code> object that should obtain the lock before it runs. 
+This argument accepts a list of <code>Lock</code> objects: the Step will
+acquire all of them before it runs.
+
+   <p>To claim a lock for the whole Build, add a <code>'locks'</code> key to the
+builder specification dictionary with the same list of <code>Lock</code>
+objects. (This is the dictionary that has the <code>'name'</code>,
+<code>'slavename'</code>, <code>'builddir'</code>, and <code>'factory'</code> keys). The
+<code>Build</code> object also accepts a <code>locks=</code> argument, but unless
+you are writing your own <code>BuildFactory</code> subclass then it will be
+easier to set the locks in the builder dictionary.
+
+   <p>Note that there are no partial-acquire or partial-release semantics:
+this prevents deadlocks caused by two Steps each waiting for a lock
+held by the other<a rel="footnote" href="#fn-10" name="fnd-10"><sup>10</sup></a>. This also means
+that waiting to acquire a <code>Lock</code> can take an arbitrarily long
+time: if the buildmaster is very busy, a Step or Build which requires
+only one <code>Lock</code> may starve another that is waiting for that
+<code>Lock</code> plus some others.
+
+   <p>In the following example, we run the same build on three different
+platforms. The unit-test steps of these builds all use a common
+database server, and would interfere with each other if allowed to run
+simultaneously. The <code>Lock</code> prevents more than one of these builds
+from happening at the same time.
+
+<pre class="example">     from buildbot import locks
+     from buildbot.steps import source, shell
+     from buildbot.process import factory
+     
+     db_lock = locks.MasterLock("database")
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, svnurl="http://example.org/svn/Trunk")
+     f.addStep(shell.ShellCommand, command="make all")
+     f.addStep(shell.ShellCommand, command="make test", locks=[db_lock])
+     b1 = {'name': 'full1', 'slavename': 'bot-1', 'builddir': 'f1', 'factory': f}
+     b2 = {'name': 'full2', 'slavename': 'bot-2', 'builddir': 'f2', 'factory': f}
+     b3 = {'name': 'full3', 'slavename': 'bot-3', 'builddir': 'f3', 'factory': f}
+     c['builders'] = [b1, b2, b3]
+</pre>
+   <p>In the next example, we have one buildslave hosting three separate
+Builders (each running tests against a different version of Python). 
+The machine which hosts this buildslave is not particularly fast, so
+we want to prevent all three builds from all happening at the same
+time. (Assume we've experimentally determined that one build leaves
+unused CPU capacity, three builds cause a lot of disk thrashing, but
+two builds at a time is Just Right). We use a <code>SlaveLock</code> because
+the builds happening on this one slow slave should not affect builds
+running on other slaves, and we use the lock on the build as a whole
+because the slave is so slow that even multiple simultaneous SVN
+checkouts would be too taxing. We set <code>maxCount=2</code> to achieve our
+goal of two simultaneous builds per slave.
+
+<pre class="example">     from buildbot import locks
+     from buildbot.steps import source
+     from buildbot.process import factory
+     from buildbot.process.factory import s
+     
+     slow_lock = locks.SlaveLock("cpu", maxCount=2)
+     source = s(source.SVN, svnurl="http://example.org/svn/Trunk")
+     f22 = factory.Trial(source, trialpython=["python2.2"])
+     f23 = factory.Trial(source, trialpython=["python2.3"])
+     f24 = factory.Trial(source, trialpython=["python2.4"])
+     b1 = {'name': 'p22', 'slavename': 'bot-1', 'builddir': 'p22', 'factory': f22,
+           'locks': [slow_lock] }
+     b2 = {'name': 'p23', 'slavename': 'bot-1', 'builddir': 'p23', 'factory': f23,
+           'locks': [slow_lock] }
+     b3 = {'name': 'p24', 'slavename': 'bot-1', 'builddir': 'p24', 'factory': f24,
+           'locks': [slow_lock] }
+     c['builders'] = [b1, b2, b3]
+</pre>
+   <p>In the last example, we use two Locks at the same time. In this case,
+we're concerned about both of the previous constraints, but we'll say
+that only the tests are computationally intensive, and that they have
+been split into those which use the database and those which do not. 
+In addition, two of the Builds run on a fast machine which does not
+need to worry about the cpu lock, but which still must be prevented
+from simultaneous database access. We use <code>maxCountForSlave</code> to
+limit the slow machine to one simultaneous build, but allow practically
+unlimited concurrent builds on the fast machine.
+
+<pre class="example">     from buildbot import locks
+     from buildbot.steps import source, shell
+     from buildbot.process import factory
+     
+     db_lock = locks.MasterLock("database")
+     slavecounts = {"bot-slow": 1, "bot-fast": 100}
+     cpu_lock = locks.SlaveLock("cpu", maxCountForSlave=slavecounts)
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, svnurl="http://example.org/svn/Trunk")
+     f.addStep(shell.ShellCommand, command="make all", locks=[cpu_lock])
+     f.addStep(shell.ShellCommand, command="make test", locks=[cpu_lock])
+     f.addStep(shell.ShellCommand, command="make db-test",
+                                   locks=[db_lock, cpu_lock])
+     
+     b1 = {'name': 'full1', 'slavename': 'bot-slow', builddir='full1',
+           'factory': f}
+     b2 = {'name': 'full2', 'slavename': 'bot-slow', builddir='full2',
+           'factory': f}
+     b3 = {'name': 'full3', 'slavename': 'bot-fast', builddir='full3',
+           'factory': f}
+     b4 = {'name': 'full4', 'slavename': 'bot-fast', builddir='full4',
+           'factory': f}
+     c['builders'] = [b1, b2, b3, b4]
+</pre>
+   <p>As a final note, remember that a unit test system which breaks when
+multiple people run it at the same time is fragile and should be
+fixed. Asking your human developers to serialize themselves when
+running unit tests will just discourage them from running the unit
+tests at all. Find a way to fix this: change the database tests to
+create a new (uniquely-named) user or table for each test run, don't
+use fixed listening TCP ports for network tests (instead listen on
+port 0 to let the kernel choose a port for you and then query the
+socket to find out what port was allocated). <code>MasterLock</code>s can be
+used to accommodate broken test systems like this, but are really
+intended for other purposes: build processes that store or retrieve
+products in shared directories, or which do things that human
+developers would not (or which might slow down or break in ways that
+require human attention to deal with).
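+
+   <p>As a sketch of the port-0 trick (plain Python, independent of the
+Buildbot itself), a test can let the kernel pick a free port and then
+ask the socket which port it was given:
+
+<pre class="example">     import socket
+     
+     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+     s.bind(("127.0.0.1", 0))    # port 0: let the kernel choose
+     s.listen(1)
+     host, port = s.getsockname()
+     print "listening on port", port
+</pre>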
+
+   <p><code>SlaveLock</code>s can be used to keep automated performance tests
+from interfering with each other, when there are multiple Builders all
+using the same buildslave. But they can't prevent other users from
+running CPU-intensive jobs on that host while the tests are running.
+
+<div class="node">
+<p><hr>
+<a name="Build-Factories"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Interlocks">Interlocks</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Process">Build Process</a>
+
+</div>
+
+<h3 class="section">6.3 Build Factories</h3>
+
+<p>Each Builder is equipped with a &ldquo;build factory&rdquo;, which is
+responsible for producing the actual <code>Build</code> objects that perform
+each build. This factory is created in the configuration file, and
+attached to a Builder through the <code>factory</code> element of its
+dictionary.
+
+   <p>The standard <code>BuildFactory</code> object creates <code>Build</code> objects
+by default. These Builds will each execute a collection of BuildSteps
+in a fixed sequence. Each step can affect the results of the build,
+but in general there is little intelligence to tie the different steps
+together. You can create subclasses of <code>Build</code> to implement more
+sophisticated build processes, and then use a subclass of
+<code>BuildFactory</code> (or simply set the <code>buildClass</code> attribute) to
+create instances of your new Build subclass.
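+
+   <p>A minimal sketch of that last approach (the <code>NightlyBuild</code>
+subclass here is hypothetical) just points the factory's
+<code>buildClass</code> attribute at the new class:
+
+<pre class="example">     from buildbot.process import base, factory
+     
+     class NightlyBuild(base.Build):
+         # override Build methods here to customize the build process
+         pass
+     
+     f = factory.BuildFactory()
+     f.buildClass = NightlyBuild
+</pre>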
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildStep-Objects">BuildStep Objects</a>
+<li><a accesskey="2" href="#BuildFactory">BuildFactory</a>
+<li><a accesskey="3" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildStep-Objects"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildFactory">BuildFactory</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Factories">Build Factories</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Factories">Build Factories</a>
+
+</div>
+
+<h4 class="subsection">6.3.1 BuildStep Objects</h4>
+
+<p>The steps used by these builds are all subclasses of <code>BuildStep</code>. 
+The standard ones provided with Buildbot are documented later,
+See <a href="#Build-Steps">Build Steps</a>. You can also write your own subclasses to use in
+builds.
+
+   <p>The basic behavior for a <code>BuildStep</code> is to:
+
+     <ul>
+<li>run for a while, then stop
+<li>possibly invoke some RemoteCommands on the attached build slave
+<li>possibly produce a set of log files
+<li>finish with a status described by one of four values defined in
+buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, SKIPPED
+<li>provide a list of short strings to describe the step
+<li>define a color (generally green, orange, or red) with which the
+step should be displayed
+</ul>
+
+   <p>More sophisticated steps may produce additional information and
+provide it to later build steps, or store it in the factory to provide
+to later builds.
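+
+   <p>A very small sketch (not a complete, useful step) of a custom
+BuildStep that does no remote work at all might look like:
+
+<pre class="example">     from buildbot.process.buildstep import BuildStep
+     from buildbot.status.builder import SUCCESS
+     
+     class HelloStep(BuildStep):
+         name = "hello"
+     
+         def start(self):
+             # no RemoteCommands and no log files: just finish immediately
+             self.finished(SUCCESS)
+     
+         def describe(self, done=False):
+             return ["hello"]
+</pre>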
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildFactory-Attributes">BuildFactory Attributes</a>
+<li><a accesskey="2" href="#Quick-builds">Quick builds</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildFactory"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildStep-Objects">BuildStep Objects</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Factories">Build Factories</a>
+
+</div>
+
+<h4 class="subsection">6.3.2 BuildFactory</h4>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eBuildFactory-71"></a><a name="index-buildbot_002eprocess_002efactory_002eBasicBuildFactory-72"></a><!-- TODO: what is BasicSVN anyway? -->
+<a name="index-buildbot_002eprocess_002efactory_002eBasicSVN-73"></a>
+The default <code>BuildFactory</code>, provided in the
+<code>buildbot.process.factory</code> module, contains a list of &ldquo;BuildStep
+specifications&rdquo;: a list of <code>(step_class, kwargs)</code> tuples,
+one for each step. When asked to create a Build, it loads the list of steps into
+the new Build object. When the Build is actually started, these step
+specifications are used to create the actual set of BuildSteps, which
+are then executed one at a time. For example, a build which consists
+of a CVS checkout followed by a <code>make build</code> would be constructed
+as follows:
+
+<pre class="example">     from buildbot.steps import source, shell
+     from buildbot.process import factory
+     
+     f = factory.BuildFactory()
+     f.addStep(source.CVS, cvsroot=CVSROOT, cvsmodule="project", mode="update")
+     f.addStep(shell.Compile, command=["make", "build"])
+</pre>
+   <p>It is also possible to pass a list of step specifications into the
+<code>BuildFactory</code> when it is created. Using <code>addStep</code> is
+usually simpler, but there are cases where it is more convenient to
+create the list of steps ahead of time. To make this approach easier,
+a convenience function named <code>s</code> is available:
+
+<pre class="example">     from buildbot.steps import source, shell
+     from buildbot.process import factory
+     from buildbot.process.factory import s
+     # s is a convenience function, defined with:
+     # def s(steptype, **kwargs): return (steptype, kwargs)
+     
+     all_steps = [s(source.CVS, cvsroot=CVSROOT, cvsmodule="project",
+                    mode="update"),
+                  s(shell.Compile, command=["make", "build"]),
+                 ]
+     f = factory.BuildFactory(all_steps)
+</pre>
+   <p>Each step can affect the build process in the following ways:
+
+     <ul>
+<li>If the step's <code>haltOnFailure</code> attribute is True, then a failure
+in the step (i.e. if it completes with a result of FAILURE) will cause
+the whole build to be terminated immediately: no further steps will be
+executed. This is useful for setup steps upon which the rest of the
+build depends: if the CVS checkout or <code>./configure</code> process
+fails, there is no point in trying to compile or test the resulting
+tree.
+
+     <li>If the <code>flunkOnFailure</code> or <code>flunkOnWarnings</code> flag is set,
+then a result of FAILURE or WARNINGS will mark the build as a whole as
+FAILED. However, the remaining steps will still be executed. This is
+appropriate for things like multiple testing steps: a failure in any
+one of them will indicate that the build has failed, however it is
+still useful to run them all to completion.
+
+     <li>Similarly, if the <code>warnOnFailure</code> or <code>warnOnWarnings</code> flag
+is set, then a result of FAILURE or WARNINGS will mark the build as
+having WARNINGS, and the remaining steps will still be executed. This
+may be appropriate for certain kinds of optional build or test steps. 
+For example, a failure experienced while building documentation files
+should be made visible with a WARNINGS result but not be serious
+enough to warrant marking the whole build with a FAILURE.
+
+   </ul>
+
+   <p>In addition, each Step produces its own results, may create logfiles,
+etc. However only the flags described above have any effect on the
+build as a whole.
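+
+   <p>These flags can generally be overridden on a per-step basis by
+passing them as keyword arguments to <code>addStep</code>. A brief
+sketch (the command is hypothetical):
+
+<pre class="example">     f.addStep(shell.ShellCommand, command=["make", "docs"],
+               haltOnFailure=False, flunkOnFailure=False,
+               warnOnFailure=True)
+</pre>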
+
+   <p>The pre-defined BuildSteps like <code>CVS</code> and <code>Compile</code> have
+reasonably appropriate flags set on them already. For example, without
+a source tree there is no point in continuing the build, so the
+<code>CVS</code> class has the <code>haltOnFailure</code> flag set to True. Look
+in <samp><span class="file">buildbot/process/step.py</span></samp> to see how the other Steps are
+marked.
+
+   <p>Each Step is created with an additional <code>workdir</code> argument that
+indicates where its actions should take place. This is specified as a
+subdirectory of the slave builder's base directory, with a default
+value of <code>build</code>. This is only implemented as a step argument (as
+opposed to simply being a part of the base directory) because the
+CVS/SVN steps need to perform their checkouts from the parent
+directory.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildFactory-Attributes">BuildFactory Attributes</a>
+<li><a accesskey="2" href="#Quick-builds">Quick builds</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildFactory-Attributes"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Quick-builds">Quick builds</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildFactory">BuildFactory</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#BuildFactory">BuildFactory</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.2.1 BuildFactory Attributes</h5>
+
+<p>Some attributes from the BuildFactory are copied into each Build.
+
+   <p><a name="index-treeStableTimer-74"></a>
+     <dl>
+<dt><code>useProgress</code><dd>(defaults to True): if True, the buildmaster keeps track of how long
+each step takes, so it can provide estimates of how long future builds
+will take. If builds are not expected to take a consistent amount of
+time (such as incremental builds in which a random set of files are
+recompiled or tested each time), this should be set to False to
+inhibit progress-tracking.
+
+   </dl>
+
+<div class="node">
+<p><hr>
+<a name="Quick-builds"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildFactory-Attributes">BuildFactory Attributes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#BuildFactory">BuildFactory</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.2.2 Quick builds</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eQuickBuildFactory-75"></a>
+The difference between a &ldquo;full build&rdquo; and a &ldquo;quick build&rdquo; is that
+quick builds are generally done incrementally, starting with the tree
+where the previous build was performed. That simply means that the
+source-checkout step should be given a <code>mode='update'</code> flag, to
+do the source update in-place.
+
+   <p>In addition to that, the <code>useProgress</code> flag should be set to
+False. Incremental builds will (or at least they ought to) compile as
+few files as necessary, so they will take an unpredictable amount of
+time to run. Therefore it would be misleading to claim to predict how
+long the build will take.
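+
+   <p>A quick-build factory might therefore look something like this
+sketch (URL is a placeholder for the repository location):
+
+<pre class="example">     from buildbot.steps import source, shell
+     from buildbot.process import factory
+     
+     f = factory.BuildFactory()
+     f.useProgress = False
+     f.addStep(source.SVN, svnurl=URL, mode="update")
+     f.addStep(shell.Compile, command=["make", "build"])
+</pre>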
+
+<div class="node">
+<p><hr>
+<a name="Process-Specific-build-factories"></a>
+<a name="Process_002dSpecific-build-factories"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildFactory">BuildFactory</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Factories">Build Factories</a>
+
+</div>
+
+<h4 class="subsection">6.3.3 Process-Specific build factories</h4>
+
+<p>Many projects use one of a few popular build frameworks to simplify
+the creation and maintenance of Makefiles or other compilation
+structures. Buildbot provides several pre-configured BuildFactory
+subclasses which let you build these projects with a minimum of fuss.
+
+<ul class="menu">
+<li><a accesskey="1" href="#GNUAutoconf">GNUAutoconf</a>
+<li><a accesskey="2" href="#CPAN">CPAN</a>
+<li><a accesskey="3" href="#Python-distutils">Python distutils</a>
+<li><a accesskey="4" href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="GNUAutoconf"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#CPAN">CPAN</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.3.1 GNUAutoconf</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eGNUAutoconf-76"></a>
+<a href="http://www.gnu.org/software/autoconf/">GNU Autoconf</a> is a
+software portability tool, intended to make it possible to write
+programs in C (and other languages) which will run on a variety of
+UNIX-like systems. Most GNU software is built using autoconf. It is
+frequently used in combination with GNU automake. These tools both
+encourage a build process which usually looks like this:
+
+<pre class="example">     % CONFIG_ENV=foo ./configure --with-flags
+     % make all
+     % make check
+     # make install
+</pre>
+   <p>(except of course the Buildbot always skips the <code>make install</code>
+part).
+
+   <p>The Buildbot's <code>buildbot.process.factory.GNUAutoconf</code> factory is
+designed to build projects which use GNU autoconf and/or automake. The
+configuration environment variables, the configure flags, and command
+lines used for the compile and test are all configurable; in general
+the default values will be suitable.
+
+   <p>Example:
+
+<pre class="example">     # use the s() convenience function defined earlier
+     f = factory.GNUAutoconf(source=s(step.SVN, svnurl=URL, mode="copy"),
+                             flags=["--disable-nls"])
+</pre>
+   <p>Required Arguments:
+
+     <dl>
+<dt><code>source</code><dd>This argument must be a step specification tuple that provides a
+BuildStep to generate the source tree. 
+</dl>
+
+   <p>Optional Arguments:
+
+     <dl>
+<dt><code>configure</code><dd>The command used to configure the tree. Defaults to
+<code>./configure</code>. Accepts either a string or a list of shell argv
+elements.
+
+     <br><dt><code>configureEnv</code><dd>The environment used for the initial configuration step. This accepts
+a dictionary which will be merged into the buildslave's normal
+environment. This is commonly used to provide things like
+<code>CFLAGS="-O2 -g"</code> (to turn off debug symbols during the compile). 
+Defaults to an empty dictionary.
+
+     <br><dt><code>configureFlags</code><dd>A list of flags to be appended to the argument list of the configure
+command. This is commonly used to enable or disable specific features
+of the autoconf-controlled package, like <code>["--without-x"]</code> to
+disable windowing support. Defaults to an empty list.
+
+     <br><dt><code>compile</code><dd>this is a shell command or list of argv values which is used to
+actually compile the tree. It defaults to <code>make all</code>. If set to
+None, the compile step is skipped.
+
+     <br><dt><code>test</code><dd>this is a shell command or list of argv values which is used to run
+the tree's self-tests. It defaults to <code>make check</code>. If set to
+None, the test step is skipped.
+
+   </dl>
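+
+   <p>A sketch that overrides several of these optional arguments at once
+(URL and the particular flag choices are hypothetical):
+
+<pre class="example">     f = factory.GNUAutoconf(source=s(step.SVN, svnurl=URL, mode="copy"),
+                             configureEnv={"CFLAGS": "-O2"},
+                             configureFlags=["--without-x"],
+                             compile=["make", "all"],
+                             test=["make", "check"])
+</pre>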
+
+<div class="node">
+<p><hr>
+<a name="CPAN"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Python-distutils">Python distutils</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#GNUAutoconf">GNUAutoconf</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.3.2 CPAN</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eCPAN-77"></a>
+Most Perl modules available from the <a href="http://www.cpan.org/">CPAN</a>
+archive use the <code>MakeMaker</code> module to provide configuration,
+build, and test services. The standard build routine for these modules
+looks like:
+
+<pre class="example">     % perl Makefile.PL
+     % make
+     % make test
+     # make install
+</pre>
+   <p>(except again Buildbot skips the install step)
+
+   <p>Buildbot provides a <code>CPAN</code> factory to compile and test these
+projects.
+
+   <p>Arguments:
+     <dl>
+<dt><code>source</code><dd>(required): A step specification tuple, like that used by GNUAutoconf.
+
+     <br><dt><code>perl</code><dd>A string which specifies the <code>perl</code> executable to use. Defaults
+to just <code>perl</code>.
+
+   </dl>
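+
+   <p>For example, a sketch (URL and the perl path are placeholders):
+
+<pre class="example">     f = factory.CPAN(source=s(step.SVN, svnurl=URL, mode="copy"),
+                      perl="/usr/bin/perl")
+</pre>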
+
+<div class="node">
+<p><hr>
+<a name="Python-distutils"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#CPAN">CPAN</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.3.3 Python distutils</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eDistutils-78"></a>
+Most Python modules use the <code>distutils</code> package to provide
+configuration and build services. The standard build process looks
+like:
+
+<pre class="example">     % python ./setup.py build
+     % python ./setup.py install
+</pre>
+   <p>Unfortunately, although Python provides a standard unit-test framework
+named <code>unittest</code>, to the best of my knowledge <code>distutils</code>
+does not provide a standardized target to run such unit tests. (please
+let me know if I'm wrong, and I will update this factory).
+
+   <p>The <code>Distutils</code> factory provides support for running the build
+part of this process. It accepts the same <code>source=</code> parameter as
+the other build factories.
+
+   <p>Arguments:
+     <dl>
+<dt><code>source</code><dd>(required): A step specification tuple, like that used by GNUAutoconf.
+
+     <br><dt><code>python</code><dd>A string which specifies the <code>python</code> executable to use. Defaults
+to just <code>python</code>.
+
+     <br><dt><code>test</code><dd>Provides a shell command which runs unit tests. This accepts either a
+string or a list. The default value is None, which disables the test
+step (since there is no common default command to run unit tests in
+distutils modules).
+
+   </dl>
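+
+   <p>For example, a sketch (URL is a placeholder, and the test command
+assumes the project happens to provide a <code>setup.py test</code>
+target):
+
+<pre class="example">     f = factory.Distutils(source=s(step.SVN, svnurl=URL, mode="update"),
+                           python="python2.4",
+                           test=["python2.4", "./setup.py", "test"])
+</pre>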
+
+<div class="node">
+<p><hr>
+<a name="Python%2fTwisted%2ftrial-projects"></a>
+<a name="Python_002fTwisted_002ftrial-projects"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Python-distutils">Python distutils</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.3.4 Python/Twisted/trial projects</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eTrial-79"></a><!-- TODO: document these steps better -->
+<a name="index-buildbot_002esteps_002epython_005ftwisted_002eHLint-80"></a><a name="index-buildbot_002esteps_002epython_005ftwisted_002eTrial-81"></a><a name="index-buildbot_002esteps_002epython_005ftwisted_002eProcessDocs-82"></a><a name="index-buildbot_002esteps_002epython_005ftwisted_002eBuildDebs-83"></a><a name="index-buildbot_002esteps_002epython_005ftwisted_002eRemovePYCs-84"></a>
+Twisted provides a unit test tool named <code>trial</code> which provides a
+few improvements over Python's built-in <code>unittest</code> module. Many
+python projects which use Twisted for their networking or application
+services also use trial for their unit tests. These modules are
+usually built and tested with something like the following:
+
+<pre class="example">     % python ./setup.py build
+     % PYTHONPATH=build/lib.linux-i686-2.3 trial -v PROJECTNAME.test
+     % python ./setup.py install
+</pre>
+   <p>Unfortunately, the <samp><span class="file">build/lib</span></samp> directory into which the
+built/copied .py files are placed is actually architecture-dependent,
+and I do not yet know of a simple way to calculate its value. For many
+projects it is sufficient to import their libraries &ldquo;in place&rdquo; from
+the tree's base directory (<code>PYTHONPATH=.</code>).
+
+   <p>In addition, the <var>PROJECTNAME</var> value where the test files are
+located is project-dependent: it is usually just the project's
+top-level library directory, as common practice suggests the unit test
+files are put in the <code>test</code> sub-module. This value cannot be
+guessed, so the <code>Trial</code> class must be told where to find the test
+files.
+
+   <p>The <code>Trial</code> class provides support for building and testing
+projects which use distutils and trial. If the test module name is
+specified, trial will be invoked. The library path used for testing
+can also be set.
+
+   <p>One advantage of trial is that the Buildbot happens to know how to
+parse trial output, letting it identify which tests passed and which
+ones failed. The Buildbot can then provide fine-grained reports about
+how many tests have failed, when individual tests fail after having
+passed previously, etc.
+
+   <p>Another feature of trial is that you can give it a series of source
+.py files, and it will search them for special <code>test-case-name</code>
+tags that indicate which test cases provide coverage for that file. 
+Trial can then run just the appropriate tests. This is useful for
+quick builds, where you want to only run the test cases that cover the
+changed functionality.
+
+   <p>Arguments:
+     <dl>
+<dt><code>source</code><dd>(required): A step specification tuple, like that used by GNUAutoconf.
+
+     <br><dt><code>buildpython</code><dd>A list (argv array) of strings which specifies the <code>python</code>
+executable to use when building the package. Defaults to just
+<code>['python']</code>. It may be useful to add flags here, to suppress
+warnings during compilation of extension modules. This list is
+extended with <code>['./setup.py', 'build']</code> and then executed in a
+ShellCommand.
+
+     <br><dt><code>testpath</code><dd>Provides a directory to add to <code>PYTHONPATH</code> when running the unit
+tests, if tests are being run. Defaults to <code>.</code> to include the
+project files in-place. The generated build library is frequently
+architecture-dependent, but may simply be <samp><span class="file">build/lib</span></samp> for
+pure-python modules.
+
+     <br><dt><code>trialpython</code><dd>Another list of strings used to build the command that actually runs
+trial. This is prepended to the contents of the <code>trial</code> argument
+below. It may be useful to add <code>-W</code> flags here to suppress
+warnings that occur while tests are being run. Defaults to an empty
+list, meaning <code>trial</code> will be run without an explicit
+interpreter, which is generally what you want if you're using
+<samp><span class="file">/usr/bin/trial</span></samp> instead of, say, the <samp><span class="file">./bin/trial</span></samp> that
+lives in the Twisted source tree.
+
+     <br><dt><code>trial</code><dd>provides the name of the <code>trial</code> command. It is occasionally
+useful to use an alternate executable, such as <code>trial2.2</code> which
+might run the tests under an older version of Python. Defaults to
+<code>trial</code>.
+
+     <br><dt><code>tests</code><dd>Provides a module name or names which contain the unit tests for this
+project. Accepts a string, typically <code>PROJECTNAME.test</code>, or a
+list of strings. Defaults to None, indicating that no tests should be
+run. You must either set this or <code>useTestCaseNames</code> to do anything
+useful with the Trial factory.
+
+     <br><dt><code>useTestCaseNames</code><dd>Tells the Step to provide the names of all changed .py files to trial,
+so it can look for test-case-name tags and run just the matching test
+cases. Suitable for use in quick builds. Defaults to False.
+
+     <br><dt><code>randomly</code><dd>If <code>True</code>, tells Trial (with the <code>--random=0</code> argument) to
+run the test cases in random order, which sometimes catches subtle
+inter-test dependency bugs. Defaults to <code>False</code>.
+
+     <br><dt><code>recurse</code><dd>If <code>True</code>, tells Trial (with the <code>--recurse</code> argument) to
+look in all subdirectories for additional test cases. It isn't clear
+to me how this works, but it may be useful to deal with the
+unknown-PROJECTNAME problem described above, and is currently used in
+the Twisted buildbot to accommodate the fact that test cases are now
+distributed through multiple twisted.SUBPROJECT.test directories.
+
+   </dl>
+
+   <p>Unless one of <code>tests</code> or <code>useTestCaseNames</code>
+is set, no tests will be run.
+
+   <p>Some quick examples follow. Most of these examples assume that the
+target python code (the &ldquo;code under test&rdquo;) can be reached directly
+from the root of the target tree, rather than being in a <samp><span class="file">lib/</span></samp>
+subdirectory.
+
+<pre class="example">     #  Trial(source, tests="toplevel.test") does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. trial -to toplevel.test
+     
+     #  Trial(source, tests=["toplevel.test", "other.test"]) does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. trial -to toplevel.test other.test
+     
+     #  Trial(source, useTestCaseNames=True) does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. trial -to --testmodule=foo/bar.py..  (from Changes)
+     
+     #  Trial(source, buildpython=["python2.3", "-Wall"], tests="foo.tests"):
+     #   python2.3 -Wall ./setup.py build
+     #   PYTHONPATH=. trial -to foo.tests
+     
+     #  Trial(source, trialpython="python2.3", trial="/usr/bin/trial",
+     #        tests="foo.tests") does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. python2.3 /usr/bin/trial -to foo.tests
+     
+     # For running trial out of the tree being tested (only useful when the
+     # tree being built is Twisted itself):
+     #  Trial(source, trialpython=["python2.3", "-Wall"], trial="./bin/trial",
+     #        tests="foo.tests") does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. python2.3 -Wall ./bin/trial -to foo.tests
+</pre>
+   <p>If the output directory of <code>./setup.py build</code> is known, you can
+pull the python code from the built location instead of the source
+directories. This should be able to handle variations in where the
+source comes from, as well as accommodating binary extension modules:
+
+<pre class="example">     # Trial(source,tests="toplevel.test",testpath='build/lib.linux-i686-2.3')
+     # does:
+     #  python ./setup.py build
+     #  PYTHONPATH=build/lib.linux-i686-2.3 trial -to toplevel.test
+</pre>
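+
+   <p>Putting this together, a builder configuration that uses the Trial
+factory might look like the following sketch (URL, the builder name,
+and the slave name are placeholders):
+
+<pre class="example">     from buildbot.steps import source
+     from buildbot.process import factory
+     from buildbot.process.factory import s
+     
+     f = factory.Trial(s(source.SVN, svnurl=URL, mode="update"),
+                       tests="toplevel.test", testpath=".")
+     b1 = {'name': 'trial-linux', 'slavename': 'bot-1',
+           'builddir': 'trial-linux', 'factory': f}
+     c['builders'].append(b1)
+</pre>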
+   <div class="node">
+<p><hr>
+<a name="Status-Delivery"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Command_002dline-tool">Command-line tool</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Process">Build Process</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">7 Status Delivery</h2>
+
+<p>More details are available in the docstrings for each class, use
+<code>pydoc buildbot.status.html.Waterfall</code> to see them. Most status
+delivery objects take a <code>categories=</code> argument, which can contain
+a list of &ldquo;category&rdquo; names: in this case, it will only show status
+for Builders that are in one of the named categories.
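+
+   <p>For example, a sketch (the category names are hypothetical) that
+restricts a Waterfall to two categories:
+
+<pre class="example">     from buildbot.status import html
+     w = html.Waterfall(http_port=8080, categories=["release", "nightly"])
+     c['status'].append(w)
+</pre>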
+
+   <p>(implementor's note: each of these objects should be a
+service.MultiService which will be attached to the BuildMaster object
+when the configuration is processed. They should use
+<code>self.parent.getStatus()</code> to get access to the top-level IStatus
+object, either inside <code>startService</code> or later. They may call
+<code>status.subscribe()</code> in <code>startService</code> to receive
+notifications of builder events, in which case they must define
+<code>builderAdded</code> and related methods. See the docstrings in
+<samp><span class="file">buildbot/interfaces.py</span></samp> for full details.)
+
+<ul class="menu">
+<li><a accesskey="1" href="#HTML-Waterfall">HTML Waterfall</a>
+<li><a accesskey="2" href="#IRC-Bot">IRC Bot</a>
+<li><a accesskey="3" href="#PBListener">PBListener</a>
+<li><a accesskey="4" href="#Writing-New-Status-Plugins">Writing New Status Plugins</a>
+</ul>
+
+<!-- @node Email Delivery,  , Status Delivery, Status Delivery -->
+<!-- @subsection Email Delivery -->
+<!-- DOCUMENT THIS -->
+<div class="node">
+<p><hr>
+<a name="HTML-Waterfall"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#IRC-Bot">IRC Bot</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Status-Delivery">Status Delivery</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.1 HTML Waterfall</h3>
+
+<p><a name="index-Waterfall-85"></a><a name="index-buildbot_002estatus_002ehtml_002eWaterfall-86"></a>
+
+<pre class="example">     from buildbot.status import html
+     w = html.Waterfall(http_port=8080)
+     c['status'].append(w)
+</pre>
+   <p>The <code>buildbot.status.html.Waterfall</code> status target creates an
+HTML &ldquo;waterfall display&rdquo;, which shows a time-based chart of events. 
+This display provides detailed information about all steps of all
+recent builds, and provides hyperlinks to look at individual build
+logs and source changes. If the <code>http_port</code> argument is provided,
+it provides a strports specification for the port that the web server
+should listen on. This can be a simple port number, or a string like
+<code>tcp:8080:interface=127.0.0.1</code> (to limit connections to the
+loopback interface, and therefore to clients running on the same
+host)<a rel="footnote" href="#fn-11" name="fnd-11"><sup>11</sup></a>.
+
+   <p>If instead (or in addition) you provide the <code>distrib_port</code>
+argument, a twisted.web distributed server will be started either on a
+TCP port (if <code>distrib_port</code> is like <code>"tcp:12345"</code>) or more
+likely on a UNIX socket (if <code>distrib_port</code> is like
+<code>"unix:/path/to/socket"</code>).
+
+   <p>The <code>distrib_port</code> option means that, on a host with a
+suitably-configured twisted-web server, you do not need to consume a
+separate TCP port for the buildmaster's status web page. When the web
+server is constructed with <code>mktap web --user</code>, URLs that point to
+<code>http://host/~username/</code> are dispatched to a sub-server that is
+listening on a UNIX socket at <code>~username/.twistd-web-pb</code>. On
+such a system, it is convenient to create a dedicated <code>buildbot</code>
+user, then set <code>distrib_port</code> to
+<code>"unix:"+os.path.expanduser("~/.twistd-web-pb")</code>. This
+configuration will make the HTML status page available at
+<code>http://host/~buildbot/</code> . Suitable URL remapping can make it
+appear at <code>http://host/buildbot/</code>, and the right virtual host
+setup can even place it at <code>http://buildbot.host/</code> .
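+
+   <p>A sketch of that configuration, run as the dedicated
+<code>buildbot</code> user, might look like:
+
+<pre class="example">     import os
+     from buildbot.status import html
+     w = html.Waterfall(distrib_port="unix:"
+                                     + os.path.expanduser("~/.twistd-web-pb"))
+     c['status'].append(w)
+</pre>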
+
+   <p>Other arguments:
+
+     <dl>
+<dt><code>allowForce</code><dd>If set to True (the default), then the web page will provide a &ldquo;Force
+Build&rdquo; button that allows visitors to manually trigger builds. This
+is useful for developers to re-run builds that have failed because of
+intermittent problems in the test suite, or because of libraries that
+were not installed at the time of the previous build. You may not wish
+to allow strangers to cause a build to run: in that case, set this to
+False to remove these buttons.
+
+     <br><dt><code>favicon</code><dd>If set to a string, this will be interpreted as a filename containing
+a &ldquo;favicon&rdquo;: a small image that contains an icon for the web site. 
+This is returned to browsers that request the <code>favicon.ico</code> file,
+and should point to a .png or .ico image file. The default value uses
+the buildbot/buildbot.png image (a small hex nut) contained in the
+buildbot distribution. You can set this to None to avoid using a
+favicon at all.
+
+     <br><dt><code>robots_txt</code><dd>If set to a string, this will be interpreted as a filename containing
+the contents of &ldquo;robots.txt&rdquo;. Many search engine spiders request
+this file before indexing the site. Setting it to a file which
+contains:
+     <pre class="example">          User-agent: *
+          Disallow: /
+     </pre>
+     <p>will prevent most search engines from trawling the (voluminous)
+generated status pages.
+
+   </dl>
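+
+   <p>A sketch combining these arguments (the robots.txt path is
+hypothetical):
+
+<pre class="example">     import os
+     from buildbot.status import html
+     w = html.Waterfall(http_port=8080, allowForce=False,
+                        robots_txt=os.path.expanduser("~/buildbot-robots.txt"))
+     c['status'].append(w)
+</pre>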
+
+<div class="node">
+<p><hr>
+<a name="IRC-Bot"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#PBListener">PBListener</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#HTML-Waterfall">HTML Waterfall</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.2 IRC Bot</h3>
+
+<p><a name="index-IRC-87"></a><a name="index-buildbot_002estatus_002ewords_002eIRC-88"></a>
+
+   <p>The <code>buildbot.status.words.IRC</code> status target creates an IRC bot
+which will attach to certain channels and be available for status
+queries. It can also be asked to announce builds as they occur, or be
+told to shut up.
+
+<pre class="example">     from twisted.status import words
+     irc = words.IRC("irc.example.org", "botnickname",
+                     channels=["channel1", "channel2"],
+                     password="mysecretpassword")
+     c['status'].append(irc)
+</pre>
+   <p>Take a look at the docstring for <code>words.IRC</code> for more details on
+configuring this service. The <code>password</code> argument, if provided,
+will be sent to Nickserv to claim the nickname: some IRC servers will
+not allow clients to send private messages until they have logged in
+with a password.
+
+   <p>To use the service, you address messages at the buildbot, either
+normally (<code>botnickname: status</code>) or with private messages
+(<code>/msg botnickname status</code>). The buildbot will respond in kind.
+
+   <p>Some of the commands currently available:
+
+     <dl>
+<dt><code>list builders</code><dd>Emit a list of all configured builders
+<br><dt><code>status BUILDER</code><dd>Announce the status of a specific Builder: what it is doing right now. 
+<br><dt><code>status all</code><dd>Announce the status of all Builders
+<br><dt><code>watch BUILDER</code><dd>If the given Builder is currently running, wait until the Build is
+finished and then announce the results. 
+<br><dt><code>last BUILDER</code><dd>Return the results of the last build to run on the given Builder.
+
+     <br><dt><code>help COMMAND</code><dd>Describe a command. Use <code>help commands</code> to get a list of known
+commands. 
+<br><dt><code>source</code><dd>Announce the URL of the Buildbot's home page. 
+<br><dt><code>version</code><dd>Announce the version of this Buildbot. 
+</dl>
+
+   <p>If the <code>allowForce=True</code> option was used, some additional commands
+will be available:
+
+     <dl>
+<dt><code>force build BUILDER REASON</code><dd>Tell the given Builder to start a build of the latest code. The user
+requesting the build and REASON are recorded in the Build status. The
+buildbot will announce the build's status when it finishes.
+
+     <br><dt><code>stop build BUILDER REASON</code><dd>Terminate any running build in the given Builder. REASON will be added
+to the build status to explain why it was stopped. You might use this
+if you committed a bug, corrected it right away, and don't want to
+wait for the first build (which is destined to fail) to complete
+before starting the second (hopefully fixed) build. 
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="PBListener"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Writing-New-Status-Plugins">Writing New Status Plugins</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#IRC-Bot">IRC Bot</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.3 PBListener</h3>
+
+<p><a name="index-PBListener-89"></a><a name="index-buildbot_002estatus_002eclient_002ePBListener-90"></a>
+
+<pre class="example">     import buildbot.status.client
+     pbl = buildbot.status.client.PBListener(port=int, user=str,
+                                             passwd=str)
+     c['status'].append(pbl)
+</pre>
+   <p>This sets up a PB listener on the given TCP port, to which a PB-based
+status client can connect and retrieve status information. 
+<code>buildbot statusgui</code> (see <a href="#statusgui">statusgui</a>) is an example of such a
+status client. The <code>port</code> argument can also be a strports
+specification string.
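+
+   <p>For example, a sketch (with placeholder credentials) that restricts
+the listener to the loopback interface by way of a strports string:
+
+<pre class="example">     from buildbot.status import client
+     pbl = client.PBListener(port="tcp:9988:interface=127.0.0.1",
+                             user="statususer", passwd="changeme")
+     c['status'].append(pbl)
+</pre>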
+
+<div class="node">
+<p><hr>
+<a name="Writing-New-Status-Plugins"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#PBListener">PBListener</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.4 Writing New Status Plugins</h3>
+
+<p>TODO: this needs a lot more examples
+
+   <p>Each status plugin is an object which provides the
+<code>twisted.application.service.IService</code> interface, which creates a
+tree of Services with the buildmaster at the top [not strictly true]. 
+The status plugins are all children of an object which implements
+<code>buildbot.interfaces.IStatus</code>, the main status object. From this
+object, the plugin can retrieve anything it wants about current and
+past builds. It can also subscribe to hear about new and upcoming
+builds.
+
+   <p>Status plugins which only react to human queries (like the Waterfall
+display) never need to subscribe to anything: they are idle until
+someone asks a question, then wake up and extract the information they
+need to answer it, then they go back to sleep. Plugins which need to
+act spontaneously when builds complete (like the Mail plugin) need to
+subscribe to hear about new builds.
+
+   <p>If the status plugin needs to run network services (like the HTTP
+server used by the Waterfall plugin), they can be attached as Service
+children of the plugin itself, using the <code>IServiceCollection</code>
+interface.
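+
+   <p>A skeletal (and hypothetical) plugin following the recipe above
+might look like this sketch: it subscribes in <code>startService</code>
+and prints a line whenever a build finishes.
+
+<pre class="example">     from twisted.application import service
+     
+     class PrintingStatusPlugin(service.MultiService):
+         def startService(self):
+             service.MultiService.startService(self)
+             self.status = self.parent.getStatus()
+             self.status.subscribe(self)
+     
+         def builderAdded(self, name, builder):
+             # returning self subscribes us to this builder's build events
+             return self
+     
+         def buildFinished(self, builderName, build, results):
+             print "build finished on", builderName, ":", results
+</pre>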
+
+<div class="node">
+<p><hr>
+<a name="Command-line-tool"></a>
+<a name="Command_002dline-tool"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Resources">Resources</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Status-Delivery">Status Delivery</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">8 Command-line tool</h2>
+
+<p>The <samp><span class="command">buildbot</span></samp> command-line tool can be used to start or stop a
+buildmaster or buildbot, and to interact with a running buildmaster. 
+Some of its subcommands are intended for buildmaster admins, while
+some are for developers who are editing the code that the buildbot is
+monitoring.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Administrator-Tools">Administrator Tools</a>
+<li><a accesskey="2" href="#Developer-Tools">Developer Tools</a>
+<li><a accesskey="3" href="#Other-Tools">Other Tools</a>
+<li><a accesskey="4" href="#g_t_002ebuildbot-config-directory">.buildbot config directory</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Administrator-Tools"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Developer-Tools">Developer Tools</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Command_002dline-tool">Command-line tool</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Command_002dline-tool">Command-line tool</a>
+
+</div>
+
+<h3 class="section">8.1 Administrator Tools</h3>
+
+<p>The following <samp><span class="command">buildbot</span></samp> sub-commands are intended for
+buildmaster administrators:
+
+<h3 class="heading">create-master</h3>
+
+<p>This creates a new directory and populates it with files that allow it
+to be used as a buildmaster's base directory.
+
+<pre class="example">     buildbot create-master BASEDIR
+</pre>
+   <h3 class="heading">create-slave</h3>
+
+<p>This creates a new directory and populates it with files that let it
+be used as a buildslave's base directory. You must provide several
+arguments, which are used to create the initial <samp><span class="file">buildbot.tac</span></samp>
+file.
+
+<pre class="example">     buildbot create-slave <var>BASEDIR</var> <var>MASTERHOST</var>:<var>PORT</var> <var>SLAVENAME</var> <var>PASSWORD</var>
+</pre>
+   <h3 class="heading">start</h3>
+
+<p>This starts a buildmaster or buildslave which was already created in
+the given base directory. The daemon is launched in the background,
+with events logged to a file named <samp><span class="file">twistd.log</span></samp>.
+
+<pre class="example">     buildbot start BASEDIR
+</pre>
+   <h3 class="heading">stop</h3>
+
+<p>This terminates the daemon (either buildmaster or buildslave) running
+in the given directory.
+
+<pre class="example">     buildbot stop BASEDIR
+</pre>
+   <h3 class="heading">sighup</h3>
+
+<p>This sends a SIGHUP to the buildmaster running in the given directory,
+which causes it to re-read its <samp><span class="file">master.cfg</span></samp> file.
+
+<pre class="example">     buildbot sighup BASEDIR
+</pre>
+   <div class="node">
+<p><hr>
+<a name="Developer-Tools"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Other-Tools">Other Tools</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Administrator-Tools">Administrator Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Command_002dline-tool">Command-line tool</a>
+
+</div>
+
+<h3 class="section">8.2 Developer Tools</h3>
+
+<p>These tools are provided for use by the developers who are working on
+the code that the buildbot is monitoring.
+
+<ul class="menu">
+<li><a accesskey="1" href="#statuslog">statuslog</a>
+<li><a accesskey="2" href="#statusgui">statusgui</a>
+<li><a accesskey="3" href="#try">try</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="statuslog"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#statusgui">statusgui</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Developer-Tools">Developer Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Developer-Tools">Developer Tools</a>
+
+</div>
+
+<h4 class="subsection">8.2.1 statuslog</h4>
+
+<pre class="example">     buildbot statuslog --master <var>MASTERHOST</var>:<var>PORT</var>
+</pre>
+   <p>This command starts a simple text-based status client, one which just
+prints out a new line each time an event occurs on the buildmaster.
+
+   <p>The <samp><span class="option">--master</span></samp> option provides the location of the
+<code>buildbot.status.client.PBListener</code> status port, used to deliver
+build information to realtime status clients. The option is always in
+the form of a string, with hostname and port number separated by a
+colon (<code>HOSTNAME:PORTNUM</code>). Note that this port is <em>not</em> the
+same as the slaveport (although a future version may allow the same
+port number to be used for both purposes). If you get an error message
+to the effect of &ldquo;Failure: twisted.cred.error.UnauthorizedLogin:&rdquo;,
+this may indicate that you are connecting to the slaveport rather than
+a <code>PBListener</code> port.
+
+   <p>The <samp><span class="option">--master</span></samp> option can also be provided by the
+<code>masterstatus</code> name in <samp><span class="file">.buildbot/options</span></samp> (see <a href="#g_t_002ebuildbot-config-directory">.buildbot config directory</a>).
+
+<div class="node">
+<p><hr>
+<a name="statusgui"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#try">try</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#statuslog">statuslog</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Developer-Tools">Developer Tools</a>
+
+</div>
+
+<h4 class="subsection">8.2.2 statusgui</h4>
+
+<p><a name="index-statusgui-91"></a>
+If you have set up a PBListener (see <a href="#PBListener">PBListener</a>), you will be able
+to monitor your Buildbot using a simple Gtk+ application invoked with
+the <code>buildbot statusgui</code> command:
+
+<pre class="example">     buildbot statusgui --master <var>MASTERHOST</var>:<var>PORT</var>
+</pre>
+   <p>This command starts a simple Gtk+-based status client, which contains
+a few boxes for each Builder that change color as events occur. It
+uses the same <samp><span class="option">--master</span></samp> argument as the <samp><span class="command">buildbot
+statuslog</span></samp> command (see <a href="#statuslog">statuslog</a>).
+
+<div class="node">
+<p><hr>
+<a name="try"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#statusgui">statusgui</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Developer-Tools">Developer Tools</a>
+
+</div>
+
+<h4 class="subsection">8.2.3 try</h4>
+
+<p>This lets a developer ask the question &ldquo;What would happen if I
+committed this patch right now?&rdquo;. It runs the unit test suite (across
+multiple build platforms) on the developer's current code, allowing
+them to make sure they will not break the tree when they finally
+commit their changes.
+
+   <p>The <samp><span class="command">buildbot try</span></samp> command is meant to be run from within a
+developer's local tree, and starts by figuring out the base revision
+of that tree (what revision was current the last time the tree was
+updated), and a patch that can be applied to that revision of the tree
+to make it match the developer's copy. This (revision, patch) pair is
+then sent to the buildmaster, which runs a build with that
+SourceStamp. If you want, the tool will emit status messages as the
+builds run, and will not terminate until the first failure has been
+detected (or the last success).
+
+   <p>For this command to work, several pieces must be in place:
+
+<h3 class="heading">TryScheduler</h3>
+
+<p><a name="index-buildbot_002escheduler_002eTry_005fJobdir-92"></a><a name="index-buildbot_002escheduler_002eTry_005fUserpass-93"></a>
+The buildmaster must have a <code>scheduler.Try</code> instance in
+the config file's <code>c['schedulers']</code> list. This lets the
+administrator control who may initiate these &ldquo;trial&rdquo; builds, which
+branches are eligible for trial builds, and which Builders should be
+used for them.
+
+   <p>The <code>TryScheduler</code> has various means to accept build requests:
+all of them enforce more security than the usual buildmaster ports do. 
+Any source code being built can be used to compromise the buildslave
+accounts, but in general that code must be checked out from the VC
+repository first, so only people with commit privileges can get
+control of the buildslaves. The usual force-build control channels can
+waste buildslave time but do not allow arbitrary commands to be
+executed by people who don't have those commit privileges. However,
+the source code patch that is provided with the trial build does not
+have to go through the VC system first, so it is important to make
+sure these builds cannot be abused by a non-committer to acquire as
+much control over the buildslaves as a committer has. Ideally, only
+developers who have commit access to the VC repository would be able
+to start trial builds, but unfortunately the buildmaster does not, in
+general, have access to the VC system's user list.
+
+   <p>As a result, the <code>TryScheduler</code> requires a bit more
+configuration. There are currently two ways to set this up:
+
+     <dl>
+<dt><strong>jobdir (ssh)</strong><dd>
+This approach creates a command queue directory, called the
+&ldquo;jobdir&rdquo;, in the buildmaster's working directory. The buildmaster
+admin sets the ownership and permissions of this directory to only
+grant write access to the desired set of developers, all of whom must
+have accounts on the machine. The <code>buildbot try</code> command creates
+a special file containing the source stamp information and drops it in
+the jobdir, just like a standard maildir. When the buildmaster notices
+the new file, it unpacks the information inside and starts the builds.
+
+     <p>The config file entries used by 'buildbot try' either specify a local
+queuedir (for which write and mv are used) or a remote one (using scp
+and ssh).
+
+     <p>The advantage of this scheme is that it is quite secure, the
+disadvantage is that it requires fiddling outside the buildmaster
+config (to set the permissions on the jobdir correctly). If the
+buildmaster machine happens to also house the VC repository, then it
+can be fairly easy to keep the VC userlist in sync with the
+trial-build userlist. If they are on different machines, this will be
+much more of a hassle. It may also involve granting developer accounts
+on a machine that would not otherwise require them.
+
+     <p>To implement this, the <samp><span class="command">buildbot try</span></samp> command invokes
+<code>ssh -l username host buildbot tryserver ARGS</code>, passing the patch
+contents over stdin. The arguments must include the inlet directory
+(the jobdir) and the revision information.
+
+     <br><dt><strong>user+password (PB)</strong><dd>
+In this approach, each developer gets a username/password pair, which
+are all listed in the buildmaster's configuration file. When the
+developer runs <code>buildbot try</code>, their machine connects to the
+buildmaster via PB and authenticates themselves using that username
+and password, then sends a PB command to start the trial build.
+
+     <p>The advantage of this scheme is that the entire configuration is
+performed inside the buildmaster's config file. The disadvantages are
+that it is less secure (while the &ldquo;cred&rdquo; authentication system does
+not expose the password in plaintext over the wire, it does not offer
+most of the other security properties that SSH does). In addition, the
+buildmaster admin is responsible for maintaining the username/password
+list, adding and deleting entries as developers come and go.
+
+   </dl>
+
+   <p>For example, to set up the &ldquo;jobdir&rdquo; style of trial build, using a
+command queue directory of <samp><span class="file">MASTERDIR/jobdir</span></samp> (and assuming that
+all your project developers were members of the <code>developers</code> unix
+group), you would first create that directory (with <samp><span class="command">mkdir
+MASTERDIR/jobdir MASTERDIR/jobdir/new MASTERDIR/jobdir/cur
+MASTERDIR/jobdir/tmp; chgrp developers MASTERDIR/jobdir
+MASTERDIR/jobdir/*; chmod g+rwx,o-rwx MASTERDIR/jobdir
+MASTERDIR/jobdir/*</span></samp>), and then use the following scheduler in the
+buildmaster's config file:
+
+<pre class="example">     from buildbot.scheduler import Try_Jobdir
+     s = Try_Jobdir("try1", ["full-linux", "full-netbsd", "full-OSX"],
+                    jobdir="jobdir")
+     c['schedulers'] = [s]
+</pre>
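+   <p>Written out as individual shell commands, the directory setup
+described above amounts to roughly the following (substitute your
+actual <samp><span class="file">MASTERDIR</span></samp> and group name):
+
+<pre class="example">     mkdir MASTERDIR/jobdir MASTERDIR/jobdir/new MASTERDIR/jobdir/cur MASTERDIR/jobdir/tmp
+     chgrp developers MASTERDIR/jobdir MASTERDIR/jobdir/*
+     chmod g+rwx,o-rwx MASTERDIR/jobdir MASTERDIR/jobdir/*
+</pre>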
+   <p>Note that you must create the jobdir before telling the buildmaster to
+use this configuration, otherwise you will get an error. Also remember
+that the buildmaster must be able to read and write to the jobdir as
+well. Be sure to watch the <samp><span class="file">twistd.log</span></samp> file (see <a href="#Logfiles">Logfiles</a>)
+as you start using the jobdir, to make sure the buildmaster is happy
+with it.
+
+   <p>To use the username/password form of authentication, create a
+<code>Try_Userpass</code> instance instead. It takes the same
+<code>builderNames</code> argument as the <code>Try_Jobdir</code> form, but
+accepts an additional <code>port</code> argument (to specify the TCP port to
+listen on) and a <code>userpass</code> list of username/password pairs to
+accept. Remember to use good passwords for this: the security of the
+buildslave accounts depends upon it:
+
+<pre class="example">     from buildbot.scheduler import Try_Userpass
+     s = Try_Userpass("try2", ["full-linux", "full-netbsd", "full-OSX"],
+                      port=8031, userpass=[("alice","pw1"), ("bob", "pw2")] )
+     c['schedulers'] = [s]
+</pre>
+   <p>Like most places in the buildbot, the <code>port</code> argument takes a
+strports specification. See <code>twisted.application.strports</code> for
+details.
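+
+   <p>For example, the numeric port in the example above could equivalently
+be given as a strports string (a sketch reusing the same hypothetical
+values):
+
+<pre class="example">     s = Try_Userpass("try2", ["full-linux", "full-netbsd", "full-OSX"],
+                      port="tcp:8031", userpass=[("alice","pw1"), ("bob", "pw2")] )
+     c['schedulers'] = [s]
+</pre>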
+
+<h3 class="heading">locating the master</h3>
+
+<p>The <samp><span class="command">try</span></samp> command needs to be told how to connect to the
+<code>TryScheduler</code>, and must know which of the authentication
+approaches described above is in use by the buildmaster. You specify
+the approach by using <samp><span class="option">--connect=ssh</span></samp> or <samp><span class="option">--connect=pb</span></samp>
+(or <code>try_connect = 'ssh'</code> or <code>try_connect = 'pb'</code> in
+<samp><span class="file">.buildbot/options</span></samp>).
+
+   <p>For the PB approach, the command must be given a <samp><span class="option">--master</span></samp>
+argument (in the form HOST:PORT) that points to the TCP port that you
+picked in the <code>Try_Userpass</code> scheduler. It also takes a
+<samp><span class="option">--username</span></samp> and <samp><span class="option">--passwd</span></samp> pair of arguments that match
+one of the entries in the buildmaster's <code>userpass</code> list. These
+arguments can also be provided as <code>try_master</code>,
+<code>try_username</code>, and <code>try_password</code> entries in the
+<samp><span class="file">.buildbot/options</span></samp> file.
+
+   <p>For the SSH approach, the command must be given <samp><span class="option">--tryhost</span></samp>,
+<samp><span class="option">--username</span></samp>, and optionally <samp><span class="option">--password</span></samp> (TODO:
+really?) to get to the buildmaster host. It must also be given
+<samp><span class="option">--trydir</span></samp>, which points to the inlet directory configured
+above. The trydir can be relative to the user's home directory, but
+most of the time you will use an explicit path like
+<samp><span class="file">~buildbot/project/trydir</span></samp>. These arguments can be provided in
+<samp><span class="file">.buildbot/options</span></samp> as <code>try_host</code>, <code>try_username</code>,
+<code>try_password</code>, and <code>try_dir</code>.
+
+   <p>In addition, the SSH approach needs to connect to a PBListener status
+port, so it can retrieve and report the results of the build (the PB
+approach uses the existing connection to retrieve status information,
+so this step is not necessary). This requires a <samp><span class="option">--master</span></samp>
+argument, or a <code>masterstatus</code> entry in <samp><span class="file">.buildbot/options</span></samp>,
+in the form of a HOSTNAME:PORT string.
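+
+   <p>For example, a <samp><span class="file">.buildbot/options</span></samp> file for the SSH approach
+might look like this (all values are hypothetical):
+
+<pre class="example">     try_connect = 'ssh'
+     try_host = 'buildbot.example.org'
+     try_username = 'alice'
+     try_dir = '~buildbot/project/trydir'
+     # status port, used to watch the build results
+     masterstatus = 'buildbot.example.org:12345'
+</pre>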
+
+<h3 class="heading">choosing the Builders</h3>
+
+<p>A trial build is performed on multiple Builders at the same time, and
+the developer gets to choose which Builders are used (limited to a set
+selected by the buildmaster admin with the TryScheduler's
+<code>builderNames=</code> argument). The set you choose will depend upon
+what your goals are: if you are concerned about cross-platform
+compatibility, you should use multiple Builders, one from each
+platform of interest. You might use just one builder if that platform
+has libraries or other facilities that allow better test coverage than
+what you can accomplish on your own machine, or faster test runs.
+
+   <p>The set of Builders to use can be specified with multiple
+<samp><span class="option">--builder</span></samp> arguments on the command line. It can also be
+specified with a single <code>try_builders</code> option in
+<samp><span class="file">.buildbot/options</span></samp> that uses a list of strings to specify all
+the Builder names:
+
+<pre class="example">     try_builders = ["full-OSX", "full-win32", "full-linux"]
+</pre>
+   <h3 class="heading">specifying the VC system</h3>
+
+<p>The <samp><span class="command">try</span></samp> command also needs to know how to take the
+developer's current tree and extract the (revision, patch)
+source-stamp pair. Each VC system uses a different process, so you
+start by telling the <samp><span class="command">try</span></samp> command which VC system you are
+using, with an argument like <samp><span class="option">--vc=cvs</span></samp> or <samp><span class="option">--vc=tla</span></samp>. 
+This can also be provided as <code>try_vc</code> in
+<samp><span class="file">.buildbot/options</span></samp>.
+
+   <p>The following names are recognized: <code>cvs</code>, <code>svn</code>, <code>baz</code>,
+<code>tla</code>, <code>hg</code>, and <code>darcs</code>.
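+
+   <p>For example, a developer working in a Subversion checkout would pass
+<samp><span class="option">--vc=svn</span></samp> on the command line, or put the following in
+<samp><span class="file">.buildbot/options</span></samp>:
+
+<pre class="example">     try_vc = 'svn'
+</pre>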
+
+<h3 class="heading">finding the top of the tree</h3>
+
+<p>Some VC systems (notably CVS and SVN) track each directory
+more-or-less independently, which means the <samp><span class="command">try</span></samp> command
+needs to move up to the top of the project tree before it will be able
+to construct a proper full-tree patch. To accomplish this, the
+<samp><span class="command">try</span></samp> command will crawl up through the parent directories
+until it finds a marker file. The default name for this marker file is
+<samp><span class="file">.buildbot-top</span></samp>, so when you are using CVS or SVN you should
+<code>touch .buildbot-top</code> from the top of your tree before running
+<samp><span class="command">buildbot try</span></samp>. Alternatively, you can use a filename like
+<samp><span class="file">ChangeLog</span></samp> or <samp><span class="file">README</span></samp>, since many projects put one of
+these files in their top-most directory (and nowhere else). To set
+this filename, use <samp><span class="option">--try-topfile=ChangeLog</span></samp>, or set it in the
+options file with <code>try_topfile = 'ChangeLog'</code>.
+
+   <p>You can also manually set the top of the tree with
+<samp><span class="option">--try-topdir=~/trees/mytree</span></samp>, or <code>try_topdir =
+'~/trees/mytree'</code>. If you use <code>try_topdir</code> in a
+<samp><span class="file">.buildbot/options</span></samp> file, you will need a separate options file
+for each tree you use, so it may be more convenient to use the
+<code>try_topfile</code> approach instead.
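+
+   <p>For example, with CVS or SVN you might mark the top of the tree once
+and then run <samp><span class="command">try</span></samp> from anywhere inside it (the path is
+hypothetical):
+
+<pre class="example">     cd ~/trees/mytree
+     touch .buildbot-top
+</pre>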
+
+   <p>Other VC systems which work on full projects instead of individual
+directories (tla, baz, darcs, monotone, mercurial) do not require
+<samp><span class="command">try</span></samp> to know the top directory, so the <samp><span class="option">--try-topfile</span></samp>
+and <samp><span class="option">--try-topdir</span></samp> arguments will be ignored. 
+<!-- is this true? I think I currently require topdirs all the time. -->
+
+   <p>If the <samp><span class="command">try</span></samp> command cannot find the top directory, it will
+abort with an error message.
+
+<h3 class="heading">determining the branch name</h3>
+
+<p>Some VC systems record the branch information in a way that &ldquo;try&rdquo;
+can locate it, in particular Arch (both <samp><span class="command">tla</span></samp> and
+<samp><span class="command">baz</span></samp>). For the others, if you are using something other than
+the default branch, you will have to tell the buildbot which branch
+your tree is using. You can do this with either the <samp><span class="option">--branch</span></samp>
+argument, or a <samp><span class="option">try_branch</span></samp> entry in the
+<samp><span class="file">.buildbot/options</span></samp> file.
+
+<h3 class="heading">determining the revision and patch</h3>
+
+<p>Each VC system has a separate approach for determining the tree's base
+revision and computing a patch.
+
+     <dl>
+<dt><code>CVS</code><dd>
+<samp><span class="command">try</span></samp> pretends that the tree is up to date. It converts the
+current time into a <code>-D</code> time specification, uses it as the base
+revision, and computes the diff between the upstream tree as of that
+point in time versus the current contents. This works, more or less,
+but requires that the local clock be in reasonably good sync with the
+repository.
+
+     <br><dt><code>SVN</code><dd><samp><span class="command">try</span></samp> does a <code>svn status -u</code> to find the latest
+repository revision number (emitted on the last line in the &ldquo;Status
+against revision: NN&rdquo; message). It then performs an <code>svn diff
+-rNN</code> to find out how your tree differs from the repository version,
+and sends the resulting patch to the buildmaster. If your tree is not
+up to date, this will result in the &ldquo;try&rdquo; tree being created with
+the latest revision, then <em>backwards</em> patches applied to bring it
+&ldquo;back&rdquo; to the version you actually checked out (plus your actual
+code changes), but this will still result in the correct tree being
+used for the build.
+
+     <br><dt><code>baz</code><dd><samp><span class="command">try</span></samp> does a <code>baz tree-id</code> to determine the
+fully-qualified version and patch identifier for the tree
+(ARCHIVE/VERSION&ndash;patch-NN), and uses the VERSION&ndash;patch-NN component
+as the base revision. It then does a <code>baz diff</code> to obtain the
+patch.
+
+     <br><dt><code>tla</code><dd><samp><span class="command">try</span></samp> does a <code>tla tree-version</code> to get the
+fully-qualified version identifier (ARCHIVE/VERSION), then takes the
+first line of <code>tla logs --reverse</code> to figure out the base
+revision. Then it does <code>tla changes --diffs</code> to obtain the patch.
+
+     <br><dt><code>Darcs</code><dd><code>darcs changes --context</code> emits a text file that contains a list
+of all patches back to and including the last tag. This text
+file (plus the location of a repository that contains all these
+patches) is sufficient to re-create the tree. Therefore the contents
+of this &ldquo;context&rdquo; file <em>are</em> the revision stamp for a
+Darcs-controlled source tree.
+
+     <p>So <samp><span class="command">try</span></samp> does a <code>darcs changes --context</code> to determine
+what your tree's base revision is, and then does a <code>darcs diff
+-u</code> to compute the patch relative to that revision.
+
+     <br><dt><code>Mercurial</code><dd><code>hg identify</code> emits a short revision ID (basically a truncated
+SHA1 hash of the current revision's contents), which is used as the
+base revision. <code>hg diff</code> then provides the patch relative to that
+revision. For <samp><span class="command">try</span></samp> to work, your working directory must only
+have patches that are available from the same remotely-available
+repository that the build process' <code>step.Mercurial</code> will use.
+
+     <!-- TODO: monotone, git -->
+</dl>
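+
+   <p>As a rough illustration of the SVN procedure described above, the base
+revision and patch could be extracted by hand with something like the
+following (an approximation only, not the actual implementation used
+by <samp><span class="command">try</span></samp>):
+
+<pre class="example">     REV=`svn status -u | grep 'Status against revision' | awk '{print $4}'`
+     svn diff -r$REV &gt; try.patch
+</pre>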
+
+<h3 class="heading">waiting for results</h3>
+
+<p>If you provide the <samp><span class="option">--wait</span></samp> option (or <code>try_wait = True</code>
+in <samp><span class="file">.buildbot/options</span></samp>), the <samp><span class="command">buildbot try</span></samp> command will
+wait until your changes have either been proven good or bad before
+exiting. Unless you use the <samp><span class="option">--quiet</span></samp> option (or
+<code>try_quiet=True</code>), it will emit a progress message every 60
+seconds until the builds have completed.
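+
+   <p>Putting these pieces together, a complete PB-style invocation might
+look like this (all values are hypothetical):
+
+<pre class="example">     buildbot try --connect=pb --master=buildbot.example.org:8031 \
+         --username=alice --passwd=pw1 --vc=svn \
+         --builder=full-linux --builder=full-netbsd --wait
+</pre>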
+
+<div class="node">
+<p><hr>
+<a name="Other-Tools"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#g_t_002ebuildbot-config-directory">.buildbot config directory</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Developer-Tools">Developer Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Command_002dline-tool">Command-line tool</a>
+
+</div>
+
+<h3 class="section">8.3 Other Tools</h3>
+
+<p>These tools are generally used by buildmaster administrators.
+
+<ul class="menu">
+<li><a accesskey="1" href="#sendchange">sendchange</a>
+<li><a accesskey="2" href="#debugclient">debugclient</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="sendchange"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#debugclient">debugclient</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Other-Tools">Other Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Other-Tools">Other Tools</a>
+
+</div>
+
+<h4 class="subsection">8.3.1 sendchange</h4>
+
+<p>This command is used to tell the buildmaster about source changes. It
+is intended to be used from within a commit script, installed on the
+VC server. It requires that you have a PBChangeSource
+(see <a href="#PBChangeSource">PBChangeSource</a>) running in the buildmaster (by being included
+in the <code>c['sources']</code> list).
+
+<pre class="example">     buildbot sendchange --master <var>MASTERHOST</var>:<var>PORT</var> --username <var>USER</var> <var>FILENAMES..</var>
+</pre>
+   <p>There are other (optional) arguments which can influence the
+<code>Change</code> that gets submitted:
+
+     <dl>
+<dt><code>--branch</code><dd>This provides the (string) branch specifier. If omitted, it defaults
+to None, indicating the &ldquo;default branch&rdquo;. All files included in this
+Change must be on the same branch.
+
+     <br><dt><code>--revision_number</code><dd>This provides a (numeric) revision number for the change, used for VC systems
+that use numeric transaction numbers (like Subversion).
+
+     <br><dt><code>--revision</code><dd>This provides a (string) revision specifier, for VC systems that use
+strings (Arch would use something like patch-42 etc).
+
+     <br><dt><code>--revision_file</code><dd>This provides a filename which will be opened and the contents used as
+the revision specifier. This is specifically for Darcs, which uses the
+output of <samp><span class="command">darcs changes --context</span></samp> as a revision specifier. 
+This context file can be a couple of kilobytes long, spanning a couple of
+lines per patch, and would be a hassle to pass as a command-line
+argument.
+
+     <br><dt><code>--comments</code><dd>This provides the change comments as a single argument. You may want
+to use <samp><span class="option">--logfile</span></samp> instead.
+
+     <br><dt><code>--logfile</code><dd>This instructs the tool to read the change comments from the given
+file. If you use <code>-</code> as the filename, the tool will read the
+change comments from stdin. 
+</dl>
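+
+   <p>For example, an SVN post-commit hook might invoke
+<samp><span class="command">sendchange</span></samp> along these lines (a sketch only; the hostname and
+port are hypothetical, and <samp><span class="command">svnlook</span></samp> is used to extract the
+commit metadata):
+
+<pre class="example">     #!/bin/sh
+     REPOS="$1"
+     REV="$2"
+     buildbot sendchange --master buildbot.example.org:9989 \
+         --username "$(svnlook author -r $REV $REPOS)" \
+         --revision_number $REV \
+         --comments "$(svnlook log -r $REV $REPOS)" \
+         $(svnlook changed -r $REV $REPOS | awk '{print $2}')
+</pre>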
+
+<div class="node">
+<p><hr>
+<a name="debugclient"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#sendchange">sendchange</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Other-Tools">Other Tools</a>
+
+</div>
+
+<h4 class="subsection">8.3.2 debugclient</h4>
+
+<pre class="example">     buildbot debugclient --master <var>MASTERHOST</var>:<var>PORT</var> --passwd <var>DEBUGPW</var>
+</pre>
+   <p>This launches a small Gtk+/Glade-based debug tool, connecting to the
+buildmaster's &ldquo;debug port&rdquo;. This debug port shares the same port
+number as the slaveport (see <a href="#Setting-the-slaveport">Setting the slaveport</a>), but the
+<code>debugPort</code> is only enabled if you set a debug password in the
+buildmaster's config file (see <a href="#Debug-options">Debug options</a>). The
+<samp><span class="option">--passwd</span></samp> option must match the <code>c['debugPassword']</code>
+value.
+
+   <p><samp><span class="option">--master</span></samp> can also be provided in <samp><span class="file">.debug/options</span></samp> by the
+<code>master</code> key. <samp><span class="option">--passwd</span></samp> can be provided by the
+<code>debugPassword</code> key.
+
+   <p>The <code>Connect</code> button must be pressed before any of the other
+buttons will be active. This establishes the connection to the
+buildmaster. The other sections of the tool are as follows:
+
+     <dl>
+<dt><code>Reload .cfg</code><dd>Forces the buildmaster to reload its <samp><span class="file">master.cfg</span></samp> file. This is
+equivalent to sending a SIGHUP to the buildmaster, but can be done
+remotely through the debug port. Note that it is a good idea to be
+watching the buildmaster's <samp><span class="file">twistd.log</span></samp> as you reload the config
+file, as any errors which are detected in the config file will be
+announced there.
+
+     <br><dt><code>Rebuild .py</code><dd>(not yet implemented). The idea here is to use Twisted's &ldquo;rebuild&rdquo;
+facilities to replace the buildmaster's running code with a new
+version. Even if this worked, it would only be used by buildbot
+developers.
+
+     <br><dt><code>poke IRC</code><dd>This locates a <code>words.IRC</code> status target and causes it to emit a
+message on all the channels to which it is currently connected. This
+was used to debug a problem in which the buildmaster lost the
+connection to the IRC server and did not attempt to reconnect.
+
+     <br><dt><code>Commit</code><dd>This allows you to inject a Change, just as if a real one had been
+delivered by whatever VC hook you are using. You can set the name of
+the committed file and the name of the user who is doing the commit. 
+Optionally, you can also set a revision for the change. If the
+revision you provide looks like a number, it will be sent as an
+integer, otherwise it will be sent as a string.
+
+     <br><dt><code>Force Build</code><dd>This lets you force a Builder (selected by name) to start a build of
+the current source tree.
+
+     <br><dt><code>Currently</code><dd>(obsolete). This was used to manually set the status of the given
+Builder, but the status-assignment code was changed in an incompatible
+way and these buttons are no longer meaningful.
+
+   </dl>
+
+<div class="node">
+<p><hr>
+<a name=".buildbot-config-directory"></a>
+<a name="g_t_002ebuildbot-config-directory"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Other-Tools">Other Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Command_002dline-tool">Command-line tool</a>
+
+</div>
+
+<h3 class="section">8.4 .buildbot config directory</h3>
+
+<p>Many of the <samp><span class="command">buildbot</span></samp> tools must be told how to contact the
+buildmaster that they interact with. This specification can be
+provided as a command-line argument, but most of the time it will be
+easier to set it in an &ldquo;options&rdquo; file. The <samp><span class="command">buildbot</span></samp>
+command will look for a special directory named <samp><span class="file">.buildbot</span></samp>,
+starting from the current directory (where the command was run) and
+crawling upwards, eventually looking in the user's home directory. It
+will look for a file named <samp><span class="file">options</span></samp> in this directory, and will
+evaluate it as a python script, looking for certain names to be set. 
+You can just put simple <code>name = 'value'</code> pairs in this file to
+set the options.
+
+   <p>For a description of the names used in this file, please see the
+documentation for the individual <samp><span class="command">buildbot</span></samp> sub-commands. The
+following is a brief sample of what this file's contents could be.
+
+<pre class="example">     # for status-reading tools
+     masterstatus = 'buildbot.example.org:12345'
+     # for 'sendchange' or the debug port
+     master = 'buildbot.example.org:18990'
+     debugPassword = 'eiv7Po'
+</pre>
+     <dl>
+<dt><code>masterstatus</code><dd>Location of the <code>client.PBListener</code> status port, used by
+<samp><span class="command">statuslog</span></samp> and <samp><span class="command">statusgui</span></samp>.
+
+     <br><dt><code>master</code><dd>Location of the <code>debugPort</code> (for <samp><span class="command">debugclient</span></samp>). Also the
+location of the <code>pb.PBChangeSource</code> (for <samp><span class="command">sendchange</span></samp>). 
+Usually shares the slaveport, but a future version may make it
+possible to have these listen on a separate port number.
+
+     <br><dt><code>debugPassword</code><dd>Must match the value of <code>c['debugPassword']</code>, used to protect the
+debug port, for the <samp><span class="command">debugclient</span></samp> command.
+
+     <br><dt><code>username</code><dd>Provides a default username for the <samp><span class="command">sendchange</span></samp> command.
+
+   </dl>
+
+   <p>The following options are used by the <code>buildbot try</code> command
+(see <a href="#try">try</a>):
+
+     <dl>
+<dt><code>try_connect</code><dd>This specifies how the &ldquo;try&rdquo; command should deliver its request to
+the buildmaster. The currently accepted values are &ldquo;ssh&rdquo; and &ldquo;pb&rdquo;. 
+<br><dt><code>try_builders</code><dd>Which builders should be used for the &ldquo;try&rdquo; build. 
+<br><dt><code>try_vc</code><dd>This specifies the version control system being used. 
+<br><dt><code>try_branch</code><dd>This indicates that the current tree is on a non-trunk branch. 
+<br><dt><code>try_topdir</code><br><dt><code>try_topfile</code><dd>Use <code>try_topdir</code> to explicitly indicate the top of your working
+tree, or <code>try_topfile</code> to name a file that will only be found in
+that top-most directory.
+
+     <br><dt><code>try_host</code><br><dt><code>try_username</code><br><dt><code>try_dir</code><dd>When try_connect is &ldquo;ssh&rdquo;, the command will pay attention to
+<code>try_host</code>, <code>try_username</code>, and <code>try_dir</code>.
+
+     <br><dt><code>try_username</code><br><dt><code>try_password</code><br><dt><code>try_master</code><dd>Instead, when <code>try_connect</code> is &ldquo;pb&rdquo;, the command will pay
+attention to <code>try_username</code>, <code>try_password</code>, and
+<code>try_master</code>.
+
+     <br><dt><code>try_wait</code><br><dt><code>masterstatus</code><dd><code>try_wait</code> and <code>masterstatus</code> are used to ask the &ldquo;try&rdquo;
+command to wait for the requested build to complete.
+
+   </dl>
+
+<div class="node">
+<p><hr>
+<a name="Resources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Developer_0027s-Appendix">Developer's Appendix</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Command_002dline-tool">Command-line tool</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">9 Resources</h2>
+
+<p>The Buildbot's home page is at <a href="http://buildbot.sourceforge.net/">http://buildbot.sourceforge.net/</a>
+
+   <p>For configuration questions and general discussion, please use the
+<code>buildbot-devel</code> mailing list. The subscription instructions and
+archives are available at
+<a href="http://lists.sourceforge.net/lists/listinfo/buildbot-devel">http://lists.sourceforge.net/lists/listinfo/buildbot-devel</a>
+
+<div class="node">
+<p><hr>
+<a name="Developer's-Appendix"></a>
+<a name="Developer_0027s-Appendix"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Index-of-Useful-Classes">Index of Useful Classes</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Resources">Resources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="unnumbered">Developer's Appendix</h2>
+
+<p>This appendix contains random notes about the implementation of the
+Buildbot, and is likely to only be of use to people intending to
+extend the Buildbot's internals.
+
+   <p>The buildmaster consists of a tree of Service objects, which is shaped
+as follows:
+
+<pre class="example">     BuildMaster
+      ChangeMaster  (in .change_svc)
+       [IChangeSource instances]
+      [IScheduler instances]  (in .schedulers)
+      BotMaster  (in .botmaster)
+      [IStatusTarget instances]  (in .statusTargets)
+</pre>
+   <p>The BotMaster has a collection of Builder objects as values of its
+<code>.builders</code> dictionary.
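+
+   <p>For example, from an interactive session (such as a manhole, see
+<a href="#Debug-options">Debug options</a>) this tree could be navigated roughly as follows,
+assuming <code>master</code> is bound to the BuildMaster instance:
+
+<pre class="example">     change_svc = master.change_svc               # the ChangeMaster
+     botmaster = master.botmaster                 # the BotMaster
+     builder = botmaster.builders['full-linux']   # a Builder, looked up by name
+</pre>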
+
+<div class="node">
+<p><hr>
+<a name="Index-of-Useful-Classes"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Index-of-master_002ecfg-keys">Index of master.cfg keys</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Developer_0027s-Appendix">Developer's Appendix</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="unnumbered">Index of Useful Classes</h2>
+
+<p>This is a list of all user-visible classes. These are the ones that
+are useful in <samp><span class="file">master.cfg</span></samp>, the buildmaster's configuration file. 
+Classes that are not listed here are generally internal things that
+admins are unlikely to have much use for.
+
+<h3 class="heading">Change Sources</h3>
+
+<ul class="index-cs" compact>
+<li><a href="#index-buildbot_002echanges_002ebonsaipoller_002eBonsaiPoller-36"><code>buildbot.changes.bonsaipoller.BonsaiPoller</code></a>: <a href="#BonsaiPoller">BonsaiPoller</a></li>
+<li><a href="#index-buildbot_002echanges_002efreshcvs_002eFreshCVSSource-30"><code>buildbot.changes.freshcvs.FreshCVSSource</code></a>: <a href="#CVSToys-_002d-PBService">CVSToys - PBService</a></li>
+<li><a href="#index-buildbot_002echanges_002email_002eBonsaiMaildirSource-33"><code>buildbot.changes.mail.BonsaiMaildirSource</code></a>: <a href="#Other-mail-notification-ChangeSources">Other mail notification ChangeSources</a></li>
+<li><a href="#index-buildbot_002echanges_002email_002eFCMaildirSource-31"><code>buildbot.changes.mail.FCMaildirSource</code></a>: <a href="#CVSToys-_002d-mail-notification">CVSToys - mail notification</a></li>
+<li><a href="#index-buildbot_002echanges_002email_002eSyncmailMaildirSource-32"><code>buildbot.changes.mail.SyncmailMaildirSource</code></a>: <a href="#Other-mail-notification-ChangeSources">Other mail notification ChangeSources</a></li>
+<li><a href="#index-buildbot_002echanges_002ep4poller_002eP4Source-35"><code>buildbot.changes.p4poller.P4Source</code></a>: <a href="#P4Source">P4Source</a></li>
+<li><a href="#index-buildbot_002echanges_002epb_002ePBChangeSource-34"><code>buildbot.changes.pb.PBChangeSource</code></a>: <a href="#PBChangeSource">PBChangeSource</a></li>
+<li><a href="#index-buildbot_002echanges_002esvnpoller_002eSVNPoller-37"><code>buildbot.changes.svnpoller.SVNPoller</code></a>: <a href="#SVNPoller">SVNPoller</a></li>
+   </ul><h3 class="heading">Schedulers and Locks</h3>
+
+
+
+<ul class="index-sl" compact>
+<li><a href="#index-buildbot_002elocks_002eMasterLock-69"><code>buildbot.locks.MasterLock</code></a>: <a href="#Interlocks">Interlocks</a></li>
+<li><a href="#index-buildbot_002elocks_002eSlaveLock-70"><code>buildbot.locks.SlaveLock</code></a>: <a href="#Interlocks">Interlocks</a></li>
+<li><a href="#index-buildbot_002escheduler_002eAnyBranchScheduler-18"><code>buildbot.scheduler.AnyBranchScheduler</code></a>: <a href="#Scheduler-Types">Scheduler Types</a></li>
+<li><a href="#index-buildbot_002escheduler_002eDependent-23"><code>buildbot.scheduler.Dependent</code></a>: <a href="#Build-Dependencies">Build Dependencies</a></li>
+<li><a href="#index-buildbot_002escheduler_002eNightly-20"><code>buildbot.scheduler.Nightly</code></a>: <a href="#Scheduler-Types">Scheduler Types</a></li>
+<li><a href="#index-buildbot_002escheduler_002ePeriodic-19"><code>buildbot.scheduler.Periodic</code></a>: <a href="#Scheduler-Types">Scheduler Types</a></li>
+<li><a href="#index-buildbot_002escheduler_002eScheduler-17"><code>buildbot.scheduler.Scheduler</code></a>: <a href="#Scheduler-Types">Scheduler Types</a></li>
+<li><a href="#index-buildbot_002escheduler_002eTry_005fJobdir-92"><code>buildbot.scheduler.Try_Jobdir</code></a>: <a href="#try">try</a></li>
+<li><a href="#index-buildbot_002escheduler_002eTry_005fUserpass-93"><code>buildbot.scheduler.Try_Userpass</code></a>: <a href="#try">try</a></li>
+   </ul><h3 class="heading">Build Factories</h3>
+
+
+
+<ul class="index-bf" compact>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eBasicBuildFactory-72"><code>buildbot.process.factory.BasicBuildFactory</code></a>: <a href="#BuildFactory">BuildFactory</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eBasicSVN-73"><code>buildbot.process.factory.BasicSVN</code></a>: <a href="#BuildFactory">BuildFactory</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eBuildFactory-71"><code>buildbot.process.factory.BuildFactory</code></a>: <a href="#BuildFactory">BuildFactory</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eCPAN-77"><code>buildbot.process.factory.CPAN</code></a>: <a href="#CPAN">CPAN</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eDistutils-78"><code>buildbot.process.factory.Distutils</code></a>: <a href="#Python-distutils">Python distutils</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eGNUAutoconf-76"><code>buildbot.process.factory.GNUAutoconf</code></a>: <a href="#GNUAutoconf">GNUAutoconf</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eQuickBuildFactory-75"><code>buildbot.process.factory.QuickBuildFactory</code></a>: <a href="#Quick-builds">Quick builds</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eTrial-79"><code>buildbot.process.factory.Trial</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+   </ul><h3 class="heading">Build Steps</h3>
+
+
+
+<ul class="index-bs" compact>
+<li><a href="#index-buildbot_002esteps_002emaxq_002eMaxQ-95"><code>buildbot.steps.maxq.MaxQ</code></a>: <a href="#Index-of-Useful-Classes">Index of Useful Classes</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_002eBuildEPYDoc-58"><code>buildbot.steps.python.BuildEPYDoc</code></a>: <a href="#BuildEPYDoc">BuildEPYDoc</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_002ePyFlakes-59"><code>buildbot.steps.python.PyFlakes</code></a>: <a href="#PyFlakes">PyFlakes</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eBuildDebs-83"><code>buildbot.steps.python_twisted.BuildDebs</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eHLint-80"><code>buildbot.steps.python_twisted.HLint</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eProcessDocs-82"><code>buildbot.steps.python_twisted.ProcessDocs</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eRemovePYCs-84"><code>buildbot.steps.python_twisted.RemovePYCs</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eTrial-81"><code>buildbot.steps.python_twisted.Trial</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eCompile-54"><code>buildbot.steps.shell.Compile</code></a>: <a href="#Compile">Compile</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eConfigure-53"><code>buildbot.steps.shell.Configure</code></a>: <a href="#Configure">Configure</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eShellCommand-52"><code>buildbot.steps.shell.ShellCommand</code></a>: <a href="#ShellCommand">ShellCommand</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eTest-55"><code>buildbot.steps.shell.Test</code></a>: <a href="#Test">Test</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eArch-47"><code>buildbot.steps.source.Arch</code></a>: <a href="#Arch">Arch</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eBazaar-49"><code>buildbot.steps.source.Bazaar</code></a>: <a href="#Bazaar">Bazaar</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eCVS-39"><code>buildbot.steps.source.CVS</code></a>: <a href="#CVS">CVS</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eDarcs-43"><code>buildbot.steps.source.Darcs</code></a>: <a href="#Darcs">Darcs</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eGit-94"><code>buildbot.steps.source.Git</code></a>: <a href="#Index-of-Useful-Classes">Index of Useful Classes</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eMercurial-45"><code>buildbot.steps.source.Mercurial</code></a>: <a href="#Mercurial">Mercurial</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eP4-51"><code>buildbot.steps.source.P4</code></a>: <a href="#P4">P4</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eSVN-41"><code>buildbot.steps.source.SVN</code></a>: <a href="#SVN">SVN</a></li>
+<li><a href="#index-buildbot_002esteps_002etransfer_002eFileDownload-62"><code>buildbot.steps.transfer.FileDownload</code></a>: <a href="#Transferring-Files">Transferring Files</a></li>
+<li><a href="#index-buildbot_002esteps_002etransfer_002eFileUpload-61"><code>buildbot.steps.transfer.FileUpload</code></a>: <a href="#Transferring-Files">Transferring Files</a></li>
+   </ul><!-- undocumented steps -->
+<p><a name="index-buildbot_002esteps_002esource_002eGit-94"></a><a name="index-buildbot_002esteps_002emaxq_002eMaxQ-95"></a>
+
+<h3 class="heading">Status Targets</h3>
+
+
+
+<ul class="index-st" compact>
+<li><a href="#index-buildbot_002estatus_002eclient_002ePBListener-90"><code>buildbot.status.client.PBListener</code></a>: <a href="#PBListener">PBListener</a></li>
+<li><a href="#index-buildbot_002estatus_002ehtml_002eWaterfall-86"><code>buildbot.status.html.Waterfall</code></a>: <a href="#HTML-Waterfall">HTML Waterfall</a></li>
+<li><a href="#index-buildbot_002estatus_002email_002eMailNotifier-96"><code>buildbot.status.mail.MailNotifier</code></a>: <a href="#Index-of-Useful-Classes">Index of Useful Classes</a></li>
+<li><a href="#index-buildbot_002estatus_002ewords_002eIRC-88"><code>buildbot.status.words.IRC</code></a>: <a href="#IRC-Bot">IRC Bot</a></li>
+   </ul><!-- TODO: undocumented targets -->
+<p><a name="index-buildbot_002estatus_002email_002eMailNotifier-96"></a>
+<div class="node">
+<p><hr>
+<a name="Index-of-master.cfg-keys"></a>
+<a name="Index-of-master_002ecfg-keys"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Index">Index</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Index-of-Useful-Classes">Index of Useful Classes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="unnumbered">Index of master.cfg keys</h2>
+
+<p>This is a list of all of the significant keys in master.cfg. Recall
+that master.cfg is effectively a small python program with one
+responsibility: create a dictionary named <code>BuildmasterConfig</code>. 
+The keys of this dictionary are listed here. The master.cfg file
+typically starts with something like:
+
+<pre class="example">     BuildmasterConfig = c = {}
+</pre>
+   <p>Therefore a config key of <code>sources</code> will usually appear in
+master.cfg as <code>c['sources']</code>.
+
+
+
+<ul class="index-bc" compact>
+<li><a href="#index-c_005b_0027bots_0027_005d-25"><code>c['bots']</code></a>: <a href="#Buildslave-Specifiers">Buildslave Specifiers</a></li>
+<li><a href="#index-c_005b_0027buildbotURL_0027_005d-14"><code>c['buildbotURL']</code></a>: <a href="#Defining-the-Project">Defining the Project</a></li>
+<li><a href="#index-c_005b_0027builders_0027_005d-26"><code>c['builders']</code></a>: <a href="#Defining-Builders">Defining Builders</a></li>
+<li><a href="#index-c_005b_0027debugPassword_0027_005d-28"><code>c['debugPassword']</code></a>: <a href="#Debug-options">Debug options</a></li>
+<li><a href="#index-c_005b_0027manhole_0027_005d-29"><code>c['manhole']</code></a>: <a href="#Debug-options">Debug options</a></li>
+<li><a href="#index-c_005b_0027projectName_0027_005d-12"><code>c['projectName']</code></a>: <a href="#Defining-the-Project">Defining the Project</a></li>
+<li><a href="#index-c_005b_0027projectURL_0027_005d-13"><code>c['projectURL']</code></a>: <a href="#Defining-the-Project">Defining the Project</a></li>
+<li><a href="#index-c_005b_0027schedulers_0027_005d-16"><code>c['schedulers']</code></a>: <a href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a></li>
+<li><a href="#index-c_005b_0027slavePortnum_0027_005d-24"><code>c['slavePortnum']</code></a>: <a href="#Setting-the-slaveport">Setting the slaveport</a></li>
+<li><a href="#index-c_005b_0027sources_0027_005d-15"><code>c['sources']</code></a>: <a href="#Listing-Change-Sources-and-Schedulers">Listing Change Sources and Schedulers</a></li>
+<li><a href="#index-c_005b_0027status_0027_005d-27"><code>c['status']</code></a>: <a href="#Defining-Status-Targets">Defining Status Targets</a></li>
+   </ul><div class="node">
+<p><hr>
+<a name="Index"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Index-of-master_002ecfg-keys">Index of master.cfg keys</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="unnumbered">Index</h2>
+
+
+
+<ul class="index-cp" compact>
+<li><a href="#index-addURL-67">addURL</a>: <a href="#BuildStep-URLs">BuildStep URLs</a></li>
+<li><a href="#index-Arch-Checkout-46">Arch Checkout</a>: <a href="#Arch">Arch</a></li>
+<li><a href="#index-Bazaar-Checkout-48">Bazaar Checkout</a>: <a href="#Bazaar">Bazaar</a></li>
+<li><a href="#index-build-properties-56">build properties</a>: <a href="#Build-Properties">Build Properties</a></li>
+<li><a href="#index-Builder-9">Builder</a>: <a href="#Builder">Builder</a></li>
+<li><a href="#index-BuildRequest-8">BuildRequest</a>: <a href="#BuildRequest">BuildRequest</a></li>
+<li><a href="#index-BuildSet-7">BuildSet</a>: <a href="#BuildSet">BuildSet</a></li>
+<li><a href="#index-BuildStep-URLs-66">BuildStep URLs</a>: <a href="#BuildStep-URLs">BuildStep URLs</a></li>
+<li><a href="#index-Configuration-11">Configuration</a>: <a href="#Configuration">Configuration</a></li>
+<li><a href="#index-CVS-Checkout-38">CVS Checkout</a>: <a href="#CVS">CVS</a></li>
+<li><a href="#index-Darcs-Checkout-42">Darcs Checkout</a>: <a href="#Darcs">Darcs</a></li>
+<li><a href="#index-Dependencies-22">Dependencies</a>: <a href="#Build-Dependencies">Build Dependencies</a></li>
+<li><a href="#index-Dependent-21">Dependent</a>: <a href="#Build-Dependencies">Build Dependencies</a></li>
+<li><a href="#index-File-Transfer-60">File Transfer</a>: <a href="#Transferring-Files">Transferring Files</a></li>
+<li><a href="#index-installation-3">installation</a>: <a href="#Installing-the-code">Installing the code</a></li>
+<li><a href="#index-introduction-1">introduction</a>: <a href="#Introduction">Introduction</a></li>
+<li><a href="#index-IRC-87">IRC</a>: <a href="#IRC-Bot">IRC Bot</a></li>
+<li><a href="#index-links-65">links</a>: <a href="#BuildStep-URLs">BuildStep URLs</a></li>
+<li><a href="#index-locks-68">locks</a>: <a href="#Interlocks">Interlocks</a></li>
+<li><a href="#index-logfiles-4">logfiles</a>: <a href="#Logfiles">Logfiles</a></li>
+<li><a href="#index-LogLineObserver-64">LogLineObserver</a>: <a href="#Adding-LogObservers">Adding LogObservers</a></li>
+<li><a href="#index-LogObserver-63">LogObserver</a>: <a href="#Adding-LogObservers">Adding LogObservers</a></li>
+<li><a href="#index-Mercurial-Checkout-44">Mercurial Checkout</a>: <a href="#Mercurial">Mercurial</a></li>
+<li><a href="#index-PBListener-89">PBListener</a>: <a href="#PBListener">PBListener</a></li>
+<li><a href="#index-Perforce-Update-50">Perforce Update</a>: <a href="#P4">P4</a></li>
+<li><a href="#index-Philosophy-of-operation-2">Philosophy of operation</a>: <a href="#History-and-Philosophy">History and Philosophy</a></li>
+<li><a href="#index-Scheduler-6">Scheduler</a>: <a href="#Schedulers">Schedulers</a></li>
+<li><a href="#index-statusgui-91">statusgui</a>: <a href="#statusgui">statusgui</a></li>
+<li><a href="#index-SVN-Checkout-40">SVN Checkout</a>: <a href="#SVN">SVN</a></li>
+<li><a href="#index-treeStableTimer-74">treeStableTimer</a>: <a href="#BuildFactory-Attributes">BuildFactory Attributes</a></li>
+<li><a href="#index-Users-10">Users</a>: <a href="#Users">Users</a></li>
+<li><a href="#index-Version-Control-5">Version Control</a>: <a href="#Version-Control-Systems">Version Control Systems</a></li>
+<li><a href="#index-Waterfall-85">Waterfall</a>: <a href="#HTML-Waterfall">HTML Waterfall</a></li>
+<li><a href="#index-WithProperties-57">WithProperties</a>: <a href="#Build-Properties">Build Properties</a></li>
+   </ul><div class="footnote">
+<hr>
+<a name="texinfo-footnotes-in-document"></a><h4>Footnotes</h4><p class="footnote"><small>[<a name="fn-1" href="#fnd-1">1</a>]</small> this
+ at reboot syntax is understood by Vixie cron, which is the flavor
+usually provided with linux systems. Other unices may have a cron that
+doesn't understand @reboot</p>
+
+   <p class="footnote"><small>[<a name="fn-2" href="#fnd-2">2</a>]</small> except Darcs, but
+since the Buildbot never modifies its local source tree we can ignore
+the fact that Darcs uses a less centralized model</p>
+
+   <p class="footnote"><small>[<a name="fn-3" href="#fnd-3">3</a>]</small> many VC systems provide more complexity than
+this: in particular the local views that P4 and ClearCase can assemble
+out of various source directories are more complex than we're prepared
+to take advantage of here</p>
+
+   <p class="footnote"><small>[<a name="fn-4" href="#fnd-4">4</a>]</small> Monotone's <em>multiple heads</em> feature
+violates this assumption of cumulative Changes, but in most situations
+the changes don't occur frequently enough for this to be a significant
+problem</p>
+
+   <p class="footnote"><small>[<a name="fn-5" href="#fnd-5">5</a>]</small> this <code>checkoutDelay</code> defaults
+to half the tree-stable timer, but it can be overridden with an
+argument to the Source Step</p>
+
+   <p class="footnote"><small>[<a name="fn-6" href="#fnd-6">6</a>]</small> To be precise, it is a list of objects which all
+implement the <code>buildbot.interfaces.IChangeSource</code> Interface</p>
+
+   <p class="footnote"><small>[<a name="fn-7" href="#fnd-7">7</a>]</small> Build properties are serialized along with the
+build results, so they must be serializable. For this reason, the
+value of any build property should be simple inert data: strings,
+numbers, lists, tuples, and dictionaries. They should not contain
+class instances.</p>
+
+   <p class="footnote"><small>[<a name="fn-8" href="#fnd-8">8</a>]</small> framboozle.com is still available. Remember, I get 10%
+:).</p>
+
+   <p class="footnote"><small>[<a name="fn-9" href="#fnd-9">9</a>]</small> Framboozle gets very excited about running unit
+tests.</p>
+
+   <p class="footnote"><small>[<a name="fn-10" href="#fnd-10">10</a>]</small> Also note that a clever buildmaster admin
+could still create the opportunity for deadlock: Build A obtains Lock
+1, inside which Step A.two tries to acquire Lock 2 at the Step level. 
+Meanwhile Build B obtains Lock 2, and has a Step B.two which wants to
+acquire Lock 1 at the Step level. Don't Do That.</p>
+
+   <p class="footnote"><small>[<a name="fn-11" href="#fnd-11">11</a>]</small> It may even be possible to provide SSL access by using
+a specification like
+<code>"ssl:12345:privateKey=mykey.pen:certKey=cert.pem"</code>, but this is
+completely untested</p>
+
+   <p><hr></div>
+
+</body></html>
+

Added: vendor/buildbot/current/docs/buildbot.info
===================================================================
--- vendor/buildbot/current/docs/buildbot.info	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/docs/buildbot.info	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,6597 @@
+This is buildbot.info, produced by makeinfo version 4.8 from
+buildbot.texinfo.
+
+   This is the BuildBot manual.
+
+   Copyright (C) 2005,2006 Brian Warner
+
+   Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty provided
+the copyright notice and this notice are preserved.
+
+
+File: buildbot.info,  Node: Top,  Next: Introduction,  Prev: (dir),  Up: (dir)
+
+BuildBot
+********
+
+This is the BuildBot manual.
+
+   Copyright (C) 2005,2006 Brian Warner
+
+   Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty provided
+the copyright notice and this notice are preserved.
+
+* Menu:
+
+* Introduction::                What the BuildBot does.
+* Installation::                Creating a buildmaster and buildslaves,
+                                running them.
+* Concepts::                    What goes on in the buildbot's little mind.
+* Configuration::               Controlling the buildbot.
+* Getting Source Code Changes::  Discovering when to run a build.
+* Build Process::               Controlling how each build is run.
+* Status Delivery::             Telling the world about the build's results.
+* Command-line tool::
+* Resources::                   Getting help.
+* Developer's Appendix::
+* Index of Useful Classes::
+* Index of master.cfg keys::
+* Index::                       Complete index.
+
+ --- The Detailed Node Listing ---
+
+Introduction
+
+* History and Philosophy::
+* System Architecture::
+* Control Flow::
+
+System Architecture
+
+* BuildSlave Connections::
+* Buildmaster Architecture::
+* Status Delivery Architecture::
+
+Installation
+
+* Requirements::
+* Installing the code::
+* Creating a buildmaster::
+* Creating a buildslave::
+* Launching the daemons::
+* Logfiles::
+* Shutdown::
+* Maintenance::
+* Troubleshooting::
+
+Creating a buildslave
+
+* Buildslave Options::
+
+Troubleshooting
+
+* Starting the buildslave::
+* Connecting to the buildmaster::
+* Forcing Builds::
+
+Concepts
+
+* Version Control Systems::
+* Schedulers::
+* BuildSet::
+* BuildRequest::
+* Builder::
+* Users::
+
+Version Control Systems
+
+* Generalizing VC Systems::
+* Source Tree Specifications::
+* How Different VC Systems Specify Sources::
+* Attributes of Changes::
+
+Users
+
+* Doing Things With Users::
+* Email Addresses::
+* IRC Nicknames::
+* Live Status Clients::
+
+Configuration
+
+* Config File Format::
+* Loading the Config File::
+* Defining the Project::
+* Listing Change Sources and Schedulers::
+* Setting the slaveport::
+* Buildslave Specifiers::
+* Defining Builders::
+* Defining Status Targets::
+* Debug options::
+
+Listing Change Sources and Schedulers
+
+* Scheduler Types::
+* Build Dependencies::
+
+Getting Source Code Changes
+
+* Change Sources::
+
+Change Sources
+
+* Choosing ChangeSources::
+* CVSToys - PBService::
+* CVSToys - mail notification::
+* Other mail notification ChangeSources::
+* PBChangeSource::
+* P4Source::
+* BonsaiPoller::
+* SVNPoller::
+
+Build Process
+
+* Build Steps::
+* Interlocks::
+* Build Factories::
+
+Build Steps
+
+* Common Parameters::
+* Source Checkout::
+* ShellCommand::
+* Simple ShellCommand Subclasses::
+* Python BuildSteps::
+* Transferring Files::
+* Writing New BuildSteps::
+
+Source Checkout
+
+* CVS::
+* SVN::
+* Darcs::
+* Mercurial::
+* Arch::
+* Bazaar::
+* P4::
+
+Simple ShellCommand Subclasses
+
+* Configure::
+* Compile::
+* Test::
+* Build Properties::
+
+Python BuildSteps
+
+* BuildEPYDoc::
+* PyFlakes::
+
+Writing New BuildSteps
+
+* BuildStep LogFiles::
+* Adding LogObservers::
+* BuildStep URLs::
+
+Build Factories
+
+* BuildStep Objects::
+* BuildFactory::
+* Process-Specific build factories::
+
+BuildStep Objects
+
+* BuildFactory Attributes::
+* Quick builds::
+
+BuildFactory
+
+* BuildFactory Attributes::
+* Quick builds::
+
+Process-Specific build factories
+
+* GNUAutoconf::
+* CPAN::
+* Python distutils::
+* Python/Twisted/trial projects::
+
+Status Delivery
+
+* HTML Waterfall::
+* IRC Bot::
+* PBListener::
+* Writing New Status Plugins::
+
+Command-line tool
+
+* Administrator Tools::
+* Developer Tools::
+* Other Tools::
+* .buildbot config directory::
+
+Developer Tools
+
+* statuslog::
+* statusgui::
+* try::
+
+Other Tools
+
+* sendchange::
+* debugclient::
+
+
+File: buildbot.info,  Node: Introduction,  Next: Installation,  Prev: Top,  Up: Top
+
+1 Introduction
+**************
+
+The BuildBot is a system to automate the compile/test cycle required
+by most software projects to validate code changes. By automatically
+rebuilding and testing the tree each time something has changed,
+build problems are pinpointed quickly, before other developers are
+inconvenienced by the failure. The guilty developer can be identified
+and harassed without human intervention. By running the builds on a
+variety of platforms, developers who do not have the facilities to
+test their changes everywhere before checkin will at least know
+shortly afterwards whether they have broken the build or not. Warning
+counts, lint checks, image size, compile time, and other build
+parameters can be tracked over time, are more visible, and are
+therefore easier to improve.
+
+   The overall goal is to reduce tree breakage and provide a platform
+to run tests or code-quality checks that are too annoying or pedantic
+for any human to waste their time with. Developers get immediate (and
+potentially public) feedback about their changes, encouraging them to
+be more careful about testing before checkin.
+
+   Features:
+
+   * run builds on a variety of slave platforms
+
+   * arbitrary build process: handles projects using C, Python,
+     whatever
+
+   * minimal host requirements: python and Twisted
+
+   * slaves can be behind a firewall if they can still do checkout
+
+   * status delivery through web page, email, IRC, other protocols
+
+   * track builds in progress, provide estimated completion time
+
+   * flexible configuration by subclassing generic build process
+     classes
+
+   * debug tools to force a new build, submit fake Changes, query
+     slave status
+
+   * released under the GPL
+
+* Menu:
+
+* History and Philosophy::
+* System Architecture::
+* Control Flow::
+
+
+File: buildbot.info,  Node: History and Philosophy,  Next: System Architecture,  Prev: Introduction,  Up: Introduction
+
+1.1 History and Philosophy
+==========================
+
+The Buildbot was inspired by a similar project built for a development
+team writing a cross-platform embedded system. The various components
+of the project were supposed to compile and run on several flavors of
+unix (linux, solaris, BSD), but individual developers had their own
+preferences and tended to stick to a single platform. From time to
+time, incompatibilities would sneak in (some unix platforms want to
+use `string.h', some prefer `strings.h'), and then the tree would
+compile for some developers but not others. The buildbot was written
+to automate the human process of walking into the office, updating a
+tree, compiling (and discovering the breakage), finding the developer
+at fault, and complaining to them about the problem they had
+introduced. With multiple platforms it was difficult for developers to
+do the right thing (compile their potential change on all platforms);
+the buildbot offered a way to help.
+
+   Another problem was when programmers would change the behavior of a
+library without warning its users, or change internal aspects that
+other code was (unfortunately) depending upon. Adding unit tests to
+the codebase helps here: if an application's unit tests pass despite
+changes in the libraries it uses, you can have more confidence that
+the library changes haven't broken anything. Many developers
+complained that the unit tests were inconvenient or took too long to
+run: having the buildbot run them reduces the developer's workload to
+a minimum.
+
+   In general, having more visibility into the project is always good,
+and automation makes it easier for developers to do the right thing.
+When everyone can see the status of the project, developers are
+encouraged to keep the tree in good working order. Unit tests that
+aren't run on a regular basis tend to suffer from bitrot just like
+code does: exercising them on a regular basis helps to keep them
+functioning and useful.
+
+   The current version of the Buildbot is additionally targeted at
+distributed free-software projects, where resources and platforms are
+only available when provided by interested volunteers. The buildslaves
+are designed to require an absolute minimum of configuration, reducing
+the effort a potential volunteer needs to expend to be able to
+contribute a new test environment to the project. The goal is that
+anyone who wishes a given project would run on their favorite
+platform should be able to offer that project a buildslave, running on
+that platform, where they can verify that their portability code
+works, and keeps working.
+
+
+File: buildbot.info,  Node: System Architecture,  Next: Control Flow,  Prev: History and Philosophy,  Up: Introduction
+
+1.2 System Architecture
+=======================
+
+The Buildbot consists of a single `buildmaster' and one or more
+`buildslaves', connected in a star topology. The buildmaster makes
+all decisions about what, when, and how to build. It sends commands
+to be run on the build slaves, which simply execute the commands and
+return the results. (certain steps involve more local decision
+making, where the overhead of sending a lot of commands back and
+forth would be inappropriate, but in general the buildmaster is
+responsible for everything).
+
+   The buildmaster is usually fed `Changes' by some sort of version
+control system (*note Change Sources::), which may cause builds to be
+run. As the builds are performed, various status messages are
+produced, which are then sent to any registered Status Targets (*note
+Status Delivery::).
+
+
+                  +------------------+           +-----------+
+                  |                  |---------->|  Browser  |
+                  |   BuildMaster    |           +-----------+
+        Changes   |                  |--------------->+--------+
+     +----------->|                  | Build Status   | email  |
+     |            |                  |------------+   +--------+
+     |            |                  |-------+    |     +---------------+
+     |            +------------------+       |    +---->| Status Client |
++----------+         | ^      | ^            |          +---------------+
+| Change   |         | |     C| |            |             +-----+
+|  Sources |         | |     o| |            +------------>| IRC |
+|          |         | |     m| |R                         +-----+
+| CVS      |         v |     m| |e
+| SVN      |    +---------+  a| |s
+| Darcs    |    |  Build  |  n| |u
+| .. etc   |    |  Slave  |  d| |l
+|          |    +---------+  s| |t
+|          |                  v |s
++----------+                +---------+
+                            |  Build  |
+                            |  Slave  |
+                            +---------+
+
+   The buildmaster is configured and maintained by the "buildmaster
+admin", who is generally the project team member responsible for
+build process issues. Each buildslave is maintained by a "buildslave
+admin", who does not need to be quite as involved. Generally slaves are
+run by anyone who has an interest in seeing the project work well on
+their favorite platform.
+
+* Menu:
+
+* BuildSlave Connections::
+* Buildmaster Architecture::
+* Status Delivery Architecture::
+
+
+File: buildbot.info,  Node: BuildSlave Connections,  Next: Buildmaster Architecture,  Prev: System Architecture,  Up: System Architecture
+
+1.2.1 BuildSlave Connections
+----------------------------
+
+The buildslaves are typically run on a variety of separate machines,
+at least one per platform of interest. These machines connect to the
+buildmaster over a TCP connection to a publicly-visible port. As a
+result, the buildslaves can live behind a NAT box or similar
+firewall, as long as they can get to the buildmaster. The TCP connections
+are initiated by the buildslave and accepted by the buildmaster, but
+commands and results travel both ways within this connection. The
+buildmaster is always in charge, so all commands travel exclusively
+from the buildmaster to the buildslave.
+
+   To perform builds, the buildslaves must typically obtain source
+code from a CVS/SVN/etc repository. Therefore they must also be able
+to reach the repository. The buildmaster provides instructions for
+performing builds, but does not provide the source code itself.
+
+
+
+Repository|  |       BuildMaster   |      |
+ (CVS/SVN)|  |                    ^|^^^   |
+          |  |                   / c   \  |
+----------+  +------------------/--o----\-+
+        ^                      /   m  ^  \
+        |                     /    m  |   \
+ checkout/update              --+  a  | +--
+        |                    TCP|  n  | |TCP
+        |                       |  d  | |
+        |                       |  s  | |
+        |                       |  |  | |
+        |                       |  |  r |
+        |                       |  |  e |
+ -N-A-T-|- - - - -N-A-T- - - - -|- |- s-|- - - - -N-A-T- - -
+        |                       |  |  u |
+        |                       |  |  l |
+        |    +------------------|--|--t-|-+
+        |    |                  |  |  s | |
+        +----|                     v  |   |
+             |                        |   |
+             |                        |   |
+             |                            |
+             |       BuildSlave           |
+             +----------------------------+
+
+
+File: buildbot.info,  Node: Buildmaster Architecture,  Next: Status Delivery Architecture,  Prev: BuildSlave Connections,  Up: System Architecture
+
+1.2.2 Buildmaster Architecture
+------------------------------
+
+The Buildmaster consists of several pieces:
+
+
+
+ +---------------+
+ | Change Source |----->----+
+ +---------------+          |
+                         Changes
+                            |
+ +---------------+          v
+ | Change Source |----->----+
+ +---------------+          v
+                      +-----+-------+
+                      |             |
+                      v             v
+              +-----------+    +-----------+
+              | Scheduler |    | Scheduler |
+              +-----------+    +-----------+
+                 |                |  |
+          +------+---------+  +---+  +-----+
+          |                |  |            |
+          v                |  |          Build
+      :      :           : v  v :        Request
+      :      :           :      :          |
+      : ---- :           :      :          |
+      : ---- :           : ---- :          |
+      +======+           +======+      :   v  :
+         |                  |          :      :
+         v                  v          :      :
+   +---------+        +---------+      :queue :
+   | Builder |        | Builder |      +======+
+   +---------+        +---------+         |
+                                          v
+                                    +---------+
+                                    | Builder |
+                                    +---------+
+
+   * Change Sources, which create a Change object each time something
+     is modified in the VC repository. Most ChangeSources listen for
+     messages from a hook script of some sort. Some sources actively
+     poll the repository on a regular basis. All Changes are fed to
+     the Schedulers.
+
+   * Schedulers, which decide when builds should be performed. They
+     collect Changes into BuildRequests, which are then queued for
+     delivery to Builders until a buildslave is available.
+
+   * Builders, which control exactly _how_ each build is performed
+     (with a series of BuildSteps, configured in a BuildFactory). Each
+     Build is run on a single buildslave.
+
+   * Status plugins, which deliver information about the build results
+     through protocols like HTTP, mail, and IRC.
+
+
+
+
+                       +-----------------+
+                       |  BuildSlave     |
+                       |                 |
+                       |                 |
+ +-------+             | +------------+  |
+ |Builder|----Build----->|SlaveBuilder|  |
+ +-------+             | +------------+  |
+                       |                 |
+                       | +------------+  |
+             +-Build---->|SlaveBuilder|  |
+             |         | +------------+  |
+ +-------+   |         |                 |
+ |Builder|---+         +-----------------+
+ +-------+   |
+             |
+             |      +-----------------+
+           Build    |  BuildSlave     |
+             |      |                 |
+             |      |                 |
+             |      | +------------+  |
+             +------->|SlaveBuilder|  |
+                    | +------------+  |
+ +-------+          |                 |
+ |Builder|--+       | +------------+  |
+ +-------+  +-------->|SlaveBuilder|  |
+                    | +------------+  |
+                    |                 |
+                    +-----------------+
+
+   Each Builder is configured with a list of BuildSlaves that it will
+use for its builds. These buildslaves are expected to behave
+identically: the only reason to use multiple BuildSlaves for a single
+Builder is to provide a measure of load-balancing.
+
+   Within a single BuildSlave, each Builder creates its own
+SlaveBuilder instance. These SlaveBuilders operate independently from
+each other.  Each gets its own base directory to work in. It is quite
+common to have many Builders sharing the same buildslave. For
+example, there might be two buildslaves: one for i386, and a second
+for PowerPC.  There may then be a pair of Builders that do a full
+compile/test run, one for each architecture, and a lone Builder that
+creates snapshot source tarballs if the full builders complete
+successfully. The full builders would each run on a single
+buildslave, whereas the tarball creation step might run on either
+buildslave (since the platform doesn't matter when creating source
+tarballs). In this case, the mapping would look like:
+
+     Builder(full-i386)  ->  BuildSlaves(slave-i386)
+     Builder(full-ppc)   ->  BuildSlaves(slave-ppc)
+     Builder(source-tarball) -> BuildSlaves(slave-i386, slave-ppc)
+
+   and each BuildSlave would have two SlaveBuilders inside it, one
+for a full builder, and a second for the source-tarball builder.
+
+   Once a SlaveBuilder is available, the Builder pulls one or more
+BuildRequests off its incoming queue. (It may pull more than one if it
+determines that it can merge the requests together; for example, there
+may be multiple requests to build the current HEAD revision). These
+requests are merged into a single Build instance, which includes the
+SourceStamp that describes what exact version of the source code
+should be used for the build. The Build is then assigned to a
+SlaveBuilder and the build begins.
+
+
+File: buildbot.info,  Node: Status Delivery Architecture,  Prev: Buildmaster Architecture,  Up: System Architecture
+
+1.2.3 Status Delivery Architecture
+----------------------------------
+
+The buildmaster maintains a central Status object, to which various
+status plugins are connected. Through this Status object, a full
+hierarchy of build status objects can be obtained.
+
+
+
+  Status Objects            Status Plugins       User Clients
+
+ +------+                   +---------+        +-----------+
+ |Status|<--------------+-->|Waterfall|<-------|Web Browser|
+ +------+               |   +---------+        +-----------+
+    |  +-----+          |
+    v        v          |
++-------+  +-------+    |     +---+            +----------+
+|Builder|  |Builder|    +---->|IRC|<----------->IRC Server|
+|Status |  |Status |    |     +---+            +----------+
++-------+  +-------+    |
+    |  +----+           |
+    v       v           |   +------------+     +----+
++------+  +------+      +-->|MailNotifier|---->|SMTP|
+|Build |  |Build |          +------------+     +----+
+|Status|  |Status|
++------+  +------+
+    | +-----+
+    v       v
++------+  +------+
+|Step  |  |Step  |
+|Status|  |Status|
++------+  +------+
+   | +---+
+   v     v
++----+ +----+
+|Log | |Log |
+|File| |File|
++----+ +----+
+
+   The configuration file controls which status plugins are active.
+Each status plugin gets a reference to the top-level Status object.
+From there they can request information on each Builder, Build, Step,
+and LogFile. This query-on-demand interface is used by the
+html.Waterfall plugin to create the main status page each time a web
+browser hits the main URL.
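+
+   For example, activating the Waterfall display typically means
+adding a single entry to the status list in the configuration file.
+The fragment below is only a sketch (the port number is a
+placeholder); see *note Status Delivery:: for the full set of
+arguments.
+
+     from buildbot.status import html
+     c['status'] = [html.Waterfall(http_port=8010)]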
+
+   The status plugins can also subscribe to hear about new Builds as
+they occur: this is used by the MailNotifier to create new email
+messages for each recently-completed Build.
+
+   The Status object records the status of old builds on disk in the
+buildmaster's base directory. This allows it to return information
+about historical builds.
+
+   There are also status objects that correspond to Schedulers and
+BuildSlaves. These allow status plugins to report information about
+upcoming builds, and the online/offline status of each buildslave.
+
+
+File: buildbot.info,  Node: Control Flow,  Prev: System Architecture,  Up: Introduction
+
+1.3 Control Flow
+================
+
+A day in the life of the buildbot:
+
+   * A developer commits some source code changes to the repository.
+     A hook script or commit trigger of some sort sends information
+     about this change to the buildmaster through one of its
+     configured Change Sources. This notification might arrive via
+     email, or over a network connection (either initiated by the
+     buildmaster as it "subscribes" to changes, or by the commit
+     trigger as it pushes Changes towards the buildmaster). The
+     Change contains information about who made the change, what
+     files were modified, which revision contains the change, and any
+     checkin comments.
+
+   * The buildmaster distributes this change to all of its configured
+     Schedulers. Any "important" changes cause the "tree-stable-timer"
+     to be started, and the Change is added to a list of those that
+     will go into a new Build. When the timer expires, a Build is
+     started on each of a set of configured Builders, all
+     compiling/testing the same source code. Unless configured
+     otherwise, all Builds run in parallel on the various buildslaves.
+
+   * The Build consists of a series of Steps. Each Step causes some
+     number of commands to be invoked on the remote buildslave
+     associated with that Builder. The first step is almost always to
+     perform a checkout of the appropriate revision from the same VC
+     system that produced the Change. The rest generally perform a
+     compile and run unit tests. As each Step runs, the buildslave
+     reports back command output and return status to the buildmaster.
+
+   * As the Build runs, status messages like "Build Started", "Step
+     Started", "Build Finished", etc, are published to a collection of
+     Status Targets. One of these targets is usually the HTML
+     "Waterfall" display, which shows a chronological list of events,
+     and summarizes the results of the most recent build at the top
+     of each column.  Developers can periodically check this page to
+     see how their changes have fared. If they see red, they know
+     that they've made a mistake and need to fix it. If they see
+     green, they know that they've done their duty and don't need to
+     worry about their change breaking anything.
+
+   * If a MailNotifier status target is active, the completion of a
+     build will cause email to be sent to any developers whose
+     Changes were incorporated into this Build. The MailNotifier can
+     be configured to only send mail upon failing builds, or for
+     builds which have just transitioned from passing to failing.
+     Other status targets can provide similar real-time notification
+     via different communication channels, like IRC.
+
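+   As a sketch of that last point (the addresses below are
+placeholders, and the complete list of MailNotifier arguments is
+described in *note Status Delivery::), a mail-on-regression notifier
+might be added to the status list like this:
+
+     from buildbot.status import mail
+     c['status'].append(mail.MailNotifier(
+         fromaddr="buildbot@example.org",       # placeholder sender
+         extraRecipients=["devs@example.org"],  # placeholder list address
+         sendToInterestedUsers=False,
+         mode="problem"))  # only mail when a build goes from pass to fail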
+
+
+File: buildbot.info,  Node: Installation,  Next: Concepts,  Prev: Introduction,  Up: Top
+
+2 Installation
+**************
+
+* Menu:
+
+* Requirements::
+* Installing the code::
+* Creating a buildmaster::
+* Creating a buildslave::
+* Launching the daemons::
+* Logfiles::
+* Shutdown::
+* Maintenance::
+* Troubleshooting::
+
+
+File: buildbot.info,  Node: Requirements,  Next: Installing the code,  Prev: Installation,  Up: Installation
+
+2.1 Requirements
+================
+
+At a bare minimum, you'll need the following (for both the buildmaster
+and a buildslave):
+
+   * Python: http://www.python.org
+
+     Buildbot requires python-2.2 or later, and is primarily developed
+     against python-2.3. The buildmaster uses generators, a feature
+     which is not available in python-2.1, and both master and slave
+     require a version of Twisted which only works with python-2.2 or
+     later. Certain features (like the inclusion of build logs in
+     status emails) require python-2.2.2 or later. The IRC "force
+     build" command requires python-2.3 (for the shlex.split
+     function).
+
+   * Twisted: http://twistedmatrix.com
+
+     Both the buildmaster and the buildslaves require Twisted-1.3.0 or
+     later. It has been mainly developed against Twisted-2.0.1, but
+     has been tested against Twisted-2.1.0 (the most recent as of this
+     writing), and might even work on versions as old as
+     Twisted-1.1.0, but as always the most recent version is
+     recommended.
+
+     Twisted-1.3.0 and earlier were released as a single monolithic
+     package. When you run Buildbot against Twisted-2.0.0 or later
+     (which are split into a number of smaller subpackages), you'll
+     need at least "Twisted" (the core package), and you'll also want
+     TwistedMail, TwistedWeb, and TwistedWords (for sending email,
+     serving a web status page, and delivering build status via IRC,
+     respectively).
+
+   Certain other packages may be useful on the system running the
+buildmaster:
+
+   * CVSToys: http://purl.net/net/CVSToys
+
+     If your buildmaster uses FreshCVSSource to receive change
+     notification from a cvstoys daemon, it will require CVSToys to be
+     installed (tested with CVSToys-1.0.10). If it doesn't use
+     that source (i.e. if you only use a mail-parsing change source,
+     or the SVN notification script), you will not need CVSToys.
+
+
+   And of course, your project's build process will impose additional
+requirements on the buildslaves. These hosts must have all the tools
+necessary to compile and test your project's source code.
+
+
+File: buildbot.info,  Node: Installing the code,  Next: Creating a buildmaster,  Prev: Requirements,  Up: Installation
+
+2.2 Installing the code
+=======================
+
+The Buildbot is installed using the standard python `distutils'
+module. After unpacking the tarball, the process is:
+
+     python setup.py build
+     python setup.py install
+
+   where the install step may need to be done as root. This will put
+the bulk of the code in somewhere like
+/usr/lib/python2.3/site-packages/buildbot . It will also install the
+`buildbot' command-line tool in /usr/bin/buildbot.
+
+   To test this, shift to a different directory (like /tmp), and run:
+
+     buildbot --version
+
+   If it shows you the versions of Buildbot and Twisted, the install
+went ok. If it says `no such command' or it gets an `ImportError'
+when it tries to load the libraries, then something went wrong.
+`pydoc buildbot' is another useful diagnostic tool.
+
+   Windows users will find these files in other places. You will need
+to make sure that python can find the libraries, and will probably
+find it convenient to have `buildbot' on your PATH.
+
+   If you wish, you can run the buildbot unit test suite like this:
+
+     PYTHONPATH=. trial buildbot.test
+
+   This should run up to 192 tests, depending upon what VC tools you
+have installed. On my desktop machine it takes about five minutes to
+complete. Nothing should fail, though a few might be skipped. If any
+of the tests fail, you should stop and investigate the cause before
+continuing the installation process, as it will probably be easier to
+track down the bug early.
+
+   If you cannot or do not wish to install the buildbot into a
+site-wide location like `/usr' or `/usr/local', you can also install
+it into the account's home directory. Do the install command like
+this:
+
+     python setup.py install --home=~
+
+   That will populate `~/lib/python' and create `~/bin/buildbot'.
+Make sure this lib directory is on your `PYTHONPATH'.
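+
+   For a Bourne-style shell, that might look like the following
+(adjust the path if you installed somewhere other than your home
+directory):
+
+     PYTHONPATH=$HOME/lib/python
+     export PYTHONPATH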
+
+
+File: buildbot.info,  Node: Creating a buildmaster,  Next: Creating a buildslave,  Prev: Installing the code,  Up: Installation
+
+2.3 Creating a buildmaster
+==========================
+
+As you learned earlier (*note System Architecture::), the buildmaster
+runs on a central host (usually one that is publicly visible, so
+everybody can check on the status of the project), and controls all
+aspects of the buildbot system. Let us call this host
+`buildbot.example.org'.
+
+   You may wish to create a separate user account for the buildmaster,
+perhaps named `buildmaster'. This can help keep your personal
+configuration distinct from that of the buildmaster and is useful if
+you have to use a mail-based notification system (*note Change
+Sources::). However, the Buildbot will work just fine with your
+regular user account.
+
+   You need to choose a directory for the buildmaster, called the
+`basedir'. This directory will be owned by the buildmaster, which
+will use configuration files therein, and create status files as it
+runs. `~/Buildbot' is a likely value. If you run multiple
+buildmasters in the same account, or if you run both masters and
+slaves, you may want a more distinctive name like
+`~/Buildbot/master/gnomovision' or `~/Buildmasters/fooproject'. If
+you are using a separate user account, this might just be
+`~buildmaster/masters/fooproject'.
+
+   Once you've picked a directory, use the `buildbot create-master'
+command to create the directory and populate it with startup files:
+
+     buildbot create-master BASEDIR
+
+   You will need to create a configuration file (*note
+Configuration::) before starting the buildmaster. Most of the rest of
+this manual is dedicated to explaining how to do this. A sample
+configuration file is placed in the working directory, named
+`master.cfg.sample', which can be copied to `master.cfg' and edited
+to suit your purposes.
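+
+   To give a rough idea of its shape, a stripped-down configuration
+might look something like the sketch below. This is not a complete or
+authoritative example (the names, URLs, port numbers, and passwords
+are all placeholders); *note Configuration:: describes each entry in
+detail.
+
+     # master.cfg -- a minimal sketch only
+     c = BuildmasterConfig = {}
+
+     c['bots'] = [("bot1name", "bot1passwd")]  # buildslave name/password
+     c['slavePortnum'] = 9989                  # port the slaves connect to
+
+     from buildbot.changes.pb import PBChangeSource
+     c['sources'] = [PBChangeSource()]
+
+     from buildbot.scheduler import Scheduler
+     c['schedulers'] = [Scheduler(name="all", branch=None,
+                                  treeStableTimer=2*60,
+                                  builderNames=["full"])]
+
+     from buildbot.process import factory, step
+     f = factory.BuildFactory([
+         factory.s(step.SVN, svnurl="svn://svn.example.org/repo/trunk"),
+         factory.s(step.Compile),
+         ])
+     c['builders'] = [{'name': "full", 'slavename': "bot1name",
+                       'builddir': "full", 'factory': f}]
+
+     from buildbot.status import html
+     c['status'] = [html.Waterfall(http_port=8010)]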
+
+   (Internal details: This command creates a file named
+`buildbot.tac' that contains all the state necessary to create the
+buildmaster. Twisted has a tool called `twistd' which can use this
+.tac file to create and launch a buildmaster instance. twistd takes
+care of logging and daemonization (running the program in the
+background). `/usr/bin/buildbot' is a front end which runs twistd for
+you.)
+
+   In addition to `buildbot.tac', a small `Makefile.sample' is
+installed. This can be used as the basis for customized daemon
+startup, *Note Launching the daemons::.
+
+
+File: buildbot.info,  Node: Creating a buildslave,  Next: Launching the daemons,  Prev: Creating a buildmaster,  Up: Installation
+
+2.4 Creating a buildslave
+=========================
+
+Typically, you will be adding a buildslave to an existing buildmaster,
+to provide additional architecture coverage. The buildbot
+administrator will give you several pieces of information necessary to
+connect to the buildmaster. You should also be somewhat familiar with
+the project being tested, so you can troubleshoot build problems
+locally.
+
+   The buildbot exists to make sure that the project's stated "how to
+build it" process actually works. To this end, the buildslave should
+run in an environment just like that of your regular developers.
+Typically the project build process is documented somewhere
+(`README', `INSTALL', etc), in a document that should mention all
+library dependencies and contain a basic set of build instructions.
+This document will be useful as you configure the host and account in
+which the buildslave runs.
+
+   Here's a good checklist for setting up a buildslave:
+
+  1. Set up the account
+
+     It is recommended (although not mandatory) to set up a separate
+     user account for the buildslave. This account is frequently named
+     `buildbot' or `buildslave'. This serves to isolate your personal
+     working environment from that of the slave, and helps to
+     minimize the security threat posed by letting possibly-unknown
+     contributors run arbitrary code on your system. The account
+     should have a minimum of fancy init scripts.
+
+  2. Install the buildbot code
+
+     Follow the instructions given earlier (*note Installing the
+     code::).  If you use a separate buildslave account, and you
+     didn't install the buildbot code to a shared location, then you
+     will need to install it with `--home=~' for each account that
+     needs it.
+
+  3. Set up the host
+
+     Make sure the host can actually reach the buildmaster. Usually
+     the buildmaster is running a status webserver on the same
+     machine, so simply point your web browser at it and see if you
+     can get there.  Install whatever additional packages or
+     libraries the project's INSTALL document advises. (or not: if
+     your buildslave is supposed to make sure that building without
+     optional libraries still works, then don't install those
+     libraries).
+
+     Again, these libraries don't necessarily have to be installed to
+     a site-wide shared location, but they must be available to your
+     build process. Accomplishing this is usually very specific to
+     the build process, so installing them to `/usr' or `/usr/local'
+     is usually the best approach.
+
+  4. Test the build process
+
+     Follow the instructions in the INSTALL document, in the
+     buildslave's account. Perform a full CVS (or whatever) checkout,
+     configure, make, run tests, etc. Confirm that the build works
+     without manual fussing.  If it doesn't work when you do it by
+     hand, it will be unlikely to work when the buildbot attempts to
+     do it in an automated fashion.
+
+  5. Choose a base directory
+
+     This should be somewhere in the buildslave's account, typically
+     named after the project which is being tested. The buildslave
+     will not touch any file outside of this directory. Something
+     like `~/Buildbot' or `~/Buildslaves/fooproject' is appropriate.
+
+  6. Get the buildmaster host/port, botname, and password
+
+     When the buildbot admin configures the buildmaster to accept and
+     use your buildslave, they will provide you with the following
+     pieces of information:
+
+        * your buildslave's name
+
+        * the password assigned to your buildslave
+
+        * the hostname and port number of the buildmaster, i.e.
+          buildbot.example.org:8007
+
+  7. Create the buildslave
+
+     Now run the 'buildbot' command as follows:
+
+          buildbot create-slave BASEDIR MASTERHOST:PORT SLAVENAME PASSWORD
+
+     This will create the base directory and a collection of files
+     inside, including the `buildbot.tac' file that contains all the
+     information you passed to the `buildbot' command.
+
+  8. Fill in the hostinfo files
+
+     When it first connects, the buildslave will send a few files up
+     to the buildmaster which describe the host that it is running
+     on. These files are presented on the web status display so that
+     developers have more information to reproduce any test failures
+     that are witnessed by the buildbot. There are sample files in
+     the `info' subdirectory of the buildbot's base directory. You
+     should edit these to correctly describe you and your host.
+
+     `BASEDIR/info/admin' should contain your name and email address.
+     This is the "buildslave admin address", and will be visible from
+     the build status page (so you may wish to munge it a bit if
+     address-harvesting spambots are a concern).
+
+     `BASEDIR/info/host' should be filled with a brief description of
+     the host: OS, version, memory size, CPU speed, versions of
+     relevant libraries installed, and finally the version of the
+     buildbot code which is running the buildslave.
+
+     If you run many buildslaves, you may want to create a single
+     `~buildslave/info' file and share it among all the buildslaves
+     with symlinks.
+
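+   As a concrete illustration of item 8, the two files might read
+something like this (the contents are hypothetical; substitute your
+own details):
+
+     `BASEDIR/info/admin':
+         Your Name Here <buildslave-admin@example.org>
+
+     `BASEDIR/info/host':
+         Debian GNU/Linux 3.1 on a 1GHz Pentium III with 512MB RAM;
+         gcc 3.3.5, python 2.3.5, Twisted 2.1.0; running buildbot 0.7.x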
+
+* Menu:
+
+* Buildslave Options::
+
+
+File: buildbot.info,  Node: Buildslave Options,  Prev: Creating a buildslave,  Up: Creating a buildslave
+
+2.4.1 Buildslave Options
+------------------------
+
+There are a handful of options you might want to use when creating the
+buildslave with the `buildbot create-slave <options> DIR <params>'
+command. You can type `buildbot create-slave --help' for a summary.
+To use these, just include them on the `buildbot create-slave'
+command line, like this:
+
+     buildbot create-slave --umask=022 ~/buildslave buildmaster.example.org:42012 myslavename mypasswd
+
+`--usepty'
+     This is a boolean flag that tells the buildslave whether to
+     launch child processes in a PTY (the default) or with regular
+     pipes. The advantage of using a PTY is that "grandchild"
+     processes are more likely to be cleaned up if the build is
+     interrupted or times out (since it enables the use of a "process
+     group" in which all child processes will be placed). The
+     disadvantages: some forms of Unix have problems with PTYs, some
+     of your unit tests may behave differently when run under a PTY
+     (generally those which check to see if they are being run
+     interactively), and PTYs will merge the stdout and stderr
+     streams into a single output stream (which means the red-vs-black
+     coloring in the logfiles will be lost). If you encounter
+     problems, you can add `--usepty=0' to disable the use of PTYs.
+     Note that windows buildslaves never use PTYs.
+
+`--umask'
+     This is a string (generally an octal representation of an
+     integer) which will cause the buildslave process' "umask" value
+     to be set shortly after initialization. The "twistd"
+     daemonization utility forces the umask to 077 at startup (which
+     means that all files created by the buildslave or its child
+     processes will be unreadable by any user other than the
+     buildslave account). If you want build products to be readable
+     by other accounts, you can add `--umask=022' to tell the
+     buildslave to fix the umask after twistd clobbers it. If you want
+     build products to be _writable_ by other accounts too, use
+     `--umask=000', but this is likely to be a security problem.
+
+`--keepalive'
+     This is a number that indicates how frequently "keepalive"
+     messages should be sent from the buildslave to the buildmaster,
+     expressed in seconds. The default (600) causes a message to be
+     sent to the buildmaster at least once every 10 minutes. To set
+     this to a lower value, use e.g. `--keepalive=120'.
+
+     If the buildslave is behind a NAT box or stateful firewall, these
+     messages may help to keep the connection alive: some NAT boxes
+     tend to forget about a connection if it has not been used in a
+     while. When this happens, the buildmaster will think that the
+     buildslave has disappeared, and builds will time out. Meanwhile
+     the buildslave will not realize that anything is wrong.
+
+
+
+File: buildbot.info,  Node: Launching the daemons,  Next: Logfiles,  Prev: Creating a buildslave,  Up: Installation
+
+2.5 Launching the daemons
+=========================
+
+Both the buildmaster and the buildslave run as daemon programs. To
+launch them, pass the working directory to the `buildbot' command:
+
+     buildbot start BASEDIR
+
+   This command will start the daemon and then return, so normally it
+will not produce any output. To verify that the programs are indeed
+running, look for a pair of files named `twistd.log' and `twistd.pid'
+that should be created in the working directory.  `twistd.pid'
+contains the process ID of the newly-spawned daemon.
+
+   When the buildslave connects to the buildmaster, new directories
+will start appearing in its base directory. The buildmaster tells the
+slave to create a directory for each Builder which will be using that
+slave.  All build operations are performed within these directories:
+CVS checkouts, compiles, and tests.
+
+   Once you get everything running, you will want to arrange for the
+buildbot daemons to be started at boot time. One way is to use
+`cron', by putting them in a @reboot crontab entry(1):
+
+     @reboot buildbot start BASEDIR
+
+   When you run `crontab' to set this up, remember to do it as the
+buildmaster or buildslave account! If you add this to your crontab
+when running as your regular account (or worse yet, root), then the
+daemon will run as the wrong user, quite possibly as one with more
+authority than you intended to provide.
+
+   It is important to remember that the environment provided to cron
+jobs and init scripts can be quite different from your normal runtime.
+There may be fewer environment variables specified, and the PATH may
+be shorter than usual. It is a good idea to test out this method of
+launching the buildslave by using a cron job with a time in the near
+future, with the same command, and then check `twistd.log' to make
+sure the slave actually started correctly. Common problems here are
+for `/usr/local' or `~/bin' to not be on your `PATH', or for
+`PYTHONPATH' to not be set correctly.  Sometimes `HOME' is messed up
+too.
+
+   To modify the way the daemons are started (perhaps you want to set
+some environment variables first, or perform some cleanup each time),
+you can create a file named `Makefile.buildbot' in the base
+directory. When the `buildbot' front-end tool is told to `start' the
+daemon, and it sees this file (and `/usr/bin/make' exists), it will
+do `make -f Makefile.buildbot start' instead of its usual action
+(which involves running `twistd'). When the buildmaster or buildslave
+is installed, a `Makefile.sample' is created which implements the
+same behavior that the `buildbot' tool uses, so if you want to
+customize the process, just copy `Makefile.sample' to
+`Makefile.buildbot' and edit it as necessary.
+
+   ---------- Footnotes ----------
+
+   (1) this @reboot syntax is understood by Vixie cron, which is the
+flavor usually provided with linux systems. Other unices may have a
+cron that doesn't understand @reboot
+
+
+File: buildbot.info,  Node: Logfiles,  Next: Shutdown,  Prev: Launching the daemons,  Up: Installation
+
+2.6 Logfiles
+============
+
+While a buildbot daemon runs, it emits text to a logfile, named
+`twistd.log'. A command like `tail -f twistd.log' is useful to watch
+the command output as it runs.
+
+   The buildmaster will announce any errors with its configuration
+file in the logfile, so it is a good idea to look at the log at
+startup time to check for any problems. Most buildmaster activities
+will cause lines to be added to the log.
+
+
+File: buildbot.info,  Node: Shutdown,  Next: Maintenance,  Prev: Logfiles,  Up: Installation
+
+2.7 Shutdown
+============
+
+To stop a buildmaster or buildslave manually, use:
+
+     buildbot stop BASEDIR
+
+   This simply looks for the `twistd.pid' file and kills whatever
+process is identified within.
+
+   At system shutdown, all processes are sent a `SIGKILL'. The
+buildmaster and buildslave will respond to this by shutting down
+normally.
+
+   The buildmaster will respond to a `SIGHUP' by re-reading its
+config file. The following shortcut is available:
+
+     buildbot reconfig BASEDIR
+
+   When you update the Buildbot code to a new release, you will need
+to restart the buildmaster and/or buildslave before it can take
+advantage of the new code. You can do a `buildbot stop BASEDIR' and
+`buildbot start BASEDIR' in quick succession, or you can use the
+`restart' shortcut, which does both steps for you:
+
+     buildbot restart BASEDIR
+
+
+File: buildbot.info,  Node: Maintenance,  Next: Troubleshooting,  Prev: Shutdown,  Up: Installation
+
+2.8 Maintenance
+===============
+
+It is a good idea to check the buildmaster's status page every once in
+a while, to see if your buildslave is still online. Eventually the
+buildbot will probably be enhanced to send you email (via the
+`info/admin' email address) when the slave has been offline for more
+than a few hours.
+
+   If you find you can no longer provide a buildslave to the project,
+please let the project admins know, so they can put out a call for a
+replacement.
+
+   The Buildbot records status and logs output continually, each time
+a build is performed. The status tends to be small, but the build logs
+can become quite large. Each build and log are recorded in a separate
+file, arranged hierarchically under the buildmaster's base directory.
+To prevent these files from growing without bound, you should
+periodically delete old build logs. A simple cron job to delete
+anything older than, say, two weeks should do the job. The only trick
+is to leave the `buildbot.tac' and other support files alone, for
+which find's `-mindepth' argument helps skip everything in the top
+directory. You can use something like the following:
+
+     @weekly cd BASEDIR && find . -mindepth 2 -type f -mtime +14 -exec rm {} \;
+     @weekly cd BASEDIR && find twistd.log* -mtime +14 -exec rm {} \;
+
+
+File: buildbot.info,  Node: Troubleshooting,  Prev: Maintenance,  Up: Installation
+
+2.9 Troubleshooting
+===================
+
+Here are a few hints on diagnosing common problems.
+
+* Menu:
+
+* Starting the buildslave::
+* Connecting to the buildmaster::
+* Forcing Builds::
+
+
+File: buildbot.info,  Node: Starting the buildslave,  Next: Connecting to the buildmaster,  Prev: Troubleshooting,  Up: Troubleshooting
+
+2.9.1 Starting the buildslave
+-----------------------------
+
+Cron jobs are typically run with a minimal shell (`/bin/sh', not
+`/bin/bash'), and tilde expansion is not always performed in such
+commands. You may want to use explicit paths, because the `PATH' is
+usually quite short and doesn't include anything set by your shell's
+startup scripts (`.profile', `.bashrc', etc). If you've installed
+buildbot (or other python libraries) to an unusual location, you may
+need to add a `PYTHONPATH' specification (note that python will do
+tilde-expansion on `PYTHONPATH' elements by itself). Sometimes it is
+safer to fully-specify everything:
+
+     @reboot PYTHONPATH=~/lib/python /usr/local/bin/buildbot start /usr/home/buildbot/basedir
+
+   Take the time to get the @reboot job set up. Otherwise, things
+will work fine for a while, but the first power outage or system
+reboot you have will stop the buildslave with nothing but the cries
+of sorrowful developers to remind you that it has gone away.
+
+
+File: buildbot.info,  Node: Connecting to the buildmaster,  Next: Forcing Builds,  Prev: Starting the buildslave,  Up: Troubleshooting
+
+2.9.2 Connecting to the buildmaster
+-----------------------------------
+
+If the buildslave cannot connect to the buildmaster, the reason should
+be described in the `twistd.log' logfile. Some common problems are an
+incorrect master hostname or port number, or a mistyped bot name or
+password. If the buildslave loses the connection to the master, it is
+supposed to attempt to reconnect with an exponentially-increasing
+backoff. Each attempt (and the time of the next attempt) will be
+logged. If you get impatient, just manually stop and re-start the
+buildslave.
+
+   When the buildmaster is restarted, all slaves will be disconnected,
+and will attempt to reconnect as usual. The reconnect time will depend
+upon how long the buildmaster is offline (i.e. how far up the
+exponential backoff curve the slaves have travelled). Again,
+`buildbot stop BASEDIR; buildbot start BASEDIR' will speed up the
+process.
+
+
+File: buildbot.info,  Node: Forcing Builds,  Prev: Connecting to the buildmaster,  Up: Troubleshooting
+
+2.9.3 Forcing Builds
+--------------------
+
+From the buildmaster's main status web page, you can force a build to
+be run on your build slave. Figure out which column is for a builder
+that runs on your slave, click on that builder's name, and the page
+that comes up will have a "Force Build" button. Fill in the form, hit
+the button, and a moment later you should see your slave's
+`twistd.log' filling with commands being run. Using `pstree' or `top'
+should also reveal the cvs/make/gcc/etc processes being run by the
+buildslave. Note that the same web page should also show the `admin'
+and `host' information files that you configured earlier.
+
+
+File: buildbot.info,  Node: Concepts,  Next: Configuration,  Prev: Installation,  Up: Top
+
+3 Concepts
+**********
+
+This chapter defines some of the basic concepts that the Buildbot
+uses. You'll need to understand how the Buildbot sees the world to
+configure it properly.
+
+* Menu:
+
+* Version Control Systems::
+* Schedulers::
+* BuildSet::
+* BuildRequest::
+* Builder::
+* Users::
+
+
+File: buildbot.info,  Node: Version Control Systems,  Next: Schedulers,  Prev: Concepts,  Up: Concepts
+
+3.1 Version Control Systems
+===========================
+
+These source trees come from a Version Control System of some kind.
+CVS and Subversion are two popular ones, but the Buildbot supports
+others. All VC systems have some notion of an upstream `repository'
+which acts as a server(1), from which clients can obtain source trees
+according to various parameters. The VC repository provides source
+trees of various projects, for different branches, and from various
+points in time. The first thing we have to do is to specify which
+source tree we want to get.
+
+* Menu:
+
+* Generalizing VC Systems::
+* Source Tree Specifications::
+* How Different VC Systems Specify Sources::
+* Attributes of Changes::
+
+   ---------- Footnotes ----------
+
+   (1) except Darcs, but since the Buildbot never modifies its local
+source tree we can ignore the fact that Darcs uses a less centralized
+model
+
+
+File: buildbot.info,  Node: Generalizing VC Systems,  Next: Source Tree Specifications,  Prev: Version Control Systems,  Up: Version Control Systems
+
+3.1.1 Generalizing VC Systems
+-----------------------------
+
+For the purposes of the Buildbot, we will try to generalize all VC
+systems as having repositories that each provide sources for a variety
+of projects. Each project is defined as a directory tree with source
+files. The individual files may each have revisions, but we ignore
+that and treat the project as a whole as having a set of revisions.
+Each time someone commits a change to the project, a new revision
+becomes available. These revisions can be described by a tuple with
+two items: the first is a branch tag, and the second is some kind of
+timestamp or revision stamp. Complex projects may have multiple branch
+tags, but there is always a default branch. The timestamp may be an
+actual timestamp (such as the -D option to CVS), or it may be a
+monotonically-increasing transaction number (such as the change number
+used by SVN and P4, or the revision number used by Arch, or a labeled
+tag used in CVS)(1). The SHA1 revision ID used by Monotone and
+Mercurial is also a kind of revision stamp, in that it specifies a
+unique copy of the source tree, as does a Darcs "context" file.
+
+   When we aren't intending to make any changes to the sources we
+check out (at least not any that need to be committed back upstream),
+there are two basic ways to use a VC system:
+
+   * Retrieve a specific set of source revisions: some tag or key is
+     used to index this set, which is fixed and cannot be changed by
+     subsequent developers committing new changes to the tree.
+     Releases are built from tagged revisions like this, so that they
+     can be rebuilt again later (probably with controlled
+     modifications).
+
+   * Retrieve the latest sources along a specific branch: some tag is
+     used to indicate which branch is to be used, but within that
+     constraint we want to get the latest revisions.
+
+   Build personnel or CM staff typically use the first approach: the
+build that results is (ideally) completely specified by the two
+parameters given to the VC system: repository and revision tag. This
+gives QA and end-users something concrete to point at when reporting
+bugs. Release engineers are also reportedly fond of shipping code that
+can be traced back to a concise revision tag of some sort.
+
+   Developers are more likely to use the second approach: each morning
+the developer does an update to pull in the changes committed by the
+team over the last day. These builds are not easy to fully specify: it
+depends upon exactly when you did a checkout, and upon what local
+changes the developer has in their tree. Developers do not normally
+tag each build they produce, because there is usually significant
+overhead involved in creating these tags. Recreating the trees used by
+one of these builds can be a challenge. Some VC systems may provide
+implicit tags (like a revision number), while others may allow the use
+of timestamps to mean "the state of the tree at time X" as opposed to
+a tree-state that has been explicitly marked.
+
+   The Buildbot is designed to help developers, so it usually works in
+terms of _the latest_ sources as opposed to specific tagged
+revisions. However, it would really prefer to build from reproducible
+source trees, so implicit revisions are used whenever possible.
+
+   ---------- Footnotes ----------
+
+   (1) many VC systems provide more complexity than this: in
+particular the local views that P4 and ClearCase can assemble out of
+various source directories are more complex than we're prepared to
+take advantage of here
+
+
+File: buildbot.info,  Node: Source Tree Specifications,  Next: How Different VC Systems Specify Sources,  Prev: Generalizing VC Systems,  Up: Version Control Systems
+
+3.1.2 Source Tree Specifications
+--------------------------------
+
+So for the Buildbot's purposes we treat each VC system as a server
+which can take a list of specifications as input and produce a source
+tree as output. Some of these specifications are static: they are
+attributes of the builder and do not change over time. Others are more
+variable: each build will have a different value. The repository is
+changed over time by a sequence of Changes, each of which represents a
+single developer making changes to some set of files. These Changes
+are cumulative(1).
+
+   For normal builds, the Buildbot wants to get well-defined source
+trees that contain specific Changes, and exclude other Changes that
+may have occurred after the desired ones. We assume that the Changes
+arrive at the buildbot (through one of the mechanisms described in
+*note Change Sources::) in the same order in which they are committed
+to the repository. The Buildbot waits for the tree to become "stable"
+before initiating a build, for two reasons. The first is that
+developers frequently make multiple related commits in quick
+succession, even when the VC system provides ways to make atomic
+transactions involving multiple files at the same time. Running a
+build in the middle of these sets of changes would use an inconsistent
+set of source files, and is likely to fail (and is certain to be less
+useful than a build which uses the full set of changes). The
+tree-stable-timer is intended to avoid these useless builds that
+include some of the developer's changes but not all. The second reason
+is that some VC systems (i.e. CVS) do not provide repository-wide
+transaction numbers, so that timestamps are the only way to refer to
+a specific repository state. These timestamps may be somewhat
+ambiguous, due to processing and notification delays. By waiting until
+the tree has been stable for, say, 10 minutes, we can choose a
+timestamp from the middle of that period to use for our source
+checkout, and then be reasonably sure that any clock-skew errors will
+not cause the build to be performed on an inconsistent set of source
+files.
+
+   The Schedulers always use the tree-stable-timer, with a timeout
+that is configured to reflect a reasonable tradeoff between build
+latency and change frequency. When the VC system provides coherent
+repository-wide revision markers (such as Subversion's revision
+numbers, or in fact anything other than CVS's timestamps), the
+resulting Build is simply performed against a source tree defined by
+that revision marker. When the VC system does not provide this, a
+timestamp from the middle of the tree-stable period is used to
+generate the source tree(2).
+
+   ---------- Footnotes ----------
+
+   (1) Monotone's _multiple heads_ feature violates this assumption
+of cumulative Changes, but in most situations the changes don't occur
+frequently enough for this to be a significant problem
+
+   (2) this `checkoutDelay' defaults to half the tree-stable timer,
+but it can be overridden with an argument to the Source Step
+
+
+File: buildbot.info,  Node: How Different VC Systems Specify Sources,  Next: Attributes of Changes,  Prev: Source Tree Specifications,  Up: Version Control Systems
+
+3.1.3 How Different VC Systems Specify Sources
+----------------------------------------------
+
+For CVS, the static specifications are `repository' and `module'. In
+addition to those, each build uses a timestamp (or omits the
+timestamp to mean `the latest') and `branch tag' (which defaults to
+HEAD). These parameters collectively specify a set of sources from
+which a build may be performed.
+
+   Subversion (http://subversion.tigris.org) combines the repository,
+module, and branch into a single `Subversion URL' parameter. Within
+that scope, source checkouts can be specified by a numeric `revision
+number' (a repository-wide monotonically-increasing marker, such that
+each transaction that changes the repository is indexed by a
+different revision number), or a revision timestamp. When branches
+are used, the repository and module form a static `baseURL', while
+each build has a `revision number' and a `branch' (which defaults to a
+statically-specified `defaultBranch'). The `baseURL' and `branch' are
+simply concatenated together to derive the `svnurl' to use for the
+checkout.
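+
+   As a sketch of that arrangement (the repository URL below is a
+placeholder; the full list of Source-step arguments is described in
+*note Source Checkout::), a branch-aware SVN checkout step might be
+declared like this:
+
+     from buildbot.process import factory, step
+     f = factory.BuildFactory([
+         factory.s(step.SVN, mode="update",
+                   baseURL="svn://svn.example.org/repo/",  # static part
+                   defaultBranch="trunk"),  # used when no branch is given
+         ])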
+
+   Perforce (http://www.perforce.com/) is similar. The server is
+specified through a `P4PORT' parameter. Module and branch are
+specified in a single depot path, and revisions are depot-wide. When
+branches are used, the `p4base' and `defaultBranch' are concatenated
+together to produce the depot path.
+
+   Arch (http://wiki.gnuarch.org/) and Bazaar
+(http://bazaar.canonical.com/) specify a repository by URL, as well
+as a `version' which is kind of like a branch name.  Arch uses the
+word `archive' to represent the repository. Arch lets you push
+changes from one archive to another, removing the strict
+centralization required by CVS and SVN. It retains the distinction
+between repository and working directory that most other VC systems
+use. For complex multi-module directory structures, Arch has a
+built-in `build config' layer with which the checkout process has two
+steps. First, an initial bootstrap checkout is performed to retrieve
+a set of build-config files. Second, one of these files is used to
+figure out which archives/modules should be used to populate
+subdirectories of the initial checkout.
+
+   Builders which use Arch and Bazaar therefore have a static archive
+`url', and a default "branch" (which is a string that specifies a
+complete category-branch-version triple). Each build can have its own
+branch (the category-branch-version string) to override the default,
+as well as a revision number (which is turned into a -patch-NN suffix
+when performing the checkout).
+
+   Darcs (http://abridgegame.org/darcs/) doesn't really have the
+notion of a single master repository. Nor does it really have
+branches. In Darcs, each working directory is also a repository, and
+there are operations to push and pull patches from one of these
+`repositories' to another. For the Buildbot's purposes, all you need
+to do is specify the URL of a repository that you want to build from.
+The build slave will then pull the latest patches from that
+repository and build them. Multiple branches are implemented by using
+multiple repositories (possibly living on the same server).
+
+   Builders which use Darcs therefore have a static `repourl' which
+specifies the location of the repository. If branches are being used,
+the source Step is instead configured with a `baseURL' and a
+`defaultBranch', and the two strings are simply concatenated together
+to obtain the repository's URL. Each build then has a specific branch
+which replaces `defaultBranch', or just uses the default one. Instead
+of a revision number, each build can have a "context", which is a
+string that records all the patches that are present in a given tree
+(this is the output of `darcs changes --context', and is considerably
+less concise than, e.g. Subversion's revision number, but the
+patch-reordering flexibility of Darcs makes it impossible to provide
+a shorter useful specification).
+
+   Mercurial (http://selenic.com/mercurial) is like Darcs, in that
+each branch is stored in a separate repository. The `repourl',
+`baseURL', and `defaultBranch' arguments are all handled the same way
+as with Darcs. The "revision", however, is the hash identifier
+returned by `hg identify'.
+
+
+File: buildbot.info,  Node: Attributes of Changes,  Prev: How Different VC Systems Specify Sources,  Up: Version Control Systems
+
+3.1.4 Attributes of Changes
+---------------------------
+
+Who
+===
+
+Each Change has a `who' attribute, which specifies which developer is
+responsible for the change. This is a string which comes from a
+namespace controlled by the VC repository. Frequently this means it
+is a username on the host which runs the repository, but not all VC
+systems require this (Arch, for example, uses a fully-qualified `Arch
+ID', which looks like an email address, as does Darcs).  Each
+StatusNotifier will map the `who' attribute into something
+appropriate for their particular means of communication: an email
+address, an IRC handle, etc.
+
+Files
+=====
+
+It also has a list of `files', which are just the tree-relative
+filenames of any files that were added, deleted, or modified for this
+Change. These filenames are used by the `isFileImportant' function
+(in the Scheduler) to decide whether it is worth triggering a new
+build or not, e.g. the function could use `filename.endswith(".c")'
+to only run a build if a C file were checked in. Certain BuildSteps
+can also use the list of changed files to run a more targeted series
+of tests, e.g. the `python_twisted.Trial' step can run just the unit
+tests that provide coverage for the modified .py files instead of
+running the full test suite.
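+
+   For example (a sketch, not part of the original text), a Scheduler
+could be given a filter function along these lines, using the
+documented `fileIsImportant=' argument; the builder name is
+hypothetical:
+
+     from buildbot import scheduler
+
+     def only_c_files(change):
+         # 'change.files' holds the tree-relative filenames
+         for name in change.files:
+             if name.endswith(".c"):
+                 return True
+         return False
+
+     quick = scheduler.Scheduler("quick", None, 60, ["quick-linux"],
+                                 fileIsImportant=only_c_files)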
+
+Comments
+========
+
+The Change also has a `comments' attribute, which is a string
+containing any checkin comments.
+
+Revision
+========
+
+Each Change can have a `revision' attribute, which describes how to
+get a tree with a specific state: a tree which includes this Change
+(and all that came before it) but none that come after it. If this
+information is unavailable, the `.revision' attribute will be `None'.
+These revisions are provided by the ChangeSource, and consumed by the
+`computeSourceRevision' method in the appropriate `step.Source' class.
+
+`CVS'
+     `revision' is an int, seconds since the epoch
+
+`SVN'
+     `revision' is an int, a transaction number (r%d)
+
+`Darcs'
+     `revision' is a large string, the output of `darcs changes
+     --context'
+
+`Mercurial'
+     `revision' is a short string (a hash ID), the output of `hg
+     identify'
+
+`Arch/Bazaar'
+     `revision' is the full revision ID (ending in -patch-%d)
+
+`P4'
+     `revision' is an int, the transaction number
+
+Branches
+========
+
+The Change might also have a `branch' attribute. This indicates that
+all of the Change's files are in the same named branch. The
+Schedulers get to decide whether the branch should be built or not.
+
+   For VC systems like CVS, Arch, and Monotone, the `branch' name is
+unrelated to the filename (that is, the branch name and the filename
+inhabit unrelated namespaces). For SVN, branches are expressed as
+subdirectories of the repository, so the file's "svnurl" is a
+combination of some base URL, the branch name, and the filename within
+the branch. (In a sense, the branch name and the filename inhabit the
+same namespace). Darcs branches are subdirectories of a base URL just
+like SVN. Mercurial branches are the same as Darcs.
+
+`CVS'
+     branch='warner-newfeature', files=['src/foo.c']
+
+`SVN'
+     branch='branches/warner-newfeature', files=['src/foo.c']
+
+`Darcs'
+     branch='warner-newfeature', files=['src/foo.c']
+
+`Mercurial'
+     branch='warner-newfeature', files=['src/foo.c']
+
+`Arch/Bazaar'
+     branch='buildbot-usebranches-0', files=['buildbot/master.py']
+
+Links
+=====
+
+Finally, the Change might have a `links' list, which is intended to
+provide a list of URLs to a _viewcvs_-style web page that provides
+more detail for this Change, perhaps including the full file diffs.
+
+
+File: buildbot.info,  Node: Schedulers,  Next: BuildSet,  Prev: Version Control Systems,  Up: Concepts
+
+3.2 Schedulers
+==============
+
+Each Buildmaster has a set of `Scheduler' objects, each of which gets
+a copy of every incoming Change. The Schedulers are responsible for
+deciding when Builds should be run. Some Buildbot installations might
+have a single Scheduler, while others may have several, each for a
+different purpose.
+
+   For example, a "quick" scheduler might exist to give immediate
+feedback to developers, hoping to catch obvious problems in the code
+that can be detected quickly. These typically do not run the full test
+suite, nor do they run on a wide variety of platforms. They also
+usually do a VC update rather than performing a brand-new checkout
+each time. You could have a "quick" scheduler which uses a 30 second
+timeout and feeds a single "quick" Builder that uses a VC
+`mode='update'' setting.
+
+   A separate "full" scheduler would run more comprehensive tests a
+little while later, to catch more subtle problems. This scheduler
+would have a longer tree-stable-timer, maybe 30 minutes, and would
+feed multiple Builders (with a `mode=' of `'copy'', `'clobber'', or
+`'export'').
+
+   The `tree-stable-timer' and `isFileImportant' decisions are made
+by the Scheduler. Dependencies are also implemented here.  Periodic
+builds (those which are run every N seconds rather than after new
+Changes arrive) are triggered by a special `Periodic' Scheduler
+subclass. The default Scheduler class can also be told to watch for
+specific branches, ignoring Changes on other branches. This may be
+useful if you have a trunk and a few release branches which should be
+tracked, but when you don't want to have the Buildbot pay attention
+to several dozen private user branches.
+
+   Some Schedulers may trigger builds for reasons other than
+recent Changes. For example, a Scheduler subclass could connect to a
+remote buildmaster and watch for builds of a library to succeed before
+triggering a local build that uses that library.
+
+   Each Scheduler creates and submits `BuildSet' objects to the
+`BuildMaster', which is then responsible for making sure the
+individual `BuildRequests' are delivered to the target `Builders'.
+
+   `Scheduler' instances are activated by placing them in the
+`c['schedulers']' list in the buildmaster config file. Each Scheduler
+has a unique name.
+
+
+File: buildbot.info,  Node: BuildSet,  Next: BuildRequest,  Prev: Schedulers,  Up: Concepts
+
+3.3 BuildSet
+============
+
+A `BuildSet' is the name given to a set of Builds that all
+compile/test the same version of the tree on multiple Builders. In
+general, all these component Builds will perform the same sequence of
+Steps, using the same source code, but on different platforms or
+against a different set of libraries.
+
+   The `BuildSet' is tracked as a single unit, which fails if any of
+the component Builds have failed, and therefore can succeed only if
+_all_ of the component Builds have succeeded. There are two kinds of
+status notification messages that can be emitted for a BuildSet: the
+`firstFailure' type (which fires as soon as we know the BuildSet will
+fail), and the `Finished' type (which fires once the BuildSet has
+completely finished, regardless of whether the overall set passed or
+failed).
+
+   A `BuildSet' is created with a _source stamp_ tuple of (branch,
+revision, changes, patch), some of which may be None, and a list of
+Builders on which it is to be run. They are then given to the
+BuildMaster, which is responsible for creating a separate
+`BuildRequest' for each Builder.
+
+   There are a couple of different likely values for the
+`SourceStamp':
+
+`(revision=None, changes=[CHANGES], patch=None)'
+     This is a `SourceStamp' used when a series of Changes have
+     triggered a build. The VC step will attempt to check out a tree
+     that contains CHANGES (and any changes that occurred before
+     CHANGES, but not any that occurred after them).
+
+`(revision=None, changes=None, patch=None)'
+     This builds the most recent code on the default branch. This is
+     the sort of `SourceStamp' that would be used on a Build that was
+     triggered by a user request, or a Periodic scheduler. It is also
+     possible to configure the VC Source Step to always check out the
+     latest sources rather than paying attention to the Changes in the
+     SourceStamp, which will result in the same behavior as this.
+
+`(branch=BRANCH, revision=None, changes=None, patch=None)'
+     This builds the most recent code on the given BRANCH. Again,
+     this is generally triggered by a user request or Periodic build.
+
+`(revision=REV, changes=None, patch=(LEVEL, DIFF))'
+     This checks out the tree at the given revision REV, then applies
+     a patch (using `patch -pLEVEL <DIFF'). The *Note try:: feature
+     uses this kind of `SourceStamp'. If `patch' is None, the patching
+     step is bypassed.
+
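+   As a rough sketch (assuming a `SourceStamp' class importable from
+`buildbot.sourcestamp', and a hypothetical branch name), the "most
+recent code on a given branch" case could be written as:
+
+     from buildbot.sourcestamp import SourceStamp
+
+     # most recent code on the 'release-1.0' branch, no patch applied
+     ss = SourceStamp(branch="release-1.0", revision=None,
+                      patch=None, changes=None)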
+
+   The buildmaster is responsible for turning the `BuildSet' into a
+set of `BuildRequest' objects and queueing them on the appropriate
+Builders.
+
+
+File: buildbot.info,  Node: BuildRequest,  Next: Builder,  Prev: BuildSet,  Up: Concepts
+
+3.4 BuildRequest
+================
+
+A `BuildRequest' is a request to build a specific set of sources on a
+single specific Builder. Each Builder runs the `BuildRequest' as soon
+as it can (i.e. when an associated buildslave becomes free).
+
+   The `BuildRequest' contains the `SourceStamp' specification.  The
+actual process of running the build (the series of Steps that will be
+executed) is implemented by the `Build' object. In the future this
+might be changed, to have the `Build' define _what_ gets built, and a
+separate `BuildProcess' (provided by the Builder) to define _how_ it
+gets built.
+
+   The `BuildRequest' may be mergeable with other compatible
+`BuildRequest's. Builds that are triggered by incoming Changes will
+generally be mergeable. Builds that are triggered by user requests
+are generally not, unless they are multiple requests to build the
+_latest sources_ of the same branch.
+
+
+File: buildbot.info,  Node: Builder,  Next: Users,  Prev: BuildRequest,  Up: Concepts
+
+3.5 Builder
+===========
+
+The `Builder' is a long-lived object which controls all Builds of a
+given type. Each one is created when the config file is first parsed,
+and lives forever (or rather until it is removed from the config
+file). It mediates the connections to the buildslaves that do all the
+work, and is responsible for creating the `Build' objects that decide
+_how_ a build is performed (i.e., which steps are executed in what
+order).
+
+   Each `Builder' gets a unique name, and the path name of a
+directory where it gets to do all its work (there is a
+buildmaster-side directory for keeping status information, as well as
+a buildslave-side directory where the actual checkout/compile/test
+commands are executed). It also gets a `BuildFactory', which is
+responsible for creating new `Build' instances: because the `Build'
+instance is what actually performs each build, choosing the
+`BuildFactory' is the way to specify what happens each time a build
+is done.
+
+   Each `Builder' is associated with one or more `BuildSlaves'.  A
+`Builder' which is used to perform OS-X builds (as opposed to Linux
+or Solaris builds) should naturally be associated with an OS-X-based
+buildslave.
+
+
+File: buildbot.info,  Node: Users,  Prev: Builder,  Up: Concepts
+
+3.6 Users
+=========
+
+Buildbot has a somewhat limited awareness of _users_. It assumes the
+world consists of a set of developers, each of whom can be described
+by a couple of simple attributes. These developers make changes to
+the source code, causing builds which may succeed or fail.
+
+   Each developer is primarily known through the source control
+system. Each Change object that arrives is tagged with a `who' field
+that typically gives the account name (on the repository machine) of
+the user responsible for that change. This string is the primary key
+by which the User is known, and is displayed on the HTML status pages
+and in each Build's "blamelist".
+
+   To do more with the User than just refer to them, this username
+needs to be mapped into an address of some sort. The responsibility
+for this mapping is left up to the status module which needs the
+address. The core code knows nothing about email addresses or IRC
+nicknames, just user names.
+
+* Menu:
+
+* Doing Things With Users::
+* Email Addresses::
+* IRC Nicknames::
+* Live Status Clients::
+
+
+File: buildbot.info,  Node: Doing Things With Users,  Next: Email Addresses,  Prev: Users,  Up: Users
+
+3.6.1 Doing Things With Users
+-----------------------------
+
+Each Change has a single User who is responsible for that Change. Most
+Builds have a set of Changes: the Build represents the first time
+these Changes have been built and tested by the Buildbot. The build
+has a "blamelist" that consists of a simple union of the Users
+responsible for all the Build's Changes.
+
+   The Build provides (through the IBuildStatus interface) a list of
+Users who are "involved" in the build. For now this is equal to the
+blamelist, but in the future it will be expanded to include a "build
+sheriff" (a person who is "on duty" at that time and responsible for
+watching over all builds that occur during their shift), as well as
+per-module owners who simply want to keep watch over their domain
+(chosen by subdirectory or a regexp matched against the filenames
+pulled out of the Changes). The Involved Users are those who probably
+have an interest in the results of any given build.
+
+   In the future, Buildbot will acquire the concept of "Problems",
+which last longer than builds and have beginnings and ends. For
+example, a test case which passed in one build and then failed in the
+next is a Problem. The Problem lasts until the test case starts
+passing again, at which point the Problem is said to be "resolved".
+
+   If there appears to be a code change that went into the tree at the
+same time as the test started failing, that Change is marked as being
+responsible for the Problem, and the user who made the change is added
+to the Problem's "Guilty" list. In addition to this user, there may
+be others who share responsibility for the Problem (module owners,
+sponsoring developers). In addition to the Responsible Users, there
+may be a set of Interested Users, who take an interest in the fate of
+the Problem.
+
+   Problems therefore have sets of Users who may want to be kept
+aware of the condition of the problem as it changes over time. If
+configured, the Buildbot can pester everyone on the Responsible list
+with increasing harshness until the problem is resolved, with the
+most harshness reserved for the Guilty parties themselves. The
+Interested Users may merely be told when the problem starts and
+stops, as they are not actually responsible for fixing anything.
+
+
+File: buildbot.info,  Node: Email Addresses,  Next: IRC Nicknames,  Prev: Doing Things With Users,  Up: Users
+
+3.6.2 Email Addresses
+---------------------
+
+The `buildbot.status.mail.MailNotifier' class provides a status
+target which can send email about the results of each build. It
+accepts a static list of email addresses to which each message should
+be delivered, but it can also be configured to send mail to the
+Build's Interested Users. To do this, it needs a way to convert User
+names into email addresses.
+
+   For many VC systems, the User Name is actually an account name on
+the system which hosts the repository. As such, turning the name into
+an email address is a simple matter of appending
+"@repositoryhost.com". Some projects use other kinds of mappings (for
+example the preferred email address may be at "project.org" despite
+the repository host being named "cvs.project.org"), and some VC
+systems have full separation between the concept of a user and that
+of an account on the repository host (like Perforce). Some systems
+(like Arch) put a full contact email address in every change.
+
+   To convert these names to addresses, the MailNotifier uses an
+EmailLookup object. This provides a .getAddress method which accepts
+a name and (eventually) returns an address. The default `MailNotifier'
+module provides an EmailLookup which simply appends a static string,
+configurable when the notifier is created. To create more complex
+behaviors (perhaps using an LDAP lookup, or using "finger" on a
+central host to determine a preferred address for the developer),
+provide a different object as the `lookup' argument.
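+
+   A custom lookup might look roughly like the following sketch. It
+assumes the `buildbot.interfaces.IEmailLookup' interface and simply
+appends a hypothetical domain name; a real implementation would
+perform the LDAP or "finger" query here instead.
+
+     from zope.interface import implements
+     from buildbot import interfaces
+     from buildbot.status import mail
+
+     class SimpleLookup:
+         implements(interfaces.IEmailLookup)
+         def getAddress(self, user):
+             # hypothetical mapping: repository usernames are the
+             # local part of a project.org address
+             return user + "@project.org"
+
+     m = mail.MailNotifier(fromaddr="buildbot@localhost",
+                           sendToInterestedUsers=True,
+                           lookup=SimpleLookup())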
+
+   In the future, when the Problem mechanism has been set up, the
+Buildbot will need to send mail to arbitrary Users. It will do this
+by locating a MailNotifier-like object among all the buildmaster's
+status targets, and asking it to send messages to various Users. This
+means the User-to-address mapping only has to be set up once, in your
+MailNotifier, and every email message the buildbot emits will take
+advantage of it.
+
+
+File: buildbot.info,  Node: IRC Nicknames,  Next: Live Status Clients,  Prev: Email Addresses,  Up: Users
+
+3.6.3 IRC Nicknames
+-------------------
+
+Like MailNotifier, the `buildbot.status.words.IRC' class provides a
+status target which can announce the results of each build. It also
+provides an interactive interface by responding to online queries
+posted in the channel or sent as private messages.
+
+   In the future, the buildbot can be configured to map User names to
+IRC nicknames, to watch for the recent presence of these nicknames,
+and to deliver build status messages to the interested parties. Like
+`MailNotifier' does for email addresses, the `IRC' object will have
+an `IRCLookup' which is responsible for nicknames. The mapping can be
+set up statically, or it can be updated by online users themselves
+(by claiming a username with some kind of "buildbot: i am user
+warner" commands).
+
+   Once the mapping is established, the rest of the buildbot can ask
+the `IRC' object to send messages to various users. It can report on
+the likelihood that the user saw the given message (based upon how
+long the user has been inactive on the channel), which might prompt
+the Problem Hassler logic to send them an email message instead.
+
+
+File: buildbot.info,  Node: Live Status Clients,  Prev: IRC Nicknames,  Up: Users
+
+3.6.4 Live Status Clients
+-------------------------
+
+The Buildbot also offers a PB-based status client interface which can
+display real-time build status in a GUI panel on the developer's
+desktop.  This interface is normally anonymous, but it could be
+configured to let the buildmaster know _which_ developer is using the
+status client. The status client could then be used as a
+message-delivery service, providing an alternative way to deliver
+low-latency high-interruption messages to the developer (like "hey,
+you broke the build").
+
+
+File: buildbot.info,  Node: Configuration,  Next: Getting Source Code Changes,  Prev: Concepts,  Up: Top
+
+4 Configuration
+***************
+
+The buildbot's behavior is defined by the "config file", which
+normally lives in the `master.cfg' file in the buildmaster's base
+directory (but this can be changed with an option to the `buildbot
+create-master' command). This file completely specifies which
+Builders are to be run, which slaves they should use, how Changes
+should be tracked, and where the status information is to be sent.
+The buildmaster's `buildbot.tac' file names the base directory;
+everything else comes from the config file.
+
+   A sample config file was installed for you when you created the
+buildmaster, but you will need to edit it before your buildbot will do
+anything useful.
+
+   This chapter gives an overview of the format of this file and the
+various sections in it. You will need to read the later chapters to
+understand how to fill in each section properly.
+
+* Menu:
+
+* Config File Format::
+* Loading the Config File::
+* Defining the Project::
+* Listing Change Sources and Schedulers::
+* Setting the slaveport::
+* Buildslave Specifiers::
+* Defining Builders::
+* Defining Status Targets::
+* Debug options::
+
+
+File: buildbot.info,  Node: Config File Format,  Next: Loading the Config File,  Prev: Configuration,  Up: Configuration
+
+4.1 Config File Format
+======================
+
+The config file is, fundamentally, just a piece of Python code which
+defines a dictionary named `BuildmasterConfig', with a number of keys
+that are treated specially. You don't need to know Python to do basic
+configuration, though; you can just copy the syntax of the sample
+file. If you _are_ comfortable writing Python code, however, you can
+use all the power of a full programming language to achieve more
+complicated configurations.
+
+   The `BuildmasterConfig' name is the only one which matters: all
+other names defined during the execution of the file are discarded.
+When parsing the config file, the Buildmaster generally compares the
+old configuration with the new one and performs the minimum set of
+actions necessary to bring the buildbot up to date: Builders which are
+not changed are left untouched, and Builders which are modified get to
+keep their old event history.
+
+   Basic Python syntax: comments start with a hash character ("#"),
+tuples are defined with `(parenthesis, pairs)', arrays are defined
+with `[square, brackets]', tuples and arrays are mostly
+interchangeable. Dictionaries (data structures which map "keys" to
+"values") are defined with curly braces: `{'key1': 'value1', 'key2':
+'value2'} '. Function calls (and object instantiation) can use named
+parameters, like `w = html.Waterfall(http_port=8010)'.
+
+   The config file starts with a series of `import' statements, which
+make various kinds of Steps and Status targets available for later
+use. The main `BuildmasterConfig' dictionary is created, then it is
+populated with a variety of keys. These keys are broken roughly into
+the following sections, each of which is documented in the rest of
+this chapter:
+
+   * Project Definitions
+
+   * Change Sources / Schedulers
+
+   * Slaveport
+
+   * Buildslave Configuration
+
+   * Builders / Interlocks
+
+   * Status Targets
+
+   * Debug options
+
+   The config file can use a few names which are placed into its
+namespace:
+
+`basedir'
+     the base directory for the buildmaster. This string has not been
+     expanded, so it may start with a tilde. It needs to be expanded
+     before use (see below). The config file is located in
+     `os.path.expanduser(os.path.join(basedir, 'master.cfg'))'
+
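+   For instance (a minimal sketch; the subdirectory name is
+arbitrary), a config file that wants an absolute path under the base
+directory could do:
+
+     import os.path
+     absdir = os.path.expanduser(basedir)
+     logdir = os.path.join(absdir, "logs")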
+
+
+File: buildbot.info,  Node: Loading the Config File,  Next: Defining the Project,  Prev: Config File Format,  Up: Configuration
+
+4.2 Loading the Config File
+===========================
+
+The config file is only read at specific points in time. It is first
+read when the buildmaster is launched. Once it is running, there are
+various ways to ask it to reload the config file. If you are on the
+system hosting the buildmaster, you can send a `SIGHUP' signal to it:
+the `buildbot' tool has a shortcut for this:
+
+     buildbot reconfig BASEDIR
+
+   This command will show you all of the lines from `twistd.log' that
+relate to the reconfiguration. If there are any problems during the
+config-file reload, they will be displayed in these lines.
+
+   The debug tool (`buildbot debugclient --master HOST:PORT') has a
+"Reload .cfg" button which will also trigger a reload. In the future,
+there will be other ways to accomplish this step (probably a
+password-protected button on the web page, as well as a privileged IRC
+command).
+
+   When reloading the config file, the buildmaster will endeavor to
+change as little as possible about the running system. For example,
+although old status targets may be shut down and new ones started up,
+any status targets that were not changed since the last time the
+config file was read will be left running and untouched. Likewise any
+Builders which have not been changed will be left running. If a
+Builder is modified (say, the build process is changed) while a Build
+is currently running, that Build will keep running with the old
+process until it completes. Any previously queued Builds (or Builds
+which get queued after the reconfig) will use the new process.
+
+
+File: buildbot.info,  Node: Defining the Project,  Next: Listing Change Sources and Schedulers,  Prev: Loading the Config File,  Up: Configuration
+
+4.3 Defining the Project
+========================
+
+There are a couple of basic settings that you use to tell the buildbot
+what project it is working on. This information is used by status
+reporters to let users find out more about the codebase being
+exercised by this particular Buildbot installation.
+
+     c['projectName'] = "Buildbot"
+     c['projectURL'] = "http://buildbot.sourceforge.net/"
+     c['buildbotURL'] = "http://localhost:8010/"
+
+   `projectName' is a short string that will be used to describe the
+project that this buildbot is working on. For example, it is used as
+the title of the waterfall HTML page.
+
+   `projectURL' is a string that gives a URL for the project as a
+whole. HTML status displays will show `projectName' as a link to
+`projectURL', to provide a link from buildbot HTML pages to your
+project's home page.
+
+   The `buildbotURL' string should point to the location where the
+buildbot's internal web server (usually the `html.Waterfall' page) is
+visible. This typically uses the port number set when you create the
+`Waterfall' object: the buildbot needs your help to figure out a
+suitable externally-visible host name.
+
+   When status notices are sent to users (either by email or over
+IRC), `buildbotURL' will be used to create a URL to the specific build
+or problem that they are being notified about. It will also be made
+available to queriers (over IRC) who want to find out where to get
+more information about this buildbot.
+
+
+File: buildbot.info,  Node: Listing Change Sources and Schedulers,  Next: Setting the slaveport,  Prev: Defining the Project,  Up: Configuration
+
+4.4 Listing Change Sources and Schedulers
+=========================================
+
+The `c['sources']' key is a list of ChangeSource instances(1).  This
+defines how the buildmaster learns about source code changes.  More
+information about what goes here is available in *Note Getting Source
+Code Changes::.
+
+     import buildbot.changes.pb
+     c['sources'] = [buildbot.changes.pb.PBChangeSource()]
+
+   `c['schedulers']' is a list of Scheduler instances, each of which
+causes builds to be started on a particular set of Builders. The two
+basic Scheduler classes you are likely to start with are `Scheduler'
+and `Periodic', but you can write a customized subclass to implement
+more complicated build scheduling.
+
+   The docstring for `buildbot.scheduler.Scheduler' is the best place
+to see all the options that can be used. Type `pydoc
+buildbot.scheduler.Scheduler' to see it, or look in
+`buildbot/scheduler.py' directly.
+
+   The basic Scheduler takes four arguments:
+
+`name'
+     Each Scheduler must have a unique name. This is only used in
+     status displays.
+
+`branch'
+     This Scheduler will pay attention to a single branch, ignoring
+     Changes that occur on other branches. Setting `branch' equal to
+     the special value of `None' means it should only pay attention
+     to the default branch. Note that `None' is a keyword, not a
+     string, so you want to use `None' and not `"None"'.
+
+`treeStableTimer'
+     The Scheduler will wait for this many seconds before starting the
+     build. If new changes are made during this interval, the timer
+     will be restarted, so really the build will be started after a
+     change and then after this many seconds of inactivity.
+
+`builderNames'
+     When the tree-stable-timer finally expires, builds will be
+     started on these Builders. Each Builder gets a unique name:
+     these strings must match.
+
+
+     from buildbot import scheduler
+     quick = scheduler.Scheduler("quick", None, 60,
+                                 ["quick-linux", "quick-netbsd"])
+     full = scheduler.Scheduler("full", None, 5*60,
+                                ["full-linux", "full-netbsd", "full-OSX"])
+     nightly = scheduler.Periodic("nightly", ["full-solaris"], 24*60*60)
+     c['schedulers'] = [quick, full, nightly]
+
+   In this example, the two "quick" builds are triggered 60 seconds
+after the tree has been changed. The "full" builds do not run quite
+so quickly (they wait 5 minutes), so hopefully if the quick builds
+fail due to a missing file or really simple typo, the developer can
+discover and fix the problem before the full builds are started. Both
+Schedulers only pay attention to the default branch: any changes on
+other branches are ignored by these Schedulers. Each Scheduler
+triggers a different set of Builders, referenced by name.
+
+   The third Scheduler in this example just runs the full solaris
+build once per day. (Note that this Scheduler only lets you control
+the time between builds, not the absolute time-of-day of each Build,
+so this could easily wind up as a "daily" or "every afternoon"
+scheduler depending upon when it was first activated).
+
+* Menu:
+
+* Scheduler Types::
+* Build Dependencies::
+
+   ---------- Footnotes ----------
+
+   (1) To be precise, it is a list of objects which all implement the
+`buildbot.interfaces.IChangeSource' Interface.
+
+
+File: buildbot.info,  Node: Scheduler Types,  Next: Build Dependencies,  Prev: Listing Change Sources and Schedulers,  Up: Listing Change Sources and Schedulers
+
+4.4.1 Scheduler Types
+---------------------
+
+Here is a brief catalog of the available Scheduler types. All these
+Schedulers are classes in `buildbot.scheduler', and the docstrings
+there are the best source of documentation on the arguments taken by
+each one.
+
+`Scheduler'
+     This is the default Scheduler class. It follows exactly one
+     branch, and starts a configurable tree-stable-timer after each
+     change on that branch. When the timer expires, it starts a build
+     on some set of Builders. The Scheduler accepts a
+     `fileIsImportant' function which can be used to ignore some
+     Changes if they do not affect any "important" files.
+
+`AnyBranchScheduler'
+     This scheduler uses a tree-stable-timer like the default one, but
+     follows multiple branches at once. Each branch gets a separate
+     timer.
+
+`Dependent'
+     This scheduler watches an "upstream" Builder. When that Builder
+     successfully builds a particular set of Changes, it triggers
+     builds of the same code on a configured set of "downstream"
+     builders. The next section (*note Build Dependencies::)
+     describes this scheduler in more detail.
+
+`Periodic'
+     This simple scheduler just triggers a build every N seconds.
+
+`Nightly'
+     This is a highly configurable periodic build scheduler, which
+     triggers a build at particular times of day, week, month, or
+     year. The configuration syntax is very similar to the well-known
+     `crontab' format, in which you provide values for minute, hour,
+     day, and month (some of which can be wildcards), and a build is
+     triggered whenever the current time matches the given
+     constraints. This can run a build every night, every morning,
+     every weekend, alternate Thursdays, on your boss's birthday,
+     etc. (a configuration sketch appears after this list).
+
+`Try_Jobdir / Try_Userpass'
+     This scheduler allows developers to use the `buildbot try'
+     command to trigger builds of code they have not yet committed.
+     See *Note try:: for complete details.
+
+
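+   For example (a sketch; the builder name is taken from the earlier
+examples, and the exact argument names should be checked against the
+`buildbot.scheduler.Nightly' docstring), a `Nightly' Scheduler that
+triggers a build at 3:00 every morning might look like:
+
+     from buildbot import scheduler
+     nightly = scheduler.Nightly("nightly", ["full-solaris"],
+                                 hour=3, minute=0)
+     c['schedulers'].append(nightly)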
+
+File: buildbot.info,  Node: Build Dependencies,  Prev: Scheduler Types,  Up: Listing Change Sources and Schedulers
+
+4.4.2 Build Dependencies
+------------------------
+
+It is common to wind up with one kind of build which should only be
+performed if the same source code was successfully handled by some
+other kind of build first. An example might be a packaging step: you
+might only want to produce .deb or RPM packages from a tree that was
+known to compile successfully and pass all unit tests. You could put
+the packaging step in the same Build as the compile and testing steps,
+but there might be other reasons to not do this (in particular you
+might have several Builders worth of compiles/tests, but only wish to
+do the packaging once). Another example is if you want to skip the
+"full" builds after a failing "quick" build of the same source code.
+Or, if one Build creates a product (like a compiled library) that is
+used by some other Builder, you'd want to make sure the consuming
+Build is run _after_ the producing one.
+
+   You can use `Dependencies' to express this relationship to the
+Buildbot. There is a special kind of Scheduler named
+`scheduler.Dependent' that will watch an "upstream" Scheduler for
+builds to complete successfully (on all of its Builders). Each time
+that happens, the same source code (i.e. the same `SourceStamp') will
+be used to start a new set of builds, on a different set of Builders.
+This "downstream" scheduler doesn't pay attention to Changes at all,
+it only pays attention to the upstream scheduler.
+
+   If the SourceStamp fails on any of the Builders in the upstream
+set, the downstream builds will not fire.
+
+     from buildbot import scheduler
+     tests = scheduler.Scheduler("tests", None, 5*60,
+                                 ["full-linux", "full-netbsd", "full-OSX"])
+     package = scheduler.Dependent("package",
+                                   tests, # upstream scheduler
+                                   ["make-tarball", "make-deb", "make-rpm"])
+     c['schedulers'] = [tests, package]
+
+   Note that `Dependent''s upstream scheduler argument is given as a
+`Scheduler' _instance_, not a name. This makes it impossible to
+create circular dependencies in the config file.
+
+
+File: buildbot.info,  Node: Setting the slaveport,  Next: Buildslave Specifiers,  Prev: Listing Change Sources and Schedulers,  Up: Configuration
+
+4.5 Setting the slaveport
+=========================
+
+The buildmaster will listen on a TCP port of your choosing for
+connections from buildslaves. It can also use this port for
+connections from remote Change Sources, status clients, and debug
+tools. This port should be visible to the outside world, and you'll
+need to tell your buildslave admins about your choice.
+
+   It does not matter which port you pick, as long as it is externally
+visible; however, you should probably use something larger than 1024,
+since most operating systems don't allow non-root processes to bind to
+low-numbered ports. If your buildmaster is behind a firewall or a NAT
+box of some sort, you may have to configure your firewall to permit
+inbound connections to this port.
+
+     c['slavePortnum'] = 10000
+
+   `c['slavePortnum']' is a _strports_ specification string, defined
+in the `twisted.application.strports' module (try `pydoc
+twisted.application.strports' to get documentation on the format).
+This means that you can have the buildmaster listen on a
+localhost-only port by doing:
+
+     c['slavePortnum'] = "tcp:10000:interface=127.0.0.1"
+
+   This might be useful if you only run buildslaves on the same
+machine, and they are all configured to contact the buildmaster at
+`localhost:10000'.
+
+
+File: buildbot.info,  Node: Buildslave Specifiers,  Next: Defining Builders,  Prev: Setting the slaveport,  Up: Configuration
+
+4.6 Buildslave Specifiers
+=========================
+
+The `c['bots']' key is a list of known buildslaves. Each buildslave
+is defined by a tuple of (slavename, slavepassword). These are the
+same two values that need to be provided to the buildslave
+administrator when they create the buildslave.
+
+     c['bots'] = [('bot-solaris', 'solarispasswd'),
+                  ('bot-bsd', 'bsdpasswd'),
+                 ]
+
+   The slavenames must be unique, of course. The password exists to
+prevent evildoers from interfering with the buildbot by inserting
+their own (broken) buildslaves into the system and thus displacing the
+real ones.
+
+   Buildslaves with an unrecognized slavename or a non-matching
+password will be rejected when they attempt to connect, and a message
+describing the problem will be put in the log file (see *Note
+Logfiles::).
+
+
+File: buildbot.info,  Node: Defining Builders,  Next: Defining Status Targets,  Prev: Buildslave Specifiers,  Up: Configuration
+
+4.7 Defining Builders
+=====================
+
+The `c['builders']' key is a list of dictionaries which specify the
+Builders. The Buildmaster runs a collection of Builders, each of
+which handles a single type of build (e.g. full versus quick), on a
+single build slave. A Buildbot which makes sure that the latest code
+("HEAD") compiles correctly across four separate architecture will
+have four Builders, each performing the same build but on different
+slaves (one per platform).
+
+   Each Builder gets a separate column in the waterfall display. In
+general, each Builder runs independently (although various kinds of
+interlocks can cause one Builder to have an effect on another).
+
+   Each Builder specification dictionary has several required keys:
+
+`name'
+     This specifies the Builder's name, which is used in status
+     reports.
+
+`slavename'
+     This specifies which buildslave will be used by this Builder.
+     `slavename' must appear in the `c['bots']' list. Each buildslave
+     can accommodate multiple Builders.
+
+`slavenames'
+     If you provide `slavenames' instead of `slavename', you can give
+     a list of buildslaves which are capable of running this Builder.
+     If multiple buildslaves are available for any given Builder, you
+     will have some measure of redundancy: in case one slave goes
+     offline, the others can still keep the Builder working. In
+     addition, multiple buildslaves will allow multiple simultaneous
+     builds for the same Builder, which might be useful if you have a
+     lot of forced or "try" builds taking place.
+
+     If you use this feature, it is important to make sure that the
+     buildslaves are all, in fact, capable of running the given
+     build. The slave hosts should be configured similarly; otherwise
+     you will spend a lot of time trying (unsuccessfully) to
+     reproduce a failure that only occurs on some of the buildslaves
+     and not the others. Different platforms, operating systems,
+     versions of major programs or libraries, all these things mean
+     you should use separate Builders.
+
+`builddir'
+     This specifies the name of a subdirectory (under the base
+     directory) in which everything related to this builder will be
+     placed. On the buildmaster, this holds build status information.
+     On the buildslave, this is where checkouts, compiles, and tests
+     are run.
+
+`factory'
+     This is a `buildbot.process.factory.BuildFactory' instance which
+     controls how the build is performed. Full details appear in
+     their own chapter, *Note Build Process::. Parameters like the
+     location of the CVS repository and the compile-time options used
+     for the build are generally provided as arguments to the
+     factory's constructor.
+
+
+   Other optional keys may be set on each Builder:
+
+`category'
+     If provided, this is a string that identifies a category for the
+     builder to be a part of. Status clients can limit themselves to a
+     subset of the available categories. A common use for this is to
+     add new builders to your setup (for a new module, or for a new
+     buildslave) that do not work correctly yet and allow you to
+     integrate them with the active builders. You can put these new
+     builders in a test category, make your main status clients
+     ignore them, and have only private status clients pick them up.
+     As soon as they work, you can move them over to the active
+     category.
+
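+   Here is a brief sketch of a complete builder definition, reusing
+the buildslave from the earlier `c['bots']' example. The factory
+shown is only illustrative (the SVN URL is hypothetical); see *Note
+Build Process:: for the real details of constructing a
+`BuildFactory'.
+
+     from buildbot.process import factory
+     from buildbot.steps import source, shell
+
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, svnurl="http://svn.example.org/Trunk/")
+     f.addStep(shell.Compile, command=["make", "all"])
+
+     b1 = {'name': "full-solaris",
+           'slavename': "bot-solaris",
+           'builddir': "full-solaris",
+           'factory': f,
+           }
+     c['builders'] = [b1]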
+
+
+File: buildbot.info,  Node: Defining Status Targets,  Next: Debug options,  Prev: Defining Builders,  Up: Configuration
+
+4.8 Defining Status Targets
+===========================
+
+The Buildmaster has a variety of ways to present build status to
+various users. Each such delivery method is a "Status Target" object
+in the configuration's `status' list. To add status targets, you just
+append more objects to this list:
+
+     c['status'] = []
+
+     from buildbot.status import html
+     c['status'].append(html.Waterfall(http_port=8010))
+
+     from buildbot.status import mail
+     m = mail.MailNotifier(fromaddr="buildbot at localhost",
+                           extraRecipients=["builds at lists.example.com"],
+                           sendToInterestedUsers=False)
+     c['status'].append(m)
+
+     from buildbot.status import words
+     c['status'].append(words.IRC(host="irc.example.com", nick="bb",
+                                  channels=["#example"]))
+
+   Status delivery has its own chapter, *Note Status Delivery::, in
+which all the built-in status targets are documented.
+
+
+File: buildbot.info,  Node: Debug options,  Prev: Defining Status Targets,  Up: Configuration
+
+4.9 Debug options
+=================
+
+If you set `c['debugPassword']', then you can connect to the
+buildmaster with the diagnostic tool launched by `buildbot
+debugclient MASTER:PORT'. From this tool, you can reload the config
+file, manually force builds, and inject changes, which may be useful
+for testing your buildmaster without actually committing changes to
+your repository (or before you have the Change Sources set up). The
+debug tool uses the same port number as the slaves do:
+`c['slavePortnum']', and is authenticated with this password.
+
+     c['debugPassword'] = "debugpassword"
+
+   If you set `c['manhole']' to an instance of one of the classes in
+`buildbot.manhole', you can telnet or ssh into the buildmaster and
+get an interactive Python shell, which may be useful for debugging
+buildbot internals. It is probably only useful for buildbot
+developers. It exposes full access to the buildmaster's account
+(including the ability to modify and delete files), so it should not
+be enabled with a weak or easily guessable password.
+
+   There are three separate `Manhole' classes. Two of them use SSH,
+one uses unencrypted telnet. Two of them use a username+password
+combination to grant access, one of them uses an SSH-style
+`authorized_keys' file which contains a list of ssh public keys.
+
+`manhole.AuthorizedKeysManhole'
+     You construct this with the name of a file that contains one SSH
+     public key per line, just like `~/.ssh/authorized_keys'. If you
+     provide a non-absolute filename, it will be interpreted relative
+     to the buildmaster's base directory.
+
+`manhole.PasswordManhole'
+     This one accepts SSH connections but asks for a username and
+     password when authenticating. It accepts only one such pair.
+
+`manhole.TelnetManhole'
+     This accepts regular unencrypted telnet connections, and asks
+     for a username/password pair before providing access. Because
+     this username/password is transmitted in the clear, and because
+     Manhole access to the buildmaster is equivalent to granting full
+     shell privileges to both the buildmaster and all the buildslaves
+     (and to all accounts which then run code produced by the
+     buildslaves), it is highly recommended that you use one of the
+     SSH manholes instead.
+
+
+     # some examples:
+     from buildbot import manhole
+     c['manhole'] = manhole.AuthorizedKeysManhole(1234, "authorized_keys")
+     c['manhole'] = manhole.PasswordManhole(1234, "alice", "mysecretpassword")
+     c['manhole'] = manhole.TelnetManhole(1234, "bob", "snoop_my_password_please")
+
+   The `Manhole' instance can be configured to listen on a specific
+port. You may wish to have this listening port bind to the loopback
+interface (sometimes known as "lo0", "localhost", or 127.0.0.1) to
+restrict access to clients which are running on the same host.
+
+     from buildbot.manhole import PasswordManhole
+     c['manhole'] = PasswordManhole("tcp:9999:interface=127.0.0.1","admin","passwd")
+
+   To have the `Manhole' listen on all interfaces, use `"tcp:9999"'
+or simply 9999. This port specification uses
+`twisted.application.strports', so you can make it listen on SSL or
+even UNIX-domain sockets if you want.
+
+   Note that using any Manhole requires that the TwistedConch package
+be installed, and that you be using Twisted version 2.0 or later.
+
+   The buildmaster's SSH server will use a different host key than the
+normal sshd running on a typical unix host. This will cause the ssh
+client to complain about a "host key mismatch", because it does not
+realize there are two separate servers running on the same host. To
+avoid this, use a clause like the following in your `.ssh/config'
+file:
+
+     Host remotehost-buildbot
+      HostName remotehost
+      HostKeyAlias remotehost-buildbot
+      Port 9999
+      # use 'user' if you use PasswordManhole and your name is not 'admin'.
+      # if you use AuthorizedKeysManhole, this probably doesn't matter.
+      User admin
+
+
+File: buildbot.info,  Node: Getting Source Code Changes,  Next: Build Process,  Prev: Configuration,  Up: Top
+
+5 Getting Source Code Changes
+*****************************
+
+The most common way to use the Buildbot is centered around the idea of
+`Source Trees': a directory tree filled with source code of some form
+which can be compiled and/or tested. Some projects use languages that
+don't involve any compilation step: nevertheless there may be a
+`build' phase where files are copied or rearranged into a form that
+is suitable for installation. Some projects do not have unit tests,
+and the Buildbot is merely helping to make sure that the sources can
+compile correctly. But in all of these cases, the thing-being-tested
+is a single source tree.
+
+   A Version Control System maintains a source tree, and tells the
+buildmaster when it changes. The first step of each Build is typically
+to acquire a copy of some version of this tree.
+
+   This chapter describes how the Buildbot learns about what Changes
+have occurred. For more information on VC systems and Changes, see
+*Note Version Control Systems::.
+
+* Menu:
+
+* Change Sources::
+
+
+File: buildbot.info,  Node: Change Sources,  Prev: Getting Source Code Changes,  Up: Getting Source Code Changes
+
+5.1 Change Sources
+==================
+
+Each Buildmaster watches a single source tree. Changes can be provided
+by a variety of ChangeSource types; however, any given project will
+typically have only a single ChangeSource active. This section
+provides a description of all available ChangeSource types and
+explains how to set up each of them.
+
+   There are a variety of ChangeSources available, some of which are
+meant to be used in conjunction with other tools to deliver Change
+events from the VC repository to the buildmaster.
+
+   * CVSToys This ChangeSource opens a TCP connection from the
+     buildmaster to a waiting FreshCVS daemon that lives on the
+     repository machine, and subscribes to hear about Changes.
+
+   * MaildirSource This one watches a local maildir-format inbox for
+     email sent out by the repository when a change is made. When a
+     message arrives, it is parsed to create the Change object. A
+     variety of parsing functions are available to accommodate
+     different email-sending tools.
+
+   * PBChangeSource This ChangeSource listens on a local TCP socket
+     for inbound connections from a separate tool. Usually, this tool
+     would be run on the VC repository machine in a commit hook. It
+     is expected to connect to the TCP socket and send a Change
+     message over the network connection. The `buildbot sendchange'
+     command is one example of a tool that knows how to send these
+     messages, so you can write a commit script for your VC system
+     that calls it to deliver the Change.  There are other tools in
+     the contrib/ directory that use the same protocol.
+
+
+   As a quick guide, here is a list of VC systems and the
+ChangeSources that might be useful with them. All of these
+ChangeSources are in the `buildbot.changes' module.
+
+`CVS'
+        * freshcvs.FreshCVSSource (connected via TCP to the freshcvs
+          daemon)
+
+        * mail.FCMaildirSource (watching for email sent by a freshcvs
+          daemon)
+
+        * mail.BonsaiMaildirSource (watching for email sent by Bonsai)
+
+        * mail.SyncmailMaildirSource (watching for email sent by
+          syncmail)
+
+        * pb.PBChangeSource (listening for connections from `buildbot
+          sendchange' run in a loginfo script)
+
+        * pb.PBChangeSource (listening for connections from a
+          long-running `contrib/viewcvspoll.py' polling process which
+          examines the ViewCVS database directly)
+
+`SVN'
+        * pb.PBChangeSource (listening for connections from
+          `contrib/svn_buildbot.py' run in a postcommit script)
+
+        * pb.PBChangeSource (listening for connections from a
+          long-running `contrib/svn_watcher.py' or
+          `contrib/svnpoller.py' polling process)
+
+        * svnpoller.SVNPoller (polling the SVN repository)
+
+`Darcs'
+        * pb.PBChangeSource (listening for connections from
+          `contrib/darcs_buildbot.py' in a commit script)
+
+`Mercurial'
+        * pb.PBChangeSource (listening for connections from
+          `contrib/hg_buildbot.py' run in an 'incoming' hook)
+
+`Arch/Bazaar'
+        * pb.PBChangeSource (listening for connections from
+          `contrib/arch_buildbot.py' run in a commit hook)
+
+
+   All VC systems can be driven by a PBChangeSource and the `buildbot
+sendchange' tool run from some form of commit script.  If you write
+an email parsing function, they can also all be driven by a suitable
+`MaildirSource'.
+
+* Menu:
+
+* Choosing ChangeSources::
+* CVSToys - PBService::
+* CVSToys - mail notification::
+* Other mail notification ChangeSources::
+* PBChangeSource::
+* P4Source::
+* BonsaiPoller::
+* SVNPoller::
+
+
+File: buildbot.info,  Node: Choosing ChangeSources,  Next: CVSToys - PBService,  Prev: Change Sources,  Up: Change Sources
+
+5.1.1 Choosing ChangeSources
+----------------------------
+
+The `master.cfg' configuration file has a dictionary key named
+`BuildmasterConfig['sources']', which holds a list of `IChangeSource'
+objects. The config file will typically create an object from one of
+the classes described below and stuff it into the list.
+
+     s = FreshCVSSourceNewcred(host="host", port=4519,
+                               user="alice", passwd="secret",
+                               prefix="Twisted")
+     BuildmasterConfig['sources'] = [s]
+
+   Each source tree has a nominal `top'. Each Change has a list of
+filenames, which are all relative to this top location. The
+ChangeSource is responsible for doing whatever is necessary to
+accomplish this. Most sources have a `prefix' argument: a partial
+pathname which is stripped from the front of all filenames provided to
+that `ChangeSource'. Files which are outside this sub-tree are
+ignored by the changesource: it does not generate Changes for those
+files.
+
+
+File: buildbot.info,  Node: CVSToys - PBService,  Next: CVSToys - mail notification,  Prev: Choosing ChangeSources,  Up: Change Sources
+
+5.1.2 CVSToys - PBService
+-------------------------
+
+The CVSToys (http://purl.net/net/CVSToys) package provides a server
+which runs on the machine that hosts the CVS repository it watches.
+It has a variety of ways to distribute commit notifications, and
+offers a flexible regexp-based way to filter out uninteresting
+changes. One of the notification options is named `PBService' and
+works by listening on a TCP port for clients. These clients subscribe
+to hear about commit notifications.
+
+   The buildmaster has a CVSToys-compatible `PBService' client built
+in. There are two versions of it, one for old versions of CVSToys
+(1.0.9 and earlier) which used the `oldcred' authentication
+framework, and one for newer versions (1.0.10 and later) which use
+`newcred'. Both are classes in the `buildbot.changes.freshcvs'
+package.
+
+   `FreshCVSSourceNewcred' objects are created with the following
+parameters:
+
+``host' and `port''
+     these specify where the CVSToys server can be reached
+
+``user' and `passwd''
+     these specify the login information for the CVSToys server
+     (`freshcvs'). These must match the server's values, which are
+     defined in the `freshCfg' configuration file (which lives in the
+     CVSROOT directory of the repository).
+
+``prefix''
+     this is the prefix to be found and stripped from filenames
+     delivered by the CVSToys server. Most projects live in
+     sub-directories of the main repository, as siblings of the
+     CVSROOT sub-directory, so typically this prefix is set to that
+     top sub-directory name.
+
+
+Example
+=======
+
+To set up the freshCVS server, add a statement like the following to
+your `freshCfg' file:
+
+     pb = ConfigurationSet([
+         (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
+         ])
+
+   This will announce all changes to a client which connects to port
+4519 using a username of 'foo' and a password of 'bar'.
+
+   Then add a clause like this to your buildmaster's `master.cfg':
+
+     from buildbot.changes.freshcvs import FreshCVSSource
+     BuildmasterConfig['sources'] = [FreshCVSSource("cvs.example.com", 4519,
+                                     "foo", "bar",
+                                     prefix="glib/")]
+
+   where "cvs.example.com" is the host that is running the FreshCVS
+daemon, and "glib" is the top-level directory (relative to the
+repository's root) where all your source code lives. Most projects
+keep one or more projects in the same repository (along with CVSROOT/
+to hold admin files like loginfo and freshCfg); the prefix= argument
+tells the buildmaster to ignore everything outside that directory,
+and to strip that common prefix from all pathnames it handles.
+
+
+File: buildbot.info,  Node: CVSToys - mail notification,  Next: Other mail notification ChangeSources,  Prev: CVSToys - PBService,  Up: Change Sources
+
+5.1.3 CVSToys - mail notification
+---------------------------------
+
+CVSToys also provides a `MailNotification' action which will send
+email to a list of recipients for each commit. This tends to work
+better than using `/bin/mail' from within the CVSROOT/loginfo file
+directly, as CVSToys will batch together all files changed during the
+same CVS invocation, and can provide more information (like creating
+a ViewCVS URL for each file changed).
+
+   The Buildbot's `FCMaildirSource' is a ChangeSource which knows how
+to parse these CVSToys messages and turn them into Change objects.
+It watches a Maildir for new messages. The usual installation
+process looks like:
+
+  1. Create a mailing list, `projectname-commits'.
+
+  2. In CVSToys' freshCfg file, use a `MailNotification' action to
+     send commit mail to this mailing list.
+
+  3. Subscribe the buildbot user to the mailing list.
+
+  4. Configure your .qmail or .forward file to deliver these messages
+     into a maildir.
+
+  5. In the Buildbot's master.cfg file, use a `FCMaildirSource' to
+     watch the maildir for commit messages.
+
+   The `FCMaildirSource' is created with two parameters: the
+directory name of the maildir root, and the prefix to strip.
+
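+   For example (a sketch; the maildir location and prefix here are
+hypothetical):
+
+     from buildbot.changes.mail import FCMaildirSource
+     c['sources'].append(FCMaildirSource("~/maildir-buildbot",
+                                         prefix="Twisted"))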
+
+File: buildbot.info,  Node: Other mail notification ChangeSources,  Next: PBChangeSource,  Prev: CVSToys - mail notification,  Up: Change Sources
+
+5.1.4 Other mail notification ChangeSources
+-------------------------------------------
+
+There are other types of maildir-watching ChangeSources, which only
+differ in the function used to parse the message body.
+
+   `SyncmailMaildirSource' knows how to parse the message format used
+in mail sent by Syncmail.
+
+   `BonsaiMaildirSource' parses messages sent out by Bonsai.
+
+
+File: buildbot.info,  Node: PBChangeSource,  Next: P4Source,  Prev: Other mail notification ChangeSources,  Up: Change Sources
+
+5.1.5 PBChangeSource
+--------------------
+
+The last kind of ChangeSource actually listens on a TCP port for
+clients to connect and push change notices _into_ the Buildmaster.
+This is used by the built-in `buildbot sendchange' notification tool,
+as well as the VC-specific `contrib/svn_buildbot.py' and
+`contrib/arch_buildbot.py' tools. These tools are run by the
+repository (in a commit hook script), and connect to the buildmaster
+directly each time a file is committed. This is also useful for
+creating new kinds of change sources that work on a `push' model
+instead of some kind of subscription scheme, for example a script
+which is run out of an email .forward file.
+
+   This ChangeSource can be configured to listen on its own TCP port,
+or it can share the port that the buildmaster is already using for the
+buildslaves to connect. (This is possible because the
+`PBChangeSource' uses the same protocol as the buildslaves, and they
+can be distinguished by the `username' attribute used when the
+initial connection is established). It might be useful to have it
+listen on a different port if, for example, you wanted to establish
+different firewall rules for that port. You could allow only the SVN
+repository machine access to the `PBChangeSource' port, while
+allowing only the buildslave machines access to the slave port. Or you
+could just expose one port and run everything over it. _Note: this
+feature is not yet implemented; the PBChangeSource will always share
+the slave port and will always have a `user' name of `change', and a
+passwd of `changepw'. These limitations will be removed in the
+future._.
+
+   The `PBChangeSource' is created with the following arguments. All
+are optional.
+
+``port''
+     which port to listen on. If `None' (which is the default), it
+     shares the port used for buildslave connections. _Not
+     Implemented, always set to `None'_.
+
+``user' and `passwd''
+     The user/passwd account information that the client program must
+     use to connect. Defaults to `change' and `changepw'. _Not
+     Implemented; `user' is currently always set to `change' and
+     `passwd' is always set to `changepw'_.
+
+``prefix''
+     The prefix to be found and stripped from filenames delivered
+     over the connection. Any filenames which do not start with this
+     prefix will be removed. If all the filenames in a given Change
+are removed, then that whole Change will be dropped. This string
+     should probably end with a directory separator.
+
+     This is useful for changes coming from version control systems
+     that represent branches as parent directories within the
+     repository (like SVN and Perforce). Use a prefix of 'trunk/' or
+     'project/branches/foobranch/' to only follow one branch and to
+     get correct tree-relative filenames. Without a prefix, the
+     PBChangeSource will probably deliver Changes with filenames like
+     `trunk/foo.c' instead of just `foo.c'. Of course this also
+     depends upon the tool sending the Changes in (like `buildbot
+     sendchange') and what filenames it is delivering: that tool may
+     be filtering and stripping prefixes at the sending end.
+
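+   For example, a minimal sketch (the prefix value is a placeholder
+for your own repository layout; port, user, and passwd are left at
+their defaults, per the note above):
+
+     from buildbot.changes.pb import PBChangeSource
+
+     c['sources'].append(PBChangeSource(prefix="trunk/"))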
+
+
+File: buildbot.info,  Node: P4Source,  Next: BonsaiPoller,  Prev: PBChangeSource,  Up: Change Sources
+
+5.1.6 P4Source
+--------------
+
+The `P4Source' periodically polls a Perforce
+(http://www.perforce.com/) depot for changes. It accepts the
+following arguments:
+
+``p4base''
+     The base depot path to watch, without the trailing '/...'.
+
+``p4port''
+     The Perforce server to connect to (as host:port).
+
+``p4user''
+     The Perforce user.
+
+``p4passwd''
+     The Perforce password.
+
+``split_file''
+     A function that maps a pathname, without the leading `p4base',
+     to a (branch, filename) tuple. The default just returns (None,
+     branchfile), which effectively disables branch support. You
+     should supply a function which understands your repository
+     structure.
+
+``pollinterval''
+     How often to poll, in seconds. Defaults to 600 (10 minutes).
+
+``histmax''
+     The maximum number of changes to inspect at a time. If more than
+     this number occur since the last poll, older changes will be
+     silently ignored.
+
+Example
+=======
+
+This configuration uses the `P4PORT', `P4USER', and `P4PASSWD'
+specified in the buildmaster's environment. It watches a project in
+which the branch name is simply the next path component, and the file
+is all path components after.
+
+     from buildbot.changes import p4poller
+     c['sources'].append(p4poller.P4Source(
+             p4base='//depot/project/',
+             # branch is the first path component, the file is the rest
+             split_file=lambda branchfile: branchfile.split('/',1),
+     ))
+
+
+File: buildbot.info,  Node: BonsaiPoller,  Next: SVNPoller,  Prev: P4Source,  Up: Change Sources
+
+5.1.7 BonsaiPoller
+------------------
+
+The `BonsaiPoller' periodically polls a Bonsai server. This is a CGI
+script accessed through a web server that provides information about
+a CVS tree, for example the Mozilla bonsai server at
+`http://bonsai.mozilla.org'. Bonsai servers are usable by both humans
+and machines. In this case, the buildbot's change source forms a
+query which asks about any files in the specified branch which have
+changed since the last query.
+
+   Please take a look at the BonsaiPoller docstring for details about
+the arguments it accepts.
+
+
+File: buildbot.info,  Node: SVNPoller,  Prev: BonsaiPoller,  Up: Change Sources
+
+5.1.8 SVNPoller
+---------------
+
+The `buildbot.changes.svnpoller.SVNPoller' is a ChangeSource which
+periodically polls a Subversion (http://subversion.tigris.org/)
+repository for new revisions, by running the `svn log' command in a
+subshell. It can watch a single branch or multiple branches.
+
+   `SVNPoller' accepts the following arguments:
+
+`svnurl'
+     The base URL path to watch, like
+     `svn://svn.twistedmatrix.com/svn/Twisted/trunk', or
+     `http://divmod.org/svn/Divmod/', or even
+     `file:///home/svn/Repository/ProjectA/branches/1.5/'. This must
+     include the access scheme, the location of the repository (both
+     the hostname for remote ones, and any additional directory names
+     necessary to get to the repository), and the sub-path within the
+     repository's virtual filesystem for the project and branch of
+     interest.
+
+     The `SVNPoller' will only pay attention to files inside the
+     subdirectory specified by the complete svnurl.
+
+`split_file'
+     A function to convert pathnames into (branch, relative_pathname)
+     tuples. Use this to explain your repository's branch-naming
+     policy to `SVNPoller'. This function must accept a single string
+     and return a two-entry tuple. There are a few utility functions
+     in `buildbot.changes.svnpoller' that can be used as a
+     `split_file' function, see below for details.
+
+     The default value always returns (None, path), which indicates
+     that all files are on the trunk.
+
+     Subclasses of `SVNPoller' can override the `split_file' method
+     instead of using the `split_file=' argument.
+
+`svnuser'
+     An optional string parameter. If set, the `--username' argument
+     will be added to all `svn' commands. Use this if you have to
+     authenticate to the svn server before you can do `svn info' or
+     `svn log' commands.
+
+`svnpasswd'
+     Like `svnuser', this will cause a `--password' argument to be
+     passed to all svn commands.
+
+`pollinterval'
+     How often to poll, in seconds. Defaults to 600 (checking once
+     every 10 minutes). Lower this if you want the buildbot to notice
+     changes faster, raise it if you want to reduce the network and
+     CPU load on your svn server. Please be considerate of public SVN
+     repositories by using a large interval when polling them.
+
+`histmax'
+     The maximum number of changes to inspect at a time. Every
+     POLLINTERVAL seconds, the `SVNPoller' asks for the last HISTMAX
+     changes and looks through them for any ones it does not already
+     know about. If more than HISTMAX revisions have been committed
+     since the last poll, older changes will be silently ignored.
+     Larger values of histmax will cause more time and memory to be
+     consumed on each poll attempt.  `histmax' defaults to 100.
+
+`svnbin'
+     This controls the `svn' executable to use. If subversion is
+     installed in a weird place on your system (outside of the
+     buildmaster's `$PATH'), use this to tell `SVNPoller' where to
+     find it. The default value of "svn" will almost always be
+     sufficient.
+
+
+Branches
+========
+
+Each source file that is tracked by a Subversion repository has a
+fully-qualified SVN URL in the following form:
+(REPOURL)(PROJECT-plus-BRANCH)(FILEPATH). When you create the
+`SVNPoller', you give it a `svnurl' value that includes all of the
+REPOURL and possibly some portion of the PROJECT-plus-BRANCH string.
+The `SVNPoller' is responsible for producing Changes that contain a
+branch name and a FILEPATH (which is relative to the top of a
+checked-out tree). The details of how these strings are split up
+depend upon how your repository names its branches.
+
+PROJECT/BRANCHNAME/FILEPATH repositories
+----------------------------------------
+
+One common layout is to have all the various projects that share a
+repository get a single top-level directory each. Then under a given
+project's directory, you get two subdirectories, one named "trunk"
+and another named "branches". Under "branches" you have a bunch of
+other directories, one per branch, with names like "1.5.x" and
+"testing". It is also common to see directories like "tags" and
+"releases" next to "branches" and "trunk".
+
+   For example, the Twisted project has a subversion server on
+"svn.twistedmatrix.com" that hosts several sub-projects. The
+repository is available through a SCHEME of "svn:". The primary
+sub-project is Twisted, of course, with a repository root of
+"svn://svn.twistedmatrix.com/svn/Twisted". Another sub-project is
+Informant, with a root of
+"svn://svn.twistedmatrix.com/svn/Informant", etc. Inside any
+checked-out Twisted tree, there is a file named bin/trial (which is
+used to run unit test suites).
+
+   The trunk for Twisted is in
+"svn://svn.twistedmatrix.com/svn/Twisted/trunk", and the
+fully-qualified SVN URL for the trunk version of `trial' would be
+"svn://svn.twistedmatrix.com/svn/Twisted/trunk/bin/trial". The same
+SVNURL for that file on a branch named "1.5.x" would be
+"svn://svn.twistedmatrix.com/svn/Twisted/branches/1.5.x/bin/trial".
+
+   To set up a `SVNPoller' that watches the Twisted trunk (and
+nothing else), we would use the following:
+
+     from buildbot.changes.svnpoller import SVNPoller
+     s = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted/trunk")
+     c['sources'].append(s)
+
+   In this case, every Change that our `SVNPoller' produces will have
+`.branch=None', to indicate that the Change is on the trunk.  No
+other sub-projects or branches will be tracked.
+
+   If we want our ChangeSource to follow multiple branches, we have
+to do two things. First we have to change our `svnurl=' argument to
+watch more than just ".../Twisted/trunk". We will set it to
+".../Twisted" so that we'll see both the trunk and all the branches.
+Second, we have to tell `SVNPoller' how to split the
+(PROJECT-plus-BRANCH)(FILEPATH) strings it gets from the repository
+out into (BRANCH) and (FILEPATH) pairs.
+
+   We do the latter by providing a "split_file" function. This
+function is responsible for splitting something like
+"branches/1.5.x/bin/trial" into `branch'="branches/1.5.x" and
+`filepath'="bin/trial". This function is always given a string that
+names a file relative to the subdirectory pointed to by the
+`SVNPoller''s `svnurl=' argument. It is expected to return a
+(BRANCHNAME, FILEPATH) tuple (in which FILEPATH is relative to the
+branch indicated), or None to indicate that the file is outside any
+project of interest.
+
+   (note that we want to see "branches/1.5.x" rather than just
+"1.5.x" because when we perform the SVN checkout, we will probably
+append the branch name to the baseURL, which requires that we keep the
+"branches" component in there. Other VC schemes use a different
+approach towards branches and may not require this artifact.)
+
+   If your repository uses this same PROJECT/BRANCH/FILEPATH naming
+scheme, the following function will work:
+
+     def split_file_branches(path):
+         pieces = path.split('/')
+         if pieces[0] == 'trunk':
+             return (None, '/'.join(pieces[1:]))
+         elif pieces[0] == 'branches':
+             return ('/'.join(pieces[0:2]),
+                     '/'.join(pieces[2:]))
+         else:
+             return None
+
+   This function is provided as
+`buildbot.changes.svnpoller.split_file_branches' for your
+convenience. So to have our Twisted-watching `SVNPoller' follow
+multiple branches, we would use this:
+
+     from buildbot.changes.svnpoller import SVNPoller, split_file_branches
+     s = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted",
+                   split_file=split_file_branches)
+     c['sources'].append(s)
+
+   Changes for all sorts of branches (with names like
+"branches/1.5.x", and None to indicate the trunk) will be delivered
+to the Schedulers.  Each Scheduler is then free to use or ignore each
+branch as it sees fit.
+
+BRANCHNAME/PROJECT/FILEPATH repositories
+----------------------------------------
+
+Another common way to organize a Subversion repository is to put the
+branch name at the top, and the projects underneath. This is
+especially frequent when there are a number of related sub-projects
+that all get released in a group.
+
+   For example, Divmod.org hosts a project named "Nevow" as well as
+one named "Quotient". In a checked-out Nevow tree there is a directory
+named "formless" that contains a python source file named
+"webform.py". This repository is accessible via webdav (and thus uses
+an "http:" scheme) through the divmod.org hostname. There are many
+branches in this repository, and they use a (BRANCHNAME)/(PROJECT)
+naming policy.
+
+   The fully-qualified SVN URL for the trunk version of webform.py is
+`http://divmod.org/svn/Divmod/trunk/Nevow/formless/webform.py'.  You
+can do an `svn co' with that URL and get a copy of the latest
+version. The 1.5.x branch version of this file would have a URL of
+`http://divmod.org/svn/Divmod/branches/1.5.x/Nevow/formless/webform.py'.
+The whole Nevow trunk would be checked out with
+`http://divmod.org/svn/Divmod/trunk/Nevow', while the Quotient trunk
+would be checked out using
+`http://divmod.org/svn/Divmod/trunk/Quotient'.
+
+   Now suppose we want to have an `SVNPoller' that only cares about
+the Nevow trunk. This case looks just like the PROJECT/BRANCH layout
+described earlier:
+
+     from buildbot.changes.svnpoller import SVNPoller
+     s = SVNPoller("http://divmod.org/svn/Divmod/trunk/Nevow")
+     c['sources'].append(s)
+
+   But what happens when we want to track multiple Nevow branches? We
+have to point our `svnurl=' high enough to see all those branches,
+but we also don't want to include Quotient changes (since we're only
+building Nevow). To accomplish this, we must rely upon the
+`split_file' function to help us tell the difference between files
+that belong to Nevow and those that belong to Quotient, as well as
+figuring out which branch each one is on.
+
+     from buildbot.changes.svnpoller import SVNPoller
+     s = SVNPoller("http://divmod.org/svn/Divmod",
+                   split_file=my_file_splitter)
+     c['sources'].append(s)
+
+   The `my_file_splitter' function will be called with
+repository-relative pathnames like:
+
+`trunk/Nevow/formless/webform.py'
+     This is a Nevow file, on the trunk. We want the Change that
+     includes this to see a filename of `formless/webform.py', and a
+     branch of None.
+
+`branches/1.5.x/Nevow/formless/webform.py'
+     This is a Nevow file, on a branch. We want to get
+     branch="branches/1.5.x" and filename="formless/webform.py".
+
+`trunk/Quotient/setup.py'
+     This is a Quotient file, so we want to ignore it by having
+     `my_file_splitter' return None.
+
+`branches/1.5.x/Quotient/setup.py'
+     This is also a Quotient file, which should be ignored.
+
+   The following definition for `my_file_splitter' will do the job:
+
+     def my_file_splitter(path):
+         pieces = path.split('/')
+         if pieces[0] == 'trunk':
+             branch = None
+             pieces.pop(0) # remove 'trunk'
+         elif pieces[0] == 'branches':
+             pieces.pop(0) # remove 'branches'
+             # grab branch name
+             branch = 'branches/' + pieces.pop(0)
+         else:
+             return None # something weird
+         projectname = pieces.pop(0)
+         if projectname != 'Nevow':
+             return None # wrong project
+         return (branch, '/'.join(pieces))
+
+
+File: buildbot.info,  Node: Build Process,  Next: Status Delivery,  Prev: Getting Source Code Changes,  Up: Top
+
+6 Build Process
+***************
+
+A `Build' object is responsible for actually performing a build.  It
+gets access to a remote `SlaveBuilder' where it may run commands, and
+a `BuildStatus' object where it must emit status events. The `Build'
+is created by the Builder's `BuildFactory'.
+
+   The default `Build' class is made up of a fixed sequence of
+`BuildSteps', executed one after another until all are complete (or
+one of them indicates that the build should be halted early). The
+default `BuildFactory' creates instances of this `Build' class with a
+list of `BuildSteps', so the basic way to configure the build is to
+provide a list of `BuildSteps' to your `BuildFactory'.
+
+   More complicated `Build' subclasses can make other decisions:
+execute some steps only if certain files were changed, or if certain
+previous steps passed or failed. The base class has been written to
+allow users to express basic control flow without writing code, but
+you can always subclass and customize to achieve more specialized
+behavior.
+
+* Menu:
+
+* Build Steps::
+* Interlocks::
+* Build Factories::
+
+
+File: buildbot.info,  Node: Build Steps,  Next: Interlocks,  Prev: Build Process,  Up: Build Process
+
+6.1 Build Steps
+===============
+
+`BuildStep's are usually specified in the buildmaster's configuration
+file, in a list of "step specifications" that is used to create the
+`BuildFactory'. These "step specifications" are not actual steps, but
+rather a tuple of the `BuildStep' subclass to be created and a
+dictionary of arguments. (the actual `BuildStep' instances are not
+created until the Build is started, so that each Build gets an
+independent copy of each BuildStep). The preferred way to create
+these step specifications is with the `BuildFactory''s `addStep'
+method:
+
+     from buildbot.steps import source, shell
+     from buildbot.process import factory
+
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, svnurl="http://svn.example.org/Trunk/")
+     f.addStep(shell.ShellCommand, command=["make", "all"])
+     f.addStep(shell.ShellCommand, command=["make", "test"])
+
+   The rest of this section lists all the standard BuildStep objects
+available for use in a Build, and the parameters which can be used to
+control each.
+
+* Menu:
+
+* Common Parameters::
+* Source Checkout::
+* ShellCommand::
+* Simple ShellCommand Subclasses::
+* Python BuildSteps::
+* Transferring Files::
+* Writing New BuildSteps::
+
+
+File: buildbot.info,  Node: Common Parameters,  Next: Source Checkout,  Prev: Build Steps,  Up: Build Steps
+
+6.1.1 Common Parameters
+-----------------------
+
+The standard `Build' runs a series of `BuildStep's in order, only
+stopping when it runs out of steps or if one of them requests that
+the build be halted. It collects status information from each one to
+create an overall build status (of SUCCESS, WARNINGS, or FAILURE).
+
+   All BuildSteps accept some common parameters. Some of these control
+how their individual status affects the overall build. Others are used
+to specify which `Locks' (see *note Interlocks::) should be acquired
+before allowing the step to run.
+
+   Arguments common to all `BuildStep' subclasses:
+
+`name'
+     the name used to describe the step on the status display. It is
+     also used to give a name to any LogFiles created by this step.
+
+`haltOnFailure'
+     if True, a FAILURE of this build step will cause the build to
+     halt immediately with an overall result of FAILURE.
+
+`flunkOnWarnings'
+     when True, a WARNINGS or FAILURE of this build step will mark the
+     overall build as FAILURE. The remaining steps will still be
+     executed.
+
+`flunkOnFailure'
+     when True, a FAILURE of this build step will mark the overall
+     build as a FAILURE. The remaining steps will still be executed.
+
+`warnOnWarnings'
+     when True, a WARNINGS or FAILURE of this build step will mark the
+     overall build as having WARNINGS. The remaining steps will still
+     be executed.
+
+`warnOnFailure'
+     when True, a FAILURE of this build step will mark the overall
+     build as having WARNINGS. The remaining steps will still be
+     executed.
+
+`locks'
+     a list of Locks (instances of `buildbot.locks.SlaveLock' or
+     `buildbot.locks.MasterLock') that should be acquired before
+     starting this Step. The Locks will be released when the step is
+     complete. Note that this is a list of actual Lock instances, not
+     names. Also note that all Locks must have unique names.
+
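+   For example, a minimal sketch (the command and lock name are
+placeholders) of a step that halts the build on failure and holds a
+master-side lock while it runs:
+
+     from buildbot import locks
+     from buildbot.steps.shell import ShellCommand
+
+     db_lock = locks.MasterLock("database")
+     f.addStep(ShellCommand, command=["make", "integration-test"],
+               haltOnFailure=True, locks=[db_lock])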
+
+
+File: buildbot.info,  Node: Source Checkout,  Next: ShellCommand,  Prev: Common Parameters,  Up: Build Steps
+
+6.1.2 Source Checkout
+---------------------
+
+The first step of any build is typically to acquire the source code
+from which the build will be performed. There are several classes to
+handle this, one for each of the different source control systems that
+Buildbot knows about. For a description of how Buildbot treats source
+control in general, see *Note Version Control Systems::.
+
+   All source checkout steps accept some common parameters to control
+how they get the sources and where they should be placed. The
+remaining per-VC-system parameters are mostly to specify where
+exactly the sources are coming from.
+
+`mode'
+     a string describing the kind of VC operation that is desired.
+     Defaults to `update'.
+
+    `update'
+          specifies that the CVS checkout/update should be performed
+          directly into the workdir. Each build is performed in the
+          same directory, allowing for incremental builds. This
+          minimizes disk space, bandwidth, and CPU time. However, it
+          may encounter problems if the build process does not handle
+          dependencies properly (sometimes you must do a "clean
+          build" to make sure everything gets compiled), or if source
+          files are deleted but generated files can influence test
+          behavior (e.g. python's .pyc files), or when source
+          directories are deleted but generated files prevent CVS
+          from removing them. Builds ought to be correct regardless
+          of whether they are done "from scratch" or incrementally,
+          but it is useful to test both kinds: this mode exercises the
+          incremental-build style.
+
+    `copy'
+          specifies that the CVS workspace should be maintained in a
+          separate directory (called the 'copydir'), using checkout
+          or update as necessary. For each build, a new workdir is
+          created with a copy of the source tree (rm -rf workdir; cp
+          -r copydir workdir). This doubles the disk space required,
+          but keeps the bandwidth low (update instead of a full
+          checkout). A full 'clean' build is performed each time. This
+          avoids any generated-file build problems, but is still
+          occasionally vulnerable to CVS problems such as a
+          repository being manually rearranged, causing CVS errors on
+          update which are not an issue with a full checkout.
+
+    `clobber'
+          specifies that the working directory should be deleted each
+          time, necessitating a full checkout for each build. This
+          ensures a clean build off a complete checkout, avoiding any
+          of the problems described above. This mode exercises the
+          "from-scratch" build style.
+
+    `export'
+          this is like `clobber', except that the 'cvs export'
+          command is used to create the working directory. This
+          command removes all CVS metadata files (the CVS/
+          directories) from the tree, which is sometimes useful for
+          creating source tarballs (to avoid including the metadata
+          in the tar file).
+
+`workdir'
+     like all Steps, this indicates the directory where the build
+     will take place. Source Steps are special in that they perform
+     some operations outside of the workdir (like creating the
+     workdir itself).
+
+`alwaysUseLatest'
+     if True, bypass the usual "update to the last Change" behavior,
+     and always update to the latest changes instead.
+
+`retry'
+     If set, this specifies a tuple of `(delay, repeats)' which means
+     that when a full VC checkout fails, it should be retried up to
+     REPEATS times, waiting DELAY seconds between attempts. If you
+     don't provide this, it defaults to `None', which means VC
+     operations should not be retried. This is provided to make life
+     easier for buildslaves which are stuck behind poor network
+     connections.
+
+
+   My habit as a developer is to do a `cvs update' and `make' each
+morning. Problems can occur, either because of bad code being checked
+in, or by incomplete dependencies causing a partial rebuild to fail
+where a complete from-scratch build might succeed. A quick Builder
+which emulates this incremental-build behavior would use the
+`mode='update'' setting.
+
+   On the other hand, other kinds of dependency problems can cause a
+clean build to fail where a partial build might succeed. This
+frequently results from a link step that depends upon an object file
+that was removed from a later version of the tree: in the partial
+tree, the object file is still around (even though the Makefiles no
+longer know how to create it).
+
+   "official" builds (traceable builds performed from a known set of
+source revisions) are always done as clean builds, to make sure they are
+not influenced by any uncontrolled factors (like leftover files from a
+previous build). A "full" Builder which behaves this way would want
+to use the `mode='clobber'' setting.
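+
+   For example, a minimal sketch (the svnurl and the two factories
+are placeholders): the quick and full Builders would differ only in
+the `mode=' argument of their source step:
+
+     from buildbot.steps import source
+
+     f_quick.addStep(source.SVN, mode='update',
+                     svnurl="http://svn.example.org/Trunk/")
+     f_full.addStep(source.SVN, mode='clobber',
+                    svnurl="http://svn.example.org/Trunk/")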
+
+   Each VC system has a corresponding source checkout class: their
+arguments are described on the following pages.
+
+* Menu:
+
+* CVS::
+* SVN::
+* Darcs::
+* Mercurial::
+* Arch::
+* Bazaar::
+* P4::
+
+
+File: buildbot.info,  Node: CVS,  Next: SVN,  Prev: Source Checkout,  Up: Source Checkout
+
+6.1.2.1 CVS
+...........
+
+The `CVS' build step performs a CVS (http://www.nongnu.org/cvs/)
+checkout or update. It takes the following arguments:
+
+`cvsroot'
+     (required): specify the CVSROOT value, which points to a CVS
+     repository, probably on a remote machine. For example, the
+     cvsroot value you would use to get a copy of the Buildbot source
+     code is
+     `:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot'
+
+`cvsmodule'
+     (required): specify the cvs `module', which is generally a
+     subdirectory of the CVSROOT. The cvsmodule for the Buildbot
+     source code is `buildbot'.
+
+`branch'
+     a string which will be used in a `-r' argument. This is most
+     useful for specifying a branch to work on. Defaults to `HEAD'.
+
+`global_options'
+     a list of flags to be put before the verb in the CVS command.
+
+`checkoutDelay'
+     if set, the number of seconds to put between the timestamp of
+     the last known Change and the value used for the `-D' option.
+     Defaults to half of the parent Build's treeStableTimer.
+
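+   For example, a minimal sketch that checks out the Buildbot source
+code named above:
+
+     from buildbot.steps import source
+
+     f.addStep(source.CVS, mode='copy',
+               cvsroot=":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot",
+               cvsmodule="buildbot")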
+
+
+File: buildbot.info,  Node: SVN,  Next: Darcs,  Prev: CVS,  Up: Source Checkout
+
+6.1.2.2 SVN
+...........
+
+The `SVN' build step performs a Subversion
+(http://subversion.tigris.org) checkout or update.  There are two
+basic ways of setting up the checkout step, depending upon whether
+you are using multiple branches or not.
+
+   If all of your builds use the same branch, then you should create
+the `SVN' step with the `svnurl' argument:
+
+`svnurl'
+     (required): this specifies the `URL' argument that will be given
+     to the `svn checkout' command. It dictates both where the
+     repository is located and which sub-tree should be extracted. In
+     this respect, it is like a combination of the CVS `cvsroot' and
+     `cvsmodule' arguments. For example, if you are using a remote
+     Subversion repository which is accessible through HTTP at a URL
+     of `http://svn.example.com/repos', and you wanted to check out
+     the `trunk/calc' sub-tree, you would use
+     `svnurl="http://svn.example.com/repos/trunk/calc"' as an argument
+     to your `SVN' step.
+
+   If, on the other hand, you are building from multiple branches,
+then you should create the `SVN' step with the `baseURL' and
+`defaultBranch' arguments instead:
+
+`baseURL'
+     (required): this specifies the base repository URL, to which a
+     branch name will be appended. It should probably end in a slash.
+
+`defaultBranch'
+     this specifies the name of the branch to use when a Build does
+     not provide one of its own. This will be appended to `baseURL' to
+     create the string that will be passed to the `svn checkout'
+     command.
+
+   If you are using branches, you must also make sure your
+`ChangeSource' will report the correct branch names.
+
+branch example
+==============
+
+Let's suppose that the "MyProject" repository uses branches for the
+trunk, for various users' individual development efforts, and for
+several new features that will require some amount of work (involving
+multiple developers) before they are ready to merge onto the trunk.
+Such a repository might be organized as follows:
+
+     svn://svn.example.org/MyProject/trunk
+     svn://svn.example.org/MyProject/branches/User1/foo
+     svn://svn.example.org/MyProject/branches/User1/bar
+     svn://svn.example.org/MyProject/branches/User2/baz
+     svn://svn.example.org/MyProject/features/newthing
+     svn://svn.example.org/MyProject/features/otherthing
+
+   Further assume that we want the Buildbot to run tests against the
+trunk and against all the feature branches (i.e., do a
+checkout/compile/build of branch X when a file has been changed on
+branch X, when X is in the set [trunk, features/newthing,
+features/otherthing]). We do not want the Buildbot to automatically
+build any of the user branches, but it should be willing to build a
+user branch when explicitly requested (most likely by the user who
+owns that branch).
+
+   There are three things that need to be set up to accommodate this
+system. The first is a ChangeSource that is capable of identifying the
+branch which owns any given file. This depends upon a user-supplied
+function, in an external program that runs in the SVN commit hook and
+connects to the buildmaster's `PBChangeSource' over a TCP connection.
+(you can use the "`buildbot sendchange'" utility for this purpose,
+but you will still need an external program to decide what value
+should be passed to the `--branch=' argument).  For example, a change
+to a file with the SVN url of
+"svn://svn.example.org/MyProject/features/newthing/src/foo.c" should
+be broken down into a Change instance with
+`branch='features/newthing'' and `file='src/foo.c''.
+
+   The second piece is an `AnyBranchScheduler' which will pay
+attention to the desired branches. It will not pay attention to the
+user branches, so it will not automatically start builds in response
+to changes there. The AnyBranchScheduler class requires you to
+explicitly list all the branches you want it to use, but it would not
+be difficult to write a subclass which used
+`branch.startswith('features/')' to remove the need for this explicit
+list. Or, if you want to build user branches too, you can use
+AnyBranchScheduler with `branches=None' to indicate that you want it
+to pay attention to all branches.
+
+   The third piece is an `SVN' checkout step that is configured to
+handle the branches correctly, with a `baseURL' value that matches
+the way the ChangeSource splits each file's URL into base, branch,
+and file.
+
+     from buildbot.changes.pb import PBChangeSource
+     from buildbot.scheduler import AnyBranchScheduler
+     from buildbot.process import factory
+     from buildbot.steps import source, shell
+
+     c['sources'] = [PBChangeSource()]
+     s1 = AnyBranchScheduler('main',
+                             ['trunk', 'features/newthing', 'features/otherthing'],
+                             10*60, ['test-i386', 'test-ppc'])
+     c['schedulers'] = [s1]
+
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, mode='update',
+               baseURL='svn://svn.example.org/MyProject/',
+               defaultBranch='trunk')
+     f.addStep(shell.Compile, command="make all")
+     f.addStep(shell.Test, command="make test")
+
+     c['builders'] = [
+       {'name':'test-i386', 'slavename':'bot-i386', 'builddir':'test-i386',
+                            'factory':f },
+       {'name':'test-ppc', 'slavename':'bot-ppc', 'builddir':'test-ppc',
+                           'factory':f },
+      ]
+
+   In this example, when a change arrives with a `branch' attribute
+of "trunk", the resulting build will have an SVN step that
+concatenates "svn://svn.example.org/MyProject/" (the baseURL) with
+"trunk" (the branch name) to get the correct svn command. If the
+"newthing" branch has a change to "src/foo.c", then the SVN step will
+concatenate "svn://svn.example.org/MyProject/" with
+"features/newthing" to get the svnurl for checkout.
+
+
+File: buildbot.info,  Node: Darcs,  Next: Mercurial,  Prev: SVN,  Up: Source Checkout
+
+6.1.2.3 Darcs
+.............
+
+The `Darcs' build step performs a Darcs
+(http://abridgegame.org/darcs/) checkout or update.
+
+   Like *Note SVN::, this step can either be configured to always
+check out a specific tree, or set up to pull from a particular branch
+that gets specified separately for each build. Also like SVN, the
+repository URL given to Darcs is created by concatenating a `baseURL'
+with the branch name, and if no particular branch is requested, it
+uses a `defaultBranch'. The only difference in usage is that each
+potential Darcs repository URL must point to a fully-fledged
+repository, whereas SVN URLs usually point to sub-trees of the main
+Subversion repository. In other words, doing an SVN checkout of
+`baseURL' is legal, but silly, since you'd probably wind up with a
+copy of every single branch in the whole repository.  Doing a Darcs
+checkout of `baseURL' is just plain wrong, since the parent directory
+of a collection of Darcs repositories is not itself a valid
+repository.
+
+   The Darcs step takes the following arguments:
+
+`repourl'
+     (required unless `baseURL' is provided): the URL at which the
+     Darcs source repository is available.
+
+`baseURL'
+     (required unless `repourl' is provided): the base repository URL,
+     to which a branch name will be appended. It should probably end
+     in a slash.
+
+`defaultBranch'
+     (allowed if and only if `baseURL' is provided): this specifies
+     the name of the branch to use when a Build does not provide one
+     of its own. This will be appended to `baseURL' to create the
+     string that will be passed to the `darcs get' command.
+
+
+File: buildbot.info,  Node: Mercurial,  Next: Arch,  Prev: Darcs,  Up: Source Checkout
+
+6.1.2.4 Mercurial
+.................
+
+The `Mercurial' build step performs a Mercurial
+(http://selenic.com/mercurial) (aka "hg") checkout or update.
+
+   Branches are handled just like *Note Darcs::.
+
+   The Mercurial step takes the following arguments:
+
+`repourl'
+     (required unless `baseURL' is provided): the URL at which the
+     Mercurial source repository is available.
+
+`baseURL'
+     (required unless `repourl' is provided): the base repository URL,
+     to which a branch name will be appended. It should probably end
+     in a slash.
+
+`defaultBranch'
+     (allowed if and only if `baseURL' is provided): this specifies
+     the name of the branch to use when a Build does not provide one
+     of its own. This will be appended to `baseURL' to create the
+     string that will be passed to the `hg clone' command.
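+
+   For example, a minimal sketch (the repository URL is a
+placeholder):
+
+     from buildbot.steps import source
+
+     f.addStep(source.Mercurial, mode='copy',
+               repourl="http://hg.example.org/myproject")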
+
+
+File: buildbot.info,  Node: Arch,  Next: Bazaar,  Prev: Mercurial,  Up: Source Checkout
+
+6.1.2.5 Arch
+............
+
+The `Arch' build step performs an Arch (http://gnuarch.org/) checkout
+or update using the `tla' client. It takes the following arguments:
+
+`url'
+     (required): this specifies the URL at which the Arch source
+     archive is available.
+
+`version'
+     (required): this specifies which "development line" (like a
+     branch) should be used. This provides the default branch name,
+     but individual builds may specify a different one.
+
+`archive'
+     (optional): Each repository knows its own archive name. If this
+     parameter is provided, it must match the repository's archive
+     name.  The parameter is accepted for compatibility with the
+     `Bazaar' step, below.
+
+
+
+File: buildbot.info,  Node: Bazaar,  Next: P4,  Prev: Arch,  Up: Source Checkout
+
+6.1.2.6 Bazaar
+..............
+
+`Bazaar' is an alternate implementation of the Arch VC system, which
+uses a client named `baz'. The checkout semantics are just different
+enough from `tla' that there is a separate BuildStep for it.
+
+   It takes exactly the same arguments as `Arch', except that the
+`archive=' parameter is required. (baz does not emit the archive name
+when you do `baz register-archive', so we must provide it ourselves).
+
+
+File: buildbot.info,  Node: P4,  Prev: Bazaar,  Up: Source Checkout
+
+6.1.2.7 P4
+..........
+
+The `P4' build step creates a Perforce (http://www.perforce.com/)
+client specification and performs an update.
+
+`p4base'
+     A view into the Perforce depot without branch name or trailing
+     "...".  Typically "//depot/proj/".
+
+`defaultBranch'
+     A branch name to append on build requests if none is specified.
+     Typically "trunk".
+
+`p4port'
+     (optional): the host:port string describing how to get to the P4
+     Depot (repository), used as the -p argument for all p4 commands.
+
+`p4user'
+     (optional): the Perforce user, used as the -u argument to all p4
+     commands.
+
+`p4passwd'
+     (optional): the Perforce password, used as the -P argument to
+     all p4 commands.
+
+`p4extra_views'
+     (optional): a list of (depotpath, clientpath) tuples containing
+     extra views to be mapped into the client specification. Both
+     will have "/..." appended automatically. The client name and
+     source directory will be prepended to the client path.
+
+`p4client'
+     (optional): The name of the client to use. In mode='copy' and
+     mode='update', it's particularly important that a unique name is
+     used for each checkout directory to avoid incorrect
+     synchronization. For this reason, Python percent substitution
+     will be performed on this value to replace %(slave)s with the
+     slave name and %(builder)s with the builder name. The default is
+     "buildbot_%(slave)s_%(build)s".
+
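+   For example, a minimal sketch (the depot path, server, and account
+values are placeholders):
+
+     from buildbot.steps import source
+
+     f.addStep(source.P4, mode='copy',
+               p4base="//depot/proj/", defaultBranch="trunk",
+               p4port="perforce.example.com:1666",
+               p4user="buildbot", p4passwd="secret")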
+
+File: buildbot.info,  Node: ShellCommand,  Next: Simple ShellCommand Subclasses,  Prev: Source Checkout,  Up: Build Steps
+
+6.1.3 ShellCommand
+------------------
+
+This is a useful base class for just about everything you might want
+to do during a build (except for the initial source checkout). It runs
+a single command in a child shell on the buildslave. All stdout/stderr
+is recorded into a LogFile. The step finishes with a status of FAILURE
+if the command's exit code is non-zero, otherwise it has a status of
+SUCCESS.
+
+   The preferred way to specify the command is with a list of argv
+strings, since this allows for spaces in filenames and avoids doing
+any fragile shell-escaping. You can also specify the command with a
+single string, in which case the string is given to '/bin/sh -c
+COMMAND' for parsing.
+
+   All ShellCommands are run by default in the "workdir", which
+defaults to the "`build'" subdirectory of the slave builder's base
+directory. The absolute path of the workdir will thus be the slave's
+basedir (set as an option to `buildbot create-slave', *note Creating
+a buildslave::) plus the builder's basedir (set in the builder's
+`c['builddir']' key in master.cfg) plus the workdir itself (a
+class-level attribute of the BuildFactory, defaults to "`build'").
+
+   `ShellCommand' arguments:
+
+`command'
+     a list of strings (preferred) or single string (discouraged)
+     which specifies the command to be run. A list of strings is
+     preferred because it can be used directly as an argv array.
+     Using a single string (with embedded spaces) requires the
+     buildslave to pass the string to /bin/sh for interpretation,
+     which raises all sorts of difficult questions about how to
+     escape or interpret shell metacharacters.
+
+`env'
+     a dictionary of environment strings which will be added to the
+     child command's environment. For example, to run tests with a
+     different i18n language setting, you might use
+
+          f.addStep(ShellCommand, command=["make", "test"],
+                    env={'LANG': 'fr_FR'})
+
+     These variable settings will override any existing ones in the
+     buildslave's environment. The exception is PYTHONPATH, which is
+     merged with (actually prepended to) any existing $PYTHONPATH
+     setting. The value is treated as a list of directories to
+     prepend, and a single string is treated like a one-item list.
+     For example, to prepend both `/usr/local/lib/python2.3' and
+     `/home/buildbot/lib/python' to any existing $PYTHONPATH setting,
+     you would do something like the following:
+
+          f.addStep(ShellCommand, command=["make", "test"],
+                    env={'PYTHONPATH': ["/usr/local/lib/python2.3",
+                                        "/home/buildbot/lib/python"] })
+
+`want_stdout'
+     if False, stdout from the child process is discarded rather than
+     being sent to the buildmaster for inclusion in the step's
+     LogFile.
+
+`want_stderr'
+     like `want_stdout' but for stderr. Note that commands run through
+     a PTY do not have separate stdout/stderr streams: both are
+     merged into stdout.
+
+`logfiles'
+     Sometimes commands will log interesting data to a local file,
+     rather than emitting everything to stdout or stderr. For
+     example, Twisted's "trial" command (which runs unit tests) only
+     presents summary information to stdout, and puts the rest into a
+     file named `_trial_temp/test.log'. It is often useful to watch
+     these files as the command runs, rather than using `/bin/cat' to
+     dump their contents afterwards.
+
+     The `logfiles=' argument allows you to collect data from these
+     secondary logfiles in near-real-time, as the step is running. It
+     accepts a dictionary which maps from a local Log name (which is
+     how the log data is presented in the build results) to a remote
+     filename (interpreted relative to the build's working
+     directory). Each named file will be polled on a regular basis
+     (every couple of seconds) as the build runs, and any new text
+     will be sent over to the buildmaster.
+
+          f.addStep(ShellCommand, command=["make", "test"],
+                    logfiles={"triallog": "_trial_temp/test.log"})
+
+`timeout'
+     if the command fails to produce any output for this many
+     seconds, it is assumed to be locked up and will be killed.
+
+`description'
+     This will be used to describe the command (on the Waterfall
+     display) while the command is still running. It should be a
+     single imperfect-tense verb, like "compiling" or "testing". The
+     preferred form is a list of short strings, which allows the HTML
+     Waterfall display to create narrower columns by emitting a <br>
+     tag between each word. You may also provide a single string.
+
+`descriptionDone'
+     This will be used to describe the command once it has finished. A
+     simple noun like "compile" or "tests" should be used. Like
+     `description', this may either be a list of short strings or a
+     single string.
+
+     If neither `description' nor `descriptionDone' are set, the
+     actual command arguments will be used to construct the
+     description.  This may be a bit too wide to fit comfortably on
+     the Waterfall display.
+
+          f.addStep(ShellCommand, command=["make", "test"],
+                    description=["testing"],
+                    descriptionDone=["tests"])
+
+
+
+File: buildbot.info,  Node: Simple ShellCommand Subclasses,  Next: Python BuildSteps,  Prev: ShellCommand,  Up: Build Steps
+
+6.1.4 Simple ShellCommand Subclasses
+------------------------------------
+
+Several subclasses of ShellCommand are provided as starting points for
+common build steps. These are all very simple: they just override a
+few parameters so you don't have to specify them yourself, making the
+master.cfg file less verbose.
+
+* Menu:
+
+* Configure::
+* Compile::
+* Test::
+* Build Properties::
+
+
+File: buildbot.info,  Node: Configure,  Next: Compile,  Prev: Simple ShellCommand Subclasses,  Up: Simple ShellCommand Subclasses
+
+6.1.4.1 Configure
+.................
+
+This is intended to handle the `./configure' step from autoconf-style
+projects, or the `perl Makefile.PL' step from perl MakeMaker.pm-style
+modules. The default command is `./configure' but you can change this
+by providing a `command=' parameter.
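+
+   For example, a minimal sketch for a perl MakeMaker-style module:
+
+     from buildbot.steps.shell import Configure
+
+     f.addStep(Configure, command=["perl", "Makefile.PL"])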
+
+
+File: buildbot.info,  Node: Compile,  Next: Test,  Prev: Configure,  Up: Simple ShellCommand Subclasses
+
+6.1.4.2 Compile
+...............
+
+This is meant to handle compiling or building a project written in C.
+The default command is `make all'. When the compile is finished, the
+log file is scanned for GCC error/warning messages and a summary log
+is created with any problems that were seen (TODO: the summary is not
+yet created).
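+
+   For example, a minimal sketch that overrides the default command:
+
+     from buildbot.steps.shell import Compile
+
+     f.addStep(Compile, command=["make", "-j2", "all"])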
+
+
+File: buildbot.info,  Node: Test,  Next: Build Properties,  Prev: Compile,  Up: Simple ShellCommand Subclasses
+
+6.1.4.3 Test
+............
+
+This is meant to handle unit tests. The default command is `make
+test', and the `warnOnFailure' flag is set.
+
+
+File: buildbot.info,  Node: Build Properties,  Prev: Test,  Up: Simple ShellCommand Subclasses
+
+6.1.4.4 Build Properties
+........................
+
+Each build has a set of "Build Properties", which can be used by its
+BuildSteps to modify their actions. For example, the SVN revision
+number of the source code being built is available as a build
+property, and a ShellCommand step could incorporate this number into a
+command which creates a numbered release tarball.
+
+   Some build properties are set when the build starts, such as the
+SourceStamp information. Other properties can be set by BuildSteps as
+they run, for example the various Source steps will set the
+`got_revision' property to the source revision that was actually
+checked out (which can be useful when the SourceStamp in use merely
+requested the "latest revision": `got_revision' will tell you what
+was actually built).
+
+   In custom BuildSteps, you can get and set the build properties with
+the `getProperty'/`setProperty' methods. Each takes a string for the
+name of the property, and returns or accepts an arbitrary(1) object.
+For example:
+
+     class MakeTarball(ShellCommand):
+         def start(self):
+             self.setCommand(["tar", "czf",
+                              "build-%s.tar.gz" % self.getProperty("revision"),
+                              "source"])
+             ShellCommand.start(self)
+
+   You can use build properties in ShellCommands by using the
+`WithProperties' wrapper when setting the arguments of the
+ShellCommand. This interpolates the named build properties into the
+generated shell command.
+
+     from buildbot.steps.shell import ShellCommand, WithProperties
+
+     f.addStep(ShellCommand,
+               command=["tar", "czf",
+                        WithProperties("build-%s.tar.gz", "revision"),
+                        "source"])
+
+   If this BuildStep were used in a tree obtained from Subversion, it
+would create a tarball with a name like `build-1234.tar.gz'.
+
+   The `WithProperties' function does `printf'-style string
+interpolation, using strings obtained by calling
+`build.getProperty(propname)'. Note that for every `%s' (or `%d',
+etc), you must have exactly one additional argument to indicate which
+build property you want to insert.
+
+   You can also use python dictionary-style string interpolation by
+using the `%(propname)s' syntax. In this form, the property name goes
+in the parentheses, and WithProperties takes _no_ additional
+arguments:
+
+     f.addStep(ShellCommand,
+               command=["tar", "czf",
+                        WithProperties("build-%(revision)s.tar.gz"),
+                        "source"])
+
+   Don't forget the extra "s" after the closing parenthesis! This is
+the cause of many confusing errors. Also note that you can only use
+WithProperties in the list form of the command= definition. You cannot
+currently use it in the (discouraged) `command="stuff"' single-string
+form. However, you can use something like `command=["/bin/sh", "-c",
+"stuff", WithProperties(stuff)]' to use both shell expansion and
+WithProperties interpolation.
+
+   Note that, like python, you can either do positional-argument
+interpolation _or_ keyword-argument interpolation, not both. Thus you
+cannot use a string like `WithProperties("foo-%(revision)s-%s",
+"branch")'.
+
+   At the moment, the only way to set build properties is by writing a
+custom BuildStep.
+
+Common Build Properties
+=======================
+
+The following build properties are set when the build is started, and
+are available to all steps.
+
+`branch'
+     This comes from the build's SourceStamp, and describes which
+     branch is being checked out. This will be `None' (which
+     interpolates into `WithProperties' as an empty string) if the
+     build is on the default branch, which is generally the trunk.
+     Otherwise it will be a string like "branches/beta1.4". The exact
+     syntax depends upon the VC system being used.
+
+`revision'
+     This also comes from the SourceStamp, and is the revision of the
+     source code tree that was requested from the VC system. When a
+     build is requested of a specific revision (as is generally the
+     case when the build is triggered by Changes), this will contain
+     the revision specification. The syntax depends upon the VC
+     system in use: for SVN it is an integer, for Mercurial it is a
+     short string, for Darcs it is a rather large string, etc.
+
+     If the "force build" button was pressed, the revision will be
+     `None', which means to use the most recent revision available.
+     This is a "trunk build". This will be interpolated as an empty
+     string.
+
+`got_revision'
+     This is set when a Source step checks out the source tree, and
+     provides the revision that was actually obtained from the VC
+     system.  In general this should be the same as `revision',
+     except for trunk builds, where `got_revision' indicates what
+     revision was current when the checkout was performed. This can
+     be used to rebuild the same source code later.
+
+     Note that for some VC systems (Darcs in particular), the
+     revision is a large string containing newlines, and is not
+     suitable for interpolation into a filename.
+
+`buildername'
+     This is a string that indicates which Builder the build was a
+     part of.  The combination of buildername and buildnumber
+     uniquely identify a build.
+
+`buildnumber'
+     Each build gets a number, scoped to the Builder (so the first
+     build performed on any given Builder will have a build number of
+     0). This integer property contains the build's number.
+
+`slavename'
+     This is a string which identifies which buildslave the build is
+     running on.
+
+
+   ---------- Footnotes ----------
+
+   (1) Build properties are serialized along with the build results,
+so they must be serializable. For this reason, the value of any build
+property should be simple inert data: strings, numbers, lists,
+tuples, and dictionaries. They should not contain class instances.
+
+
+File: buildbot.info,  Node: Python BuildSteps,  Next: Transferring Files,  Prev: Simple ShellCommand Subclasses,  Up: Build Steps
+
+6.1.5 Python BuildSteps
+-----------------------
+
+Here are some BuildSteps that are specifically useful for projects
+implemented in Python.
+
+* Menu:
+
+* BuildEPYDoc::
+* PyFlakes::
+
+
+File: buildbot.info,  Node: BuildEPYDoc,  Next: PyFlakes,  Prev: Python BuildSteps,  Up: Python BuildSteps
+
+6.1.5.1 BuildEPYDoc
+...................
+
+epydoc (http://epydoc.sourceforge.net/) is a tool for generating API
+documentation for Python modules from their docstrings. It reads all
+the .py files from your source tree, processes the docstrings
+therein, and creates a large tree of .html files (or a single .pdf
+file).
+
+   The `buildbot.steps.python.BuildEPYDoc' step will run `epydoc' to
+produce this API documentation, and will count the errors and
+warnings from its output.
+
+   You must supply the command line to be used. The default is `make
+epydocs', which assumes that your project has a Makefile with an
+"epydocs" target. You might wish to use something like `epydoc -o
+apiref source/PKGNAME' instead. You might also want to add `--pdf' to
+generate a PDF file instead of a large tree of HTML files.
+
+   The API docs are generated in-place in the build tree (under the
+workdir, in the subdirectory controlled by the "-o" argument). To
+make them useful, you will probably have to copy them to somewhere
+they can be read. A command like `rsync -ad apiref/
+dev.example.com:~public_html/current-apiref/' might be useful. You
+might instead want to bundle them into a tarball and publish it in the
+same place where the generated install tarball is placed.
+
+     from buildbot.steps.python import BuildEPYDoc
+
+     ...
+     f.addStep(BuildEPYDoc, command=["epydoc", "-o", "apiref", "source/mypkg"])
+
+
+File: buildbot.info,  Node: PyFlakes,  Prev: BuildEPYDoc,  Up: Python BuildSteps
+
+6.1.5.2 PyFlakes
+................
+
+PyFlakes (http://divmod.org/trac/wiki/DivmodPyflakes) is a tool to
+perform basic static analysis of Python code to look for simple
+errors, like missing imports and references of undefined names. It is
+like a fast and simple form of the C "lint" program. Other tools
+(like pychecker) provide more detailed results but take longer to run.
+
+   The `buildbot.steps.python.PyFlakes' step will run pyflakes and
+count the various kinds of errors and warnings it detects.
+
+   You must supply the command line to be used. The default is `make
+pyflakes', which assumes you have a top-level Makefile with a
+"pyflakes" target. You might want to use something like `pyflakes .'
+or `pyflakes src'.
+
+     from buildbot.steps.python import PyFlakes
+
+     ...
+     f.addStep(PyFlakes, command=["pyflakes", "src"])
+
+
+File: buildbot.info,  Node: Transferring Files,  Next: Writing New BuildSteps,  Prev: Python BuildSteps,  Up: Build Steps
+
+6.1.6 Transferring Files
+------------------------
+
+Most of the work involved in a build will take place on the
+buildslave. But occasionally it is useful to do some work on the
+buildmaster side. The most basic way to involve the buildmaster is
+simply to move a file from the slave to the master, or vice versa.
+There are a pair of BuildSteps named `FileUpload' and `FileDownload'
+to provide this functionality. `FileUpload' moves a file _up to_ the
+master, while `FileDownload' moves a file _down from_ the master.
+
+   As an example, let's assume that there is a step which produces an
+HTML file within the source tree that contains some sort of generated
+project documentation. We want to move this file to the buildmaster,
+into a `~/public_html' directory, so it can be visible to developers.
+This file will wind up in the slave-side working directory under the
+name `docs/reference.html'. We want to put it into the master-side
+`~/public_html/ref.html'.
+
+     from buildbot.steps.shell import ShellCommand
+     from buildbot.steps.transfer import FileUpload
+
+     f.addStep(ShellCommand, command=["make", "docs"])
+     f.addStep(FileUpload,
+               slavesrc="docs/reference.html",
+               masterdest="~/public_html/ref.html")
+
+   The `masterdest=' argument will be passed to os.path.expanduser,
+so things like "~" will be expanded properly. Non-absolute paths will
+be interpreted relative to the buildmaster's base directory.
+Likewise, the `slavesrc=' argument will be expanded and interpreted
+relative to the builder's working directory.
+
+   To move a file from the master to the slave, use the
+`FileDownload' command. For example, let's assume that some step
+requires a configuration file that, for whatever reason, could not be
+recorded in the source code repository or generated on the buildslave
+side:
+
+     from buildbot.steps.shell import ShellCommand
+     from buildbot.steps.transfer import FileDownload
+
+     f.addStep(FileDownload,
+               mastersrc="~/todays_build_config.txt",
+               slavedest="build_config.txt")
+     f.addStep(ShellCommand, command=["make", "config"])
+
+   Like `FileUpload', the `mastersrc=' argument is interpreted
+relative to the buildmaster's base directory, and the `slavedest='
+argument is relative to the builder's working directory. If the
+buildslave is running in `~buildslave', and the builder's "builddir"
+is something like `tests-i386', then the workdir is going to be
+`~buildslave/tests-i386/build', and a `slavedest=' of `foo/bar.html'
+will get put in `~buildslave/tests-i386/build/foo/bar.html'. Remember
+that neither of these commands will create missing directories for
+you.
+
+Other Parameters
+----------------
+
+The `maxsize=' argument lets you set a maximum size for the file to
+be transferred. This may help to avoid surprises: transferring a
+100MB coredump when you were expecting to move a 10kB status file
+might take an awfully long time. The `blocksize=' argument controls
+how the file is sent over the network: larger blocksizes are slightly
+more efficient but also consume more memory on each end, and there is
+a hard-coded limit of about 640kB.
+
+   The `mode=' argument allows you to control the access permissions
+of the target file, traditionally expressed as an octal integer. The
+most common value is probably 0755, which sets the "x" executable bit
+on the file (useful for shell scripts and the like). The default
+value for `mode=' is None, which means the permission bits will
+default to whatever the umask of the writing process is. The default
+umask tends to be fairly restrictive, but at least on the buildslave
+you can make it less restrictive with a -umask command-line option at
+creation time (*note Buildslave Options::).
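+
+   For example, a single upload step might combine these arguments as
+follows (a sketch; the size limits and mode value are illustrative
+choices, not defaults):
+
+     from buildbot.steps.transfer import FileUpload
+
+     f.addStep(FileUpload,
+               slavesrc="docs/reference.html",
+               masterdest="~/public_html/ref.html",
+               maxsize=10*1024*1024, # refuse to transfer more than 10MB
+               blocksize=64*1024,    # send the file in 64kB chunks
+               mode=0644)            # make the uploaded file world-readable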
+
+
+File: buildbot.info,  Node: Writing New BuildSteps,  Prev: Transferring Files,  Up: Build Steps
+
+6.1.7 Writing New BuildSteps
+----------------------------
+
+While it is a good idea to keep your build process self-contained in
+the source code tree, sometimes it is convenient to put more
+intelligence into your Buildbot configuration. One way to do this is
+to write a custom BuildStep. Once written, this Step can be used in
+the `master.cfg' file.
+
+   The best reason for writing a custom BuildStep is to better parse
+the results of the command being run. For example, a BuildStep that
+knows about JUnit could look at the logfiles to determine which tests
+had been run, how many passed and how many failed, and then report
+more detailed information than a simple `rc==0'-based "good/bad"
+decision.
+
+   TODO: add more description of BuildSteps.
+
+* Menu:
+
+* BuildStep LogFiles::
+* Adding LogObservers::
+* BuildStep URLs::
+
+
+File: buildbot.info,  Node: BuildStep LogFiles,  Next: Adding LogObservers,  Prev: Writing New BuildSteps,  Up: Writing New BuildSteps
+
+6.1.7.1 BuildStep LogFiles
+..........................
+
+Each BuildStep has a collection of "logfiles". Each one has a short
+name, like "stdio" or "warnings". Each LogFile contains an arbitrary
+amount of text, usually the contents of some output file generated
+during a build or test step, or a record of everything that was
+printed to stdout/stderr during the execution of some command.
+
+   These LogFiles are stored to disk, so they can be retrieved later.
+
+   Each can contain multiple "channels", generally limited to three
+basic ones: stdout, stderr, and "headers". For example, when a
+ShellCommand runs, it writes a few lines to the "headers" channel to
+indicate the exact argv strings being run, which directory the command
+is being executed in, and the contents of the current environment
+variables. Then, as the command runs, it adds a lot of "stdout" and
+"stderr" messages. When the command finishes, a final "header" line
+is added with the exit code of the process.
+
+   Status display plugins can format these different channels in
+different ways. For example, the web page shows LogFiles as text/html,
+with header lines in blue text, stdout in black, and stderr in red. A
+different URL is available which provides a text/plain format, in
+which stdout and stderr are collapsed together, and header lines are
+stripped completely. This latter option makes it easy to save the
+results to a file and run `grep' or whatever against the output.
+
+   Each BuildStep contains a mapping (implemented in a python
+dictionary) from LogFile name to the actual LogFile objects. Status
+plugins can get a list of LogFiles to display, for example, a list of
+HREF links that, when clicked, provide the full contents of the
+LogFile.
+
+Using LogFiles in custom BuildSteps
+===================================
+
+The most common way for a custom BuildStep to use a LogFile is to
+summarize the results of a ShellCommand (after the command has
+finished running). For example, a compile step with thousands of lines
+of output might want to create a summary of just the warning messages.
+If you were doing this from a shell, you would use something like:
+
+     grep "warning:" output.log >warnings.log
+
+   In a custom BuildStep, you could instead create a "warnings"
+LogFile that contained the same text. To do this, you would add code
+to your `createSummary' method that pulls lines from the main output
+log and creates a new LogFile with the results:
+
+         def createSummary(self, log):
+             warnings = []
+             for line in log.readlines():
+                 if "warning:" in line:
+                     warnings.append(line)
+             self.addCompleteLog('warnings', "".join(warnings))
+
+   This example uses the `addCompleteLog' method, which creates a new
+LogFile, puts some text in it, and then "closes" it, meaning that no
+further contents will be added. This LogFile will appear in the HTML
+display under an HREF with the name "warnings", since that is the
+name of the LogFile.
+
+   You can also use `addHTMLLog' to create a complete (closed)
+LogFile that contains HTML instead of plain text. The normal LogFile
+will be HTML-escaped if presented through a web page, but the HTML
+LogFile will not. At the moment this is only used to present a pretty
+HTML representation of an otherwise ugly exception traceback when
+something goes badly wrong during the BuildStep.
+
+   In contrast, you might want to create a new LogFile at the
+beginning of the step, and add text to it as the command runs. You
+can create the LogFile and attach it to the build by calling
+`addLog', which returns the LogFile object. You then add text to this
+LogFile by calling methods like `addStdout' and `addHeader'. When you
+are done, you must call the `finish' method so the LogFile can be
+closed. It may be useful to create and populate a LogFile like this
+from a LogObserver method (*note Adding LogObservers::).
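+
+   For example, an incrementally-populated LogFile might be created and
+filled like this (a sketch, assuming the code runs inside a custom
+BuildStep method; the log name and the messages are purely
+illustrative):
+
+     progress = self.addLog("progress")        # create and attach the LogFile
+     progress.addHeader("starting the run\n")  # goes to the "headers" channel
+     progress.addStdout("compiled foo.c\n")    # goes to the "stdout" channel
+     progress.addStdout("compiled bar.c\n")
+     progress.finish()                         # close the LogFile when done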
+
+   The `logfiles=' argument to `ShellCommand' (see *note
+ShellCommand::) creates new LogFiles and fills them in realtime by
+asking the buildslave to watch an actual file on disk. The buildslave
+will look for additions in the target file and report them back to
+the BuildStep. These additions will be added to the LogFile by
+calling `addStdout'. These secondary LogFiles can be used as the
+source of a LogObserver just like the normal "stdio" LogFile.
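+
+   For example, a test step which writes its own log file could track
+that file in real time like this (a sketch; the log name and the
+slave-side path are illustrative):
+
+     from buildbot.steps.shell import ShellCommand
+
+     f.addStep(ShellCommand,
+               command=["make", "test"],
+               logfiles={"testlog": "logs/test.log"})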
+
+
+File: buildbot.info,  Node: Adding LogObservers,  Next: BuildStep URLs,  Prev: BuildStep LogFiles,  Up: Writing New BuildSteps
+
+6.1.7.2 Adding LogObservers
+...........................
+
+Most shell commands emit messages to stdout or stderr as they operate,
+especially if you ask them nicely with a `--verbose' flag of some
+sort. They may also write text to a log file while they run. Your
+BuildStep can watch this output as it arrives, to keep track of how
+much progress the command has made. You can get a better measure of
+progress by counting the number of source files compiled or test cases
+run than by merely tracking the number of bytes that have been written
+to stdout. This improves the accuracy and the smoothness of the ETA
+display.
+
+   To accomplish this, you will need to attach a `LogObserver' to one
+of the log channels, most commonly to the "stdio" channel but perhaps
+to another one which tracks a log file. This observer is given all
+text as it is emitted from the command, and has the opportunity to
+parse that output incrementally. Once the observer has decided that
+some event has occurred (like a source file being compiled), it can
+use the `setProgress' method to tell the BuildStep about the progress
+that this event represents.
+
+   There are a number of pre-built `LogObserver' classes that you can
+choose from (defined in `buildbot.process.buildstep'), and of course
+you can subclass them to add further customization. The
+`LogLineObserver' class handles the grunt work of buffering and
+scanning for end-of-line delimiters, allowing your parser to operate
+on complete stdout/stderr lines.
+
+   For example, let's take a look at the `TrialTestCaseCounter',
+which is used by the Trial step to count test cases as they are run.
+As Trial executes, it emits lines like the following:
+
+     buildbot.test.test_config.ConfigTest.testDebugPassword ... [OK]
+     buildbot.test.test_config.ConfigTest.testEmpty ... [OK]
+     buildbot.test.test_config.ConfigTest.testIRC ... [FAIL]
+     buildbot.test.test_config.ConfigTest.testLocks ... [OK]
+
+   When the tests are finished, trial emits a long line of "======"
+and then some lines which summarize the tests that failed. We want to
+avoid parsing these trailing lines, because their format is less
+well-defined than the "[OK]" lines.
+
+   The parser class looks like this:
+
+     import re
+     from buildbot.process.buildstep import LogLineObserver
+
+     class TrialTestCaseCounter(LogLineObserver):
+         _line_re = re.compile(r'^([\w\.]+) \.\.\. \[([^\]]+)\]$')
+         numTests = 0
+         finished = False
+
+         def outLineReceived(self, line):
+             if self.finished:
+                 return
+             if line.startswith("=" * 40):
+                 self.finished = True
+                 return
+
+             m = self._line_re.search(line.strip())
+             if m:
+                 testname, result = m.groups()
+                 self.numTests += 1
+                 self.step.setProgress('tests', self.numTests)
+
+   This parser only pays attention to stdout, since that's where trial
+writes the progress lines. It has a mode flag named `finished' to
+ignore everything after the "====" marker, and a scary-looking
+regular expression to match each line while hopefully ignoring other
+messages that might get displayed as the test runs.
+
+   Each time it identifies a test has been completed, it increments
+its counter and delivers the new progress value to the step with
+`self.step.setProgress'. This class is specifically measuring
+progress along the "tests" metric, in units of test cases (as opposed
+to other kinds of progress like the "output" metric, which measures
+in units of bytes). The Progress-tracking code uses each progress
+metric separately to come up with an overall completion percentage
+and an ETA value.
+
+   To connect this parser into the `Trial' BuildStep,
+`Trial.__init__' ends with the following clause:
+
+             # this counter will feed Progress along the 'test cases' metric
+             counter = TrialTestCaseCounter()
+             self.addLogObserver('stdio', counter)
+
+   This creates a TrialTestCaseCounter and tells the step that the
+counter wants to watch the "stdio" log. The observer is automatically
+given a reference to the step in its `.step' attribute.
+
+A Somewhat Whimsical Example
+----------------------------
+
+Let's say that we've got some snazzy new unit-test framework called
+Framboozle. It's the hottest thing since sliced bread. It slices, it
+dices, it runs unit tests like there's no tomorrow. Plus if your unit
+tests fail, you can use its name for a Web 2.1 startup company, make
+millions of dollars, and hire engineers to fix the bugs for you, while
+you spend your afternoons lazily hang-gliding along a scenic Pacific
+beach, blissfully unconcerned about the state of your tests.(1)
+
+   To run a Framboozle-enabled test suite, you just run the
+'framboozler' command from the top of your source code tree. The
+'framboozler' command emits a bunch of stuff to stdout, but the most
+interesting bit is that it emits the line "FNURRRGH!" every time it
+finishes running a test case(2). You'd like to have a test-case
+counting LogObserver that watches for these lines and counts them,
+because counting them will help the buildbot more accurately
+calculate how long the build will take, and this will let you know
+exactly how long you can sneak out of the office for your
+hang-gliding lessons without anyone noticing that you're gone.
+
+   This will involve writing a new BuildStep (probably named
+"Framboozle") which inherits from ShellCommand. The BuildStep class
+definition itself will look something like this:
+
+     # START
+     from buildbot.steps.shell import ShellCommand
+     from buildbot.process.buildstep import LogLineObserver
+
+     class FNURRRGHCounter(LogLineObserver):
+         numTests = 0
+         def outLineReceived(self, line):
+             if "FNURRRGH!" in line:
+                 self.numTests += 1
+                 self.step.setProgress('tests', self.numTests)
+
+     class Framboozle(ShellCommand):
+         command = ["framboozler"]
+
+         def __init__(self, **kwargs):
+             ShellCommand.__init__(self, **kwargs)   # always upcall!
+             counter = FNURRRGHCounter()
+             self.addLogObserver('stdio', counter)
+     # FINISH
+
+   So that's the code that we want to wind up using. How do we
+actually deploy it?
+
+   You have a couple of different options.
+
+   Option 1: The simplest technique is to simply put this text
+(everything from START to FINISH) in your master.cfg file, somewhere
+before the BuildFactory definition where you actually use it in a
+clause like:
+
+     f = BuildFactory()
+     f.addStep(SVN, svnurl="stuff")
+     f.addStep(Framboozle)
+
+   Remember that master.cfg is secretly just a python program with one
+job: populating the BuildmasterConfig dictionary. And python programs
+are allowed to define as many classes as they like. So you can define
+classes and use them in the same file, just as long as the class is
+defined before some other code tries to use it.
+
+   This is easy, and it keeps the point of definition very close to
+the point of use, and whoever replaces you after that unfortunate
+hang-gliding accident will appreciate being able to easily figure out
+what the heck this stupid "Framboozle" step is doing anyways. The
+downside is that every time you reload the config file, the Framboozle
+class will get redefined, which means that the buildmaster will think
+that you've reconfigured all the Builders that use it, even though
+nothing changed. Bleh.
+
+   Option 2: Instead, we can put this code in a separate file, and
+import it into the master.cfg file just like we would the normal
+buildsteps like ShellCommand and SVN.
+
+   Create a directory named ~/lib/python, put everything from START to
+FINISH in ~/lib/python/framboozle.py, and run your buildmaster using:
+
+      PYTHONPATH=~/lib/python buildbot start MASTERDIR
+
+   or use the `Makefile.buildbot' to control the way `buildbot start'
+works. Or add something like this to something like your ~/.bashrc or
+~/.bash_profile or ~/.cshrc:
+
+      export PYTHONPATH=~/lib/python
+
+   Once we've done this, our master.cfg can look like:
+
+     from framboozle import Framboozle
+     f = BuildFactory()
+     f.addStep(SVN, svnurl="stuff")
+     f.addStep(Framboozle)
+
+   or:
+
+     import framboozle
+     f = BuildFactory()
+     f.addStep(SVN, svnurl="stuff")
+     f.addStep(framboozle.Framboozle)
+
+   (check out the python docs for details about how "import" and
+"from A import B" work).
+
+   What we've done here is to tell python that every time it handles
+an "import" statement for some named module, it should look in our
+~/lib/python/ for that module before it looks anywhere else. After our
+directories, it will try in a bunch of standard directories too
+(including the one where buildbot is installed). By setting the
+PYTHONPATH environment variable, you can add directories to the front
+of this search list.
+
+   Python knows that once it "import"s a file, it doesn't need to
+re-import it again. This means that reconfiguring the buildmaster
+(with "buildbot reconfig", for example) won't make it think the
+Framboozle class has changed every time, so the Builders that use it
+will not be spuriously restarted. On the other hand, you either have
+to start your buildmaster in a slightly weird way, or you have to
+modify your environment to set the PYTHONPATH variable.
+
+   Option 3: Install this code into a standard python library
+directory
+
+   Find out what your python's standard include path is by asking it:
+
+     80:warner at luther% python
+     Python 2.4.4c0 (#2, Oct  2 2006, 00:57:46)
+     [GCC 4.1.2 20060928 (prerelease) (Debian 4.1.1-15)] on linux2
+     Type "help", "copyright", "credits" or "license" for more information.
+     >>> import sys
+     >>> print sys.path
+     ['', '/usr/lib/python24.zip', '/usr/lib/python2.4', '/usr/lib/python2.4/plat-linux2', '/usr/lib/python2.4/lib-tk', '/usr/lib/python2.4/lib-dynload', '/usr/local/lib/python2.4/site-packages', '/usr/lib/python2.4/site-packages', '/usr/lib/python2.4/site-packages/Numeric', '/var/lib/python-support/python2.4', '/usr/lib/site-python']
+     >>>
+
+   In this case, putting the code into
+/usr/local/lib/python2.4/site-packages/framboozle.py would work just
+fine. We can use the same master.cfg "import framboozle" statement as
+in Option 2. By putting it in a standard include directory (instead of
+the decidedly non-standard ~/lib/python), we don't even have to set
+PYTHONPATH to anything special. The downside is that you probably have
+to be root to write to one of those standard include directories.
+
+   Option 4: Submit the code for inclusion in the Buildbot
+distribution
+
+   Contribute the code in an Enhancement Request on SourceForge, via
+http://buildbot.sf.net . Lobby, convince, coerce, bribe, badger,
+harass, threaten, or otherwise encourage the author to accept the
+patch. This lets you do something like:
+
+     from buildbot.steps import framboozle
+     f = BuildFactory()
+     f.addStep(SVN, svnurl="stuff")
+     f.addStep(framboozle.Framboozle)
+
+   And then you don't even have to install framboozle.py anywhere on
+your system, since it will ship with Buildbot. You don't have to be
+root, you don't have to set PYTHONPATH. But you do have to make a
+good case for Framboozle being worth going into the main
+distribution, you'll probably have to provide docs and some unit test
+cases, you'll need to figure out what kind of beer the author likes,
+and then you'll have to wait until the next release. But in some
+environments, all this is easier than getting root on your
+buildmaster box, so the tradeoffs may actually be worth it.
+
+   Putting the code in master.cfg (1) makes it available to that
+buildmaster instance. Putting it in a file in a personal library
+directory (2) makes it available for any buildmasters you might be
+running. Putting it in a file in a system-wide shared library
+directory (3) makes it available for any buildmasters that anyone on
+that system might be running. Getting it into the buildbot's upstream
+repository (4) makes it available for any buildmasters that anyone in
+the world might be running. It's all a matter of how widely you want
+to deploy that new class.
+
+   ---------- Footnotes ----------
+
+   (1) framboozle.com is still available. Remember, I get 10% :).
+
+   (2) Framboozle gets very excited about running unit tests.
+
+
+File: buildbot.info,  Node: BuildStep URLs,  Prev: Adding LogObservers,  Up: Writing New BuildSteps
+
+6.1.7.3 BuildStep URLs
+......................
+
+Each BuildStep has a collection of "links". Like its collection of
+LogFiles, each link has a name and a target URL. The web status page
+creates HREFs for each link in the same box as it does for LogFiles,
+except that the target of the link is the external URL instead of an
+internal link to a page that shows the contents of the LogFile.
+
+   These external links can be used to point at build information
+hosted on other servers. For example, the test process might produce
+an intricate description of which tests passed and failed, or some
+sort of code coverage data in HTML form, or a PNG or GIF image with a
+graph of memory usage over time. The external link can provide an
+easy way for users to navigate from the buildbot's status page to
+these external web sites or file servers. Note that the step itself is
+responsible for ensuring that there will be a document available at
+the given URL (perhaps by using `scp' to copy the HTML output to a
+`~/public_html/' directory on a remote web server). Calling `addURL'
+does not magically populate a web server.
+
+   To set one of these links, the BuildStep should call the `addURL'
+method with the name of the link and the target URL. Multiple URLs can
+be set.
+
+   In this example, we assume that the `make test' command causes a
+collection of HTML files to be created and put somewhere on the
+coverage.example.org web server, in a filename that incorporates the
+build number.
+
+     class TestWithCodeCoverage(BuildStep):
+         command = ["make", "test",
+                    WithProperties("buildnum=%s", "buildnumber")]
+
+         def createSummary(self, log):
+             buildnumber = self.getProperty("buildnumber")
+             url = "http://coverage.example.org/builds/%s.html" % buildnumber
+             self.addURL("coverage", url)
+
+   You might also want to extract the URL from some special message
+output by the build process itself:
+
+     class TestWithCodeCoverage(BuildStep):
+         command = ["make", "test",
+                    WithProperties("buildnum=%s", "buildnumber")]
+
+         def createSummary(self, log):
+             output = StringIO(log.getText())
+             for line in output.readlines():
+                 if line.startswith("coverage-url:"):
+                     url = line[len("coverage-url:"):].strip()
+                     self.addURL("coverage", url)
+                     return
+
+   Note that a build process which emits both stdout and stderr might
+cause this line to be split or interleaved between other lines. It
+might be necessary to restrict the getText() call to only stdout with
+something like this:
+
+             output = StringIO("".join([c[1]
+                                        for c in log.getChunks()
+                                        if c[0] == LOG_CHANNEL_STDOUT]))
+
+   Of course if the build is run under a PTY, then stdout and stderr
+will be merged before the buildbot ever sees them, so such
+interleaving will be unavoidable.
+
+
+File: buildbot.info,  Node: Interlocks,  Next: Build Factories,  Prev: Build Steps,  Up: Build Process
+
+6.2 Interlocks
+==============
+
+For various reasons, you may want to prevent certain Steps (or perhaps
+entire Builds) from running simultaneously. Limited CPU speed or
+network bandwidth to the VC server, problems with simultaneous access
+to a database server used by unit tests, or multiple Builds which
+access shared state may all require some kind of interlock to prevent
+corruption, confusion, or resource overload. These resources might
+require completely exclusive access, or it might be sufficient to
+establish a limit of two or three simultaneous builds.
+
+   `Locks' are the mechanism used to express these kinds of
+constraints on when Builds or Steps can be run. There are two kinds of
+`Locks', each with its own scope: `MasterLock' instances are scoped
+to the buildbot as a whole, while `SlaveLock's are scoped to a single
+buildslave. This means that each buildslave has a separate copy of
+each `SlaveLock', which could enforce a one-Build-at-a-time limit for
+each machine, but still allow as many simultaneous builds as there
+are machines.
+
+   Each `Lock' is created with a unique name. Each lock gets a count
+of how many owners it may have: how many processes can claim it at the
+same time. This limit defaults to one, and is controllable through the
+`maxCount' argument. On `SlaveLock's you can set the owner count on a
+per-slave basis by providing a dictionary (that maps from slavename
+to maximum owner count) to its `maxCountForSlave' argument. Any
+buildslaves that aren't mentioned in `maxCountForSlave' get their
+owner count from `maxCount'.
+
+   To use a lock, simply include it in the `locks=' argument of the
+`BuildStep' object that should obtain the lock before it runs.  This
+argument accepts a list of `Lock' objects: the Step will acquire all
+of them before it runs.
+
+   To claim a lock for the whole Build, add a `'locks'' key to the
+builder specification dictionary with the same list of `Lock'
+objects. (This is the dictionary that has the `'name'',
+`'slavename'', `'builddir'', and `'factory'' keys). The `Build'
+object also accepts a `locks=' argument, but unless you are writing
+your own `BuildFactory' subclass then it will be easier to set the
+locks in the builder dictionary.
+
+   Note that there are no partial-acquire or partial-release
+semantics: this prevents deadlocks caused by two Steps each waiting
+for a lock held by the other(1). This also means that waiting to
+acquire a `Lock' can take an arbitrarily long time: if the
+buildmaster is very busy, a Step or Build which requires only one
+`Lock' may starve another that is waiting for that `Lock' plus some
+others.
+
+   In the following example, we run the same build on three different
+platforms. The unit-test steps of these builds all use a common
+database server, and would interfere with each other if allowed to run
+simultaneously. The `Lock' prevents more than one of these builds
+from happening at the same time.
+
+     from buildbot import locks
+     from buildbot.steps import source, shell
+     from buildbot.process import factory
+
+     db_lock = locks.MasterLock("database")
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, svnurl="http://example.org/svn/Trunk")
+     f.addStep(shell.ShellCommand, command="make all")
+     f.addStep(shell.ShellCommand, command="make test", locks=[db_lock])
+     b1 = {'name': 'full1', 'slavename': 'bot-1', 'builddir': 'f1', 'factory': f}
+     b2 = {'name': 'full2', 'slavename': 'bot-2', 'builddir': 'f2', 'factory': f}
+     b3 = {'name': 'full3', 'slavename': 'bot-3', 'builddir': 'f3', 'factory': f}
+     c['builders'] = [b1, b2, b3]
+
+   In the next example, we have one buildslave hosting three separate
+Builders (each running tests against a different version of Python).
+The machine which hosts this buildslave is not particularly fast, so
+we want to prevent all three builds from all happening at the same
+time. (Assume we've experimentally determined that one build leaves
+unused CPU capacity, three builds causes a lot of disk thrashing, but
+two builds at a time is Just Right). We use a `SlaveLock' because the
+builds happening on this one slow slave should not affect builds
+running on other slaves, and we use the lock on the build as a whole
+because the slave is so slow that even multiple simultaneous SVN
+checkouts would be too taxing. We set `maxCount=2' to achieve our
+goal of two simultaneous builds per slave.
+
+     from buildbot import locks
+     from buildbot.steps import source
+     from buildbot.process import factory
+     from buildbot.process.factory import s
+
+     slow_lock = locks.SlaveLock("cpu", maxCount=2)
+     source = s(source.SVN, svnurl="http://example.org/svn/Trunk")
+     f22 = factory.Trial(source, trialpython=["python2.2"])
+     f23 = factory.Trial(source, trialpython=["python2.3"])
+     f24 = factory.Trial(source, trialpython=["python2.4"])
+     b1 = {'name': 'p22', 'slavename': 'bot-1', 'builddir': 'p22', 'factory': f22,
+           'locks': [slow_lock] }
+     b2 = {'name': 'p23', 'slavename': 'bot-1', 'builddir': 'p23', 'factory': f23,
+           'locks': [slow_lock] }
+     b3 = {'name': 'p24', 'slavename': 'bot-1', 'builddir': 'p24', 'factory': f24,
+           'locks': [slow_lock] }
+     c['builders'] = [b1, b2, b3]
+
+   In the last example, we use two Locks at the same time. In this
+case, we're concerned about both of the previous constraints, but
+we'll say that only the tests are computationally intensive, and that
+they have been split into those which use the database and those
+which do not.  In addition, two of the Builds run on a fast machine
+which does not need to worry about the cpu lock, but which still must
+be prevented from simultaneous database access. We use
+`maxCountForSlave' to limit the slow machine to one simultaneous
+build, but allow practically unlimited concurrent builds on the fast
+machine.
+
+     from buildbot import locks
+     from buildbot.steps import source, shell
+     from buildbot.process import factory
+
+     db_lock = locks.MasterLock("database")
+     slavecounts = {"bot-slow": 1, "bot-fast": 100}
+     cpu_lock = locks.SlaveLock("cpu", maxCountForSlave=slavecounts)
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, svnurl="http://example.org/svn/Trunk")
+     f.addStep(shell.ShellCommand, command="make all", locks=[cpu_lock])
+     f.addStep(shell.ShellCommand, command="make test", locks=[cpu_lock])
+     f.addStep(shell.ShellCommand, command="make db-test",
+                                   locks=[db_lock, cpu_lock])
+
+     b1 = {'name': 'full1', 'slavename': 'bot-slow', 'builddir': 'full1',
+           'factory': f}
+     b2 = {'name': 'full2', 'slavename': 'bot-slow', 'builddir': 'full2',
+           'factory': f}
+     b3 = {'name': 'full3', 'slavename': 'bot-fast', 'builddir': 'full3',
+           'factory': f}
+     b4 = {'name': 'full4', 'slavename': 'bot-fast', 'builddir': 'full4',
+           'factory': f}
+     c['builders'] = [b1, b2, b3, b4]
+
+   As a final note, remember that a unit test system which breaks when
+multiple people run it at the same time is fragile and should be
+fixed. Asking your human developers to serialize themselves when
+running unit tests will just discourage them from running the unit
+tests at all. Find a way to fix this: change the database tests to
+create a new (uniquely-named) user or table for each test run, don't
+use fixed listening TCP ports for network tests (instead listen on
+port 0 to let the kernel choose a port for you and then query the
+socket to find out what port was allocated). `MasterLock's can be
+used to accommodate broken test systems like this, but are really
+intended for other purposes: build processes that store or retrieve
+products in shared directories, or which do things that human
+developers would not (or which might slow down or break in ways that
+require human attention to deal with).
+
+   `SlaveLock's can be used to keep automated performance tests from
+interfering with each other, when there are multiple Builders all
+using the same buildslave. But they can't prevent other users from
+running CPU-intensive jobs on that host while the tests are running.
+
+   ---------- Footnotes ----------
+
+   (1) Also note that a clever buildmaster admin could still create
+the opportunity for deadlock: Build A obtains Lock 1, inside which
+Step A.two tries to acquire Lock 2 at the Step level.  Meanwhile
+Build B obtains Lock 2, and has a Step B.two which wants to acquire
+Lock 1 at the Step level. Don't Do That.
+
+
+File: buildbot.info,  Node: Build Factories,  Prev: Interlocks,  Up: Build Process
+
+6.3 Build Factories
+===================
+
+Each Builder is equipped with a "build factory", which is responsible
+for producing the actual `Build' objects that perform each build.
+This factory is created in the configuration file, and attached to a
+Builder through the `factory' element of its dictionary.
+
+   The standard `BuildFactory' object creates `Build' objects by
+default. These Builds will each execute a collection of BuildSteps in
+a fixed sequence. Each step can affect the results of the build, but
+in general there is little intelligence to tie the different steps
+together. You can create subclasses of `Build' to implement more
+sophisticated build processes, and then use a subclass of
+`BuildFactory' (or simply set the `buildClass' attribute) to create
+instances of your new Build subclass.
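+
+   A minimal sketch of that approach (the subclass here is hypothetical
+and adds nothing; it only shows where the customization would go):
+
+     from buildbot.process.base import Build
+     from buildbot.process import factory
+
+     class MyBuild(Build):
+         # override Build methods here to implement a smarter process
+         pass
+
+     f = factory.BuildFactory()
+     f.buildClass = MyBuild   # this factory now produces MyBuild instances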
+
+* Menu:
+
+* BuildStep Objects::
+* BuildFactory::
+* Process-Specific build factories::
+
+
+File: buildbot.info,  Node: BuildStep Objects,  Next: BuildFactory,  Prev: Build Factories,  Up: Build Factories
+
+6.3.1 BuildStep Objects
+-----------------------
+
+The steps used by these builds are all subclasses of `BuildStep'.
+The standard ones provided with Buildbot are documented later, *Note
+Build Steps::. You can also write your own subclasses to use in
+builds.
+
+   The basic behavior for a `BuildStep' is to:
+
+   * run for a while, then stop
+
+   * possibly invoke some RemoteCommands on the attached build slave
+
+   * possibly produce a set of log files
+
+   * finish with a status described by one of four values defined in
+     buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, SKIPPED
+
+   * provide a list of short strings to describe the step
+
+   * define a color (generally green, orange, or red) with which the
+     step should be displayed
+
+   More sophisticated steps may produce additional information and
+provide it to later build steps, or store it in the factory to provide
+to later builds.
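+
+   As a deliberately trivial sketch of these responsibilities, the
+following hypothetical step performs no slave-side work at all: it just
+describes itself and finishes with SUCCESS:
+
+     from buildbot.process.buildstep import BuildStep
+     from buildbot.status.builder import SUCCESS
+
+     class NoOpStep(BuildStep):
+         name = "no-op"
+
+         def start(self):
+             # a real step would launch a RemoteCommand here and call
+             # self.finished() from its completion callback instead
+             self.finished(SUCCESS)
+
+         def describe(self, done=False):
+             return ["no-op"]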
+
+* Menu:
+
+* BuildFactory Attributes::
+* Quick builds::
+
+
+File: buildbot.info,  Node: BuildFactory,  Next: Process-Specific build factories,  Prev: BuildStep Objects,  Up: Build Factories
+
+6.3.2 BuildFactory
+------------------
+
+The default `BuildFactory', provided in the
+`buildbot.process.factory' module, contains a list of "BuildStep
+specifications": a list of `(step_class, kwargs)' tuples for each.
+When asked to create a Build, it loads the list of steps into the new
+Build object. When the Build is actually started, these step
+specifications are used to create the actual set of BuildSteps, which
+are then executed one at a time. For example, a build which consists
+of a CVS checkout followed by a `make build' would be constructed as
+follows:
+
+     from buildbot.steps import source, shell
+     from buildbot.process import factory
+
+     f = factory.BuildFactory()
+     f.addStep(source.CVS, cvsroot=CVSROOT, cvsmodule="project", mode="update")
+     f.addStep(shell.Compile, command=["make", "build"])
+
+   It is also possible to pass a list of step specifications into the
+`BuildFactory' when it is created. Using `addStep' is usually
+simpler, but there are cases where it is more convenient to create
+the list of steps ahead of time. To make this approach easier, a
+convenience function named `s' is available:
+
+     from buildbot.steps import source, shell
+     from buildbot.process import factory
+     from buildbot.process.factory import s
+     # s is a convenience function, defined with:
+     # def s(steptype, **kwargs): return (steptype, kwargs)
+
+     all_steps = [s(source.CVS, cvsroot=CVSROOT, cvsmodule="project",
+                    mode="update"),
+                  s(shell.Compile, command=["make", "build"]),
+                 ]
+     f = factory.BuildFactory(all_steps)
+
+   Each step can affect the build process in the following ways:
+
+   * If the step's `haltOnFailure' attribute is True, then a failure
+     in the step (i.e. if it completes with a result of FAILURE) will
+     cause the whole build to be terminated immediately: no further
+     steps will be executed. This is useful for setup steps upon
+     which the rest of the build depends: if the CVS checkout or
+     `./configure' process fails, there is no point in trying to
+     compile or test the resulting tree.
+
+   * If the `flunkOnFailure' or `flunkOnWarnings' flag is set, then a
+     result of FAILURE or WARNINGS will mark the build as a whole as
+     FAILED. However, the remaining steps will still be executed.
+     This is appropriate for things like multiple testing steps: a
+     failure in any one of them will indicate that the build has
+     failed, however it is still useful to run them all to completion.
+
+   * Similarly, if the `warnOnFailure' or `warnOnWarnings' flag is
+     set, then a result of FAILURE or WARNINGS will mark the build as
+     having WARNINGS, and the remaining steps will still be executed.
+     This may be appropriate for certain kinds of optional build or
+     test steps.  For example, a failure experienced while building
+     documentation files should be made visible with a WARNINGS
+     result but not be serious enough to warrant marking the whole
+     build with a FAILURE.
+
+
+   In addition, each Step produces its own results, may create
+logfiles, etc. However only the flags described above have any effect
+on the build as a whole.
+
+   The pre-defined BuildSteps like `CVS' and `Compile' have
+reasonably appropriate flags set on them already. For example, without
+a source tree there is no point in continuing the build, so the `CVS'
+class has the `haltOnFailure' flag set to True. Look in
+`buildbot/process/step.py' to see how the other Steps are marked.
+
+   Each Step is created with an additional `workdir' argument that
+indicates where its actions should take place. This is specified as a
+subdirectory of the slave builder's base directory, with a default
+value of `build'. This is only implemented as a step argument (as
+opposed to simply being a part of the base directory) because the
+CVS/SVN steps need to perform their checkouts from the parent
+directory.
+
+* Menu:
+
+* BuildFactory Attributes::
+* Quick builds::
+
+
+File: buildbot.info,  Node: BuildFactory Attributes,  Next: Quick builds,  Prev: BuildFactory,  Up: BuildFactory
+
+6.3.2.1 BuildFactory Attributes
+...............................
+
+Some attributes from the BuildFactory are copied into each Build.
+
+`useProgress'
+     (defaults to True): if True, the buildmaster keeps track of how
+     long each step takes, so it can provide estimates of how long
+     future builds will take. If builds are not expected to take a
+     consistent amount of time (such as incremental builds in which a
+     random set of files are recompiled or tested each time), this
+     should be set to False to inhibit progress-tracking.
+
+
+
+File: buildbot.info,  Node: Quick builds,  Prev: BuildFactory Attributes,  Up: BuildFactory
+
+6.3.2.2 Quick builds
+....................
+
+The difference between a "full build" and a "quick build" is that
+quick builds are generally done incrementally, starting with the tree
+where the previous build was performed. That simply means that the
+source-checkout step should be given a `mode='update'' flag, to do
+the source update in-place.
+
+   In addition to that, the `useProgress' flag should be set to
+False. Incremental builds will (or at least they ought to) compile as
+few files as necessary, so they will take an unpredictable amount of
+time to run. Therefore it would be misleading to claim to predict how
+long the build will take.
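+
+   Putting those two pieces together, a quick-build factory might look
+like this (a sketch; the SVN URL is illustrative):
+
+     from buildbot.steps import source, shell
+     from buildbot.process import factory
+
+     f = factory.BuildFactory()
+     f.addStep(source.SVN, svnurl="http://example.org/svn/Trunk",
+               mode="update")
+     f.addStep(shell.Compile, command=["make", "build"])
+     f.useProgress = False   # incremental builds take unpredictable time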
+
+
+File: buildbot.info,  Node: Process-Specific build factories,  Prev: BuildFactory,  Up: Build Factories
+
+6.3.3 Process-Specific build factories
+--------------------------------------
+
+Many projects use one of a few popular build frameworks to simplify
+the creation and maintenance of Makefiles or other compilation
+structures. Buildbot provides several pre-configured BuildFactory
+subclasses which let you build these projects with a minimum of fuss.
+
+* Menu:
+
+* GNUAutoconf::
+* CPAN::
+* Python distutils::
+* Python/Twisted/trial projects::
+
+
+File: buildbot.info,  Node: GNUAutoconf,  Next: CPAN,  Prev: Process-Specific build factories,  Up: Process-Specific build factories
+
+6.3.3.1 GNUAutoconf
+...................
+
+GNU Autoconf (http://www.gnu.org/software/autoconf/) is a software
+portability tool, intended to make it possible to write programs in C
+(and other languages) which will run on a variety of UNIX-like
+systems. Most GNU software is built using autoconf. It is frequently
+used in combination with GNU automake. These tools both encourage a
+build process which usually looks like this:
+
+     % CONFIG_ENV=foo ./configure --with-flags
+     % make all
+     % make check
+     # make install
+
+   (except of course the Buildbot always skips the `make install'
+part).
+
+   The Buildbot's `buildbot.process.factory.GNUAutoconf' factory is
+designed to build projects which use GNU autoconf and/or automake. The
+configuration environment variables, the configure flags, and command
+lines used for the compile and test are all configurable, in general
+the default values will be suitable.
+
+   Example:
+
+     # use the s() convenience function defined earlier
+     f = factory.GNUAutoconf(source=s(source.SVN, svnurl=URL, mode="copy"),
+                             configureFlags=["--disable-nls"])
+
+   Required Arguments:
+
+`source'
+     This argument must be a step specification tuple that provides a
+     BuildStep to generate the source tree.
+
+   Optional Arguments:
+
+`configure'
+     The command used to configure the tree. Defaults to
+     `./configure'. Accepts either a string or a list of shell argv
+     elements.
+
+`configureEnv'
+     The environment used for the initial configuration step. This
+     accepts a dictionary which will be merged into the buildslave's
+     normal environment. This is commonly used to provide things like
+     `CFLAGS="-O2 -g"' (to set optimization and debug options for the compile).
+     Defaults to an empty dictionary.
+
+`configureFlags'
+     A list of flags to be appended to the argument list of the
+     configure command. This is commonly used to enable or disable
+     specific features of the autoconf-controlled package, like
+     `["--without-x"]' to disable windowing support. Defaults to an
+     empty list.
+
+`compile'
+     this is a shell command or list of argv values which is used to
+     actually compile the tree. It defaults to `make all'. If set to
+     None, the compile step is skipped.
+
+`test'
+     this is a shell command or list of argv values which is used to
+     run the tree's self-tests. It defaults to `make check'. If set to
+     None, the test step is skipped.
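+
+   A fuller example exercising the optional arguments described above
+(a sketch; the URL, flags, and environment settings are illustrative):
+
+     from buildbot.process import factory
+     from buildbot.process.factory import s
+     from buildbot.steps import source
+
+     f = factory.GNUAutoconf(
+             source=s(source.SVN, svnurl="http://example.org/svn/Trunk",
+                      mode="copy"),
+             configureEnv={"CFLAGS": "-O2 -g"},
+             configureFlags=["--without-x"],
+             compile=["make", "all"],
+             test=["make", "check"])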
+
+
+
+File: buildbot.info,  Node: CPAN,  Next: Python distutils,  Prev: GNUAutoconf,  Up: Process-Specific build factories
+
+6.3.3.2 CPAN
+............
+
+Most Perl modules available from the CPAN (http://www.cpan.org/)
+archive use the `MakeMaker' module to provide configuration, build,
+and test services. The standard build routine for these modules looks
+like:
+
+     % perl Makefile.PL
+     % make
+     % make test
+     # make install
+
+   (except again Buildbot skips the install step)
+
+   Buildbot provides a `CPAN' factory to compile and test these
+projects.
+
+   Arguments:
+`source'
+     (required): A step specification tuple, like that used by
+     GNUAutoconf.
+
+`perl'
+     A string which specifies the `perl' executable to use. Defaults
+     to just `perl'.
+
+
+
+File: buildbot.info,  Node: Python distutils,  Next: Python/Twisted/trial projects,  Prev: CPAN,  Up: Process-Specific build factories
+
+6.3.3.3 Python distutils
+........................
+
+Most Python modules use the `distutils' package to provide
+configuration and build services. The standard build process looks
+like:
+
+     % python ./setup.py build
+     % python ./setup.py install
+
+   Unfortunately, although Python provides a standard unit-test
+framework named `unittest', to the best of my knowledge `distutils'
+does not provide a standardized target to run such unit tests. (please
+let me know if I'm wrong, and I will update this factory).
+
+   The `Distutils' factory provides support for running the build
+part of this process. It accepts the same `source=' parameter as the
+other build factories.
+
+   Arguments:
+`source'
+     (required): A step specification tuple, like that used by
+     GNUAutoconf.
+
+`python'
+     A string which specifies the `python' executable to use. Defaults
+     to just `python'.
+
+`test'
+     Provides a shell command which runs unit tests. This accepts
+     either a string or a list. The default value is None, which
+     disables the test step (since there is no common default command
+     to run unit tests in distutils modules).
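+
+   Example (a sketch; the SVN URL is illustrative, and the test command
+assumes a hypothetical run_tests.py script, since distutils defines no
+standard test target):
+
+     from buildbot.process import factory
+     from buildbot.process.factory import s
+     from buildbot.steps import source
+
+     f = factory.Distutils(source=s(source.SVN,
+                                    svnurl="http://example.org/svn/Trunk",
+                                    mode="copy"),
+                           python="python2.4",
+                           test=["python", "./run_tests.py"])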
+
+
+
+File: buildbot.info,  Node: Python/Twisted/trial projects,  Prev: Python distutils,  Up: Process-Specific build factories
+
+6.3.3.4 Python/Twisted/trial projects
+.....................................
+
+Twisted provides a unit test tool named `trial' which provides a few
+improvements over Python's built-in `unittest' module. Many python
+projects which use Twisted for their networking or application
+services also use trial for their unit tests. These modules are
+usually built and tested with something like the following:
+
+     % python ./setup.py build
+     % PYTHONPATH=build/lib.linux-i686-2.3 trial -v PROJECTNAME.test
+     % python ./setup.py install
+
+   Unfortunately, the `build/lib' directory into which the
+built/copied .py files are placed is actually architecture-dependent,
+and I do not yet know of a simple way to calculate its value. For many
+projects it is sufficient to import their libraries "in place" from
+the tree's base directory (`PYTHONPATH=.').
+
+   In addition, the PROJECTNAME value where the test files are
+located is project-dependent: it is usually just the project's
+top-level library directory, as common practice suggests the unit test
+files are put in the `test' sub-module. This value cannot be guessed,
+the `Trial' class must be told where to find the test files.
+
+   The `Trial' class provides support for building and testing
+projects which use distutils and trial. If the test module name is
+specified, trial will be invoked. The library path used for testing
+can also be set.
+
+   One advantage of trial is that the Buildbot happens to know how to
+parse trial output, letting it identify which tests passed and which
+ones failed. The Buildbot can then provide fine-grained reports about
+how many tests have failed, when individual tests fail when they had
+been passing previously, etc.
+
+   Another feature of trial is that you can give it a series of source
+.py files, and it will search them for special `test-case-name' tags
+that indicate which test cases provide coverage for that file.  Trial
+can then run just the appropriate tests. This is useful for quick
+builds, where you want to only run the test cases that cover the
+changed functionality.
+
+   Arguments:
+`source'
+     (required): A step specification tuple, like that used by
+     GNUAutoconf.
+
+`buildpython'
+     A list (argv array) of strings which specifies the `python'
+     executable to use when building the package. Defaults to just
+     `['python']'. It may be useful to add flags here, to suppress
+     warnings during compilation of extension modules. This list is
+     extended with `['./setup.py', 'build']' and then executed in a
+     ShellCommand.
+
+`testpath'
+     Provides a directory to add to `PYTHONPATH' when running the unit
+     tests, if tests are being run. Defaults to `.' to include the
+     project files in-place. The generated build library is frequently
+     architecture-dependent, but may simply be `build/lib' for
+     pure-python modules.
+
+`trialpython'
+     Another list of strings used to build the command that actually
+     runs trial. This is prepended to the contents of the `trial'
+     argument below. It may be useful to add `-W' flags here to
+     suppress warnings that occur while tests are being run. Defaults
+     to an empty list, meaning `trial' will be run without an explicit
+     interpreter, which is generally what you want if you're using
+     `/usr/bin/trial' instead of, say, the `./bin/trial' that lives
+     in the Twisted source tree.
+
+`trial'
+     provides the name of the `trial' command. It is occasionally
+     useful to use an alternate executable, such as `trial2.2' which
+     might run the tests under an older version of Python. Defaults to
+     `trial'.
+
+`tests'
+     Provides a module name or names which contain the unit tests for
+     this project. Accepts a string, typically `PROJECTNAME.test', or
+     a list of strings. Defaults to None, indicating that no tests
+     should be run. You must either set this or `useTestCaseNames' to
+     do anything useful with the Trial factory.
+
+`useTestCaseNames'
+     Tells the Step to provide the names of all changed .py files to
+     trial, so it can look for test-case-name tags and run just the
+     matching test cases. Suitable for use in quick builds. Defaults
+     to False.
+
+`randomly'
+     If `True', tells Trial (with the `--random=0' argument) to run
+     the test cases in random order, which sometimes catches subtle
+     inter-test dependency bugs. Defaults to `False'.
+
+`recurse'
+     If `True', tells Trial (with the `--recurse' argument) to look
+     in all subdirectories for additional test cases. It isn't clear
+     to me how this works, but it may be useful to deal with the
+     unknown-PROJECTNAME problem described above, and is currently
+     used in the Twisted buildbot to accommodate the fact that test
+     cases are now distributed through multiple
+     twisted.SUBPROJECT.test directories.
+
+
+   Unless one of `tests' or `useTestCaseNames' is set, no
+tests will be run.
+
+   Some quick examples follow. Most of these examples assume that the
+target python code (the "code under test") can be reached directly
+from the root of the target tree, rather than being in a `lib/'
+subdirectory.
+
+     #  Trial(source, tests="toplevel.test") does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. trial -to toplevel.test
+
+     #  Trial(source, tests=["toplevel.test", "other.test"]) does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. trial -to toplevel.test other.test
+
+     #  Trial(source, useTestCaseNames=True) does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. trial -to --testmodule=foo/bar.py..  (from Changes)
+
+     #  Trial(source, buildpython=["python2.3", "-Wall"], tests="foo.tests"):
+     #   python2.3 -Wall ./setup.py build
+     #   PYTHONPATH=. trial -to foo.tests
+
+     #  Trial(source, trialpython="python2.3", trial="/usr/bin/trial",
+     #        tests="foo.tests") does:
+     #   python ./setup.py build
+     #   PYTHONPATH=. python2.3 /usr/bin/trial -to foo.tests
+
+     # For running trial out of the tree being tested (only useful when the
+     # tree being built is Twisted itself):
+     #  Trial(source, trialpython=["python2.3", "-Wall"], trial="./bin/trial",
+     #        tests="foo.tests") does:
+     #   python2.3 -Wall ./setup.py build
+     #   PYTHONPATH=. python2.3 -Wall ./bin/trial -to foo.tests
+
+   If the output directory of `./setup.py build' is known, you can
+pull the python code from the built location instead of the source
+directories. This should be able to handle variations in where the
+source comes from, as well as accommodating binary extension modules:
+
+     # Trial(source,tests="toplevel.test",testpath='build/lib.linux-i686-2.3')
+     # does:
+     #  python ./setup.py build
+     #  PYTHONPATH=build/lib.linux-i686-2.3 trial -to toplevel.test
+
+
+File: buildbot.info,  Node: Status Delivery,  Next: Command-line tool,  Prev: Build Process,  Up: Top
+
+7 Status Delivery
+*****************
+
+More details are available in the docstrings for each class, use
+`pydoc buildbot.status.html.Waterfall' to see them. Most status
+delivery objects take a `categories=' argument, which can contain a
+list of "category" names: in this case, it will only show status for
+Builders that are in one of the named categories.
+
+   (implementor's note: each of these objects should be a
+service.MultiService which will be attached to the BuildMaster object
+when the configuration is processed. They should use
+`self.parent.getStatus()' to get access to the top-level IStatus
+object, either inside `startService' or later. They may call
+`status.subscribe()' in `startService' to receive notifications of
+builder events, in which case they must define `builderAdded' and
+related methods. See the docstrings in `buildbot/interfaces.py' for
+full details.)
+
+* Menu:
+
+* HTML Waterfall::
+* IRC Bot::
+* PBListener::
+* Writing New Status Plugins::
+
+
+File: buildbot.info,  Node: HTML Waterfall,  Next: IRC Bot,  Prev: Status Delivery,  Up: Status Delivery
+
+7.1 HTML Waterfall
+==================
+
+     from buildbot.status import html
+     w = html.Waterfall(http_port=8080)
+     c['status'].append(w)
+
+   The `buildbot.status.html.Waterfall' status target creates an HTML
+"waterfall display", which shows a time-based chart of events.  This
+display provides detailed information about all steps of all recent
+builds, and provides hyperlinks to look at individual build logs and
+source changes. If the `http_port' argument is provided, it provides
+a strports specification for the port that the web server should
+listen on. This can be a simple port number, or a string like
+`tcp:8080:interface=127.0.0.1' (to limit connections to the loopback
+interface, and therefore to clients running on the same host)(1).
+
+   If instead (or in addition) you provide the `distrib_port'
+argument, a twisted.web distributed server will be started either on a
+TCP port (if `distrib_port' is like `"tcp:12345"') or more likely on
+a UNIX socket (if `distrib_port' is like `"unix:/path/to/socket"').
+
+   The `distrib_port' option means that, on a host with a
+suitably-configured twisted-web server, you do not need to consume a
+separate TCP port for the buildmaster's status web page. When the web
+server is constructed with `mktap web --user', URLs that point to
+`http://host/~username/' are dispatched to a sub-server that is
+listening on a UNIX socket at `~username/.twistd-web-pb'. On such a
+system, it is convenient to create a dedicated `buildbot' user, then
+set `distrib_port' to
+`"unix:"+os.path.expanduser("~/.twistd-web-pb")'. This configuration
+will make the HTML status page available at `http://host/~buildbot/'
+. Suitable URL remapping can make it appear at
+`http://host/buildbot/', and the right virtual host setup can even
+place it at `http://buildbot.host/' .
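+
+   Following that convention, the configuration might look like this (a
+sketch of the setup just described):
+
+     import os
+     from buildbot.status import html
+
+     w = html.Waterfall(distrib_port="unix:" +
+                        os.path.expanduser("~/.twistd-web-pb"))
+     c['status'].append(w)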
+
+   Other arguments:
+
+`allowForce'
+     If set to True (the default), then the web page will provide a
+     "Force Build" button that allows visitors to manually trigger
+     builds. This is useful for developers to re-run builds that have
+     failed because of intermittent problems in the test suite, or
+     because of libraries that were not installed at the time of the
+     previous build. You may not wish to allow strangers to cause a
+     build to run: in that case, set this to False to remove these
+     buttons.
+
+`favicon'
+     If set to a string, this will be interpreted as a filename
+     containing a "favicon": a small image that contains an icon for
+     the web site.  This is returned to browsers that request the
+     `favicon.ico' file, and should point to a .png or .ico image
+     file. The default value uses the buildbot/buildbot.png image (a
+     small hex nut) contained in the buildbot distribution. You can
+     set this to None to avoid using a favicon at all.
+
+`robots_txt'
+     If set to a string, this will be interpreted as a filename
+     containing the contents of "robots.txt". Many search engine
+     spiders request this file before indexing the site. Setting it
+     to a file which contains:
+          User-agent: *
+          Disallow: /
+     will prevent most search engines from trawling the (voluminous)
+     generated status pages.
+
+
+   ---------- Footnotes ----------
+
+   (1) It may even be possible to provide SSL access by using a
+specification like
+`"ssl:12345:privateKey=mykey.pem:certKey=cert.pem"', but this is
+completely untested
+
+
+File: buildbot.info,  Node: IRC Bot,  Next: PBListener,  Prev: HTML Waterfall,  Up: Status Delivery
+
+7.2 IRC Bot
+===========
+
+The `buildbot.status.words.IRC' status target creates an IRC bot
+which will attach to certain channels and be available for status
+queries. It can also be asked to announce builds as they occur, or be
+told to shut up.
+
+     from buildbot.status import words
+     irc = words.IRC("irc.example.org", "botnickname",
+                     channels=["channel1", "channel2"],
+                     password="mysecretpassword")
+     c['status'].append(irc)
+
+   Take a look at the docstring for `words.IRC' for more details on
+configuring this service. The `password' argument, if provided, will
+be sent to Nickserv to claim the nickname: some IRC servers will not
+allow clients to send private messages until they have logged in with
+a password.
+
+   To use the service, you address messages at the buildbot, either
+normally (`botnickname: status') or with private messages (`/msg
+botnickname status'). The buildbot will respond in kind.
+
+   Some of the commands currently available:
+
+`list builders'
+     Emit a list of all configured builders
+
+`status BUILDER'
+     Announce the status of a specific Builder: what it is doing
+     right now.
+
+`status all'
+     Announce the status of all Builders
+
+`watch BUILDER'
+     If the given Builder is currently running, wait until the Build
+     is finished and then announce the results.
+
+`last BUILDER'
+     Return the results of the last build to run on the given Builder.
+
+`help COMMAND'
+     Describe a command. Use `help commands' to get a list of known
+     commands.
+
+`source'
+     Announce the URL of the Buildbot's home page.
+
+`version'
+     Announce the version of this Buildbot.
+
+   If the `allowForce=True' option was used, some additional commands
+will be available:
+
+`force build BUILDER REASON'
+     Tell the given Builder to start a build of the latest code. The
+     user requesting the build and REASON are recorded in the Build
+     status. The buildbot will announce the build's status when it
+     finishes.
+
+`stop build BUILDER REASON'
+     Terminate any running build in the given Builder. REASON will be
+     added to the build status to explain why it was stopped. You
+     might use this if you committed a bug, corrected it right away,
+     and don't want to wait for the first build (which is destined to
+     fail) to complete before starting the second (hopefully fixed)
+     build.
+
+
+File: buildbot.info,  Node: PBListener,  Next: Writing New Status Plugins,  Prev: IRC Bot,  Up: Status Delivery
+
+7.3 PBListener
+==============
+
+     import buildbot.status.client
+     pbl = buildbot.status.client.PBListener(port=int, user=str,
+                                             passwd=str)
+     c['status'].append(pbl)
+
+   This sets up a PB listener on the given TCP port, to which a
+PB-based status client can connect and retrieve status information.
+`buildbot statusgui' (*note statusgui::) is an example of such a
+status client. The `port' argument can also be a strports
+specification string.
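+
+   For example, a sketch that uses a strports string to bind the
+listener only to the loopback interface (the port number and
+credentials here are illustrative):
+
+     import buildbot.status.client
+     pbl = buildbot.status.client.PBListener(
+                           port="tcp:9988:interface=127.0.0.1",
+                           user="statususer", passwd="changeme")
+     c['status'].append(pbl)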
+
+
+File: buildbot.info,  Node: Writing New Status Plugins,  Prev: PBListener,  Up: Status Delivery
+
+7.4 Writing New Status Plugins
+==============================
+
+TODO: this needs a lot more examples
+
+   Each status plugin is an object which provides the
+`twisted.application.service.IService' interface, which creates a
+tree of Services with the buildmaster at the top [not strictly true].
+The status plugins are all children of an object which implements
+`buildbot.interfaces.IStatus', the main status object. From this
+object, the plugin can retrieve anything it wants about current and
+past builds. It can also subscribe to hear about new and upcoming
+builds.
+
+   Status plugins which only react to human queries (like the
+Waterfall display) never need to subscribe to anything: they are idle
+until someone asks a question, then wake up and extract the
+information they need to answer it, then they go back to sleep.
+Plugins which need to act spontaneously when builds complete (like
+the Mail plugin) need to subscribe to hear about new builds.
+
+   If the status plugin needs to run network services (like the HTTP
+server used by the Waterfall plugin), they can be attached as Service
+children of the plugin itself, using the `IServiceCollection'
+interface.
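+
+   As a rough sketch only (the class and method names below follow
+the pattern used by the bundled `MailNotifier'; consult
+`buildbot/status/mail.py' for the authoritative details), a minimal
+plugin that subscribes to the main status object and reports finished
+builds might look like:
+
+     from buildbot.status import base
+
+     class BuildAnnouncer(base.StatusReceiverMultiService):
+         # illustrative plugin, not part of the buildbot distribution
+         def setServiceParent(self, parent):
+             base.StatusReceiverMultiService.setServiceParent(self, parent)
+             # 'parent' is the buildmaster: ask it for the IStatus object
+             self.status = parent.getStatus()
+             # subscribe so the methods below get called
+             self.status.subscribe(self)
+
+         def builderAdded(self, name, builder):
+             # returning self asks to hear about this Builder's builds
+             return self
+
+         def buildFinished(self, builderName, build, results):
+             print "%s finished with results %s" % (builderName, results)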
+
+
+File: buildbot.info,  Node: Command-line tool,  Next: Resources,  Prev: Status Delivery,  Up: Top
+
+8 Command-line tool
+*******************
+
+The `buildbot' command-line tool can be used to start or stop a
+buildmaster or buildbot, and to interact with a running buildmaster.
+Some of its subcommands are intended for buildmaster admins, while
+some are for developers who are editing the code that the buildbot is
+monitoring.
+
+* Menu:
+
+* Administrator Tools::
+* Developer Tools::
+* Other Tools::
+* .buildbot config directory::
+
+
+File: buildbot.info,  Node: Administrator Tools,  Next: Developer Tools,  Prev: Command-line tool,  Up: Command-line tool
+
+8.1 Administrator Tools
+=======================
+
+The following `buildbot' sub-commands are intended for buildmaster
+administrators:
+
+create-master
+=============
+
+This creates a new directory and populates it with files that allow it
+to be used as a buildmaster's base directory.
+
+     buildbot create-master BASEDIR
+
+create-slave
+============
+
+This creates a new directory and populates it with files that let it
+be used as a buildslave's base directory. You must provide several
+arguments, which are used to create the initial `buildbot.tac' file.
+
+     buildbot create-slave BASEDIR MASTERHOST:PORT SLAVENAME PASSWORD
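+
+   For example (the hostname, slave name, and password are
+illustrative):
+
+     buildbot create-slave /home/bb/slave buildmaster.example.org:9989 osx sekrit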
+
+start
+=====
+
+This starts a buildmaster or buildslave which was already created in
+the given base directory. The daemon is launched in the background,
+with events logged to a file named `twistd.log'.
+
+     buildbot start BASEDIR
+
+stop
+====
+
+This terminates the daemon (either buildmaster or buildslave) running
+in the given directory.
+
+     buildbot stop BASEDIR
+
+sighup
+======
+
+This sends a SIGHUP to the buildmaster running in the given directory,
+which causes it to re-read its `master.cfg' file.
+
+     buildbot sighup BASEDIR
+
+
+File: buildbot.info,  Node: Developer Tools,  Next: Other Tools,  Prev: Administrator Tools,  Up: Command-line tool
+
+8.2 Developer Tools
+===================
+
+These tools are provided for use by the developers who are working on
+the code that the buildbot is monitoring.
+
+* Menu:
+
+* statuslog::
+* statusgui::
+* try::
+
+
+File: buildbot.info,  Node: statuslog,  Next: statusgui,  Prev: Developer Tools,  Up: Developer Tools
+
+8.2.1 statuslog
+---------------
+
+     buildbot statuslog --master MASTERHOST:PORT
+
+   This command starts a simple text-based status client, one which
+just prints out a new line each time an event occurs on the
+buildmaster.
+
+   The `--master' option provides the location of the
+`buildbot.status.client.PBListener' status port, used to deliver
+build information to realtime status clients. The option is always in
+the form of a string, with hostname and port number separated by a
+colon (`HOSTNAME:PORTNUM'). Note that this port is _not_ the same as
+the slaveport (although a future version may allow the same port
+number to be used for both purposes). If you get an error message to
+the effect of "Failure: twisted.cred.error.UnauthorizedLogin:", this
+may indicate that you are connecting to the slaveport rather than a
+`PBListener' port.
+
+   The `--master' option can also be provided by the `masterstatus'
+name in `.buildbot/options' (*note .buildbot config directory::).
+
+
+File: buildbot.info,  Node: statusgui,  Next: try,  Prev: statuslog,  Up: Developer Tools
+
+8.2.2 statusgui
+---------------
+
+If you have set up a PBListener (*note PBListener::), you will be able
+to monitor your Buildbot using a simple Gtk+ application invoked with
+the `buildbot statusgui' command:
+
+     buildbot statusgui --master MASTERHOST:PORT
+
+   This command starts a simple Gtk+-based status client, which
+contains a few boxes for each Builder that change color as events
+occur. It uses the same `--master' argument as the `buildbot
+statuslog' command (*note statuslog::).
+
+
+File: buildbot.info,  Node: try,  Prev: statusgui,  Up: Developer Tools
+
+8.2.3 try
+---------
+
+This lets a developer ask the question "What would happen if I
+committed this patch right now?". It runs the unit test suite (across
+multiple build platforms) on the developer's current code, allowing
+them to make sure they will not break the tree when they finally
+commit their changes.
+
+   The `buildbot try' command is meant to be run from within a
+developer's local tree, and starts by figuring out the base revision
+of that tree (what revision was current the last time the tree was
+updated), and a patch that can be applied to that revision of the tree
+to make it match the developer's copy. This (revision, patch) pair is
+then sent to the buildmaster, which runs a build with that
+SourceStamp. If you want, the tool will emit status messages as the
+builds run, and will not terminate until the first failure has been
+detected (or the last success).
+
+   For this command to work, several pieces must be in place:
+
+TryScheduler
+============
+
+The buildmaster must have a `scheduler.Try' instance in the config
+file's `c['schedulers']' list. This lets the administrator control
+who may initiate these "trial" builds, which branches are eligible
+for trial builds, and which Builders should be used for them.
+
+   The `TryScheduler' has various means to accept build requests: all
+of them enforce more security than the usual buildmaster ports do.
+Any source code being built can be used to compromise the buildslave
+accounts, but in general that code must be checked out from the VC
+repository first, so only people with commit privileges can get
+control of the buildslaves. The usual force-build control channels can
+waste buildslave time but do not allow arbitrary commands to be
+executed by people who don't have those commit privileges. However,
+the source code patch that is provided with the trial build does not
+have to go through the VC system first, so it is important to make
+sure these builds cannot be abused by a non-committer to acquire as
+much control over the buildslaves as a committer has. Ideally, only
+developers who have commit access to the VC repository would be able
+to start trial builds, but unfortunately the buildmaster does not, in
+general, have access to the VC system's user list.
+
+   As a result, the `TryScheduler' requires a bit more configuration.
+There are currently two ways to set this up:
+
+*jobdir (ssh)*
+     This approach creates a command queue directory, called the
+     "jobdir", in the buildmaster's working directory. The buildmaster
+     admin sets the ownership and permissions of this directory to
+     only grant write access to the desired set of developers, all of
+     whom must have accounts on the machine. The `buildbot try'
+     command creates a special file containing the source stamp
+     information and drops it in the jobdir, just like a standard
+     maildir. When the buildmaster notices the new file, it unpacks
+     the information inside and starts the builds.
+
+     The config file entries used by 'buildbot try' either specify a
+     local queuedir (for which write and mv are used) or a remote one
+     (using scp and ssh).
+
+     The advantage of this scheme is that it is quite secure, the
+     disadvantage is that it requires fiddling outside the buildmaster
+     config (to set the permissions on the jobdir correctly). If the
+     buildmaster machine happens to also house the VC repository,
+     then it can be fairly easy to keep the VC userlist in sync with
+     the trial-build userlist. If they are on different machines,
+     this will be much more of a hassle. It may also involve granting
+     developer accounts on a machine that would not otherwise require
+     them.
+
+     To implement this, the `buildbot try' command invokes 'ssh -l
+     username host buildbot tryserver ARGS', passing the patch
+     contents over stdin.
+     The arguments must include the inlet directory and the revision
+     information.
+
+*user+password (PB)*
+     In this approach, each developer gets a username/password pair,
+     which are all listed in the buildmaster's configuration file.
+     When the developer runs `buildbot try', their machine connects
+     to the buildmaster via PB and authenticates themselves using
+     that username and password, then sends a PB command to start the
+     trial build.
+
+     The advantage of this scheme is that the entire configuration is
+     performed inside the buildmaster's config file. The
+     disadvantages are that it is less secure (while the "cred"
+     authentication system does not expose the password in plaintext
+     over the wire, it does not offer most of the other security
+     properties that SSH does). In addition, the buildmaster admin is
+     responsible for maintaining the username/password list, adding
+     and deleting entries as developers come and go.
+
+
+   For example, to set up the "jobdir" style of trial build, using a
+command queue directory of `MASTERDIR/jobdir' (and assuming that all
+your project developers were members of the `developers' unix group),
+you would first create that directory (with `mkdir MASTERDIR/jobdir
+MASTERDIR/jobdir/new MASTERDIR/jobdir/cur MASTERDIR/jobdir/tmp; chgrp
+developers MASTERDIR/jobdir MASTERDIR/jobdir/*; chmod g+rwx,o-rwx
+MASTERDIR/jobdir MASTERDIR/jobdir/*'), and then use the following
+scheduler in the buildmaster's config file:
+
+     from buildbot.scheduler import Try_Jobdir
+     s = Try_Jobdir("try1", ["full-linux", "full-netbsd", "full-OSX"],
+                    jobdir="jobdir")
+     c['schedulers'] = [s]
+
+   Note that you must create the jobdir before telling the
+buildmaster to use this configuration, otherwise you will get an
+error. Also remember that the buildmaster must be able to read and
+write to the jobdir as well. Be sure to watch the `twistd.log' file
+(*note Logfiles::) as you start using the jobdir, to make sure the
+buildmaster is happy with it.
+
+   To use the username/password form of authentication, create a
+`Try_Userpass' instance instead. It takes the same `builderNames'
+argument as the `Try_Jobdir' form, but accepts an additional `port'
+argument (to specify the TCP port to listen on) and a `userpass' list
+of username/password pairs to accept. Remember to use good passwords
+for this: the security of the buildslave accounts depends upon it:
+
+     from buildbot.scheduler import Try_Userpass
+     s = Try_Userpass("try2", ["full-linux", "full-netbsd", "full-OSX"],
+                      port=8031, userpass=[("alice","pw1"), ("bob", "pw2")] )
+     c['schedulers'] = [s]
+
+   Like most places in the buildbot, the `port' argument takes a
+strports specification. See `twisted.application.strports' for
+details.
+
+locating the master
+===================
+
+The `try' command needs to be told how to connect to the
+`TryScheduler', and must know which of the authentication approaches
+described above is in use by the buildmaster. You specify the
+approach by using `--connect=ssh' or `--connect=pb' (or `try_connect
+= 'ssh'' or `try_connect = 'pb'' in `.buildbot/options').
+
+   For the PB approach, the command must be given a `--master'
+argument (in the form HOST:PORT) that points to the TCP port that you
+picked in the `Try_Userpass' scheduler. It also takes a `--username'
+and `--passwd' pair of arguments that match one of the entries in the
+buildmaster's `userpass' list. These arguments can also be provided
+as `try_master', `try_username', and `try_password' entries in the
+`.buildbot/options' file.
+
+   For the SSH approach, the command must be given `--tryhost',
+`--username', and optionally `--password' (TODO: really?) to get to
+the buildmaster host. It must also be given `--trydir', which points
+to the inlet directory configured above. The trydir can be relative
+to the user's home directory, but most of the time you will use an
+explicit path like `~buildbot/project/trydir'. These arguments can be
+provided in `.buildbot/options' as `try_host', `try_username',
+`try_password', and `try_dir'.
+
+   In addition, the SSH approach needs to connect to a PBListener
+status port, so it can retrieve and report the results of the build
+(the PB approach uses the existing connection to retrieve status
+information, so this step is not necessary). This requires a
+`--master' argument, or a `masterstatus' entry in `.buildbot/options',
+in the form of a HOSTNAME:PORT string.
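+
+   For example, a `.buildbot/options' file for the PB approach might
+contain (the host, port, and credentials are illustrative, matching
+the `Try_Userpass' example above):
+
+     try_connect = 'pb'
+     try_master = 'buildbot.example.org:8031'
+     try_username = 'alice'
+     try_password = 'pw1'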
+
+choosing the Builders
+=====================
+
+A trial build is performed on multiple Builders at the same time, and
+the developer gets to choose which Builders are used (limited to a set
+selected by the buildmaster admin with the TryScheduler's
+`builderNames=' argument). The set you choose will depend upon what
+your goals are: if you are concerned about cross-platform
+compatibility, you should use multiple Builders, one from each
+platform of interest. You might use just one builder if that platform
+has libraries or other facilities that allow better test coverage than
+what you can accomplish on your own machine, or faster test runs.
+
+   The set of Builders to use can be specified with multiple
+`--builder' arguments on the command line. It can also be specified
+with a single `try_builders' option in `.buildbot/options' that uses
+a list of strings to specify all the Builder names:
+
+     try_builders = ["full-OSX", "full-win32", "full-linux"]
+
+specifying the VC system
+========================
+
+The `try' command also needs to know how to take the developer's
+current tree and extract the (revision, patch) source-stamp pair.
+Each VC system uses a different process, so you start by telling the
+`try' command which VC system you are using, with an argument like
+`--vc=cvs' or `--vc=tla'.  This can also be provided as `try_vc' in
+`.buildbot/options'.
+
+   The following names are recognized: `cvs', `svn', `baz', `tla',
+`hg', and `darcs'.
+
+finding the top of the tree
+===========================
+
+Some VC systems (notably CVS and SVN) track each directory
+more-or-less independently, which means the `try' command needs to
+move up to the top of the project tree before it will be able to
+construct a proper full-tree patch. To accomplish this, the `try'
+command will crawl up through the parent directories until it finds a
+marker file. The default name for this marker file is
+`.buildbot-top', so when you are using CVS or SVN you should `touch
+.buildbot-top' from the top of your tree before running `buildbot
+try'. Alternatively, you can use a filename like `ChangeLog' or
+`README', since many projects put one of these files in their
+top-most directory (and nowhere else). To set this filename, use
+`--try-topfile=ChangeLog', or set it in the options file with
+`try_topfile = 'ChangeLog''.
+
+   You can also manually set the top of the tree with
+`--try-topdir=~/trees/mytree', or `try_topdir = '~/trees/mytree''. If
+you use `try_topdir', in a `.buildbot/options' file, you will need a
+separate options file for each tree you use, so it may be more
+convenient to use the `try_topfile' approach instead.
+
+   Other VC systems which work on full projects instead of individual
+directories (tla, baz, darcs, monotone, mercurial) do not require
+`try' to know the top directory, so the `--try-topfile' and
+`--try-topdir' arguments will be ignored.
+
+   If the `try' command cannot find the top directory, it will abort
+with an error message.
+
+determining the branch name
+===========================
+
+Some VC systems record the branch information in a way that "try" can
+locate it, in particular Arch (both `tla' and `baz'). For the others,
+if you are using something other than the default branch, you will
+have to tell the buildbot which branch your tree is using. You can do
+this with either the `--branch' argument, or a `try_branch' entry in
+the `.buildbot/options' file.
+
+determining the revision and patch
+==================================
+
+Each VC system has a separate approach for determining the tree's base
+revision and computing a patch.
+
+`CVS'
+     `try' pretends that the tree is up to date. It converts the
+     current time into a `-D' time specification, uses it as the base
+     revision, and computes the diff between the upstream tree as of
+     that point in time versus the current contents. This works, more
+     or less, but requires that the local clock be in reasonably good
+     sync with the repository.
+
+`SVN'
+     `try' does a `svn status -u' to find the latest repository
+     revision number (emitted on the last line in the "Status against
+     revision: NN" message). It then performs an `svn diff -rNN' to
+     find out how your tree differs from the repository version, and
+     sends the resulting patch to the buildmaster. If your tree is not
+     up to date, this will result in the "try" tree being created with
+     the latest revision, then _backwards_ patches applied to bring it
+     "back" to the version you actually checked out (plus your actual
+     code changes), but this will still result in the correct tree
+     being used for the build.
+
+`baz'
+     `try' does a `baz tree-id' to determine the fully-qualified
+     version and patch identifier for the tree
+     (ARCHIVE/VERSION-patch-NN), and uses the VERSION-patch-NN
+     component as the base revision. It then does a `baz diff' to
+     obtain the patch.
+
+`tla'
+     `try' does a `tla tree-version' to get the fully-qualified
+     version identifier (ARCHIVE/VERSION), then takes the first line
+     of `tla logs --reverse' to figure out the base revision. Then it
+     does `tla changes --diffs' to obtain the patch.
+
+`Darcs'
+     `darcs changes --context' emits a text file that contains a list
+     of all patches back to and including the last tag that was made.
+     This text file (plus the location of a repository that contains all
+     these patches) is sufficient to re-create the tree. Therefore
+     the contents of this "context" file _are_ the revision stamp for
+     a Darcs-controlled source tree.
+
+     So `try' does a `darcs changes --context' to determine what your
+     tree's base revision is, and then does a `darcs diff -u' to
+     compute the patch relative to that revision.
+
+`Mercurial'
+     `hg identify' emits a short revision ID (basically a truncated
+     SHA1 hash of the current revision's contents), which is used as
+     the base revision. `hg diff' then provides the patch relative to
+     that revision. For `try' to work, your working directory must
+     only have patches that are available from the same
+     remotely-available repository that the build process'
+     `step.Mercurial' will use.
+
+
+waiting for results
+===================
+
+If you provide the `--wait' option (or `try_wait = True' in
+`.buildbot/options'), the `buildbot try' command will wait until your
+changes have either been proven good or bad before exiting. Unless
+you use the `--quiet' option (or `try_quiet=True'), it will emit a
+progress message every 60 seconds until the builds have completed.
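+
+   Putting the pieces together, a PB-style invocation might look like
+this (all values are illustrative):
+
+     buildbot try --connect=pb --master=buildbot.example.org:8031 \
+         --username=alice --passwd=pw1 --vc=svn \
+         --builder=full-linux --builder=full-OSX --wait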
+
+
+File: buildbot.info,  Node: Other Tools,  Next: .buildbot config directory,  Prev: Developer Tools,  Up: Command-line tool
+
+8.3 Other Tools
+===============
+
+These tools are generally used by buildmaster administrators.
+
+* Menu:
+
+* sendchange::
+* debugclient::
+
+
+File: buildbot.info,  Node: sendchange,  Next: debugclient,  Prev: Other Tools,  Up: Other Tools
+
+8.3.1 sendchange
+----------------
+
+This command is used to tell the buildmaster about source changes. It
+is intended to be used from within a commit script, installed on the
+VC server. It requires that you have a PBChangeSource (*note
+PBChangeSource::) running in the buildmaster (by being included in
+the `c['sources']' list).
+
+     buildbot sendchange --master MASTERHOST:PORT --username USER FILENAMES..
+
+   There are other (optional) arguments which can influence the
+`Change' that gets submitted:
+
+`--branch'
+     This provides the (string) branch specifier. If omitted, it
+     defaults to None, indicating the "default branch". All files
+     included in this Change must be on the same branch.
+
+`--revision_number'
+     This provides a (numeric) revision number for the change, used
+     for VC systems that use numeric transaction numbers (like
+     Subversion).
+
+`--revision'
+     This provides a (string) revision specifier, for VC systems that
+     use strings (Arch would use something like patch-42 etc).
+
+`--revision_file'
+     This provides a filename which will be opened and the contents
+     used as the revision specifier. This is specifically for Darcs,
+     which uses the output of `darcs changes --context' as a revision
+     specifier.  This context file can be a couple of kilobytes long,
+     spanning a couple lines per patch, and would be a hassle to pass
+     as a command-line argument.
+
+`--comments'
+     This provides the change comments as a single argument. You may
+     want to use `--logfile' instead.
+
+`--logfile'
+     This instructs the tool to read the change comments from the
+     given file. If you use `-' as the filename, the tool will read
+     the change comments from stdin.
+
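+   For example, a post-commit script might invoke something like this
+(the host, port, branch, and revision number are illustrative):
+
+     buildbot sendchange --master buildbot.example.org:9989 \
+         --username bob --branch trunk --revision_number 1234 \
+         --logfile /tmp/commit-message lib/foo.c lib/foo.h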
+
+File: buildbot.info,  Node: debugclient,  Prev: sendchange,  Up: Other Tools
+
+8.3.2 debugclient
+-----------------
+
+     buildbot debugclient --master MASTERHOST:PORT --passwd DEBUGPW
+
+   This launches a small Gtk+/Glade-based debug tool, connecting to
+the buildmaster's "debug port". This debug port shares the same port
+number as the slaveport (*note Setting the slaveport::), but the
+`debugPort' is only enabled if you set a debug password in the
+buildmaster's config file (*note Debug options::). The `--passwd'
+option must match the `c['debugPassword']' value.
+
+   `--master' can also be provided in `.buildbot/options' by the
+`master' key. `--passwd' can be provided by the `debugPassword' key.
+
+   The `Connect' button must be pressed before any of the other
+buttons will be active. This establishes the connection to the
+buildmaster. The other sections of the tool are as follows:
+
+`Reload .cfg'
+     Forces the buildmaster to reload its `master.cfg' file. This is
+     equivalent to sending a SIGHUP to the buildmaster, but can be
+     done remotely through the debug port. Note that it is a good
+     idea to be watching the buildmaster's `twistd.log' as you reload
+     the config file, as any errors which are detected in the config
+     file will be announced there.
+
+`Rebuild .py'
+     (not yet implemented). The idea here is to use Twisted's
+     "rebuild" facilities to replace the buildmaster's running code
+     with a new version. Even if this worked, it would only be used
+     by buildbot developers.
+
+`poke IRC'
+     This locates a `words.IRC' status target and causes it to emit a
+     message on all the channels to which it is currently connected.
+     This was used to debug a problem in which the buildmaster lost
+     the connection to the IRC server and did not attempt to
+     reconnect.
+
+`Commit'
+     This allows you to inject a Change, just as if a real one had
+     been delivered by whatever VC hook you are using. You can set
+     the name of the committed file and the name of the user who is
+     doing the commit.  Optionally, you can also set a revision for
+     the change. If the revision you provide looks like a number, it
+     will be sent as an integer, otherwise it will be sent as a
+     string.
+
+`Force Build'
+     This lets you force a Builder (selected by name) to start a
+     build of the current source tree.
+
+`Currently'
+     (obsolete). This was used to manually set the status of the given
+     Builder, but the status-assignment code was changed in an
+     incompatible way and these buttons are no longer meaningful.
+
+
+
+File: buildbot.info,  Node: .buildbot config directory,  Prev: Other Tools,  Up: Command-line tool
+
+8.4 .buildbot config directory
+==============================
+
+Many of the `buildbot' tools must be told how to contact the
+buildmaster that they interact with. This specification can be
+provided as a command-line argument, but most of the time it will be
+easier to set them in an "options" file. The `buildbot' command will
+look for a special directory named `.buildbot', starting from the
+current directory (where the command was run) and crawling upwards,
+eventually looking in the user's home directory. It will look for a
+file named `options' in this directory, and will evaluate it as a
+python script, looking for certain names to be set.  You can just put
+simple `name = 'value'' pairs in this file to set the options.
+
+   For a description of the names used in this file, please see the
+documentation for the individual `buildbot' sub-commands. The
+following is a brief sample of what this file's contents could be.
+
+     # for status-reading tools
+     masterstatus = 'buildbot.example.org:12345'
+     # for 'sendchange' or the debug port
+     master = 'buildbot.example.org:18990'
+     debugPassword = 'eiv7Po'
+
+`masterstatus'
+     Location of the `client.PBListener' status port, used by
+     `statuslog' and `statusgui'.
+
+`master'
+     Location of the `debugPort' (for `debugclient'). Also the
+     location of the `pb.PBChangeSource' (for `sendchange').  Usually
+     shares the slaveport, but a future version may make it possible
+     to have these listen on a separate port number.
+
+`debugPassword'
+     Must match the value of `c['debugPassword']', used to protect the
+     debug port, for the `debugclient' command.
+
+`username'
+     Provides a default username for the `sendchange' command.
+
+
+   The following options are used by the `buildbot try' command
+(*note try::):
+
+`try_connect'
+     This specifies how the "try" command should deliver its request
+     to the buildmaster. The currently accepted values are "ssh" and
+     "pb".
+
+`try_builders'
+     Which builders should be used for the "try" build.
+
+`try_vc'
+     This specifies the version control system being used.
+
+`try_branch'
+     This indicates that the current tree is on a non-trunk branch.
+
+`try_topdir'
+
+`try_topfile'
+     Use `try_topdir' to explicitly indicate the top of your working
+     tree, or `try_topfile' to name a file that will only be found in
+     that top-most directory.
+
+`try_host'
+
+`try_username'
+
+`try_dir'
+     When try_connect is "ssh", the command will pay attention to
+     `try_host', `try_username', and `try_dir'.
+
+`try_username'
+
+`try_password'
+
+`try_master'
+     Instead, when `try_connect' is "pb", the command will pay
+     attention to `try_username', `try_password', and `try_master'.
+
+`try_wait'
+
+`masterstatus'
+     `try_wait' and `masterstatus' are used to ask the "try" command
+     to wait for the requested build to complete.
+
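+   A sample `.buildbot/options' file using the ssh style of delivery
+might therefore look like this (all values are illustrative):
+
+     try_connect = 'ssh'
+     try_host = 'buildbot.example.org'
+     try_username = 'alice'
+     try_dir = '~buildbot/project/trydir'
+     try_topfile = 'ChangeLog'
+     try_vc = 'svn'
+     try_builders = ['full-linux', 'full-OSX']
+     try_wait = True
+     masterstatus = 'buildbot.example.org:12345'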
+
+
+File: buildbot.info,  Node: Resources,  Next: Developer's Appendix,  Prev: Command-line tool,  Up: Top
+
+9 Resources
+***********
+
+The Buildbot's home page is at `http://buildbot.sourceforge.net/'
+
+   For configuration questions and general discussion, please use the
+`buildbot-devel' mailing list. The subscription instructions and
+archives are available at
+`http://lists.sourceforge.net/lists/listinfo/buildbot-devel'
+
+
+File: buildbot.info,  Node: Developer's Appendix,  Next: Index of Useful Classes,  Prev: Resources,  Up: Top
+
+Developer's Appendix
+********************
+
+This appendix contains random notes about the implementation of the
+Buildbot, and is likely to only be of use to people intending to
+extend the Buildbot's internals.
+
+   The buildmaster consists of a tree of Service objects, which is
+shaped as follows:
+
+     BuildMaster
+      ChangeMaster  (in .change_svc)
+       [IChangeSource instances]
+      [IScheduler instances]  (in .schedulers)
+      BotMaster  (in .botmaster)
+      [IStatusTarget instances]  (in .statusTargets)
+
+   The BotMaster has a collection of Builder objects as values of its
+`.builders' dictionary.
+
+
+File: buildbot.info,  Node: Index of Useful Classes,  Next: Index of master.cfg keys,  Prev: Developer's Appendix,  Up: Top
+
+Index of Useful Classes
+***********************
+
+This is a list of all user-visible classes. These are the ones that
+are useful in `master.cfg', the buildmaster's configuration file.
+Classes that are not listed here are generally internal things that
+admins are unlikely to have much use for.
+
+Change Sources
+==============
+
+ [index ]
+* Menu:
+
+* buildbot.changes.bonsaipoller.BonsaiPoller: BonsaiPoller.   (line 6)
+* buildbot.changes.freshcvs.FreshCVSSource: CVSToys - PBService.
+                                                              (line 6)
+* buildbot.changes.mail.BonsaiMaildirSource: Other mail notification ChangeSources.
+                                                              (line 6)
+* buildbot.changes.mail.FCMaildirSource: CVSToys - mail notification.
+                                                              (line 6)
+* buildbot.changes.mail.SyncmailMaildirSource: Other mail notification ChangeSources.
+                                                              (line 6)
+* buildbot.changes.p4poller.P4Source:    P4Source.            (line 6)
+* buildbot.changes.pb.PBChangeSource:    PBChangeSource.      (line 6)
+* buildbot.changes.svnpoller.SVNPoller:  SVNPoller.           (line 6)
+
+Schedulers and Locks
+====================
+
+ [index ]
+* Menu:
+
+* buildbot.locks.MasterLock:             Interlocks.         (line  6)
+* buildbot.locks.SlaveLock:              Interlocks.         (line  6)
+* buildbot.scheduler.AnyBranchScheduler: Scheduler Types.    (line  6)
+* buildbot.scheduler.Dependent:          Build Dependencies. (line  6)
+* buildbot.scheduler.Nightly:            Scheduler Types.    (line  6)
+* buildbot.scheduler.Periodic:           Scheduler Types.    (line  6)
+* buildbot.scheduler.Scheduler:          Scheduler Types.    (line  6)
+* buildbot.scheduler.Try_Jobdir:         try.                (line 27)
+* buildbot.scheduler.Try_Userpass:       try.                (line 27)
+
+Build Factories
+===============
+
+ [index ]
+* Menu:
+
+* buildbot.process.factory.BasicBuildFactory: BuildFactory.   (line 6)
+* buildbot.process.factory.BasicSVN:     BuildFactory.        (line 6)
+* buildbot.process.factory.BuildFactory: BuildFactory.        (line 6)
+* buildbot.process.factory.CPAN:         CPAN.                (line 6)
+* buildbot.process.factory.Distutils:    Python distutils.    (line 6)
+* buildbot.process.factory.GNUAutoconf:  GNUAutoconf.         (line 6)
+* buildbot.process.factory.QuickBuildFactory: Quick builds.   (line 6)
+* buildbot.process.factory.Trial:        Python/Twisted/trial projects.
+                                                              (line 6)
+
+Build Steps
+===========
+
+ [index ]
+* Menu:
+
+* buildbot.steps.maxq.MaxQ:              Index of Useful Classes.
+                                                             (line 65)
+* buildbot.steps.python.BuildEPYDoc:     BuildEPYDoc.        (line  6)
+* buildbot.steps.python.PyFlakes:        PyFlakes.           (line  6)
+* buildbot.steps.python_twisted.BuildDebs: Python/Twisted/trial projects.
+                                                             (line  6)
+* buildbot.steps.python_twisted.HLint:   Python/Twisted/trial projects.
+                                                             (line  6)
+* buildbot.steps.python_twisted.ProcessDocs: Python/Twisted/trial projects.
+                                                             (line  6)
+* buildbot.steps.python_twisted.RemovePYCs: Python/Twisted/trial projects.
+                                                             (line  6)
+* buildbot.steps.python_twisted.Trial:   Python/Twisted/trial projects.
+                                                             (line  6)
+* buildbot.steps.shell.Compile:          Compile.            (line  6)
+* buildbot.steps.shell.Configure:        Configure.          (line  6)
+* buildbot.steps.shell.ShellCommand:     ShellCommand.       (line  6)
+* buildbot.steps.shell.Test:             Test.               (line  6)
+* buildbot.steps.source.Arch:            Arch.               (line  6)
+* buildbot.steps.source.Bazaar:          Bazaar.             (line  6)
+* buildbot.steps.source.CVS:             CVS.                (line  6)
+* buildbot.steps.source.Darcs:           Darcs.              (line  6)
+* buildbot.steps.source.Git:             Index of Useful Classes.
+                                                             (line 65)
+* buildbot.steps.source.Mercurial:       Mercurial.          (line  6)
+* buildbot.steps.source.P4:              P4.                 (line  6)
+* buildbot.steps.source.SVN:             SVN.                (line  6)
+* buildbot.steps.transfer.FileDownload:  Transferring Files. (line  6)
+* buildbot.steps.transfer.FileUpload:    Transferring Files. (line  6)
+
+Status Targets
+==============
+
+ [index ]
+* Menu:
+
+* buildbot.status.client.PBListener:     PBListener.        (line   6)
+* buildbot.status.html.Waterfall:        HTML Waterfall.    (line   6)
+* buildbot.status.mail.MailNotifier:     Index of Useful Classes.
+                                                            (line 101)
+* buildbot.status.words.IRC:             IRC Bot.           (line   6)
+
+
+File: buildbot.info,  Node: Index of master.cfg keys,  Next: Index,  Prev: Index of Useful Classes,  Up: Top
+
+Index of master.cfg keys
+************************
+
+This is a list of all of the significant keys in master.cfg.  Recall
+that master.cfg is effectively a small python program with one
+responsibility: create a dictionary named `BuildmasterConfig'.  The
+keys of this dictionary are listed here. The beginning of the
+master.cfg file typically starts with something like:
+
+     BuildmasterConfig = c = {}
+
+   Therefore a config key of `sources' will usually appear in
+master.cfg as `c['sources']'.
+
+ [index ]
+* Menu:
+
+* c['bots']:                             Buildslave Specifiers.
+                                                             (line  6)
+* c['buildbotURL']:                      Defining the Project.
+                                                             (line 24)
+* c['builders']:                         Defining Builders.  (line  6)
+* c['debugPassword']:                    Debug options.      (line  6)
+* c['manhole']:                          Debug options.      (line 17)
+* c['projectName']:                      Defining the Project.
+                                                             (line 15)
+* c['projectURL']:                       Defining the Project.
+                                                             (line 19)
+* c['schedulers']:                       Listing Change Sources and Schedulers.
+                                                             (line 14)
+* c['slavePortnum']:                     Setting the slaveport.
+                                                             (line  6)
+* c['sources']:                          Listing Change Sources and Schedulers.
+                                                             (line  6)
+* c['status']:                           Defining Status Targets.
+                                                             (line 11)
+
+
+File: buildbot.info,  Node: Index,  Prev: Index of master.cfg keys,  Up: Top
+
+Index
+*****
+
+ [index ]
+* Menu:
+
+* addURL:                                BuildStep URLs.     (line  6)
+* Arch Checkout:                         Arch.               (line  6)
+* Bazaar Checkout:                       Bazaar.             (line  6)
+* build properties:                      Build Properties.   (line  6)
+* Builder:                               Builder.            (line  6)
+* BuildRequest:                          BuildRequest.       (line  6)
+* BuildSet:                              BuildSet.           (line  6)
+* BuildStep URLs:                        BuildStep URLs.     (line  6)
+* Configuration:                         Configuration.      (line  6)
+* CVS Checkout:                          CVS.                (line  6)
+* Darcs Checkout:                        Darcs.              (line  6)
+* Dependencies:                          Build Dependencies. (line  6)
+* Dependent:                             Build Dependencies. (line  6)
+* File Transfer:                         Transferring Files. (line  6)
+* installation:                          Installing the code.
+                                                             (line  6)
+* introduction:                          Introduction.       (line  6)
+* IRC:                                   IRC Bot.            (line  6)
+* links:                                 BuildStep URLs.     (line  6)
+* locks:                                 Interlocks.         (line  6)
+* logfiles:                              Logfiles.           (line  6)
+* LogLineObserver:                       Adding LogObservers.
+                                                             (line  6)
+* LogObserver:                           Adding LogObservers.
+                                                             (line  6)
+* Mercurial Checkout:                    Mercurial.          (line  6)
+* PBListener:                            PBListener.         (line  6)
+* Perforce Update:                       P4.                 (line  6)
+* Philosophy of operation:               History and Philosophy.
+                                                             (line  6)
+* Scheduler:                             Schedulers.         (line  6)
+* statusgui:                             statusgui.          (line  6)
+* SVN Checkout:                          SVN.                (line  6)
+* treeStableTimer:                       BuildFactory Attributes.
+                                                             (line  8)
+* Users:                                 Users.              (line  6)
+* Version Control:                       Version Control Systems.
+                                                             (line  6)
+* Waterfall:                             HTML Waterfall.     (line  6)
+* WithProperties:                        Build Properties.   (line 32)
+
+
+
+Tag Table:
+Node: Top332
+Node: Introduction4132
+Node: History and Philosophy6009
+Node: System Architecture8734
+Node: BuildSlave Connections11331
+Node: Buildmaster Architecture13444
+Node: Status Delivery Architecture18769
+Node: Control Flow20965
+Node: Installation23803
+Node: Requirements24118
+Node: Installing the code26352
+Node: Creating a buildmaster28302
+Node: Creating a buildslave30738
+Node: Buildslave Options36089
+Node: Launching the daemons39036
+Ref: Launching the daemons-Footnote-141903
+Node: Logfiles42078
+Node: Shutdown42617
+Node: Maintenance43552
+Node: Troubleshooting44944
+Node: Starting the buildslave45215
+Node: Connecting to the buildmaster46346
+Node: Forcing Builds47387
+Node: Concepts48137
+Node: Version Control Systems48515
+Ref: Version Control Systems-Footnote-149357
+Node: Generalizing VC Systems49503
+Ref: Generalizing VC Systems-Footnote-152965
+Node: Source Tree Specifications53186
+Ref: Source Tree Specifications-Footnote-156059
+Ref: Source Tree Specifications-Footnote-256253
+Node: How Different VC Systems Specify Sources56383
+Node: Attributes of Changes60780
+Node: Schedulers64471
+Node: BuildSet66861
+Node: BuildRequest69520
+Node: Builder70508
+Node: Users71781
+Node: Doing Things With Users72905
+Node: Email Addresses75270
+Node: IRC Nicknames77326
+Node: Live Status Clients78561
+Node: Configuration79183
+Node: Config File Format80415
+Node: Loading the Config File82790
+Node: Defining the Project84482
+Node: Listing Change Sources and Schedulers86090
+Ref: Listing Change Sources and Schedulers-Footnote-189443
+Node: Scheduler Types89560
+Node: Build Dependencies91700
+Node: Setting the slaveport93930
+Node: Buildslave Specifiers95348
+Node: Defining Builders96315
+Node: Defining Status Targets99874
+Node: Debug options100954
+Node: Getting Source Code Changes104997
+Node: Change Sources106131
+Node: Choosing ChangeSources109850
+Node: CVSToys - PBService110967
+Node: CVSToys - mail notification113727
+Node: Other mail notification ChangeSources115095
+Node: PBChangeSource115616
+Node: P4Source118882
+Node: BonsaiPoller120367
+Node: SVNPoller121028
+Node: Build Process132394
+Node: Build Steps133594
+Node: Common Parameters134911
+Node: Source Checkout136929
+Node: CVS142152
+Node: SVN143294
+Node: Darcs149164
+Node: Mercurial150870
+Node: Arch151784
+Node: Bazaar152580
+Node: P4153102
+Node: ShellCommand154607
+Node: Simple ShellCommand Subclasses159986
+Node: Configure160494
+Node: Compile160912
+Node: Test161345
+Node: Build Properties161596
+Ref: Build Properties-Footnote-1167310
+Node: Python BuildSteps167580
+Node: BuildEPYDoc167891
+Node: PyFlakes169397
+Node: Transferring Files170314
+Node: Writing New BuildSteps174159
+Node: BuildStep LogFiles175081
+Node: Adding LogObservers179556
+Ref: Adding LogObservers-Footnote-1191808
+Ref: Adding LogObservers-Footnote-2191875
+Node: BuildStep URLs191938
+Node: Interlocks195036
+Ref: Interlocks-Footnote-1203197
+Node: Build Factories203507
+Node: BuildStep Objects204484
+Node: BuildFactory205556
+Node: BuildFactory Attributes209657
+Node: Quick builds210319
+Node: Process-Specific build factories211055
+Node: GNUAutoconf211599
+Node: CPAN214178
+Node: Python distutils214939
+Node: Python/Twisted/trial projects216213
+Node: Status Delivery223088
+Node: HTML Waterfall224158
+Ref: HTML Waterfall-Footnote-1227484
+Node: IRC Bot227653
+Node: PBListener230128
+Node: Writing New Status Plugins230739
+Node: Command-line tool232001
+Node: Administrator Tools232527
+Node: Developer Tools233803
+Node: statuslog234122
+Node: statusgui235203
+Node: try235787
+Node: Other Tools250663
+Node: sendchange250926
+Node: debugclient252750
+Node: .buildbot config directory255326
+Node: Resources258282
+Node: Developer's Appendix258703
+Node: Index of Useful Classes259428
+Node: Index of master.cfg keys264687
+Node: Index266634
+
+End Tag Table

Added: vendor/buildbot/current/docs/buildbot.texinfo
===================================================================
--- vendor/buildbot/current/docs/buildbot.texinfo	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/docs/buildbot.texinfo	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,6386 @@
+\input texinfo @c -*-texinfo-*-
+@c %**start of header
+@setfilename buildbot.info
+@settitle BuildBot Manual 0.7.5
+@defcodeindex cs
+@defcodeindex sl
+@defcodeindex bf
+@defcodeindex bs
+@defcodeindex st
+@defcodeindex bc
+@c %**end of header
+
+@c these indices are for classes useful in a master.cfg config file
+@c @csindex : Change Sources
+@c @slindex : Schedulers and Locks
+@c @bfindex : Build Factories
+@c @bsindex : Build Steps
+@c @stindex : Status Targets
+
+@c @bcindex : keys that make up BuildmasterConfig
+
+@copying
+This is the BuildBot manual.
+
+Copyright (C) 2005,2006 Brian Warner
+
+Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty
+provided the copyright notice and this notice are preserved.
+
+@end copying
+
+@titlepage
+@title BuildBot
+@page
+@vskip 0pt plus 1filll
+@insertcopying
+@end titlepage
+
+@c Output the table of the contents at the beginning.
+@contents
+
+@ifnottex
+@node Top, Introduction, (dir), (dir)
+@top BuildBot
+
+@insertcopying
+@end ifnottex
+
+@menu
+* Introduction::                What the BuildBot does.
+* Installation::                Creating a buildmaster and buildslaves,
+                                running them.
+* Concepts::                    What goes on in the buildbot's little mind.
+* Configuration::               Controlling the buildbot.
+* Getting Source Code Changes::  Discovering when to run a build.
+* Build Process::               Controlling how each build is run.
+* Status Delivery::             Telling the world about the build's results.
+* Command-line tool::           
+* Resources::                   Getting help.
+* Developer's Appendix::        
+* Index of Useful Classes::     
+* Index of master.cfg keys::    
+* Index::                       Complete index.
+
+@detailmenu
+ --- The Detailed Node Listing ---
+
+Introduction
+
+* History and Philosophy::      
+* System Architecture::         
+* Control Flow::                
+
+System Architecture
+
+* BuildSlave Connections::      
+* Buildmaster Architecture::    
+* Status Delivery Architecture::  
+
+Installation
+
+* Requirements::                
+* Installing the code::         
+* Creating a buildmaster::      
+* Creating a buildslave::       
+* Launching the daemons::       
+* Logfiles::                    
+* Shutdown::                    
+* Maintenance::                 
+* Troubleshooting::             
+
+Creating a buildslave
+
+* Buildslave Options::          
+
+Troubleshooting
+
+* Starting the buildslave::     
+* Connecting to the buildmaster::  
+* Forcing Builds::              
+
+Concepts
+
+* Version Control Systems::     
+* Schedulers::                  
+* BuildSet::                    
+* BuildRequest::                
+* Builder::                     
+* Users::                       
+
+Version Control Systems
+
+* Generalizing VC Systems::     
+* Source Tree Specifications::  
+* How Different VC Systems Specify Sources::  
+* Attributes of Changes::       
+
+Users
+
+* Doing Things With Users::     
+* Email Addresses::             
+* IRC Nicknames::               
+* Live Status Clients::         
+
+Configuration
+
+* Config File Format::          
+* Loading the Config File::     
+* Defining the Project::        
+* Listing Change Sources and Schedulers::  
+* Setting the slaveport::       
+* Buildslave Specifiers::       
+* Defining Builders::           
+* Defining Status Targets::     
+* Debug options::               
+
+Listing Change Sources and Schedulers
+
+* Scheduler Types::             
+* Build Dependencies::          
+
+Getting Source Code Changes
+
+* Change Sources::              
+
+Change Sources
+
+* Choosing ChangeSources::      
+* CVSToys - PBService::         
+* CVSToys - mail notification::  
+* Other mail notification ChangeSources::  
+* PBChangeSource::              
+* P4Source::                    
+* BonsaiPoller::                
+* SVNPoller::                   
+
+Build Process
+
+* Build Steps::                 
+* Interlocks::                  
+* Build Factories::             
+
+Build Steps
+
+* Common Parameters::           
+* Source Checkout::             
+* ShellCommand::                
+* Simple ShellCommand Subclasses::  
+* Python BuildSteps::           
+* Transferring Files::          
+* Writing New BuildSteps::      
+
+Source Checkout
+
+* CVS::                         
+* SVN::                         
+* Darcs::                       
+* Mercurial::                   
+* Arch::                        
+* Bazaar::                      
+* P4::                          
+
+Simple ShellCommand Subclasses
+
+* Configure::                   
+* Compile::                     
+* Test::                        
+* Build Properties::            
+
+Python BuildSteps
+
+* BuildEPYDoc::                 
+* PyFlakes::                    
+
+Writing New BuildSteps
+
+* BuildStep LogFiles::          
+* Adding LogObservers::         
+* BuildStep URLs::              
+
+Build Factories
+
+* BuildStep Objects::           
+* BuildFactory::                
+* Process-Specific build factories::  
+
+BuildStep Objects
+
+* BuildFactory Attributes::     
+* Quick builds::                
+
+BuildFactory
+
+* BuildFactory Attributes::     
+* Quick builds::                
+
+Process-Specific build factories
+
+* GNUAutoconf::                 
+* CPAN::                        
+* Python distutils::            
+* Python/Twisted/trial projects::  
+
+Status Delivery
+
+* HTML Waterfall::              
+* IRC Bot::                     
+* PBListener::                  
+* Writing New Status Plugins::  
+
+Command-line tool
+
+* Administrator Tools::         
+* Developer Tools::             
+* Other Tools::                 
+* .buildbot config directory::  
+
+Developer Tools
+
+* statuslog::                   
+* statusgui::                   
+* try::                         
+
+Other Tools
+
+* sendchange::                  
+* debugclient::                 
+
+@end detailmenu
+@end menu
+
+@node Introduction, Installation, Top, Top
+@chapter Introduction
+
+@cindex introduction
+
+The BuildBot is a system to automate the compile/test cycle required by most
+software projects to validate code changes. By automatically rebuilding and
+testing the tree each time something has changed, build problems are
+pinpointed quickly, before other developers are inconvenienced by the
+failure. The guilty developer can be identified and harassed without human
+intervention. By running the builds on a variety of platforms, developers
+who do not have the facilities to test their changes everywhere before
+checkin will at least know shortly afterwards whether they have broken the
+build or not. Warning counts, lint checks, image size, compile time, and
+other build parameters can be tracked over time, are more visible, and
+are therefore easier to improve.
+
+The overall goal is to reduce tree breakage and provide a platform to
+run tests or code-quality checks that are too annoying or pedantic for
+any human to waste their time with. Developers get immediate (and
+potentially public) feedback about their changes, encouraging them to
+be more careful about testing before checkin.
+
+Features:
+
+@itemize @bullet
+@item
+run builds on a variety of slave platforms
+@item
+arbitrary build process: handles projects using C, Python, whatever
+@item
+minimal host requirements: python and Twisted
+@item
+slaves can be behind a firewall if they can still do checkout
+@item
+status delivery through web page, email, IRC, other protocols
+@item
+track builds in progress, provide estimated completion time
+@item
+flexible configuration by subclassing generic build process classes
+@item
+debug tools to force a new build, submit fake Changes, query slave status
+@item
+released under the GPL
+@end itemize
+
+@menu
+* History and Philosophy::
+* System Architecture::
+* Control Flow::
+@end menu
+
+
+@node History and Philosophy, System Architecture, Introduction, Introduction
+@section History and Philosophy
+
+@cindex Philosophy of operation
+
+The Buildbot was inspired by a similar project built for a development
+team writing a cross-platform embedded system. The various components
+of the project were supposed to compile and run on several flavors of
+unix (linux, solaris, BSD), but individual developers had their own
+preferences and tended to stick to a single platform. From time to
+time, incompatibilities would sneak in (some unix platforms want to
+use @code{string.h}, some prefer @code{strings.h}), and then the tree
+would compile for some developers but not others. The buildbot was
+written to automate the human process of walking into the office,
+updating a tree, compiling (and discovering the breakage), finding the
+developer at fault, and complaining to them about the problem they had
+introduced. With multiple platforms it was difficult for developers to
+do the right thing (compile their potential change on all platforms);
+the buildbot offered a way to help.
+
+Another problem was when programmers would change the behavior of a
+library without warning its users, or change internal aspects that
+other code was (unfortunately) depending upon. Adding unit tests to
+the codebase helps here: if an application's unit tests pass despite
+changes in the libraries it uses, you can have more confidence that
+the library changes haven't broken anything. Many developers
+complained that the unit tests were inconvenient or took too long to
+run: having the buildbot run them reduces the developer's workload to
+a minimum.
+
+In general, having more visibility into the project is always good,
+and automation makes it easier for developers to do the right thing.
+When everyone can see the status of the project, developers are
+encouraged to keep the tree in good working order. Unit tests that
+aren't run on a regular basis tend to suffer from bitrot just like
+code does: exercising them on a regular basis helps to keep them
+functioning and useful.
+
+The current version of the Buildbot is additionally targeted at
+distributed free-software projects, where resources and platforms are
+only available when provided by interested volunteers. The buildslaves
+are designed to require an absolute minimum of configuration, reducing
+the effort a potential volunteer needs to expend to be able to
+contribute a new test environment to the project. The goal is that
+anyone who wishes that a given project would run on their favorite
+platform should be able to offer that project a buildslave, running on
+that platform, where they can verify that their portability code
+works, and keeps working.
+
+@node System Architecture, Control Flow, History and Philosophy, Introduction
+@comment  node-name,  next,  previous,  up
+@section System Architecture
+
+The Buildbot consists of a single @code{buildmaster} and one or more
+@code{buildslaves}, connected in a star topology. The buildmaster
+makes all decisions about what, when, and how to build. It sends
+commands to be run on the build slaves, which simply execute the
+commands and return the results. (Certain steps involve more local
+decision making, where the overhead of sending many commands back
+and forth would be inappropriate, but in general the buildmaster is
+responsible for everything.)
+
+The buildmaster is usually fed @code{Changes} by some sort of version
+control system (@pxref{Change Sources}), which may cause builds to be
+run. As the builds are performed, various status messages are
+produced, which are then sent to any registered Status Targets
+(@pxref{Status Delivery}).
+
+@c @image{FILENAME, WIDTH, HEIGHT, ALTTEXT, EXTENSION}
+@image{images/overview,,,Overview Diagram,}
+
+The buildmaster is configured and maintained by the ``buildmaster
+admin'', who is generally the project team member responsible for
+build process issues. Each buildslave is maintained by a ``buildslave
+admin'', who does not need to be quite as involved. Generally slaves are
+run by anyone who has an interest in seeing the project work well on
+their favorite platform.
+
+@menu
+* BuildSlave Connections::      
+* Buildmaster Architecture::    
+* Status Delivery Architecture::  
+@end menu
+
+@node BuildSlave Connections, Buildmaster Architecture, System Architecture, System Architecture
+@subsection BuildSlave Connections
+
+The buildslaves are typically run on a variety of separate machines,
+at least one per platform of interest. These machines connect to the
+buildmaster over a TCP connection to a publicly-visible port. As a
+result, the buildslaves can live behind a NAT box or similar
+firewalls, as long as they can get to the buildmaster. The TCP connections
+are initiated by the buildslave and accepted by the buildmaster, but
+commands and results travel both ways within this connection. The
+buildmaster is always in charge, so all commands travel exclusively
+from the buildmaster to the buildslave.
+
+To perform builds, the buildslaves must typically obtain source code
+from a CVS/SVN/etc repository. Therefore they must also be able to
+reach the repository. The buildmaster provides instructions for
+performing builds, but does not provide the source code itself.
+
+@image{images/slaves,,,BuildSlave Connections,}
+
+@node Buildmaster Architecture, Status Delivery Architecture, BuildSlave Connections, System Architecture
+@subsection Buildmaster Architecture
+
+The Buildmaster consists of several pieces:
+
+@image{images/master,,,BuildMaster Architecture,}
+
+@itemize @bullet
+
+@item
+Change Sources, which create a Change object each time something is
+modified in the VC repository. Most ChangeSources listen for messages
+from a hook script of some sort. Some sources actively poll the
+repository on a regular basis. All Changes are fed to the Schedulers.
+
+@item
+Schedulers, which decide when builds should be performed. They collect
+Changes into BuildRequests, which are then queued for delivery to
+Builders until a buildslave is available.
+
+@item
+Builders, which control exactly @emph{how} each build is performed
+(with a series of BuildSteps, configured in a BuildFactory). Each
+Build is run on a single buildslave.
+
+@item
+Status plugins, which deliver information about the build results
+through protocols like HTTP, mail, and IRC.
+
+@end itemize
+
+@image{images/slavebuilder,,,SlaveBuilders,}
+
+Each Builder is configured with a list of BuildSlaves that it will use
+for its builds. These buildslaves are expected to behave identically:
+the only reason to use multiple BuildSlaves for a single Builder is to
+provide a measure of load-balancing.
+
+Within a single BuildSlave, each Builder creates its own SlaveBuilder
+instance. These SlaveBuilders operate independently from each other.
+Each gets its own base directory to work in. It is quite common to
+have many Builders sharing the same buildslave. For example, there
+might be two buildslaves: one for i386, and a second for PowerPC.
+There may then be a pair of Builders that do a full compile/test run,
+one for each architecture, and a lone Builder that creates snapshot
+source tarballs if the full builders complete successfully. The full
+builders would each run on a single buildslave, whereas the tarball
+creation step might run on either buildslave (since the platform
+doesn't matter when creating source tarballs). In this case, the
+mapping would look like:
+
+@example
+Builder(full-i386)  ->  BuildSlaves(slave-i386)
+Builder(full-ppc)   ->  BuildSlaves(slave-ppc)
+Builder(source-tarball) -> BuildSlaves(slave-i386, slave-ppc)
+@end example
+
+and each BuildSlave would have two SlaveBuilders inside it, one for a
+full builder, and a second for the source-tarball builder.
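+
+In @file{master.cfg} terms, such a mapping is expressed through the
+@code{c['bots']} and @code{c['builders']} lists. The sketch below only
+illustrates the shape of that configuration: the factory object
+(@code{full_factory}) is a hypothetical placeholder, and the builder
+dictionary keys are explained in the Configuration chapter.
+
+@example
+c['bots'] = [('slave-i386', 'i386passwd'), ('slave-ppc', 'ppcpasswd')]
+c['builders'] = [
+   @{'name': 'full-i386', 'slavename': 'slave-i386',
+    'builddir': 'full-i386', 'factory': full_factory@},
+   @{'name': 'full-ppc', 'slavename': 'slave-ppc',
+    'builddir': 'full-ppc', 'factory': full_factory@},
+  ]
+@end example
+
+The source-tarball Builder would be listed the same way, naming both
+buildslaves so that it can run on whichever one is free.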
+
+Once a SlaveBuilder is available, the Builder pulls one or more
+BuildRequests off its incoming queue. (It may pull more than one if it
+determines that it can merge the requests together; for example, there
+may be multiple requests to build the current HEAD revision). These
+requests are merged into a single Build instance, which includes the
+SourceStamp that describes what exact version of the source code
+should be used for the build. The Build is then assigned to a
+SlaveBuilder and the build begins.
+
+
+@node Status Delivery Architecture,  , Buildmaster Architecture, System Architecture
+@subsection Status Delivery Architecture
+
+The buildmaster maintains a central Status object, to which various
+status plugins are connected. Through this Status object, a full
+hierarchy of build status objects can be obtained.
+
+@image{images/status,,,Status Delivery,}
+
+The configuration file controls which status plugins are active. Each
+status plugin gets a reference to the top-level Status object. From
+there they can request information on each Builder, Build, Step, and
+LogFile. This query-on-demand interface is used by the html.Waterfall
+plugin to create the main status page each time a web browser hits the
+main URL.
+
+The status plugins can also subscribe to hear about new Builds as they
+occur: this is used by the MailNotifier to create new email messages
+for each recently-completed Build.
+
+The Status object records the status of old builds on disk in the
+buildmaster's base directory. This allows it to return information
+about historical builds.
+
+There are also status objects that correspond to Schedulers and
+BuildSlaves. These allow status plugins to report information about
+upcoming builds, and the online/offline status of each buildslave.
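+
+In the buildmaster configuration, each of these status targets is an
+object appended to @code{c['status']}. The following is only a rough
+sketch (the host names and addresses are hypothetical; the full
+argument lists are given in @pxref{Status Delivery}):
+
+@example
+from buildbot.status import html, mail, words
+c['status'] = []
+c['status'].append(html.Waterfall(http_port=8010))
+c['status'].append(mail.MailNotifier(fromaddr="buildbot@@example.org",
+                                     mode="problem"))
+c['status'].append(words.IRC(host="irc.example.org", nick="bbot",
+                             channels=["#example-project"]))
+@end example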
+
+
+@node Control Flow,  , System Architecture, Introduction
+@comment  node-name,  next,  previous,  up
+@section Control Flow
+
+A day in the life of the buildbot:
+
+@itemize @bullet
+
+@item
+A developer commits some source code changes to the repository. A hook
+script or commit trigger of some sort sends information about this
+change to the buildmaster through one of its configured Change
+Sources. This notification might arrive via email, or over a network
+connection (either initiated by the buildmaster as it ``subscribes''
+to changes, or by the commit trigger as it pushes Changes towards the
+buildmaster). The Change contains information about who made the
+change, what files were modified, which revision contains the change,
+and any checkin comments.
+
+@item
+The buildmaster distributes this change to all of its configured
+Schedulers. Any ``important'' changes cause the ``tree-stable-timer''
+to be started, and the Change is added to a list of those that will go
+into a new Build. When the timer expires, a Build is started on each
+of a set of configured Builders, all compiling/testing the same source
+code. Unless configured otherwise, all Builds run in parallel on the
+various buildslaves.
+
+@item
+The Build consists of a series of Steps. Each Step causes some number
+of commands to be invoked on the remote buildslave associated with
+that Builder. The first step is almost always to perform a checkout of
+the appropriate revision from the same VC system that produced the
+Change. The rest generally perform a compile and run unit tests. As
+each Step runs, the buildslave reports back command output and return
+status to the buildmaster.
+
+@item
+As the Build runs, status messages like ``Build Started'', ``Step
+Started'', ``Build Finished'', etc, are published to a collection of
+Status Targets. One of these targets is usually the HTML ``Waterfall''
+display, which shows a chronological list of events, and summarizes
+the results of the most recent build at the top of each column.
+Developers can periodically check this page to see how their changes
+have fared. If they see red, they know that they've made a mistake and
+need to fix it. If they see green, they know that they've done their
+duty and don't need to worry about their change breaking anything.
+
+@item
+If a MailNotifier status target is active, the completion of a build
+will cause email to be sent to any developers whose Changes were
+incorporated into this Build. The MailNotifier can be configured to
+only send mail upon failing builds, or for builds which have just
+transitioned from passing to failing. Other status targets can provide
+similar real-time notification via different communication channels,
+like IRC.
+
+@end itemize
+
+
+@node Installation, Concepts, Introduction, Top
+@chapter Installation
+
+@menu
+* Requirements::                
+* Installing the code::         
+* Creating a buildmaster::      
+* Creating a buildslave::       
+* Launching the daemons::       
+* Logfiles::                    
+* Shutdown::                    
+* Maintenance::                 
+* Troubleshooting::             
+@end menu
+
+@node Requirements, Installing the code, Installation, Installation
+@section Requirements
+
+At a bare minimum, you'll need the following (for both the buildmaster
+and a buildslave):
+
+@itemize @bullet
+@item
+Python: http://www.python.org
+
+Buildbot requires python-2.2 or later, and is primarily developed
+against python-2.3. The buildmaster uses generators, a feature which
+is not available in python-2.1, and both master and slave require a
+version of Twisted which only works with python-2.2 or later. Certain
+features (like the inclusion of build logs in status emails) require
+python-2.2.2 or later. The IRC ``force build'' command requires
+python-2.3 (for the shlex.split function).
+
+@item
+Twisted: http://twistedmatrix.com
+
+Both the buildmaster and the buildslaves require Twisted-1.3.0 or
+later. It has been mainly developed against Twisted-2.0.1, but has
+been tested against Twisted-2.1.0 (the most recent as of this
+writing), and might even work on versions as old as Twisted-1.1.0, but
+as always the most recent version is recommended.
+
+Twisted-1.3.0 and earlier were released as a single monolithic
+package. When you run Buildbot against Twisted-2.0.0 or later (which
+are split into a number of smaller subpackages), you'll need at least
+"Twisted" (the core package), and you'll also want TwistedMail,
+TwistedWeb, and TwistedWords (for sending email, serving a web status
+page, and delivering build status via IRC, respectively).
+ at end itemize
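+
+One quick way to confirm that the optional Twisted subpackages are
+present is to try importing them with the same python that will run
+the buildbot. This is only a convenience check, not part of the
+install procedure:
+
+@example
+# run with the python interpreter that will run the buildbot
+import twisted.mail, twisted.web, twisted.words
+print "Twisted subpackages are importable"
+@end example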
+
+Certain other packages may be useful on the system running the
+buildmaster:
+
+@itemize @bullet
+@item
+CVSToys: http://purl.net/net/CVSToys
+
+If your buildmaster uses FreshCVSSource to receive change notification
+from a cvstoys daemon, it will require CVSToys be installed (tested
+with CVSToys-1.0.10). If it doesn't use that source (i.e. if you
+only use a mail-parsing change source, or the SVN notification
+script), you will not need CVSToys.
+
+@end itemize
+
+And of course, your project's build process will impose additional
+requirements on the buildslaves. These hosts must have all the tools
+necessary to compile and test your project's source code.
+
+
+@node Installing the code, Creating a buildmaster, Requirements, Installation
+@section Installing the code
+
+@cindex installation
+
+The Buildbot is installed using the standard python @code{distutils}
+module. After unpacking the tarball, the process is:
+
+@example
+python setup.py build
+python setup.py install
+@end example
+
+where the install step may need to be done as root. This will put the
+bulk of the code somewhere like
+/usr/lib/python2.3/site-packages/buildbot . It will also install the
+@code{buildbot} command-line tool in /usr/bin/buildbot.
+
+To test this, shift to a different directory (like /tmp), and run:
+
+@example
+buildbot --version
+@end example
+
+If it shows you the versions of Buildbot and Twisted, the install went
+ok. If it says @code{no such command} or it gets an @code{ImportError}
+when it tries to load the libraries, then something went wrong.
+@code{pydoc buildbot} is another useful diagnostic tool.
+
+Windows users will find these files in other places. You will need to
+make sure that python can find the libraries, and will probably find
+it convenient to have @code{buildbot} on your PATH.
+
+If you wish, you can run the buildbot unit test suite like this:
+
+@example
+PYTHONPATH=. trial buildbot.test
+@end example
+
+This should run up to 192 tests, depending upon what VC tools you have
+installed. On my desktop machine it takes about five minutes to
+complete. Nothing should fail, though a few might be skipped. If any of the
+tests fail, you should stop and investigate the cause before
+continuing the installation process, as it will probably be easier to
+track down the bug early.
+
+If you cannot or do not wish to install the buildbot into a site-wide
+location like @file{/usr} or @file{/usr/local}, you can also install
+it into the account's home directory. Do the install command like
+this:
+
+@example
+python setup.py install --home=~
+@end example
+
+That will populate @file{~/lib/python} and create
+@file{~/bin/buildbot}. Make sure this lib directory is on your
+@code{PYTHONPATH}.
+
+
+@node Creating a buildmaster, Creating a buildslave, Installing the code, Installation
+@section Creating a buildmaster
+
+As you learned earlier (@pxref{System Architecture}), the buildmaster
+runs on a central host (usually one that is publicly visible, so
+everybody can check on the status of the project), and controls all
+aspects of the buildbot system. Let us call this host
+ at code{buildbot.example.org}.
+
+You may wish to create a separate user account for the buildmaster,
+perhaps named @code{buildmaster}. This can help keep your personal
+configuration distinct from that of the buildmaster and is useful if
+you have to use a mail-based notification system (@pxref{Change
+Sources}). However, the Buildbot will work just fine with your regular
+user account.
+
+You need to choose a directory for the buildmaster, called the
+@code{basedir}. This directory will be owned by the buildmaster, which
+will use configuration files therein, and create status files as it
+runs. @file{~/Buildbot} is a likely value. If you run multiple
+buildmasters in the same account, or if you run both masters and
+slaves, you may want a more distinctive name like
+@file{~/Buildbot/master/gnomovision} or
+@file{~/Buildmasters/fooproject}. If you are using a separate user
+account, this might just be @file{~buildmaster/masters/fooproject}.
+
+Once you've picked a directory, use the @command{buildbot
+create-master} command to create the directory and populate it with
+startup files:
+
+@example
+buildbot create-master @var{basedir}
+@end example
+
+You will need to create a configuration file (@pxref{Configuration})
+before starting the buildmaster. Most of the rest of this manual is
+dedicated to explaining how to do this. A sample configuration file is
+placed in the working directory, named @file{master.cfg.sample}, which
+can be copied to @file{master.cfg} and edited to suit your purposes.
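+
+Stripped to a skeleton, a @file{master.cfg} is an ordinary python file
+that fills in a dictionary named @code{BuildmasterConfig}. The
+following sketch shows only the overall shape: the values are empty
+placeholders, not a working configuration, and each key is explained
+in the Configuration chapter.
+
+@example
+# master.cfg sketch: shape only, not a working configuration
+c = BuildmasterConfig = @{@}
+c['bots'] = []        # (slavename, password) pairs
+c['sources'] = []     # ChangeSource objects
+c['schedulers'] = []  # Scheduler objects
+c['builders'] = []    # builder specifications
+c['status'] = []      # status targets
+c['slavePortnum'] = 9989
+c['projectName'] = "Example Project"
+c['buildbotURL'] = "http://buildbot.example.org/"
+@end example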
+
+(Internal details: This command creates a file named
+@file{buildbot.tac} that contains all the state necessary to create
+the buildmaster. Twisted has a tool called @code{twistd} which can use
+this .tac file to create and launch a buildmaster instance. twistd
+takes care of logging and daemonization (running the program in the
+background). @file{/usr/bin/buildbot} is a front end which runs twistd
+for you.)
+
+In addition to @file{buildbot.tac}, a small @file{Makefile.sample} is
+installed. This can be used as the basis for customized daemon startup.
+@xref{Launching the daemons}.
+
+
+@node Creating a buildslave, Launching the daemons, Creating a buildmaster, Installation
+@section Creating a buildslave
+
+Typically, you will be adding a buildslave to an existing buildmaster,
+to provide additional architecture coverage. The buildbot
+administrator will give you several pieces of information necessary to
+connect to the buildmaster. You should also be somewhat familiar with
+the project being tested, so you can troubleshoot build problems
+locally.
+
+The buildbot exists to make sure that the project's stated ``how to
+build it'' process actually works. To this end, the buildslave should
+run in an environment just like that of your regular developers.
+Typically the project build process is documented somewhere
+(@file{README}, @file{INSTALL}, etc), in a document that should
+mention all library dependencies and contain a basic set of build
+instructions. This document will be useful as you configure the host
+and account in which the buildslave runs.
+
+Here's a good checklist for setting up a buildslave:
+
+@enumerate
+@item
+Set up the account
+
+It is recommended (although not mandatory) to set up a separate user
+account for the buildslave. This account is frequently named
+@code{buildbot} or @code{buildslave}. This serves to isolate your
+personal working environment from that of the slave, and helps to
+minimize the security threat posed by letting possibly-unknown
+contributors run arbitrary code on your system. The account should
+have a minimum of fancy init scripts.
+
+@item
+Install the buildbot code
+
+Follow the instructions given earlier (@pxref{Installing the code}).
+If you use a separate buildslave account, and you didn't install the
+buildbot code to a shared location, then you will need to install it
+with @code{--home=~} for each account that needs it.
+
+@item
+Set up the host
+
+Make sure the host can actually reach the buildmaster. Usually the
+buildmaster is running a status webserver on the same machine, so
+simply point your web browser at it and see if you can get there.
+Install whatever additional packages or libraries the project's
+INSTALL document advises. (Or not: if your buildslave is supposed to
+make sure that building without optional libraries still works, then
+don't install those libraries.)
+
+Again, these libraries don't necessarily have to be installed to a
+site-wide shared location, but they must be available to your build
+process. Making them available from a non-standard location tends to be
+very specific to the build process, so installing them to @file{/usr}
+or @file{/usr/local} is usually the simplest approach.
+
+@item
+Test the build process
+
+Follow the instructions in the INSTALL document, in the buildslave's
+account. Perform a full CVS (or whatever) checkout, configure, make,
+run tests, etc. Confirm that the build works without manual fussing.
+If it doesn't work when you do it by hand, it will be unlikely to work
+when the buildbot attempts to do it in an automated fashion.
+
+@item
+Choose a base directory
+
+This should be somewhere in the buildslave's account, typically named
+after the project which is being tested. The buildslave will not touch
+any file outside of this directory. Something like @file{~/Buildbot}
+or @file{~/Buildslaves/fooproject} is appropriate.
+
+@item
+Get the buildmaster host/port, botname, and password
+
+When the buildbot admin configures the buildmaster to accept and use
+your buildslave, they will provide you with the following pieces of
+information:
+
+@itemize @bullet
+@item
+your buildslave's name
+@item
+the password assigned to your buildslave
+@item
+the hostname and port number of the buildmaster, i.e. buildbot.example.org:8007
+@end itemize
+
+@item
+Create the buildslave
+
+Now run the @command{buildbot} command as follows:
+
+@example
+buildbot create-slave @var{BASEDIR} @var{MASTERHOST}:@var{PORT} @var{SLAVENAME} @var{PASSWORD}
+@end example
+
+This will create the base directory and a collection of files inside,
+including the @file{buildbot.tac} file that contains all the
+information you passed to the @code{buildbot} command.
+
+@item
+Fill in the hostinfo files
+
+When it first connects, the buildslave will send a few files up to the
+buildmaster which describe the host that it is running on. These files
+are presented on the web status display so that developers have more
+information to reproduce any test failures that are witnessed by the
+buildbot. There are sample files in the @file{info} subdirectory of
+the buildbot's base directory. You should edit these to correctly
+describe you and your host.
+
+@file{BASEDIR/info/admin} should contain your name and email address.
+This is the ``buildslave admin address'', and will be visible from the
+build status page (so you may wish to munge it a bit if
+address-harvesting spambots are a concern).
+
+@file{BASEDIR/info/host} should be filled with a brief description of
+the host: OS, version, memory size, CPU speed, versions of relevant
+libraries installed, and finally the version of the buildbot code
+which is running the buildslave.
+
+If you run many buildslaves, you may want to create a single
+@file{~buildslave/info} file and share it among all the buildslaves
+with symlinks.
+
+@end enumerate
+
+@menu
+* Buildslave Options::          
+@end menu
+
+@node Buildslave Options,  , Creating a buildslave, Creating a buildslave
+@subsection Buildslave Options
+
+There are a handful of options you might want to use when creating the
+buildslave with the @command{buildbot create-slave <options> DIR <params>}
+command. You can type @command{buildbot create-slave --help} for a summary.
+To use these, just include them on the @command{buildbot create-slave}
+command line, like this:
+
+@example
+buildbot create-slave --umask=022 ~/buildslave buildmaster.example.org:42012 myslavename mypasswd
+@end example
+
+@table @code
+@item --usepty
+This is a boolean flag that tells the buildslave whether to launch
+child processes in a PTY (the default) or with regular pipes. The
+advantage of using a PTY is that ``grandchild'' processes are more
+likely to be cleaned up if the build is interrupted or times out
+(since it enables the use of a ``process group'' in which all child
+processes will be placed). The disadvantages: some forms of Unix have
+problems with PTYs, some of your unit tests may behave differently
+when run under a PTY (generally those which check to see if they are
+being run interactively), and PTYs will merge the stdout and stderr
+streams into a single output stream (which means the red-vs-black
+coloring in the logfiles will be lost). If you encounter problems, you
+can add @code{--usepty=0} to disable the use of PTYs. Note that
+Windows buildslaves never use PTYs.
+
+@item --umask
+This is a string (generally an octal representation of an integer)
+which will cause the buildslave process' ``umask'' value to be set
+shortly after initialization. The ``twistd'' daemonization utility
+forces the umask to 077 at startup (which means that all files created
+by the buildslave or its child processes will be unreadable by any
+user other than the buildslave account). If you want build products to
+be readable by other accounts, you can add @code{--umask=022} to tell
+the buildslave to fix the umask after twistd clobbers it. If you want
+build products to be @emph{writable} by other accounts too, use
+@code{--umask=000}, but this is likely to be a security problem.
+
+@item --keepalive
+This is a number that indicates how frequently ``keepalive'' messages
+should be sent from the buildslave to the buildmaster, expressed in
+seconds. The default (600) causes a message to be sent to the
+buildmaster at least once every 10 minutes. To set this to a lower
+value, use e.g. @code{--keepalive=120}.
+
+If the buildslave is behind a NAT box or stateful firewall, these
+messages may help to keep the connection alive: some NAT boxes tend to
+forget about a connection if it has not been used in a while. When
+this happens, the buildmaster will think that the buildslave has
+disappeared, and builds will time out. Meanwhile the buildslave will
+not realize that anything is wrong.
+
+@end table
+
+
+@node Launching the daemons, Logfiles, Creating a buildslave, Installation
+@section Launching the daemons
+
+Both the buildmaster and the buildslave run as daemon programs. To
+launch them, pass the working directory to the @code{buildbot}
+command:
+
+@example
+buildbot start @var{BASEDIR}
+@end example
+
+This command will start the daemon and then return, so normally it
+will not produce any output. To verify that the programs are indeed
+running, look for a pair of files named @file{twistd.log} and
+@file{twistd.pid} that should be created in the working directory.
+@file{twistd.pid} contains the process ID of the newly-spawned daemon.
+
+When the buildslave connects to the buildmaster, new directories will
+start appearing in its base directory. The buildmaster tells the slave
+to create a directory for each Builder which will be using that slave.
+All build operations are performed within these directories: CVS
+checkouts, compiles, and tests.
+
+Once you get everything running, you will want to arrange for the
+buildbot daemons to be started at boot time. One way is to use
+@code{cron}, by putting them in a @@reboot crontab entry@footnote{this
+@@reboot syntax is understood by Vixie cron, which is the flavor
+usually provided with linux systems. Other unices may have a cron that
+doesn't understand @@reboot}:
+
+@example
+@@reboot buildbot start @var{BASEDIR}
+@end example
+
+When you run @command{crontab} to set this up, remember to do it as
+the buildmaster or buildslave account! If you add this to your crontab
+when running as your regular account (or worse yet, root), then the
+daemon will run as the wrong user, quite possibly as one with more
+authority than you intended to provide.
+
+It is important to remember that the environment provided to cron jobs
+and init scripts can be quite different from your normal runtime.
+There may be fewer environment variables specified, and the PATH may
+be shorter than usual. It is a good idea to test out this method of
+launching the buildslave by using a cron job with a time in the near
+future, with the same command, and then check @file{twistd.log} to
+make sure the slave actually started correctly. Common problems here
+are for @file{/usr/local} or @file{~/bin} to not be on your
+@code{PATH}, or for @code{PYTHONPATH} to not be set correctly.
+Sometimes @code{HOME} is messed up too.
+
+To modify the way the daemons are started (perhaps you want to set
+some environment variables first, or perform some cleanup each time),
+you can create a file named @file{Makefile.buildbot} in the base
+directory. When the @file{buildbot} front-end tool is told to
+@command{start} the daemon, and it sees this file (and
+@file{/usr/bin/make} exists), it will do @command{make -f
+Makefile.buildbot start} instead of its usual action (which involves
+running @command{twistd}). When the buildmaster or buildslave is
+installed, a @file{Makefile.sample} is created which implements the
+same behavior as the @file{buildbot} tool uses, so if you want to
+customize the process, just copy @file{Makefile.sample} to
+@file{Makefile.buildbot} and edit it as necessary.
+
+@node Logfiles, Shutdown, Launching the daemons, Installation
+@section Logfiles
+
+@cindex logfiles
+
+While a buildbot daemon runs, it emits text to a logfile, named
+@file{twistd.log}. A command like @code{tail -f twistd.log} is useful
+to watch the command output as it runs.
+
+The buildmaster will announce any errors with its configuration file
+in the logfile, so it is a good idea to look at the log at startup
+time to check for any problems. Most buildmaster activities will cause
+lines to be added to the log.
+
+@node Shutdown, Maintenance, Logfiles, Installation
+@section Shutdown
+
+To stop a buildmaster or buildslave manually, use:
+
+@example
+buildbot stop @var{BASEDIR}
+@end example
+
+This simply looks for the @file{twistd.pid} file and kills whatever
+process is identified within.
+
+At system shutdown, all processes are sent a @code{SIGTERM} (followed,
+if necessary, by a @code{SIGKILL}). The buildmaster and buildslave
+respond to @code{SIGTERM} by shutting down normally.
+
+The buildmaster will respond to a @code{SIGHUP} by re-reading its
+config file. The following shortcut is available:
+
+@example
+buildbot reconfig @var{BASEDIR}
+@end example
+
+When you update the Buildbot code to a new release, you will need to
+restart the buildmaster and/or buildslave before it can take advantage
+of the new code. You can do a @code{buildbot stop @var{BASEDIR}} and
+@code{buildbot start @var{BASEDIR}} in quick succession, or you can
+use the @code{restart} shortcut, which does both steps for you:
+
+@example
+buildbot restart @var{BASEDIR}
+@end example
+
+
+@node Maintenance, Troubleshooting, Shutdown, Installation
+@section Maintenance
+
+It is a good idea to check the buildmaster's status page every once in
+a while, to see if your buildslave is still online. Eventually the
+buildbot will probably be enhanced to send you email (via the
+@file{info/admin} email address) when the slave has been offline for
+more than a few hours.
+
+If you find you can no longer provide a buildslave to the project, please
+let the project admins know, so they can put out a call for a
+replacement.
+
+The Buildbot records status and logs output continually, each time a
+build is performed. The status tends to be small, but the build logs
+can become quite large. Each build and log are recorded in a separate
+file, arranged hierarchically under the buildmaster's base directory.
+To prevent these files from growing without bound, you should
+periodically delete old build logs. A simple cron job to delete
+anything older than, say, two weeks should do the job. The only trick
+is to leave the @file{buildbot.tac} and other support files alone, for
+which find's @code{-mindepth} argument helps skip everything in the
+top directory. You can use something like the following:
+
+@example
+@@weekly cd BASEDIR && find . -mindepth 2 -type f -mtime +14 -exec rm @{@} \;
+@@weekly cd BASEDIR && find twistd.log* -mtime +14 -exec rm @{@} \;
+@end example
+
+@node Troubleshooting,  , Maintenance, Installation
+@section Troubleshooting
+
+Here are a few hints on diagnosing common problems.
+
+@menu
+* Starting the buildslave::     
+* Connecting to the buildmaster::  
+* Forcing Builds::              
+@end menu
+
+@node Starting the buildslave, Connecting to the buildmaster, Troubleshooting, Troubleshooting
+@subsection Starting the buildslave
+
+Cron jobs are typically run with a minimal shell (@file{/bin/sh}, not
+@file{/bin/bash}), and tilde expansion is not always performed in such
+commands. You may want to use explicit paths, because the @code{PATH}
+is usually quite short and doesn't include anything set by your
+shell's startup scripts (@file{.profile}, @file{.bashrc}, etc). If
+you've installed buildbot (or other python libraries) to an unusual
+location, you may need to add a @code{PYTHONPATH} specification (note
+that python will do tilde-expansion on @code{PYTHONPATH} elements by
+itself). Sometimes it is safer to fully-specify everything:
+
+@example
+@@reboot PYTHONPATH=~/lib/python /usr/local/bin/buildbot start /usr/home/buildbot/basedir
+@end example
+
+Take the time to get the @@reboot job set up. Otherwise, things will work
+fine for a while, but the first power outage or system reboot you have will
+stop the buildslave with nothing but the cries of sorrowful developers to
+remind you that it has gone away.
+
+@node Connecting to the buildmaster, Forcing Builds, Starting the buildslave, Troubleshooting
+@subsection Connecting to the buildmaster
+
+If the buildslave cannot connect to the buildmaster, the reason should
+be described in the @file{twistd.log} logfile. Some common problems
+are an incorrect master hostname or port number, or a mistyped bot
+name or password. If the buildslave loses the connection to the
+master, it is supposed to attempt to reconnect with an
+exponentially-increasing backoff. Each attempt (and the time of the
+next attempt) will be logged. If you get impatient, just manually stop
+and re-start the buildslave.
+
+When the buildmaster is restarted, all slaves will be disconnected,
+and will attempt to reconnect as usual. The reconnect time will depend
+upon how long the buildmaster is offline (i.e. how far up the
+exponential backoff curve the slaves have travelled). Again,
+@code{buildbot stop @var{BASEDIR}; buildbot start @var{BASEDIR}} will
+speed up the process.
+
+@node Forcing Builds,  , Connecting to the buildmaster, Troubleshooting
+@subsection Forcing Builds
+
+From the buildmaster's main status web page, you can force a build to
+be run on your build slave. Figure out which column is for a builder
+that runs on your slave, click on that builder's name, and the page
+that comes up will have a ``Force Build'' button. Fill in the form,
+hit the button, and a moment later you should see your slave's
+@file{twistd.log} filling with commands being run. Using @code{pstree}
+or @code{top} should also reveal the cvs/make/gcc/etc processes being
+run by the buildslave. Note that the same web page should also show
+the @file{admin} and @file{host} information files that you configured
+earlier.
+
+@node Concepts, Configuration, Installation, Top
+@chapter Concepts
+
+This chapter defines some of the basic concepts that the Buildbot
+uses. You'll need to understand how the Buildbot sees the world to
+configure it properly.
+
+@menu
+* Version Control Systems::     
+* Schedulers::                  
+* BuildSet::                    
+* BuildRequest::                
+* Builder::                     
+* Users::                       
+@end menu
+
+@node Version Control Systems, Schedulers, Concepts, Concepts
+@section Version Control Systems
+
+@cindex Version Control
+
+The source trees that the Buildbot builds come from a Version Control
+System of some kind.
+CVS and Subversion are two popular ones, but the Buildbot supports
+others. All VC systems have some notion of an upstream
+@code{repository} which acts as a server@footnote{except Darcs, but
+since the Buildbot never modifies its local source tree we can ignore
+the fact that Darcs uses a less centralized model}, from which clients
+can obtain source trees according to various parameters. The VC
+repository provides source trees of various projects, for different
+branches, and from various points in time. The first thing we have to
+do is to specify which source tree we want to get.
+
+@menu
+* Generalizing VC Systems::     
+* Source Tree Specifications::  
+* How Different VC Systems Specify Sources::  
+* Attributes of Changes::       
+@end menu
+
+@node Generalizing VC Systems, Source Tree Specifications, Version Control Systems, Version Control Systems
+@subsection Generalizing VC Systems
+
+For the purposes of the Buildbot, we will try to generalize all VC
+systems as having repositories that each provide sources for a variety
+of projects. Each project is defined as a directory tree with source
+files. The individual files may each have revisions, but we ignore
+that and treat the project as a whole as having a set of revisions.
+Each time someone commits a change to the project, a new revision
+becomes available. These revisions can be described by a tuple with
+two items: the first is a branch tag, and the second is some kind of
+timestamp or revision stamp. Complex projects may have multiple branch
+tags, but there is always a default branch. The timestamp may be an
+actual timestamp (such as the -D option to CVS), or it may be a
+monotonically-increasing transaction number (such as the change number
+used by SVN and P4, or the revision number used by Arch, or a labeled
+tag used in CVS)@footnote{many VC systems provide more complexity than
+this: in particular the local views that P4 and ClearCase can assemble
+out of various source directories are more complex than we're prepared
+to take advantage of here}. The SHA1 revision ID used by Monotone and
+Mercurial is also a kind of revision stamp, in that it specifies a
+unique copy of the source tree, as does a Darcs ``context'' file.
+
+When we aren't intending to make any changes to the sources we check out
+(at least not any that need to be committed back upstream), there are two
+basic ways to use a VC system:
+
+@itemize @bullet
+@item
+Retrieve a specific set of source revisions: some tag or key is used
+to index this set, which is fixed and cannot be changed by subsequent
+developers committing new changes to the tree. Releases are built from
+tagged revisions like this, so that they can be rebuilt again later
+(probably with controlled modifications).
+@item
+Retrieve the latest sources along a specific branch: some tag is used
+to indicate which branch is to be used, but within that constraint we want
+to get the latest revisions.
+@end itemize
+
+Build personnel or CM staff typically use the first approach: the
+build that results is (ideally) completely specified by the two
+parameters given to the VC system: repository and revision tag. This
+gives QA and end-users something concrete to point at when reporting
+bugs. Release engineers are also reportedly fond of shipping code that
+can be traced back to a concise revision tag of some sort.
+
+Developers are more likely to use the second approach: each morning
+the developer does an update to pull in the changes committed by the
+team over the last day. These builds are not easy to fully specify: it
+depends upon exactly when you did a checkout, and upon what local
+changes the developer has in their tree. Developers do not normally
+tag each build they produce, because there is usually significant
+overhead involved in creating these tags. Recreating the trees used by
+one of these builds can be a challenge. Some VC systems may provide
+implicit tags (like a revision number), while others may allow the use
+of timestamps to mean ``the state of the tree at time X'' as opposed
+to a tree-state that has been explicitly marked.
+
+The Buildbot is designed to help developers, so it usually works in
+terms of @emph{the latest} sources as opposed to specific tagged
+revisions. However, it would really prefer to build from reproducible
+source trees, so implicit revisions are used whenever possible.
+
+@node Source Tree Specifications, How Different VC Systems Specify Sources, Generalizing VC Systems, Version Control Systems
+@subsection Source Tree Specifications
+
+So for the Buildbot's purposes we treat each VC system as a server
+which can take a list of specifications as input and produce a source
+tree as output. Some of these specifications are static: they are
+attributes of the builder and do not change over time. Others are more
+variable: each build will have a different value. The repository is
+changed over time by a sequence of Changes, each of which represents a
+single developer making changes to some set of files. These Changes
+are cumulative@footnote{Monotone's @emph{multiple heads} feature
+violates this assumption of cumulative Changes, but in most situations
+the changes don't occur frequently enough for this to be a significant
+problem}.
+
+For normal builds, the Buildbot wants to get well-defined source trees
+that contain specific Changes, and exclude other Changes that may have
+occurred after the desired ones. We assume that the Changes arrive at
+the buildbot (through one of the mechanisms described in @pxref{Change
+Sources}) in the same order in which they are committed to the
+repository. The Buildbot waits for the tree to become ``stable''
+before initiating a build, for two reasons. The first is that
+developers frequently make multiple related commits in quick
+succession, even when the VC system provides ways to make atomic
+transactions involving multiple files at the same time. Running a
+build in the middle of these sets of changes would use an inconsistent
+set of source files, and is likely to fail (and is certain to be less
+useful than a build which uses the full set of changes). The
+tree-stable-timer is intended to avoid these useless builds that
+include some of the developer's changes but not all. The second reason
+is that some VC systems (i.e. CVS) do not provide repository-wide
+transaction numbers, so that timestamps are the only way to refer to
+a specific repository state. These timestamps may be somewhat
+ambiguous, due to processing and notification delays. By waiting until
+the tree has been stable for, say, 10 minutes, we can choose a
+timestamp from the middle of that period to use for our source
+checkout, and then be reasonably sure that any clock-skew errors will
+not cause the build to be performed on an inconsistent set of source
+files.
+
+The Schedulers always use the tree-stable-timer, with a timeout that
+is configured to reflect a reasonable tradeoff between build latency
+and change frequency. When the VC system provides coherent
+repository-wide revision markers (such as Subversion's revision
+numbers, or in fact anything other than CVS's timestamps), the
+resulting Build is simply performed against a source tree defined by
+that revision marker. When the VC system does not provide this, a
+timestamp from the middle of the tree-stable period is used to
+generate the source tree@footnote{this @code{checkoutDelay} defaults
+to half the tree-stable timer, but it can be overridden with an
+argument to the Source Step}.
+
+@node How Different VC Systems Specify Sources, Attributes of Changes, Source Tree Specifications, Version Control Systems
+@subsection How Different VC Systems Specify Sources
+
+For CVS, the static specifications are @code{repository} and
+@code{module}. In addition to those, each build uses a timestamp (or
+omits the timestamp to mean @code{the latest}) and @code{branch tag}
+(which defaults to HEAD). These parameters collectively specify a set
+of sources from which a build may be performed.
+
+@uref{http://subversion.tigris.org, Subversion} combines the
+repository, module, and branch into a single @code{Subversion URL}
+parameter. Within that scope, source checkouts can be specified by a
+numeric @code{revision number} (a repository-wide
+monotonically-increasing marker, such that each transaction that
+changes the repository is indexed by a different revision number), or
+a revision timestamp. When branches are used, the repository and
+module form a static @code{baseURL}, while each build has a
+@code{revision number} and a @code{branch} (which defaults to a
+statically-specified @code{defaultBranch}). The @code{baseURL} and
+@code{branch} are simply concatenated together to derive the
+@code{svnurl} to use for the checkout.
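+
+As a concrete (but hypothetical) sketch of those parameters, a Builder
+that follows Subversion branches might configure its source step
+roughly like this; the step classes and their arguments are described
+later in this manual:
+
+@example
+from buildbot.process import factory
+from buildbot.steps.source import SVN
+f = factory.BuildFactory()
+f.addStep(SVN,
+          baseURL="svn://svn.example.org/MyProject/",
+          defaultBranch="trunk",
+          mode="update")
+@end example
+
+A build on the default branch would then check out
+@code{svn://svn.example.org/MyProject/trunk}, while a build for branch
+@code{branches/foo} would use
+@code{svn://svn.example.org/MyProject/branches/foo}.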
+
+@uref{http://www.perforce.com/, Perforce} is similar. The server
+is specified through a @code{P4PORT} parameter. Module and branch
+are specified in a single depot path, and revisions are
+depot-wide. When branches are used, the @code{p4base} and
+@code{defaultBranch} are concatenated together to produce the depot
+path.
+
+@uref{http://wiki.gnuarch.org/, Arch} and
+@uref{http://bazaar.canonical.com/, Bazaar} specify a repository by
+URL, as well as a @code{version} which is kind of like a branch name.
+Arch uses the word @code{archive} to represent the repository. Arch
+lets you push changes from one archive to another, removing the strict
+centralization required by CVS and SVN. It retains the distinction
+between repository and working directory that most other VC systems
+use. For complex multi-module directory structures, Arch has a
+built-in @code{build config} layer with which the checkout process has
+two steps. First, an initial bootstrap checkout is performed to
+retrieve a set of build-config files. Second, one of these files is
+used to figure out which archives/modules should be used to populate
+subdirectories of the initial checkout.
+
+Builders which use Arch and Bazaar therefore have a static archive
+@code{url}, and a default ``branch'' (which is a string that specifies
+a complete category--branch--version triple). Each build can have its
+own branch (the category--branch--version string) to override the
+default, as well as a revision number (which is turned into a
+--patch-NN suffix when performing the checkout).
+
+@uref{http://abridgegame.org/darcs/, Darcs} doesn't really have the
+notion of a single master repository. Nor does it really have
+branches. In Darcs, each working directory is also a repository, and
+there are operations to push and pull patches from one of these
+@code{repositories} to another. For the Buildbot's purposes, all you
+need to do is specify the URL of a repository that you want to build
+from. The build slave will then pull the latest patches from that
+repository and build them. Multiple branches are implemented by using
+multiple repositories (possibly living on the same server).
+
+Builders which use Darcs therefore have a static @code{repourl} which
+specifies the location of the repository. If branches are being used,
+the source Step is instead configured with a @code{baseURL} and a
+@code{defaultBranch}, and the two strings are simply concatenated
+together to obtain the repository's URL. Each build then has a
+specific branch which replaces @code{defaultBranch}, or just uses the
+default one. Instead of a revision number, each build can have a
+``context'', which is a string that records all the patches that are
+present in a given tree (this is the output of @command{darcs changes
+--context}, and is considerably less concise than, e.g. Subversion's
+revision number, but the patch-reordering flexibility of Darcs makes
+it impossible to provide a shorter useful specification).
+
+@uref{http://selenic.com/mercurial, Mercurial} is like Darcs, in that
+each branch is stored in a separate repository. The @code{repourl},
+@code{baseURL}, and @code{defaultBranch} arguments are all handled the
+same way as with Darcs. The ``revision'', however, is the hash
+identifier returned by @command{hg identify}.
+
+
+@node Attributes of Changes,  , How Different VC Systems Specify Sources, Version Control Systems
+@subsection Attributes of Changes
+
+@heading Who
+
+Each Change has a @code{who} attribute, which specifies which
+developer is responsible for the change. This is a string which comes
+from a namespace controlled by the VC repository. Frequently this
+means it is a username on the host which runs the repository, but not
+all VC systems require this (Arch, for example, uses a fully-qualified
+@code{Arch ID}, which looks like an email address, as does Darcs).
+Each StatusNotifier will map the @code{who} attribute into something
+appropriate for their particular means of communication: an email
+address, an IRC handle, etc.
+
+@heading Files
+
+It also has a list of @code{files}, which are just the tree-relative
+filenames of any files that were added, deleted, or modified for this
+Change. These filenames are used by the @code{isFileImportant}
+function (in the Scheduler) to decide whether it is worth triggering a
+new build or not, e.g. the function could use
+@code{filename.endswith(".c")} to only run a build if a C file were
+checked in. Certain BuildSteps can also use the list of changed files
+to run a more targeted series of tests, e.g. the
+@code{python_twisted.Trial} step can run just the unit tests that
+provide coverage for the modified .py files instead of running the
+full test suite.
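+
+Such a file-importance check is just a python function that receives a
+Change and returns a boolean. A minimal sketch (the exact name of the
+Scheduler argument it is passed to is given with the Scheduler
+documentation):
+
+@example
+def important_source_file(change):
+    # trigger a build only when C source or header files are touched
+    for name in change.files:
+        if name.endswith(".c") or name.endswith(".h"):
+            return True
+    return False
+@end example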
+
+@heading Comments
+
+The Change also has a @code{comments} attribute, which is a string
+containing any checkin comments.
+
+@heading Revision
+
+Each Change can have a @code{revision} attribute, which describes how
+to get a tree with a specific state: a tree which includes this Change
+(and all that came before it) but none that come after it. If this
+information is unavailable, the @code{.revision} attribute will be
+@code{None}. These revisions are provided by the ChangeSource, and
+consumed by the @code{computeSourceRevision} method in the appropriate
+@code{step.Source} class.
+
+@table @samp
+@item CVS
+@code{revision} is an int, seconds since the epoch
+@item SVN
+@code{revision} is an int, a transaction number (r%d)
+@item Darcs
+@code{revision} is a large string, the output of @code{darcs changes --context}
+@item Mercurial
+@code{revision} is a short string (a hash ID), the output of @code{hg identify}
+@item Arch/Bazaar
+@code{revision} is the full revision ID (ending in --patch-%d)
+@item P4
+@code{revision} is an int, the transaction number
+@end table
+
+@heading Branches
+
+The Change might also have a @code{branch} attribute. This indicates
+that all of the Change's files are in the same named branch. The
+Schedulers get to decide whether the branch should be built or not.
+
+For VC systems like CVS, Arch, and Monotone, the @code{branch} name is
+unrelated to the filename (that is, the branch name and the filename
+inhabit unrelated namespaces). For SVN, branches are expressed as
+subdirectories of the repository, so the file's ``svnurl'' is a
+combination of some base URL, the branch name, and the filename within
+the branch. (In a sense, the branch name and the filename inhabit the
+same namespace). Darcs branches are subdirectories of a base URL just
+like SVN. Mercurial branches are the same as Darcs.
+
+@table @samp
+@item CVS
+branch='warner-newfeature', files=['src/foo.c']
+@item SVN
+branch='branches/warner-newfeature', files=['src/foo.c']
+@item Darcs
+branch='warner-newfeature', files=['src/foo.c']
+@item Mercurial
+branch='warner-newfeature', files=['src/foo.c']
+@item Arch/Bazaar
+branch='buildbot--usebranches--0', files=['buildbot/master.py']
+@end table
+
+@heading Links
+
+@c TODO: who is using 'links'? how is it being used?
+
+Finally, the Change might have a @code{links} list, which is intended
+to provide a list of URLs to a @emph{viewcvs}-style web page that
+provides more detail for this Change, perhaps including the full file
+diffs.
+
+
+@node Schedulers, BuildSet, Version Control Systems, Concepts
+@section Schedulers
+
+@cindex Scheduler
+
+Each Buildmaster has a set of @code{Scheduler} objects, each of which
+gets a copy of every incoming Change. The Schedulers are responsible
+for deciding when Builds should be run. Some Buildbot installations
+might have a single Scheduler, while others may have several, each for
+a different purpose.
+
+For example, a ``quick'' scheduler might exist to give immediate
+feedback to developers, hoping to catch obvious problems in the code
+that can be detected quickly. These typically do not run the full test
+suite, nor do they run on a wide variety of platforms. They also
+usually do a VC update rather than performing a brand-new checkout
+each time. You could have a ``quick'' scheduler which used a 30 second
+timeout, and feeds a single ``quick'' Builder that uses a VC
+@code{mode='update'} setting.
+
+A separate ``full'' scheduler would run more comprehensive tests a
+little while later, to catch more subtle problems. This scheduler
+would have a longer tree-stable-timer, maybe 30 minutes, and would
+feed multiple Builders (with a @code{mode=} of @code{'copy'},
+@code{'clobber'}, or @code{'export'}).
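+
+As a sketch of that arrangement (the builder names are hypothetical,
+and the Scheduler arguments are described later in this manual):
+
+@example
+from buildbot.scheduler import Scheduler
+quick = Scheduler(name="quick", branch=None, treeStableTimer=30,
+                  builderNames=["quick-linux"])
+full = Scheduler(name="full", branch=None, treeStableTimer=30*60,
+                 builderNames=["full-linux", "full-osx", "full-solaris"])
+c['schedulers'] = [quick, full]
+@end example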
+
+The @code{tree-stable-timer} and @code{isFileImportant} decisions are
+made by the Scheduler. Dependencies are also implemented here.
+Periodic builds (those which are run every N seconds rather than after
+new Changes arrive) are triggered by a special @code{Periodic}
+Scheduler subclass. The default Scheduler class can also be told to
+watch for specific branches, ignoring Changes on other branches. This
+may be useful if you have a trunk and a few release branches which
+should be tracked, but you don't want the Buildbot to pay
+attention to several dozen private user branches.
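+
+For example, a nightly build that ignores Changes entirely could be
+set up with something like the following (a sketch; the Periodic
+arguments are documented with the other Schedulers):
+
+@example
+from buildbot.scheduler import Periodic
+nightly = Periodic(name="nightly", builderNames=["full-linux"],
+                   periodicBuildTimer=24*60*60)
+@end example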
+
+Some Schedulers may trigger builds for reasons other than the arrival
+of recent Changes. For example, a Scheduler subclass could connect to a
+remote buildmaster and watch for builds of a library to succeed before
+triggering a local build that uses that library.
+
+Each Scheduler creates and submits @code{BuildSet} objects to the
+@code{BuildMaster}, which is then responsible for making sure the
+individual @code{BuildRequests} are delivered to the target
+@code{Builders}.
+
+@code{Scheduler} instances are activated by placing them in the
+@code{c['schedulers']} list in the buildmaster config file. Each
+Scheduler has a unique name.
+
+
+@node BuildSet, BuildRequest, Schedulers, Concepts
+@section BuildSet
+
+@cindex BuildSet
+
+A @code{BuildSet} is the name given to a set of Builds that all
+compile/test the same version of the tree on multiple Builders. In
+general, all these component Builds will perform the same sequence of
+Steps, using the same source code, but on different platforms or
+against a different set of libraries.
+
+The @code{BuildSet} is tracked as a single unit, which fails if any of
+the component Builds have failed, and therefore can succeed only if
+@emph{all} of the component Builds have succeeded. There are two kinds
+of status notification messages that can be emitted for a BuildSet:
+the @code{firstFailure} type (which fires as soon as we know the
+BuildSet will fail), and the @code{Finished} type (which fires once
+the BuildSet has completely finished, regardless of whether the
+overall set passed or failed).
+
+A @code{BuildSet} is created with a @emph{source stamp} tuple of
+(branch, revision, changes, patch), some of which may be None, and a
+list of Builders on which it is to be run. They are then given to the
+BuildMaster, which is responsible for creating a separate
+@code{BuildRequest} for each Builder.
+
+There are a couple of different likely values for the
+@code{SourceStamp}:
+
+@table @code
+@item (revision=None, changes=[CHANGES], patch=None)
+This is a @code{SourceStamp} used when a series of Changes have
+triggered a build. The VC step will attempt to check out a tree that
+contains CHANGES (and any changes that occurred before CHANGES, but
+not any that occurred after them).
+
+@item (revision=None, changes=None, patch=None)
+This builds the most recent code on the default branch. This is the
+sort of @code{SourceStamp} that would be used on a Build that was
+triggered by a user request, or a Periodic scheduler. It is also
+possible to configure the VC Source Step to always check out the
+latest sources rather than paying attention to the Changes in the
+SourceStamp, which will result in the same behavior as this.
+
+@item (branch=BRANCH, revision=None, changes=None, patch=None)
+This builds the most recent code on the given BRANCH. Again, this is
+generally triggered by a user request or Periodic build.
+
+@item (revision=REV, changes=None, patch=(LEVEL, DIFF))
+This checks out the tree at the given revision REV, then applies a
+patch (using @code{diff -pLEVEL <DIFF}). The @ref{try} feature uses
+this kind of @code{SourceStamp}. If @code{patch} is None, the patching
+step is bypassed.
+
+@end table
+
+The buildmaster is responsible for turning the @code{BuildSet} into a
+set of @code{BuildRequest} objects and queueing them on the
+appropriate Builders.
+
+
+ at node BuildRequest, Builder, BuildSet, Concepts
+ at section BuildRequest
+
+ at cindex BuildRequest
+
+A @code{BuildRequest} is a request to build a specific set of sources
+on a single specific Builder. Each Builder runs the
+ at code{BuildRequest} as soon as it can (i.e. when an associated
+buildslave becomes free).
+
+The @code{BuildRequest} contains the @code{SourceStamp} specification.
+The actual process of running the build (the series of Steps that will
+be executed) is implemented by the @code{Build} object. In the future
+this might be changed, to have the @code{Build} define @emph{what}
+gets built, and a separate @code{BuildProcess} (provided by the
+Builder) to define @emph{how} it gets built.
+
+The @code{BuildRequest} may be mergeable with other compatible
+ at code{BuildRequest}s. Builds that are triggered by incoming Changes
+will generally be mergeable. Builds that are triggered by user
+requests are generally not, unless they are multiple requests to build
+the @emph{latest sources} of the same branch.
+
+ at node Builder, Users, BuildRequest, Concepts
+ at section Builder
+
+ at cindex Builder
+
+The @code{Builder} is a long-lived object which controls all Builds of
+a given type. Each one is created when the config file is first
+parsed, and lives forever (or rather until it is removed from the
+config file). It mediates the connections to the buildslaves that do
+all the work, and is responsible for creating the @code{Build} objects
+that decide @emph{how} a build is performed (i.e., which steps are
+executed in what order).
+
+Each @code{Builder} gets a unique name, and the path name of a
+directory where it gets to do all its work (there is a
+buildmaster-side directory for keeping status information, as well as
+a buildslave-side directory where the actual checkout/compile/test
+commands are executed). It also gets a @code{BuildFactory}, which is
+responsible for creating new @code{Build} instances: because the
+ at code{Build} instance is what actually performs each build, choosing
+the @code{BuildFactory} is the way to specify what happens each time a
+build is done.
+
+Each @code{Builder} is associated with one or more @code{BuildSlaves}.
+A @code{Builder} which is used to perform OS-X builds (as opposed to
+Linux or Solaris builds) should naturally be associated with an
+OS-X-based buildslave.
+
+
+ at node Users,  , Builder, Concepts
+ at section Users
+
+ at cindex Users
+
+Buildbot has a somewhat limited awareness of @emph{users}. It assumes
+the world consists of a set of developers, each of whom can be
+described by a couple of simple attributes. These developers make
+changes to the source code, causing builds which may succeed or fail.
+
+Each developer is primarily known through the source control system. Each
+Change object that arrives is tagged with a @code{who} field that
+typically gives the account name (on the repository machine) of the user
+responsible for that change. This string is the primary key by which the
+User is known, and is displayed on the HTML status pages and in each Build's
+``blamelist''.
+
+To do more with the User than just refer to them, this username needs to
+be mapped into an address of some sort. The responsibility for this mapping
+is left up to the status module which needs the address. The core code knows
+nothing about email addresses or IRC nicknames, just user names.
+
+ at menu
+* Doing Things With Users::     
+* Email Addresses::             
+* IRC Nicknames::               
+* Live Status Clients::         
+ at end menu
+
+ at node Doing Things With Users, Email Addresses, Users, Users
+ at subsection Doing Things With Users
+
+Each Change has a single User who is responsible for that Change. Most
+Builds have a set of Changes: the Build represents the first time these
+Changes have been built and tested by the Buildbot. The build has a
+``blamelist'' that consists of a simple union of the Users responsible
+for all the Build's Changes.
+
+The Build provides (through the IBuildStatus interface) a list of Users
+who are ``involved'' in the build. For now this is equal to the
+blamelist, but in the future it will be expanded to include a ``build
+sheriff'' (a person who is ``on duty'' at that time and responsible for
+watching over all builds that occur during their shift), as well as
+per-module owners who simply want to keep watch over their domain (chosen by
+subdirectory or a regexp matched against the filenames pulled out of the
+Changes). The Involved Users are those who probably have an interest in the
+results of any given build.
+
+In the future, Buildbot will acquire the concept of ``Problems'',
+which last longer than builds and have beginnings and ends. For example, a
+test case which passed in one build and then failed in the next is a
+Problem. The Problem lasts until the test case starts passing again, at
+which point the Problem is said to be ``resolved''.
+
+If there appears to be a code change that went into the tree at the
+same time as the test started failing, that Change is marked as being
+responsible for the Problem, and the user who made the change is added
+to the Problem's ``Guilty'' list. In addition to this user, there may
+be others who share responsibility for the Problem (module owners,
+sponsoring developers). In addition to the Responsible Users, there
+may be a set of Interested Users, who take an interest in the fate of
+the Problem.
+
+Problems therefore have sets of Users who may want to be kept aware of
+the condition of the problem as it changes over time. If configured, the
+Buildbot can pester everyone on the Responsible list with increasing
+harshness until the problem is resolved, with the most harshness reserved
+for the Guilty parties themselves. The Interested Users may merely be told
+when the problem starts and stops, as they are not actually responsible for
+fixing anything.
+
+ at node Email Addresses, IRC Nicknames, Doing Things With Users, Users
+ at subsection Email Addresses
+
+The @code{buildbot.status.mail.MailNotifier} class provides a
+status target which can send email about the results of each build. It
+accepts a static list of email addresses to which each message should be
+delivered, but it can also be configured to send mail to the Build's
+Interested Users. To do this, it needs a way to convert User names into
+email addresses.
+
+For many VC systems, the User Name is actually an account name on the
+system which hosts the repository. As such, turning the name into an
+email address is a simple matter of appending
+``@@repositoryhost.com''. Some projects use other kinds of mappings
+(for example the preferred email address may be at ``project.org''
+despite the repository host being named ``cvs.project.org''), and some
+VC systems have full separation between the concept of a user and that
+of an account on the repository host (like Perforce). Some systems
+(like Arch) put a full contact email address in every change.
+
+To convert these names to addresses, the MailNotifier uses an EmailLookup
+object. This provides a .getAddress method which accepts a name and
+(eventually) returns an address. The default @code{MailNotifier}
+module provides an EmailLookup which simply appends a static string,
+configurable when the notifier is created. To create more complex behaviors
+(perhaps using an LDAP lookup, or using ``finger'' on a central host to
+determine a preferred address for the developer), provide a different object
+as the @code{lookup} argument.
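+
+As a rough sketch of a custom lookup (the class name and domain here
+are hypothetical, and this assumes the @code{IEmailLookup} interface
+declared in @code{buildbot.interfaces}):
+
+ at example
+from zope.interface import implements
+from buildbot import interfaces
+from buildbot.status import mail
+
+class DomainLookup:
+    """Turn a VC user name into an email address by appending a domain."""
+    implements(interfaces.IEmailLookup)
+    def getAddress(self, name):
+        # a real implementation might consult LDAP or a flat file here
+        return name + "@@example.org"
+
+m = mail.MailNotifier(fromaddr="buildbot@@example.org",
+                      sendToInterestedUsers=True,
+                      lookup=DomainLookup())
+c['status'].append(m)
+ at end example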
+
+In the future, when the Problem mechanism has been set up, the Buildbot
+will need to send mail to arbitrary Users. It will do this by locating a
+MailNotifier-like object among all the buildmaster's status targets, and
+asking it to send messages to various Users. This means the User-to-address
+mapping only has to be set up once, in your MailNotifier, and every email
+message the buildbot emits will take advantage of it.
+
+ at node IRC Nicknames, Live Status Clients, Email Addresses, Users
+ at subsection IRC Nicknames
+
+Like MailNotifier, the @code{buildbot.status.words.IRC} class
+provides a status target which can announce the results of each build. It
+also provides an interactive interface by responding to online queries
+posted in the channel or sent as private messages.
+
+In the future, the buildbot can be configured to map User names to IRC
+nicknames, to watch for the recent presence of these nicknames, and to
+deliver build status messages to the interested parties. Like
+ at code{MailNotifier} does for email addresses, the @code{IRC} object
+will have an @code{IRCLookup} which is responsible for nicknames. The
+mapping can be set up statically, or it can be updated by online users
+themselves (by claiming a username with some kind of ``buildbot: i am
+user warner'' commands).
+
+Once the mapping is established, the rest of the buildbot can ask the
+ at code{IRC} object to send messages to various users. It can report on
+the likelihood that the user saw the given message (based upon how long the
+user has been inactive on the channel), which might prompt the Problem
+Hassler logic to send them an email message instead.
+
+ at node Live Status Clients,  , IRC Nicknames, Users
+ at subsection Live Status Clients
+
+The Buildbot also offers a PB-based status client interface which can
+display real-time build status in a GUI panel on the developer's desktop.
+This interface is normally anonymous, but it could be configured to let the
+buildmaster know @emph{which} developer is using the status client. The
+status client could then be used as a message-delivery service, providing an
+alternative way to deliver low-latency high-interruption messages to the
+developer (like ``hey, you broke the build'').
+
+
+ at node Configuration, Getting Source Code Changes, Concepts, Top
+ at chapter Configuration
+
+ at cindex Configuration
+
+The buildbot's behavior is defined by the ``config file'', which
+normally lives in the @file{master.cfg} file in the buildmaster's base
+directory (but this can be changed with an option to the
+ at code{buildbot create-master} command). This file completely specifies
+which Builders are to be run, which slaves they should use, how
+Changes should be tracked, and where the status information is to be
+sent. The buildmaster's @file{buildbot.tac} file names the base
+directory; everything else comes from the config file.
+
+A sample config file was installed for you when you created the
+buildmaster, but you will need to edit it before your buildbot will do
+anything useful.
+
+This chapter gives an overview of the format of this file and the
+various sections in it. You will need to read the later chapters to
+understand how to fill in each section properly.
+
+ at menu
+* Config File Format::          
+* Loading the Config File::     
+* Defining the Project::        
+* Listing Change Sources and Schedulers::  
+* Setting the slaveport::       
+* Buildslave Specifiers::       
+* Defining Builders::           
+* Defining Status Targets::     
+* Debug options::               
+ at end menu
+
+ at node Config File Format, Loading the Config File, Configuration, Configuration
+ at section Config File Format
+
+The config file is, fundamentally, just a piece of Python code which
+defines a dictionary named @code{BuildmasterConfig}, with a number of
+keys that are treated specially. You don't need to know Python to do
+basic configuration, though; you can just copy the syntax of the
+sample file. If you @emph{are} comfortable writing Python code,
+however, you can use all the power of a full programming language to
+achieve more complicated configurations.
+
+The @code{BuildmasterConfig} name is the only one which matters: all
+other names defined during the execution of the file are discarded.
+When parsing the config file, the Buildmaster generally compares the
+old configuration with the new one and performs the minimum set of
+actions necessary to bring the buildbot up to date: Builders which are
+not changed are left untouched, and Builders which are modified get to
+keep their old event history.
+
+Basic Python syntax: comments start with a hash character (``#''),
+tuples are defined with @code{(parenthesis, pairs)}, arrays are
+defined with @code{[square, brackets]}; tuples and arrays are mostly
+interchangeable. Dictionaries (data structures which map ``keys'' to
+``values'') are defined with curly braces: @code{@{'key1': 'value1',
+'key2': 'value2'@} }. Function calls (and object instantiation) can use
+named parameters, like @code{w = html.Waterfall(http_port=8010)}.
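+
+Putting those pieces together, a small fragment of a config file might
+look like the following sketch (the names and port numbers are only
+examples):
+
+ at example
+# comments start with a hash character
+c = BuildmasterConfig = @{@}        # an empty dictionary
+c['bots'] = [("bot1", "passwd1")]  # a list containing one tuple
+from buildbot.status import html
+w = html.Waterfall(http_port=8010) # a call with a named parameter
+c['status'] = [w]
+ at end example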
+
+The config file starts with a series of @code{import} statements,
+which make various kinds of Steps and Status targets available for
+later use. The main @code{BuildmasterConfig} dictionary is created,
+then it is populated with a variety of keys. These keys are broken
+roughly into the following sections, each of which is documented in
+the rest of this chapter:
+
+ at itemize @bullet
+ at item
+Project Definitions
+ at item
+Change Sources / Schedulers
+ at item
+Slaveport
+ at item
+Buildslave Configuration
+ at item
+Builders / Interlocks
+ at item
+Status Targets
+ at item
+Debug options
+ at end itemize
+
+The config file can use a few names which are placed into its namespace:
+
+ at table @code
+ at item basedir
+the base directory for the buildmaster. This string has not been
+expanded, so it may start with a tilde. It needs to be expanded before
+use. The config file is located in
+ at code{os.path.expanduser(os.path.join(basedir, 'master.cfg'))}
+
+ at end table
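+
+For example, a config file that wants to read a neighboring file could
+expand @code{basedir} first (a sketch only; @file{slaves.txt} is a
+hypothetical file of whitespace-separated name/password pairs):
+
+ at example
+import os.path
+basedir = os.path.expanduser(basedir)
+slavefile = os.path.join(basedir, "slaves.txt")
+# each line of the hypothetical file holds one 'name password' pair
+c['bots'] = [tuple(line.split()) for line in open(slavefile)]
+ at end example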
+
+
+ at node Loading the Config File, Defining the Project, Config File Format, Configuration
+ at section Loading the Config File
+
+The config file is only read at specific points in time. It is first
+read when the buildmaster is launched. Once it is running, there are
+various ways to ask it to reload the config file. If you are on the
+system hosting the buildmaster, you can send a @code{SIGHUP} signal to
+it: the @command{buildbot} tool has a shortcut for this:
+
+ at example
+buildbot reconfig @var{BASEDIR}
+ at end example
+
+This command will show you all of the lines from @file{twistd.log}
+that relate to the reconfiguration. If there are any problems during
+the config-file reload, they will be displayed in these lines.
+
+The debug tool (@code{buildbot debugclient --master HOST:PORT}) has a
+``Reload .cfg'' button which will also trigger a reload. In the
+future, there will be other ways to accomplish this step (probably a
+password-protected button on the web page, as well as a privileged IRC
+command).
+
+When reloading the config file, the buildmaster will endeavor to
+change as little as possible about the running system. For example,
+although old status targets may be shut down and new ones started up,
+any status targets that were not changed since the last time the
+config file was read will be left running and untouched. Likewise any
+Builders which have not been changed will be left running. If a
+Builder is modified (say, the build process is changed) while a Build
+is currently running, that Build will keep running with the old
+process until it completes. Any previously queued Builds (or Builds
+which get queued after the reconfig) will use the new process.
+
+ at node Defining the Project, Listing Change Sources and Schedulers, Loading the Config File, Configuration
+ at section Defining the Project
+
+There are a couple of basic settings that you use to tell the buildbot
+what project it is working on. This information is used by status
+reporters to let users find out more about the codebase being
+exercised by this particular Buildbot installation.
+
+ at example
+c['projectName'] = "Buildbot"
+c['projectURL'] = "http://buildbot.sourceforge.net/"
+c['buildbotURL'] = "http://localhost:8010/"
+ at end example
+
+ at bcindex c['projectName']
+ at code{projectName} is a short string that will be used to describe the
+project that this buildbot is working on. For example, it is used as
+the title of the waterfall HTML page. 
+
+ at bcindex c['projectURL']
+ at code{projectURL} is a string that gives a URL for the project as a
+whole. HTML status displays will show @code{projectName} as a link to
+ at code{projectURL}, to provide a link from buildbot HTML pages to your
+project's home page.
+
+ at bcindex c['buildbotURL']
+The @code{buildbotURL} string should point to the location where the
+buildbot's internal web server (usually the @code{html.Waterfall}
+page) is visible. This typically uses the port number set when you
+create the @code{Waterfall} object: the buildbot needs your help to
+figure out a suitable externally-visible host name.
+
+When status notices are sent to users (either by email or over IRC),
+ at code{buildbotURL} will be used to create a URL to the specific build
+or problem that they are being notified about. It will also be made
+available to queriers (over IRC) who want to find out where to get
+more information about this buildbot.
+
+
+ at node Listing Change Sources and Schedulers, Setting the slaveport, Defining the Project, Configuration
+ at section Listing Change Sources and Schedulers
+
+ at bcindex c['sources']
+The @code{c['sources']} key is a list of ChangeSource
+instances at footnote{To be precise, it is a list of objects which all
+implement the @code{buildbot.interfaces.IChangeSource} Interface}.
+This defines how the buildmaster learns about source code changes.
+More information about what goes here is available in @xref{Getting
+Source Code Changes}.
+
+ at example
+import buildbot.changes.pb
+c['sources'] = [buildbot.changes.pb.PBChangeSource()]
+ at end example
+
+ at bcindex c['schedulers']
+ at code{c['schedulers']} is a list of Scheduler instances, each of which
+causes builds to be started on a particular set of Builders. The two
+basic Scheduler classes you are likely to start with are
+ at code{Scheduler} and @code{Periodic}, but you can write a customized
+subclass to implement more complicated build scheduling.
+
+The docstring for @code{buildbot.scheduler.Scheduler} is the best
+place to see all the options that can be used. Type @code{pydoc
+buildbot.scheduler.Scheduler} to see it, or look in
+ at file{buildbot/scheduler.py} directly.
+
+The basic Scheduler takes four arguments:
+
+ at table @code
+ at item name
+Each Scheduler must have a unique name. This is only used in status
+displays.
+
+ at item branch
+This Scheduler will pay attention to a single branch, ignoring Changes
+that occur on other branches. Setting @code{branch} equal to the
+special value of @code{None} means it should only pay attention to the
+default branch. Note that @code{None} is a keyword, not a string, so
+you want to use @code{None} and not @code{"None"}.
+
+ at item treeStableTimer
+The Scheduler will wait for this many seconds before starting the
+build. If new changes are made during this interval, the timer will be
+restarted, so the build will only start once this many seconds have
+passed without any further changes.
+
+ at item builderNames
+When the tree-stable-timer finally expires, builds will be started on
+these Builders. Each Builder gets a unique name: these strings must
+match.
+
+ at end table
+
+ at example
+from buildbot import scheduler
+quick = scheduler.Scheduler("quick", None, 60,
+                            ["quick-linux", "quick-netbsd"])
+full = scheduler.Scheduler("full", None, 5*60,
+                           ["full-linux", "full-netbsd", "full-OSX"])
+nightly = scheduler.Periodic("nightly", ["full-solaris"], 24*60*60)
+c['schedulers'] = [quick, full, nightly]
+ at end example
+
+In this example, the two ``quick'' builds are triggered 60 seconds
+after the tree has been changed. The ``full'' builds do not run quite
+so quickly (they wait 5 minutes), so hopefully if the quick builds
+fail due to a missing file or really simple typo, the developer can
+discover and fix the problem before the full builds are started. Both
+Schedulers only pay attention to the default branch: any changes on
+other branches are ignored by these Schedulers. Each Scheduler
+triggers a different set of Builders, referenced by name.
+
+The third Scheduler in this example just runs the full solaris build
+once per day. (note that this Scheduler only lets you control the time
+between builds, not the absolute time-of-day of each Build, so this
+could easily wind up a ``daily'' or ``every afternoon'' scheduler
+depending upon when it was first activated).
+
+ at menu
+* Scheduler Types::             
+* Build Dependencies::          
+ at end menu
+
+ at node Scheduler Types, Build Dependencies, Listing Change Sources and Schedulers, Listing Change Sources and Schedulers
+ at subsection Scheduler Types
+
+ at slindex buildbot.scheduler.Scheduler
+ at slindex buildbot.scheduler.AnyBranchScheduler
+ at slindex buildbot.scheduler.Periodic
+ at slindex buildbot.scheduler.Nightly
+
+Here is a brief catalog of the available Scheduler types. All these
+Schedulers are classes in @code{buildbot.scheduler}, and the
+docstrings there are the best source of documentation on the arguments
+taken by each one.
+
+ at table @code
+ at item Scheduler
+This is the default Scheduler class. It follows exactly one branch,
+and starts a configurable tree-stable-timer after each change on that
+branch. When the timer expires, it starts a build on some set of
+Builders. The Scheduler accepts a @code{fileIsImportant} function
+which can be used to ignore some Changes if they do not affect any
+``important'' files.
+
+ at item AnyBranchScheduler
+This scheduler uses a tree-stable-timer like the default one, but
+follows multiple branches at once. Each branch gets a separate timer.
+
+ at item Dependent
+This scheduler watches an ``upstream'' Builder. When that Builder
+successfully builds a particular set of Changes, it triggers builds of
+the same code on a configured set of ``downstream'' builders. The next
+section (@pxref{Build Dependencies}) describes this scheduler in more
+detail.
+
+ at item Periodic
+This simple scheduler just triggers a build every N seconds.
+
+ at item Nightly
+This is a highly configurable periodic build scheduler, which triggers a
+build at particular times of day, week, month, or year. The
+configuration syntax is very similar to the well-known @code{crontab}
+format, in which you provide values for minute, hour, day, and month
+(some of which can be wildcards), and a build is triggered whenever
+the current time matches the given constraints. This can run a build
+every night, every morning, every weekend, alternate Thursdays, on
+your boss's birthday, etc.
+
+ at item Try_Jobdir / Try_Userpass
+This scheduler allows developers to use the @code{buildbot try}
+command to trigger builds of code they have not yet committed. See
+ at ref{try} for complete details.
+
+ at end table
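+
+To make the catalog concrete, here is a sketch using several of these
+classes together (the branch names, builder names, and times are made
+up, and the exact constructor arguments should be checked against the
+docstrings mentioned above):
+
+ at example
+from buildbot import scheduler
+
+def important(change):
+    # ignore Changes that only touch .txt files
+    for name in change.files:
+        if not name.endswith(".txt"):
+            return True
+    return False
+
+quick = scheduler.Scheduler("quick", None, 60, ["quick-linux"],
+                            fileIsImportant=important)
+branches = scheduler.AnyBranchScheduler("all-branches",
+                                        ["trunk", "branches/1.5.x"],
+                                        5*60, ["full-linux"])
+nightly = scheduler.Nightly("nightly", ["full-solaris"],
+                            hour=3, minute=0)
+c['schedulers'] = [quick, branches, nightly]
+ at end example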
+
+ at node Build Dependencies,  , Scheduler Types, Listing Change Sources and Schedulers
+ at subsection Build Dependencies
+
+ at cindex Dependent
+ at cindex Dependencies
+ at slindex buildbot.scheduler.Dependent
+
+It is common to wind up with one kind of build which should only be
+performed if the same source code was successfully handled by some
+other kind of build first. An example might be a packaging step: you
+might only want to produce .deb or RPM packages from a tree that was
+known to compile successfully and pass all unit tests. You could put
+the packaging step in the same Build as the compile and testing steps,
+but there might be other reasons to not do this (in particular you
+might have several Builders worth of compiles/tests, but only wish to
+do the packaging once). Another example is if you want to skip the
+``full'' builds after a failing ``quick'' build of the same source
+code. Or, if one Build creates a product (like a compiled library)
+that is used by some other Builder, you'd want to make sure the
+consuming Build is run @emph{after} the producing one.
+
+You can use @code{Dependencies} to express this relationship to the
+Buildbot. There is a special kind of Scheduler named
+ at code{scheduler.Dependent} that will watch an ``upstream'' Scheduler
+for builds to complete successfully (on all of its Builders). Each
+time that happens, the same source code (i.e. the same
+ at code{SourceStamp}) will be used to start a new set of builds, on a
+different set of Builders. This ``downstream'' scheduler doesn't pay
+attention to Changes at all, it only pays attention to the upstream
+scheduler.
+
+If the SourceStamp fails on any of the Builders in the upstream set,
+the downstream builds will not fire.
+
+ at example
+from buildbot import scheduler
+tests = scheduler.Scheduler("tests", None, 5*60,
+                            ["full-linux", "full-netbsd", "full-OSX"])
+package = scheduler.Dependent("package",
+                              tests, # upstream scheduler
+                              ["make-tarball", "make-deb", "make-rpm"])
+c['schedulers'] = [tests, package]
+ at end example
+
+Note that @code{Dependent}'s upstream scheduler argument is given as a
+ at code{Scheduler} @emph{instance}, not a name. This makes it impossible
+to create circular dependencies in the config file.
+
+
+ at node Setting the slaveport, Buildslave Specifiers, Listing Change Sources and Schedulers, Configuration
+ at section Setting the slaveport
+
+ at bcindex c['slavePortnum']
+
+The buildmaster will listen on a TCP port of your choosing for
+connections from buildslaves. It can also use this port for
+connections from remote Change Sources, status clients, and debug
+tools. This port should be visible to the outside world, and you'll
+need to tell your buildslave admins about your choice.
+
+It does not matter which port you pick, as long as it is externally
+visible; however, you should probably use something larger than 1024,
+since most operating systems don't allow non-root processes to bind to
+low-numbered ports. If your buildmaster is behind a firewall or a NAT
+box of some sort, you may have to configure your firewall to permit
+inbound connections to this port.
+
+ at example
+c['slavePortnum'] = 10000
+ at end example
+
+ at code{c['slavePortnum']} is a @emph{strports} specification string,
+defined in the @code{twisted.application.strports} module (try
+ at command{pydoc twisted.application.strports} to get documentation on
+the format). This means that you can have the buildmaster listen on a
+localhost-only port by doing:
+
+ at example
+c['slavePortnum'] = "tcp:10000:interface=127.0.0.1"
+ at end example
+
+This might be useful if you only run buildslaves on the same machine,
+and they are all configured to contact the buildmaster at
+ at code{localhost:10000}.
+
+
+ at node Buildslave Specifiers, Defining Builders, Setting the slaveport, Configuration
+ at section Buildslave Specifiers
+
+ at bcindex c['bots']
+
+The @code{c['bots']} key is a list of known buildslaves. Each
+buildslave is defined by a tuple of (slavename, slavepassword). These
+are the same two values that need to be provided to the buildslave
+administrator when they create the buildslave.
+
+ at example
+c['bots'] = [('bot-solaris', 'solarispasswd'),
+             ('bot-bsd', 'bsdpasswd'),
+            ]
+ at end example
+
+The slavenames must be unique, of course. The password exists to
+prevent evildoers from interfering with the buildbot by inserting
+their own (broken) buildslaves into the system and thus displacing the
+real ones.
+
+Buildslaves with an unrecognized slavename or a non-matching password
+will be rejected when they attempt to connect, and a message
+describing the problem will be put in the log file (see @ref{Logfiles}).
+
+
+ at node Defining Builders, Defining Status Targets, Buildslave Specifiers, Configuration
+ at section Defining Builders
+
+ at bcindex c['builders']
+
+The @code{c['builders']} key is a list of dictionaries which specify
+the Builders. The Buildmaster runs a collection of Builders, each of
+which handles a single type of build (e.g. full versus quick), on a
+single build slave. A Buildbot which makes sure that the latest code
+(``HEAD'') compiles correctly across four separate architectures will
+have four Builders, each performing the same build but on different
+slaves (one per platform).
+
+Each Builder gets a separate column in the waterfall display. In
+general, each Builder runs independently (although various kinds of
+interlocks can cause one Builder to have an effect on another).
+
+Each Builder specification dictionary has several required keys:
+
+ at table @code
+ at item name
+This specifies the Builder's name, which is used in status
+reports.
+
+ at item slavename
+This specifies which buildslave will be used by this Builder.
+ at code{slavename} must appear in the @code{c['bots']} list. Each
+buildslave can accommodate multiple Builders.
+
+ at item slavenames
+If you provide @code{slavenames} instead of @code{slavename}, you can
+give a list of buildslaves which are capable of running this Builder.
+If multiple buildslaves are available for any given Builder, you will
+have some measure of redundancy: in case one slave goes offline, the
+others can still keep the Builder working. In addition, multiple
+buildslaves will allow multiple simultaneous builds for the same
+Builder, which might be useful if you have a lot of forced or ``try''
+builds taking place.
+
+If you use this feature, it is important to make sure that the
+buildslaves are all, in fact, capable of running the given build. The
+slave hosts should be configured similarly, otherwise you will spend a
+lot of time trying (unsuccessfully) to reproduce a failure that only
+occurs on some of the buildslaves and not the others. Different
+platforms, operating systems, and versions of major programs or
+libraries all mean you should use separate Builders.
+
+ at item builddir
+This specifies the name of a subdirectory (under the base directory)
+in which everything related to this builder will be placed. On the
+buildmaster, this holds build status information. On the buildslave,
+this is where checkouts, compiles, and tests are run.
+
+ at item factory
+This is a @code{buildbot.process.factory.BuildFactory} instance which
+controls how the build is performed. Full details appear in their own
+chapter, @xref{Build Process}. Parameters like the location of the CVS
+repository and the compile-time options used for the build are
+generally provided as arguments to the factory's constructor.
+
+ at end table
+
+Other optional keys may be set on each Builder:
+
+ at table @code
+
+ at item category
+If provided, this is a string that identifies a category for the
+builder to be a part of. Status clients can limit themselves to a
+subset of the available categories. A common use for this is to add
+new builders to your setup (for a new module, or for a new buildslave)
+that do not work correctly yet, without mixing them in with the
+active builders. You can put these new builders in a test
+category, make your main status clients ignore them, and have only
+private status clients pick them up. As soon as they work, you can
+move them over to the active category.
+
+ at end table
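+
+Putting these keys together, a builder specification might look like
+the following sketch (the names are hypothetical, and the
+ at code{factory} contents are covered in @ref{Build Process}):
+
+ at example
+from buildbot.process import factory
+from buildbot.steps import source, shell
+
+f = factory.BuildFactory()
+f.addStep(source.CVS,
+          cvsroot=":pserver:anon@@cvs.example.com:/cvsroot",
+          cvsmodule="project", mode="update")
+f.addStep(shell.Compile, command=["make", "all"])
+
+b1 = @{'name': "full-linux",
+      'slavename': "bot-linux",
+      'builddir': "full-linux",
+      'factory': f,
+      'category': "stable",
+     @}
+c['builders'] = [b1]
+ at end example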
+
+
+ at node Defining Status Targets, Debug options, Defining Builders, Configuration
+ at section Defining Status Targets
+
+The Buildmaster has a variety of ways to present build status to
+various users. Each such delivery method is a ``Status Target'' object
+in the configuration's @code{status} list. To add status targets, you
+just append more objects to this list:
+
+ at bcindex c['status']
+
+ at example
+c['status'] = []
+
+from buildbot.status import html
+c['status'].append(html.Waterfall(http_port=8010))
+
+from buildbot.status import mail
+m = mail.MailNotifier(fromaddr="buildbot@@localhost",
+                      extraRecipients=["builds@@lists.example.com"],
+                      sendToInterestedUsers=False)
+c['status'].append(m)
+
+from buildbot.status import words
+c['status'].append(words.IRC(host="irc.example.com", nick="bb",
+                             channels=["#example"]))
+ at end example
+
+Status delivery has its own chapter, @xref{Status Delivery}, in which
+all the built-in status targets are documented.
+
+
+ at node Debug options,  , Defining Status Targets, Configuration
+ at section Debug options
+
+
+ at bcindex c['debugPassword']
+If you set @code{c['debugPassword']}, then you can connect to the
+buildmaster with the diagnostic tool launched by @code{buildbot
+debugclient MASTER:PORT}. From this tool, you can reload the config
+file, manually force builds, and inject changes, which may be useful
+for testing your buildmaster without actually committing changes to
+your repository (or before you have the Change Sources set up). The
+debug tool uses the same port number as the slaves do:
+ at code{c['slavePortnum']}, and is authenticated with this password.
+
+ at example
+c['debugPassword'] = "debugpassword"
+ at end example
+
+ at bcindex c['manhole']
+If you set @code{c['manhole']} to an instance of one of the classes in
+ at code{buildbot.manhole}, you can telnet or ssh into the buildmaster
+and get an interactive Python shell, which may be useful for debugging
+buildbot internals. It is probably only useful for buildbot
+developers. It exposes full access to the buildmaster's account
+(including the ability to modify and delete files), so it should not
+be enabled with a weak or easily guessable password.
+
+There are three separate @code{Manhole} classes. Two of them use SSH,
+one uses unencrypted telnet. Two of them use a username+password
+combination to grant access, one of them uses an SSH-style
+ at file{authorized_keys} file which contains a list of ssh public keys.
+
+ at table @code
+ at item manhole.AuthorizedKeysManhole
+You construct this with the name of a file that contains one SSH
+public key per line, just like @file{~/.ssh/authorized_keys}. If you
+provide a non-absolute filename, it will be interpreted relative to
+the buildmaster's base directory.
+
+ at item manhole.PasswordManhole
+This one accepts SSH connections but asks for a username and password
+when authenticating. It accepts only one such pair.
+
+
+ at item manhole.TelnetManhole
+This accepts regular unencrypted telnet connections, and asks for a
+username/password pair before providing access. Because this
+username/password is transmitted in the clear, and because Manhole
+access to the buildmaster is equivalent to granting full shell
+privileges to both the buildmaster and all the buildslaves (and to all
+accounts which then run code produced by the buildslaves), it is
+highly recommended that you use one of the SSH manholes instead.
+
+ at end table
+
+ at example
+# some examples:
+from buildbot import manhole
+c['manhole'] = manhole.AuthorizedKeysManhole(1234, "authorized_keys")
+c['manhole'] = manhole.PasswordManhole(1234, "alice", "mysecretpassword")
+c['manhole'] = manhole.TelnetManhole(1234, "bob", "snoop_my_password_please")
+ at end example
+
+The @code{Manhole} instance can be configured to listen on a specific
+port. You may wish to have this listening port bind to the loopback
+interface (sometimes known as ``lo0'', ``localhost'', or 127.0.0.1) to
+restrict access to clients which are running on the same host.
+
+ at example
+from buildbot.manhole import PasswordManhole
+c['manhole'] = PasswordManhole("tcp:9999:interface=127.0.0.1","admin","passwd")
+ at end example
+
+To have the @code{Manhole} listen on all interfaces, use
+ at code{"tcp:9999"} or simply 9999. This port specification uses
+ at code{twisted.application.strports}, so you can make it listen on SSL
+or even UNIX-domain sockets if you want.
+
+Note that using any Manhole requires that the TwistedConch package be
+installed, and that you be using Twisted version 2.0 or later.
+
+The buildmaster's SSH server will use a different host key than the
+normal sshd running on a typical unix host. This will cause the ssh
+client to complain about a ``host key mismatch'', because it does not
+realize there are two separate servers running on the same host. To
+avoid this, use a clause like the following in your @file{.ssh/config}
+file:
+
+ at example
+Host remotehost-buildbot
+ HostName remotehost
+ HostKeyAlias remotehost-buildbot
+ Port 9999
+ # use 'user' if you use PasswordManhole and your name is not 'admin'.
+ # if you use AuthorizedKeysManhole, this probably doesn't matter.
+ User admin
+ at end example
+
+
+ at node Getting Source Code Changes, Build Process, Configuration, Top
+ at chapter Getting Source Code Changes
+
+The most common way to use the Buildbot is centered around the idea of
+ at code{Source Trees}: a directory tree filled with source code of some form
+which can be compiled and/or tested. Some projects use languages that don't
+involve any compilation step: nevertheless there may be a @code{build} phase
+where files are copied or rearranged into a form that is suitable for
+installation. Some projects do not have unit tests, and the Buildbot is
+merely helping to make sure that the sources can compile correctly. But in
+all of these cases, the thing-being-tested is a single source tree.
+
+A Version Control System maintains a source tree, and tells the
+buildmaster when it changes. The first step of each Build is typically
+to acquire a copy of some version of this tree.
+
+This chapter describes how the Buildbot learns about what Changes have
+occurred. For more information on VC systems and Changes, see
+ at ref{Version Control Systems}.
+
+
+ at menu
+* Change Sources::              
+ at end menu
+
+
+
+ at node Change Sources,  , Getting Source Code Changes, Getting Source Code Changes
+ at section Change Sources
+
+ at c TODO: rework this, the one-buildmaster-one-tree thing isn't quite
+ at c so narrow-minded anymore
+
+Each Buildmaster watches a single source tree. Changes can be provided
+by a variety of ChangeSource types, however any given project will
+typically have only a single ChangeSource active. This section
+provides a description of all available ChangeSource types and
+explains how to set up each of them.
+
+There are a variety of ChangeSources available, some of which are
+meant to be used in conjunction with other tools to deliver Change
+events from the VC repository to the buildmaster.
+
+ at itemize @bullet
+
+ at item CVSToys
+This ChangeSource opens a TCP connection from the buildmaster to a
+waiting FreshCVS daemon that lives on the repository machine, and
+subscribes to hear about Changes.
+
+ at item MaildirSource
+This one watches a local maildir-format inbox for email sent out by
+the repository when a change is made. When a message arrives, it is
+parsed to create the Change object. A variety of parsing functions are
+available to accommodate different email-sending tools.
+
+ at item PBChangeSource
+This ChangeSource listens on a local TCP socket for inbound
+connections from a separate tool. Usually, this tool would be run on
+the VC repository machine in a commit hook. It is expected to connect
+to the TCP socket and send a Change message over the network
+connection. The @command{buildbot sendchange} command is one example
+of a tool that knows how to send these messages, so you can write a
+commit script for your VC system that calls it to deliver the Change.
+There are other tools in the contrib/ directory that use the same
+protocol.
+
+ at end itemize
+
+As a quick guide, here is a list of VC systems and the ChangeSources
+that might be useful with them. All of these ChangeSources are in the
+ at code{buildbot.changes} module.
+
+ at table @code
+ at item CVS
+
+ at itemize @bullet
+ at item freshcvs.FreshCVSSource (connected via TCP to the freshcvs daemon)
+ at item mail.FCMaildirSource (watching for email sent by a freshcvs daemon)
+ at item mail.BonsaiMaildirSource (watching for email sent by Bonsai)
+ at item mail.SyncmailMaildirSource (watching for email sent by syncmail)
+ at item pb.PBChangeSource (listening for connections from @code{buildbot
+sendchange} run in a loginfo script)
+ at item pb.PBChangeSource (listening for connections from a long-running
+ at code{contrib/viewcvspoll.py} polling process which examines the ViewCVS
+database directly)
+ at end itemize
+
+ at item SVN
+ at itemize @bullet
+ at item pb.PBChangeSource (listening for connections from
+ at code{contrib/svn_buildbot.py} run in a postcommit script)
+ at item pb.PBChangeSource (listening for connections from a long-running
+ at code{contrib/svn_watcher.py} or @code{contrib/svnpoller.py} polling
+process)
+ at item svnpoller.SVNPoller (polling the SVN repository)
+ at end itemize
+
+ at item Darcs
+ at itemize @bullet
+ at item pb.PBChangeSource (listening for connections from
+ at code{contrib/darcs_buildbot.py} run in a commit script)
+ at end itemize
+
+ at item Mercurial
+ at itemize @bullet
+ at item pb.PBChangeSource (listening for connections from
+ at code{contrib/hg_buildbot.py} run in an 'incoming' hook)
+ at end itemize
+
+ at item Arch/Bazaar
+ at itemize @bullet
+ at item pb.PBChangeSource (listening for connections from
+ at code{contrib/arch_buildbot.py} run in a commit hook)
+ at end itemize
+
+ at end table
+
+All VC systems can be driven by a PBChangeSource and the
+ at code{buildbot sendchange} tool run from some form of commit script.
+If you write an email parsing function, they can also all be driven by
+a suitable @code{MaildirSource}.
+
+
+ at menu
+* Choosing ChangeSources::      
+* CVSToys - PBService::         
+* CVSToys - mail notification::  
+* Other mail notification ChangeSources::  
+* PBChangeSource::              
+* P4Source::                    
+* BonsaiPoller::                
+* SVNPoller::                   
+ at end menu
+
+ at node Choosing ChangeSources, CVSToys - PBService, Change Sources, Change Sources
+ at subsection Choosing ChangeSources
+
+The @code{master.cfg} configuration file has a dictionary key named
+ at code{BuildmasterConfig['sources']}, which holds a list of
+ at code{IChangeSource} objects. The config file will typically create an
+object from one of the classes described below and stuff it into the
+list.
+
+ at example
+s = FreshCVSSourceNewcred(host="host", port=4519,
+                          user="alice", passwd="secret",
+                          prefix="Twisted")
+BuildmasterConfig['sources'] = [s]
+ at end example
+
+Each source tree has a nominal @code{top}. Each Change has a list of
+filenames, which are all relative to this top location. The
+ChangeSource is responsible for doing whatever is necessary to
+accomplish this. Most sources have a @code{prefix} argument: a partial
+pathname which is stripped from the front of all filenames provided to
+that @code{ChangeSource}. Files which are outside this sub-tree are
+ignored by the changesource: it does not generate Changes for those
+files.
+
+
+ at node CVSToys - PBService, CVSToys - mail notification, Choosing ChangeSources, Change Sources
+ at subsection CVSToys - PBService
+
+ at csindex buildbot.changes.freshcvs.FreshCVSSource
+
+The @uref{http://purl.net/net/CVSToys, CVSToys} package provides a
+server which runs on the machine that hosts the CVS repository it
+watches. It has a variety of ways to distribute commit notifications,
+and offers a flexible regexp-based way to filter out uninteresting
+changes. One of the notification options is named @code{PBService} and
+works by listening on a TCP port for clients. These clients subscribe
+to hear about commit notifications.
+
+The buildmaster has a CVSToys-compatible @code{PBService} client built
+in. There are two versions of it, one for old versions of CVSToys
+(1.0.9 and earlier) which used the @code{oldcred} authentication
+framework, and one for newer versions (1.0.10 and later) which use
+ at code{newcred}. Both are classes in the
+ at code{buildbot.changes.freshcvs} package.
+
+ at code{FreshCVSSourceNewcred} objects are created with the following
+parameters:
+
+ at table @samp
+
+ at item @code{host} and @code{port}
+these specify where the CVSToys server can be reached
+
+ at item @code{user} and @code{passwd}
+these specify the login information for the CVSToys server
+(@code{freshcvs}). These must match the server's values, which are
+defined in the @code{freshCfg} configuration file (which lives in the
+CVSROOT directory of the repository).
+
+ at item @code{prefix}
+this is the prefix to be found and stripped from filenames delivered
+by the CVSToys server. Most projects live in sub-directories of the
+main repository, as siblings of the CVSROOT sub-directory, so
+typically this prefix is set to that top sub-directory name.
+
+ at end table
+
+ at heading Example
+
+To set up the freshCVS server, add a statement like the following to
+your @file{freshCfg} file:
+
+ at example
+pb = ConfigurationSet([
+    (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
+    ])
+ at end example
+
+This will announce all changes to a client which connects to port 4519
+using a username of 'foo' and a password of 'bar'.
+
+Then add a clause like this to your buildmaster's @file{master.cfg}:
+
+ at example
+BuildmasterConfig['sources'] = [FreshCVSSource("cvs.example.com", 4519,
+                                "foo", "bar",
+                                prefix="glib/")]
+ at end example
+
+where "cvs.example.com" is the host that is running the FreshCVS daemon, and
+"glib" is the top-level directory (relative to the repository's root) where
+all your source code lives. A CVS repository typically holds one or
+more projects (along with CVSROOT/ to hold admin files like loginfo and
+freshCfg); the prefix= argument tells the buildmaster to ignore everything
+outside that directory, and to strip that common prefix from all pathnames
+it handles.
+
+
+
+ at node CVSToys - mail notification, Other mail notification ChangeSources, CVSToys - PBService, Change Sources
+ at subsection CVSToys - mail notification
+
+ at csindex buildbot.changes.mail.FCMaildirSource
+
+CVSToys also provides a @code{MailNotification} action which will send
+email to a list of recipients for each commit. This tends to work
+better than using @code{/bin/mail} from within the CVSROOT/loginfo
+file directly, as CVSToys will batch together all files changed during
+the same CVS invocation, and can provide more information (like
+creating a ViewCVS URL for each file changed).
+
+The Buildbot's @code{FCMaildirSource} is a ChangeSource which knows
+how to parse these CVSToys messages and turn them into Change objects.
+It watches a Maildir for new messages. The usual installation
+process looks like this:
+
+ at enumerate
+ at item
+Create a mailing list, @code{projectname-commits}.
+ at item
+In CVSToys' freshCfg file, use a @code{MailNotification} action to
+send commit mail to this mailing list.
+ at item
+Subscribe the buildbot user to the mailing list.
+ at item
+Configure your .qmail or .forward file to deliver these messages into
+a maildir.
+ at item
+In the Buildbot's master.cfg file, use a @code{FCMaildirSource} to
+watch the maildir for commit messages.
+ at end enumerate
+
+The @code{FCMaildirSource} is created with two parameters: the
+directory name of the maildir root, and the prefix to strip.
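+
+A sketch of the corresponding @file{master.cfg} clause (the maildir
+path and prefix here are hypothetical):
+
+ at example
+from buildbot.changes.mail import FCMaildirSource
+c['sources'].append(FCMaildirSource("~/Maildir-commits",
+                                    prefix="projectname/"))
+ at end example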
+
+ at node Other mail notification ChangeSources, PBChangeSource, CVSToys - mail notification, Change Sources
+ at subsection Other mail notification ChangeSources
+
+ at csindex buildbot.changes.mail.SyncmailMaildirSource
+ at csindex buildbot.changes.mail.BonsaiMaildirSource
+
+There are other types of maildir-watching ChangeSources, which only
+differ in the function used to parse the message body.
+
+ at code{SyncmailMaildirSource} knows how to parse the message format
+used in mail sent by Syncmail.
+
+ at code{BonsaiMaildirSource} parses messages sent out by Bonsai.
+
+ at node PBChangeSource, P4Source, Other mail notification ChangeSources, Change Sources
+ at subsection PBChangeSource
+
+ at csindex buildbot.changes.pb.PBChangeSource
+
+The last kind of ChangeSource actually listens on a TCP port for
+clients to connect and push change notices @emph{into} the
+Buildmaster. This is used by the built-in @code{buildbot sendchange}
+notification tool, as well as the VC-specific
+ at file{contrib/svn_buildbot.py} and @file{contrib/arch_buildbot.py}
+tools. These tools are run by the repository (in a commit hook
+script), and connect to the buildmaster directly each time a file is
+committed. This is also useful for creating new kinds of change sources
+that work on a @code{push} model instead of some kind of subscription
+scheme, for example a script which is run out of an email .forward
+file.
+
+This ChangeSource can be configured to listen on its own TCP port, or
+it can share the port that the buildmaster is already using for the
+buildslaves to connect. (This is possible because the
+ at code{PBChangeSource} uses the same protocol as the buildslaves, and
+they can be distinguished by the @code{username} attribute used when
+the initial connection is established). It might be useful to have it
+listen on a different port if, for example, you wanted to establish
+different firewall rules for that port. You could allow only the SVN
+repository machine access to the @code{PBChangeSource} port, while
+allowing only the buildslave machines access to the slave port. Or you
+could just expose one port and run everything over it. @emph{Note:
+this feature is not yet implemented, the PBChangeSource will always
+share the slave port and will always have a @code{user} name of
+ at code{change}, and a passwd of @code{changepw}. These limitations will
+be removed in the future.}.
+
+
+The @code{PBChangeSource} is created with the following arguments. All
+are optional.
+
+ at table @samp
+ at item @code{port}
+which port to listen on. If @code{None} (which is the default), it
+shares the port used for buildslave connections. @emph{Not
+Implemented, always set to @code{None}}.
+
+ at item @code{user} and @code{passwd}
+The user/passwd account information that the client program must use
+to connect. Defaults to @code{change} and @code{changepw}. @emph{Not
+Implemented, @code{user} is currently always set to @code{change},
+ at code{passwd} is always set to @code{changepw}}.
+
+ at item @code{prefix}
+The prefix to be found and stripped from filenames delivered over the
+connection. Any filenames which do not start with this prefix will be
+removed. If all the filenames in a given Change are removed, then that
+whole Change will be dropped. This string should probably end with a
+directory separator.
+
+This is useful for changes coming from version control systems that
+represent branches as parent directories within the repository (like
+SVN and Perforce). Use a prefix of 'trunk/' or
+'project/branches/foobranch/' to only follow one branch and to get
+correct tree-relative filenames. Without a prefix, the PBChangeSource
+will probably deliver Changes with filenames like @file{trunk/foo.c}
+instead of just @file{foo.c}. Of course this also depends upon the
+tool sending the Changes in (like @command{buildbot sendchange}) and
+what filenames it is delivering: that tool may be filtering and
+stripping prefixes at the sending end.
+
+ at end table
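+
+A sketch of a typical clause, which shares the slave port and strips a
+leading @file{trunk/} from incoming filenames:
+
+ at example
+from buildbot.changes.pb import PBChangeSource
+c['sources'].append(PBChangeSource(prefix="trunk/"))
+ at end example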
+
+ at node P4Source, BonsaiPoller, PBChangeSource, Change Sources
+ at subsection P4Source
+
+ at csindex buildbot.changes.p4poller.P4Source
+
+The @code{P4Source} periodically polls a @uref{http://www.perforce.com/,
+Perforce} depot for changes. It accepts the following arguments:
+
+ at table @samp
+ at item @code{p4base}
+The base depot path to watch, without the trailing '/...'.
+
+ at item @code{p4port}
+The Perforce server to connect to (as host:port).
+
+ at item @code{p4user}
+The Perforce user.
+
+ at item @code{p4passwd}
+The Perforce password.
+
+ at item @code{split_file}
+A function that maps a pathname, without the leading @code{p4base}, to a
+(branch, filename) tuple. The default just returns (None, branchfile),
+which effectively disables branch support. You should supply a function
+which understands your repository structure.
+
+ at item @code{pollinterval}
+How often to poll, in seconds. Defaults to 600 (10 minutes).
+
+ at item @code{histmax}
+The maximum number of changes to inspect at a time. If more than this
+number occur since the last poll, older changes will be silently
+ignored.
+ at end table
+
+ at heading Example
+
+This configuration uses the @code{P4PORT}, @code{P4USER}, and @code{P4PASSWD}
+specified in the buildmaster's environment. It watches a project in which the
+branch name is simply the next path component, and the file is all path
+components after.
+
+ at example
+from buildbot.changes import p4poller
+c['sources'].append(p4poller.P4Source(
+        p4base='//depot/project/',
+        split_file=lambda branchfile: branchfile.split('/',1)
+))
+ at end example
+
+ at node BonsaiPoller, SVNPoller, P4Source, Change Sources
+ at subsection BonsaiPoller
+
+ at csindex buildbot.changes.bonsaipoller.BonsaiPoller
+
+The @code{BonsaiPoller} periodically polls a Bonsai server. This is a
+CGI script accessed through a web server that provides information
+about a CVS tree, for example the Mozilla bonsai server at
+ at uref{http://bonsai.mozilla.org}. Bonsai servers are usable by both
+humans and machines. In this case, the buildbot's change source forms
+a query which asks about any files in the specified branch which have
+changed since the last query.
+
+Please take a look at the BonsaiPoller docstring for details about the
+arguments it accepts.
+
+
+ at node SVNPoller,  , BonsaiPoller, Change Sources
+ at subsection SVNPoller
+
+ at csindex buildbot.changes.svnpoller.SVNPoller
+
+The @code{buildbot.changes.svnpoller.SVNPoller} is a ChangeSource
+which periodically polls a @uref{http://subversion.tigris.org/,
+Subversion} repository for new revisions, by running the @code{svn
+log} command in a subshell. It can watch a single branch or multiple
+branches.
+
+ at code{SVNPoller} accepts the following arguments:
+
+ at table @code
+ at item svnurl
+The base URL path to watch, like
+ at code{svn://svn.twistedmatrix.com/svn/Twisted/trunk}, or
+ at code{http://divmod.org/svn/Divmod/}, or even
+ at code{file:///home/svn/Repository/ProjectA/branches/1.5/}. This must
+include the access scheme, the location of the repository (both the
+hostname for remote ones, and any additional directory names necessary
+to get to the repository), and the sub-path within the repository's
+virtual filesystem for the project and branch of interest.
+
+The @code{SVNPoller} will only pay attention to files inside the
+subdirectory specified by the complete svnurl.
+
+ at item split_file
+A function to convert pathnames into (branch, relative_pathname)
+tuples. Use this to explain your repository's branch-naming policy to
+ at code{SVNPoller}. This function must accept a single string and return
+a two-entry tuple. There are a few utility functions in
+ at code{buildbot.changes.svnpoller} that can be used as a
+ at code{split_file} function, see below for details.
+
+The default value always returns (None, path), which indicates that
+all files are on the trunk.
+
+Subclasses of @code{SVNPoller} can override the @code{split_file}
+method instead of using the @code{split_file=} argument.
+
+ at item svnuser
+An optional string parameter. If set, the @code{--user} argument will
+be added to all @code{svn} commands. Use this if you have to
+authenticate to the svn server before you can do @code{svn info} or
+ at code{svn log} commands.
+
+ at item svnpasswd
+Like @code{svnuser}, this will cause a @code{--password} argument to
+be passed to all svn commands.
+
+ at item pollinterval
+How often to poll, in seconds. Defaults to 600 (checking once every 10
+minutes). Lower this if you want the buildbot to notice changes
+faster, raise it if you want to reduce the network and CPU load on
+your svn server. Please be considerate of public SVN repositories by
+using a large interval when polling them.
+
+ at item histmax
+The maximum number of changes to inspect at a time. Every POLLINTERVAL
+seconds, the @code{SVNPoller} asks for the last HISTMAX changes and
+looks through them for any that it does not already know about. If
+more than HISTMAX revisions have been committed since the last poll,
+older changes will be silently ignored. Larger values of histmax will
+cause more time and memory to be consumed on each poll attempt.
+ at code{histmax} defaults to 100.
+
+ at item svnbin
+This controls the @code{svn} executable to use. If subversion is
+installed in a weird place on your system (outside of the
+buildmaster's @code{$PATH}), use this to tell @code{SVNPoller} where
+to find it. The default value of ``svn'' will almost always be
+sufficient.
+
+ at end table
+
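+For instance, a poller that authenticates to the repository and polls
+every five minutes might be set up roughly as follows (the repository
+URL and the credentials here are placeholders, not a real server; the
+arguments are the ones described in the table above):
+
+ at example
+from buildbot.changes.svnpoller import SVNPoller
+
+s = SVNPoller("https://svn.example.org/repos/ProjectA/trunk",
+              svnuser="buildbot",      # passed as --user
+              svnpasswd="secret",      # passed as --password
+              pollinterval=5*60,       # poll every five minutes
+              histmax=50)
+c['sources'].append(s)
+ at end example
+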
+ at heading Branches
+
+Each source file that is tracked by a Subversion repository has a
+fully-qualified SVN URL in the following form:
+(REPOURL)(PROJECT-plus-BRANCH)(FILEPATH). When you create the
+ at code{SVNPoller}, you give it a @code{svnurl} value that includes all
+of the REPOURL and possibly some portion of the PROJECT-plus-BRANCH
+string. The @code{SVNPoller} is responsible for producing Changes that
+contain a branch name and a FILEPATH (which is relative to the top of
+a checked-out tree). The details of how these strings are split up
+depend upon how your repository names its branches.
+
+ at subheading PROJECT/BRANCHNAME/FILEPATH repositories
+
+One common layout is to have all the various projects that share a
+repository get a single top-level directory each. Then under a given
+project's directory, you get two subdirectories, one named ``trunk''
+and another named ``branches''. Under ``branches'' you have a bunch of
+other directories, one per branch, with names like ``1.5.x'' and
+``testing''. It is also common to see directories like ``tags'' and
+``releases'' next to ``branches'' and ``trunk''.
+
+For example, the Twisted project has a subversion server on
+``svn.twistedmatrix.com'' that hosts several sub-projects. The
+repository is available through a SCHEME of ``svn:''. The primary
+sub-project is Twisted, of course, with a repository root of
+``svn://svn.twistedmatrix.com/svn/Twisted''. Another sub-project is
+Informant, with a root of
+``svn://svn.twistedmatrix.com/svn/Informant'', etc. Inside any
+checked-out Twisted tree, there is a file named bin/trial (which is
+used to run unit test suites).
+
+The trunk for Twisted is in
+``svn://svn.twistedmatrix.com/svn/Twisted/trunk'', and the
+fully-qualified SVN URL for the trunk version of @code{trial} would be
+``svn://svn.twistedmatrix.com/svn/Twisted/trunk/bin/trial''. The same
+SVNURL for that file on a branch named ``1.5.x'' would be
+``svn://svn.twistedmatrix.com/svn/Twisted/branches/1.5.x/bin/trial''.
+
+To set up a @code{SVNPoller} that watches the Twisted trunk (and
+nothing else), we would use the following:
+
+ at example
+from buildbot.changes.svnpoller import SVNPoller
+s = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted/trunk")
+c['sources'].append(s)
+ at end example
+
+In this case, every Change that our @code{SVNPoller} produces will
+have @code{.branch=None}, to indicate that the Change is on the trunk.
+No other sub-projects or branches will be tracked.
+
+If we want our ChangeSource to follow multiple branches, we have to do
+two things. First we have to change our @code{svnurl=} argument to
+watch more than just ``.../Twisted/trunk''. We will set it to
+``.../Twisted'' so that we'll see both the trunk and all the branches.
+Second, we have to tell @code{SVNPoller} how to split the
+(PROJECT-plus-BRANCH)(FILEPATH) strings it gets from the repository
+out into (BRANCH) and (FILEPATH) pairs.
+
+We do the latter by providing a ``split_file'' function. This function
+is responsible for splitting something like
+``branches/1.5.x/bin/trial'' into @code{branch}=``branches/1.5.x'' and
+ at code{filepath}=``bin/trial''. This function is always given a string
+that names a file relative to the subdirectory pointed to by the
+ at code{SVNPoller}'s @code{svnurl=} argument. It is expected to return a
+(BRANCHNAME, FILEPATH) tuple (in which FILEPATH is relative to the
+branch indicated), or None to indicate that the file is outside any
+project of interest.
+
+(note that we want to see ``branches/1.5.x'' rather than just
+``1.5.x'' because when we perform the SVN checkout, we will probably
+append the branch name to the baseURL, which requires that we keep the
+``branches'' component in there. Other VC schemes use a different
+approach towards branches and may not require this artifact.)
+
+If your repository uses this same PROJECT/BRANCH/FILEPATH naming
+scheme, the following function will work:
+
+ at example
+def split_file_branches(path):
+    pieces = path.split('/')
+    if pieces[0] == 'trunk':
+        return (None, '/'.join(pieces[1:]))
+    elif pieces[0] == 'branches':
+        return ('/'.join(pieces[0:2]),
+                '/'.join(pieces[2:]))
+    else:
+        return None
+ at end example
+
+This function is provided as
+ at code{buildbot.changes.svnpoller.split_file_branches} for your
+convenience. So to have our Twisted-watching @code{SVNPoller} follow
+multiple branches, we would use this:
+
+ at example
+from buildbot.changes.svnpoller import SVNPoller, split_file_branches
+s = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted",
+              split_file=split_file_branches)
+c['sources'].append(s)
+ at end example
+
+Changes for all sorts of branches (with names like ``branches/1.5.x'',
+and None to indicate the trunk) will be delivered to the Schedulers.
+Each Scheduler is then free to use or ignore each branch as it sees
+fit.
+
+ at subheading BRANCHNAME/PROJECT/FILEPATH repositories
+
+Another common way to organize a Subversion repository is to put the
+branch name at the top, and the projects underneath. This is
+especially frequent when there are a number of related sub-projects
+that all get released in a group.
+
+For example, Divmod.org hosts a project named ``Nevow'' as well as one
+named ``Quotient''. In a checked-out Nevow tree there is a directory
+named ``formless'' that contains a python source file named
+``webform.py''. This repository is accessible via webdav (and thus
+uses an ``http:'' scheme) through the divmod.org hostname. There are
+many branches in this repository, and they use a
+(BRANCHNAME)/(PROJECT) naming policy.
+
+The fully-qualified SVN URL for the trunk version of webform.py is
+ at code{http://divmod.org/svn/Divmod/trunk/Nevow/formless/webform.py}.
+You can do an @code{svn co} with that URL and get a copy of the latest
+version. The 1.5.x branch version of this file would have a URL of
+ at code{http://divmod.org/svn/Divmod/branches/1.5.x/Nevow/formless/webform.py}.
+The whole Nevow trunk would be checked out with
+ at code{http://divmod.org/svn/Divmod/trunk/Nevow}, while the Quotient
+trunk would be checked out using
+ at code{http://divmod.org/svn/Divmod/trunk/Quotient}.
+
+Now suppose we want to have an @code{SVNPoller} that only cares about
+the Nevow trunk. This case looks just like the PROJECT/BRANCH layout
+described earlier:
+
+ at example
+from buildbot.changes.svnpoller import SVNPoller
+s = SVNPoller("http://divmod.org/svn/Divmod/trunk/Nevow")
+c['sources'].append(s)
+ at end example
+
+But what happens when we want to track multiple Nevow branches? We
+have to point our @code{svnurl=} high enough to see all those
+branches, but we also don't want to include Quotient changes (since
+we're only building Nevow). To accomplish this, we must rely upon the
+ at code{split_file} function to help us tell the difference between
+files that belong to Nevow and those that belong to Quotient, as well
+as figuring out which branch each one is on.
+
+ at example
+from buildbot.changes.svnpoller import SVNPoller
+s = SVNPoller("http://divmod.org/svn/Divmod",
+              split_file=my_file_splitter)
+c['sources'].append(s)
+ at end example
+
+The @code{my_file_splitter} function will be called with
+repository-relative pathnames like:
+
+ at table @code
+ at item trunk/Nevow/formless/webform.py
+This is a Nevow file, on the trunk. We want the Change that includes this
+to see a filename of @code{formless/webform.py}, and a branch of None.
+
+ at item branches/1.5.x/Nevow/formless/webform.py
+This is a Nevow file, on a branch. We want to get
+branch=``branches/1.5.x'' and filename=``formless/webform.py''.
+
+ at item trunk/Quotient/setup.py
+This is a Quotient file, so we want to ignore it by having
+ at code{my_file_splitter} return None.
+
+ at item branches/1.5.x/Quotient/setup.py
+This is also a Quotient file, which should be ignored.
+ at end table
+
+The following definition for @code{my_file_splitter} will do the job:
+
+ at example
+def my_file_splitter(path):
+    pieces = path.split('/')
+    if pieces[0] == 'trunk':
+        branch = None
+        pieces.pop(0) # remove 'trunk'
+    elif pieces[0] == 'branches':
+        pieces.pop(0) # remove 'branches'
+        # grab branch name
+        branch = 'branches/' + pieces.pop(0)
+    else:
+        return None # something weird
+    projectname = pieces.pop(0)
+    if projectname != 'Nevow':
+        return None # wrong project
+    return (branch, '/'.join(pieces))
+ at end example
+
+
+ at node Build Process, Status Delivery, Getting Source Code Changes, Top
+ at chapter Build Process
+
+A @code{Build} object is responsible for actually performing a build.
+It gets access to a remote @code{SlaveBuilder} where it may run
+commands, and a @code{BuildStatus} object where it must emit status
+events. The @code{Build} is created by the Builder's
+ at code{BuildFactory}.
+
+The default @code{Build} class is made up of a fixed sequence of
+ at code{BuildSteps}, executed one after another until all are complete
+(or one of them indicates that the build should be halted early). The
+default @code{BuildFactory} creates instances of this @code{Build}
+class with a list of @code{BuildSteps}, so the basic way to configure
+the build is to provide a list of @code{BuildSteps} to your
+ at code{BuildFactory}.
+
+More complicated @code{Build} subclasses can make other decisions:
+execute some steps only if certain files were changed, or if certain
+previous steps passed or failed. The base class has been written to
+allow users to express basic control flow without writing code, but
+you can always subclass and customize to achieve more specialized
+behavior.
+
+ at menu
+* Build Steps::                 
+* Interlocks::                  
+* Build Factories::             
+ at end menu
+
+ at node Build Steps, Interlocks, Build Process, Build Process
+ at section Build Steps
+
+ at code{BuildStep}s are usually specified in the buildmaster's
+configuration file, in a list of ``step specifications'' that is used
+to create the @code{BuildFactory}. These ``step specifications'' are
+not actual steps, but rather a tuple of the @code{BuildStep} subclass
+to be created and a dictionary of arguments. (the actual
+ at code{BuildStep} instances are not created until the Build is started,
+so that each Build gets an independent copy of each BuildStep). The
+preferred way to create these step specifications is with the
+ at code{BuildFactory}'s @code{addStep} method:
+
+ at example
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+f = factory.BuildFactory()
+f.addStep(source.SVN, svnurl="http://svn.example.org/Trunk/")
+f.addStep(shell.ShellCommand, command=["make", "all"])
+f.addStep(shell.ShellCommand, command=["make", "test"])
+ at end example
+
+The rest of this section lists all the standard BuildStep objects
+available for use in a Build, and the parameters which can be used to
+control each.
+
+ at menu
+* Common Parameters::           
+* Source Checkout::             
+* ShellCommand::                
+* Simple ShellCommand Subclasses::  
+* Python BuildSteps::           
+* Transferring Files::          
+* Writing New BuildSteps::      
+ at end menu
+
+ at node Common Parameters, Source Checkout, Build Steps, Build Steps
+ at subsection Common Parameters
+
+The standard @code{Build} runs a series of @code{BuildStep}s in order,
+only stopping when it runs out of steps or if one of them requests
+that the build be halted. It collects status information from each one
+to create an overall build status (of SUCCESS, WARNINGS, or FAILURE).
+
+All BuildSteps accept some common parameters. Some of these control
+how their individual status affects the overall build. Others are used
+to specify which @code{Locks} (@pxref{Interlocks}) should be
+acquired before allowing the step to run.
+
+Arguments common to all @code{BuildStep} subclasses:
+
+
+ at table @code
+ at item name
+the name used to describe the step on the status display. It is also
+used to give a name to any LogFiles created by this step.
+
+ at item haltOnFailure
+if True, a FAILURE of this build step will cause the build to halt
+immediately with an overall result of FAILURE.
+
+ at item flunkOnWarnings
+when True, a WARNINGS or FAILURE of this build step will mark the
+overall build as FAILURE. The remaining steps will still be executed.
+
+ at item flunkOnFailure
+when True, a FAILURE of this build step will mark the overall build as
+a FAILURE. The remaining steps will still be executed.
+
+ at item warnOnWarnings
+when True, a WARNINGS or FAILURE of this build step will mark the
+overall build as having WARNINGS. The remaining steps will still be
+executed.
+
+ at item warnOnFailure
+when True, a FAILURE of this build step will mark the overall build as
+having WARNINGS. The remaining steps will still be executed.
+
+ at item locks
+a list of Locks (instances of @code{buildbot.locks.SlaveLock} or
+ at code{buildbot.locks.MasterLock}) that should be acquired before
+starting this Step. The Locks will be released when the step is
+complete. Note that this is a list of actual Lock instances, not
+names. Also note that all Locks must have unique names.
+
+ at end table
+
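+As a rough sketch of how these arguments are passed to @code{addStep}
+(the commands and the Lock name are hypothetical, and this assumes a
+ at code{SlaveLock} can be created with just a name):
+
+ at example
+from buildbot.steps.shell import ShellCommand
+from buildbot.locks import SlaveLock
+
+compile_lock = SlaveLock("compile")    # hypothetical lock name
+
+f.addStep(ShellCommand, command=["make", "all"],
+          name="compile",
+          haltOnFailure=True,          # a failed compile stops the build
+          locks=[compile_lock])
+f.addStep(ShellCommand, command=["make", "lint"],
+          name="lint",
+          warnOnFailure=True)          # lint problems only produce WARNINGS
+ at end example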
+
+ at node Source Checkout, ShellCommand, Common Parameters, Build Steps
+ at subsection Source Checkout
+
+The first step of any build is typically to acquire the source code
+from which the build will be performed. There are several classes to
+handle this, one for each of the different source control systems that
+Buildbot knows about. For a description of how Buildbot treats source
+control in general, see @ref{Version Control Systems}.
+
+All source checkout steps accept some common parameters to control how
+they get the sources and where they should be placed. The remaining
+per-VC-system parameters are mostly to specify where exactly the
+sources are coming from.
+
+ at table @code
+ at item mode
+
+a string describing the kind of VC operation that is desired. Defaults
+to @code{update}.
+
+ at table @code
+ at item update
+specifies that the CVS checkout/update should be performed directly
+into the workdir. Each build is performed in the same directory,
+allowing for incremental builds. This minimizes disk space, bandwidth,
+and CPU time. However, it may encounter problems if the build process
+does not handle dependencies properly (sometimes you must do a ``clean
+build'' to make sure everything gets compiled), or if source files are
+deleted but generated files can influence test behavior (e.g. python's
+.pyc files), or when source directories are deleted but generated
+files prevent CVS from removing them. Builds ought to be correct
+regardless of whether they are done ``from scratch'' or incrementally,
+but it is useful to test both kinds: this mode exercises the
+incremental-build style.
+
+ at item copy
+specifies that the CVS workspace should be maintained in a separate
+directory (called the 'copydir'), using checkout or update as
+necessary. For each build, a new workdir is created with a copy of the
+source tree (rm -rf workdir; cp -r copydir workdir). This doubles the
+disk space required, but keeps the bandwidth low (update instead of a
+full checkout). A full 'clean' build is performed each time. This
+avoids any generated-file build problems, but is still occasionally
+vulnerable to CVS problems such as a repository being manually
+rearranged, causing CVS errors on update which are not an issue with a
+full checkout.
+
+ at c TODO: something is screwy about this, revisit. Is it the source
+ at c directory or the working directory that is deleted each time?
+
+ at item clobber
+specifies that the working directory should be deleted each time,
+necessitating a full checkout for each build. This ensures a clean
+build off a complete checkout, avoiding any of the problems described
+above. This mode exercises the ``from-scratch'' build style.
+
+ at item export
+this is like @code{clobber}, except that the 'cvs export' command is
+used to create the working directory. This command removes all CVS
+metadata files (the CVS/ directories) from the tree, which is
+sometimes useful for creating source tarballs (to avoid including the
+metadata in the tar file).
+ at end table
+
+ at item workdir
+like all Steps, this indicates the directory where the build will take
+place. Source Steps are special in that they perform some operations
+outside of the workdir (like creating the workdir itself).
+
+ at item alwaysUseLatest
+if True, bypass the usual ``update to the last Change'' behavior, and
+always update to the latest changes instead.
+
+ at item retry
+If set, this specifies a tuple of @code{(delay, repeats)} which means
+that when a full VC checkout fails, it should be retried up to
+ at var{repeats} times, waiting @var{delay} seconds between attempts. If
+you don't provide this, it defaults to @code{None}, which means VC
+operations should not be retried. This is provided to make life easier
+for buildslaves which are stuck behind poor network connections.
+
+ at end table
+
+
+My habit as a developer is to do a @code{cvs update} and @code{make} each
+morning. Problems can occur, either because of bad code being checked in, or
+by incomplete dependencies causing a partial rebuild to fail where a
+complete from-scratch build might succeed. A quick Builder which emulates
+this incremental-build behavior would use the @code{mode='update'}
+setting.
+
+On the other hand, other kinds of dependency problems can cause a clean
+build to fail where a partial build might succeed. This frequently results
+from a link step that depends upon an object file that was removed from a
+later version of the tree: in the partial tree, the object file is still
+around (even though the Makefiles no longer know how to create it).
+
+``official'' builds (traceable builds performed from a known set of
+source revisions) are always done as clean builds, to make sure it is
+not influenced by any uncontrolled factors (like leftover files from a
+previous build). A ``full'' Builder which behaves this way would want
+to use the @code{mode='clobber'} setting.
+
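+As a sketch, a ``full'' builder using these common arguments might be
+configured like this (the repository URL is hypothetical and the
+ at code{retry} values are arbitrary; @code{f} is a @code{BuildFactory} as
+in the other examples in this manual):
+
+ at example
+from buildbot.process import factory
+from buildbot.steps import source
+
+f = factory.BuildFactory()
+f.addStep(source.SVN, mode='clobber',      # full checkout every time
+          svnurl="http://svn.example.com/repos/trunk/calc",
+          retry=(60, 2))                   # retry twice, 60 seconds apart
+ at end example
+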
+Each VC system has a corresponding source checkout class: their
+arguments are described on the following pages.
+
+
+ at menu
+* CVS::                         
+* SVN::                         
+* Darcs::                       
+* Mercurial::                   
+* Arch::                        
+* Bazaar::                      
+* P4::                          
+ at end menu
+
+ at node CVS, SVN, Source Checkout, Source Checkout
+ at subsubsection CVS
+
+ at cindex CVS Checkout
+ at bsindex buildbot.steps.source.CVS
+
+
+The @code{CVS} build step performs a @uref{http://www.nongnu.org/cvs/,
+CVS} checkout or update. It takes the following arguments:
+
+ at table @code
+ at item cvsroot
+(required): specify the CVSROOT value, which points to a CVS
+repository, probably on a remote machine. For example, the cvsroot
+value you would use to get a copy of the Buildbot source code is
+ at code{:pserver:anonymous@@cvs.sourceforge.net:/cvsroot/buildbot}
+
+ at item cvsmodule
+(required): specify the cvs @code{module}, which is generally a
+subdirectory of the CVSROOT. The cvsmodule for the Buildbot source
+code is @code{buildbot}.
+
+ at item branch
+a string which will be used in a @code{-r} argument. This is most
+useful for specifying a branch to work on. Defaults to @code{HEAD}.
+
+ at item global_options
+a list of flags to be put before the verb in the CVS command.
+
+ at item checkoutDelay
+if set, the number of seconds to put between the timestamp of the last
+known Change and the value used for the @code{-D} option. Defaults to
+half of the parent Build's treeStableTimer.
+
+ at end table
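+
+A minimal sketch, using a hypothetical anonymous pserver repository:
+
+ at example
+from buildbot.steps import source
+
+f.addStep(source.CVS, mode="copy",
+          cvsroot=":pserver:anonymous@@cvs.example.org:/cvsroot/myproj",
+          cvsmodule="myproj",
+          branch="HEAD")
+ at end example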
+
+
+ at node SVN, Darcs, CVS, Source Checkout
+ at subsubsection SVN
+
+ at cindex SVN Checkout
+ at bsindex buildbot.steps.source.SVN
+
+
+The @code{SVN} build step performs a
+ at uref{http://subversion.tigris.org, Subversion} checkout or update.
+There are two basic ways of setting up the checkout step, depending
+upon whether you are using multiple branches or not.
+
+If all of your builds use the same branch, then you should create the
+ at code{SVN} step with the @code{svnurl} argument:
+
+ at table @code
+ at item svnurl
+(required): this specifies the @code{URL} argument that will be given
+to the @code{svn checkout} command. It dictates both where the
+repository is located and which sub-tree should be extracted. In this
+respect, it is like a combination of the CVS @code{cvsroot} and
+ at code{cvsmodule} arguments. For example, if you are using a remote
+Subversion repository which is accessible through HTTP at a URL of
+ at code{http://svn.example.com/repos}, and you wanted to check out the
+ at code{trunk/calc} sub-tree, you would use
+ at code{svnurl="http://svn.example.com/repos/trunk/calc"} as an argument
+to your @code{SVN} step.
+ at end table
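+
+Continuing that example, the single-branch form looks like this:
+
+ at example
+from buildbot.steps import source
+
+f.addStep(source.SVN, mode="update",
+          svnurl="http://svn.example.com/repos/trunk/calc")
+ at end example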
+
+If, on the other hand, you are building from multiple branches, then
+you should create the @code{SVN} step with the @code{baseURL} and
+ at code{defaultBranch} arguments instead:
+
+ at table @code
+ at item baseURL
+(required): this specifies the base repository URL, to which a branch
+name will be appended. It should probably end in a slash.
+
+ at item defaultBranch
+this specifies the name of the branch to use when a Build does not
+provide one of its own. This will be appended to @code{baseURL} to
+create the string that will be passed to the @code{svn checkout}
+command.
+ at end table
+
+If you are using branches, you must also make sure your
+ at code{ChangeSource} will report the correct branch names.
+
+ at heading branch example
+
+Let's suppose that the ``MyProject'' repository uses branches for the
+trunk, for various users' individual development efforts, and for
+several new features that will require some amount of work (involving
+multiple developers) before they are ready to merge onto the trunk.
+Such a repository might be organized as follows:
+
+ at example
+svn://svn.example.org/MyProject/trunk
+svn://svn.example.org/MyProject/branches/User1/foo
+svn://svn.example.org/MyProject/branches/User1/bar
+svn://svn.example.org/MyProject/branches/User2/baz
+svn://svn.example.org/MyProject/features/newthing
+svn://svn.example.org/MyProject/features/otherthing
+ at end example
+
+Further assume that we want the Buildbot to run tests against the
+trunk and against all the feature branches (i.e., do a
+checkout/compile/build of branch X when a file has been changed on
+branch X, when X is in the set [trunk, features/newthing,
+features/otherthing]). We do not want the Buildbot to automatically
+build any of the user branches, but it should be willing to build a
+user branch when explicitly requested (most likely by the user who
+owns that branch).
+
+There are three things that need to be set up to accommodate this
+system. The first is a ChangeSource that is capable of identifying the
+branch which owns any given file. This depends upon a user-supplied
+function, in an external program that runs in the SVN commit hook and
+connects to the buildmaster's @code{PBChangeSource} over a TCP
+connection. (you can use the ``@code{buildbot sendchange}'' utility
+for this purpose, but you will still need an external program to
+decide what value should be passed to the @code{--branch=} argument).
+For example, a change to a file with the SVN url of
+``svn://svn.example.org/MyProject/features/newthing/src/foo.c'' should
+be broken down into a Change instance with
+ at code{branch='features/newthing'} and @code{file='src/foo.c'}.
+
+The second piece is an @code{AnyBranchScheduler} which will pay
+attention to the desired branches. It will not pay attention to the
+user branches, so it will not automatically start builds in response
+to changes there. The AnyBranchScheduler class requires you to
+explicitly list all the branches you want it to use, but it would not
+be difficult to write a subclass which used
+ at code{branch.startswith('features/')} to remove the need for this
+explicit list. Or, if you want to build user branches too, you can use
+AnyBranchScheduler with @code{branches=None} to indicate that you want
+it to pay attention to all branches.
+
+The third piece is an @code{SVN} checkout step that is configured to
+handle the branches correctly, with a @code{baseURL} value that
+matches the way the ChangeSource splits each file's URL into base,
+branch, and file.
+
+ at example
+from buildbot.changes.pb import PBChangeSource
+from buildbot.scheduler import AnyBranchScheduler
+from buildbot.process import factory
+from buildbot.steps import source, shell
+
+c['sources'] = [PBChangeSource()]
+s1 = AnyBranchScheduler('main',
+                        ['trunk', 'features/newthing', 'features/otherthing'],
+                        10*60, ['test-i386', 'test-ppc'])
+c['schedulers'] = [s1]
+
+f = factory.BuildFactory()
+f.addStep(source.SVN, mode='update',
+          baseURL='svn://svn.example.org/MyProject/',
+          defaultBranch='trunk')
+f.addStep(shell.Compile, command="make all")
+f.addStep(shell.Test, command="make test")
+
+c['builders'] = [
+  @{'name':'test-i386', 'slavename':'bot-i386', 'builddir':'test-i386',
+                       'factory':f @},
+  @{'name':'test-ppc', 'slavename':'bot-ppc', 'builddir':'test-ppc',
+                      'factory':f @},
+ ]
+ at end example
+
+In this example, when a change arrives with a @code{branch} attribute
+of ``trunk'', the resulting build will have an SVN step that
+concatenates ``svn://svn.example.org/MyProject/'' (the baseURL) with
+``trunk'' (the branch name) to get the correct svn command. If the
+``newthing'' branch has a change to ``src/foo.c'', then the SVN step
+will concatenate ``svn://svn.example.org/MyProject/'' with
+``features/newthing'' to get the svnurl for checkout.
+
+ at node Darcs, Mercurial, SVN, Source Checkout
+ at subsubsection Darcs
+
+ at cindex Darcs Checkout
+ at bsindex buildbot.steps.source.Darcs
+
+
+The @code{Darcs} build step performs a
+ at uref{http://abridgegame.org/darcs/, Darcs} checkout or update.
+
+Like @ref{SVN}, this step can either be configured to always check
+out a specific tree, or set up to pull from a particular branch that
+gets specified separately for each build. Also like SVN, the
+repository URL given to Darcs is created by concatenating a
+ at code{baseURL} with the branch name, and if no particular branch is
+requested, it uses a @code{defaultBranch}. The only difference in
+usage is that each potential Darcs repository URL must point to a
+fully-fledged repository, whereas SVN URLs usually point to sub-trees
+of the main Subversion repository. In other words, doing an SVN
+checkout of @code{baseURL} is legal, but silly, since you'd probably
+wind up with a copy of every single branch in the whole repository.
+Doing a Darcs checkout of @code{baseURL} is just plain wrong, since
+the parent directory of a collection of Darcs repositories is not
+itself a valid repository.
+
+The Darcs step takes the following arguments:
+
+ at table @code
+ at item repourl
+(required unless @code{baseURL} is provided): the URL at which the
+Darcs source repository is available.
+
+ at item baseURL
+(required unless @code{repourl} is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+ at item defaultBranch
+(allowed if and only if @code{baseURL} is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to @code{baseURL} to create the string that
+will be passed to the @code{darcs get} command.
+ at end table
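+
+A short sketch of the multi-branch form (the repository URL and the
+branch name are hypothetical):
+
+ at example
+from buildbot.steps import source
+
+f.addStep(source.Darcs, mode="copy",
+          baseURL="http://darcs.example.org/repos/",
+          defaultBranch="myproj-trunk")
+ at end example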
+
+ at node Mercurial, Arch, Darcs, Source Checkout
+ at subsubsection Mercurial
+
+ at cindex Mercurial Checkout
+ at bsindex buildbot.steps.source.Mercurial
+
+
+The @code{Mercurial} build step performs a
+ at uref{http://selenic.com/mercurial, Mercurial} (aka ``hg'') checkout
+or update.
+
+Branches are handled just like @ref{Darcs}.
+
+The Mercurial step takes the following arguments:
+
+ at table @code
+ at item repourl
+(required unless @code{baseURL} is provided): the URL at which the
+Mercurial source repository is available.
+
+ at item baseURL
+(required unless @code{repourl} is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+ at item defaultBranch
+(allowed if and only if @code{baseURL} is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to @code{baseURL} to create the string that
+will be passed to the @code{hg clone} command.
+ at end table
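+
+A short sketch of the single-repository form (the URL is hypothetical):
+
+ at example
+from buildbot.steps import source
+
+f.addStep(source.Mercurial, mode="clobber",
+          repourl="http://hg.example.org/myproj")
+ at end example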
+
+
+ at node Arch, Bazaar, Mercurial, Source Checkout
+ at subsubsection Arch
+
+ at cindex Arch Checkout
+ at bsindex buildbot.steps.source.Arch
+
+
+The @code{Arch} build step performs an @uref{http://gnuarch.org/,
+Arch} checkout or update using the @code{tla} client. It takes the
+following arguments:
+
+ at table @code
+ at item url
+(required): this specifies the URL at which the Arch source archive is
+available.
+
+ at item version
+(required): this specifies which ``development line'' (like a branch)
+should be used. This provides the default branch name, but individual
+builds may specify a different one.
+
+ at item archive
+(optional): Each repository knows its own archive name. If this
+parameter is provided, it must match the repository's archive name.
+The parameter is accepted for compatibility with the @code{Bazaar}
+step, below.
+
+ at end table
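+
+A short sketch (the archive URL and version name are hypothetical):
+
+ at example
+from buildbot.steps import source
+
+f.addStep(source.Arch,
+          url="http://arch.example.org/archives/myproj",
+          version="myproj--mainline--1.0")
+ at end example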
+
+ at node Bazaar, P4, Arch, Source Checkout
+ at subsubsection Bazaar
+
+ at cindex Bazaar Checkout
+ at bsindex buildbot.steps.source.Bazaar
+
+
+ at code{Bazaar} is an alternate implementation of the Arch VC system,
+which uses a client named @code{baz}. The checkout semantics are just
+different enough from @code{tla} that there is a separate BuildStep for
+it.
+
+It takes exactly the same arguments as @code{Arch}, except that the
+ at code{archive=} parameter is required. (baz does not emit the archive
+name when you do @code{baz register-archive}, so we must provide it
+ourselves).
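+
+A short sketch (the archive name, URL, and version are hypothetical):
+
+ at example
+from buildbot.steps import source
+
+f.addStep(source.Bazaar,
+          url="http://arch.example.org/archives/myproj",
+          version="myproj--mainline--1.0",
+          archive="myproj@@example.org--2007")
+ at end example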
+
+
+ at node P4,  , Bazaar, Source Checkout
+ at subsubsection P4
+
+ at cindex Perforce Update
+ at bsindex buildbot.steps.source.P4
+ at c TODO @bsindex buildbot.steps.source.P4Sync
+
+
+The @code{P4} build step creates a @uref{http://www.perforce.com/,
+Perforce} client specification and performs an update.
+
+ at table @code
+ at item p4base
+A view into the Perforce depot without branch name or trailing "...".
+Typically "//depot/proj/".
+ at item defaultBranch
+A branch name to append on build requests if none is specified.
+Typically "trunk".
+ at item p4port
+(optional): the host:port string describing how to get to the P4 Depot
+(repository), used as the -p argument for all p4 commands.
+ at item p4user
+(optional): the Perforce user, used as the -u argument to all p4
+commands.
+ at item p4passwd
+(optional): the Perforce password, used as the -P argument to all p4
+commands.
+ at item p4extra_views
+(optional): a list of (depotpath, clientpath) tuples containing extra
+views to be mapped into the client specification. Both will have
+"/..." appended automatically. The client name and source directory
+will be prepended to the client path.
+ at item p4client
+(optional): The name of the client to use. In mode='copy' and
+mode='update', it's particularly important that a unique name is used
+for each checkout directory to avoid incorrect synchronization. For
+this reason, Python percent substitution will be performed on this value
+to replace %(slave)s with the slave name and %(builder)s with the
+builder name. The default is "buildbot_%(slave)s_%(builder)s".
+ at end table
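+
+A short sketch (the depot path, server address, and credentials are
+hypothetical):
+
+ at example
+from buildbot.steps import source
+
+f.addStep(source.P4,
+          p4base="//depot/myproj/",
+          defaultBranch="trunk",
+          p4port="perforce.example.com:1666",
+          p4user="buildbot",
+          p4passwd="secret")
+ at end example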
+
+ at node ShellCommand, Simple ShellCommand Subclasses, Source Checkout, Build Steps
+ at subsection ShellCommand
+
+ at bsindex buildbot.steps.shell.ShellCommand
+ at c TODO @bsindex buildbot.steps.shell.TreeSize
+
+This is a useful base class for just about everything you might want
+to do during a build (except for the initial source checkout). It runs
+a single command in a child shell on the buildslave. All stdout/stderr
+is recorded into a LogFile. The step finishes with a status of FAILURE
+if the command's exit code is non-zero, otherwise it has a status of
+SUCCESS.
+
+The preferred way to specify the command is with a list of argv strings,
+since this allows for spaces in filenames and avoids doing any fragile
+shell-escaping. You can also specify the command with a single string, in
+which case the string is given to '/bin/sh -c COMMAND' for parsing.
+
+All ShellCommands are run by default in the ``workdir'', which
+defaults to the ``@file{build}'' subdirectory of the slave builder's
+base directory. The absolute path of the workdir will thus be the
+slave's basedir (set as an option to @code{buildbot create-slave},
+ at pxref{Creating a buildslave}) plus the builder's basedir (set in the
+builder's @code{c['builddir']} key in master.cfg) plus the workdir
+itself (a class-level attribute of the BuildFactory, defaults to
+``@file{build}'').
+
+ at code{ShellCommand} arguments:
+
+ at table @code
+ at item command
+a list of strings (preferred) or single string (discouraged) which
+specifies the command to be run. A list of strings is preferred
+because it can be used directly as an argv array. Using a single
+string (with embedded spaces) requires the buildslave to pass the
+string to /bin/sh for interpretation, which raises all sorts of
+difficult questions about how to escape or interpret shell
+metacharacters.
+
+ at item env
+a dictionary of environment strings which will be added to the child
+command's environment. For example, to run tests with a different i18n
+language setting, you might use
+
+ at example
+f.addStep(ShellCommand, command=["make", "test"],
+          env=@{'LANG': 'fr_FR'@})
+ at end example
+
+These variable settings will override any existing ones in the
+buildslave's environment. The exception is PYTHONPATH, which is merged
+with (actually prepended to) any existing $PYTHONPATH setting. The
+value is treated as a list of directories to prepend, and a single
+string is treated like a one-item list. For example, to prepend both
+ at file{/usr/local/lib/python2.3} and @file{/home/buildbot/lib/python}
+to any existing $PYTHONPATH setting, you would do something like the
+following:
+
+ at example
+f.addStep(ShellCommand, command=["make", "test"],
+          env=@{'PYTHONPATH': ["/usr/local/lib/python2.3",
+                              "/home/buildbot/lib/python"] @})
+ at end example
+
+ at item want_stdout
+if False, stdout from the child process is discarded rather than being
+sent to the buildmaster for inclusion in the step's LogFile.
+
+ at item want_stderr
+like @code{want_stdout} but for stderr. Note that commands run through
+a PTY do not have separate stdout/stderr streams: both are merged into
+stdout.
+
+ at item logfiles
+Sometimes commands will log interesting data to a local file, rather
+than emitting everything to stdout or stderr. For example, Twisted's
+``trial'' command (which runs unit tests) only presents summary
+information to stdout, and puts the rest into a file named
+ at file{_trial_temp/test.log}. It is often useful to watch these files
+as the command runs, rather than using @command{/bin/cat} to dump
+their contents afterwards.
+
+The @code{logfiles=} argument allows you to collect data from these
+secondary logfiles in near-real-time, as the step is running. It
+accepts a dictionary which maps from a local Log name (which is how
+the log data is presented in the build results) to a remote filename
+(interpreted relative to the build's working directory). Each named
+file will be polled on a regular basis (every couple of seconds) as
+the build runs, and any new text will be sent over to the buildmaster.
+
+ at example
+f.addStep(ShellCommand, command=["make", "test"],
+          logfiles=@{"triallog": "_trial_temp/test.log"@})
+ at end example
+
+
+ at item timeout
+if the command fails to produce any output for this many seconds, it
+is assumed to be locked up and will be killed.
+
+ at item description
+This will be used to describe the command (on the Waterfall display)
+while the command is still running. It should be a single
+imperfect-tense verb, like ``compiling'' or ``testing''. The preferred
+form is a list of short strings, which allows the HTML Waterfall
+display to create narrower columns by emitting a <br> tag between each
+word. You may also provide a single string.
+
+ at item descriptionDone
+This will be used to describe the command once it has finished. A
+simple noun like ``compile'' or ``tests'' should be used. Like
+ at code{description}, this may either be a list of short strings or a
+single string.
+
+If neither @code{description} nor @code{descriptionDone} are set, the
+actual command arguments will be used to construct the description.
+This may be a bit too wide to fit comfortably on the Waterfall
+display.
+
+ at example
+f.addStep(ShellCommand, command=["make", "test"],
+          description=["testing"],
+          descriptionDone=["tests"])
+ at end example
+
+ at end table
+
+ at node Simple ShellCommand Subclasses, Python BuildSteps, ShellCommand, Build Steps
+ at subsection Simple ShellCommand Subclasses
+
+Several subclasses of ShellCommand are provided as starting points for
+common build steps. These are all very simple: they just override a few
+parameters so you don't have to specify them yourself, making the master.cfg
+file less verbose.
+
+ at menu
+* Configure::                   
+* Compile::                     
+* Test::                        
+* Build Properties::            
+ at end menu
+
+ at node Configure, Compile, Simple ShellCommand Subclasses, Simple ShellCommand Subclasses
+ at subsubsection Configure
+
+ at bsindex buildbot.steps.shell.Configure
+
+This is intended to handle the @code{./configure} step from
+autoconf-style projects, or the @code{perl Makefile.PL} step from perl
+MakeMaker.pm-style modules. The default command is @code{./configure}
+but you can change this by providing a @code{command=} parameter.
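+
+For example (the MakeMaker-style command shown in the comment is
+hypothetical):
+
+ at example
+from buildbot.steps.shell import Configure
+
+# default: runs ./configure in the workdir
+f.addStep(Configure)
+# or override the command, e.g. for a MakeMaker-style module:
+# f.addStep(Configure, command=["perl", "Makefile.PL"])
+ at end example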
+
+ at node Compile, Test, Configure, Simple ShellCommand Subclasses
+ at subsubsection Compile
+
+ at bsindex buildbot.steps.shell.Compile
+
+This is meant to handle compiling or building a project written in C. The
+default command is @code{make all}. When the compile is finished, the
+log file is scanned for GCC error/warning messages and a summary log is
+created with any problems that were seen (TODO: the summary is not yet
+created).
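+
+For example (the alternate make target in the comment is hypothetical):
+
+ at example
+from buildbot.steps.shell import Compile
+
+f.addStep(Compile)                             # default: make all
+# f.addStep(Compile, command=["make", "everything"])   # custom target
+ at end example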
+
+ at node Test, Build Properties, Compile, Simple ShellCommand Subclasses
+ at subsubsection Test
+
+ at bsindex buildbot.steps.shell.Test
+
+This is meant to handle unit tests. The default command is @code{make
+test}, and the @code{warnOnFailure} flag is set.
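+
+For example (the @command{make check} target in the comment is
+hypothetical):
+
+ at example
+from buildbot.steps.shell import Test
+
+f.addStep(Test)                                # default: make test
+# f.addStep(Test, command=["make", "check"])   # custom target
+ at end example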
+
+
+
+ at node Build Properties,  , Test, Simple ShellCommand Subclasses
+ at subsubsection Build Properties
+
+ at cindex build properties
+
+Each build has a set of ``Build Properties'', which can be used by its
+BuildSteps to modify their actions. For example, the SVN revision
+number of the source code being built is available as a build
+property, and a ShellCommand step could incorporate this number into a
+command which creates a numbered release tarball.
+
+Some build properties are set when the build starts, such as the
+SourceStamp information. Other properties can be set by BuildSteps as
+they run, for example the various Source steps will set the
+ at code{got_revision} property to the source revision that was actually
+checked out (which can be useful when the SourceStamp in use merely
+requested the ``latest revision'': @code{got_revision} will tell you
+what was actually built).
+
+In custom BuildSteps, you can get and set the build properties with
+the @code{getProperty}/@code{setProperty} methods. Each takes a string
+for the name of the property, and returns or accepts an
+arbitrary at footnote{Build properties are serialized along with the
+build results, so they must be serializable. For this reason, the
+value of any build property should be simple inert data: strings,
+numbers, lists, tuples, and dictionaries. They should not contain
+class instances.} object. For example:
+
+ at example
+class MakeTarball(ShellCommand):
+    def start(self):
+        self.setCommand(["tar", "czf",
+                         "build-%s.tar.gz" % self.getProperty("revision"),
+                         "source"])
+        ShellCommand.start(self)
+ at end example
+
+ at cindex WithProperties
+
+You can use build properties in ShellCommands by using the
+ at code{WithProperties} wrapper when setting the arguments of the
+ShellCommand. This interpolates the named build properties into the
+generated shell command.
+
+ at example
+from buildbot.steps.shell import ShellCommand, WithProperties
+
+f.addStep(ShellCommand,
+          command=["tar", "czf",
+                   WithProperties("build-%s.tar.gz", "revision"),
+                   "source"])
+ at end example
+
+If this BuildStep were used in a tree obtained from Subversion, it
+would create a tarball with a name like @file{build-1234.tar.gz}.
+
+The @code{WithProperties} function does @code{printf}-style string
+interpolation, using strings obtained by calling
+ at code{build.getProperty(propname)}. Note that for every @code{%s} (or
+ at code{%d}, etc), you must have exactly one additional argument to
+indicate which build property you want to insert.
+
+
+You can also use python dictionary-style string interpolation by using
+the @code{%(propname)s} syntax. In this form, the property name goes
+in the parentheses, and WithProperties takes @emph{no} additional
+arguments:
+
+ at example
+f.addStep(ShellCommand,
+          command=["tar", "czf",
+                   WithProperties("build-%(revision)s.tar.gz"),
+                   "source"])
+ at end example
+
+Don't forget the extra ``s'' after the closing parenthesis! This is
+the cause of many confusing errors. Also note that you can only use
+WithProperties in the list form of the command= definition. You cannot
+currently use it in the (discouraged) @code{command="stuff"}
+single-string form. However, you can use something like
+ at code{command=["/bin/sh", "-c", "stuff", WithProperties(stuff)]} to
+use both shell expansion and WithProperties interpolation.
+
+Note that, like python, you can either do positional-argument
+interpolation @emph{or} keyword-argument interpolation, not both. Thus
+you cannot use a string like
+ at code{WithProperties("foo-%(revision)s-%s", "branch")}.
+
+At the moment, the only way to set build properties is by writing a
+custom BuildStep.
+
+ at heading Common Build Properties
+
+The following build properties are set when the build is started, and
+are available to all steps.
+
+ at table @code
+ at item branch
+
+This comes from the build's SourceStamp, and describes which branch is
+being checked out. This will be @code{None} (which interpolates into
+ at code{WithProperties} as an empty string) if the build is on the
+default branch, which is generally the trunk. Otherwise it will be a
+string like ``branches/beta1.4''. The exact syntax depends upon the VC
+system being used.
+
+ at item revision
+
+This also comes from the SourceStamp, and is the revision of the
+source code tree that was requested from the VC system. When a build
+is requested of a specific revision (as is generally the case when the
+build is triggered by Changes), this will contain the revision
+specification. The syntax depends upon the VC system in use: for SVN
+it is an integer, for Mercurial it is a short string, for Darcs it is
+a rather large string, etc.
+
+If the ``force build'' button was pressed, the revision will be
+ at code{None}, which means to use the most recent revision available.
+This is a ``trunk build''. This will be interpolated as an empty
+string.
+
+ at item got_revision
+
+This is set when a Source step checks out the source tree, and
+provides the revision that was actually obtained from the VC system.
+In general this should be the same as @code{revision}, except for
+trunk builds, where @code{got_revision} indicates what revision was
+current when the checkout was performed. This can be used to rebuild
+the same source code later.
+
+Note that for some VC systems (Darcs in particular), the revision is a
+large string containing newlines, and is not suitable for
+interpolation into a filename.
+
+ at item buildername
+
+This is a string that indicates which Builder the build was a part of.
+The combination of buildername and buildnumber uniquely identify a
+build.
+
+ at item buildnumber
+
+Each build gets a number, scoped to the Builder (so the first build
+performed on any given Builder will have a build number of 0). This
+integer property contains the build's number.
+
+ at item slavename
+
+This is a string which identifies which buildslave the build is
+running on.
+
+ at end table
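+
+As a sketch combining several of these properties (the tarball name is
+arbitrary, and this uses the dictionary-style @code{WithProperties}
+interpolation described above):
+
+ at example
+from buildbot.steps.shell import ShellCommand, WithProperties
+
+f.addStep(ShellCommand,
+          command=["tar", "czf",
+                   WithProperties("%(buildername)s-%(buildnumber)s.tar.gz"),
+                   "source"])
+ at end example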
+
+ at node Python BuildSteps, Transferring Files, Simple ShellCommand Subclasses, Build Steps
+ at subsection Python BuildSteps
+
+Here are some BuildSteps that are specifically useful for projects
+implemented in Python.
+
+ at menu
+* BuildEPYDoc::                 
+* PyFlakes::                    
+ at end menu
+
+ at node BuildEPYDoc, PyFlakes, Python BuildSteps, Python BuildSteps
+ at subsubsection BuildEPYDoc
+
+ at bsindex buildbot.steps.python.BuildEPYDoc
+
+ at url{http://epydoc.sourceforge.net/, epydoc} is a tool for generating
+API documentation for Python modules from their docstrings. It reads
+all the .py files from your source tree, processes the docstrings
+therein, and creates a large tree of .html files (or a single .pdf
+file).
+
+The @code{buildbot.steps.python.BuildEPYDoc} step will run
+ at command{epydoc} to produce this API documentation, and will count the
+errors and warnings from its output.
+
+You must supply the command line to be used. The default is
+ at command{make epydocs}, which assumes that your project has a Makefile
+with an ``epydocs'' target. You might wish to use something like
+ at command{epydoc -o apiref source/PKGNAME} instead. You might also want
+to add @command{--pdf} to generate a PDF file instead of a large tree
+of HTML files.
+
+The API docs are generated in-place in the build tree (under the
+workdir, in the subdirectory controlled by the ``-o'' argument). To
+make them useful, you will probably have to copy them to somewhere
+they can be read. A command like @command{rsync -ad apiref/
+dev.example.com:~public_html/current-apiref/} might be useful. You
+might instead want to bundle them into a tarball and publish it in the
+same place where the generated install tarball is placed.
+
+ at example
+from buildbot.steps.python import BuildEPYDoc
+
+...
+f.addStep(BuildEPYDoc, command=["epydoc", "-o", "apiref", "source/mypkg"])
+ at end example
+
+
+ at node PyFlakes,  , BuildEPYDoc, Python BuildSteps
+ at subsubsection PyFlakes
+
+ at bsindex buildbot.steps.python.PyFlakes
+
+ at url{http://divmod.org/trac/wiki/DivmodPyflakes, PyFlakes} is a tool
+to perform basic static analysis of Python code to look for simple
+errors, like missing imports and references of undefined names. It is
+like a fast and simple form of the C ``lint'' program. Other tools
+(like pychecker) provide more detailed results but take longer to run.
+
+The @code{buildbot.steps.python.PyFlakes} step will run pyflakes and
+count the various kinds of errors and warnings it detects.
+
+You must supply the command line to be used. The default is
+ at command{make pyflakes}, which assumes you have a top-level Makefile
+with a ``pyflakes'' target. You might want to use something like
+ at command{pyflakes .} or @command{pyflakes src}.
+
+ at example
+from buildbot.steps.python import PyFlakes
+
+...
+f.addStep(PyFlakes, command=["pyflakes", "src"])
+ at end example
+
+
+ at node Transferring Files, Writing New BuildSteps, Python BuildSteps, Build Steps
+ at subsection Transferring Files
+
+ at cindex File Transfer
+ at bsindex buildbot.steps.transfer.FileUpload
+ at bsindex buildbot.steps.transfer.FileDownload
+
+Most of the work involved in a build will take place on the
+buildslave. But occasionally it is useful to do some work on the
+buildmaster side. The most basic way to involve the buildmaster is
+simply to move a file from the slave to the master, or vice versa.
+There are a pair of BuildSteps named @code{FileUpload} and
+ at code{FileDownload} to provide this functionality. @code{FileUpload}
+moves a file @emph{up to} the master, while @code{FileDownload} moves
+a file @emph{down from} the master.
+
+As an example, let's assume that there is a step which produces an
+HTML file within the source tree that contains some sort of generated
+project documentation. We want to move this file to the buildmaster,
+into a @file{~/public_html} directory, so it can be visible to
+developers. This file will wind up in the slave-side working directory
+under the name @file{docs/reference.html}. We want to put it into the
+master-side @file{~/public_html/ref.html}.
+
+ at example
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.transfer import FileUpload
+
+f.addStep(ShellCommand, command=["make", "docs"])
+f.addStep(FileUpload,
+          slavesrc="docs/reference.html",
+          masterdest="~/public_html/ref.html")
+ at end example
+
+The @code{masterdest=} argument will be passed to os.path.expanduser,
+so things like ``~'' will be expanded properly. Non-absolute paths
+will be interpreted relative to the buildmaster's base directory.
+Likewise, the @code{slavesrc=} argument will be expanded and
+interpreted relative to the builder's working directory.
+
+
+To move a file from the master to the slave, use the
+ at code{FileDownload} command. For example, let's assume that some step
+requires a configuration file that, for whatever reason, could not be
+recorded in the source code repository or generated on the buildslave
+side:
+
+ at example
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.transfer import FileDownload
+
+f.addStep(FileDownload,
+          mastersrc="~/todays_build_config.txt",
+          slavedest="build_config.txt")
+f.addStep(ShellCommand, command=["make", "config"])
+ at end example
+
+Like @code{FileUpload}, the @code{mastersrc=} argument is interpreted
+relative to the buildmaster's base directory, and the
+ at code{slavedest=} argument is relative to the builder's working
+directory. If the buildslave is running in @file{~buildslave}, and the
+builder's ``builddir'' is something like @file{tests-i386}, then the
+workdir is going to be @file{~buildslave/tests-i386/build}, and a
+ at code{slavedest=} of @file{foo/bar.html} will get put in
+ at file{~buildslave/tests-i386/build/foo/bar.html}. Remember that
+neither of these commands will create missing directories for you.
+
+
+ at subheading Other Parameters
+
+The @code{maxsize=} argument lets you set a maximum size for the file
+to be transferred. This may help to avoid surprises: transferring a
+100MB coredump when you were expecting to move a 10kB status file
+might take an awfully long time. The @code{blocksize=} argument
+controls how the file is sent over the network: larger blocksizes are
+slightly more efficient but also consume more memory on each end, and
+there is a hard-coded limit of about 640kB.
+
+The @code{mode=} argument allows you to control the access permissions
+of the target file, traditionally expressed as an octal integer. The
+most common value is probably 0755, which sets the ``x'' executable
+bit on the file (useful for shell scripts and the like). The default
+value for @code{mode=} is None, which means the permission bits will
+default to whatever the umask of the writing process is. The default
+umask tends to be fairly restrictive, but at least on the buildslave
+you can make it less restrictive with a --umask command-line option at
+creation time (@pxref{Buildslave Options}).
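+
+A sketch combining these parameters (the filenames and limits are
+hypothetical):
+
+ at example
+from buildbot.steps.transfer import FileUpload
+
+f.addStep(FileUpload,
+          slavesrc="dist/installer.sh",
+          masterdest="~/public_html/installer.sh",
+          mode=0755,                  # make the uploaded file executable
+          maxsize=10*1024*1024,       # refuse anything over 10MB
+          blocksize=64*1024)
+ at end example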
+
+
+ at node Writing New BuildSteps,  , Transferring Files, Build Steps
+ at subsection Writing New BuildSteps
+
+While it is a good idea to keep your build process self-contained in
+the source code tree, sometimes it is convenient to put more
+intelligence into your Buildbot configuration. One way to do this is
+to write a custom BuildStep. Once written, this Step can be used in
+the @file{master.cfg} file.
+
+The best reason for writing a custom BuildStep is to better parse the
+results of the command being run. For example, a BuildStep that knows
+about JUnit could look at the logfiles to determine which tests had
+been run, how many passed and how many failed, and then report more
+detailed information than a simple @code{rc==0}-based ``good/bad''
+decision.
+
+TODO: add more description of BuildSteps.
+
+ at menu
+* BuildStep LogFiles::          
+* Adding LogObservers::         
+* BuildStep URLs::              
+ at end menu
+
+ at node BuildStep LogFiles, Adding LogObservers, Writing New BuildSteps, Writing New BuildSteps
+ at subsubsection BuildStep LogFiles
+
+Each BuildStep has a collection of ``logfiles''. Each one has a short
+name, like ``stdio'' or ``warnings''. Each LogFile contains an
+arbitrary amount of text, usually the contents of some output file
+generated during a build or test step, or a record of everything that
+was printed to stdout/stderr during the execution of some command.
+
+These LogFiles are stored to disk, so they can be retrieved later.
+
+Each can contain multiple ``channels'', generally limited to three
+basic ones: stdout, stderr, and ``headers''. For example, when a
+ShellCommand runs, it writes a few lines to the ``headers'' channel to
+indicate the exact argv strings being run, which directory the command
+is being executed in, and the contents of the current environment
+variables. Then, as the command runs, it adds a lot of ``stdout'' and
+``stderr'' messages. When the command finishes, a final ``header''
+line is added with the exit code of the process.
+
+Status display plugins can format these different channels in
+different ways. For example, the web page shows LogFiles as text/html,
+with header lines in blue text, stdout in black, and stderr in red. A
+different URL is available which provides a text/plain format, in
+which stdout and stderr are collapsed together, and header lines are
+stripped completely. This latter option makes it easy to save the
+results to a file and run @command{grep} or whatever against the
+output.
+
+Each BuildStep contains a mapping (implemented in a python dictionary)
+from LogFile name to the actual LogFile objects. Status plugins can
+get a list of LogFiles to display, for example, a list of HREF links
+that, when clicked, provide the full contents of the LogFile.
+
+ at heading Using LogFiles in custom BuildSteps
+
+The most common way for a custom BuildStep to use a LogFile is to
+summarize the results of a ShellCommand (after the command has
+finished running). For example, a compile step with thousands of lines
+of output might want to create a summary of just the warning messages.
+If you were doing this from a shell, you would use something like:
+
+ at example
+grep "warning:" output.log >warnings.log
+ at end example
+
+In a custom BuildStep, you could instead create a ``warnings'' LogFile
+that contained the same text. To do this, you would add code to your
+ at code{createSummary} method that pulls lines from the main output log
+and creates a new LogFile with the results:
+
+ at example
+    def createSummary(self, log):
+        warnings = []
+        for line in log.readlines():
+            if "warning:" in line:
+                warnings.append(line)
+        self.addCompleteLog('warnings', "".join(warnings))
+ at end example
+
+This example uses the @code{addCompleteLog} method, which creates a
+new LogFile, puts some text in it, and then ``closes'' it, meaning
+that no further contents will be added. This LogFile will appear in
+the HTML display under an HREF with the name ``warnings'', since that
+is the name of the LogFile.
+
+You can also use @code{addHTMLLog} to create a complete (closed)
+LogFile that contains HTML instead of plain text. The normal LogFile
+will be HTML-escaped if presented through a web page, but the HTML
+LogFile will not. At the moment this is only used to present a pretty
+HTML representation of an otherwise ugly exception traceback when
+something goes badly wrong during the BuildStep.
+
+In contrast, you might want to create a new LogFile at the beginning
+of the step, and add text to it as the command runs. You can create
+the LogFile and attach it to the build by calling @code{addLog}, which
+returns the LogFile object. You then add text to this LogFile by
+calling methods like @code{addStdout} and @code{addHeader}. When you
+are done, you must call the @code{finish} method so the LogFile can be
+closed. It may be useful to create and populate a LogFile like this
+from a LogObserver method (@pxref{Adding LogObservers}).
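+
+For illustration, here is a minimal sketch of this pattern. The
+``progress'' log name and the messages are made up, and it assumes a
+ShellCommand subclass whose @code{start} method is being overridden:
+
+ at example
+    def start(self):
+        # create a LogFile up front; addLog() returns the LogFile object
+        progress = self.addLog('progress')
+        progress.addHeader("starting phase one\n")
+        # ... later, as events occur, append to it ...
+        progress.addStdout("phase one complete\n")
+        # close the LogFile; no further contents may be added
+        progress.finish()
+        ShellCommand.start(self)
+ at end example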
+
+The @code{logfiles=} argument to @code{ShellCommand}
+(@pxref{ShellCommand}) creates new LogFiles and fills them in realtime
+by asking the buildslave to watch an actual file on disk. The
+buildslave will look for additions in the target file and report them
+back to the BuildStep. These additions will be added to the LogFile by
+calling @code{addStdout}. These secondary LogFiles can be used as the
+source of a LogObserver just like the normal ``stdio'' LogFile.
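+
+As a rough sketch of how this might look in a master.cfg (assuming the
+usual @code{from buildbot.steps import shell} and a BuildFactory
+ at code{f}; the command and the @file{warnings.log} filename are
+invented for the example):
+
+ at example
+f.addStep(shell.ShellCommand,
+          command=["make", "all"],
+          logfiles=@{"warnings": "warnings.log"@})
+ at end example
+
+This would create a secondary ``warnings'' LogFile which follows the
+slave-side @file{warnings.log} file while the command runs.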
+
+
+ at node Adding LogObservers, BuildStep URLs, BuildStep LogFiles, Writing New BuildSteps
+ at subsubsection Adding LogObservers
+
+ at cindex LogObserver
+ at cindex LogLineObserver
+
+Most shell commands emit messages to stdout or stderr as they operate,
+especially if you ask them nicely with a @code{--verbose} flag of some
+sort. They may also write text to a log file while they run. Your
+BuildStep can watch this output as it arrives, to keep track of how
+much progress the command has made. You can get a better measure of
+progress by counting the number of source files compiled or test cases
+run than by merely tracking the number of bytes that have been written
+to stdout. This improves the accuracy and the smoothness of the ETA
+display.
+
+To accomplish this, you will need to attach a @code{LogObserver} to
+one of the log channels, most commonly to the ``stdio'' channel but
+perhaps to another one which tracks a log file. This observer is given
+all text as it is emitted from the command, and has the opportunity to
+parse that output incrementally. Once the observer has decided that
+some event has occurred (like a source file being compiled), it can
+use the @code{setProgress} method to tell the BuildStep about the
+progress that this event represents.
+
+There are a number of pre-built @code{LogObserver} classes that you
+can choose from (defined in @code{buildbot.process.buildstep}), and of
+course you can subclass them to add further customization. The
+ at code{LogLineObserver} class handles the grunt work of buffering and
+scanning for end-of-line delimiters, allowing your parser to operate
+on complete stdout/stderr lines.
+
+For example, let's take a look at the @code{TrialTestCaseCounter},
+which is used by the Trial step to count test cases as they are run.
+As Trial executes, it emits lines like the following:
+
+ at example
+buildbot.test.test_config.ConfigTest.testDebugPassword ... [OK]
+buildbot.test.test_config.ConfigTest.testEmpty ... [OK]
+buildbot.test.test_config.ConfigTest.testIRC ... [FAIL]
+buildbot.test.test_config.ConfigTest.testLocks ... [OK]
+ at end example
+
+When the tests are finished, trial emits a long line of ``======'' and
+then some lines which summarize the tests that failed. We want to
+avoid parsing these trailing lines, because their format is less
+well-defined than the ``[OK]'' lines.
+
+The parser class looks like this:
+
+ at example
+import re
+
+from buildbot.process.buildstep import LogLineObserver
+
+class TrialTestCaseCounter(LogLineObserver):
+    _line_re = re.compile(r'^([\w\.]+) \.\.\. \[([^\]]+)\]$')
+    numTests = 0
+    finished = False
+
+    def outLineReceived(self, line):
+        if self.finished:
+            return
+        if line.startswith("=" * 40):
+            self.finished = True
+            return
+
+        m = self._line_re.search(line.strip())
+        if m:
+            testname, result = m.groups()
+            self.numTests += 1
+            self.step.setProgress('tests', self.numTests)
+ at end example
+
+This parser only pays attention to stdout, since that's where trial
+writes the progress lines. It has a mode flag named @code{finished} to
+ignore everything after the ``===='' marker, and a scary-looking
+regular expression to match each line while hopefully ignoring other
+messages that might get displayed as the test runs.
+
+Each time it identifies a test has been completed, it increments its
+counter and delivers the new progress value to the step with
+ at code{self.step.setProgress}. This class is specifically measuring
+progress along the ``tests'' metric, in units of test cases (as
+opposed to other kinds of progress like the ``output'' metric, which
+measures in units of bytes). The Progress-tracking code uses each
+progress metric separately to come up with an overall completion
+percentage and an ETA value.
+
+To connect this parser into the @code{Trial} BuildStep,
+ at code{Trial.__init__} ends with the following clause:
+
+ at example
+        # this counter will feed Progress along the 'test cases' metric
+        counter = TrialTestCaseCounter()
+        self.addLogObserver('stdio', counter)
+ at end example
+
+This creates a TrialTestCaseCounter and tells the step that the
+counter wants to watch the ``stdio'' log. The observer is
+automatically given a reference to the step in its @code{.step}
+attribute.
+
+ at subheading A Somewhat Whimsical Example
+
+Let's say that we've got some snazzy new unit-test framework called
+Framboozle. It's the hottest thing since sliced bread. It slices, it
+dices, it runs unit tests like there's no tomorrow. Plus if your unit
+tests fail, you can use its name for a Web 2.1 startup company, make
+millions of dollars, and hire engineers to fix the bugs for you, while
+you spend your afternoons lazily hang-gliding along a scenic pacific
+beach, blissfully unconcerned about the state of your
+tests.@footnote{framboozle.com is still available. Remember, I get 10%
+:).}
+
+To run a Framboozle-enabled test suite, you just run the 'framboozler'
+command from the top of your source code tree. The 'framboozler'
+command emits a bunch of stuff to stdout, but the most interesting bit
+is that it emits the line "FNURRRGH!" every time it finishes running a
+test case@footnote{Framboozle gets very excited about running unit
+tests.}. You'd like to have a test-case counting LogObserver that
+watches for these lines and counts them, because counting them will
+help the buildbot more accurately calculate how long the build will
+take, and this will let you know exactly how long you can sneak out of
+the office for your hang-gliding lessons without anyone noticing that
+you're gone.
+
+This will involve writing a new BuildStep (probably named
+"Framboozle") which inherits from ShellCommand. The BuildStep class
+definition itself will look something like this:
+
+ at example
+# START
+from buildbot.steps.shell import ShellCommand
+from buildbot.process.buildstep import LogLineObserver
+
+class FNURRRGHCounter(LogLineObserver):
+    numTests = 0
+    def outLineReceived(self, line):
+        if "FNURRRGH!" in line:
+            self.numTests += 1
+            self.step.setProgress('tests', self.numTests)
+
+class Framboozle(ShellCommand):
+    command = ["framboozler"]
+
+    def __init__(self, **kwargs):
+        ShellCommand.__init__(self, **kwargs)   # always upcall!
+        counter = FNURRRGHCounter()
+        self.addLogObserver('stdio', counter)
+# FINISH
+ at end example
+
+So that's the code that we want to wind up using. How do we actually
+deploy it?
+
+You have a couple of different options.
+
+Option 1: The simplest technique is to simply put this text
+(everything from START to FINISH) in your master.cfg file, somewhere
+before the BuildFactory definition where you actually use it in a
+clause like:
+
+ at example
+f = BuildFactory()
+f.addStep(SVN, svnurl="stuff")
+f.addStep(Framboozle)
+ at end example
+
+Remember that master.cfg is secretly just a python program with one
+job: populating the BuildmasterConfig dictionary. And python programs
+are allowed to define as many classes as they like. So you can define
+classes and use them in the same file, just as long as the class is
+defined before some other code tries to use it.
+
+This is easy, and it keeps the point of definition very close to the
+point of use, and whoever replaces you after that unfortunate
+hang-gliding accident will appreciate being able to easily figure out
+what the heck this stupid "Framboozle" step is doing anyways. The
+downside is that every time you reload the config file, the Framboozle
+class will get redefined, which means that the buildmaster will think
+that you've reconfigured all the Builders that use it, even though
+nothing changed. Bleh.
+
+Option 2: Instead, we can put this code in a separate file, and import
+it into the master.cfg file just like we would the normal buildsteps
+like ShellCommand and SVN.
+
+Create a directory named ~/lib/python, put everything from START to
+FINISH in ~/lib/python/framboozle.py, and run your buildmaster using:
+
+ at example
+ PYTHONPATH=~/lib/python buildbot start MASTERDIR
+ at end example
+
+or use the @file{Makefile.buildbot} to control the way
+ at command{buildbot start} works. Or add something like this to
+your ~/.bashrc or ~/.bash_profile or ~/.cshrc:
+
+ at example
+ export PYTHONPATH=~/lib/python
+ at end example
+
+Once we've done this, our master.cfg can look like:
+
+ at example
+from framboozle import Framboozle
+f = BuildFactory()
+f.addStep(SVN, svnurl="stuff")
+f.addStep(Framboozle)
+ at end example
+
+or:
+
+ at example
+import framboozle
+f = BuildFactory()
+f.addStep(SVN, svnurl="stuff")
+f.addStep(framboozle.Framboozle)
+ at end example
+
+(check out the python docs for details about how "import" and "from A
+import B" work).
+
+What we've done here is to tell python that every time it handles an
+"import" statement for some named module, it should look in our
+~/lib/python/ for that module before it looks anywhere else. After our
+directory, it will look in a bunch of standard directories too
+(including the one where buildbot is installed). By setting the
+PYTHONPATH environment variable, you can add directories to the front
+of this search list.
+
+Python knows that once it "import"s a file, it doesn't need to
+re-import it again. This means that reconfiguring the buildmaster
+(with "buildbot reconfig", for example) won't make it think the
+Framboozle class has changed every time, so the Builders that use it
+will not be spuriously restarted. On the other hand, you either have
+to start your buildmaster in a slightly weird way, or you have to
+modify your environment to set the PYTHONPATH variable.
+
+
+Option 3: Install this code into a standard python library directory
+
+Find out what your python's standard include path is by asking it:
+
+ at example
+80:warner@@luther% python
+Python 2.4.4c0 (#2, Oct  2 2006, 00:57:46) 
+[GCC 4.1.2 20060928 (prerelease) (Debian 4.1.1-15)] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import sys
+>>> print sys.path
+['', '/usr/lib/python24.zip', '/usr/lib/python2.4', '/usr/lib/python2.4/plat-linux2', '/usr/lib/python2.4/lib-tk', '/usr/lib/python2.4/lib-dynload', '/usr/local/lib/python2.4/site-packages', '/usr/lib/python2.4/site-packages', '/usr/lib/python2.4/site-packages/Numeric', '/var/lib/python-support/python2.4', '/usr/lib/site-python']
+>>> 
+ at end example
+
+In this case, putting the code into
+/usr/local/lib/python2.4/site-packages/framboozle.py would work just
+fine. We can use the same master.cfg "import framboozle" statement as
+in Option 2. By putting it in a standard include directory (instead of
+the decidedly non-standard ~/lib/python), we don't even have to set
+PYTHONPATH to anything special. The downside is that you probably have
+to be root to write to one of those standard include directories.
+
+
+Option 4: Submit the code for inclusion in the Buildbot distribution
+
+Contribute the code in an Enhancement Request on SourceForge, via
+http://buildbot.sf.net . Lobby, convince, coerce, bribe, badger,
+harass, threaten, or otherwise encourage the author to accept the
+patch. This lets you do something like:
+
+ at example
+from buildbot.steps import framboozle
+f = BuildFactory()
+f.addStep(SVN, svnurl="stuff")
+f.addStep(framboozle.Framboozle)
+ at end example
+
+And then you don't even have to install framboozle.py anywhere on your
+system, since it will ship with Buildbot. You don't have to be root,
+you don't have to set PYTHONPATH. But you do have to make a good case
+for Framboozle being worth going into the main distribution, you'll
+probably have to provide docs and some unit test cases, you'll need to
+figure out what kind of beer the author likes, and then you'll have to
+wait until the next release. But in some environments, all this is
+easier than getting root on your buildmaster box, so the tradeoffs may
+actually be worth it.
+
+
+
+Putting the code in master.cfg (1) makes it available to that
+buildmaster instance. Putting it in a file in a personal library
+directory (2) makes it available for any buildmasters you might be
+running. Putting it in a file in a system-wide shared library
+directory (3) makes it available for any buildmasters that anyone on
+that system might be running. Getting it into the buildbot's upstream
+repository (4) makes it available for any buildmasters that anyone in
+the world might be running. It's all a matter of how widely you want
+to deploy that new class.
+
+
+
+ at node BuildStep URLs,  , Adding LogObservers, Writing New BuildSteps
+ at subsubsection BuildStep URLs
+
+ at cindex links
+ at cindex BuildStep URLs
+ at cindex addURL
+
+Each BuildStep has a collection of ``links''. Like its collection of
+LogFiles, each link has a name and a target URL. The web status page
+creates HREFs for each link in the same box as it does for LogFiles,
+except that the target of the link is the external URL instead of an
+internal link to a page that shows the contents of the LogFile.
+
+These external links can be used to point at build information hosted
+on other servers. For example, the test process might produce an
+intricate description of which tests passed and failed, or some sort
+of code coverage data in HTML form, or a PNG or GIF image with a graph
+of memory usage over time. The external link can provide an easy way
+for users to navigate from the buildbot's status page to these
+external web sites or file servers. Note that the step itself is
+responsible for ensuring that there will be a document available at
+the given URL (perhaps by using @command{scp} to copy the HTML output
+to a @file{~/public_html/} directory on a remote web server). Calling
+ at code{addURL} does not magically populate a web server.
+
+To set one of these links, the BuildStep should call the @code{addURL}
+method with the name of the link and the target URL. Multiple URLs can
+be set.
+
+In this example, we assume that the @command{make test} command causes
+a collection of HTML files to be created and put somewhere on the
+coverage.example.org web server, in a filename that incorporates the
+build number.
+
+ at example
+class TestWithCodeCoverage(BuildStep):
+    command = ["make", "test",
+               WithProperties("buildnum=%s", "buildnumber")]
+
+    def createSummary(self, log):
+        buildnumber = self.getProperty("buildnumber")
+        url = "http://coverage.example.org/builds/%s.html" % buildnumber
+        self.addURL("coverage", url)
+ at end example
+
+You might also want to extract the URL from some special message
+output by the build process itself:
+
+ at example
+class TestWithCodeCoverage(BuildStep):
+    command = ["make", "test",
+               WithProperties("buildnum=%s", "buildnumber")]
+
+    def createSummary(self, log):
+        output = StringIO(log.getText())
+        for line in output.readlines():
+            if line.startswith("coverage-url:"):
+                url = line[len("coverage-url:"):].strip()
+                self.addURL("coverage", url)
+                return
+ at end example
+
+Note that a build process which emits both stdout and stderr might
+cause this line to be split or interleaved between other lines. It
+might be necessary to restrict the getText() call to only stdout with
+something like this:
+
+ at example
+        output = StringIO("".join([c[1]
+                                   for c in log.getChunks()
+                                   if c[0] == LOG_CHANNEL_STDOUT]))
+ at end example
+
+Of course if the build is run under a PTY, then stdout and stderr will
+be merged before the buildbot ever sees them, so such interleaving
+will be unavoidable.
+
+
+ at node Interlocks, Build Factories, Build Steps, Build Process
+ at section Interlocks
+
+ at cindex locks
+ at slindex buildbot.locks.MasterLock
+ at slindex buildbot.locks.SlaveLock
+
+For various reasons, you may want to prevent certain Steps (or perhaps
+entire Builds) from running simultaneously. Limited CPU speed or
+network bandwidth to the VC server, problems with simultaneous access
+to a database server used by unit tests, or multiple Builds which
+access shared state may all require some kind of interlock to prevent
+corruption, confusion, or resource overload. These resources might
+require completely exclusive access, or it might be sufficient to
+establish a limit of two or three simultaneous builds.
+
+ at code{Locks} are the mechanism used to express these kinds of
+constraints on when Builds or Steps can be run. There are two kinds of
+ at code{Locks}, each with their own scope: @code{MasterLock} instances
+are scoped to the buildbot as a whole, while @code{SlaveLock}s are
+scoped to a single buildslave. This means that each buildslave has a
+separate copy of each @code{SlaveLock}, which could enforce a
+one-Build-at-a-time limit for each machine, but still allow as many
+simultaneous builds as there are machines.
+
+Each @code{Lock} is created with a unique name. Each lock gets a count
+of how many owners it may have: how many processes can claim it at the
+same time. This limit defaults to one, and is controllable through the
+ at code{maxCount} argument. On @code{SlaveLock}s you can set the owner
+count on a per-slave basis by providing a dictionary (that maps from
+slavename to maximum owner count) to its @code{maxCountForSlave}
+argument. Any buildslaves that aren't mentioned in
+ at code{maxCountForSlave} get their owner count from @code{maxCount}.
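+
+As a quick sketch (the lock names, slave name, and counts are
+arbitrary):
+
+ at example
+from buildbot import locks
+
+# master-wide lock: at most one owner at a time (the default maxCount=1)
+db_lock = locks.MasterLock("database")
+# per-slave lock: two owners per slave, except 'bot-fast' which gets ten
+cpu_lock = locks.SlaveLock("cpu", maxCount=2,
+                           maxCountForSlave=@{"bot-fast": 10@})
+ at end example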
+
+To use a lock, simply include it in the @code{locks=} argument of the
+ at code{BuildStep} object that should obtain the lock before it runs.
+This argument accepts a list of @code{Lock} objects: the Step will
+acquire all of them before it runs.
+
+To claim a lock for the whole Build, add a @code{'locks'} key to the
+builder specification dictionary with the same list of @code{Lock}
+objects. (This is the dictionary that has the @code{'name'},
+ at code{'slavename'}, @code{'builddir'}, and @code{'factory'} keys). The
+ at code{Build} object also accepts a @code{locks=} argument, but unless
+you are writing your own @code{BuildFactory} subclass then it will be
+easier to set the locks in the builder dictionary.
+
+Note that there are no partial-acquire or partial-release semantics:
+this prevents deadlocks caused by two Steps each waiting for a lock
+held by the other@footnote{Also note that a clever buildmaster admin
+could still create the opportunity for deadlock: Build A obtains Lock
+1, inside which Step A.two tries to acquire Lock 2 at the Step level.
+Meanwhile Build B obtains Lock 2, and has a Step B.two which wants to
+acquire Lock 1 at the Step level. Don't Do That.}. This also means
+that waiting to acquire a @code{Lock} can take an arbitrarily long
+time: if the buildmaster is very busy, a Step or Build which requires
+only one @code{Lock} may starve another that is waiting for that
+ at code{Lock} plus some others.
+
+
+In the following example, we run the same build on three different
+platforms. The unit-test steps of these builds all use a common
+database server, and would interfere with each other if allowed to run
+simultaneously. The @code{Lock} prevents more than one of these builds
+from happening at the same time.
+
+ at example
+from buildbot import locks
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+db_lock = locks.MasterLock("database")
+f = factory.BuildFactory()
+f.addStep(source.SVN, svnurl="http://example.org/svn/Trunk")
+f.addStep(shell.ShellCommand, command="make all")
+f.addStep(shell.ShellCommand, command="make test", locks=[db_lock])
+b1 = @{'name': 'full1', 'slavename': 'bot-1', 'builddir': 'f1', 'factory': f@}
+b2 = @{'name': 'full2', 'slavename': 'bot-2', 'builddir': 'f2', 'factory': f@}
+b3 = @{'name': 'full3', 'slavename': 'bot-3', 'builddir': 'f3', 'factory': f@}
+c['builders'] = [b1, b2, b3]
+ at end example
+
+In the next example, we have one buildslave hosting three separate
+Builders (each running tests against a different version of Python).
+The machine which hosts this buildslave is not particularly fast, so
+we want to prevent all three builds from happening at the same
+time. (Assume we've experimentally determined that one build leaves
+unused CPU capacity, three builds causes a lot of disk thrashing, but
+two builds at a time is Just Right). We use a @code{SlaveLock} because
+the builds happening on this one slow slave should not affect builds
+running on other slaves, and we use the lock on the build as a whole
+because the slave is so slow that even multiple simultaneous SVN
+checkouts would be too taxing. We set @code{maxCount=2} to achieve our
+goal of two simultaneous builds per slave.
+
+ at example
+from buildbot import locks
+from buildbot.steps import source
+from buildbot.process import factory
+from buildbot.process.factory import s
+
+slow_lock = locks.SlaveLock("cpu", maxCount=2)
+svn_co = s(source.SVN, svnurl="http://example.org/svn/Trunk")
+f22 = factory.Trial(svn_co, trialpython=["python2.2"])
+f23 = factory.Trial(svn_co, trialpython=["python2.3"])
+f24 = factory.Trial(svn_co, trialpython=["python2.4"])
+b1 = @{'name': 'p22', 'slavename': 'bot-1', 'builddir': 'p22', 'factory': f22,
+      'locks': [slow_lock]@}
+b2 = @{'name': 'p23', 'slavename': 'bot-1', 'builddir': 'p23', 'factory': f23,
+      'locks': [slow_lock]@}
+b3 = @{'name': 'p24', 'slavename': 'bot-1', 'builddir': 'p24', 'factory': f24,
+      'locks': [slow_lock]@}
+c['builders'] = [b1, b2, b3]
+ at end example
+
+In the last example, we use two Locks at the same time. In this case,
+we're concerned about both of the previous constraints, but we'll say
+that only the tests are computationally intensive, and that they have
+been split into those which use the database and those which do not.
+In addition, two of the Builds run on a fast machine which does not
+need to worry about the cpu lock, but which still must be prevented
+from simultaneous database access. We use @code{maxCountForSlave} to
+limit the slow machine to one simultaneous build, but allow practically
+unlimited concurrent builds on the fast machine.
+
+ at example
+from buildbot import locks
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+db_lock = locks.MasterLock("database")
+slavecounts = @{"bot-slow": 1, "bot-fast": 100@}
+cpu_lock = locks.SlaveLock("cpu", maxCountForSlave=slavecounts)
+f = factory.BuildFactory()
+f.addStep(source.SVN, svnurl="http://example.org/svn/Trunk")
+f.addStep(shell.ShellCommand, command="make all", locks=[cpu_lock])
+f.addStep(shell.ShellCommand, command="make test", locks=[cpu_lock])
+f.addStep(shell.ShellCommand, command="make db-test",
+                              locks=[db_lock, cpu_lock])
+
+b1 = @{'name': 'full1', 'slavename': 'bot-slow', 'builddir': 'full1',
+      'factory': f@}
+b2 = @{'name': 'full2', 'slavename': 'bot-slow', 'builddir': 'full2',
+      'factory': f@}
+b3 = @{'name': 'full3', 'slavename': 'bot-fast', 'builddir': 'full3',
+      'factory': f@}
+b4 = @{'name': 'full4', 'slavename': 'bot-fast', 'builddir': 'full4',
+      'factory': f@}
+c['builders'] = [b1, b2, b3, b4]
+ at end example
+
+As a final note, remember that a unit test system which breaks when
+multiple people run it at the same time is fragile and should be
+fixed. Asking your human developers to serialize themselves when
+running unit tests will just discourage them from running the unit
+tests at all. Find a way to fix this: change the database tests to
+create a new (uniquely-named) user or table for each test run, don't
+use fixed listening TCP ports for network tests (instead listen on
+port 0 to let the kernel choose a port for you and then query the
+socket to find out what port was allocated). @code{MasterLock}s can be
+used to accommodate broken test systems like this, but are really
+intended for other purposes: build processes that store or retrieve
+products in shared directories, or which do things that human
+developers would not (or which might slow down or break in ways that
+require human attention to deal with).
+
+ at code{SlaveLock}s can be used to keep automated performance tests
+from interfering with each other, when there are multiple Builders all
+using the same buildslave. But they can't prevent other users from
+running CPU-intensive jobs on that host while the tests are running.
+
+ at node Build Factories,  , Interlocks, Build Process
+ at section Build Factories
+
+
+Each Builder is equipped with a ``build factory'', which is
+responsible for producing the actual @code{Build} objects that perform
+each build. This factory is created in the configuration file, and
+attached to a Builder through the @code{factory} element of its
+dictionary.
+
+The standard @code{BuildFactory} object creates @code{Build} objects
+by default. These Builds will each execute a collection of BuildSteps
+in a fixed sequence. Each step can affect the results of the build,
+but in general there is little intelligence to tie the different steps
+together. You can create subclasses of @code{Build} to implement more
+sophisticated build processes, and then use a subclass of
+ at code{BuildFactory} (or simply set the @code{buildClass} attribute) to
+create instances of your new Build subclass.
+
+
+ at menu
+* BuildStep Objects::           
+* BuildFactory::                
+* Process-Specific build factories::  
+ at end menu
+
+ at node BuildStep Objects, BuildFactory, Build Factories, Build Factories
+ at subsection BuildStep Objects
+
+The steps used by these builds are all subclasses of @code{BuildStep}.
+The standard ones provided with Buildbot are documented later,
+ at xref{Build Steps}. You can also write your own subclasses to use in
+builds.
+
+The basic behavior for a @code{BuildStep} is to:
+
+ at itemize @bullet
+ at item
+run for a while, then stop
+ at item
+possibly invoke some RemoteCommands on the attached build slave
+ at item
+possibly produce a set of log files
+ at item
+finish with a status described by one of four values defined in
+buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, SKIPPED
+ at item
+provide a list of short strings to describe the step
+ at item
+define a color (generally green, orange, or red) with which the
+step should be displayed
+ at end itemize
+
+
+More sophisticated steps may produce additional information and
+provide it to later build steps, or store it in the factory to provide
+to later builds.
+
+
+
+ at node BuildFactory, Process-Specific build factories, BuildStep Objects, Build Factories
+ at subsection BuildFactory
+
+ at bfindex buildbot.process.factory.BuildFactory
+ at bfindex buildbot.process.factory.BasicBuildFactory
+ at c TODO: what is BasicSVN anyway?
+ at bfindex buildbot.process.factory.BasicSVN
+
+The default @code{BuildFactory}, provided in the
+ at code{buildbot.process.factory} module, contains a list of ``BuildStep
+specifications'': a list of @code{(step_class, kwargs)} tuples for
+each. When asked to create a Build, it loads the list of steps into
+the new Build object. When the Build is actually started, these step
+specifications are used to create the actual set of BuildSteps, which
+are then executed one at a time. For example, a build which consists
+of a CVS checkout followed by a @code{make build} would be constructed
+as follows:
+
+ at example
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+f = factory.BuildFactory()
+f.addStep(source.CVS, cvsroot=CVSROOT, cvsmodule="project", mode="update")
+f.addStep(shell.Compile, command=["make", "build"])
+ at end example
+
+It is also possible to pass a list of step specifications into the
+ at code{BuildFactory} when it is created. Using @code{addStep} is
+usually simpler, but there are cases where it is more convenient to
+create the list of steps ahead of time. To make this approach easier,
+a convenience function named @code{s} is available:
+
+ at example
+from buildbot.steps import source, shell
+from buildbot.process import factory
+from buildbot.process.factory import s
+# s is a convenience function, defined with:
+# def s(steptype, **kwargs): return (steptype, kwargs)
+
+all_steps = [s(source.CVS, cvsroot=CVSROOT, cvsmodule="project",
+               mode="update"),
+             s(shell.Compile, command=["make", "build"]),
+            ]
+f = factory.BuildFactory(all_steps)
+ at end example
+
+
+Each step can affect the build process in the following ways:
+
+ at itemize @bullet
+ at item
+If the step's @code{haltOnFailure} attribute is True, then a failure
+in the step (i.e. if it completes with a result of FAILURE) will cause
+the whole build to be terminated immediately: no further steps will be
+executed. This is useful for setup steps upon which the rest of the
+build depends: if the CVS checkout or @code{./configure} process
+fails, there is no point in trying to compile or test the resulting
+tree.
+
+ at item
+If the @code{flunkOnFailure} or @code{flunkOnWarnings} flag is set,
+then a result of FAILURE or WARNINGS will mark the build as a whole as
+FAILED. However, the remaining steps will still be executed. This is
+appropriate for things like multiple testing steps: a failure in any
+one of them will indicate that the build has failed, however it is
+still useful to run them all to completion.
+ 
+ at item
+Similarly, if the @code{warnOnFailure} or @code{warnOnWarnings} flag
+is set, then a result of FAILURE or WARNINGS will mark the build as
+having WARNINGS, and the remaining steps will still be executed. This
+may be appropriate for certain kinds of optional build or test steps.
+For example, a failure experienced while building documentation files
+should be made visible with a WARNINGS result but not be serious
+enough to warrant marking the whole build with a FAILURE.
+
+ at end itemize
+
+In addition, each Step produces its own results, may create logfiles,
+etc. However only the flags described above have any effect on the
+build as a whole.
+
+The pre-defined BuildSteps like @code{CVS} and @code{Compile} have
+reasonably appropriate flags set on them already. For example, without
+a source tree there is no point in continuing the build, so the
+ at code{CVS} class has the @code{haltOnFailure} flag set to True. Look
+in @file{buildbot/process/step.py} to see how the other Steps are
+marked.
+
+Each Step is created with an additional @code{workdir} argument that
+indicates where its actions should take place. This is specified as a
+subdirectory of the slave builder's base directory, with a default
+value of @code{build}. This is only implemented as a step argument (as
+opposed to simply being a part of the base directory) because the
+CVS/SVN steps need to perform their checkouts from the parent
+directory.
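+
+For example (the subdirectory name is invented for illustration, and
+ at code{f} and @code{shell} are assumed from the earlier examples):
+
+ at example
+# run the tests from a subdirectory of the default 'build' workdir
+f.addStep(shell.ShellCommand, command=["make", "check"],
+          workdir="build/tests")
+ at end example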
+
+ at menu
+* BuildFactory Attributes::     
+* Quick builds::                
+ at end menu
+
+ at node BuildFactory Attributes, Quick builds, BuildFactory, BuildFactory
+ at subsubsection BuildFactory Attributes
+
+Some attributes from the BuildFactory are copied into each Build.
+
+ at cindex treeStableTimer
+
+ at table @code
+ at item useProgress
+(defaults to True): if True, the buildmaster keeps track of how long
+each step takes, so it can provide estimates of how long future builds
+will take. If builds are not expected to take a consistent amount of
+time (such as incremental builds in which a random set of files are
+recompiled or tested each time), this should be set to False to
+inhibit progress-tracking.
+
+ at end table
+
+
+ at node Quick builds,  , BuildFactory Attributes, BuildFactory
+ at subsubsection Quick builds
+
+ at bfindex buildbot.process.factory.QuickBuildFactory
+
+The difference between a ``full build'' and a ``quick build'' is that
+quick builds are generally done incrementally, starting with the tree
+where the previous build was performed. That simply means that the
+source-checkout step should be given a @code{mode='update'} flag, to
+do the source update in-place.
+
+In addition to that, the @code{useProgress} flag should be set to
+False. Incremental builds will (or at least they ought to) compile as
+few files as necessary, so they will take an unpredictable amount of
+time to run. Therefore it would be misleading to claim to predict how
+long the build will take.
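+
+A sketch of a quick-build factory along these lines, assuming the
+ at code{useProgress} attribute can be set directly on the factory
+instance (the URL and commands are placeholders):
+
+ at example
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+quick = factory.BuildFactory()
+quick.addStep(source.SVN, svnurl="http://example.org/svn/Trunk",
+              mode="update")
+quick.addStep(shell.Compile, command=["make", "build"])
+quick.useProgress = False  # incremental builds take unpredictable time
+ at end example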
+
+
+ at node Process-Specific build factories,  , BuildFactory, Build Factories
+ at subsection Process-Specific build factories
+
+Many projects use one of a few popular build frameworks to simplify
+the creation and maintenance of Makefiles or other compilation
+structures. Buildbot provides several pre-configured BuildFactory
+subclasses which let you build these projects with a minimum of fuss.
+
+ at menu
+* GNUAutoconf::                 
+* CPAN::                        
+* Python distutils::            
+* Python/Twisted/trial projects::  
+ at end menu
+
+ at node GNUAutoconf, CPAN, Process-Specific build factories, Process-Specific build factories
+ at subsubsection GNUAutoconf
+
+ at bfindex buildbot.process.factory.GNUAutoconf
+
+ at uref{http://www.gnu.org/software/autoconf/, GNU Autoconf} is a
+software portability tool, intended to make it possible to write
+programs in C (and other languages) which will run on a variety of
+UNIX-like systems. Most GNU software is built using autoconf. It is
+frequently used in combination with GNU automake. These tools both
+encourage a build process which usually looks like this:
+
+ at example
+% CONFIG_ENV=foo ./configure --with-flags
+% make all
+% make check
+# make install
+ at end example
+
+(except of course the Buildbot always skips the @code{make install}
+part).
+
+The Buildbot's @code{buildbot.process.factory.GNUAutoconf} factory is
+designed to build projects which use GNU autoconf and/or automake. The
+configuration environment variables, the configure flags, and command
+lines used for the compile and test are all configurable; in general
+the default values will be suitable.
+
+Example:
+
+ at example
+# use the s() convenience function defined earlier
+f = factory.GNUAutoconf(source=s(source.SVN, svnurl=URL, mode="copy"),
+                        configureFlags=["--disable-nls"])
+ at end example
+
+Required Arguments:
+
+ at table @code
+ at item source
+This argument must be a step specification tuple that provides a
+BuildStep to generate the source tree.
+ at end table
+
+Optional Arguments:
+
+ at table @code
+ at item configure
+The command used to configure the tree. Defaults to
+ at code{./configure}. Accepts either a string or a list of shell argv
+elements.
+
+ at item configureEnv
+The environment used for the initial configuration step. This accepts
+a dictionary which will be merged into the buildslave's normal
+environment. This is commonly used to provide things like
+ at code{CFLAGS="-O2 -g"} (to control the optimization and debugging
+flags used during the compile). Defaults to an empty dictionary.
+
+ at item configureFlags
+A list of flags to be appended to the argument list of the configure
+command. This is commonly used to enable or disable specific features
+of the autoconf-controlled package, like @code{["--without-x"]} to
+disable windowing support. Defaults to an empty list.
+
+ at item compile
+this is a shell command or list of argv values which is used to
+actually compile the tree. It defaults to @code{make all}. If set to
+None, the compile step is skipped.
+
+ at item test
+this is a shell command or list of argv values which is used to run
+the tree's self-tests. It defaults to @code{make check}. If set to
+None, the test step is skipped.
+
+ at end table
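+
+For example, a sketch that exercises several of the optional arguments
+(the specific flags and environment values are arbitrary):
+
+ at example
+f = factory.GNUAutoconf(source=s(source.SVN, svnurl=URL, mode="copy"),
+                        configureEnv=@{"CFLAGS": "-O2"@},
+                        configureFlags=["--without-x"],
+                        compile=["make", "all"],
+                        test=["make", "check"])
+ at end example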
+
+
+ at node CPAN, Python distutils, GNUAutoconf, Process-Specific build factories
+ at subsubsection CPAN
+
+ at bfindex buildbot.process.factory.CPAN
+
+Most Perl modules available from the @uref{http://www.cpan.org/, CPAN}
+archive use the @code{MakeMaker} module to provide configuration,
+build, and test services. The standard build routine for these modules
+looks like:
+
+ at example
+% perl Makefile.PL
+% make
+% make test
+# make install
+ at end example
+
+(except again Buildbot skips the install step)
+
+Buildbot provides a @code{CPAN} factory to compile and test these
+projects.
+
+
+Arguments:
+ at table @code
+ at item source
+(required): A step specification tuple, like that used by GNUAutoconf.
+
+ at item perl
+A string which specifies the @code{perl} executable to use. Defaults
+to just @code{perl}.
+
+ at end table
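+
+For example (the perl path is only an illustration):
+
+ at example
+f = factory.CPAN(source=s(source.SVN, svnurl=URL, mode="copy"),
+                 perl="/usr/local/bin/perl")
+ at end example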
+
+
+ at node Python distutils, Python/Twisted/trial projects, CPAN, Process-Specific build factories
+ at subsubsection Python distutils
+
+ at bfindex buildbot.process.factory.Distutils
+
+Most Python modules use the @code{distutils} package to provide
+configuration and build services. The standard build process looks
+like:
+
+ at example
+% python ./setup.py build
+% python ./setup.py install
+ at end example
+
+Unfortunately, although Python provides a standard unit-test framework
+named @code{unittest}, to the best of my knowledge @code{distutils}
+does not provide a standardized target to run such unit tests. (please
+let me know if I'm wrong, and I will update this factory).
+
+The @code{Distutils} factory provides support for running the build
+part of this process. It accepts the same @code{source=} parameter as
+the other build factories.
+
+
+Arguments:
+ at table @code
+ at item source
+(required): A step specification tuple, like that used by GNUAutoconf.
+  
+ at item python
+A string which specifies the @code{python} executable to use. Defaults
+to just @code{python}.
+
+ at item test
+Provides a shell command which runs unit tests. This accepts either a
+string or a list. The default value is None, which disables the test
+step (since there is no common default command to run unit tests in
+distutils modules).
+
+ at end table
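+
+For example, a sketch which assumes the project supplies its own
+ at file{test.py} script to run the unit tests (remember that there is no
+standard distutils test target):
+
+ at example
+f = factory.Distutils(source=s(source.SVN, svnurl=URL, mode="copy"),
+                      python="python2.4",
+                      test=["python2.4", "test.py"])
+ at end example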
+
+
+ at node Python/Twisted/trial projects,  , Python distutils, Process-Specific build factories
+ at subsubsection Python/Twisted/trial projects
+
+ at bfindex buildbot.process.factory.Trial
+ at c TODO: document these steps better
+ at bsindex buildbot.steps.python_twisted.HLint
+ at bsindex buildbot.steps.python_twisted.Trial
+ at bsindex buildbot.steps.python_twisted.ProcessDocs
+ at bsindex buildbot.steps.python_twisted.BuildDebs
+ at bsindex buildbot.steps.python_twisted.RemovePYCs
+
+Twisted provides a unit test tool named @code{trial} which provides a
+few improvements over Python's built-in @code{unittest} module. Many
+python projects which use Twisted for their networking or application
+services also use trial for their unit tests. These modules are
+usually built and tested with something like the following:
+
+ at example
+% python ./setup.py build
+% PYTHONPATH=build/lib.linux-i686-2.3 trial -v PROJECTNAME.test
+% python ./setup.py install
+ at end example
+
+Unfortunately, the @file{build/lib} directory into which the
+built/copied .py files are placed is actually architecture-dependent,
+and I do not yet know of a simple way to calculate its value. For many
+projects it is sufficient to import their libraries ``in place'' from
+the tree's base directory (@code{PYTHONPATH=.}).
+
+In addition, the @var{PROJECTNAME} value where the test files are
+located is project-dependent: it is usually just the project's
+top-level library directory, as common practice suggests the unit test
+files are put in the @code{test} sub-module. This value cannot be
+guessed, the @code{Trial} class must be told where to find the test
+files.
+
+The @code{Trial} class provides support for building and testing
+projects which use distutils and trial. If the test module name is
+specified, trial will be invoked. The library path used for testing
+can also be set.
+
+One advantage of trial is that the Buildbot happens to know how to
+parse trial output, letting it identify which tests passed and which
+ones failed. The Buildbot can then provide fine-grained reports about
+how many tests have failed, when individual tests fail when they had
+been passing previously, etc.
+
+Another feature of trial is that you can give it a series of source
+.py files, and it will search them for special @code{test-case-name}
+tags that indicate which test cases provide coverage for that file.
+Trial can then run just the appropriate tests. This is useful for
+quick builds, where you want to only run the test cases that cover the
+changed functionality.
+
+Arguments:
+ at table @code
+ at item source
+(required): A step specification tuple, like that used by GNUAutoconf.
+
+ at item buildpython
+A list (argv array) of strings which specifies the @code{python}
+executable to use when building the package. Defaults to just
+ at code{['python']}. It may be useful to add flags here, to suppress
+warnings during compilation of extension modules. This list is
+extended with @code{['./setup.py', 'build']} and then executed in a
+ShellCommand.
+
+ at item testpath
+Provides a directory to add to @code{PYTHONPATH} when running the unit
+tests, if tests are being run. Defaults to @code{.} to include the
+project files in-place. The generated build library is frequently
+architecture-dependent, but may simply be @file{build/lib} for
+pure-python modules.
+
+ at item trialpython
+Another list of strings used to build the command that actually runs
+trial. This is prepended to the contents of the @code{trial} argument
+below. It may be useful to add @code{-W} flags here to suppress
+warnings that occur while tests are being run. Defaults to an empty
+list, meaning @code{trial} will be run without an explicit
+interpreter, which is generally what you want if you're using
+ at file{/usr/bin/trial} instead of, say, the @file{./bin/trial} that
+lives in the Twisted source tree.
+
+ at item trial
+provides the name of the @code{trial} command. It is occasionally
+useful to use an alternate executable, such as @code{trial2.2} which
+might run the tests under an older version of Python. Defaults to
+ at code{trial}.
+
+ at item tests
+Provides a module name or names which contain the unit tests for this
+project. Accepts a string, typically @code{PROJECTNAME.test}, or a
+list of strings. Defaults to None, indicating that no tests should be
+run. You must either set this or @code{useTestCaseNames} to do anything
+useful with the Trial factory.
+
+ at item useTestCaseNames
+Tells the Step to provide the names of all changed .py files to trial,
+so it can look for test-case-name tags and run just the matching test
+cases. Suitable for use in quick builds. Defaults to False.
+
+ at item randomly
+If @code{True}, tells Trial (with the @code{--random=0} argument) to
+run the test cases in random order, which sometimes catches subtle
+inter-test dependency bugs. Defaults to @code{False}.
+
+ at item recurse
+If @code{True}, tells Trial (with the @code{--recurse} argument) to
+look in all subdirectories for additional test cases. It isn't clear
+to me how this works, but it may be useful to deal with the
+unknown-PROJECTNAME problem described above, and is currently used in
+the Twisted buildbot to accommodate the fact that test cases are now
+distributed through multiple twisted.SUBPROJECT.test directories.
+
+ at end table  
+
+Unless one of @code{tests} or @code{useTestCaseNames} is set, no
+tests will be run.
+
+Some quick examples follow. Most of these examples assume that the
+target python code (the ``code under test'') can be reached directly
+from the root of the target tree, rather than being in a @file{lib/}
+subdirectory.
+
+ at example
+#  Trial(source, tests="toplevel.test") does:
+#   python ./setup.py build
+#   PYTHONPATH=. trial -to toplevel.test
+
+#  Trial(source, tests=["toplevel.test", "other.test"]) does:
+#   python ./setup.py build
+#   PYTHONPATH=. trial -to toplevel.test other.test
+
+#  Trial(source, useTestCaseNames=True) does:
+#   python ./setup.py build
+#   PYTHONPATH=. trial -to --testmodule=foo/bar.py..  (from Changes)
+
+#  Trial(source, buildpython=["python2.3", "-Wall"], tests="foo.tests"):
+#   python2.3 -Wall ./setup.py build
+#   PYTHONPATH=. trial -to foo.tests
+
+#  Trial(source, trialpython="python2.3", trial="/usr/bin/trial",
+#        tests="foo.tests") does:
+#   python ./setup.py build
+#   PYTHONPATH=. python2.3 /usr/bin/trial -to foo.tests
+
+# For running trial out of the tree being tested (only useful when the
+# tree being built is Twisted itself):
+#  Trial(source, trialpython=["python2.3", "-Wall"], trial="./bin/trial",
+#        tests="foo.tests") does:
+#   python ./setup.py build
+#   PYTHONPATH=. python2.3 -Wall ./bin/trial -to foo.tests
+ at end example
+
+If the output directory of @code{./setup.py build} is known, you can
+pull the python code from the built location instead of the source
+directories. This should be able to handle variations in where the
+source comes from, as well as accommodating binary extension modules:
+
+ at example
+# Trial(source,tests="toplevel.test",testpath='build/lib.linux-i686-2.3')
+# does:
+#  python ./setup.py build
+#  PYTHONPATH=build/lib.linux-i686-2.3 trial -to toplevel.test
+ at end example
+
+
+ at node Status Delivery, Command-line tool, Build Process, Top
+ at chapter Status Delivery
+
+More details are available in the docstrings for each class, use
+ at code{pydoc buildbot.status.html.Waterfall} to see them. Most status
+delivery objects take a @code{categories=} argument, which can contain
+a list of ``category'' names: in this case, it will only show status
+for Builders that are in one of the named categories.
+
+(implementor's note: each of these objects should be a
+service.MultiService which will be attached to the BuildMaster object
+when the configuration is processed. They should use
+ at code{self.parent.getStatus()} to get access to the top-level IStatus
+object, either inside @code{startService} or later. They may call
+ at code{status.subscribe()} in @code{startService} to receive
+notifications of builder events, in which case they must define
+ at code{builderAdded} and related methods. See the docstrings in
+ at file{buildbot/interfaces.py} for full details.)
+
+ at menu
+* HTML Waterfall::              
+* IRC Bot::                     
+* PBListener::                  
+* Writing New Status Plugins::  
+ at end menu
+
+ at c @node Email Delivery,  , Status Delivery, Status Delivery
+ at c @subsection Email Delivery
+
+ at c DOCUMENT THIS
+
+ at node HTML Waterfall, IRC Bot, Status Delivery, Status Delivery
+ at section HTML Waterfall
+
+ at cindex Waterfall
+ at stindex buildbot.status.html.Waterfall
+
+
+ at example
+from buildbot.status import html
+w = html.Waterfall(http_port=8080)
+c['status'].append(w)
+ at end example
+
+The @code{buildbot.status.html.Waterfall} status target creates an
+HTML ``waterfall display'', which shows a time-based chart of events.
+This display provides detailed information about all steps of all
+recent builds, and provides hyperlinks to look at individual build
+logs and source changes. If the @code{http_port} argument is provided,
+it provides a strports specification for the port that the web server
+should listen on. This can be a simple port number, or a string like
+ at code{tcp:8080:interface=127.0.0.1} (to limit connections to the
+loopback interface, and therefore to clients running on the same
+host)@footnote{It may even be possible to provide SSL access by using
+a specification like
+ at code{"ssl:12345:privateKey=mykey.pem:certKey=cert.pem"}, but this is
+completely untested}.
+
+If instead (or in addition) you provide the @code{distrib_port}
+argument, a twisted.web distributed server will be started either on a
+TCP port (if @code{distrib_port} is like @code{"tcp:12345"}) or more
+likely on a UNIX socket (if @code{distrib_port} is like
+ at code{"unix:/path/to/socket"}).
+
+The @code{distrib_port} option means that, on a host with a
+suitably-configured twisted-web server, you do not need to consume a
+separate TCP port for the buildmaster's status web page. When the web
+server is constructed with @code{mktap web --user}, URLs that point to
+ at code{http://host/~username/} are dispatched to a sub-server that is
+listening on a UNIX socket at @code{~username/.twistd-web-pb}. On
+such a system, it is convenient to create a dedicated @code{buildbot}
+user, then set @code{distrib_port} to
+ at code{"unix:"+os.path.expanduser("~/.twistd-web-pb")}. This
+configuration will make the HTML status page available at
+ at code{http://host/~buildbot/} . Suitable URL remapping can make it
+appear at @code{http://host/buildbot/}, and the right virtual host
+setup can even place it at @code{http://buildbot.host/} .
+
+Other arguments:
+
+ at table @code
+ at item allowForce
+If set to True (the default), then the web page will provide a ``Force
+Build'' button that allows visitors to manually trigger builds. This
+is useful for developers to re-run builds that have failed because of
+intermittent problems in the test suite, or because of libraries that
+were not installed at the time of the previous build. You may not wish
+to allow strangers to cause a build to run: in that case, set this to
+False to remove these buttons.
+
+ at item favicon
+If set to a string, this will be interpreted as a filename containing
+a ``favicon'': a small image that contains an icon for the web site.
+This is returned to browsers that request the @code{favicon.ico} file,
+and should point to a .png or .ico image file. The default value uses
+the buildbot/buildbot.png image (a small hex nut) contained in the
+buildbot distribution. You can set this to None to avoid using a
+favicon at all.
+
+ at item robots_txt
+If set to a string, this will be interpreted as a filename containing
+the contents of ``robots.txt''. Many search engine spiders request
+this file before indexing the site. Setting it to a file which
+contains:
+ at example
+User-agent: *
+Disallow: /
+ at end example
+will prevent most search engines from trawling the (voluminous)
+generated status pages.
+
+ at end table
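+
+For example, to listen only on the loopback interface and remove the
+``Force Build'' buttons:
+
+ at example
+from buildbot.status import html
+w = html.Waterfall(http_port="tcp:8080:interface=127.0.0.1",
+                   allowForce=False)
+c['status'].append(w)
+ at end example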
+
+
+ at node IRC Bot, PBListener, HTML Waterfall, Status Delivery
+ at section IRC Bot
+
+ at cindex IRC
+ at stindex buildbot.status.words.IRC
+
+
+The @code{buildbot.status.words.IRC} status target creates an IRC bot
+which will attach to certain channels and be available for status
+queries. It can also be asked to announce builds as they occur, or be
+told to shut up.
+
+ at example
+from buildbot.status import words
+irc = words.IRC("irc.example.org", "botnickname", 
+                channels=["channel1", "channel2"],
+                password="mysecretpassword")
+c['status'].append(irc)
+ at end example
+
+Take a look at the docstring for @code{words.IRC} for more details on
+configuring this service. The @code{password} argument, if provided,
+will be sent to Nickserv to claim the nickname: some IRC servers will
+not allow clients to send private messages until they have logged in
+with a password.
+
+To use the service, you address messages at the buildbot, either
+normally (@code{botnickname: status}) or with private messages
+(@code{/msg botnickname status}). The buildbot will respond in kind.
+
+Some of the commands currently available:
+
+ at table @code
+
+ at item list builders
+Emit a list of all configured builders
+ at item status BUILDER
+Announce the status of a specific Builder: what it is doing right now.
+ at item status all
+Announce the status of all Builders
+ at item watch BUILDER
+If the given Builder is currently running, wait until the Build is
+finished and then announce the results.
+ at item last BUILDER
+Return the results of the last build to run on the given Builder.
+
+ at item help COMMAND
+Describe a command. Use @code{help commands} to get a list of known
+commands.
+ at item source
+Announce the URL of the Buildbot's home page.
+ at item version
+Announce the version of this Buildbot.
+ at end table
+
+If the @code{allowForce=True} option was used, some additional commands
+will be available:
+
+ at table @code
+ at item force build BUILDER REASON
+Tell the given Builder to start a build of the latest code. The user
+requesting the build and REASON are recorded in the Build status. The
+buildbot will announce the build's status when it finishes.
+
+ at item stop build BUILDER REASON
+Terminate any running build in the given Builder. REASON will be added
+to the build status to explain why it was stopped. You might use this
+if you committed a bug, corrected it right away, and don't want to
+wait for the first build (which is destined to fail) to complete
+before starting the second (hopefully fixed) build.
+ at end table
+
+ at node PBListener, Writing New Status Plugins, IRC Bot, Status Delivery
+ at section PBListener
+
+ at cindex PBListener
+ at stindex buildbot.status.client.PBListener
+
+
+ at example
+import buildbot.status.client
+pbl = buildbot.status.client.PBListener(port=int, user=str,
+                                        passwd=str)
+c['status'].append(pbl)
+ at end example
+
+This sets up a PB listener on the given TCP port, to which a PB-based
+status client can connect and retrieve status information.
+ at code{buildbot statusgui} (@pxref{statusgui}) is an example of such a
+status client. The @code{port} argument can also be a strports
+specification string.
+
+ at node Writing New Status Plugins,  , PBListener, Status Delivery
+ at section Writing New Status Plugins
+
+TODO: this needs a lot more examples
+
+Each status plugin is an object which provides the
+ at code{twisted.application.service.IService} interface, which creates a
+tree of Services with the buildmaster at the top [not strictly true].
+The status plugins are all children of an object which implements
+ at code{buildbot.interfaces.IStatus}, the main status object. From this
+object, the plugin can retrieve anything it wants about current and
+past builds. It can also subscribe to hear about new and upcoming
+builds.
+
+Status plugins which only react to human queries (like the Waterfall
+display) never need to subscribe to anything: they are idle until
+someone asks a question, then wake up and extract the information they
+need to answer it, then they go back to sleep. Plugins which need to
+act spontaneously when builds complete (like the Mail plugin) need to
+subscribe to hear about new builds.
+
+If the status plugin needs to run network services (like the HTTP
+server used by the Waterfall plugin), they can be attached as Service
+children of the plugin itself, using the @code{IServiceCollection}
+interface.
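+
+As a rough sketch assembled from the description above (the callback
+names come from @code{IStatusReceiver}; check the docstrings in
+ at file{buildbot/interfaces.py} for the authoritative signatures), a
+do-almost-nothing plugin might look like this:
+
+ at example
+from twisted.application import service
+
+class TersePlugin(service.MultiService):
+    # print a line when each build starts and finishes
+
+    def startService(self):
+        service.MultiService.startService(self)
+        # self.parent is the BuildMaster; fetch the IStatus object
+        self.status = self.parent.getStatus()
+        # subscribe to hear about builder events; this obliges us to
+        # provide builderAdded() and friends
+        self.status.subscribe(self)
+
+    def builderAdded(self, builderName, builder):
+        # returning an IStatusReceiver here subscribes it to build
+        # events on this particular builder
+        return self
+
+    def buildStarted(self, builderName, build):
+        print "build started on", builderName
+
+    def buildFinished(self, builderName, build, results):
+        print "build finished on", builderName, "- results:", results
+ at end example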
+
+
+
+ at node Command-line tool, Resources, Status Delivery, Top
+ at chapter Command-line tool
+
+The @command{buildbot} command-line tool can be used to start or stop a
+buildmaster or buildbot, and to interact with a running buildmaster.
+Some of its subcommands are intended for buildmaster admins, while
+some are for developers who are editing the code that the buildbot is
+monitoring.
+
+ at menu
+* Administrator Tools::         
+* Developer Tools::             
+* Other Tools::                 
+* .buildbot config directory::  
+ at end menu
+
+ at node Administrator Tools, Developer Tools, Command-line tool, Command-line tool
+ at section Administrator Tools
+
+The following @command{buildbot} sub-commands are intended for
+buildmaster administrators:
+
+ at heading create-master
+
+This creates a new directory and populates it with files that allow it
+to be used as a buildmaster's base directory.
+
+ at example
+buildbot create-master BASEDIR
+ at end example
+
+ at heading create-slave
+
+This creates a new directory and populates it with files that let it
+be used as a buildslave's base directory. You must provide several
+arguments, which are used to create the initial @file{buildbot.tac}
+file.
+
+ at example
+buildbot create-slave @var{BASEDIR} @var{MASTERHOST}:@var{PORT} @var{SLAVENAME} @var{PASSWORD}
+ at end example
+
+ at heading start
+
+This starts a buildmaster or buildslave which was already created in
+the given base directory. The daemon is launched in the background,
+with events logged to a file named @file{twistd.log}.
+
+ at example
+buildbot start BASEDIR
+ at end example
+
+ at heading stop
+
+This terminates the daemon (either buildmaster or buildslave) running
+in the given directory.
+
+ at example
+buildbot stop BASEDIR
+ at end example
+
+ at heading sighup
+
+This sends a SIGHUP to the buildmaster running in the given directory,
+which causes it to re-read its @file{master.cfg} file.
+
+ at example
+buildbot sighup BASEDIR
+ at end example
+
+ at node Developer Tools, Other Tools, Administrator Tools, Command-line tool
+ at section Developer Tools
+
+These tools are provided for use by the developers who are working on
+the code that the buildbot is monitoring.
+
+ at menu
+* statuslog::                   
+* statusgui::                   
+* try::                         
+ at end menu
+
+ at node statuslog, statusgui, Developer Tools, Developer Tools
+ at subsection statuslog
+
+ at example
+buildbot statuslog --master @var{MASTERHOST}:@var{PORT}
+ at end example
+
+This command starts a simple text-based status client, one which just
+prints out a new line each time an event occurs on the buildmaster.
+
+The @option{--master} option provides the location of the
+ at code{buildbot.status.client.PBListener} status port, used to deliver
+build information to realtime status clients. The option is always in
+the form of a string, with hostname and port number separated by a
+colon (@code{HOSTNAME:PORTNUM}). Note that this port is @emph{not} the
+same as the slaveport (although a future version may allow the same
+port number to be used for both purposes). If you get an error message
+to the effect of ``Failure: twisted.cred.error.UnauthorizedLogin:'',
+this may indicate that you are connecting to the slaveport rather than
+a @code{PBListener} port.
+
+The @option{--master} option can also be provided by the
+ at code{masterstatus} name in @file{.buildbot/options} (@pxref{.buildbot
+config directory}).
+
+ at node statusgui, try, statuslog, Developer Tools
+ at subsection statusgui
+
+ at cindex statusgui
+
+If you have set up a PBListener (@pxref{PBListener}), you will be able
+to monitor your Buildbot using a simple Gtk+ application invoked with
+the @code{buildbot statusgui} command:
+
+ at example
+buildbot statusgui --master @var{MASTERHOST}:@var{PORT}
+ at end example
+
+This command starts a simple Gtk+-based status client, which contains
+a few boxes for each Builder that change color as events occur. It
+uses the same @option{--master} argument as the @command{buildbot
+statuslog} command (@pxref{statuslog}).
+
+ at node try,  , statusgui, Developer Tools
+ at subsection try
+
+This lets a developer ask the question ``What would happen if I
+committed this patch right now?''. It runs the unit test suite (across
+multiple build platforms) on the developer's current code, allowing
+them to make sure they will not break the tree when they finally
+commit their changes.
+
+The @command{buildbot try} command is meant to be run from within a
+developer's local tree, and starts by figuring out the base revision
+of that tree (what revision was current the last time the tree was
+updated), and a patch that can be applied to that revision of the tree
+to make it match the developer's copy. This (revision, patch) pair is
+then sent to the buildmaster, which runs a build with that
+SourceStamp. If you ask it to, the tool will emit status messages as
+the builds run, and will not terminate until the first failure has
+been detected (or until the last build succeeds).
+
+For this command to work, several pieces must be in place:
+
+
+ at heading TryScheduler
+
+ at slindex buildbot.scheduler.Try_Jobdir
+ at slindex buildbot.scheduler.Try_Userpass
+
+The buildmaster must have a @code{scheduler.Try} instance in
+the config file's @code{c['schedulers']} list. This lets the
+administrator control who may initiate these ``trial'' builds, which
+branches are eligible for trial builds, and which Builders should be
+used for them.
+
+The @code{TryScheduler} has various means to accept build requests:
+all of them enforce more security than the usual buildmaster ports do.
+Any source code being built can be used to compromise the buildslave
+accounts, but in general that code must be checked out from the VC
+repository first, so only people with commit privileges can get
+control of the buildslaves. The usual force-build control channels can
+waste buildslave time but do not allow arbitrary commands to be
+executed by people who don't have those commit privileges. However,
+the source code patch that is provided with the trial build does not
+have to go through the VC system first, so it is important to make
+sure these builds cannot be abused by a non-committer to acquire as
+much control over the buildslaves as a committer has. Ideally, only
+developers who have commit access to the VC repository would be able
+to start trial builds, but unfortunately the buildmaster does not, in
+general, have access to the VC system's user list.
+
+As a result, the @code{TryScheduler} requires a bit more
+configuration. There are currently two ways to set this up:
+
+ at table @strong
+ at item jobdir (ssh)
+
+This approach creates a command queue directory, called the
+``jobdir'', in the buildmaster's working directory. The buildmaster
+admin sets the ownership and permissions of this directory to only
+grant write access to the desired set of developers, all of whom must
+have accounts on the machine. The @code{buildbot try} command creates
+a special file containing the source stamp information and drops it in
+the jobdir, just like a standard maildir. When the buildmaster notices
+the new file, it unpacks the information inside and starts the builds.
+
+The config file entries used by 'buildbot try' either specify a local
+queuedir (for which write and mv are used) or a remote one (using scp
+and ssh).
+
+The advantage of this scheme is that it is quite secure; the
+disadvantage is that it requires fiddling outside the buildmaster
+config (to set the permissions on the jobdir correctly). If the
+buildmaster machine happens to also house the VC repository, then it
+can be fairly easy to keep the VC userlist in sync with the
+trial-build userlist. If they are on different machines, this will be
+much more of a hassle. It may also involve granting developer accounts
+on a machine that would not otherwise require them.
+
+To implement this, the @command{buildbot try} command invokes
+'ssh -l username host buildbot tryserver ARGS', passing the patch
+contents over stdin. The
+arguments must include the inlet directory and the revision
+information.
+
+ at item user+password (PB)
+
+In this approach, each developer gets a username/password pair, which
+are all listed in the buildmaster's configuration file. When the
+developer runs @code{buildbot try}, their machine connects to the
+buildmaster via PB and authenticates themselves using that username
+and password, then sends a PB command to start the trial build.
+
+The advantage of this scheme is that the entire configuration is
+performed inside the buildmaster's config file. The disadvantages are
+that it is less secure (while the ``cred'' authentication system does
+not expose the password in plaintext over the wire, it does not offer
+most of the other security properties that SSH does). In addition, the
+buildmaster admin is responsible for maintaining the username/password
+list, adding and deleting entries as developers come and go.
+
+ at end table
+
+
+For example, to set up the ``jobdir'' style of trial build, using a
+command queue directory of @file{MASTERDIR/jobdir} (and assuming that
+all your project developers were members of the @code{developers} unix
+group), you would first create that directory (with @command{mkdir
+MASTERDIR/jobdir MASTERDIR/jobdir/new MASTERDIR/jobdir/cur
+MASTERDIR/jobdir/tmp; chgrp developers MASTERDIR/jobdir
+MASTERDIR/jobdir/*; chmod g+rwx,o-rwx MASTERDIR/jobdir
+MASTERDIR/jobdir/*}), and then use the following scheduler in the
+buildmaster's config file:
+
+ at example
+from buildbot.scheduler import Try_Jobdir
+s = Try_Jobdir("try1", ["full-linux", "full-netbsd", "full-OSX"],
+               jobdir="jobdir")
+c['schedulers'] = [s]
+ at end example
+
+Note that you must create the jobdir before telling the buildmaster to
+use this configuration, otherwise you will get an error. Also remember
+that the buildmaster must be able to read and write to the jobdir as
+well. Be sure to watch the @file{twistd.log} file (@pxref{Logfiles})
+as you start using the jobdir, to make sure the buildmaster is happy
+with it.
+
+To use the username/password form of authentication, create a
+ at code{Try_Userpass} instance instead. It takes the same
+ at code{builderNames} argument as the @code{Try_Jobdir} form, but
+accepts an additional @code{port} argument (to specify the TCP port to
+listen on) and a @code{userpass} list of username/password pairs to
+accept. Remember to use good passwords for this, since the security of
+the buildslave accounts depends upon it:
+
+ at example
+from buildbot.scheduler import Try_Userpass
+s = Try_Userpass("try2", ["full-linux", "full-netbsd", "full-OSX"],
+                 port=8031, userpass=[("alice","pw1"), ("bob", "pw2")] )
+c['schedulers'] = [s]
+ at end example
+
+Like most places in the buildbot, the @code{port} argument takes a
+strports specification. See @code{twisted.application.strports} for
+details.
+
+
+ at heading locating the master
+
+The @command{try} command needs to be told how to connect to the
+ at code{TryScheduler}, and must know which of the authentication
+approaches described above is in use by the buildmaster. You specify
+the approach by using @option{--connect=ssh} or @option{--connect=pb}
+(or @code{try_connect = 'ssh'} or @code{try_connect = 'pb'} in
+ at file{.buildbot/options}).
+
+For the PB approach, the command must be given a @option{--master}
+argument (in the form HOST:PORT) that points to the TCP port that you
+picked in the @code{Try_Userpass} scheduler. It also takes a
+ at option{--username} and @option{--passwd} pair of arguments that match
+one of the entries in the buildmaster's @code{userpass} list. These
+arguments can also be provided as @code{try_master},
+ at code{try_username}, and @code{try_password} entries in the
+ at file{.buildbot/options} file.
+
+For the SSH approach, the command must be given @option{--tryhost},
+ at option{--username}, and optionally @option{--password} (TODO:
+really?) to get to the buildmaster host. It must also be given
+ at option{--trydir}, which points to the inlet directory configured
+above. The trydir can be relative to the user's home directory, but
+most of the time you will use an explicit path like
+ at file{~buildbot/project/trydir}. These arguments can be provided in
+ at file{.buildbot/options} as @code{try_host}, @code{try_username},
+ at code{try_password}, and @code{try_dir}.
+
+In addition, the SSH approach needs to connect to a PBListener status
+port, so it can retrieve and report the results of the build (the PB
+approach uses the existing connection to retrieve status information,
+so this step is not necessary). This requires a @option{--master}
+argument, or a @code{masterstatus} entry in @file{.buildbot/options},
+in the form of a HOSTNAME:PORT string.
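+
+For example (the hostname, port, and credentials below are only
+placeholders), the PB approach can be configured entirely from
+ at file{.buildbot/options}:
+
+ at example
+try_connect = 'pb'
+try_master = 'buildbot.example.org:8031'
+try_username = 'alice'
+try_password = 'pw1'
+
+# or, for the SSH approach:
+#try_connect = 'ssh'
+#try_host = 'buildbot.example.org'
+#try_username = 'alice'
+#try_dir = '~buildbot/project/trydir'
+#masterstatus = 'buildbot.example.org:12345'
+ at end example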
+
+
+ at heading choosing the Builders
+
+A trial build is performed on multiple Builders at the same time, and
+the developer gets to choose which Builders are used (limited to a set
+selected by the buildmaster admin with the TryScheduler's
+ at code{builderNames=} argument). The set you choose will depend upon
+what your goals are: if you are concerned about cross-platform
+compatibility, you should use multiple Builders, one from each
+platform of interest. You might use just one builder if that platform
+has libraries or other facilities that allow better test coverage than
+what you can accomplish on your own machine, or faster test runs.
+
+The set of Builders to use can be specified with multiple
+ at option{--builder} arguments on the command line. It can also be
+specified with a single @code{try_builders} option in
+ at file{.buildbot/options} that uses a list of strings to specify all
+the Builder names:
+
+ at example
+try_builders = ["full-OSX", "full-win32", "full-linux"]
+ at end example
+
+ at heading specifying the VC system
+
+The @command{try} command also needs to know how to take the
+developer's current tree and extract the (revision, patch)
+source-stamp pair. Each VC system uses a different process, so you
+start by telling the @command{try} command which VC system you are
+using, with an argument like @option{--vc=cvs} or @option{--vc=tla}.
+This can also be provided as @code{try_vc} in
+ at file{.buildbot/options}.
+
+The following names are recognized: @code{cvs}, @code{svn},
+ at code{baz}, @code{tla}, @code{hg}, and @code{darcs}.
+
+
+ at heading finding the top of the tree
+
+Some VC systems (notably CVS and SVN) track each directory
+more-or-less independently, which means the @command{try} command
+needs to move up to the top of the project tree before it will be able
+to construct a proper full-tree patch. To accomplish this, the
+ at command{try} command will crawl up through the parent directories
+until it finds a marker file. The default name for this marker file is
+ at file{.buildbot-top}, so when you are using CVS or SVN you should
+ at code{touch .buildbot-top} from the top of your tree before running
+ at command{buildbot try}. Alternatively, you can use a filename like
+ at file{ChangeLog} or @file{README}, since many projects put one of
+these files in their top-most directory (and nowhere else). To set
+this filename, use @option{--try-topfile=ChangeLog}, or set it in the
+options file with @code{try_topfile = 'ChangeLog'}.
+
+You can also manually set the top of the tree with
+ at option{--try-topdir=~/trees/mytree}, or @code{try_topdir =
+'~/trees/mytree'}. If you use @code{try_topdir} in a
+ at file{.buildbot/options} file, you will need a separate options file
+for each tree you use, so it may be more convenient to use the
+ at code{try_topfile} approach instead.
+
+Other VC systems which work on full projects instead of individual
+directories (tla, baz, darcs, monotone, mercurial) do not require
+ at command{try} to know the top directory, so the @option{--try-topfile}
+and @option{--try-topdir} arguments will be ignored.
+ at c is this true? I think I currently require topdirs all the time.
+
+If the @command{try} command cannot find the top directory, it will
+abort with an error message.
+
+ at heading determining the branch name
+
+Some VC systems record the branch information in a way that ``try''
+can locate it, in particular Arch (both @command{tla} and
+ at command{baz}). For the others, if you are using something other than
+the default branch, you will have to tell the buildbot which branch
+your tree is using. You can do this with either the @option{--branch}
+argument, or a @code{try_branch} entry in the
+ at file{.buildbot/options} file.
+
+ at heading determining the revision and patch
+
+Each VC system has a separate approach for determining the tree's base
+revision and computing a patch.
+
+ at table @code
+
+ at item CVS
+
+ at command{try} pretends that the tree is up to date. It converts the
+current time into a @code{-D} time specification, uses it as the base
+revision, and computes the diff between the upstream tree as of that
+point in time and the current contents. This works, more or less,
+but requires that the local clock be in reasonably good sync with the
+repository.
+
+ at item SVN
+ at command{try} does a @code{svn status -u} to find the latest
+repository revision number (emitted on the last line in the ``Status
+against revision: NN'' message). It then performs an @code{svn diff
+-rNN} to find out how your tree differs from the repository version,
+and sends the resulting patch to the buildmaster. If your tree is not
+up to date, this will result in the ``try'' tree being created with
+the latest revision, then @emph{backwards} patches applied to bring it
+``back'' to the version you actually checked out (plus your actual
+code changes), but this will still result in the correct tree being
+used for the build.
+
+ at item baz
+ at command{try} does a @code{baz tree-id} to determine the
+fully-qualified version and patch identifier for the tree
+(ARCHIVE/VERSION--patch-NN), and uses the VERSION--patch-NN component
+as the base revision. It then does a @code{baz diff} to obtain the
+patch.
+
+ at item tla
+ at command{try} does a @code{tla tree-version} to get the
+fully-qualified version identifier (ARCHIVE/VERSION), then takes the
+first line of @code{tla logs --reverse} to figure out the base
+revision. Then it does @code{tla changes --diffs} to obtain the patch.
+
+ at item Darcs
+ at code{darcs changes --context} emits a text file that contains a list
+of all patches back to and including the last tag that was made. This text
+file (plus the location of a repository that contains all these
+patches) is sufficient to re-create the tree. Therefore the contents
+of this ``context'' file @emph{are} the revision stamp for a
+Darcs-controlled source tree.
+
+So @command{try} does a @code{darcs changes --context} to determine
+what your tree's base revision is, and then does a @code{darcs diff
+-u} to compute the patch relative to that revision.
+
+ at item Mercurial
+ at code{hg identify} emits a short revision ID (basically a truncated
+SHA1 hash of the current revision's contents), which is used as the
+base revision. @code{hg diff} then provides the patch relative to that
+revision. For @command{try} to work, your working directory must only
+have patches that are available from the same remotely-available
+repository that the build process' @code{step.Mercurial} will use.
+
+ at c TODO: monotone, git
+ at end table
+
+ at heading waiting for results
+
+If you provide the @option{--wait} option (or @code{try_wait = True}
+in @file{.buildbot/options}), the @command{buildbot try} command will
+wait until your changes have either been proven good or bad before
+exiting. Unless you use the @option{--quiet} option (or
+ at code{try_quiet=True}), it will emit a progress message every 60
+seconds until the builds have completed.
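+
+Putting this all together, a complete invocation using the PB approach
+might look like the following (the master location, credentials, and
+builder names are only placeholders):
+
+ at example
+buildbot try --connect=pb --master=buildbot.example.org:8031 \
+    --username=alice --passwd=pw1 --vc=svn \
+    --builder=full-linux --builder=full-OSX --wait
+ at end example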
+
+
+ at node Other Tools, .buildbot config directory, Developer Tools, Command-line tool
+ at section Other Tools
+
+These tools are generally used by buildmaster administrators.
+
+ at menu
+* sendchange::                  
+* debugclient::                 
+ at end menu
+
+ at node sendchange, debugclient, Other Tools, Other Tools
+ at subsection sendchange
+
+This command is used to tell the buildmaster about source changes. It
+is intended to be used from within a commit script, installed on the
+VC server. It requires that you have a PBChangeSource
+(@pxref{PBChangeSource}) running in the buildmaster (by being included
+in the @code{c['sources']} list).
+
+
+ at example
+buildbot sendchange --master @var{MASTERHOST}:@var{PORT} --username @var{USER} @var{FILENAMES..}
+ at end example
+
+There are other (optional) arguments which can influence the
+ at code{Change} that gets submitted:
+
+ at table @code
+ at item --branch
+This provides the (string) branch specifier. If omitted, it defaults
+to None, indicating the ``default branch''. All files included in this
+Change must be on the same branch.
+
+ at item --revision_number
+This provides a (numeric) revision number for the change, used for VC systems
+that use numeric transaction numbers (like Subversion).
+
+ at item --revision
+This provides a (string) revision specifier, for VC systems that use
+strings (Arch, for example, would use something like patch-42).
+
+ at item --revision_file
+This provides a filename which will be opened and the contents used as
+the revision specifier. This is specifically for Darcs, which uses the
+output of @command{darcs changes --context} as a revision specifier.
+This context file can be a couple of kilobytes long, spanning a couple
+lines per patch, and would be a hassle to pass as a command-line
+argument.
+
+ at item --comments
+This provides the change comments as a single argument. You may want
+to use @option{--logfile} instead.
+
+ at item --logfile
+This instructs the tool to read the change comments from the given
+file. If you use @code{-} as the filename, the tool will read the
+change comments from stdin.
+ at end table
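+
+For example (the master location, username, revision, and filenames
+here are only placeholders), a Subversion post-commit hook might run:
+
+ at example
+buildbot sendchange --master buildmaster.example.org:9989 \
+    --username alice --revision_number 1234 \
+    --comments "fix off-by-one in frobnicator" lib/frob.c doc/frob.xml
+ at end example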
+
+
+ at node debugclient,  , sendchange, Other Tools
+ at subsection debugclient
+
+ at example
+buildbot debugclient --master @var{MASTERHOST}:@var{PORT} --passwd @var{DEBUGPW}
+ at end example
+
+This launches a small Gtk+/Glade-based debug tool, connecting to the
+buildmaster's ``debug port''. This debug port shares the same port
+number as the slaveport (@pxref{Setting the slaveport}), but the
+ at code{debugPort} is only enabled if you set a debug password in the
+buildmaster's config file (@pxref{Debug options}). The
+ at option{--passwd} option must match the @code{c['debugPassword']}
+value.
+
+ at option{--master} can also be provided in @file{.debug/options} by the
+ at code{master} key. @option{--passwd} can be provided by the
+ at code{debugPassword} key.
+
+The @code{Connect} button must be pressed before any of the other
+buttons will be active. This establishes the connection to the
+buildmaster. The other sections of the tool are as follows:
+
+ at table @code
+ at item Reload .cfg
+Forces the buildmaster to reload its @file{master.cfg} file. This is
+equivalent to sending a SIGHUP to the buildmaster, but can be done
+remotely through the debug port. Note that it is a good idea to be
+watching the buildmaster's @file{twistd.log} as you reload the config
+file, as any errors which are detected in the config file will be
+announced there.
+
+ at item Rebuild .py
+(not yet implemented). The idea here is to use Twisted's ``rebuild''
+facilities to replace the buildmaster's running code with a new
+version. Even if this worked, it would only be used by buildbot
+developers.
+
+ at item poke IRC
+This locates a @code{words.IRC} status target and causes it to emit a
+message on all the channels to which it is currently connected. This
+was used to debug a problem in which the buildmaster lost the
+connection to the IRC server and did not attempt to reconnect.
+
+ at item Commit
+This allows you to inject a Change, just as if a real one had been
+delivered by whatever VC hook you are using. You can set the name of
+the committed file and the name of the user who is doing the commit.
+Optionally, you can also set a revision for the change. If the
+revision you provide looks like a number, it will be sent as an
+integer, otherwise it will be sent as a string.
+
+ at item Force Build
+This lets you force a Builder (selected by name) to start a build of
+the current source tree.
+
+ at item Currently
+(obsolete). This was used to manually set the status of the given
+Builder, but the status-assignment code was changed in an incompatible
+way and these buttons are no longer meaningful.
+
+ at end table
+
+
+ at node .buildbot config directory,  , Other Tools, Command-line tool
+ at section .buildbot config directory
+
+Many of the @command{buildbot} tools must be told how to contact the
+buildmaster that they interact with. This specification can be
+provided as a command-line argument, but most of the time it will be
+easier to set it in an ``options'' file. The @command{buildbot}
+command will look for a special directory named @file{.buildbot},
+starting from the current directory (where the command was run) and
+crawling upwards, eventually looking in the user's home directory. It
+will look for a file named @file{options} in this directory, and will
+evaluate it as a python script, looking for certain names to be set.
+You can just put simple @code{name = 'value'} pairs in this file to
+set the options.
+
+For a description of the names used in this file, please see the
+documentation for the individual @command{buildbot} sub-commands. The
+following is a brief sample of what this file's contents could be.
+
+ at example
+# for status-reading tools
+masterstatus = 'buildbot.example.org:12345'
+# for 'sendchange' or the debug port
+master = 'buildbot.example.org:18990'
+debugPassword = 'eiv7Po'
+ at end example
+
+ at table @code
+ at item masterstatus
+Location of the @code{client.PBListener} status port, used by
+ at command{statuslog} and @command{statusgui}.
+
+ at item master
+Location of the @code{debugPort} (for @command{debugclient}). Also the
+location of the @code{pb.PBChangeSource} (for @command{sendchange}).
+Usually shares the slaveport, but a future version may make it
+possible to have these listen on a separate port number.
+
+ at item debugPassword
+Must match the value of @code{c['debugPassword']}, used to protect the
+debug port, for the @command{debugclient} command.
+
+ at item username
+Provides a default username for the @command{sendchange} command.
+
+ at end table
+
+
+The following options are used by the @code{buildbot try} command
+(@pxref{try}):
+
+ at table @code
+ at item try_connect
+This specifies how the ``try'' command should deliver its request to
+the buildmaster. The currently accepted values are ``ssh'' and ``pb''.
+ at item try_builders
+Which builders should be used for the ``try'' build.
+ at item try_vc
+This specifies the version control system being used.
+ at item try_branch
+This indicates that the current tree is on a non-trunk branch.
+ at item try_topdir
+ at item try_topfile
+Use @code{try_topdir} to explicitly indicate the top of your working
+tree, or @code{try_topfile} to name a file that will only be found in
+that top-most directory.
+
+ at item try_host
+ at item try_username
+ at item try_dir
+When @code{try_connect} is ``ssh'', the command will pay attention to
+ at code{try_host}, @code{try_username}, and @code{try_dir}.
+
+ at item try_username
+ at item try_password
+ at item try_master
+When @code{try_connect} is ``pb'', the command will instead pay
+attention to @code{try_username}, @code{try_password}, and
+ at code{try_master}.
+
+ at item try_wait
+ at item masterstatus
+ at code{try_wait} and @code{masterstatus} are used to ask the ``try''
+command to wait for the requested build to complete.
+
+ at end table
+
+
+
+ at node Resources, Developer's Appendix, Command-line tool, Top
+ at chapter Resources
+
+The Buildbot's home page is at @uref{http://buildbot.sourceforge.net/}
+
+For configuration questions and general discussion, please use the
+ at code{buildbot-devel} mailing list. The subscription instructions and
+archives are available at
+ at uref{http://lists.sourceforge.net/lists/listinfo/buildbot-devel}
+
+ at node Developer's Appendix, Index of Useful Classes, Resources, Top
+ at unnumbered Developer's Appendix
+
+This appendix contains random notes about the implementation of the
+Buildbot, and is likely to only be of use to people intending to
+extend the Buildbot's internals.
+
+The buildmaster consists of a tree of Service objects, which is shaped
+as follows:
+
+ at example
+BuildMaster
+ ChangeMaster  (in .change_svc)
+  [IChangeSource instances]
+ [IScheduler instances]  (in .schedulers)
+ BotMaster  (in .botmaster)
+ [IStatusTarget instances]  (in .statusTargets)
+ at end example
+
+The BotMaster has a collection of Builder objects as values of its
+ at code{.builders} dictionary.
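+
+For example, from a manhole session (assuming @code{master} is bound
+to the running BuildMaster instance; the exact accessor names may vary
+between versions), you can walk this tree directly:
+
+ at example
+master.botmaster.builders.keys()       # names of all configured Builders
+master.change_svc                      # the ChangeMaster service
+master.getStatus().getBuilderNames()   # the same Builders, via IStatus
+ at end example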
+
+
+ at node Index of Useful Classes, Index of master.cfg keys, Developer's Appendix, Top
+ at unnumbered Index of Useful Classes
+
+This is a list of all user-visible classes. These are the ones that
+are useful in @file{master.cfg}, the buildmaster's configuration file.
+Classes that are not listed here are generally internal things that
+admins are unlikely to have much use for.
+
+
+ at heading Change Sources
+ at printindex cs
+
+ at heading Schedulers and Locks
+ at printindex sl
+
+ at heading Build Factories
+ at printindex bf
+
+ at heading Build Steps
+ at printindex bs
+
+ at c undocumented steps
+ at bsindex buildbot.steps.source.Git
+ at bsindex buildbot.steps.maxq.MaxQ
+
+
+ at heading Status Targets
+ at printindex st
+
+ at c TODO: undocumented targets
+ at stindex buildbot.status.mail.MailNotifier
+
+ at node Index of master.cfg keys, Index, Index of Useful Classes, Top
+ at unnumbered Index of master.cfg keys
+
+This is a list of all of the significant keys in master.cfg. Recall
+that master.cfg is effectively a small python program with one
+responsibility: create a dictionary named @code{BuildmasterConfig}.
+The keys of this dictionary are listed here. The master.cfg file
+typically starts with something like:
+
+ at example
+BuildmasterConfig = c = @{@}
+ at end example
+
+Therefore a config key of @code{sources} will usually appear in
+master.cfg as @code{c['sources']}.
+
+ at printindex bc
+
+
+ at node Index,  , Index of master.cfg keys, Top
+ at unnumbered Index
+
+ at printindex cp
+
+
+ at bye
+

Added: vendor/buildbot/current/docs/epyrun
===================================================================
--- vendor/buildbot/current/docs/epyrun	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/docs/epyrun	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+
+import sys
+import os
+
+from twisted.python import reflect
+from twisted.internet import reactor
+
+# epydoc
+import epydoc
+assert epydoc.__version__[0] == '2', "You need epydoc 2.x!"
+from epydoc.cli import cli
+
+class FakeModule:
+
+    def __init__(self, name, level):
+        self.__level = level
+        self.__name__ = name
+
+    def __repr__(self):
+        return '<Fake %s>' % self.__name__
+    __str__ = __repr__
+
+    def __nonzero__(self):
+        return 1
+
+    def __call__(self, *args, **kw):
+        pass #print 'Called:', args
+
+    def __getattr__(self, attr):
+        if self.__level == 0:
+            raise AttributeError
+        return FakeModule(self.__name__+'.'+attr, self.__level-1)
+
+    def __cmp__(self, other):
+        if not hasattr(other, '___name__'):
+            return -1
+        return cmp(self.__name__, other.__name__)
+
+
+def fakeOut(modname):
+    modpath = modname.split('.')
+    prevmod = None
+    for m in range(len(modpath)):
+        mp = '.'.join(modpath[:m+1])
+        nm = FakeModule(mp, 4)
+        if prevmod:
+            setattr(prevmod, modpath[m], nm)
+        sys.modules[mp] = nm
+        prevmod = nm
+
+#fakeOut("twisted")
+
+# HACK: Another "only doc what we tell you". We don't want epydoc to
+# automatically recurse into subdirectories: "twisted"'s presence was
+# causing "twisted/test" to be docced, even thought we explicitly
+# didn't put any twisted/test in our modnames.
+
+from epydoc import imports
+orig_find_modules = imports.find_modules
+
+import re
+
+def find_modules(dirname):
+    if not os.path.isdir(dirname): return []
+    found_init = 0
+    modules = {}
+    dirs = []
+
+    # Search for directories & modules, and check for __init__.py.
+    # Don't include duplicates (like foo.py and foo.pyc), and give
+    # precedence to the .py files.
+    for file in os.listdir(dirname):
+        filepath = os.path.join(dirname, file)
+        if os.path.isdir(filepath): dirs.append(filepath)
+        elif not re.match(r'\w+.py.?', file):
+            continue # Ignore things like ".#foo.py" or "a-b.py"
+        elif file[-3:] == '.py':
+            modules[file] = os.path.join(dirname, file)
+            if file == '__init__.py': found_init = 1
+        elif file[-4:-1] == '.py':
+            modules.setdefault(file[:-1], file)
+            if file[:-1] == '__init__.py': found_init = 1
+    modules = modules.values()
+
+    # If there was no __init__.py, then this isn't a package
+    # directory; return nothing.
+    if not found_init: return []
+
+    # Recurse to the child directories.
+    # **twisted** here's the change: commented next line out
+    #for d in dirs: modules += find_modules(d)
+    return modules
+
+imports.find_modules = find_modules
+
+
+
+# Now, set up the list of modules for epydoc to document
+modnames = []
+def addMod(arg, path, files):
+    for fn in files:
+        file = os.path.join(path, fn).replace('%s__init__'%os.sep, '')
+        if file[-3:] == '.py' and not file.count('%stest%s' % (os.sep,os.sep)):
+            modName = file[:-3].replace(os.sep,'.')
+            try:
+                #print 'pre-loading', modName
+                reflect.namedModule(modName)
+            except ImportError, e:
+                print 'import error:', modName, e
+            except Exception, e:
+                print 'other error:', modName, e
+            else:
+                modnames.append(modName)
+
+document_all = True # are we doing a full build?
+names = ['buildbot/'] #default, may be overridden below
+
+#get list of modules/pkgs on cmd-line
+try:
+    i = sys.argv.index("--modules")
+except:
+    pass
+else:
+    names = sys.argv[i+1:]
+    document_all = False
+    sys.argv[i:] = []
+    #sanity check on names
+    for i in range(len(names)):
+        try:
+            j = names[i].rindex('buildbot/') 
+        except:
+            raise SystemExit, 'You can only specify buildbot modules or packages'
+        else:
+            #strip off any leading directories before the 'buildbot/'
+            #dir. this makes it easy to specify full paths, such as
+            #from TwistedEmacs
+            names[i] = names[i][j:]
+
+    old_out_dir = "html"
+    #if -o was specified, we need to change it to point to a tmp dir
+    #otherwise add our own -o option
+    try:
+        i = sys.argv.index('-o')
+        old_out_dir = sys.argv[i+1]
+        try:
+            os.mkdir(tmp_dir)
+        except OSError:
+            pass
+        sys.argv[i+1] = tmp_dir
+    except ValueError:
+        sys.argv[1:1] = ['-o', tmp_dir]
+
+osrv = sys.argv
+sys.argv=["IGNORE"]
+
+for name in names:
+    if name.endswith(".py"):
+        # turn it in to a python module name
+        name = name[:-3].replace(os.sep, ".")
+        try:
+            reflect.namedModule(name)
+        except ImportError:
+            print 'import error:', name
+        except:
+            print 'other error:', name
+        else:
+            modnames.append(name)
+    else: #assume it's a dir
+        os.path.walk(name, addMod, None)
+
+sys.argv = osrv
+
+if 'buildbot.test' in modnames:
+    modnames.remove('buildbot.test')
+##if 'twisted' in modnames:
+##    modnames.remove('twisted')
+
+sys.argv.extend(modnames)
+
+import buildbot
+
+
+sys.argv[1:1] = [
+    '-n', 'BuildBot %s' % buildbot.version,
+    '-u', 'http://buildbot.sourceforge.net/', '--no-private']
+
+# Make it easy to profile epyrun
+if 0:
+    import profile
+    profile.run('cli()', 'epyrun.prof')
+else:
+    cli()
+
+print 'Done!'

Added: vendor/buildbot/current/docs/examples/glib_master.cfg
===================================================================
--- vendor/buildbot/current/docs/examples/glib_master.cfg	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/docs/examples/glib_master.cfg	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,55 @@
+#! /usr/bin/python
+
+from buildbot.changes.freshcvs import FreshCVSSource
+from buildbot.steps.source import CVS
+from buildbot.process.factory import GNUAutoconf, s
+from buildbot.status import html
+
+c = {}
+
+c['bots'] = [["bot1", "sekrit"]]
+
+c['sources'] = [FreshCVSSource("localhost", 4519,
+                               "foo", "bar",
+                               prefix="glib")]
+#c['sources'] = []
+c['builders'] = []
+
+repository = "/usr/home/warner/stuff/Projects/BuildBot/fakerep"
+cvsmodule = "glib"
+
+f1 = GNUAutoconf(s(CVS, cvsroot=repository, cvsmodule=cvsmodule,
+                   mode="update"),
+                 #configure="./configure --disable-shared",
+                 #configureEnv={'CFLAGS': '-O0'},
+                 configure=None)
+f1.useProgress = False
+
+b1 = {'name': "glib-quick",
+      'slavename': "bot1",
+      'builddir': "glib-quick",
+      'factory': f1,
+      }
+c['builders'].append(b1)
+
+f2 = GNUAutoconf(s(CVS, cvsroot=repository, cvsmodule=cvsmodule,
+                   mode="copy"),
+                 configure="./configure --disable-shared",
+                 configureEnv={'CFLAGS': '-O0'},
+                 )
+
+b2 = {'name': "glib-full",
+      'slavename': "bot1",
+      'builddir': "glib-full",
+      'factory': f2,
+      }
+c['builders'].append(b2)
+
+#c['irc'] = {("localhost", 6667): ('buildbot', ["private"])}
+
+c['slavePortnum'] = 8007
+
+c['status'] = [html.Waterfall(http_port=8080)]
+c['debugPassword'] = "asdf"
+
+BuildmasterConfig = c

Added: vendor/buildbot/current/docs/examples/hello.cfg
===================================================================
--- vendor/buildbot/current/docs/examples/hello.cfg	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/docs/examples/hello.cfg	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,104 @@
+#! /usr/bin/python
+
+from buildbot import master
+from buildbot.process import factory
+from buildbot.steps.source import CVS, SVN, Darcs, Arch
+from buildbot.steps.shell import Configure, Compile, Test
+from buildbot.status import html, client
+from buildbot.changes.pb import PBChangeSource
+s = factory.s
+
+BuildmasterConfig = c = {}
+
+c['bots'] = [["bot1", "sekrit"]]
+
+c['sources'] = []
+c['sources'].append(PBChangeSource(prefix="trunk"))
+c['builders'] = []
+
+if 1:
+    steps = [
+        s(CVS,
+          cvsroot="/usr/home/warner/stuff/Projects/BuildBot/demo/Repository",
+          cvsmodule="hello",
+          mode="clobber",
+          checkoutDelay=6,
+          alwaysUseLatest=True,
+          ),
+        s(Configure),
+        s(Compile),
+        s(Test, command=["make", "check"]),
+        ]
+    b1 = {"name": "cvs-hello",
+          "slavename": "bot1",
+          "builddir": "cvs-hello",
+          "factory": factory.BuildFactory(steps),
+          }
+    c['builders'].append(b1)
+
+if 1:
+    svnrep="file:///usr/home/warner/stuff/Projects/BuildBot/demo/SVN-Repository"
+    steps = [
+        s(SVN,
+          svnurl=svnrep+"/hello",
+          mode="update",
+          ),
+        s(Configure),
+        s(Compile),
+        s(Test, command=["make", "check"]),
+        ]
+    b1 = {"name": "svn-hello",
+          "slavename": "bot1",
+          "builddir": "svn-hello",
+          "factory": factory.BuildFactory(steps),
+          }
+    c['builders'].append(b1)
+
+if 1:
+    steps = [
+        s(Darcs,
+          repourl="http://localhost/~warner/hello-darcs",
+          mode="copy",
+          ),
+        s(Configure, command=["/bin/sh", "./configure"]),
+        s(Compile),
+        s(Test, command=["make", "check"]),
+        ]
+    b1 = {"name": "darcs-hello",
+          "slavename": "bot1",
+          "builddir": "darcs-hello",
+          "factory": factory.BuildFactory(steps),
+          }
+    c['builders'].append(b1)
+
+if 1:
+    steps = [
+        s(Arch,
+          url="http://localhost/~warner/hello-arch",
+          version="gnu-hello--release--2.1.1",
+          mode="copy",
+          ),
+        s(Configure),
+        s(Compile),
+        s(Test, command=["make", "check"]),
+        ]
+    b1 = {"name": "arch-hello",
+          "slavename": "bot1",
+          "builddir": "arch-hello",
+          "factory": factory.BuildFactory(steps),
+          }
+    c['builders'].append(b1)
+
+
+c['projectName'] = "Hello"
+c['projectURL'] = "http://www.hello.example.com"
+c['buildbotURL'] = "http://localhost:8080"
+
+c['slavePortnum'] = 8007
+c['debugPassword'] = "asdf"
+c['manhole'] = master.Manhole(9900, "username", "password")
+
+c['status'] = [html.Waterfall(http_port=8080),
+               client.PBListener(port=8008),
+               ]
+

Added: vendor/buildbot/current/docs/examples/twisted_master.cfg
===================================================================
--- vendor/buildbot/current/docs/examples/twisted_master.cfg	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/docs/examples/twisted_master.cfg	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,324 @@
+#! /usr/bin/python
+
+# This configuration file is described in $BUILDBOT/docs/config.xhtml
+
+# This is used (with online=True) to run the Twisted Buildbot at
+# http://www.twistedmatrix.com/buildbot/ . Passwords and other secret
+# information are loaded from a neighboring file called 'private.py'.
+
+import sys
+sys.path.append('/home/buildbot/BuildBot/support-master')
+
+import os.path
+
+from buildbot.changes.pb import PBChangeSource
+from buildbot.scheduler import Scheduler, Try_Userpass
+from buildbot.steps.source import SVN
+from buildbot.process.factory import s
+from buildbot.process.process_twisted import \
+     QuickTwistedBuildFactory, \
+     FullTwistedBuildFactory, \
+     TwistedReactorsBuildFactory
+from buildbot.status import html, words, client, mail
+
+import extra_factory
+reload(extra_factory)
+from extra_factory import GoodTwistedBuildFactory
+
+import private # holds passwords
+reload(private) # make it possible to change the contents without a restart
+
+BuildmasterConfig = c = {}
+
+# I set really=False when testing this configuration at home
+really = True
+usePBChangeSource = True
+
+
+c['bots'] = []
+for bot in private.bot_passwords.keys():
+    c['bots'].append((bot, private.bot_passwords[bot]))
+
+c['sources'] = []
+
+# the Twisted buildbot currently uses the contrib/svn_buildbot.py script.
+# This makes a TCP connection to the ChangeMaster service to push Changes
+# into the build master. The script is invoked by
+# /svn/Twisted/hooks/post-commit, so it will only be run for things inside
+# the Twisted repository. However, the standard SVN practice is to put the
+# actual trunk in a subdirectory named "trunk/" (to leave room for
+# "branches/" and "tags/"). We want to only pay attention to the trunk, so
+# we use "trunk" as a prefix for the ChangeSource. This also strips off that
+# prefix, so that the Builders all see sensible pathnames (which means they
+# can do things like ignore the sandbox properly).
+
+source = PBChangeSource(prefix="trunk/")
+c['sources'].append(source)
+
+
+## configure the builders
+
+if 0:
+    # always build on trunk
+    svnurl = "svn://svn.twistedmatrix.com/svn/Twisted/trunk"
+    source_update = s(SVN, svnurl=svnurl, mode="update")
+    source_copy = s(SVN, svnurl=svnurl, mode="copy")
+    source_export = s(SVN, svnurl=svnurl, mode="export")
+else:
+    # for build-on-branch, we use these instead
+    baseURL = "svn://svn.twistedmatrix.com/svn/Twisted/"
+    defaultBranch = "trunk"
+    source_update = s(SVN, baseURL=baseURL, defaultBranch=defaultBranch,
+                      mode="update")
+    source_copy = s(SVN, baseURL=baseURL, defaultBranch=defaultBranch,
+                    mode="copy")
+    source_export = s(SVN, baseURL=baseURL, defaultBranch=defaultBranch,
+                      mode="export")
+
+
+builders = []
+
+
+
+b24compile_opts = [
+    "-Wignore::PendingDeprecationWarning:distutils.command.build_py",
+    "-Wignore::PendingDeprecationWarning:distutils.command.build_ext",
+    ]
+
+
+b25compile_opts = b24compile_opts # FIXME
+
+
+b1 = {'name': "quick",
+      'slavename': "bot1",
+      'builddir': "quick",
+      'factory': QuickTwistedBuildFactory(source_update,
+                                          python=["python2.3", "python2.4"]),
+      }
+builders.append(b1)
+
+b23compile_opts = [
+    "-Wignore::PendingDeprecationWarning:distutils.command.build_py",
+    "-Wignore::PendingDeprecationWarning:distutils.command.build_ext",
+    ]
+b23 = {'name': "debian-py2.3-select",
+       'slavename': "bot-exarkun",
+       'builddir': "full2.3",
+       'factory': FullTwistedBuildFactory(source_copy,
+                                          python=["python2.3", "-Wall"],
+                                          # use -Werror soon
+                                          compileOpts=b23compile_opts,
+                                          processDocs=1,
+                                          runTestsRandomly=1),
+       }
+builders.append(b23)
+
+b24 = {'name': "debian-py2.4-select",
+       'slavenames': ["bot-exarkun"],
+       'builddir': "full2.4",
+       'factory': FullTwistedBuildFactory(source_copy,
+                                          python=["python2.4", "-Wall"],
+                                          # use -Werror soon
+                                          compileOpts=b24compile_opts,
+                                          runTestsRandomly=1),
+       }
+builders.append(b24)
+
+b24debian64 = {
+    'name': 'debian64-py2.4-select',
+    'slavenames': ['bot-idnar-debian64'],
+    'builddir': 'full2.4-debian64',
+    'factory': FullTwistedBuildFactory(source_copy,
+                                       python=["python2.4", "-Wall"],
+                                       compileOpts=b24compile_opts),
+    }
+builders.append(b24debian64)
+
+b25debian = {
+    'name': 'debian-py2.5-select',
+    'slavenames': ['bot-idnar-debian'],
+    'builddir': 'full2.5-debian',
+    'factory': FullTwistedBuildFactory(source_copy,
+                                       python=["python2.5", "-Wall"],
+                                       compileOpts=b24compile_opts)}
+builders.append(b25debian)
+
+
+b25suse = {
+    'name': 'suse-py2.5-select',
+    'slavenames': ['bot-scmikes-2.5'],
+    'builddir': 'bot-scmikes-2.5',
+    'factory': FullTwistedBuildFactory(source_copy,
+                                       python=["python2.5", "-Wall"],
+                                       compileOpts=b24compile_opts),
+    }
+builders.append(b25suse)
+
+reactors = ['poll', 'epoll', 'gtk', 'gtk2']
+b4 = {'name': "debian-py2.4-reactors",
+      'slavename': "bot2",
+      'builddir': "reactors",
+      'factory': TwistedReactorsBuildFactory(source_copy,
+                                             python="python2.4",
+                                             reactors=reactors),
+      }
+builders.append(b4)
+
+bosx24 = {
+    'name': 'osx-py2.4-select',
+    'slavenames': ['bot-exarkun-osx'],
+    'builddir': 'full2.4-exarkun-osx',
+    'factory': FullTwistedBuildFactory(source_copy,
+                                       python=["python2.4", "-Wall"],
+                                       compileOpts=b24compile_opts,
+                                       runTestsRandomly=1)}
+builders.append(bosx24)
+
+forcegc = {
+    'name': 'osx-py2.4-select-gc',
+    'slavenames': ['bot-exarkun-osx'],
+    'builddir': 'full2.4-force-gc-exarkun-osx',
+    'factory': GoodTwistedBuildFactory(source_copy,
+                                       python="python2.4")}
+builders.append(forcegc)
+
+
+# debuild is offline while we figure out how to build 2.0 .debs from SVN
+# b3 = {'name': "debuild",
+#       'slavename': "bot2",
+#       'builddir': "debuild",
+#       'factory': TwistedDebsBuildFactory(source_export,
+#                                          python="python2.4"),
+#       }
+# builders.append(b3)
+
+b24w32_scmikes_select = {
+          'name': "win32-py2.4-select",
+          'slavename': "bot-scmikes-win32",
+          'builddir': "W32-full2.4-scmikes-select",
+          'factory': TwistedReactorsBuildFactory(source_copy,
+                                                 python="python",
+                                                 compileOpts2=["-c","mingw32"],
+                                                 reactors=["default"]),
+          }
+builders.append(b24w32_scmikes_select)
+
+b25w32_scmikes_select = {
+          'name': "win32-py2.5-select",
+          'slavename': "bot-scmikes-win32-2.5",
+          'builddir': "W32-full2.5-scmikes-select",
+          'factory': TwistedReactorsBuildFactory(source_copy,
+                                                 python="python",
+                                                 compileOpts2=["-c","mingw32"],
+                                                 reactors=["default"]),
+          }
+builders.append(b25w32_scmikes_select)
+
+b24w32_win32er = {
+          'name': "win32-py2.4-er",
+          'slavename': "bot-win32-win32er",
+          'builddir': "W32-full2.4-win32er",
+          'factory': TwistedReactorsBuildFactory(source_copy,
+                                                 python="python",
+                                                 compileOpts2=["-c","mingw32"],
+                                                 reactors=["win32"]),
+          }
+builders.append(b24w32_win32er)
+
+
+b24w32_iocp = {
+          'name': "win32-py2.4-iocp",
+          'slavename': "bot-win32-iocp",
+          'builddir': "W32-full2.4-iocp",
+          'factory': TwistedReactorsBuildFactory(source_copy,
+                                                 python="python",
+                                                 compileOpts2=[],
+                                                 reactors=["iocp"]),
+          }
+builders.append(b24w32_iocp)
+
+
+b24freebsd = {'name': "freebsd-py2.4-select-kq",
+              'slavename': "bot-landonf",
+              'builddir': "freebsd-full2.4",
+              'factory':
+              TwistedReactorsBuildFactory(source_copy,
+                                          python="python2.4",
+                                          reactors=["default",
+                                                    "kqueue",
+                                                    ]),
+              }
+builders.append(b24freebsd)
+
+
+osxtsr = {'name': "osx-py2.4-tsr",
+          'slavename': "bot-exarkun-osx",
+          'builddir': "osx-tsr",
+          'factory': TwistedReactorsBuildFactory(
+              source_copy,
+              python="python2.4",
+              reactors=["tsr"])}
+builders.append(osxtsr)
+
+
+bpypyc = {'name': 'osx-pypyc-select',
+          'slavename': 'bot-jerub-pypy',
+          'builddir': 'pypy-c',
+          'factory': TwistedReactorsBuildFactory(source_copy,
+						 python="pypy-c",
+						 reactors=["default"])}
+builders.append(bpypyc)
+
+c['builders'] = builders
+
+# now set up the schedulers. We do this after setting up c['builders'] so we
+# can auto-generate a list of all of them.
+all_builders = [b['name'] for b in c['builders']]
+all_builders.sort()
+all_builders.remove("quick")
+
+## configure the schedulers
+s_quick = Scheduler(name="quick", branch=None, treeStableTimer=30,
+                    builderNames=["quick"])
+s_try = Try_Userpass("try", all_builders, port=9989,
+                     userpass=private.try_users)
+
+s_all = []
+for i, builderName in enumerate(all_builders):
+    s_all.append(Scheduler(name="all-" + builderName,
+                           branch=None, builderNames=[builderName],
+                           treeStableTimer=(5 * 60 + i * 30)))
+c['schedulers'] = [s_quick, s_try] + s_all
+
+
+
+# configure other status things
+
+c['slavePortnum'] = 9987
+c['status'] = []
+if really:
+    p = os.path.expanduser("~/.twistd-web-pb")
+    c['status'].append(html.Waterfall(distrib_port=p))
+else:
+    c['status'].append(html.Waterfall(http_port=9988))
+if really:
+    c['status'].append(words.IRC(host="irc.freenode.net",
+                                 nick='buildbot',
+                                 channels=["twisted"]))
+
+c['debugPassword'] = private.debugPassword
+#c['interlocks'] = [("do-deb", ["full-2.2"], ["debuild"])]
+if hasattr(private, "manhole"):
+    from buildbot import manhole
+    c['manhole'] = manhole.PasswordManhole(*private.manhole)
+c['status'].append(client.PBListener(9936))
+m = mail.MailNotifier(fromaddr="buildbot at twistedmatrix.com",
+                      builders=["quick", "debian-py2.3-select"],
+                      sendToInterestedUsers=True,
+		      extraRecipients=["warner at lothar.com"],
+		      mode="problem",
+		      )
+c['status'].append(m)
+c['projectName'] = "Twisted"
+c['projectURL'] = "http://twistedmatrix.com/"
+c['buildbotURL'] = "http://twistedmatrix.com/buildbot/"

Added: vendor/buildbot/current/docs/gen-reference
===================================================================
--- vendor/buildbot/current/docs/gen-reference	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/docs/gen-reference	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1 @@
+cd .. && python docs/epyrun -o docs/reference

Added: vendor/buildbot/current/docs/images/master.png
===================================================================
(Binary files differ)


Property changes on: vendor/buildbot/current/docs/images/master.png
___________________________________________________________________
Name: svn:mime-type
   + application/octet-stream

Added: vendor/buildbot/current/docs/images/overview.png
===================================================================
(Binary files differ)


Property changes on: vendor/buildbot/current/docs/images/overview.png
___________________________________________________________________
Name: svn:mime-type
   + application/octet-stream

Added: vendor/buildbot/current/docs/images/slavebuilder.png
===================================================================
(Binary files differ)


Property changes on: vendor/buildbot/current/docs/images/slavebuilder.png
___________________________________________________________________
Name: svn:mime-type
   + application/octet-stream

Added: vendor/buildbot/current/docs/images/slaves.png
===================================================================
(Binary files differ)


Property changes on: vendor/buildbot/current/docs/images/slaves.png
___________________________________________________________________
Name: svn:mime-type
   + application/octet-stream

Added: vendor/buildbot/current/docs/images/status.png
===================================================================
(Binary files differ)


Property changes on: vendor/buildbot/current/docs/images/status.png
___________________________________________________________________
Name: svn:mime-type
   + application/octet-stream

Added: vendor/buildbot/current/setup.py
===================================================================
--- vendor/buildbot/current/setup.py	2007-01-19 21:47:39 UTC (rev 5840)
+++ vendor/buildbot/current/setup.py	2007-01-19 22:06:08 UTC (rev 5841)
@@ -0,0 +1,89 @@
+#! /usr/bin/python
+
+import sys, os
+from distutils.core import setup
+from buildbot import version
+
+# Path: twisted!cvstoys!buildbot
+from distutils.command.install_data import install_data
+class install_data_twisted(install_data):
+    """make sure data files are installed in package.
+    this is evil.
+    copied from Twisted/setup.py.
+    """
+    def finalize_options(self):
+        self.set_undefined_options('install',
+            ('install_lib', 'install_dir')
+        )
+        install_data.finalize_options(self)
+
+long_description="""
+The BuildBot is a system to automate the compile/test cycle required by
+most software projects to validate code changes. By automatically
+rebuilding and testing the tree each time something has changed, build
+problems are pinpointed quickly, before other developers are
+inconvenienced by the failure. The guilty developer can be identified
+and harassed without human intervention. By running the builds on a
+variety of platforms, developers who do not have the facilities to test
+their changes everywhere before checkin will at least know shortly
+afterwards whether they have broken the build or not. Warning counts,
+lint checks, image size, compile time, and other build parameters can
+be tracked over time, are more visible, and are therefore easier to
+improve.
+"""
+
+scripts = ["bin/buildbot"]
+if sys.platform == "win32":
+    scripts.append("contrib/windows/buildbot.bat")
+    scripts.append("contrib/windows/buildbot_service.py")
+
+testmsgs = []
+for f in os.listdir("buildbot/test/mail"):
+    if f.endswith("~"):
+        continue
+    if f.startswith("msg") or f.startswith("syncmail"):
+        testmsgs.append("buildbot/test/mail/%s" % f)
+
+setup(name="buildbot",
+      version=version,
+      description="BuildBot build automation system",
+      long_description=long_description,
+      author="Brian Warner",
+      author_email="warner-buildbot at lothar.com",
+      url="http://buildbot.sourceforge.net/",
+      license="GNU GPL",
+      # does this classifiers= mean that this can't be installed on 2.2/2.3?
+      classifiers=[
+    'Development Status :: 4 - Beta',
+    'Environment :: No Input/Output (Daemon)',
+    'Environment :: Web Environment',
+    'Intended Audience :: Developers',
+    'License :: OSI Approved :: GNU General Public License (GPL)',
+    'Topic :: Software Development :: Build Tools',
+    'Topic :: Software Development :: Testing',
+    ],
+
+      packages=["buildbot",
+                "buildbot.status",
+                "buildbot.changes",
+                "buildbot.steps",
+                "buildbot.process",
+                "buildbot.clients",
+                "buildbot.slave",
+                "buildbot.scripts",
+                "buildbot.test",
+                ],
+      data_files=[("buildbot", ["buildbot/buildbot.png"]),
+                  ("buildbot/clients", ["buildbot/clients/debug.glade"]),
+                  ("buildbot/status", ["buildbot/status/classic.css"]),
+                  ("buildbot/scripts", ["buildbot/scripts/sample.cfg"]),
+                  ("buildbot/test/mail", testmsgs),
+                  ("buildbot/test/subdir", ["buildbot/test/subdir/emit.py"]),
+                  ],
+      scripts = scripts,
+      cmdclass={'install_data': install_data_twisted},
+      )
+
+# Local Variables:
+# fill-column: 71
+# End:



More information about the cig-commits mailing list