Initial commit of Anvil code, with a partially tested setup.py

parent 9bed4637cc
commit 662d0fede9

@@ -0,0 +1,21 @@
[report]
exclude_lines =
    pragma: no cover
    def __repr__
    def __str__
    if self.debug:
    if settings.DEBUG
    raise AssertionError
    raise NotImplementedError
    if 0:
    if __name__ == .__main__.:

omit =
    BUILD
    anvil/test.py
    run-tests.py
    *_test.py
    /usr/**
    /tmp/**
    /Library/Python/**
    /private/var/**

@@ -0,0 +1,66 @@
# ==============================================================================
# Misc system junk
# ==============================================================================

.DS_Store
._*
.Spotlight-V100
.Trashes
.com.apple.*
Thumbs.db
Desktop.ini

# ==============================================================================
# Projects/IDE files
# ==============================================================================

# Sublime Text
*.sublime-project
*.sublime-workspace

# VIM
.*.sw[a-z]
*.un~
Session.vim

# TextMate
*.tmproj
*.tmproject
tmtags

# Eclipse
.project
.metadata

# ==============================================================================
# Temp generated code
# ==============================================================================

*.py[co]
.coverage

# ==============================================================================
# Logs and dumps
# ==============================================================================

npm-debug.log

# ==============================================================================
# Build system output
# ==============================================================================

# Python
*.egg-info

# npm/node
.lock-wscript
node_modules/**/build/
node_modules/.bin/

# coverage/etc
scratch/

.build-cache/
build-out/
build-gen/
build-bin/

@@ -0,0 +1,6 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""
"""

__author__ = 'benvanik@google.com (Ben Vanik)'

@@ -0,0 +1,154 @@
# Copyright 2012 Google Inc. All Rights Reserved.

__author__ = 'benvanik@google.com (Ben Vanik)'


class Deferred(object):
  """A simple deferred object, designed for single-threaded tracking of
  futures.
  """

  def __init__(self):
    """Initializes a deferred."""
    self._callbacks = []
    self._errbacks = []
    self._is_done = False
    self._failed = False
    self._exception = None
    self._args = None
    self._kwargs = None

  def is_done(self):
    """Whether the deferred has completed (either succeeded or failed).

    Returns:
      True if the deferred has completed.
    """
    return self._is_done

  def add_callback_fn(self, fn):
    """Adds a function that will be called when the deferred completes
    successfully.

    The arguments passed to the function will be those arguments passed to
    callback. If multiple callbacks are registered they will all be called with
    the same arguments, so don't modify them.

    If the deferred has already completed when this function is called then the
    callback will be made immediately.

    Args:
      fn: Function to call back.
    """
    if self._is_done:
      if not self._failed:
        fn(*self._args, **self._kwargs)
      return
    self._callbacks.append(fn)

  def add_errback_fn(self, fn):
    """Adds a function that will be called when the deferred completes with
    an error.

    The arguments passed to the function will be those arguments passed to
    errback. If multiple errbacks are registered they will all be called with
    the same arguments, so don't modify them.

    If the deferred has already completed when this function is called then the
    errback will be made immediately.

    Args:
      fn: Function to call back.
    """
    if self._is_done:
      if self._failed:
        fn(*self._args, **self._kwargs)
      return
    self._errbacks.append(fn)

  def callback(self, *args, **kwargs):
    """Completes a deferred successfully and calls any registered callbacks."""
    assert not self._is_done
    self._is_done = True
    self._args = args
    self._kwargs = kwargs
    callbacks = self._callbacks
    self._callbacks = []
    self._errbacks = []
    for fn in callbacks:
      fn(*args, **kwargs)

  def errback(self, *args, **kwargs):
    """Completes a deferred with an error and calls any registered errbacks."""
    assert not self._is_done
    self._is_done = True
    self._failed = True
    if len(args) and isinstance(args[0], Exception):
      self._exception = args[0]
    self._args = args
    self._kwargs = kwargs
    errbacks = self._errbacks
    self._callbacks = []
    self._errbacks = []
    for fn in errbacks:
      fn(*args, **kwargs)


def gather_deferreds(deferreds, errback_if_any_fail=False):
  """Waits until all of the given deferreds callback.
  Once all have completed this deferred will issue a callback
  with a list corresponding to each waiter, with a (success, args, kwargs)
  tuple for each deferred.

  The deferred returned by this will only ever issue callbacks, never errbacks,
  unless errback_if_any_fail is True.

  Args:
    deferreds: A list of deferreds to wait on.
    errback_if_any_fail: True to use errback instead of callback if at least
        one of the input deferreds fails.

  Returns:
    A deferred that is called back with a list of tuples corresponding to each
    input deferred. The tuples are of (success, args, kwargs) with success
    being a boolean True if the deferred used callback and False if it used
    errback.
  """
  if isinstance(deferreds, Deferred):
    deferreds = [deferreds]
  gather_deferred = Deferred()
  deferred_len = len(deferreds)
  if not deferred_len:
    gather_deferred.callback([])
    return gather_deferred

  # Mutable cell for the remaining count, as Python 2 closures cannot rebind
  # outer locals (no nonlocal)
  pending = [deferred_len]
  result_tuples = deferred_len * [None]

  def _complete():
    pending[0] -= 1
    if not pending[0]:
      if not errback_if_any_fail:
        gather_deferred.callback(result_tuples)
      else:
        any_failed = False
        for result in result_tuples:
          if not result[0]:
            any_failed = True
            break
        if any_failed:
          gather_deferred.errback(result_tuples)
        else:
          gather_deferred.callback(result_tuples)

  def _makecapture(n, deferred):
    def _callback(*args, **kwargs):
      result_tuples[n] = (True, args, kwargs)
      _complete()
    def _errback(*args, **kwargs):
      result_tuples[n] = (False, args, kwargs)
      _complete()
    deferred.add_callback_fn(_callback)
    deferred.add_errback_fn(_errback)

  for n in xrange(deferred_len):
    _makecapture(n, deferreds[n])

  return gather_deferred
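
The Deferred contract above is easy to misread (callbacks registered after completion fire immediately, and gather reports one (success, args, kwargs) tuple per input), so here is a minimal usage sketch. This is illustrative only and not part of the commit; it uses only the names defined in the module above.

# Illustrative usage of Deferred and gather_deferreds (not part of the commit)
from async import Deferred, gather_deferreds

def on_done(results):
  # results is ordered like the input list, e.g.:
  # [(True, ('ok',), {}), (False, (), {'error': 'bad'})]
  print results

d1 = Deferred()
d2 = Deferred()
gathered = gather_deferreds([d1, d2])
gathered.add_callback_fn(on_done)
d1.callback('ok')
d2.errback(error='bad')  # on_done fires once both inputs have completed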

@@ -0,0 +1,293 @@
#!/usr/bin/python

# Copyright 2012 Google Inc. All Rights Reserved.

"""Tests for the async module.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import unittest2

from async import Deferred, gather_deferreds
from test import AsyncTestCase


class DeferredTest(unittest2.TestCase):
  """Behavioral tests of the Deferred type."""

  def testMultiCall(self):
    d = Deferred()
    d.callback()
    with self.assertRaises(AssertionError):
      d.callback()
    d = Deferred()
    d.errback()
    with self.assertRaises(AssertionError):
      d.errback()
    d = Deferred()
    d.callback()
    with self.assertRaises(AssertionError):
      d.errback()
    d = Deferred()
    d.errback()
    with self.assertRaises(AssertionError):
      d.callback()

  def testCallbackArgs(self):
    cb = {}
    def cb_thunk(*args, **kwargs):
      cb['done'] = True
      cb['args'] = args
      cb['kwargs'] = kwargs

    d = Deferred()
    self.assertFalse(d.is_done())
    d.callback()
    self.assertTrue(d.is_done())

    d = Deferred()
    self.assertFalse(d.is_done())
    d.errback()
    self.assertTrue(d.is_done())

    d = Deferred()
    d.add_callback_fn(cb_thunk)
    d.callback()
    self.assertNotEqual(len(cb), 0)
    self.assertTrue(cb['done'])
    self.assertEqual(len(cb['args']), 0)
    self.assertEqual(len(cb['kwargs']), 0)
    cb.clear()

    d = Deferred()
    d.add_callback_fn(cb_thunk)
    d.callback('a', 'b')
    self.assertNotEqual(len(cb), 0)
    self.assertTrue(cb['done'])
    self.assertEqual(len(cb['args']), 2)
    self.assertEqual(cb['args'][0], 'a')
    self.assertEqual(cb['args'][1], 'b')
    self.assertEqual(len(cb['kwargs']), 0)
    cb.clear()

    d = Deferred()
    d.add_callback_fn(cb_thunk)
    d.callback('a', b='b')
    self.assertNotEqual(len(cb), 0)
    self.assertTrue(cb['done'])
    self.assertEqual(len(cb['args']), 1)
    self.assertEqual(cb['args'][0], 'a')
    self.assertEqual(len(cb['kwargs']), 1)
    self.assertEqual(cb['kwargs']['b'], 'b')
    cb.clear()

  def testCallbackOrder(self):
    cb = {}
    def cb_thunk(*args, **kwargs):
      cb['done'] = True
      cb['args'] = args
      cb['kwargs'] = kwargs

    d = Deferred()
    d.add_callback_fn(cb_thunk)
    d.callback('a')
    self.assertNotEqual(len(cb), 0)
    self.assertTrue(cb['done'])
    self.assertEqual(len(cb['args']), 1)
    self.assertEqual(cb['args'][0], 'a')
    self.assertEqual(len(cb['kwargs']), 0)
    cb.clear()

    d = Deferred()
    d.callback('a')
    d.add_callback_fn(cb_thunk)
    self.assertNotEqual(len(cb), 0)
    self.assertTrue(cb['done'])
    self.assertEqual(len(cb['args']), 1)
    self.assertEqual(cb['args'][0], 'a')
    self.assertEqual(len(cb['kwargs']), 0)
    cb.clear()

    d = Deferred()
    d.add_errback_fn(cb_thunk)
    d.errback('a')
    self.assertNotEqual(len(cb), 0)
    self.assertTrue(cb['done'])
    self.assertEqual(len(cb['args']), 1)
    self.assertEqual(cb['args'][0], 'a')
    self.assertEqual(len(cb['kwargs']), 0)
    cb.clear()

    d = Deferred()
    d.errback('a')
    d.add_errback_fn(cb_thunk)
    self.assertNotEqual(len(cb), 0)
    self.assertTrue(cb['done'])
    self.assertEqual(len(cb['args']), 1)
    self.assertEqual(cb['args'][0], 'a')
    self.assertEqual(len(cb['kwargs']), 0)
    cb.clear()

    d = Deferred()
    d.add_callback_fn(cb_thunk)
    d.errback('a')
    self.assertEqual(len(cb), 0)
    cb.clear()

    d = Deferred()
    d.errback('a')
    d.add_callback_fn(cb_thunk)
    self.assertEqual(len(cb), 0)
    cb.clear()

    d = Deferred()
    d.add_errback_fn(cb_thunk)
    d.callback('a')
    self.assertEqual(len(cb), 0)
    cb.clear()

    d = Deferred()
    d.callback('a')
    d.add_errback_fn(cb_thunk)
    self.assertEqual(len(cb), 0)
    cb.clear()

  def testMultiCallbacks(self):
    cbs = []
    def cb_multi_thunk(*args, **kwargs):
      cbs.append({
          'done': True,
          'args': args,
          'kwargs': kwargs
          })

    d = Deferred()
    d.add_callback_fn(cb_multi_thunk)
    d.callback('a')
    self.assertEqual(len(cbs), 1)
    self.assertNotEqual(len(cbs[0]), 0)
    self.assertEqual(cbs[0]['args'][0], 'a')
    cbs[:] = []

    d = Deferred()
    d.add_callback_fn(cb_multi_thunk)
    d.add_callback_fn(cb_multi_thunk)
    d.callback('a')
    self.assertEqual(len(cbs), 2)
    self.assertNotEqual(len(cbs[0]), 0)
    self.assertNotEqual(len(cbs[1]), 0)
    self.assertEqual(cbs[0]['args'][0], 'a')
    self.assertEqual(cbs[1]['args'][0], 'a')
    cbs[:] = []

    d = Deferred()
    d.add_callback_fn(cb_multi_thunk)
    d.callback('a')
    d.add_callback_fn(cb_multi_thunk)
    self.assertEqual(len(cbs), 2)
    self.assertNotEqual(len(cbs[0]), 0)
    self.assertNotEqual(len(cbs[1]), 0)
    self.assertEqual(cbs[0]['args'][0], 'a')
    self.assertEqual(cbs[1]['args'][0], 'a')
    cbs[:] = []

    d = Deferred()
    d.callback('a')
    d.add_callback_fn(cb_multi_thunk)
    d.add_callback_fn(cb_multi_thunk)
    self.assertEqual(len(cbs), 2)
    self.assertNotEqual(len(cbs[0]), 0)
    self.assertNotEqual(len(cbs[1]), 0)
    self.assertEqual(cbs[0]['args'][0], 'a')
    self.assertEqual(cbs[1]['args'][0], 'a')
    cbs[:] = []


class GatherTest(AsyncTestCase):
  """Behavioral tests for the async gather function."""

  def testGather(self):
    d = gather_deferreds([])
    self.assertCallbackEqual(d, [])

    da = Deferred()
    db = Deferred()
    dc = Deferred()
    df = Deferred()
    d = gather_deferreds([da, db, dc, df])
    df.errback()
    dc.callback('c')
    db.callback('b')
    da.callback('a')
    self.assertCallbackEqual(d, [
        (True, ('a',), {}),
        (True, ('b',), {}),
        (True, ('c',), {}),
        (False, (), {})])

    da = Deferred()
    db = Deferred()
    dc = Deferred()
    df = Deferred()
    df.errback('f')
    dc.callback('c')
    d = gather_deferreds([da, db, dc, df])
    db.callback('b')
    da.callback('a')
    self.assertCallbackEqual(d, [
        (True, ('a',), {}),
        (True, ('b',), {}),
        (True, ('c',), {}),
        (False, ('f',), {})])

  def testErrback(self):
    d = gather_deferreds([], errback_if_any_fail=True)
    self.assertCallbackEqual(d, [])

    da = Deferred()
    db = Deferred()
    dc = Deferred()
    d = gather_deferreds([da, db, dc], errback_if_any_fail=True)
    dc.callback('c')
    db.callback('b')
    da.callback('a')
    self.assertCallbackEqual(d, [
        (True, ('a',), {}),
        (True, ('b',), {}),
        (True, ('c',), {})])

    da = Deferred()
    db = Deferred()
    dc = Deferred()
    df = Deferred()
    d = gather_deferreds([da, db, dc, df], errback_if_any_fail=True)
    df.errback()
    dc.callback('c')
    db.callback('b')
    da.callback('a')
    self.assertErrbackEqual(d, [
        (True, ('a',), {}),
        (True, ('b',), {}),
        (True, ('c',), {}),
        (False, (), {})])

    da = Deferred()
    db = Deferred()
    dc = Deferred()
    df = Deferred()
    df.errback('f')
    dc.callback('c')
    d = gather_deferreds([da, db, dc, df], errback_if_any_fail=True)
    db.callback('b')
    da.callback('a')
    self.assertErrbackEqual(d, [
        (True, ('a',), {}),
        (True, ('b',), {}),
        (True, ('c',), {}),
        (False, ('f',), {})])


if __name__ == '__main__':
  unittest2.main()

@@ -0,0 +1,6 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""
"""

__author__ = 'benvanik@google.com (Ben Vanik)'

@@ -0,0 +1,52 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Builds a set of target rules.

Examples:
# Build the given rules
manage.py build :some_rule some/path:another_rule
# Force a full rebuild (essentially a 'manage.py clean' first)
manage.py build --rebuild :some_rule
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import argparse

import anvil.commands.util as commandutil
from anvil.manage import manage_command


def _get_options_parser():
  """Gets an options parser for the given args."""
  parser = commandutil.create_argument_parser('manage.py build', __doc__)

  # Add all common args
  commandutil.add_common_build_args(parser, targets=True)

  # 'build' specific
  parser.add_argument('--rebuild',
                      dest='rebuild',
                      action='store_true',
                      default=False,
                      help=('Cleans all output and caches before building.'))

  return parser


@manage_command('build', 'Builds target rules.')
def build(args, cwd):
  parser = _get_options_parser()
  parsed_args = parser.parse_args(args)

  # Handle --rebuild
  if parsed_args.rebuild:
    if not commandutil.clean_output(cwd):
      return False

  (result, all_target_outputs) = commandutil.run_build(cwd, parsed_args)

  print all_target_outputs

  return result

@@ -0,0 +1,33 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Cleans all build-* paths and caches.
Attempts to delete all paths the build system creates.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import argparse
import os
import shutil
import sys

import anvil.commands.util as commandutil
from anvil.manage import manage_command


def _get_options_parser():
  """Gets an options parser for the given args."""
  parser = commandutil.create_argument_parser('manage.py clean', __doc__)

  # 'clean' specific

  return parser


@manage_command('clean', 'Cleans outputs and caches.')
def clean(args, cwd):
  parser = _get_options_parser()
  parsed_args = parser.parse_args(args)

  return commandutil.clean_output(cwd)

@@ -0,0 +1,95 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Runs a build and copies all output results of the specified rules to a path.
All of the output files of the specified rules will be copied to the target
output path. The directory structure will be exactly that found under the
various build-*/ folders, but collapsed into one.

A typical deploy rule will bring together many result srcs, for example
converted audio files or compiled code, for a specific configuration.
One could have many such rules to target different configurations, such as
unoptimized/uncompiled vs. optimized/packed.

Examples:
# Copy all output files of :release_all to /some/bin/, merging the output
manage.py deploy --output=/some/bin/ :release_all
# Clean (aka delete) /some/bin/ before doing the copy
manage.py deploy --clean --output=/some/bin/ :release_all
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import argparse
import os
import shutil
import sys

import anvil.commands.util as commandutil
from anvil.manage import manage_command


def _get_options_parser():
  """Gets an options parser for the given args."""
  parser = commandutil.create_argument_parser('manage.py deploy', __doc__)

  # Add all common args
  commandutil.add_common_build_args(parser, targets=True)

  # 'deploy' specific
  parser.add_argument('-o', '--output',
                      dest='output',
                      required=True,
                      help=('Output path to place all results. Will be created '
                            'if it does not exist.'))
  parser.add_argument('-c', '--clean',
                      dest='clean',
                      action='store_true',
                      help=('Whether to remove all output files before '
                            'deploying.'))

  return parser


@manage_command('deploy', 'Builds and copies output to a target path.')
def deploy(args, cwd):
  parser = _get_options_parser()
  parsed_args = parser.parse_args(args)

  # Build everything first
  (result, all_target_outputs) = commandutil.run_build(cwd, parsed_args)
  if not result:
    # Failed - don't copy anything
    return False

  # Delete output, if desired (guard so rmtree doesn't raise on a fresh path)
  if parsed_args.clean and os.path.isdir(parsed_args.output):
    shutil.rmtree(parsed_args.output)

  # Ensure output exists
  if not os.path.isdir(parsed_args.output):
    os.makedirs(parsed_args.output)

  # Copy results
  for target_output in all_target_outputs:
    # Get path relative to root
    # This will contain the build-out/ etc
    rel_path = os.path.relpath(target_output, cwd)

    # Strip the build-*/ prefix
    rel_path = os.path.join(*(rel_path.split(os.sep)[1:]))

    # Make output path
    deploy_path = os.path.normpath(os.path.join(parsed_args.output, rel_path))

    # Ensure directory exists
    # TODO(benvanik): cache whether we have checked yet to reduce OS cost
    deploy_dir = os.path.dirname(deploy_path)
    if not os.path.isdir(deploy_dir):
      os.makedirs(deploy_dir)

    # Copy!
    print '%s -> %s' % (target_output, deploy_path)
    shutil.copy2(target_output, deploy_path)

  return result

@@ -0,0 +1,59 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Launches an HTTP server and optionally a continuous build daemon.
This serves the current working directory over HTTP, similar to Python's
SimpleHTTPServer.

If a daemon port and any rules are defined then changes to the specified
paths will automatically trigger builds. Clients can connect to the given
WebSocket port to receive lists of file change sets.

Daemon rules should be of the form:
file_set('some_daemon',
         srcs=['watch_path_1/', 'watch_path_2/'],
         deps=[':root_build_target'])
Where the given srcs will be recursively watched for changes to trigger the
rules specified in deps.

Examples:
# Simple HTTP server
manage.py serve
manage.py serve --http_port=8080
# HTTP server + build daemon
manage.py serve :some_daemon
manage.py serve --http_port=8080 --daemon_port=8081 :some_daemon
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import argparse
import os
import sys

import anvil.commands.util as commandutil
from anvil.manage import manage_command


def _get_options_parser():
  """Gets an options parser for the given args."""
  parser = commandutil.create_argument_parser('manage.py serve', __doc__)

  # Add all common args
  commandutil.add_common_build_args(parser, targets=True)

  # 'serve' specific

  return parser


@manage_command('serve', 'Continuously builds and serves targets.')
def serve(args, cwd):
  parser = _get_options_parser()
  parsed_args = parser.parse_args(args)

  (result, all_target_outputs) = commandutil.run_build(cwd, parsed_args)

  print all_target_outputs

  return result

@@ -0,0 +1,43 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Builds and executes a set of test rules.
TODO: need some custom rules (test_js or something?) that provide parameters
to some test framework (BusterJS?)

Example:
manage.py test :test_rule ...
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import argparse
import os
import sys

import anvil.commands.util as commandutil
from anvil.manage import manage_command


def _get_options_parser():
  """Gets an options parser for the given args."""
  parser = commandutil.create_argument_parser('manage.py test', __doc__)

  # Add all common args
  commandutil.add_common_build_args(parser, targets=True)

  # 'test' specific

  return parser


@manage_command('test', 'Builds and runs test rules.')
def test(args, cwd):
  parser = _get_options_parser()
  parsed_args = parser.parse_args(args)

  (result, all_target_outputs) = commandutil.run_build(cwd, parsed_args)

  print all_target_outputs

  return result

@@ -0,0 +1,152 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Common command utilities.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import argparse
import os
import shutil

from anvil.context import BuildEnvironment, BuildContext
from anvil.project import FileModuleResolver, Project
from anvil.task import InProcessTaskExecutor, MultiProcessTaskExecutor


# Hack to get formatting in usage() correct
class _ComboHelpFormatter(argparse.RawDescriptionHelpFormatter,
                          argparse.ArgumentDefaultsHelpFormatter):
  pass


def create_argument_parser(program_usage, description=''):
  """Creates an ArgumentParser with the proper formatting.

  Args:
    program_usage: Program usage string, such as 'foo'.
    description: Help string, usually from __doc__.

  Returns:
    An ArgumentParser that can be used to parse arguments.
  """
  parser = argparse.ArgumentParser(prog=program_usage,
                                   description=description,
                                   formatter_class=_ComboHelpFormatter)
  _add_common_args(parser)
  return parser


def _add_common_args(parser):
  """Adds common system arguments to an argument parser.

  Args:
    parser: ArgumentParser to modify.
  """
  # TODO(benvanik): logging control/etc
  pass


def add_common_build_args(parser, targets=False):
  """Adds common build arguments to an argument parser.

  Args:
    parser: ArgumentParser to modify.
    targets: True to add variable target arguments.
  """
  # Threading/execution control
  parser.add_argument('-j', '--jobs',
                      dest='jobs',
                      type=int,
                      default=None,
                      help=('Specifies the number of tasks to run '
                            'simultaneously. If omitted then all processors '
                            'will be used.'))

  # Build context control
  parser.add_argument('-f', '--force',
                      dest='force',
                      action='store_true',
                      default=False,
                      help=('Force all rules to run as if there was no cache.'))
  parser.add_argument('--stop_on_error',
                      dest='stop_on_error',
                      action='store_true',
                      default=False,
                      help=('Stop building when an error is encountered.'))

  # Target specification
  if targets:
    parser.add_argument('targets',
                        nargs='+',
                        metavar='target',
                        help='Target build rule (such as :a or foo/bar:a)')


def clean_output(cwd):
  """Cleans all build-related output and caches.

  Args:
    cwd: Current working directory.

  Returns:
    True if the clean succeeded.
  """
  nuke_paths = [
      '.build-cache',
      'build-out',
      'build-gen',
      'build-bin',
      ]
  any_failed = False
  for path in nuke_paths:
    full_path = os.path.join(cwd, path)
    if os.path.isdir(full_path):
      try:
        shutil.rmtree(full_path)
      except Exception as e:
        print 'Unable to remove %s: %s' % (full_path, e)
        any_failed = True
  return not any_failed


def run_build(cwd, parsed_args):
  """Runs a build with the given arguments.
  Assumes that add_common_args and add_common_build_args were called on the
  ArgumentParser.

  Args:
    cwd: Current working directory.
    parsed_args: Argument namespace from an ArgumentParser.

  Returns:
    A (success, all_target_outputs) tuple, where success is True if the build
    succeeded and all_target_outputs is a set of the output files of all
    target rules.
  """
  build_env = BuildEnvironment(root_path=cwd)

  module_resolver = FileModuleResolver(cwd)
  project = Project(module_resolver=module_resolver)

  # -j/--jobs switch to change execution mode
  # TODO(benvanik): force -j 1 on Cygwin?
  if parsed_args.jobs == 1:
    task_executor = InProcessTaskExecutor()
  else:
    task_executor = MultiProcessTaskExecutor(worker_count=parsed_args.jobs)

  # TODO(benvanik): good logging/info - resolve rules in project and print
  # info?
  print 'building %s' % (parsed_args.targets)

  # TODO(benvanik): take additional args from command line
  all_target_outputs = set([])
  with BuildContext(build_env, project,
                    task_executor=task_executor,
                    force=parsed_args.force,
                    stop_on_error=parsed_args.stop_on_error,
                    raise_on_error=False) as build_ctx:
    result = build_ctx.execute_sync(parsed_args.targets)
    if result:
      for target in parsed_args.targets:
        (state, target_outputs) = build_ctx.get_rule_results(target)
        all_target_outputs.update(target_outputs)

  return (result == True, all_target_outputs)
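
The module above is the scaffolding every command in this commit follows: create a parser, add the shared build args, run the build. As a sketch of that pattern, here is a hypothetical extra command (the 'lint' name and behavior are invented for illustration; manage_command and the commandutil helpers are the ones from this commit):

# Hypothetical command, following the pattern of build_command.py above
import anvil.commands.util as commandutil
from anvil.manage import manage_command


def _get_options_parser():
  """Gets an options parser for the given args."""
  parser = commandutil.create_argument_parser('manage.py lint', __doc__)
  commandutil.add_common_build_args(parser, targets=True)
  return parser


@manage_command('lint', 'Builds target rules and reports issues.')
def lint(args, cwd):
  parser = _get_options_parser()
  parsed_args = parser.parse_args(args)
  # run_build returns (success, set of target rule output files)
  (result, all_target_outputs) = commandutil.run_build(cwd, parsed_args)
  return result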

@@ -0,0 +1,673 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Build context.

A build context is created to manage the dependency graph and build rules, as
well as handling distribution and execution of the tasks those rules create.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


from collections import deque
import fnmatch
import multiprocessing
import os
import stat

import async
from async import Deferred
import graph
import project
import task
import util


class Status:
  """Enumeration describing the status of a context."""
  WAITING = 0
  RUNNING = 1
  SUCCEEDED = 2
  FAILED = 3


class BuildEnvironment(object):
  """Build environment settings, containing access to all globals.
  Build environments are a combination of flags passed to the build system
  (from configuration files or the command line), system environment variables,
  and platform options.

  Rule and task implementations should avoid accessing the kind of information
  contained here from anywhere else (such as the sys module), as this ensures
  a consistent environment.

  The build environment should be kept constant throughout a build, and should
  be treated as read-only while in use by a context.

  This object may be passed to other processes, and must be pickleable.
  """

  def __init__(self, root_path=None):
    """Initializes a build environment.

    Args:
      root_path: Root path for base path resolution. If none is provided then
          the current working directory will be used.

    Raises:
      OSError: A path was not found or is the wrong type.
    """
    # TODO(benvanik): cwd for path resolution
    # TODO(benvanik): environment variables
    # TODO(benvanik): user-defined options dict

    if not root_path or not len(root_path):
      self.root_path = os.getcwd()
    else:
      self.root_path = os.path.abspath(root_path)
    if not os.path.isdir(self.root_path):
      raise OSError('Root path "%s" not found or not a directory' % (
          self.root_path))


class BuildContext(object):
  """A build context for a given project and set of target rules.
  Projects are built by specifying rules that should be considered the
  'targets'. All rules that they depend on are then built, in the proper order,
  to ensure that all dependencies are up to date.

  Build contexts store the runtime definitions of rules, as well as the
  environment they run in.

  Build contexts are designed to be used once and thrown away. To start another
  build create a new context with the same parameters.
  """

  def __init__(self, build_env, project,
               task_executor=None, force=False,
               stop_on_error=False, raise_on_error=False):
    """Initializes a build context.

    Args:
      build_env: Current build environment.
      project: Project to use for building.
      task_executor: Task executor to use. One will be created if none is
          passed.
      force: True to force execution of tasks even if they have not changed.
      stop_on_error: True to stop executing tasks as soon as an error occurs.
      raise_on_error: True to rethrow exceptions to ease debugging.
    """
    self.build_env = build_env
    self.project = project

    self.task_executor = task_executor
    self._close_task_executor = False
    if not self.task_executor:
      # HACK: multiprocessing on cygwin is really slow, so unless the caller
      # specifies we try to use the in-process executor to keep test times
      # low (any non-test callers should be specifying their own anyway)
      self.task_executor = task.InProcessTaskExecutor()
      #self.task_executor = task.MultiProcessTaskExecutor()
      self._close_task_executor = True

    self.force = force
    self.stop_on_error = stop_on_error
    self.raise_on_error = raise_on_error

    # Build the rule graph
    self.rule_graph = graph.RuleGraph(self.project)

    # Dictionary that should be used to map rule paths to RuleContexts
    self.rule_contexts = {}

  def __enter__(self):
    return self

  def __exit__(self, type, value, traceback):
    if self._close_task_executor:
      self.task_executor.close()

  def execute_sync(self, target_rule_names):
    """Synchronously executes the given target rules in the context.
    Rules are executed in order and, where possible, in parallel.

    This is equivalent to calling execute_async and then waiting on the
    deferred.

    Args:
      target_rule_names: A list of rule names that are to be executed.

    Returns:
      A boolean indicating whether execution succeeded.

    Raises:
      KeyError: One of the given target rules was not found in the project.
      NameError: An invalid target rule was given.
      TypeError: An invalid target rule was given.
      RuntimeError: Execution failed to complete.
    """
    d = self.execute_async(target_rule_names)
    self.wait(d)
    # The deferred has completed by now, so these fire immediately
    result = [None]
    def _callback():
      result[0] = True
    def _errback():
      result[0] = False
    d.add_callback_fn(_callback)
    d.add_errback_fn(_errback)
    assert result[0] is not None
    return result[0]

  def execute_async(self, target_rule_names):
    """Executes the given target rules in the context.
    Rules are executed in order and, where possible, in parallel.

    Args:
      target_rule_names: A list of rule names that are to be executed.

    Returns:
      A Deferred that completes when all rules have completed. If an error
      occurs in any rule an errback will be called.

    Raises:
      KeyError: One of the given target rules was not found in the project.
      NameError: An invalid target rule was given.
      TypeError: An invalid target rule was given.
    """
    # Verify that target rules are valid and exist
    target_rule_names = list(target_rule_names)
    util.validate_names(target_rule_names, require_semicolon=True)
    for rule_name in target_rule_names:
      if not self.project.resolve_rule(rule_name):
        raise KeyError('Target rule "%s" not found in project' % (rule_name))

    # Calculate the sequence of rules to execute
    rule_sequence = self.rule_graph.calculate_rule_sequence(target_rule_names)

    any_failed = [False]
    main_deferred = Deferred()
    remaining_rules = deque(rule_sequence)
    in_flight_rules = []

    def _issue_rule(rule):
      """Issues a single rule into the current execution context.
      Updates the in_flight_rules list and pumps when the rule completes.

      Args:
        rule: Rule to issue.
      """
      def _rule_callback(*args, **kwargs):
        in_flight_rules.remove(rule)
        _pump(previous_succeeded=True)

      def _rule_errback(exception=None, *args, **kwargs):
        in_flight_rules.remove(rule)
        # TODO(benvanik): log result/exception/etc?
        if exception:  # pragma: no cover
          print exception
        _pump(previous_succeeded=False)

      in_flight_rules.append(rule)
      rule_deferred = self._execute_rule(rule)
      rule_deferred.add_callback_fn(_rule_callback)
      rule_deferred.add_errback_fn(_rule_errback)
      return rule_deferred

    def _pump(previous_succeeded=True):
      """Attempts to run another rule and signals the main_deferred if done.

      Args:
        previous_succeeded: Whether the previous rule succeeded.
      """
      # If we're already done, gracefully exit
      if main_deferred.is_done():
        return

      # If we failed and we are supposed to stop, gracefully stop by
      # killing all future rules
      # This is better than terminating immediately, as it allows legit tasks
      # to finish
      if not previous_succeeded:
        any_failed[0] = True
      if not previous_succeeded and self.stop_on_error:
        remaining_rules.clear()

      if len(remaining_rules):
        # Peek the next rule
        next_rule = remaining_rules[0]

        # See if it has any dependency on currently running rules
        for in_flight_rule in in_flight_rules:
          if self.rule_graph.has_dependency(next_rule.path,
                                            in_flight_rule.path):
            # Blocked on a previous rule, so pass and wait for the next pump
            return

        # If here then we found no conflicting rules, so run!
        remaining_rules.popleft()
        _issue_rule(next_rule)
      else:
        # Done!
        # TODO(benvanik): better errbacks? some kind of BuildResults?
        if not any_failed[0]:
          main_deferred.callback()
        else:
          main_deferred.errback()

    # Kick off execution (once for each rule as a heuristic for flooding the
    # pipeline)
    for rule in rule_sequence:
      _pump()

    return main_deferred

  def wait(self, deferreds):
    """Blocks waiting on a list of deferreds until they all complete.
    The deferreds must have been returned from execute.

    Args:
      deferreds: A list of Deferreds (or one).
    """
    self.task_executor.wait(deferreds)

  def _execute_rule(self, rule):
    """Executes a single rule.
    This assumes that all dependent rules have already been executed.
    Assertions will be raised if all dependent rules have not completed
    successfully or if the given rule has been executed already.

    Args:
      rule: Rule to execute.

    Returns:
      A Deferred that will callback when the rule has completed executing.
    """
    assert not self.rule_contexts.has_key(rule.path)
    rule_ctx = rule.create_context(self)
    self.rule_contexts[rule.path] = rule_ctx
    if rule_ctx.check_predecessor_failures():
      return rule_ctx.cascade_failure()
    else:
      rule_ctx.begin()
      return rule_ctx.deferred

  def get_rule_results(self, rule):
    """Gets the status/output of a rule.
    This is not thread safe and should only be used to query the result of a
    rule after it has been run.

    Args:
      rule: Rule to query - can be a Rule object or a rule path that will be
          resolved.

    Returns:
      A tuple containing (status, output_paths) for the given rule.

    Raises:
      KeyError: The rule was not found.
    """
    if isinstance(rule, str):
      rule = self.project.resolve_rule(rule)
    if self.rule_contexts.has_key(rule.path):
      rule_ctx = self.rule_contexts[rule.path]
      return (rule_ctx.status, rule_ctx.all_output_files[:])
    else:
      return (Status.WAITING, [])

  def get_rule_outputs(self, rule):
    """Gets the output files of the given rule.
    It is only valid to call this on rules that have already been executed
    and have succeeded.

    Args:
      rule: Rule to query - can be a Rule object or a rule path that will be
          resolved.

    Returns:
      A list of all output files from the rule or None if the rule did not yet
      execute.

    Raises:
      KeyError: The rule was not found.
    """
    results = self.get_rule_results(rule)
    return results[1]


class RuleContext(object):
  """A runtime context for an individual rule.
  Must contain all of the state for a rule while it is being run, including
  all resolved inputs and resulting outputs (once complete).
  """

  def __init__(self, build_context, rule, *args, **kwargs):
    """Initializes a rule context.

    Args:
      build_context: BuildContext this rule is running in.
      rule: Rule this context wraps.
    """
    self.build_context = build_context
    self.build_env = build_context.build_env
    self.rule = rule

    self.deferred = Deferred()
    self.status = Status.WAITING
    self.start_time = None
    self.end_time = None
    self.exception = None

    # TODO(benvanik): logger
    self.logger = None

    # Resolve all src paths
    # If rules have their own attrs they'll have to do them themselves
    self.src_paths = self._resolve_input_files(rule.srcs, apply_src_filter=True)

    # The list of all files this rule outputted, upon completion
    self.all_output_files = []

  def _resolve_input_files(self, paths, apply_src_filter=False):
    """Resolves the given paths into real file system paths, ready for use.
    This adds direct file references, recursively enumerates paths, expands
    globs, and grabs outputs from other rules.

    Since this actually checks to see if specific files are present and raises
    if not, this should be called in the initializer of all subclasses to
    resolve all paths in a place where a good stack will occur.

    Note that the resulting list is not deduplicated - certain rules may want
    the exact list in the exact order defined. If you want a de-duped list,
    simply use list(set(result)) to quickly de-dupe.

    Args:
      paths: Paths to resolve.

    Returns:
      A list of all file paths from the given paths.

    Raises:
      KeyError: A required rule was not found.
      OSError: A source path was not found or could not be accessed.
      RuntimeError: Internal runtime error (rule executed out of order/etc)
    """
    base_path = os.path.dirname(self.rule.parent_module.path)
    input_paths = []
    for src in paths:
      # Grab all items from the source
      src_items = None
      if util.is_rule_path(src):
        # Reference to another rule
        other_rule = self.build_context.project.resolve_rule(
            src, requesting_module=self.rule.parent_module)
        if not other_rule:
          raise KeyError('Source rule "%s" not found' % (src))
        if not self.build_context.rule_contexts.has_key(other_rule.path):
          raise RuntimeError('Source rule "%s" not yet executed' % (src))
        other_rule_ctx = self.build_context.rule_contexts[other_rule.path]
        src_items = other_rule_ctx.all_output_files
      else:
        # File or folder path
        src_path = os.path.join(base_path, src)
        mode = os.stat(src_path).st_mode
        if stat.S_ISDIR(mode):
          src_items = os.listdir(src_path)
        else:
          src_items = [src_path]

      # Apply the src_filter, if any
      if apply_src_filter and self.rule.src_filter:
        for file_path in src_items:
          if fnmatch.fnmatch(file_path, self.rule.src_filter):
            input_paths.append(file_path)
      else:
        input_paths.extend(src_items)
    return input_paths

  def __get_target_path(self, base_path, name=None, suffix=None):
    """Handling of _get_*_path() methods.

    Args:
      base_path: Base path to the project root.
      name: If a name is provided it will be used instead of the rule name.
      suffix: Suffix to add to whatever path is built, such as '.txt' to add
          an extension.

    Returns:
      A full path that can be used to write a file.
    """
    if not name or not len(name):
      name = self.rule.name
    if suffix and len(suffix):
      name += suffix
    root_path = self.build_context.build_env.root_path
    module_path = os.path.dirname(self.rule.parent_module.path)
    rel_path = os.path.relpath(module_path, root_path)
    return os.path.normpath(os.path.join(base_path, rel_path, name))

  def _get_out_path(self, name=None, suffix=None):
    """Gets the 'out' path for an output.
    If no name is provided then the rule name will be used.

    The 'out' path should be used for all content/binary results.

    Args:
      name: If a name is provided it will be used instead of the rule name.
      suffix: Suffix to add to whatever path is built, such as '.txt' to add
          an extension.

    Returns:
      A full path that can be used to write a file to the proper 'out' path.
    """
    base_path = os.path.join(self.build_context.build_env.root_path,
                             'build-out')
    return self.__get_target_path(base_path, name=name, suffix=suffix)

  def _get_gen_path(self, name=None, suffix=None):
    """Gets the 'gen' path for an output.
    If no name is provided then the rule name will be used.

    The 'gen' path should be used for generated code only.

    Args:
      name: If a name is provided it will be used instead of the rule name.
      suffix: Suffix to add to whatever path is built, such as '.txt' to add
          an extension.

    Returns:
      A full path that can be used to write a file to the proper 'gen' path.
    """
    base_path = os.path.join(self.build_context.build_env.root_path,
                             'build-gen')
    return self.__get_target_path(base_path, name=name, suffix=suffix)

  def __get_target_path_for_src(self, base_path, src, opt_path=None):
    """Handling of _get_*_path_for_src() methods.

    Args:
      base_path: Base path to the project root.
      src: Absolute source path.

    Returns:
      A full path that can be used to write a file.
    """
    root_path = self.build_context.build_env.root_path
    rel_path = os.path.relpath(src, root_path)
    # Need to strip build-out and build-gen (so we can reference any file)
    rel_path = rel_path.replace('build-out/', '').replace('build-gen/', '')
    return os.path.normpath(os.path.join(base_path, rel_path))

  def _get_out_path_for_src(self, src):
    """Gets the 'out' path for a source file.

    The 'out' path should be used for all content/binary results.

    Args:
      src: Absolute source path.

    Returns:
      A full path that can be used to write a file to the proper 'out' path.
    """
    base_path = os.path.join(self.build_context.build_env.root_path,
                             'build-out')
    return self.__get_target_path_for_src(base_path, src)

  def _get_gen_path_for_src(self, src):
    """Gets the 'gen' path for a source file.

    The 'gen' path should be used for generated code only.

    Args:
      src: Absolute source path.

    Returns:
      A full path that can be used to write a file to the proper 'gen' path.
    """
    base_path = os.path.join(self.build_context.build_env.root_path,
                             'build-gen')
    return self.__get_target_path_for_src(base_path, src)

  def _ensure_output_exists(self, path):
    """Makes the given path exist, if it doesn't.

    Args:
      path: An absolute path to a folder that should exist.
    """
    if not os.path.isdir(path):
      os.makedirs(path)

  def _append_output_paths(self, paths):
    """Appends the given paths to the output list.
    Other rules that depend on this rule will receive these paths when it
    is used as a source.

    Args:
      paths: A list of paths to add to the list.
    """
    self.all_output_files.extend(paths)

  def _run_task_async(self, task):
    """Runs a task asynchronously.
    This is a utility method that makes it easier to execute tasks.

    Args:
      task: Task to execute.

    Returns:
      A deferred that signals when the task completes.
    """
    return self.build_context.task_executor.run_task_async(task)

  def check_predecessor_failures(self):
    """Checks all dependencies for failure.

    Returns:
      True if any dependency has failed.
    """
    for dep in self.rule.get_dependent_paths():
      if util.is_rule_path(dep):
        other_rule = self.build_context.project.resolve_rule(
            dep, requesting_module=self.rule.parent_module)
        other_rule_ctx = self.build_context.rule_contexts.get(
            other_rule.path, None)
        if other_rule_ctx.status == Status.FAILED:
          return True
    return False

  def begin(self):
    """Begins asynchronous rule execution.
    Custom RuleContext implementations should override this method to perform
    their behavior (spawning tasks/etc). When the returned Deferred is called
    back the rule context should be completed, with all_output_files properly
    set.

    The default implementation ends immediately, passing all input files
    through as output.

    Returns:
      A Deferred that will be called back when the rule has completed.
    """
    self.status = Status.RUNNING
    self.start_time = util.timer()
    return self.deferred

  def cascade_failure(self):
    """Instantly fails a rule, signaling that a rule prior to it has failed
    and it should not be run.

    Use this if a call to check_predecessor_failures returns True to properly
    set a rule context up for cascading failures.
    After calling this begin should not be called.

    Returns:
      A Deferred that has had its errback called.
    """
    # TODO(benvanik): special CascadingError exception
    self.start_time = util.timer()
    self._fail()
    return self.deferred

  def _succeed(self):
    """Signals that rule execution has completed successfully.
    This will set all state and issue the callback on the deferred.
    """
    self.status = Status.SUCCEEDED
    self.end_time = util.timer()
    self.deferred.callback()

  def _fail(self, exception=None, *args, **kwargs):
    """Signals that rule execution has completed in failure.
    This will set all state and issue the errback on the deferred.
    If an exception is provided it will be set on the context and passed as
    the first argument to the deferred.

    Args:
      exception: The exception that resulted in the rule failure, if any.
    """
    self.status = Status.FAILED
    self.end_time = util.timer()
    self.exception = exception
    if exception:
      self.deferred.errback(exception=exception)
    else:
      self.deferred.errback()

  def _chain(self, deferreds):
    """Chains the completion of the rule on the given deferreds.
    Depending on the success or failure of the deferreds, the rule context
    will succeed or fail.

    Args:
      deferreds: A Deferred or list of deferreds that will be called back.
    """
    deferred = async.gather_deferreds(deferreds, errback_if_any_fail=True)
    def _callback(*args, **kwargs):
      self._succeed()
    def _errback(*args, **kwargs):
      # Pull the first exception out of the gathered (success, args, kwargs)
      # tuples, if any was provided
      exception = None
      for arg in args[0]:
        if not arg[0]:
          if len(arg[1]) and isinstance(arg[1][0], Exception):
            exception = arg[1][0]
            break
          exception = arg[2].get('exception', None)
          if exception:
            break
      self._fail(exception=exception)
    deferred.add_callback_fn(_callback)
    deferred.add_errback_fn(_errback)


# class FileDelta(object):
#   """
#   TODO(benvanik): move to another module and setup to use cache
#   """
#
#   def __init__(self, source_paths=None):
#     """
#     Args:
#       source_paths
#     """
#     self.all_files = []
#     self.added_files = []
#     self.removed_files = []
#     self.changed_files = []
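
RuleContext.begin() and the Deferred it returns are the extension point for custom rules. A minimal sketch of such a rule, mirroring the SucceedRule pattern used in the tests below - illustrative only, not part of the commit:

# Illustrative custom rule: publishes its resolved inputs as outputs
from context import RuleContext
from rule import Rule


class PassthroughRule(Rule):
  class _Context(RuleContext):
    def begin(self):
      super(PassthroughRule._Context, self).begin()
      # src_paths was resolved in RuleContext.__init__; publish it so
      # dependent rules see these files as this rule's outputs
      self._append_output_paths(self.src_paths)
      self._succeed()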

@@ -0,0 +1,448 @@
#!/usr/bin/python

# Copyright 2012 Google Inc. All Rights Reserved.

"""Tests for the context module.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import os
import unittest2

import async
from context import *
from module import *
from rule import *
from project import *
from task import *
from test import AsyncTestCase, FixtureTestCase


class BuildEnvironmentTest(FixtureTestCase):
  """Behavioral tests of the BuildEnvironment type."""
  fixture = 'simple'

  def testConstruction(self):
    build_env = BuildEnvironment()
    self.assertTrue(os.path.isdir(build_env.root_path))

    build_env = BuildEnvironment(root_path='.')
    self.assertTrue(os.path.isdir(build_env.root_path))

    build_env = BuildEnvironment(root_path=self.root_path)
    self.assertTrue(os.path.isdir(build_env.root_path))
    self.assertEqual(build_env.root_path, self.root_path)

    build_env = BuildEnvironment(root_path=os.path.join(self.root_path, 'dir'))
    self.assertTrue(os.path.isdir(build_env.root_path))
    self.assertEqual(build_env.root_path, os.path.join(self.root_path, 'dir'))

    with self.assertRaises(OSError):
      BuildEnvironment(root_path='/not/found')


class BuildContextTest(FixtureTestCase):
  """Behavioral tests of the BuildContext type."""
  fixture = 'simple'

  def setUp(self):
    super(BuildContextTest, self).setUp()
    self.build_env = BuildEnvironment(root_path=self.root_path)

  def testConstruction(self):
    project = Project()
    with BuildContext(self.build_env, project): pass

    project = Project(modules=[Module('m', rules=[Rule('a')])])
    with BuildContext(self.build_env, project) as ctx:
      self.assertIsNotNone(ctx.task_executor)

    with BuildContext(self.build_env, project,
                      task_executor=InProcessTaskExecutor()) as ctx:
      self.assertIsNotNone(ctx.task_executor)

  def testExecution(self):
    project = Project(module_resolver=FileModuleResolver(self.root_path))

    with BuildContext(self.build_env, project) as ctx:
      with self.assertRaises(NameError):
        ctx.execute_async(['x'])
      with self.assertRaises(KeyError):
        ctx.execute_async([':x'])
      with self.assertRaises(OSError):
        ctx.execute_async(['x:x'])

    with BuildContext(self.build_env, project) as ctx:
      self.assertTrue(ctx.execute_sync([':a']))
      results = ctx.get_rule_results(':a')
      self.assertEqual(results[0], Status.SUCCEEDED)

    with BuildContext(self.build_env, project) as ctx:
      d = ctx.execute_async([':a'])
      ctx.wait(d)
      self.assertCallback(d)
      results = ctx.get_rule_results(':a')
      self.assertEqual(results[0], Status.SUCCEEDED)

    with BuildContext(self.build_env, project) as ctx:
      d = ctx.execute_async([':mixed_input'])
      ctx.wait(d)
      self.assertCallback(d)
      results = ctx.get_rule_results(':mixed_input')
      self.assertEqual(results[0], Status.SUCCEEDED)
      self.assertEqual(len(results[1]), 2)

    class SucceedRule(Rule):
      class _Context(RuleContext):
        def begin(self):
          super(SucceedRule._Context, self).begin()
          #print 'hello from rule %s' % (self.rule.path)
          self._succeed()

    class FailRule(Rule):
      class _Context(RuleContext):
        def begin(self):
          super(FailRule._Context, self).begin()
          #print 'hello from rule %s' % (self.rule.path)
          self._fail()

    project = Project(modules=[Module('m', rules=[SucceedRule('a')])])
    with BuildContext(self.build_env, project) as ctx:
      d = ctx.execute_async(['m:a'])
      ctx.wait(d)
      self.assertCallback(d)
      results = ctx.get_rule_results('m:a')
      self.assertEqual(results[0], Status.SUCCEEDED)

    project = Project(modules=[Module('m', rules=[
        SucceedRule('a'),
        SucceedRule('b', deps=[':a'])])])
    with BuildContext(self.build_env, project) as ctx:
      d = ctx.execute_async(['m:b'])
      ctx.wait(d)
      self.assertCallback(d)
      results = ctx.get_rule_results('m:a')
      self.assertEqual(results[0], Status.SUCCEEDED)
      results = ctx.get_rule_results('m:b')
      self.assertEqual(results[0], Status.SUCCEEDED)

    project = Project(modules=[Module('m', rules=[FailRule('a')])])
    with BuildContext(self.build_env, project) as ctx:
      d = ctx.execute_async(['m:a'])
      ctx.wait(d)
      self.assertErrback(d)
      results = ctx.get_rule_results('m:a')
      self.assertEqual(results[0], Status.FAILED)

    project = Project(modules=[Module('m', rules=[FailRule('a')])])
    with BuildContext(self.build_env, project) as ctx:
      self.assertFalse(ctx.execute_sync(['m:a']))
      results = ctx.get_rule_results('m:a')
      self.assertEqual(results[0], Status.FAILED)

    project = Project(modules=[Module('m', rules=[
        FailRule('a'),
        SucceedRule('b', deps=[':a'])])])
    with BuildContext(self.build_env, project) as ctx:
      d = ctx.execute_async(['m:b'])
      ctx.wait(d)
      self.assertErrback(d)
      results = ctx.get_rule_results('m:a')
      self.assertEqual(results[0], Status.FAILED)
      results = ctx.get_rule_results('m:b')
      self.assertEqual(results[0], Status.FAILED)

    project = Project(modules=[Module('m', rules=[
        FailRule('a'),
        SucceedRule('b', deps=[':a'])])])
    with BuildContext(self.build_env, project, stop_on_error=True) as ctx:
      d = ctx.execute_async(['m:b'])
      ctx.wait(d)
      self.assertErrback(d)
      results = ctx.get_rule_results('m:a')
      self.assertEqual(results[0], Status.FAILED)
      results = ctx.get_rule_results('m:b')
      self.assertEqual(results[0], Status.WAITING)

    # TODO(benvanik): test stop_on_error
    # TODO(benvanik): test raise_on_error

  def testCaching(self):
|
||||
# TODO(benvanik): test caching and force arg
|
||||
pass
|
||||
|
||||
def testBuild(self):
|
||||
project = Project(module_resolver=FileModuleResolver(self.root_path))
|
||||
|
||||
with BuildContext(self.build_env, project) as ctx:
|
||||
d = ctx.execute_async([':a'])
|
||||
ctx.wait(d)
|
||||
self.assertCallback(d)
|
||||
# TODO(benvanik): the rest of this
|
||||
|
||||
|
||||
class RuleContextTest(FixtureTestCase):
|
||||
"""Behavioral tests of the RuleContext type."""
|
||||
fixture = 'simple'
|
||||
|
||||
def setUp(self):
|
||||
super(RuleContextTest, self).setUp()
|
||||
self.build_env = BuildEnvironment(root_path=self.root_path)
|
||||
|
||||
def testStatus(self):
|
||||
project = Project(module_resolver=FileModuleResolver(self.root_path))
|
||||
build_ctx = BuildContext(self.build_env, project)
|
||||
project = Project(module_resolver=FileModuleResolver(self.root_path))
|
||||
rule = project.resolve_rule(':a')
|
||||
|
||||
class SuccessfulRuleContext(RuleContext):
|
||||
def begin(self):
|
||||
super(SuccessfulRuleContext, self).begin()
|
||||
self._succeed()
|
||||
|
||||
rule_ctx = SuccessfulRuleContext(build_ctx, rule)
|
||||
self.assertEqual(rule_ctx.status, Status.WAITING)
|
||||
rule_ctx.begin()
|
||||
self.assertTrue(rule_ctx.deferred.is_done())
|
||||
self.assertEqual(rule_ctx.status, Status.SUCCEEDED)
|
||||
|
||||
class FailedRuleContext(RuleContext):
|
||||
def begin(self):
|
||||
super(FailedRuleContext, self).begin()
|
||||
self._fail()
|
||||
|
||||
rule_ctx = FailedRuleContext(build_ctx, rule)
|
||||
self.assertEqual(rule_ctx.status, Status.WAITING)
|
||||
rule_ctx.begin()
|
||||
self.assertTrue(rule_ctx.deferred.is_done())
|
||||
self.assertEqual(rule_ctx.status, Status.FAILED)
|
||||
self.assertIsNone(rule_ctx.exception)
|
||||
|
||||
class FailedWithErrorRuleContext(RuleContext):
|
||||
def begin(self):
|
||||
super(FailedWithErrorRuleContext, self).begin()
|
||||
self._fail(RuntimeError('Failure'))
|
||||
|
||||
rule_ctx = FailedWithErrorRuleContext(build_ctx, rule)
|
||||
self.assertEqual(rule_ctx.status, Status.WAITING)
|
||||
rule_ctx.begin()
|
||||
self.assertTrue(rule_ctx.deferred.is_done())
|
||||
self.assertEqual(rule_ctx.status, Status.FAILED)
|
||||
self.assertIsInstance(rule_ctx.exception, RuntimeError)
|
||||
|
||||
class SuccessfulAsyncRuleContext(RuleContext):
|
||||
def begin(self):
|
||||
super(SuccessfulAsyncRuleContext, self).begin()
|
||||
d = Deferred()
|
||||
self._chain(d)
|
||||
d.callback()
|
||||
|
||||
rule_ctx = SuccessfulAsyncRuleContext(build_ctx, rule)
|
||||
self.assertEqual(rule_ctx.status, Status.WAITING)
|
||||
rule_ctx.begin()
|
||||
self.assertTrue(rule_ctx.deferred.is_done())
|
||||
self.assertEqual(rule_ctx.status, Status.SUCCEEDED)
|
||||
|
||||
class FailedAsyncRuleContext(RuleContext):
|
||||
def begin(self):
|
||||
super(FailedAsyncRuleContext, self).begin()
|
||||
d = Deferred()
|
||||
self._chain(d)
|
||||
d.errback(RuntimeError('Failure'))
|
||||
|
||||
rule_ctx = FailedAsyncRuleContext(build_ctx, rule)
|
||||
self.assertEqual(rule_ctx.status, Status.WAITING)
|
||||
rule_ctx.begin()
|
||||
self.assertTrue(rule_ctx.deferred.is_done())
|
||||
self.assertEqual(rule_ctx.status, Status.FAILED)
|
||||
self.assertIsInstance(rule_ctx.exception, RuntimeError)
|
||||
|
||||
class FailedManyAsyncRuleContext(RuleContext):
|
||||
def begin(self):
|
||||
super(FailedManyAsyncRuleContext, self).begin()
|
||||
d1 = Deferred()
|
||||
d2 = Deferred()
|
||||
self._chain([d1, d2])
|
||||
d1.callback()
|
||||
d2.errback(RuntimeError('Failure'))
|
||||
|
||||
rule_ctx = FailedManyAsyncRuleContext(build_ctx, rule)
|
||||
self.assertEqual(rule_ctx.status, Status.WAITING)
|
||||
rule_ctx.begin()
|
||||
self.assertTrue(rule_ctx.deferred.is_done())
|
||||
self.assertEqual(rule_ctx.status, Status.FAILED)
|
||||
self.assertIsInstance(rule_ctx.exception, RuntimeError)
|
||||
|
||||
def testFileInputs(self):
|
||||
project = Project(module_resolver=FileModuleResolver(self.root_path))
|
||||
build_ctx = BuildContext(self.build_env, project)
|
||||
|
||||
rule = project.resolve_rule(':file_input')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertEqual(
|
||||
set([os.path.basename(f) for f in rule_outputs]),
|
||||
set(['a.txt']))
|
||||
|
||||
rule = project.resolve_rule(':local_txt')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertEqual(
|
||||
set([os.path.basename(f) for f in rule_outputs]),
|
||||
set(['a.txt', 'b.txt', 'c.txt']))
|
||||
|
||||
rule = project.resolve_rule(':recursive_txt')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertEqual(
|
||||
set([os.path.basename(f) for f in rule_outputs]),
|
||||
set(['a.txt', 'b.txt', 'c.txt', 'd.txt', 'e.txt']))
|
||||
|
||||
def testFileInputFilters(self):
|
||||
project = Project(module_resolver=FileModuleResolver(self.root_path))
|
||||
build_ctx = BuildContext(self.build_env, project)
|
||||
|
||||
rule = project.resolve_rule(':missing_txt')
|
||||
with self.assertRaises(OSError):
|
||||
build_ctx._execute_rule(rule)
|
||||
|
||||
rule = project.resolve_rule(':missing_glob_txt')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertEqual(len(rule_outputs), 0)
|
||||
|
||||
rule = project.resolve_rule(':local_txt_filter')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertEqual(
|
||||
set([os.path.basename(f) for f in rule_outputs]),
|
||||
set(['a.txt', 'b.txt', 'c.txt']))
|
||||
|
||||
rule = project.resolve_rule(':recursive_txt_filter')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertEqual(
|
||||
set([os.path.basename(f) for f in rule_outputs]),
|
||||
set(['a.txt', 'b.txt', 'c.txt', 'd.txt', 'e.txt']))
|
||||
|
||||
def testRuleInputs(self):
|
||||
project = Project(module_resolver=FileModuleResolver(self.root_path))
|
||||
build_ctx = BuildContext(self.build_env, project)
|
||||
|
||||
rule = project.resolve_rule(':file_input')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertNotEqual(len(rule_outputs), 0)
|
||||
|
||||
rule = project.resolve_rule(':rule_input')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertEqual(
|
||||
set([os.path.basename(f) for f in rule_outputs]),
|
||||
set(['a.txt']))
|
||||
|
||||
rule = project.resolve_rule(':mixed_input')
|
||||
d = build_ctx._execute_rule(rule)
|
||||
self.assertTrue(d.is_done())
|
||||
rule_outputs = build_ctx.get_rule_outputs(rule)
|
||||
self.assertEqual(
|
||||
set([os.path.basename(f) for f in rule_outputs]),
|
||||
set(['a.txt', 'b.txt']))
|
||||
|
||||
rule = project.resolve_rule(':missing_input')
|
||||
with self.assertRaises(KeyError):
|
||||
build_ctx._execute_rule(rule)
|
||||
|
||||
build_ctx = BuildContext(self.build_env, project)
|
||||
rule = project.resolve_rule(':rule_input')
|
||||
with self.assertRaises(RuntimeError):
|
||||
build_ctx._execute_rule(rule)
|
||||
|
||||
def _compare_path(self, result, expected):
|
||||
result = os.path.relpath(result, self.root_path)
|
||||
self.assertEqual(result, expected)
|
||||
|
||||
def testTargetPaths(self):
|
||||
project = Project(module_resolver=FileModuleResolver(self.root_path))
|
||||
build_ctx = BuildContext(self.build_env, project)
|
||||
|
||||
class SuccessfulRuleContext(RuleContext):
|
||||
def begin(self):
|
||||
super(SuccessfulRuleContext, self).begin()
|
||||
self._succeed()
|
||||
|
||||
rule = project.resolve_rule(':a')
|
||||
rule_ctx = SuccessfulRuleContext(build_ctx, rule)
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path(), 'build-out/a')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path(suffix='.txt'), 'build-out/a.txt')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path('f'), 'build-out/f')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path('f', suffix='.txt'), 'build-out/f.txt')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path('dir/f'), 'build-out/dir/f')
|
||||
# Note that both are implemented the same way
|
||||
self._compare_path(
|
||||
rule_ctx._get_gen_path(), 'build-gen/a')
|
||||
self._compare_path(
|
||||
rule_ctx._get_gen_path(suffix='.txt'), 'build-gen/a.txt')
|
||||
self._compare_path(
|
||||
rule_ctx._get_gen_path('f'), 'build-gen/f')
|
||||
self._compare_path(
|
||||
rule_ctx._get_gen_path('f', suffix='.txt'), 'build-gen/f.txt')
|
||||
self._compare_path(
|
||||
rule_ctx._get_gen_path('dir/f'), 'build-gen/dir/f')
|
||||
|
||||
rule = project.resolve_rule('dir/dir_2:d')
|
||||
rule_ctx = SuccessfulRuleContext(build_ctx, rule)
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path(), 'build-out/dir/dir_2/d')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path(suffix='.txt'), 'build-out/dir/dir_2/d.txt')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path('f'), 'build-out/dir/dir_2/f')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path('f', suffix='.txt'), 'build-out/dir/dir_2/f.txt')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path('dir/f'), 'build-out/dir/dir_2/dir/f')
|
||||
|
||||
def testTargetSrcPaths(self):
|
||||
project = Project(module_resolver=FileModuleResolver(self.root_path))
|
||||
build_ctx = BuildContext(self.build_env, project)
|
||||
|
||||
class SuccessfulRuleContext(RuleContext):
|
||||
def begin(self):
|
||||
super(SuccessfulRuleContext, self).begin()
|
||||
self._succeed()
|
||||
|
||||
rule = project.resolve_rule(':a')
|
||||
rule_ctx = SuccessfulRuleContext(build_ctx, rule)
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path_for_src(os.path.join(self.root_path, 'a.txt')),
|
||||
'build-out/a.txt')
|
||||
self._compare_path(
|
||||
rule_ctx._get_out_path_for_src(os.path.join(self.root_path,
|
||||
'dir/a.txt')),
|
||||
'build-out/dir/a.txt')
|
||||
# Note that both are implemented the same way
|
||||
self._compare_path(
|
||||
rule_ctx._get_gen_path_for_src(os.path.join(self.root_path, 'a.txt')),
|
||||
'build-gen/a.txt')
|
||||
self._compare_path(
|
||||
rule_ctx._get_gen_path_for_src(os.path.join(self.root_path,
|
||||
'dir/a.txt')),
|
||||
'build-gen/dir/a.txt')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
|
@@ -0,0 +1,210 @@
|
|||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Rule dependency graph.
|
||||
|
||||
A rule graph represents all of the rules in a project as they have been resolved
|
||||
and tracked for dependencies. The graph can then be queried for various
|
||||
information such as ordered build rule sequences.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import networkx as nx
|
||||
|
||||
import project
|
||||
import util
|
||||
|
||||
|
||||
class RuleGraph(object):
|
||||
"""A graph of rule nodes.
|
||||
"""
|
||||
|
||||
def __init__(self, project):
|
||||
"""Initializes a rule graph.
|
||||
|
||||
Args:
|
||||
project: Project to use for resolution.
|
||||
"""
|
||||
self.project = project
|
||||
self.graph = nx.DiGraph()
|
||||
# A map of rule paths to nodes, if they exist
|
||||
self.rule_nodes = {}
|
||||
|
||||
def has_dependency(self, rule_path, predecessor_rule_path):
|
||||
"""Checks to see if the given rule has a dependency on another rule.
|
||||
|
||||
Args:
|
||||
rule_path: The name of the rule to check.
|
||||
predecessor_rule_path: A potential predecessor rule.
|
||||
|
||||
Returns:
|
||||
True if rule_path depends on predecessor_rule_path, directly or transitively.
|
||||
|
||||
Raises:
|
||||
KeyError: One of the given rules was not found.
|
||||
"""
|
||||
rule_node = self.rule_nodes.get(rule_path, None)
|
||||
if not rule_node:
|
||||
raise KeyError('Rule "%s" not found' % (rule_path))
|
||||
predecessor_rule_node = self.rule_nodes.get(predecessor_rule_path, None)
|
||||
if not predecessor_rule_node:
|
||||
raise KeyError('Rule "%s" not found' % (predecessor_rule_path))
|
||||
return nx.has_path(self.graph, predecessor_rule_node, rule_node)
|
||||
|
||||
def _ensure_rules_present(self, rule_paths, requesting_module=None):
|
||||
"""Ensures that the given list of rules are present in the graph, and if not
|
||||
recursively loads them.
|
||||
|
||||
Args:
|
||||
rule_paths: A list of target rule paths to add to the graph.
|
||||
requesting_module: Module that is requesting the given rules or None if
|
||||
all rule paths are absolute.
|
||||
"""
|
||||
# Add all of the rules listed
|
||||
rules = []
|
||||
for rule_path in rule_paths:
|
||||
# Attempt to resolve the rule
|
||||
rule = self.project.resolve_rule(rule_path,
|
||||
requesting_module=requesting_module)
|
||||
if not rule:
|
||||
raise KeyError('Rule "%s" unable to be resolved' % (rule_path))
|
||||
rules.append(rule)
|
||||
|
||||
# If already present, ignore (no need to recurse)
|
||||
if rule.path in self.rule_nodes:
|
||||
continue
|
||||
|
||||
# Wrap with our node and add it to the graph
|
||||
rule_node = _RuleNode(rule)
|
||||
self.rule_nodes[rule.path] = rule_node
|
||||
self.graph.add_node(rule_node)
|
||||
|
||||
# Recursively resolve all dependent rules
|
||||
dependent_rule_paths = []
|
||||
for dep in rule.get_dependent_paths():
|
||||
if util.is_rule_path(dep):
|
||||
dependent_rule_paths.append(dep)
|
||||
if len(dependent_rule_paths):
|
||||
self._ensure_rules_present(dependent_rule_paths,
|
||||
requesting_module=rule.parent_module)
|
||||
|
||||
# Add edges for all of the requested rules (at this point, all rules should
|
||||
# be added to the graph)
|
||||
for rule in rules:
|
||||
rule_node = self.rule_nodes[rule.path]
|
||||
for dep in rule_node.rule.get_dependent_paths():
|
||||
if util.is_rule_path(dep):
|
||||
dep_rule = self.project.resolve_rule(dep,
|
||||
requesting_module=rule.parent_module)
|
||||
dep_node = self.rule_nodes.get(dep_rule.path, None)
|
||||
# Node should exist due to recursive addition above
|
||||
assert dep_node
|
||||
self.graph.add_edge(dep_node, rule_node)
|
||||
|
||||
# Ensure the graph is a DAG (no cycles)
|
||||
if not nx.is_directed_acyclic_graph(self.graph):
|
||||
# TODO(benvanik): use nx.simple_cycles() to print the cycles
|
||||
raise ValueError('Cycle detected in the rule graph: %s' % (
|
||||
nx.simple_cycles(self.graph)))
|
||||
|
||||
def add_rules_from_module(self, module):
|
||||
"""Adds all rules (and their dependencies) from the given module.
|
||||
|
||||
Args:
|
||||
module: A module with rules to add.
|
||||
"""
|
||||
rule_paths = []
|
||||
for rule in module.rule_iter():
|
||||
rule_paths.append(rule.path)
|
||||
self._ensure_rules_present(rule_paths, requesting_module=module)
|
||||
|
||||
def has_rule(self, rule_path):
|
||||
"""Whether the graph has the given rule loaded.
|
||||
|
||||
Args:
|
||||
rule_path: Full rule path.
|
||||
|
||||
Returns:
|
||||
True if the given rule has been resolved and added to the graph.
|
||||
"""
|
||||
return self.rule_nodes.get(rule_path, None) is not None
|
||||
|
||||
def calculate_rule_sequence(self, target_rule_paths):
|
||||
"""Calculates an ordered sequence of rules terminating with the given
|
||||
target rules.
|
||||
|
||||
By passing multiple target names it's possible to build a combined sequence
|
||||
that ensures all the given targets are included with no duplicate
|
||||
dependencies.
|
||||
|
||||
Args:
|
||||
target_rule_paths: A list of target rule paths to include in the
|
||||
sequence, or a single target rule path.
|
||||
|
||||
Returns:
|
||||
An ordered list of Rule instances including all of the given target rules
|
||||
and their dependencies.
|
||||
|
||||
Raises:
|
||||
KeyError: One of the given rules was not found.
|
||||
"""
|
||||
if isinstance(target_rule_paths, str):
|
||||
target_rule_paths = [target_rule_paths]
|
||||
|
||||
# Ensure the graph has everything required - if things go south this will
|
||||
# raise errors
|
||||
self._ensure_rules_present(target_rule_paths)
|
||||
|
||||
# Reversed graph to make sorting possible
|
||||
# If this gets expensive (or many sequences are calculated) it could be
|
||||
# cached
|
||||
reverse_graph = self.graph.reverse()
|
||||
|
||||
# Paths are added in reverse (from target to dependencies)
|
||||
sequence_graph = nx.DiGraph()
|
||||
|
||||
def _add_rule_node_dependencies(rule_node):
|
||||
if sequence_graph.has_node(rule_node):
|
||||
# Already present in the sequence graph, no need to add again
|
||||
return
|
||||
# Add node
|
||||
sequence_graph.add_node(rule_node)
|
||||
# Recursively add all dependent children
|
||||
for out_edge in reverse_graph.out_edges_iter(rule_node):
|
||||
out_rule_node = out_edge[1]
|
||||
if not sequence_graph.has_node(out_rule_node):
|
||||
_add_rule_node_dependencies(out_rule_node)
|
||||
sequence_graph.add_edge(rule_node, out_rule_node)
|
||||
|
||||
# Add all paths for targets
|
||||
# Note that all nodes are present if we got this far, so no need to check
|
||||
for rule_path in target_rule_paths:
|
||||
rule = self.project.resolve_rule(rule_path)
|
||||
assert rule
|
||||
rule_node = self.rule_nodes.get(rule.path, None)
|
||||
assert rule_node
|
||||
_add_rule_node_dependencies(rule_node)
|
||||
|
||||
# Reverse the graph so that it's dependencies -> targets
|
||||
reversed_sequence_graph = sequence_graph.reverse()
|
||||
|
||||
# Get the list of nodes in sorted order
|
||||
rule_sequence = []
|
||||
for rule_node in nx.topological_sort(reversed_sequence_graph):
|
||||
rule_sequence.append(rule_node.rule)
|
||||
return rule_sequence
|
||||
|
||||
class _RuleNode(object):
|
||||
"""A node type that references a rule in the project."""
|
||||
|
||||
def __init__(self, rule):
|
||||
"""Initializes a rule node.
|
||||
|
||||
Args:
|
||||
rule: The rule this node describes.
|
||||
"""
|
||||
self.rule = rule
|
||||
|
||||
def __repr__(self):
|
||||
return self.rule.path
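# Usage sketch (illustrative only; mirrors graph_test.py):
#
#   from module import Module
#   from rule import Rule
#   from project import Project
#
#   module = Module('m', rules=[Rule('a'), Rule('b', deps=[':a'])])
#   project = Project(modules=[module])
#   graph = RuleGraph(project)
#   graph.add_rules_from_module(module)
#   graph.has_dependency('m:b', 'm:a')      # True - direct or transitive
#   graph.calculate_rule_sequence(['m:b'])  # rules for m:a then m:b;
#                                           # dependencies always sort first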
|
|
@@ -0,0 +1,162 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Tests for the graph module.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import unittest2
|
||||
|
||||
from graph import *
|
||||
from module import *
|
||||
from rule import *
|
||||
from project import *
|
||||
|
||||
|
||||
class RuleGraphTest(unittest2.TestCase):
|
||||
"""Behavioral tests of the RuleGraph type."""
|
||||
|
||||
def setUp(self):
|
||||
super(RuleGraphTest, self).setUp()
|
||||
|
||||
self.module_1 = Module('m1', rules=[
|
||||
Rule('a1'),
|
||||
Rule('a2'),
|
||||
Rule('a3'),
|
||||
Rule('b', srcs=[':a1', 'a/b/c'], deps=[':a2'],),
|
||||
Rule('c', deps=[':b'],),])
|
||||
self.module_2 = Module('m2', rules=[
|
||||
Rule('p', deps=['m1:c'],)])
|
||||
self.project = Project(modules=[self.module_1, self.module_2])
|
||||
|
||||
def testConstruction(self):
|
||||
project = Project()
|
||||
graph = RuleGraph(project)
|
||||
self.assertIs(graph.project, project)
|
||||
|
||||
project = self.project
|
||||
graph = RuleGraph(project)
|
||||
self.assertIs(graph.project, project)
|
||||
|
||||
def testAddRulesFromModule(self):
|
||||
graph = RuleGraph(self.project)
|
||||
graph.add_rules_from_module(self.module_1)
|
||||
self.assertTrue(graph.has_rule('m1:a1'))
|
||||
self.assertTrue(graph.has_rule('m1:a2'))
|
||||
self.assertTrue(graph.has_rule('m1:a3'))
|
||||
self.assertTrue(graph.has_rule('m1:b'))
|
||||
self.assertTrue(graph.has_rule('m1:c'))
|
||||
self.assertFalse(graph.has_rule('m2:p'))
|
||||
graph.add_rules_from_module(self.module_2)
|
||||
self.assertTrue(graph.has_rule('m2:p'))
|
||||
|
||||
graph = RuleGraph(self.project)
|
||||
graph.add_rules_from_module(self.module_2)
|
||||
self.assertTrue(graph.has_rule('m2:p'))
|
||||
self.assertTrue(graph.has_rule('m1:a1'))
|
||||
self.assertTrue(graph.has_rule('m1:a2'))
|
||||
self.assertFalse(graph.has_rule('m1:a3'))
|
||||
self.assertTrue(graph.has_rule('m1:b'))
|
||||
self.assertTrue(graph.has_rule('m1:c'))
|
||||
|
||||
def testCycle(self):
|
||||
module = Module('mc', rules=[
|
||||
Rule('a', deps=[':b']),
|
||||
Rule('b', deps=[':a'])])
|
||||
project = Project(modules=[module])
|
||||
graph = RuleGraph(project)
|
||||
with self.assertRaises(ValueError):
|
||||
graph.add_rules_from_module(module)
|
||||
|
||||
module_1 = Module('mc1', rules=[Rule('a', deps=['mc2:a'])])
|
||||
module_2 = Module('mc2', rules=[Rule('a', deps=['mc1:a'])])
|
||||
project = Project(modules=[module_1, module_2])
|
||||
graph = RuleGraph(project)
|
||||
with self.assertRaises(ValueError):
|
||||
graph.add_rules_from_module(module_1)
|
||||
|
||||
def testHasRule(self):
|
||||
graph = RuleGraph(self.project)
|
||||
graph.add_rules_from_module(self.module_1)
|
||||
self.assertTrue(graph.has_rule('m1:a1'))
|
||||
self.assertFalse(graph.has_rule('m2:p'))
|
||||
self.assertFalse(graph.has_rule('x:x'))
|
||||
|
||||
def testHasDependency(self):
|
||||
graph = RuleGraph(Project())
|
||||
with self.assertRaises(KeyError):
|
||||
graph.has_dependency('m1:a', 'm1:b')
|
||||
|
||||
graph = RuleGraph(self.project)
|
||||
graph.add_rules_from_module(self.module_1)
|
||||
self.assertTrue(graph.has_dependency('m1:c', 'm1:c'))
|
||||
self.assertTrue(graph.has_dependency('m1:a3', 'm1:a3'))
|
||||
self.assertTrue(graph.has_dependency('m1:c', 'm1:b'))
|
||||
self.assertTrue(graph.has_dependency('m1:c', 'm1:a1'))
|
||||
self.assertTrue(graph.has_dependency('m1:b', 'm1:a1'))
|
||||
self.assertFalse(graph.has_dependency('m1:b', 'm1:c'))
|
||||
self.assertFalse(graph.has_dependency('m1:a1', 'm1:a2'))
|
||||
self.assertFalse(graph.has_dependency('m1:c', 'm1:a3'))
|
||||
with self.assertRaises(KeyError):
|
||||
graph.has_dependency('m1:c', 'm1:x')
|
||||
with self.assertRaises(KeyError):
|
||||
graph.has_dependency('m1:x', 'm1:c')
|
||||
with self.assertRaises(KeyError):
|
||||
graph.has_dependency('m1:x', 'm1:x')
|
||||
|
||||
def testCalculateRuleSequence(self):
|
||||
graph = RuleGraph(self.project)
|
||||
|
||||
with self.assertRaises(KeyError):
|
||||
graph.calculate_rule_sequence(':x')
|
||||
with self.assertRaises(KeyError):
|
||||
graph.calculate_rule_sequence([':x'])
|
||||
with self.assertRaises(KeyError):
|
||||
graph.calculate_rule_sequence(['m1:x'])
|
||||
|
||||
seq = graph.calculate_rule_sequence('m1:a1')
|
||||
self.assertEqual(len(seq), 1)
|
||||
self.assertEqual(seq[0].name, 'a1')
|
||||
seq = graph.calculate_rule_sequence(['m1:a1'])
|
||||
self.assertEqual(len(seq), 1)
|
||||
self.assertEqual(seq[0].name, 'a1')
|
||||
|
||||
seq = graph.calculate_rule_sequence(['m1:b'])
|
||||
self.assertEqual(len(seq), 3)
|
||||
self.assertTrue((seq[0].name in ['a1', 'a2']) or
|
||||
(seq[1].name in ['a1', 'a2']))
|
||||
self.assertEqual(seq[2].name, 'b')
|
||||
|
||||
seq = graph.calculate_rule_sequence(['m1:a1', 'm1:b'])
|
||||
self.assertEqual(len(seq), 3)
|
||||
self.assertTrue((seq[0].name in ['a1', 'a2']) or
|
||||
(seq[1].name in ['a1', 'a2']))
|
||||
self.assertEqual(seq[2].name, 'b')
|
||||
|
||||
seq = graph.calculate_rule_sequence(['m1:a1', 'm1:a3'])
|
||||
self.assertEqual(len(seq), 2)
|
||||
self.assertTrue((seq[0].name in ['a1', 'a3']) or
|
||||
(seq[1].name in ['a1', 'a3']))
|
||||
|
||||
module = Module('mx', rules=[Rule('a', deps=[':b'])])
|
||||
project = Project(modules=[module])
|
||||
graph = RuleGraph(project)
|
||||
with self.assertRaises(KeyError):
|
||||
graph.calculate_rule_sequence('mx:a')
|
||||
|
||||
def testCrossModuleRules(self):
|
||||
graph = RuleGraph(self.project)
|
||||
|
||||
seq = graph.calculate_rule_sequence(['m2:p'])
|
||||
self.assertEqual(len(seq), 5)
|
||||
self.assertTrue((seq[0].name in ['a1', 'a2']) or
|
||||
(seq[1].name in ['a1', 'a2']))
|
||||
self.assertEqual(seq[4].path, 'm2:p')
|
||||
self.assertTrue(graph.has_dependency('m2:p', 'm1:a1'))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
|
@@ -0,0 +1,158 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Management shell script.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import fnmatch
|
||||
import imp
|
||||
import os
|
||||
import sys
|
||||
|
||||
import util
|
||||
|
||||
|
||||
def _get_anvil_path():
|
||||
"""Gets the anvil/ path.
|
||||
|
||||
Returns:
|
||||
The full path to the anvil/ source.
|
||||
"""
|
||||
return os.path.normpath(os.path.dirname(__file__))
|
||||
|
||||
|
||||
def manage_command(command_name, command_help=None):
|
||||
"""A decorator for management command functions.
|
||||
Use this to register management command functions. A function decorated with
|
||||
this will be discovered and callable via manage.py.
|
||||
|
||||
Functions are expected to take (args, cwd) and return an error number that
|
||||
will be passed back to the shell.
|
||||
|
||||
Args:
|
||||
command_name: The name of the command exposed to the management script.
|
||||
command_help: Help text printed alongside the command when queried.
|
||||
"""
|
||||
def _exec_command(fn):
|
||||
fn.command_name = command_name
|
||||
fn.command_help = command_help
|
||||
return fn
|
||||
return _exec_command
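# Example command module (hypothetical; it would live under anvil/commands/
# so that discover_commands below can find it):
#
#   @manage_command('clean', 'Removes all build output')
#   def clean(args, cwd):
#     # ... do the work ...
#     return 0  # error number handed back to the shell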
|
||||
|
||||
|
||||
def discover_commands(search_path=None):
|
||||
"""Looks for all commands and returns a dictionary of them.
|
||||
Commands are looked for under anvil/commands/, and should be functions
|
||||
decorated with @manage_command.
|
||||
|
||||
Args:
|
||||
search_path: Search path to use instead of the default.
|
||||
|
||||
Returns:
|
||||
A dictionary containing command-to-function mappings.
|
||||
|
||||
Raises:
|
||||
KeyError: Multiple commands have the same name.
|
||||
"""
|
||||
commands = {}
|
||||
if not search_path:
|
||||
commands_path = os.path.join(_get_anvil_path(), 'commands')
|
||||
else:
|
||||
commands_path = search_path
|
||||
for (root, dirs, files) in os.walk(commands_path):
|
||||
for name in files:
|
||||
if fnmatch.fnmatch(name, '*.py'):
|
||||
full_path = os.path.join(root, name)
|
||||
module = imp.load_source(os.path.splitext(name)[0], full_path)
|
||||
for attr_name in dir(module):
|
||||
if hasattr(getattr(module, attr_name), 'command_name'):
|
||||
command_fn = getattr(module, attr_name)
|
||||
command_name = command_fn.command_name
|
||||
if command_name in commands:
|
||||
raise KeyError('Command "%s" already defined' % (command_name))
|
||||
commands[command_name] = command_fn
|
||||
return commands
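# Sketch of the resulting mapping (command name illustrative):
#
#   commands = discover_commands()
#   commands['build'](args, cwd)  # values are the decorated functions,
#                                 # invoked as (args, cwd) -> error number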
|
||||
|
||||
|
||||
def usage(commands):
|
||||
"""Gets usage info that can be displayed to the user.
|
||||
|
||||
Args:
|
||||
commands: A command dictionary from discover_commands.
|
||||
|
||||
Returns:
|
||||
A string containing usage info and a command listing.
|
||||
"""
|
||||
s = 'manage.py command [-h]\n'
|
||||
s += '\n'
|
||||
s += 'Commands:\n'
|
||||
for command_name in commands:
|
||||
s += ' %s\n' % (command_name)
|
||||
command_help = commands[command_name].command_help
|
||||
if command_help:
|
||||
s += ' %s\n' % (command_help)
|
||||
return s
|
||||
|
||||
|
||||
def run_command(args=None, cwd=None, commands=None):
|
||||
"""Runs a command with the given context.
|
||||
|
||||
Args:
|
||||
args: Arguments, with the command to execute as the first.
|
||||
cwd: Current working directory override.
|
||||
commands: A command dictionary from discover_commands to override the
|
||||
defaults.
|
||||
|
||||
Returns:
|
||||
0 if the command succeeded and non-zero otherwise.
|
||||
|
||||
Raises:
|
||||
ValueError: The command could not be found or was not specified.
|
||||
"""
|
||||
args = args if args else []
|
||||
cwd = cwd if cwd else os.getcwd()
|
||||
|
||||
commands = commands if commands else discover_commands()
|
||||
|
||||
# TODO(benvanik): look for a .anvilrc, load it to find
|
||||
# - extra command search paths
|
||||
# - extra rule search paths
|
||||
# Also check to see if it was specified in args?
|
||||
|
||||
if not len(args):
|
||||
raise ValueError('No command given')
|
||||
command_name = args[0]
|
||||
if command_name not in commands:
|
||||
raise ValueError('Command "%s" not found' % (command_name))
|
||||
|
||||
command_fn = commands[command_name]
|
||||
return command_fn(args[1:], cwd)
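# Sketch: dispatching without discovery (mirrors manage_test.py):
#
#   def some_command(args, cwd):
#     return 0
#   run_command(['some_command'], commands={'some_command': some_command})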
|
||||
|
||||
|
||||
def main(): # pragma: no cover
|
||||
"""Entry point for scripts."""
|
||||
# Always add anvil/ to the path
|
||||
sys.path.insert(1, _get_anvil_path())
|
||||
|
||||
commands = discover_commands()
|
||||
|
||||
try:
|
||||
return_code = run_command(args=sys.argv[1:],
|
||||
cwd=os.getcwd(),
|
||||
commands=commands)
|
||||
except ValueError:
|
||||
print usage(commands)
|
||||
return_code = 1
|
||||
except Exception as e:
|
||||
#print e
|
||||
raise
|
||||
|
||||
sys.exit(return_code)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -0,0 +1,66 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Tests for the manage module.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import types
|
||||
import unittest2
|
||||
|
||||
import manage
|
||||
import test
|
||||
from manage import *
|
||||
from test import AsyncTestCase, FixtureTestCase
|
||||
|
||||
|
||||
class ManageTest(FixtureTestCase):
|
||||
"""Behavioral tests for the management wrapper."""
|
||||
fixture = 'manage'
|
||||
|
||||
def testDecorator(self):
|
||||
@manage_command('command_1')
|
||||
def command_1(args, cwd):
|
||||
return 0
|
||||
self.assertEqual(command_1.command_name, 'command_1')
|
||||
|
||||
def testDiscovery(self):
|
||||
# Check built-in
|
||||
commands = manage.discover_commands()
|
||||
self.assertIn('build', commands)
|
||||
self.assertIsInstance(commands['build'], types.FunctionType)
|
||||
|
||||
# Check custom
|
||||
commands = manage.discover_commands(
|
||||
os.path.join(self.root_path, 'commands'))
|
||||
self.assertIn('test_command', commands)
|
||||
self.assertIsInstance(commands['test_command'], types.FunctionType)
|
||||
self.assertEqual(commands['test_command']([], ''), 123)
|
||||
|
||||
# Duplicate command names/etc
|
||||
with self.assertRaises(KeyError):
|
||||
manage.discover_commands(os.path.join(self.root_path, 'bad_commands'))
|
||||
|
||||
def testUsage(self):
|
||||
commands = manage.discover_commands()
|
||||
self.assertNotEqual(len(manage.usage(commands)), 0)
|
||||
|
||||
def testMain(self):
|
||||
with self.assertRaises(ValueError):
|
||||
manage.run_command()
|
||||
with self.assertRaises(ValueError):
|
||||
manage.run_command(['xxx'])
|
||||
|
||||
def some_command(args, cwd):
|
||||
self.assertEqual(len(args), 0)
|
||||
self.assertNotEqual(len(cwd), 0)
|
||||
return 123
|
||||
self.assertEqual(manage.run_command(
|
||||
['some_command'], commands={'some_command': some_command}), 123)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
|
@@ -0,0 +1,344 @@
|
|||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Module representation.
|
||||
|
||||
A module is a simple namespace of rules, serving no purpose other than to allow
|
||||
for easier organization of projects.
|
||||
|
||||
Rules may refer to other rules in the same module with a shorthand (':foo') or
|
||||
rules in other modules by specifying a module-relative path
|
||||
('stuff/other.py:bar').
|
||||
|
||||
TODO(benvanik): details on path resolution
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import ast
|
||||
import glob2
|
||||
import io
|
||||
import os
|
||||
|
||||
import rule
|
||||
from rule import RuleNamespace
|
||||
|
||||
|
||||
class Module(object):
|
||||
"""A rule module.
|
||||
Modules are a flat namespace of rules. The actual resolution of rules occurs
|
||||
later on and is done using all of the modules in a project, allowing for
|
||||
cycles/lazy evaluation/etc.
|
||||
"""
|
||||
|
||||
def __init__(self, path, rules=None):
|
||||
"""Initializes a module.
|
||||
|
||||
Args:
|
||||
path: A path for the module - should be the path on disk or some other
|
||||
string that is used for referencing.
|
||||
rules: A list of rules to add to the module.
|
||||
"""
|
||||
self.path = path
|
||||
self.rules = {}
|
||||
if rules and len(rules):
|
||||
self.add_rules(rules)
|
||||
|
||||
def add_rule(self, rule):
|
||||
"""Adds a rule to the module.
|
||||
|
||||
Args:
|
||||
rule: A rule to add. Must have a unique name.
|
||||
|
||||
Raises:
|
||||
KeyError: A rule with the given name already exists in the module.
|
||||
"""
|
||||
self.add_rules([rule])
|
||||
|
||||
def add_rules(self, rules):
|
||||
"""Adds a list of rules to the module.
|
||||
|
||||
Args:
|
||||
rules: A list of rules to add. Each must have a unique name.
|
||||
|
||||
Raises:
|
||||
KeyError: A rule with the given name already exists in the module.
|
||||
"""
|
||||
for rule in rules:
|
||||
if self.rules.get(rule.name, None):
|
||||
raise KeyError('A rule with the name "%s" is already defined' % (
|
||||
rule.name))
|
||||
for rule in rules:
|
||||
self.rules[rule.name] = rule
|
||||
rule.set_parent_module(self)
|
||||
|
||||
def get_rule(self, rule_name):
|
||||
"""Gets a rule by name.
|
||||
|
||||
Args:
|
||||
rule_name: Name of the rule to find. May include a leading colon.
|
||||
|
||||
Returns:
|
||||
The rule with the given name or None if it was not found.
|
||||
|
||||
Raises:
|
||||
NameError: The given rule name was invalid.
|
||||
"""
|
||||
if len(rule_name) and rule_name[0] == ':':
|
||||
rule_name = rule_name[1:]
|
||||
if not len(rule_name):
|
||||
raise NameError('Rule name "%s" is invalid' % (rule_name))
|
||||
return self.rules.get(rule_name, None)
|
||||
|
||||
def rule_list(self):
|
||||
"""Gets a list of all rules in the module.
|
||||
|
||||
Returns:
|
||||
A list of all rules.
|
||||
"""
|
||||
return self.rules.values()
|
||||
|
||||
def rule_iter(self):
|
||||
"""Iterates over all rules in the module."""
|
||||
for rule_name in self.rules:
|
||||
yield self.rules[rule_name]
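# Basic usage (illustrative; see module_test.py):
#
#   module = Module('m', rules=[Rule('a')])
#   module.get_rule(':a')  # -> the Rule; the leading ':' is optional
#   module.get_rule('a')   # -> the same Rule
#   [r.path for r in module.rule_iter()]  # -> rules with paths like 'm:a'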
|
||||
|
||||
|
||||
class ModuleLoader(object):
|
||||
"""A utility type that handles loading modules from files.
|
||||
A loader should only be used to load a single module and then be discarded.
|
||||
"""
|
||||
|
||||
def __init__(self, path, rule_namespace=None, modes=None):
|
||||
"""Initializes a loader.
|
||||
|
||||
Args:
|
||||
path: File-system path to the module.
|
||||
rule_namespace: Rule namespace to use for rule definitions.
modes: A list of mode strings to enable while loading the module.
|
||||
"""
|
||||
self.path = path
|
||||
self.rule_namespace = rule_namespace
|
||||
if not self.rule_namespace:
|
||||
self.rule_namespace = RuleNamespace()
|
||||
self.rule_namespace.discover()
|
||||
self.modes = {}
|
||||
if modes:
|
||||
for mode in modes:
|
||||
if mode in self.modes:
|
||||
raise KeyError('Duplicate mode "%s" defined' % (mode))
|
||||
self.modes[mode] = True
|
||||
|
||||
self.code_str = None
|
||||
self.code_ast = None
|
||||
self.code_obj = None
|
||||
|
||||
def load(self, source_string=None):
|
||||
"""Loads the module from the given path and prepares it for execution.
|
||||
|
||||
Args:
|
||||
source_string: A string to use as the source. If not provided the file
|
||||
will be loaded at the initialized path.
|
||||
|
||||
Raises:
|
||||
IOError: The file could not be loaded or read.
|
||||
SyntaxError: An error occurred parsing the module.
|
||||
"""
|
||||
if self.code_str:
|
||||
raise Exception('ModuleLoader load called multiple times')
|
||||
|
||||
# Read the source as a string
|
||||
if source_string is None:
|
||||
with io.open(self.path, 'r') as f:
|
||||
self.code_str = f.read()
|
||||
else:
|
||||
self.code_str = source_string
|
||||
|
||||
# Parse the AST
|
||||
# This will raise errors if it is not valid
|
||||
self.code_ast = ast.parse(self.code_str, self.path, 'exec')
|
||||
|
||||
# Compile
|
||||
self.code_obj = compile(self.code_ast, self.path, 'exec')
|
||||
|
||||
def execute(self):
|
||||
"""Executes the module and returns a Module instance.
|
||||
|
||||
Returns:
|
||||
A new Module instance with all of the rules.
|
||||
|
||||
Raises:
|
||||
NameError: A function or variable name was not found.
|
||||
"""
|
||||
all_rules = None
|
||||
rule.begin_capturing_emitted_rules()
|
||||
try:
|
||||
# Setup scope
|
||||
scope = {}
|
||||
self.rule_namespace.populate_scope(scope)
|
||||
self._add_builtins(scope)
|
||||
|
||||
# Execute!
|
||||
exec self.code_obj in scope
|
||||
finally:
|
||||
all_rules = rule.end_capturing_emitted_rules()
|
||||
|
||||
# Gather rules and build the module
|
||||
module = Module(self.path)
|
||||
module.add_rules(all_rules)
|
||||
return module
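# Typical flow (path illustrative):
#
#   loader = ModuleLoader('some/dir/BUILD')
#   loader.load()              # read + parse + compile; may raise
#                              # IOError/SyntaxError
#   module = loader.execute()  # run the code and capture emitted rules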
|
||||
|
||||
def _add_builtins(self, scope):
|
||||
"""Adds builtin functions and types to a scope.
|
||||
|
||||
Args:
|
||||
scope: Scope dictionary.
|
||||
"""
|
||||
scope['glob'] = self.glob
|
||||
scope['select_one'] = self.select_one
|
||||
scope['select_any'] = self.select_any
|
||||
scope['select_many'] = self.select_many
|
||||
|
||||
def glob(self, expr):
|
||||
"""Globs the given expression with the base path of the module.
|
||||
This uses the glob2 module and supports recursive globs ('**/*').
|
||||
|
||||
Args:
|
||||
expr: Glob expression.
|
||||
|
||||
Returns:
|
||||
A list of all files that match the glob expression.
|
||||
"""
|
||||
if not expr or not len(expr):
|
||||
return []
|
||||
base_path = os.path.dirname(self.path)
|
||||
glob_path = os.path.join(base_path, expr)
|
||||
return list(glob2.iglob(glob_path))
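# Within a BUILD file these resolve relative to the module's directory:
#
#   file_set('local_txt', srcs=glob('*.txt'))   # single directory level
#   file_set('all_txt', srcs=glob('**/*.txt'))  # recursive, via glob2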
|
||||
|
||||
def select_one(self, d, default_value):
|
||||
"""Selects a single value from the given tuple list based on the current
|
||||
mode settings.
|
||||
This is similar to select_any, only it ensures a reliable ordering in the
|
||||
case of multiple modes being matched.
|
||||
|
||||
If 'A' and 'B' are two non-exclusive modes, then pass
|
||||
[('A', ...), ('B', ...)] to ensure ordering. If only A or B is defined then
|
||||
the respective value is selected; if both are defined, the last matching
|
||||
tuple wins - in this case, the value of 'B'.
|
||||
|
||||
Args:
|
||||
d: A list of (key, value) tuples.
|
||||
default_value: The value to return if nothing matches.
|
||||
|
||||
Returns:
|
||||
A value from the given dictionary based on the current mode, and if none
|
||||
match default_value.
|
||||
"""
|
||||
value = None
|
||||
any_match = False
|
||||
for mode_tuple in d:
|
||||
if mode_tuple[0] in self.modes:
|
||||
any_match = True
|
||||
value = mode_tuple[1]
|
||||
if not any_match:
|
||||
return default_value
|
||||
return value
|
||||
|
||||
def select_any(self, d, default_value):
|
||||
"""Selects a single value from the given dictionary based on the current
|
||||
mode settings.
|
||||
If multiple keys match modes then a KeyError is raised; if multiple
|
||||
matches are expected, prefer select_one, which ensures ordering. This is
|
||||
only useful for exclusive modes (such as 'RELEASE' and 'DEBUG').
|
||||
|
||||
For example, if 'DEBUG' and 'RELEASE' are exclusive modes, one can use a
|
||||
dictionary that has 'DEBUG' and 'RELEASE' as keys and if both DEBUG and
|
||||
RELEASE are defined as modes then a KeyError will be raised.
|
||||
|
||||
Args:
|
||||
d: Dictionary of mode key-value pairs.
|
||||
default_value: The value to return if nothing matches.
|
||||
|
||||
Returns:
|
||||
A value from the given dictionary based on the current mode, and if none
|
||||
match default_value.
|
||||
|
||||
Raises:
|
||||
KeyError: Multiple keys were matched in the given dictionary.
|
||||
"""
|
||||
value = None
|
||||
any_match = False
|
||||
for mode in d:
|
||||
if mode in self.modes:
|
||||
if any_match:
|
||||
raise KeyError(
|
||||
'Multiple modes match in the given dictionary - use select_one '
|
||||
'instead to ensure ordering')
|
||||
any_match = True
|
||||
value = d[mode]
|
||||
if not any_match:
|
||||
return default_value
|
||||
return value
|
||||
|
||||
def select_many(self, d, default_value):
|
||||
"""Selects as many values from the given dictionary as match the current
|
||||
mode settings.
|
||||
|
||||
This expects the values in the dictionary to be of a uniform type (for
|
||||
example, all lists, dictionaries, or primitives). If any do not match, a
|
||||
TypeError is raised.
|
||||
|
||||
If values are dictionaries then the result will be a dictionary that is
|
||||
an aggregate of all matching values. If the values are lists then a single
|
||||
combined list is returned. All other types are placed into a list that is
|
||||
returned.
|
||||
|
||||
Args:
|
||||
d: Dictionary of mode key-value pairs.
|
||||
default_value: The value to return if nothing matches.
|
||||
|
||||
Returns:
|
||||
A list or dictionary of combined values that match any modes, or the
|
||||
default_value.
|
||||
|
||||
Raises:
|
||||
TypeError: The type of a value does not match the expected type.
|
||||
"""
|
||||
if isinstance(default_value, list):
|
||||
results = []
|
||||
elif isinstance(default_value, dict):
|
||||
results = {}
|
||||
else:
|
||||
results = []
|
||||
any_match = False
|
||||
for mode in d:
|
||||
if mode in self.modes:
|
||||
any_match = True
|
||||
mode_value = d[mode]
|
||||
if isinstance(mode_value, list):
|
||||
if type(mode_value) != type(default_value):
|
||||
raise TypeError('Type mismatch in dictionary (expected list)')
|
||||
results.extend(mode_value)
|
||||
elif isinstance(mode_value, dict):
|
||||
if type(mode_value) != type(default_value):
|
||||
raise TypeError('Type mismatch in dictionary (expected dict)')
|
||||
results.update(mode_value)
|
||||
else:
|
||||
if type(default_value) == list:
|
||||
raise TypeError('Type mismatch in dictionary (expected list)')
|
||||
elif type(default_value) == dict:
|
||||
raise TypeError('Type mismatch in dictionary (expected dict)')
|
||||
results.append(mode_value)
|
||||
if not any_match:
|
||||
if default_value is None:
|
||||
return None
|
||||
elif isinstance(default_value, list):
|
||||
results.extend(default_value)
|
||||
elif isinstance(default_value, dict):
|
||||
results.update(default_value)
|
||||
else:
|
||||
results.append(default_value)
|
||||
return results
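# The three selection helpers at a glance (modes and values illustrative):
#
#   loader = ModuleLoader('some/BUILD', modes=['DEBUG'])
#   loader.select_any({'DEBUG': '-g', 'RELEASE': '-O2'},
#                     default_value='')    # -> '-g'
#   loader.select_one([('BASE', 'x'), ('DEBUG', 'y')],
#                     default_value='')    # -> 'y' (last matching tuple)
#   loader.select_many({'DEBUG': ['-g'], 'TRACE': ['-t']},
#                      default_value=[])   # -> ['-g'] (matches combined)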
|
|
@@ -0,0 +1,414 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Tests for the module module.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import glob2
|
||||
import os
|
||||
import unittest2
|
||||
|
||||
from module import *
|
||||
from rule import *
|
||||
from test import FixtureTestCase
|
||||
|
||||
|
||||
class ModuleTest(unittest2.TestCase):
|
||||
"""Behavioral tests of Module rule handling."""
|
||||
|
||||
def testEmptyModule(self):
|
||||
module = Module('m')
|
||||
self.assertIsNone(module.get_rule(':a'))
|
||||
self.assertEqual(len(module.rule_list()), 0)
|
||||
self.assertEqual(len(list(module.rule_iter())), 0)
|
||||
|
||||
def testModulePath(self):
|
||||
module = Module('a')
|
||||
self.assertEqual(module.path, 'a')
|
||||
|
||||
def testModuleRuleInit(self):
|
||||
rule_a = Rule('a')
|
||||
rule_b = Rule('b')
|
||||
rule_list = [rule_a, rule_b]
|
||||
module = Module('m', rules=rule_list)
|
||||
self.assertIsNot(module.rule_list(), rule_list)
|
||||
self.assertEqual(len(module.rule_list()), len(rule_list))
|
||||
self.assertIs(module.get_rule(':a'), rule_a)
|
||||
self.assertIs(module.get_rule(':b'), rule_b)
|
||||
|
||||
def testAddRule(self):
|
||||
rule_a = Rule('a')
|
||||
rule_b = Rule('b')
|
||||
|
||||
module = Module('m')
|
||||
self.assertIsNone(module.get_rule(':a'))
|
||||
|
||||
module.add_rule(rule_a)
|
||||
self.assertIs(module.get_rule('a'), rule_a)
|
||||
self.assertIs(module.get_rule(':a'), rule_a)
|
||||
self.assertEqual(len(module.rule_list()), 1)
|
||||
self.assertEqual(len(list(module.rule_iter())), 1)
|
||||
self.assertIs(module.rule_list()[0], rule_a)
|
||||
self.assertEqual(list(module.rule_iter())[0], rule_a)
|
||||
self.assertIsNone(module.get_rule(':b'))
|
||||
|
||||
module.add_rule(rule_b)
|
||||
self.assertIs(module.get_rule(':b'), rule_b)
|
||||
self.assertEqual(len(module.rule_list()), 2)
|
||||
self.assertEqual(len(list(module.rule_iter())), 2)
|
||||
|
||||
with self.assertRaises(KeyError):
|
||||
module.add_rule(rule_b)
|
||||
self.assertEqual(len(module.rule_list()), 2)
|
||||
|
||||
def testAddRules(self):
|
||||
rule_a = Rule('a')
|
||||
rule_b = Rule('b')
|
||||
rule_list = [rule_a, rule_b]
|
||||
|
||||
module = Module('m')
|
||||
self.assertIsNone(module.get_rule('a'))
|
||||
self.assertIsNone(module.get_rule(':a'))
|
||||
self.assertIsNone(module.get_rule('b'))
|
||||
self.assertIsNone(module.get_rule(':b'))
|
||||
self.assertEqual(len(module.rule_list()), 0)
|
||||
|
||||
module.add_rules(rule_list)
|
||||
self.assertEqual(len(module.rule_list()), 2)
|
||||
self.assertEqual(len(list(module.rule_iter())), 2)
|
||||
self.assertIsNot(module.rule_list(), rule_list)
|
||||
self.assertIs(module.get_rule(':a'), rule_a)
|
||||
self.assertIs(module.get_rule(':b'), rule_b)
|
||||
|
||||
with self.assertRaises(KeyError):
|
||||
module.add_rule(rule_b)
|
||||
self.assertEqual(len(module.rule_list()), 2)
|
||||
with self.assertRaises(KeyError):
|
||||
module.add_rules([rule_b])
|
||||
self.assertEqual(len(module.rule_list()), 2)
|
||||
with self.assertRaises(KeyError):
|
||||
module.add_rules(rule_list)
|
||||
self.assertEqual(len(module.rule_list()), 2)
|
||||
|
||||
def testGetRule(self):
|
||||
rule = Rule('a')
|
||||
module = Module('m')
|
||||
module.add_rule(rule)
|
||||
|
||||
self.assertIs(module.get_rule('a'), rule)
|
||||
self.assertIs(module.get_rule(':a'), rule)
|
||||
|
||||
self.assertIsNone(module.get_rule(':x'))
|
||||
|
||||
with self.assertRaises(NameError):
|
||||
module.get_rule('')
|
||||
with self.assertRaises(NameError):
|
||||
module.get_rule(':')
|
||||
|
||||
def testRuleParentModule(self):
|
||||
rule_a = Rule('a')
|
||||
module = Module('m')
|
||||
|
||||
self.assertIsNone(rule_a.parent_module)
|
||||
self.assertEqual(rule_a.path, ':a')
|
||||
|
||||
module.add_rule(rule_a)
|
||||
|
||||
self.assertIs(rule_a.parent_module, module)
|
||||
self.assertEqual(rule_a.path, 'm:a')
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
rule_a.set_parent_module(module)
|
||||
|
||||
|
||||
class ModuleLoaderTest(FixtureTestCase):
|
||||
"""Behavioral tests for ModuleLoader."""
|
||||
fixture = 'simple'
|
||||
|
||||
def testModes(self):
|
||||
module_path = os.path.join(self.temp_path, 'simple', 'BUILD')
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
self.assertEqual(len(loader.modes), 0)
|
||||
loader = ModuleLoader(module_path, modes=None)
|
||||
self.assertEqual(len(loader.modes), 0)
|
||||
loader = ModuleLoader(module_path, modes=[])
|
||||
self.assertEqual(len(loader.modes), 0)
|
||||
loader = ModuleLoader(module_path, modes=['A'])
|
||||
self.assertEqual(len(loader.modes), 1)
|
||||
modes = ['A', 'B']
|
||||
loader = ModuleLoader(module_path, modes=modes)
|
||||
self.assertIsNot(loader.modes, modes)
|
||||
self.assertEqual(len(loader.modes), 2)
|
||||
|
||||
with self.assertRaises(KeyError):
|
||||
ModuleLoader(module_path, modes=['A', 'A'])
|
||||
|
||||
def testLoad(self):
|
||||
module_path = os.path.join(self.temp_path, 'simple', 'BUILD')
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load()
|
||||
|
||||
loader = ModuleLoader(module_path + '.not-real')
|
||||
with self.assertRaises(IOError):
|
||||
loader.load()
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='x = 5')
|
||||
with self.assertRaises(Exception):
|
||||
loader.load(source_string='y = 5')
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
with self.assertRaises(SyntaxError):
|
||||
loader.load(source_string='x/')
|
||||
with self.assertRaises(Exception):
|
||||
loader.load(source_string='y = 5')
|
||||
|
||||
def testExecute(self):
|
||||
module_path = os.path.join(self.temp_path, 'simple', 'BUILD')
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='asdf()')
|
||||
with self.assertRaises(NameError):
|
||||
loader.execute()
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 0)
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='x = 5')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 0)
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='file_set("a")\nfile_set("b")')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 2)
|
||||
self.assertIsNotNone(module.get_rule(':a'))
|
||||
self.assertIsNotNone(module.get_rule(':b'))
|
||||
self.assertEqual(module.get_rule(':a').name, 'a')
|
||||
self.assertEqual(module.get_rule(':b').name, 'b')
|
||||
|
||||
def testBuiltins(self):
|
||||
module_path = os.path.join(self.temp_path, 'simple', 'BUILD')
|
||||
|
||||
loader = ModuleLoader(module_path, modes=['A'])
|
||||
loader.load(source_string=(
|
||||
'file_set("a", srcs=select_any({"A": "sa"}, "sx"))\n'
|
||||
'file_set("b", srcs=select_any({"B": "sb"}, "sx"))\n'
|
||||
'file_set("c", srcs=select_one([("A", "sa")], "sx"))\n'
|
||||
'file_set("d", srcs=select_many({"B": "sb"}, "sx"))\n'))
|
||||
module = loader.execute()
|
||||
self.assertEqual(module.get_rule(':a').srcs[0], 'sa')
|
||||
self.assertEqual(module.get_rule(':b').srcs[0], 'sx')
|
||||
self.assertEqual(module.get_rule(':c').srcs[0], 'sa')
|
||||
self.assertEqual(module.get_rule(':d').srcs[0], 'sx')
|
||||
|
||||
def testCustomRules(self):
|
||||
module_path = os.path.join(self.temp_path, 'simple', 'BUILD')
|
||||
|
||||
class MockRule1(Rule):
|
||||
pass
|
||||
rule_namespace = RuleNamespace()
|
||||
rule_namespace.add_rule_type('mock_rule_1', MockRule1)
|
||||
loader = ModuleLoader(module_path, rule_namespace=rule_namespace)
|
||||
loader.load(source_string='mock_rule_1("a")')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 1)
|
||||
self.assertIsNotNone(module.get_rule(':a'))
|
||||
self.assertEqual(module.get_rule(':a').name, 'a')
|
||||
|
||||
def testGlob(self):
|
||||
module_path = os.path.join(self.temp_path, 'simple', 'BUILD')
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='file_set("a", srcs=glob(""))')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 1)
|
||||
self.assertIsNotNone(module.get_rule(':a'))
|
||||
rule = module.get_rule(':a')
|
||||
self.assertEqual(len(rule.srcs), 0)
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='file_set("a", srcs=glob("*.txt"))')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 1)
|
||||
self.assertIsNotNone(module.get_rule(':a'))
|
||||
rule = module.get_rule(':a')
|
||||
self.assertEqual(len(rule.srcs), 3)
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='file_set("a", srcs=glob("**/*.txt"))')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 1)
|
||||
self.assertIsNotNone(module.get_rule(':a'))
|
||||
rule = module.get_rule(':a')
|
||||
self.assertEqual(len(rule.srcs), 5)
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='file_set("a", srcs=glob("a.txt"))')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 1)
|
||||
self.assertIsNotNone(module.get_rule(':a'))
|
||||
rule = module.get_rule(':a')
|
||||
self.assertEqual(len(rule.srcs), 1)
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='file_set("a", srcs=glob("x.txt"))')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 1)
|
||||
self.assertIsNotNone(module.get_rule(':a'))
|
||||
rule = module.get_rule(':a')
|
||||
self.assertEqual(len(rule.srcs), 0)
|
||||
|
||||
loader = ModuleLoader(module_path)
|
||||
loader.load(source_string='file_set("a", srcs=glob("*.notpresent"))')
|
||||
module = loader.execute()
|
||||
self.assertEqual(len(module.rule_list()), 1)
|
||||
self.assertIsNotNone(module.get_rule(':a'))
|
||||
rule = module.get_rule(':a')
|
||||
self.assertEqual(len(rule.srcs), 0)
|
||||
|
||||
|
||||
class ModuleLoaderSelectionTest(unittest2.TestCase):
|
||||
"""Behavioral tests for ModuleLoader selection utilities."""
|
||||
|
||||
def testSelectOne(self):
|
||||
loader = ModuleLoader('some/path')
|
||||
self.assertEqual(loader.select_one([
|
||||
], default_value=100), 100)
|
||||
self.assertEqual(loader.select_one([
|
||||
('A', 1),
|
||||
('B', 2),
|
||||
], default_value=100), 100)
|
||||
|
||||
loader = ModuleLoader('some/path', modes=['A', 'B', 'C'])
|
||||
self.assertEqual(loader.select_one([
|
||||
('X', 99),
|
||||
], default_value=100), 100)
|
||||
self.assertEqual(loader.select_one([
|
||||
('A', 1),
|
||||
], default_value=100), 1)
|
||||
self.assertEqual(loader.select_one([
|
||||
('A', 1),
|
||||
('B', 2),
|
||||
], default_value=100), 2)
|
||||
self.assertEqual(loader.select_one([
|
||||
('B', 2),
|
||||
('A', 1),
|
||||
], default_value=100), 1)
|
||||
|
||||
def testSelectAny(self):
|
||||
loader = ModuleLoader('some/path')
|
||||
self.assertEqual(loader.select_any({
|
||||
}, default_value=100), 100)
|
||||
self.assertIsNone(loader.select_any({
|
||||
'A': 1,
|
||||
'B': 2,
|
||||
}, default_value=None))
|
||||
self.assertEqual(loader.select_any({
|
||||
'A': 1,
|
||||
'B': 2,
|
||||
}, default_value=100), 100)
|
||||
|
||||
loader = ModuleLoader('some/path', modes=['A', 'B', 'C'])
|
||||
self.assertEqual(loader.select_any({
|
||||
}, default_value=100), 100)
|
||||
self.assertEqual(loader.select_any({
|
||||
'X': 99,
|
||||
}, default_value=100), 100)
|
||||
self.assertEqual(loader.select_any({
|
||||
'X': 99,
|
||||
'A': 1,
|
||||
}, default_value=100), 1)
|
||||
self.assertEqual(loader.select_any({
|
||||
'X': 99,
|
||||
'B': 2,
|
||||
}, default_value=100), 2)
|
||||
|
||||
with self.assertRaises(KeyError):
|
||||
loader.select_any({
|
||||
'A': 1,
|
||||
'B': 2,
|
||||
}, default_value=100)
|
||||
|
||||
def testSelectMany(self):
|
||||
loader = ModuleLoader('some/path')
|
||||
self.assertIsNone(loader.select_many({}, default_value=None))
|
||||
self.assertEqual(loader.select_many({}, default_value=[]), [])
|
||||
self.assertEqual(loader.select_many({}, default_value=[1]), [1])
|
||||
self.assertEqual(loader.select_many({}, default_value={}), {})
|
||||
self.assertEqual(loader.select_many({}, default_value={'a': 1}), {'a': 1})
|
||||
self.assertEqual(loader.select_many({}, default_value=1), [1])
|
||||
self.assertEqual(loader.select_many({}, default_value='a'), ['a'])
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': 1,
|
||||
}, default_value=100), [100])
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': [1, 2, 3],
|
||||
}, default_value=[100, 101, 102]), [100, 101, 102])
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': {'a': 1},
|
||||
}, default_value={'d': 100}), {'d': 100})
|
||||
|
||||
loader = ModuleLoader('some/path', modes=['A', 'B', 'C'])
|
||||
self.assertEqual(loader.select_many({}, default_value=[]), [])
|
||||
self.assertEqual(loader.select_many({
|
||||
'X': 1,
|
||||
}, default_value=100), [100])
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': 1,
|
||||
}, default_value=100), [1])
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': 1,
|
||||
'B': 2,
|
||||
}, default_value=100), [1, 2])
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': [1, 2, 3],
|
||||
}, default_value=[100]), [1, 2, 3])
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': [1, 2, 3],
|
||||
'B': [4, 5, 6],
|
||||
}, default_value=[100]), [1, 2, 3, 4, 5, 6])
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': {'a': 1},
|
||||
}, default_value={'d': 100}), {'a': 1})
|
||||
self.assertEqual(loader.select_many({
|
||||
'A': {'a': 1},
|
||||
'B': {'b': 2},
|
||||
}, default_value={'d': 100}), {'a': 1, 'b': 2})
|
||||
|
||||
with self.assertRaises(TypeError):
|
||||
loader.select_many({
|
||||
'A': 1,
|
||||
}, default_value=[100])
|
||||
with self.assertRaises(TypeError):
|
||||
loader.select_many({
|
||||
'A': 1,
|
||||
}, default_value={'d': 100})
|
||||
with self.assertRaises(TypeError):
|
||||
loader.select_many({
|
||||
'A': [1],
|
||||
}, default_value=100)
|
||||
with self.assertRaises(TypeError):
|
||||
loader.select_many({
|
||||
'A': [1],
|
||||
}, default_value={'d': 100})
|
||||
with self.assertRaises(TypeError):
|
||||
loader.select_many({
|
||||
'A': {'a': 1},
|
||||
}, default_value=100)
|
||||
with self.assertRaises(TypeError):
|
||||
loader.select_many({
|
||||
'A': {'a': 1},
|
||||
}, default_value=[100])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
|
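A short usage sketch of the selection utilities exercised above; the mode
names and values here are illustrative, not part of the test fixtures:

loader = ModuleLoader('some/path', modes=['RELEASE'])
# select_one: the last matching (mode, value) tuple wins, else the default.
opt_level = loader.select_one([('DEBUG', 0), ('RELEASE', 2)], default_value=1)
# select_any: at most one key may match; two matching keys raise KeyError.
suffix = loader.select_any({'RELEASE': '.min.js'}, default_value='.js')
# select_many: all matching values merge - scalars collect into a list,
# lists concatenate, dicts update; mixing value types raises TypeError.
defines = loader.select_many({'RELEASE': ['--define=DEBUG=false']},
                             default_value=[])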
@@ -0,0 +1,283 @@
|
|||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Project representation.
|
||||
|
||||
A project is a module (or set of modules) that provides a namespace of rules.
|
||||
Rules may refer to each other and will be resolved in the project namespace.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import base64
|
||||
import os
|
||||
import pickle
|
||||
import re
|
||||
import stat
|
||||
import string
|
||||
|
||||
from module import ModuleLoader
|
||||
from rule import RuleNamespace
|
||||
import util
|
||||
|
||||
|
||||
class Project(object):
|
||||
"""Project type that contains rules.
|
||||
Projects, once constructed, are designed to be immutable. Many duplicate
|
||||
build processes may run over the same project instance and all expect it to
|
||||
be in the state it was when first created.
|
||||
"""
|
||||
|
||||
def __init__(self, name='Project', rule_namespace=None, module_resolver=None,
|
||||
modules=None):
|
||||
"""Initializes an empty project.
|
||||
|
||||
Args:
|
||||
name: A human-readable name for the project that will be used for
|
||||
logging.
|
||||
rule_namespace: Rule namespace to use when loading modules. If omitted a
|
||||
default one is used.
|
||||
module_resolver: A module resolver to use when attempt to dynamically
|
||||
resolve modules by path.
|
||||
modules: A list of modules to add to the project.
|
||||
|
||||
Raises:
|
||||
NameError: The name given is not valid.
|
||||
"""
|
||||
self.name = name
|
||||
|
||||
if rule_namespace:
|
||||
self.rule_namespace = rule_namespace
|
||||
else:
|
||||
self.rule_namespace = RuleNamespace()
|
||||
self.rule_namespace.discover()
|
||||
|
||||
if module_resolver:
|
||||
self.module_resolver = module_resolver
|
||||
else:
|
||||
self.module_resolver = StaticModuleResolver()
|
||||
|
||||
self.modules = {}
|
||||
if modules and len(modules):
|
||||
self.add_modules(modules)
|
||||
|
||||
def add_module(self, module):
|
||||
"""Adds a module to the project.
|
||||
|
||||
Args:
|
||||
module: A module to add.
|
||||
|
||||
Raises:
|
||||
KeyError: A module with the given name already exists in the project.
|
||||
"""
|
||||
self.add_modules([module])
|
||||
|
||||
def add_modules(self, modules):
|
||||
"""Adds a list of modules to the project.
|
||||
|
||||
Args:
|
||||
modules: A list of modules to add.
|
||||
|
||||
Raises:
|
||||
KeyError: A module with the given name already exists in the project.
|
||||
"""
|
||||
for module in modules:
|
||||
if module.path in self.modules:
|
||||
raise KeyError('A module with the path "%s" is already defined' % (
|
||||
module.path))
|
||||
for module in modules:
|
||||
self.modules[module.path] = module
|
||||
|
||||
def get_module(self, module_path):
|
||||
"""Gets a module by path.
|
||||
|
||||
Args:
|
||||
module_path: Name of the module to find.
|
||||
|
||||
Returns:
|
||||
The module with the given path or None if it was not found.
|
||||
"""
|
||||
return self.modules.get(module_path, None)
|
||||
|
||||
def module_list(self):
|
||||
"""Gets a list of all modules in the project.
|
||||
|
||||
Returns:
|
||||
A list of all modules.
|
||||
"""
|
||||
return self.modules.values()
|
||||
|
||||
def module_iter(self):
|
||||
"""Iterates over all modules in the project."""
|
||||
for module_path in self.modules:
|
||||
yield self.modules[module_path]
|
||||
|
||||
def resolve_rule(self, rule_path, requesting_module=None):
|
||||
"""Gets a rule by path, supporting module lookup and dynamic loading.
|
||||
|
||||
Args:
|
||||
rule_path: Path of the rule to find. Must include a colon.
|
||||
requesting_module: The module that is requesting the given rule. If not
|
||||
provided then no local rule paths (':foo') or relative paths are
|
||||
allowed.
|
||||
|
||||
Returns:
|
||||
The rule with the given name or None if it was not found.
|
||||
|
||||
Raises:
|
||||
NameError: The given rule name was not valid.
|
||||
KeyError: The given rule was not found.
|
||||
"""
|
||||
if ':' not in rule_path:
|
||||
raise NameError('The rule path "%s" is missing a colon' % (rule_path))
|
||||
(module_path, rule_name) = rule_path.rsplit(':', 1)
|
||||
if self.module_resolver.can_resolve_local:
|
||||
if not len(module_path) and not requesting_module:
|
||||
module_path = '.'
|
||||
if not len(module_path) and not requesting_module:
|
||||
raise KeyError('Local rule "%s" given when no resolver defined' % (
|
||||
rule_path))
|
||||
|
||||
module = requesting_module
|
||||
if len(module_path):
|
||||
requesting_path = None
|
||||
if requesting_module:
|
||||
requesting_path = os.path.dirname(requesting_module.path)
|
||||
full_path = self.module_resolver.resolve_module_path(
|
||||
module_path, requesting_path)
|
||||
module = self.modules.get(full_path, None)
|
||||
if not module:
|
||||
# Module not yet loaded - need to grab it
|
||||
module = self.module_resolver.load_module(
|
||||
full_path, self.rule_namespace)
|
||||
if module:
|
||||
self.add_module(module)
|
||||
else:
|
||||
raise IOError('Module "%s" not found' % (module_path))
|
||||
|
||||
return module.get_rule(rule_name)
|
||||
|
||||
|
||||
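# A hedged usage sketch (module and rule names illustrative): rule paths
# always contain a ':'; the part before it selects the module.
def _example_resolution(project, m):
  local_rule = project.resolve_rule(':local', requesting_module=m)    # in m
  relative_rule = project.resolve_rule('a/b:t', requesting_module=m)  # from m
  rooted_rule = project.resolve_rule('a/b:t')  # resolver-rooted, no module
  return (local_rule, relative_rule, rooted_rule)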
class ModuleResolver(object):
|
||||
"""A type to use for resolving modules.
|
||||
This is used to get a module when a project tries to resolve a rule in a
|
||||
module that has not yet been loaded.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""Initializes a module resolver."""
|
||||
self.can_resolve_local = False
|
||||
|
||||
def resolve_module_path(self, path, working_path=None):
|
||||
"""Resolves a module path to its full, absolute path.
|
||||
This is used by the project system to disambiguate modules and check the
|
||||
cache before actually performing a load.
|
||||
The path returned from this will be passed to load_module.
|
||||
|
||||
Args:
|
||||
path: Path of the module (may be relative/etc).
|
||||
working_path: Path that relative paths should be based off of. If not provided
|
||||
then relative paths may fail.
|
||||
|
||||
Returns:
|
||||
An absolute path that can be used as a cache key and passed to
|
||||
load_module.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def load_module(self, full_path, rule_namespace):
|
||||
"""Loads a module from the given path.
|
||||
|
||||
Args:
|
||||
full_path: Absolute path of the module as returned by resolve_module_path.
|
||||
rule_namespace: Rule namespace to use when loading modules.
|
||||
|
||||
Returns:
|
||||
A Module representing the given path or None if it could not be found.
|
||||
|
||||
Raises:
|
||||
IOError/OSError: The module could not be found.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class StaticModuleResolver(ModuleResolver):
|
||||
"""A static module resolver that can resolve from a list of modules.
|
||||
"""
|
||||
|
||||
def __init__(self, modules=None, *args, **kwargs):
|
||||
"""Initializes a static module resolver.
|
||||
|
||||
Args:
|
||||
modules: A list of modules that can be resolved.
|
||||
"""
|
||||
super(StaticModuleResolver, self).__init__(*args, **kwargs)
|
||||
|
||||
self.modules = {}
|
||||
if modules:
|
||||
for module in modules:
|
||||
self.modules[module.path] = module
|
||||
|
||||
def resolve_module_path(self, path, working_path=None):
|
||||
real_path = path
|
||||
if working_path and len(working_path):
|
||||
real_path = os.path.join(working_path, path)
|
||||
return os.path.normpath(real_path)
|
||||
|
||||
def load_module(self, full_path, rule_namespace):
|
||||
return self.modules.get(full_path, None)
|
||||
|
||||
|
||||
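# A hedged usage note: Project calls resolve_module_path first and uses the
# result both as the cache key and as the argument to load_module, so a
# module is loaded at most once per normalized path. For example:
#   resolver = StaticModuleResolver([module_a])
#   project = Project(module_resolver=resolver)
#   project.resolve_rule('ma:a')       # triggers load_module('ma', ...)
#   project.resolve_rule('x/../ma:a')  # normalizes to 'ma'; cache hit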
class FileModuleResolver(ModuleResolver):
|
||||
"""A file-system backed module resolver.
|
||||
|
||||
Rules are searched for with relative paths from a defined root path.
|
||||
If the module path given is a directory, the resolver will attempt to load
|
||||
a BUILD file from that directory - otherwise the file specified will be
|
||||
treated as the module.
|
||||
"""
|
||||
|
||||
def __init__(self, root_path, *args, **kwargs):
|
||||
"""Initializes a file-system module resolver.
|
||||
|
||||
Args:
|
||||
root_path: Root filesystem path to treat as the base for all resolutions.
|
||||
|
||||
Raises:
|
||||
IOError: The given root path is not found or is not a directory.
|
||||
"""
|
||||
super(FileModuleResolver, self).__init__(*args, **kwargs)
|
||||
|
||||
self.can_resolve_local = True
|
||||
|
||||
self.root_path = root_path
|
||||
if not os.path.isdir(root_path):
|
||||
raise IOError('Root path "%s" not found' % (root_path))
|
||||
|
||||
def resolve_module_path(self, path, working_path=None):
|
||||
# Compute the real path
|
||||
has_working_path = working_path and len(working_path)
|
||||
real_path = path
|
||||
if has_working_path:
|
||||
real_path = os.path.join(working_path, path)
|
||||
real_path = os.path.normpath(real_path)
|
||||
full_path = os.path.join(self.root_path, real_path)
|
||||
|
||||
# Check to see if it exists and is a file
|
||||
# Special handling to find BUILD files under directories
|
||||
mode = os.stat(full_path).st_mode
|
||||
if stat.S_ISDIR(mode):
|
||||
full_path = os.path.join(full_path, 'BUILD')
|
||||
if not os.path.isfile(full_path):
|
||||
raise IOError('Path "%s" is not a file' % (full_path))
|
||||
elif stat.S_ISREG(mode):
|
||||
pass
|
||||
else:
|
||||
raise IOError('Path "%s" is not a file' % (full_path))
|
||||
|
||||
return os.path.normpath(full_path)
|
||||
|
||||
def load_module(self, full_path, rule_namespace):
|
||||
module_loader = ModuleLoader(full_path, rule_namespace=rule_namespace)
|
||||
module_loader.load()
|
||||
return module_loader.execute()
|
|
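A short usage sketch tying the types above together; the root path and rule
name are illustrative:

resolver = FileModuleResolver('/path/to/project/root')
project = Project(module_resolver=resolver)
# Loads <root>/BUILD on demand; returns None if the module loads but has no
# rule with that name.
rule = project.resolve_rule('.:root_rule')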
@@ -0,0 +1,323 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Tests for the project module.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import os
|
||||
import unittest2
|
||||
|
||||
from module import *
|
||||
from rule import *
|
||||
from project import *
|
||||
from test import FixtureTestCase
|
||||
|
||||
|
||||
class ProjectTest(unittest2.TestCase):
|
||||
"""Behavioral tests of Project rule handling."""
|
||||
|
||||
def testEmptyProject(self):
|
||||
project = Project()
|
||||
self.assertIsNone(project.get_module(':a'))
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
self.assertEqual(len(list(project.module_iter())), 0)
|
||||
|
||||
def testProjectName(self):
|
||||
project = Project()
|
||||
self.assertNotEqual(len(project.name), 0)
|
||||
project = Project(name='a')
|
||||
self.assertEqual(project.name, 'a')
|
||||
|
||||
def testProjectRuleNamespace(self):
|
||||
project = Project()
|
||||
self.assertIsNotNone(project.rule_namespace)
|
||||
rule_namespace = RuleNamespace()
|
||||
project = Project(rule_namespace=rule_namespace)
|
||||
self.assertIs(project.rule_namespace, rule_namespace)
|
||||
|
||||
def testProjectModuleInit(self):
|
||||
module_a = Module('ma', rules=[Rule('a')])
|
||||
module_b = Module('mb', rules=[Rule('b')])
|
||||
module_list = [module_a, module_b]
|
||||
project = Project(modules=module_list)
|
||||
self.assertIsNot(project.module_list(), module_list)
|
||||
self.assertEqual(len(project.module_list()), len(module_list))
|
||||
self.assertIs(project.get_module('ma'), module_a)
|
||||
self.assertIs(project.get_module('mb'), module_b)
|
||||
|
||||
def testAddModule(self):
|
||||
module_a = Module('ma', rules=[Rule('a')])
|
||||
module_b = Module('mb', rules=[Rule('b')])
|
||||
|
||||
project = Project()
|
||||
self.assertIsNone(project.get_module('ma'))
|
||||
self.assertIsNone(project.get_module('mb'))
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
|
||||
project.add_module(module_a)
|
||||
self.assertIs(project.get_module('ma'), module_a)
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertEqual(len(list(project.module_iter())), 1)
|
||||
self.assertEqual(project.module_list()[0], module_a)
|
||||
self.assertEqual(list(project.module_iter())[0], module_a)
|
||||
self.assertIsNone(project.get_module('mb'))
|
||||
|
||||
project.add_module(module_b)
|
||||
self.assertIs(project.get_module('mb'), module_b)
|
||||
self.assertEqual(len(project.module_list()), 2)
|
||||
self.assertEqual(len(list(project.module_iter())), 2)
|
||||
|
||||
with self.assertRaises(KeyError):
|
||||
project.add_module(module_b)
|
||||
self.assertEqual(len(project.module_list()), 2)
|
||||
|
||||
def testAddModules(self):
|
||||
module_a = Module('ma', rules=[Rule('a')])
|
||||
module_b = Module('mb', rules=[Rule('b')])
|
||||
module_list = [module_a, module_b]
|
||||
|
||||
project = Project()
|
||||
self.assertIsNone(project.get_module('ma'))
|
||||
self.assertIsNone(project.get_module('mb'))
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
|
||||
project.add_modules(module_list)
|
||||
self.assertIsNot(project.module_list(), module_list)
|
||||
self.assertEqual(len(project.module_list()), len(module_list))
|
||||
self.assertIs(project.get_module('ma'), module_a)
|
||||
self.assertIs(project.get_module('mb'), module_b)
|
||||
|
||||
with self.assertRaises(KeyError):
|
||||
project.add_module(module_b)
|
||||
self.assertEqual(len(project.module_list()), len(module_list))
|
||||
with self.assertRaises(KeyError):
|
||||
project.add_modules([module_b])
|
||||
self.assertEqual(len(project.module_list()), len(module_list))
|
||||
with self.assertRaises(KeyError):
|
||||
project.add_modules(module_list)
|
||||
self.assertEqual(len(project.module_list()), len(module_list))
|
||||
|
||||
def testGetModule(self):
|
||||
module_a = Module('ma', rules=[Rule('a')])
|
||||
module_b = Module('mb', rules=[Rule('b')])
|
||||
project = Project(modules=[module_a, module_b])
|
||||
|
||||
self.assertIs(project.get_module('ma'), module_a)
|
||||
self.assertIs(project.get_module('mb'), module_b)
|
||||
self.assertIsNone(project.get_module('mx'))
|
||||
|
||||
def testResolveRule(self):
|
||||
rule_a = Rule('a')
|
||||
rule_b = Rule('b')
|
||||
module_a = Module('ma', rules=[rule_a])
|
||||
module_b = Module('mb', rules=[rule_b])
|
||||
project = Project(modules=[module_a, module_b])
|
||||
|
||||
with self.assertRaises(NameError):
|
||||
project.resolve_rule('')
|
||||
with self.assertRaises(NameError):
|
||||
project.resolve_rule('a')
|
||||
with self.assertRaises(NameError):
|
||||
project.resolve_rule('a/b/c')
|
||||
with self.assertRaises(NameError):
|
||||
project.resolve_rule('a', requesting_module=module_a)
|
||||
|
||||
self.assertIs(project.resolve_rule(':a', requesting_module=module_a),
|
||||
rule_a)
|
||||
self.assertIs(project.resolve_rule(':b', requesting_module=module_b),
|
||||
rule_b)
|
||||
self.assertIs(project.resolve_rule('ma:a', requesting_module=module_a),
|
||||
rule_a)
|
||||
self.assertIs(project.resolve_rule('mb:b', requesting_module=module_b),
|
||||
rule_b)
|
||||
self.assertIs(project.resolve_rule('mb:b', requesting_module=module_a),
|
||||
rule_b)
|
||||
self.assertIs(project.resolve_rule('ma:a', requesting_module=module_b),
|
||||
rule_a)
|
||||
|
||||
def testModuleResolver(self):
|
||||
rule_a = Rule('a')
|
||||
rule_b = Rule('b')
|
||||
module_a = Module('ma', rules=[rule_a])
|
||||
module_b = Module('mb', rules=[rule_b])
|
||||
module_resolver = StaticModuleResolver([module_a, module_b])
|
||||
project = Project(module_resolver=module_resolver)
|
||||
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
self.assertIs(project.resolve_rule('ma:a'), rule_a)
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIs(project.resolve_rule('mb:b'), rule_b)
|
||||
self.assertEqual(len(project.module_list()), 2)
|
||||
|
||||
with self.assertRaises(IOError):
|
||||
project.resolve_rule('mx:x')
|
||||
|
||||
def testRelativeModuleResolver(self):
|
||||
rule_a = Rule('a')
|
||||
rule_b = Rule('b')
|
||||
module_a = Module('ma', rules=[rule_a])
|
||||
module_b = Module('b/mb', rules=[rule_b])
|
||||
module_resolver = StaticModuleResolver([module_a, module_b])
|
||||
project = Project(module_resolver=module_resolver)
|
||||
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
with self.assertRaises(IOError):
|
||||
project.resolve_rule('ma:a', requesting_module=module_b)
|
||||
self.assertIs(project.resolve_rule('../ma:a',
|
||||
requesting_module=module_b), rule_a)
|
||||
self.assertIs(project.resolve_rule('b/mb:b',
|
||||
requesting_module=module_a), rule_b)
|
||||
|
||||
|
||||
class FileModuleResolverTest(FixtureTestCase):
|
||||
"""Behavioral tests for FileModuleResolver."""
|
||||
fixture = 'resolution'
|
||||
|
||||
def testResolverInit(self):
|
||||
FileModuleResolver(self.root_path)
|
||||
|
||||
with self.assertRaises(IOError):
|
||||
FileModuleResolver(os.path.join(self.root_path, 'x'))
|
||||
|
||||
def testResolveModulePath(self):
|
||||
module_resolver = FileModuleResolver(self.root_path)
|
||||
|
||||
self.assertEqual(module_resolver.resolve_module_path('BUILD'),
|
||||
os.path.join(self.root_path, 'BUILD'))
|
||||
self.assertEqual(module_resolver.resolve_module_path('./BUILD'),
|
||||
os.path.join(self.root_path, 'BUILD'))
|
||||
self.assertEqual(module_resolver.resolve_module_path('.'),
|
||||
os.path.join(self.root_path, 'BUILD'))
|
||||
self.assertEqual(module_resolver.resolve_module_path('./a/..'),
|
||||
os.path.join(self.root_path, 'BUILD'))
|
||||
self.assertEqual(module_resolver.resolve_module_path('./a/../BUILD'),
|
||||
os.path.join(self.root_path, 'BUILD'))
|
||||
|
||||
self.assertEqual(module_resolver.resolve_module_path('BUILD', 'a'),
|
||||
os.path.join(self.root_path, 'a', 'BUILD'))
|
||||
self.assertEqual(module_resolver.resolve_module_path('.', 'a'),
|
||||
os.path.join(self.root_path, 'a', 'BUILD'))
|
||||
self.assertEqual(module_resolver.resolve_module_path('..', 'a'),
|
||||
os.path.join(self.root_path, 'BUILD'))
|
||||
self.assertEqual(module_resolver.resolve_module_path('../.', 'a'),
|
||||
os.path.join(self.root_path, 'BUILD'))
|
||||
self.assertEqual(module_resolver.resolve_module_path('../BUILD', 'a'),
|
||||
os.path.join(self.root_path, 'BUILD'))
|
||||
|
||||
with self.assertRaises(IOError):
|
||||
module_resolver.resolve_module_path('empty')
|
||||
|
||||
with self.assertRaises(IOError):
|
||||
module_resolver.resolve_module_path('/dev/null')
|
||||
|
||||
def testFileResolution(self):
|
||||
module_resolver = FileModuleResolver(self.root_path)
|
||||
|
||||
project = Project(module_resolver=module_resolver)
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
root_rule = project.resolve_rule('.:root_rule')
|
||||
self.assertIsNotNone(root_rule)
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
|
||||
def testModuleNameMatching(self):
|
||||
module_resolver = FileModuleResolver(self.root_path)
|
||||
|
||||
project = Project(module_resolver=module_resolver)
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
rule_a = project.resolve_rule('a:rule_a')
|
||||
self.assertIsNotNone(rule_a)
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIs(rule_a, project.resolve_rule('a/BUILD:rule_a'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIs(rule_a, project.resolve_rule('a/../a/BUILD:rule_a'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIs(rule_a, project.resolve_rule('b/../a/BUILD:rule_a'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIs(rule_a, project.resolve_rule('b/../a:rule_a'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIsNotNone(project.resolve_rule('b:rule_b'))
|
||||
self.assertEqual(len(project.module_list()), 2)
|
||||
|
||||
def testValidModulePaths(self):
|
||||
module_resolver = FileModuleResolver(self.root_path)
|
||||
|
||||
test_paths = [
|
||||
':root_rule',
|
||||
'.:root_rule',
|
||||
'./:root_rule',
|
||||
'./BUILD:root_rule',
|
||||
'a:rule_a',
|
||||
'a/BUILD:rule_a',
|
||||
'a/../a/BUILD:rule_a',
|
||||
'b/../a/BUILD:rule_a',
|
||||
'b/../a:rule_a',
|
||||
'a/.:rule_a',
|
||||
'a/./BUILD:rule_a',
|
||||
'b:rule_b',
|
||||
'b/:rule_b',
|
||||
'b/BUILD:rule_b',
|
||||
'b/c:rule_c',
|
||||
'b/c/build_file.py:rule_c_file',
|
||||
]
|
||||
for test_path in test_paths:
|
||||
project = Project(module_resolver=module_resolver)
|
||||
self.assertIsNotNone(project.resolve_rule(test_path))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
|
||||
def testInvalidModulePaths(self):
|
||||
module_resolver = FileModuleResolver(self.root_path)
|
||||
|
||||
invalid_test_paths = [
|
||||
'.',
|
||||
'/',
|
||||
]
|
||||
for test_path in invalid_test_paths:
|
||||
project = Project(module_resolver=module_resolver)
|
||||
with self.assertRaises(NameError):
|
||||
project.resolve_rule(test_path)
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
|
||||
def testMissingModules(self):
|
||||
module_resolver = FileModuleResolver(self.root_path)
|
||||
|
||||
project = Project(module_resolver=module_resolver)
|
||||
with self.assertRaises(OSError):
|
||||
project.resolve_rule('x:rule_x')
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
|
||||
project = Project(module_resolver=module_resolver)
|
||||
with self.assertRaises(OSError):
|
||||
project.resolve_rule('/x:rule_x')
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
|
||||
project = Project(module_resolver=module_resolver)
|
||||
with self.assertRaises(OSError):
|
||||
project.resolve_rule('/BUILD:root_rule')
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
|
||||
def testMissingRules(self):
|
||||
module_resolver = FileModuleResolver(self.root_path)
|
||||
|
||||
project = Project(module_resolver=module_resolver)
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
self.assertIsNone(project.resolve_rule('.:x'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIsNone(project.resolve_rule('.:y'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
|
||||
project = Project(module_resolver=module_resolver)
|
||||
self.assertEqual(len(project.module_list()), 0)
|
||||
self.assertIsNone(project.resolve_rule('a:rule_x'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIsNone(project.resolve_rule('a/../a/BUILD:rule_x'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
self.assertIsNone(project.resolve_rule('a/../a/BUILD:rule_y'))
|
||||
self.assertEqual(len(project.module_list()), 1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
|
@@ -0,0 +1,329 @@
|
|||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""A single rule metadata blob.
|
||||
Rules are defined by special rule functions (found under anvil.rules). They are
|
||||
meant to be immutable and reusable, and contain no state.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import base64
|
||||
import fnmatch
|
||||
import hashlib
|
||||
import imp
|
||||
import os
|
||||
import pickle
|
||||
import re
|
||||
import sys
|
||||
|
||||
import util
|
||||
import version
|
||||
|
||||
|
||||
class Rule(object):
|
||||
"""A rule definition.
|
||||
Rules are the base unit in a module and can depend on other rules via either
|
||||
source (which depends on the outputs of the rule) or explicit dependencies
|
||||
(which just requires that the other rule has been run before).
|
||||
|
||||
Sources can also refer to files, folders, or file globs. When a rule goes to
|
||||
run a list of sources will be compiled from the outputs from the previous
|
||||
rules as well as all real files on the file system.
|
||||
|
||||
Rules must define a _Context class that extends RuleContext. This context
|
||||
will be used when executing the rule to store any temporary state and
|
||||
execution progress. Rules should not be modified after their initial
|
||||
construction, and instead the _Context should be used.
|
||||
"""
|
||||
|
||||
_whitespace_re = re.compile(r'\s', re.M)
|
||||
|
||||
def __init__(self, name, srcs=None, deps=None, src_filter=None,
|
||||
*args, **kwargs):
|
||||
"""Initializes a rule.
|
||||
|
||||
Args:
|
||||
name: A name for the rule - should be literal-like and contain no
|
||||
whitespace.
|
||||
srcs: A list of source strings or a single source string.
|
||||
deps: A list of dependency strings or a single dependency string.
|
||||
src_filter: An inclusionary file name filter for all non-rule paths. If
|
||||
defined only srcs that match this filter will be included.
|
||||
|
||||
Raises:
|
||||
NameError: The given name is invalid (None/0-length).
|
||||
TypeError: The type of an argument or value is invalid.
|
||||
"""
|
||||
if not name or not len(name):
|
||||
raise NameError('Invalid name')
|
||||
if self._whitespace_re.search(name):
|
||||
raise NameError('Name contains whitespace')
|
||||
if name[0] == ':':
|
||||
raise NameError('Name cannot start with :')
|
||||
self.name = name
|
||||
|
||||
# Path will be updated when the parent module is set
|
||||
self.parent_module = None
|
||||
self.path = ':%s' % (name)
|
||||
|
||||
# All file/rule paths this rule depends on - as a set so no duplicates
|
||||
self._dependent_paths = set([])
|
||||
|
||||
self.srcs = []
|
||||
if isinstance(srcs, str):
|
||||
if len(srcs):
|
||||
self.srcs.append(srcs)
|
||||
elif isinstance(srcs, list):
|
||||
self.srcs.extend(srcs)
|
||||
elif srcs is not None:
|
||||
raise TypeError('Invalid srcs type')
|
||||
self._append_dependent_paths(self.srcs)
|
||||
|
||||
self.deps = []
|
||||
if isinstance(deps, str):
|
||||
if len(deps):
|
||||
self.deps.append(deps)
|
||||
elif isinstance(deps, list):
|
||||
self.deps.extend(deps)
|
||||
elif deps is not None:
|
||||
raise TypeError('Invalid deps type')
|
||||
self._append_dependent_paths(self.deps, require_semicolon=True)
|
||||
|
||||
self.src_filter = None
|
||||
if src_filter and len(src_filter):
|
||||
self.src_filter = src_filter
|
||||
|
||||
def _append_dependent_paths(self, paths, require_semicolon=False):
|
||||
"""Appends a list of paths to the rule's dependent paths.
|
||||
A dependent path is a file/rule that is required for execution and, if
|
||||
changed, will invalidate cached versions of this rule.
|
||||
|
||||
Args:
|
||||
paths: A list of paths to depend on.
|
||||
require_semicolon: True if all of the given paths require a semicolon
|
||||
(so they must be rules).
|
||||
|
||||
Raises:
|
||||
NameError: One of the given paths is invalid.
|
||||
"""
|
||||
util.validate_names(paths, require_semicolon=require_semicolon)
|
||||
self._dependent_paths.update(paths)
|
||||
|
||||
def get_dependent_paths(self):
|
||||
"""Gets a list of all dependent paths.
|
||||
Paths may be file paths or rule paths.
|
||||
|
||||
Returns:
|
||||
A list of file/rule paths.
|
||||
"""
|
||||
return self._dependent_paths.copy()
|
||||
|
||||
def set_parent_module(self, module):
|
||||
"""Sets the parent module of a rule.
|
||||
This can only be called once.
|
||||
|
||||
Args:
|
||||
module: New parent module for the rule.
|
||||
|
||||
Raises:
|
||||
ValueError: The parent module has already been set.
|
||||
"""
|
||||
if self.parent_module:
|
||||
raise ValueError('Rule "%s" already has a parent module' % (self.name))
|
||||
self.parent_module = module
|
||||
self.path = '%s:%s' % (module.path, self.name)
|
||||
|
||||
def compute_cache_key(self):
|
||||
"""Calculates a unique key based on the rule type and its values.
|
||||
This key may change when code changes, but is a fairly reliable way to
|
||||
detect changes in rule values.
|
||||
|
||||
Returns:
|
||||
A string that can be used to index this key in a dictionary. The string
|
||||
may be very long.
|
||||
"""
|
||||
# TODO(benvanik): faster serialization than pickle?
|
||||
pickled_self = pickle.dumps(self)
|
||||
pickled_str = base64.b64encode(pickled_self)
|
||||
# Include framework version in the string to enable forced rebuilds on
|
||||
# version change
|
||||
unique_str = version.VERSION_STR + pickled_str
|
||||
# Hash so that we return a reasonably-sized string
|
||||
return hashlib.md5(unique_str).hexdigest()
|
||||
|
||||
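# A hedged sketch of the cache key contract (rule names illustrative):
# identical definitions hash identically; any value change produces a new key.
#   Rule('r', srcs='a').compute_cache_key() ==
#       Rule('r', srcs='a').compute_cache_key()
#   Rule('r', srcs='a').compute_cache_key() !=
#       Rule('r', srcs='b').compute_cache_key()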
def create_context(self, build_context):
|
||||
"""Creates a new RuleContext that is used to run the rule.
|
||||
Rule implementations should return their own RuleContext type that
|
||||
has custom behavior.
|
||||
|
||||
Args:
|
||||
build_context: The current BuildContext that should be passed to the
|
||||
RuleContext.
|
||||
|
||||
Returns:
|
||||
A new RuleContext.
|
||||
"""
|
||||
assert self._Context
|
||||
return self._Context(build_context, self)
|
||||
|
||||
|
||||
# Active rule namespace that is capturing all new rule definitions
|
||||
# This should only be modified by RuleNamespace.discover
|
||||
_RULE_NAMESPACE = None
|
||||
|
||||
class RuleNamespace(object):
|
||||
"""A namespace of rule type definitions and discovery services.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""Initializes a rule namespace."""
|
||||
self.rule_types = {}
|
||||
|
||||
def populate_scope(self, scope):
|
||||
"""Populates the given scope dictionary with all of the rule types.
|
||||
|
||||
Args:
|
||||
scope: Scope dictionary.
|
||||
"""
|
||||
for rule_name in self.rule_types:
|
||||
scope[rule_name] = self.rule_types[rule_name]
|
||||
|
||||
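# A hedged sketch of how a module loader could consume this (the exec step
# is an assumption about ModuleLoader internals, not shown here):
#   scope = {}
#   rule_namespace.populate_scope(scope)
#   exec compile(source, 'BUILD', 'exec') in scope  # rule fns callable by name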
def add_rule_type(self, rule_name, rule_cls):
|
||||
"""Adds a rule type to the namespace.
|
||||
|
||||
Args:
|
||||
rule_name: The name of the rule type exposed to modules.
|
||||
rule_cls: Rule type class.
|
||||
"""
|
||||
def rule_definition(name, *args, **kwargs):
|
||||
rule = rule_cls(name, *args, **kwargs)
|
||||
_emit_rule(rule)
|
||||
rule_definition.rule_name = rule_name
|
||||
if rule_name in self.rule_types:
|
||||
raise KeyError('Rule type "%s" already defined' % (rule_name))
|
||||
self.rule_types[rule_name] = rule_definition
|
||||
|
||||
def add_rule_type_fn(self, rule_type):
|
||||
"""Adds a rule type to the namespace.
|
||||
This assumes the type is a function that is setup to emit the rule.
|
||||
It should only be used by internal methods.
|
||||
|
||||
Args:
|
||||
rule_type: Rule type.
|
||||
"""
|
||||
rule_name = rule_type.rule_name
|
||||
if rule_name in self.rule_types:
|
||||
raise KeyError('Rule type "%s" already defined' % (rule_name))
|
||||
self.rule_types[rule_name] = rule_type
|
||||
|
||||
def discover(self, path=None):
|
||||
"""Recursively searches the given path for rule type definitions.
|
||||
Files are searched with the pattern '*_rules.py' for types decorated with
|
||||
@build_rule.
|
||||
|
||||
Each module is imported as discovered into the python module list and will
|
||||
be retained. Calling this multiple times with the same path has no effect.
|
||||
|
||||
Args:
|
||||
path: Path to search for rule type modules. If omitted then the built-in
|
||||
rule path will be searched instead. If the path points to a file it
|
||||
will be checked, even if it does not match the name rules.
|
||||
"""
|
||||
original_rule_types = self.rule_types.copy()
|
||||
try:
|
||||
if not path:
|
||||
path = os.path.join(os.path.dirname(__file__), 'rules')
|
||||
if os.path.isfile(path):
|
||||
self._discover_in_file(path)
|
||||
else:
|
||||
for (dirpath, dirnames, filenames) in os.walk(path):
|
||||
for filename in filenames:
|
||||
if fnmatch.fnmatch(filename, '*_rules.py'):
|
||||
self._discover_in_file(os.path.join(dirpath, filename))
|
||||
except:
|
||||
# Restore original types (don't take any of the discovered rules)
|
||||
self.rule_types = original_rule_types
|
||||
raise
|
||||
|
||||
def _discover_in_file(self, path):
|
||||
"""Loads the given python file to add all of its rules.
|
||||
|
||||
Args:
|
||||
path: Python file path.
|
||||
"""
|
||||
global _RULE_NAMESPACE
|
||||
assert _RULE_NAMESPACE is None
|
||||
_RULE_NAMESPACE = self
|
||||
try:
|
||||
name = os.path.splitext(os.path.basename(path))[0]
|
||||
module = imp.load_source(name, path)
|
||||
finally:
|
||||
_RULE_NAMESPACE = None
|
||||
|
||||
|
||||
# Used by begin_capturing_emitted_rules/build_rule to track all emitted rules
|
||||
_EMIT_RULE_SCOPE = None
|
||||
|
||||
def begin_capturing_emitted_rules():
|
||||
"""Begins capturing all rules emitted by @build_rule.
|
||||
Use end_capturing_emitted_rules to end capturing and return the list of rules.
|
||||
"""
|
||||
global _EMIT_RULE_SCOPE
|
||||
assert not _EMIT_RULE_SCOPE
|
||||
_EMIT_RULE_SCOPE = []
|
||||
|
||||
def end_capturing_emitted_rules():
|
||||
"""Ends a rule capture and returns any rules emitted.
|
||||
|
||||
Returns:
|
||||
A list of rules that were emitted by @build_rule.
|
||||
"""
|
||||
global _EMIT_RULE_SCOPE
|
||||
assert _EMIT_RULE_SCOPE is not None
|
||||
rules = _EMIT_RULE_SCOPE
|
||||
_EMIT_RULE_SCOPE = None
|
||||
return rules
|
||||
|
||||
def _emit_rule(rule):
|
||||
"""Emits a rule.
|
||||
This should only ever be called while capturing.
|
||||
|
||||
Args:
|
||||
rule: Rule that is being emitted.
|
||||
"""
|
||||
global _EMIT_RULE_SCOPE
|
||||
assert _EMIT_RULE_SCOPE is not None
|
||||
_EMIT_RULE_SCOPE.append(rule)
|
||||
|
||||
|
||||
class build_rule(object):
|
||||
"""A decorator for build rule classes.
|
||||
Use this to register build rule classes. A class decorated with this will be
|
||||
exposed to modules with the given rule_name. It should be callable and, on
|
||||
call, use emit_rule to emit a new rule.
|
||||
"""
|
||||
|
||||
def __init__(self, rule_name):
|
||||
"""Initializes the build rule decorator.
|
||||
|
||||
Args:
|
||||
rule_name: The name of the rule type exposed to modules.
|
||||
"""
|
||||
self.rule_name = rule_name
|
||||
|
||||
def __call__(self, cls):
|
||||
# This wrapper function makes it possible to record all invocations of
|
||||
# a rule while loading the module
|
||||
def rule_definition(name, *args, **kwargs):
|
||||
rule = cls(name, *args, **kwargs)
|
||||
_emit_rule(rule)
|
||||
rule_definition.rule_name = self.rule_name
|
||||
|
||||
# Add the (wrapped) rule type to the global namespace
|
||||
# We support not having an active namespace so that tests can import
|
||||
# rule files without dying
|
||||
global _RULE_NAMESPACE
|
||||
if _RULE_NAMESPACE:
|
||||
_RULE_NAMESPACE.add_rule_type_fn(rule_definition)
|
||||
return cls
|
|
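A minimal sketch of defining a custom rule type with the decorator above; the
rule name, class, and attribute are illustrative:

from anvil.rule import Rule, build_rule

# Placed in a *_rules.py file so RuleNamespace.discover picks it up.
@build_rule('my_rule')
class MyRule(Rule):
  def __init__(self, name, flags=None, *args, **kwargs):
    super(MyRule, self).__init__(name, *args, **kwargs)
    self.flags = flags or []

After discovery, BUILD files can call my_rule('a', srcs=['a.txt']) and the
generated wrapper emits the constructed rule into the active capture scope.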
@@ -0,0 +1,237 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Tests for the rule module.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import os
|
||||
import unittest2
|
||||
|
||||
from rule import *
|
||||
from test import FixtureTestCase
|
||||
|
||||
|
||||
class RuleTest(unittest2.TestCase):
|
||||
"""Behavioral tests of the Rule type."""
|
||||
|
||||
def testRuleNames(self):
|
||||
with self.assertRaises(NameError):
|
||||
Rule(None)
|
||||
with self.assertRaises(NameError):
|
||||
Rule('')
|
||||
with self.assertRaises(NameError):
|
||||
Rule(' ')
|
||||
with self.assertRaises(NameError):
|
||||
Rule(' a')
|
||||
with self.assertRaises(NameError):
|
||||
Rule('a ')
|
||||
with self.assertRaises(NameError):
|
||||
Rule(' a ')
|
||||
with self.assertRaises(NameError):
|
||||
Rule('a\n')
|
||||
with self.assertRaises(NameError):
|
||||
Rule('a\t')
|
||||
with self.assertRaises(NameError):
|
||||
Rule('a b')
|
||||
with self.assertRaises(NameError):
|
||||
Rule(':a')
|
||||
rule = Rule('a')
|
||||
self.assertEqual(rule.name, 'a')
|
||||
self.assertEqual(rule.path, ':a')
|
||||
Rule('\u0CA_\u0CA')
|
||||
|
||||
def testRuleSrcs(self):
|
||||
rule = Rule('r')
|
||||
self.assertEqual(len(rule.srcs), 0)
|
||||
|
||||
srcs = ['a', 'b', ':c']
|
||||
rule = Rule('r', srcs=srcs)
|
||||
self.assertEqual(len(rule.srcs), 3)
|
||||
self.assertIsNot(rule.srcs, srcs)
|
||||
srcs[0] = 'x'
|
||||
self.assertEqual(rule.srcs[0], 'a')
|
||||
|
||||
srcs = 'a'
|
||||
rule = Rule('r', srcs=srcs)
|
||||
self.assertEqual(len(rule.srcs), 1)
|
||||
self.assertEqual(rule.srcs[0], 'a')
|
||||
|
||||
rule = Rule('r', srcs=None)
|
||||
rule = Rule('r', srcs='')
|
||||
self.assertEqual(len(rule.srcs), 0)
|
||||
with self.assertRaises(TypeError):
|
||||
Rule('r', srcs={})
|
||||
with self.assertRaises(TypeError):
|
||||
Rule('r', srcs=[None])
|
||||
with self.assertRaises(TypeError):
|
||||
Rule('r', srcs=[''])
|
||||
with self.assertRaises(TypeError):
|
||||
Rule('r', srcs=[{}])
|
||||
with self.assertRaises(NameError):
|
||||
Rule('r', srcs=' a')
|
||||
with self.assertRaises(NameError):
|
||||
Rule('r', srcs='a ')
|
||||
with self.assertRaises(NameError):
|
||||
Rule('r', srcs=' a ')
|
||||
|
||||
def testRuleDeps(self):
|
||||
rule = Rule('r')
|
||||
self.assertEqual(len(rule.deps), 0)
|
||||
|
||||
deps = [':a', ':b', ':c']
|
||||
rule = Rule('r', deps=deps)
|
||||
self.assertEqual(len(rule.deps), 3)
|
||||
self.assertIsNot(rule.deps, deps)
|
||||
deps[0] = 'x'
|
||||
self.assertEqual(rule.deps[0], ':a')
|
||||
|
||||
deps = ':a'
|
||||
rule = Rule('r', deps=deps)
|
||||
self.assertEqual(len(rule.deps), 1)
|
||||
self.assertEqual(rule.deps[0], ':a')
|
||||
|
||||
rule = Rule('r', deps=None)
|
||||
rule = Rule('r', deps='')
|
||||
self.assertEqual(len(rule.deps), 0)
|
||||
with self.assertRaises(TypeError):
|
||||
Rule('r', deps={})
|
||||
with self.assertRaises(TypeError):
|
||||
Rule('r', deps=[None])
|
||||
with self.assertRaises(TypeError):
|
||||
Rule('r', deps=[''])
|
||||
with self.assertRaises(TypeError):
|
||||
Rule('r', deps=[{}])
|
||||
with self.assertRaises(NameError):
|
||||
Rule('r', deps=' a')
|
||||
with self.assertRaises(NameError):
|
||||
Rule('r', deps='a ')
|
||||
with self.assertRaises(NameError):
|
||||
Rule('r', deps=' a ')
|
||||
|
||||
def testRuleDependentPaths(self):
|
||||
rule = Rule('r')
|
||||
self.assertEqual(rule.get_dependent_paths(), set([]))
|
||||
|
||||
rule = Rule('r', srcs=[':a', 'a.txt'])
|
||||
self.assertEqual(rule.get_dependent_paths(), set([':a', 'a.txt']))
|
||||
|
||||
rule = Rule('r', deps=[':a', 'm:b'])
|
||||
self.assertEqual(rule.get_dependent_paths(), set([':a', 'm:b']))
|
||||
|
||||
rule = Rule('r', srcs=['a.txt'], deps=[':b'])
|
||||
self.assertEqual(rule.get_dependent_paths(), set(['a.txt', ':b']))
|
||||
|
||||
rule = Rule('r', srcs=[':b'], deps=[':b'])
|
||||
self.assertEqual(rule.get_dependent_paths(), set([':b']))
|
||||
|
||||
with self.assertRaises(NameError):
|
||||
Rule('r', deps=['a.txt'])
|
||||
|
||||
class RuleWithAttrs(Rule):
|
||||
def __init__(self, name, extra_srcs=None, extra_deps=None,
|
||||
*args, **kwargs):
|
||||
super(RuleWithAttrs, self).__init__(name, *args, **kwargs)
|
||||
self.extra_srcs = extra_srcs[:]
|
||||
self._append_dependent_paths(self.extra_srcs)
|
||||
self.extra_deps = extra_deps[:]
|
||||
self._append_dependent_paths(self.extra_deps, require_semicolon=True)
|
||||
|
||||
rule = RuleWithAttrs('r', srcs=['a.txt'], deps=[':b'],
|
||||
extra_srcs=['c.txt'], extra_deps=[':d'])
|
||||
self.assertEqual(rule.get_dependent_paths(), set([
|
||||
'a.txt', ':b', 'c.txt', ':d']))
|
||||
|
||||
def testRuleCacheKey(self):
|
||||
rule1 = Rule('r1')
|
||||
rule1_key = rule1.compute_cache_key()
|
||||
self.assertIsNotNone(rule1_key)
|
||||
self.assertGreater(len(rule1_key), 0)
|
||||
self.assertEqual(rule1_key, rule1.compute_cache_key())
|
||||
rule1.srcs.append('a')
|
||||
self.assertNotEqual(rule1_key, rule1.compute_cache_key())
|
||||
|
||||
rule1 = Rule('r1')
|
||||
rule2 = Rule('r1')
|
||||
self.assertEqual(rule1.compute_cache_key(), rule2.compute_cache_key())
|
||||
rule1 = Rule('r1')
|
||||
rule2 = Rule('r2')
|
||||
self.assertNotEqual(rule1.compute_cache_key(), rule2.compute_cache_key())
|
||||
|
||||
rule1 = Rule('r1', srcs='a')
|
||||
rule2 = Rule('r1', srcs='a')
|
||||
self.assertEqual(rule1.compute_cache_key(), rule2.compute_cache_key())
|
||||
rule1 = Rule('r1', srcs='a')
|
||||
rule2 = Rule('r1', srcs='b')
|
||||
self.assertNotEqual(rule1.compute_cache_key(), rule2.compute_cache_key())
|
||||
rule1 = Rule('r1', deps=':a')
|
||||
rule2 = Rule('r1', deps=':a')
|
||||
self.assertEqual(rule1.compute_cache_key(), rule2.compute_cache_key())
|
||||
rule1 = Rule('r1', deps=':a')
|
||||
rule2 = Rule('r1', deps=':b')
|
||||
self.assertNotEqual(rule1.compute_cache_key(), rule2.compute_cache_key())
|
||||
rule1 = Rule('r1', srcs='a', deps=':a')
|
||||
rule2 = Rule('r1', srcs='a', deps=':a')
|
||||
self.assertEqual(rule1.compute_cache_key(), rule2.compute_cache_key())
|
||||
rule1 = Rule('r1', srcs='a', deps=':a')
|
||||
rule2 = Rule('r1', srcs='b', deps=':b')
|
||||
self.assertNotEqual(rule1.compute_cache_key(), rule2.compute_cache_key())
|
||||
|
||||
def testRuleFilter(self):
|
||||
rule = Rule('a')
|
||||
self.assertIsNone(rule.src_filter)
|
||||
rule = Rule('a', src_filter='')
|
||||
self.assertIsNone(rule.src_filter)
|
||||
rule = Rule('a', src_filter='*.js')
|
||||
self.assertEqual(rule.src_filter, '*.js')
|
||||
|
||||
|
||||
class RuleNamespaceTest(FixtureTestCase):
|
||||
"""Behavioral tests of the Rule type."""
|
||||
fixture = 'rules'
|
||||
|
||||
def testManual(self):
|
||||
ns = RuleNamespace()
|
||||
self.assertEqual(len(ns.rule_types), 0)
|
||||
|
||||
class MockRule1(Rule):
|
||||
pass
|
||||
ns.add_rule_type('mock_rule_1', MockRule1)
|
||||
self.assertEqual(len(ns.rule_types), 1)
|
||||
|
||||
with self.assertRaises(KeyError):
|
||||
ns.add_rule_type('mock_rule_1', MockRule1)
|
||||
|
||||
def testDiscovery(self):
|
||||
ns = RuleNamespace()
|
||||
ns.discover()
|
||||
self.assertTrue(ns.rule_types.has_key('file_set'))
|
||||
|
||||
rule_path = self.root_path
|
||||
ns = RuleNamespace()
|
||||
ns.discover(rule_path)
|
||||
self.assertEqual(len(ns.rule_types), 3)
|
||||
self.assertFalse(ns.rule_types.has_key('file_set'))
|
||||
self.assertTrue(ns.rule_types.has_key('rule_a'))
|
||||
self.assertTrue(ns.rule_types.has_key('rule_b'))
|
||||
self.assertTrue(ns.rule_types.has_key('rule_c'))
|
||||
self.assertFalse(ns.rule_types.has_key('rule_x'))
|
||||
|
||||
rule_path = os.path.join(self.root_path, 'dupe.py')
|
||||
ns = RuleNamespace()
|
||||
with self.assertRaises(KeyError):
|
||||
ns.discover(rule_path)
|
||||
self.assertEqual(len(ns.rule_types), 0)
|
||||
|
||||
rule_path = os.path.join(self.root_path, 'more', 'more_rules.py')
|
||||
ns = RuleNamespace()
|
||||
ns.discover(rule_path)
|
||||
self.assertEqual(len(ns.rule_types), 1)
|
||||
self.assertTrue(ns.rule_types.has_key('rule_c'))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
|
@@ -0,0 +1,202 @@
|
|||
Some ideas for rules, based on old code:
|
||||
|
||||
# ==============================================================================
|
||||
# Common Tasks
|
||||
# ==============================================================================
|
||||
|
||||
CopyFilesTask
|
||||
ExecutableTask
|
||||
- JavaExecutableTask
|
||||
- NodeExecutableTask
|
||||
- PythonExecutableTask
|
||||
|
||||
# ==============================================================================
|
||||
# Core
|
||||
# ==============================================================================
|
||||
|
||||
copy_files(
|
||||
name='a',
|
||||
srcs=['a/file.txt'])
|
||||
- results in out/a/file.txt
|
||||
|
||||
copy_files(
|
||||
name='a',
|
||||
srcs=glob('**/*.txt'))
|
||||
- results in out/things/a/file.txt + others
|
||||
|
||||
concat_files(
|
||||
name='catted',
|
||||
srcs=['a.txt'] + glob('**/*.txt'))
|
||||
- results in out/catted
|
||||
|
||||
concat_files(
|
||||
name='catted',
|
||||
srcs=['a.txt'] + glob('**/*.txt'),
|
||||
out='catted.txt')
|
||||
- results in out/catted.txt
|
||||
|
||||
template_files(
|
||||
name='templated_txt',
|
||||
srcs=glob('**/*.txt'),
|
||||
params={
|
||||
'author': 'bob',
|
||||
'year': '2012',
|
||||
})
|
||||
- results in out/...txt with ${author} and ${year} replaced
|
||||
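- the substitution itself maps onto Python's string.Template (as the
template_files rule docs note); a minimal sketch with illustrative values:

import string
t = string.Template('Copyright ${year} ${author}')
print t.substitute({'author': 'bob', 'year': '2012'})
# -> Copyright 2012 bob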
|
||||
# ==============================================================================
|
||||
# Audio
|
||||
# ==============================================================================
|
||||
|
||||
compile_soundbank(
|
||||
name='bank1',
|
||||
srcs=['*.wav'],
|
||||
out='assets/audio/')
|
||||
- creates out/assets/audio/bank1.wav + bank1.json
|
||||
|
||||
SOUNDBANK_FORMATS = select_any({
|
||||
'RELEASE': ['audio/wav', 'audio/mpeg', 'audio/ogg', 'audio/mp4',],
|
||||
}, ['audio/wav',])
|
||||
transcode_audio(
|
||||
name='encoded_banks',
|
||||
srcs=[':bank1', ':bank2'],
|
||||
formats=SOUNDBANK_FORMATS)
|
||||
- encodes all input audio files to the specified formats, updating the json
|
||||
with any new data sources - in this case, it finds bank1.json and bank2.json,
|
||||
transcodes all sources for them, and updates their respective json files -
|
||||
the output files are all inputs + the transcoded files
|
||||
|
||||
generate_soundbank_js(
|
||||
name='bank_js',
|
||||
srcs=':encoded_banks',
|
||||
namespace='foo.audio',
|
||||
gen='foo/audio/')
|
||||
- for each json file generates a js file from the json metadata, resulting in
|
||||
gen/foo/audio/bank1.js (class foo.audio.bank1) + bank2.js
|
||||
|
||||
compile_tracklist(
|
||||
name='music',
|
||||
srcs=['*.ogg'],)
|
||||
- creates out/assets/audio/music.ogg (copy) + music.json
|
||||
|
||||
TRACKLIST_FORMATS=select_any({
|
||||
'RELEASE': ['audio/mpeg', 'audio/ogg', 'audio/mp4',],
|
||||
}, ['audio/ogg',])
|
||||
transcode_audio(
|
||||
name='encoded_music',
|
||||
srcs=':music',
|
||||
formats=TRACKLIST_FORMATS)
|
||||
generate_tracklist_js(
|
||||
name='music_js',
|
||||
srcs=':encoded_music',
|
||||
namespace='foo.audio',
|
||||
gen='foo/audio/')
|
||||
- for each json file generates a js file from the json metadata, resulting in
|
||||
gen/foo/audio/music.js (class foo.audio.music)
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# GLSL
|
||||
# ==============================================================================
|
||||
|
||||
compile_glsl(
|
||||
name='compiled_glsl',
|
||||
srcs=glob('assets/glsl/**/*.glsl*'))
|
||||
- compiles all .glsl files into .json files, such as assets/glsl/a.glsl ->
|
||||
out/assets/glsl/a.json - any glsllib files are ignored, but may be used by
|
||||
the compiler
|
||||
outputs are only the json files
|
||||
|
||||
generate_glsl_js(
|
||||
name='glsl_js',
|
||||
srcs=':compiled_glsl',
|
||||
namespace='foo.glsl',
|
||||
gen='foo/glsl/')
|
||||
- for each json file generates a js file from the json metadata, resulting in
|
||||
gen/foo/glsl/a.js (class foo.glsl.a)
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# CSS
|
||||
# ==============================================================================
|
||||
|
||||
compile_gss(
|
||||
name='page_gss',
|
||||
srcs=glob('assets/css/**/*.gss'),
|
||||
out='css/page_gss.css',
|
||||
gen='css/page_gss.js')
|
||||
- compiles all gss into out/css/page.css, and drops the map file to
|
||||
gen/css/page.js
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# Closure JS
|
||||
# ==============================================================================
|
||||
|
||||
JS_NAMESPACES=['myns1', 'myns2']
|
||||
|
||||
fix_closure_js(
|
||||
name='fix_js',
|
||||
srcs=glob('src/**/*.js'),
|
||||
namespaces=JS_NAMESPACES)
|
||||
- runs fixjsstyle on all sources (with the same args as lint_closure_js) and
|
||||
returns all srcs as outputs
|
||||
|
||||
lint_closure_js(
|
||||
name='lint_js',
|
||||
srcs=':fix_js',
|
||||
namespaces=JS_NAMESPACES)
|
||||
- runs gjslint over all of the source files with the following args:
|
||||
--multiprocess
|
||||
--strict
|
||||
--jslint_error=all
|
||||
--closurized_namespaces=goog,gf, + namespaces
|
||||
and returns all srcs as outputs
|
||||
|
||||
file_set(
|
||||
name='all_js',
|
||||
src_filter='*.js',
|
||||
srcs=[':fix_js', ':audio_rules', ':page_gss',])
|
||||
generate_closure_deps_js(
|
||||
name='deps_js',
|
||||
srcs=[':all_js'],
|
||||
gen='my_deps.js')
|
||||
- runs genjsdeps on all sources and generates the gen/my_deps.js file
|
||||
note that this pulls in all generated JS code by sourcing from all rules
|
||||
|
||||
file_set(
|
||||
name='uncompiled',
|
||||
deps=[':deps_js'])
|
||||
- a synthetic rule to allow for easy 'uncompiled' building
|
||||
|
||||
SHARED_JS_FLAGS=['--define=foo=false']
|
||||
compile_closure_js(
|
||||
name='compiled_js',
|
||||
srcs=[':all_js', ':deps_js',],
|
||||
out='js/compiled.js',
|
||||
root_namespace='myns1.start',
|
||||
compiler_flags=SHARED_JS_FLAGS + select_many({
|
||||
'RELEASE': ['--define=gf.BUILD_CLIENT=false',
|
||||
'--define=goog.DEBUG=false',
|
||||
'--define=goog.asserts.ENABLE_ASSERTS=false',],
|
||||
})
|
||||
- creates a out/js/compiled.js file based on all sources
|
||||
could add source_map='foo.map' to enable source mapping output
|
||||
wrap_with_global='s' to do (function(){...})(s)
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# Future...
|
||||
# ==============================================================================
|
||||
|
||||
* wget/curl-esque rules w/ caching (grab text/json/manifest from somewhere)
|
||||
* SASS/LESS/etc
|
||||
* uglifyjs/etc
|
||||
* jslint
|
||||
* html/json/etc linting
|
||||
* localization utils (common format translations)
|
||||
* soy compiler
|
||||
* images/texture compression
|
||||
* spriting
|
||||
* more advanced templating with mako
|
||||
* git info (get current commit hash/etc) - embedded version #s
|
|
@@ -0,0 +1,6 @@
|
|||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
|
@@ -0,0 +1,243 @@
|
|||
# Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
"""Core rules for the build system.
|
||||
"""
|
||||
|
||||
__author__ = 'benvanik@google.com (Ben Vanik)'
|
||||
|
||||
|
||||
import io
|
||||
import os
|
||||
import shutil
|
||||
import string
|
||||
|
||||
from anvil.context import RuleContext
|
||||
from anvil.rule import Rule, build_rule
|
||||
from anvil.task import Task
|
||||
|
||||
|
||||
@build_rule('file_set')
|
||||
class FileSetRule(Rule):
|
||||
"""A file set aggregation rule.
|
||||
All source files are globbed together and de-duplicated before being passed
|
||||
on as outputs. If a src_filter is provided then it is used to filter all
|
||||
sources.
|
||||
|
||||
File set rules can be used as synthetic rules for making dependencies easier
|
||||
to manage, or for filtering many rules into one.
|
||||
|
||||
Inputs:
|
||||
srcs: Source file paths.
|
||||
|
||||
Outputs:
|
||||
All of the source file paths, passed-through unmodified.
|
||||
"""
|
||||
|
||||
def __init__(self, name, *args, **kwargs):
|
||||
"""Initializes a file set rule.
|
||||
|
||||
Args:
|
||||
name: Rule name.
|
||||
"""
|
||||
super(FileSetRule, self).__init__(name, *args, **kwargs)
|
||||
|
||||
class _Context(RuleContext):
|
||||
def begin(self):
|
||||
super(FileSetRule._Context, self).begin()
|
||||
self._append_output_paths(self.src_paths)
|
||||
self._succeed()
|
||||
|
||||
|
||||
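# An illustrative BUILD usage (target names assumed): aggregate the outputs
# of several rules behind one easy dependency, filtered to .js files.
#   file_set('all_js',
#       src_filter='*.js',
#       srcs=[':fix_js', ':page_gss'])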


@build_rule('copy_files')
class CopyFilesRule(Rule):
  """Copy files from one path to another.
  Copies all source files to the output path.

  The resulting structure will match that of all files relative to the path of
  the module the rule is in. For example, srcs='a.txt' will result in
  '$out/a.txt', and srcs='dir/a.txt' will result in '$out/dir/a.txt'.

  If a src_filter is provided then it is used to filter all sources.

  This copies all files and preserves all file metadata, but does not preserve
  directory metadata.

  Inputs:
    srcs: Source file paths.

  Outputs:
    All of the copied files in the output path.
  """

  def __init__(self, name, *args, **kwargs):
    """Initializes a copy files rule.

    Args:
      name: Rule name.
    """
    super(CopyFilesRule, self).__init__(name, *args, **kwargs)

  class _Context(RuleContext):
    def begin(self):
      super(CopyFilesRule._Context, self).begin()

      # Get all source -> output paths (and ensure directories exist)
      file_pairs = []
      for src_path in self.src_paths:
        out_path = self._get_out_path_for_src(src_path)
        self._ensure_output_exists(os.path.dirname(out_path))
        self._append_output_paths([out_path])
        file_pairs.append((src_path, out_path))

      # Async issue copying task
      d = self._run_task_async(_CopyFilesTask(
          self.build_env, file_pairs))
      self._chain(d)


class _CopyFilesTask(Task):
  def __init__(self, build_env, file_pairs, *args, **kwargs):
    super(_CopyFilesTask, self).__init__(build_env, *args, **kwargs)
    self.file_pairs = file_pairs

  def execute(self):
    for file_pair in self.file_pairs:
      shutil.copy2(file_pair[0], file_pair[1])
    return True
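
# Example (editor's sketch, mirroring the copy_files fixtures later in this
# commit):
#
#   file_set('all_txt', srcs=glob('**/*.txt'))
#   copy_files('copy_all_txt', srcs=':all_txt')
#
# With sources 'a.txt' and 'dir/b.txt' this yields '$out/a.txt' and
# '$out/dir/b.txt', preserving the module-relative layout.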


@build_rule('concat_files')
class ConcatFilesRule(Rule):
  """Concatenate many files into one.
  Takes all source files and concatenates them together. The order is based on
  the ordering of the srcs list, and all files are treated as binary.

  Note that if referencing other rules or globs the order of files may be
  undefined, so if order matters try to enumerate files manually.

  TODO(benvanik): support a text mode?

  Inputs:
    srcs: Source file paths. The order is the order in which they will be
        concatenated.
    out: Optional output name. If none is provided then the rule name will be
        used.

  Outputs:
    All of the srcs concatenated into a single file path. If no out is
    specified a file with the name of the rule will be created.
  """

  def __init__(self, name, out=None, *args, **kwargs):
    """Initializes a concatenate files rule.

    Args:
      name: Rule name.
      out: Optional output name.
    """
    super(ConcatFilesRule, self).__init__(name, *args, **kwargs)
    self.out = out

  class _Context(RuleContext):
    def begin(self):
      super(ConcatFilesRule._Context, self).begin()

      output_path = self._get_out_path(name=self.rule.out)
      self._ensure_output_exists(os.path.dirname(output_path))
      self._append_output_paths([output_path])

      # Async issue concat task
      d = self._run_task_async(_ConcatFilesTask(
          self.build_env, self.src_paths, output_path))
      self._chain(d)


class _ConcatFilesTask(Task):
  def __init__(self, build_env, src_paths, output_path, *args, **kwargs):
    super(_ConcatFilesTask, self).__init__(build_env, *args, **kwargs)
    self.src_paths = src_paths
    self.output_path = output_path

  def execute(self):
    with io.open(self.output_path, 'wt') as out_file:
      for src_path in self.src_paths:
        with io.open(src_path, 'rt') as in_file:
          out_file.write(in_file.read())
    return True
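
# Example (editor's sketch, mirroring the concat_files fixtures later in this
# commit): files are joined in srcs order, so this writes '1\n2\n3\n4\n' to
# 'concat.txt':
#
#   concat_files(
#       name='concat_out',
#       srcs=['1.txt', '2.txt', '3.txt', '4.txt'],
#       out='concat.txt')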


@build_rule('template_files')
class TemplateFilesRule(Rule):
  """Applies simple templating to a set of files.
  Processes each source file replacing a list of strings with corresponding
  strings.

  This uses the Python string templating functionality documented here:
  http://docs.python.org/library/string.html#template-strings

  Identifiers in the source template should be of the form "${identifier}",
  each of which maps to a key in the params dictionary.

  In order to prevent conflicts, it is strongly encouraged that a
  new_extension value is provided. If a source file has an extension it will
  be replaced with the specified one, and files without extensions will have
  it added.

  TODO(benvanik): more advanced template vars? perhaps regex?

  Inputs:
    srcs: Source file paths.
    new_extension: The extension to replace (or add) to all output files, with
        a leading dot ('.txt').
    params: A dictionary of key-value replacement parameters.

  Outputs:
    One file for each source file with the templating rules applied.
  """

  def __init__(self, name, new_extension=None, params=None, *args, **kwargs):
    """Initializes a file templating rule.

    Args:
      name: Rule name.
      new_extension: Replacement extension ('.txt').
      params: A dictionary of key-value replacement parameters.
    """
    super(TemplateFilesRule, self).__init__(name, *args, **kwargs)
    self.new_extension = new_extension
    self.params = params

  class _Context(RuleContext):
    def begin(self):
      super(TemplateFilesRule._Context, self).begin()

      # Get all source -> output paths (and ensure directories exist)
      file_pairs = []
      for src_path in self.src_paths:
        out_path = self._get_out_path_for_src(src_path)
        if self.rule.new_extension:
          out_path = os.path.splitext(out_path)[0] + self.rule.new_extension
        self._ensure_output_exists(os.path.dirname(out_path))
        self._append_output_paths([out_path])
        file_pairs.append((src_path, out_path))

      # Async issue templating task
      d = self._run_task_async(_TemplateFilesTask(
          self.build_env, file_pairs, self.rule.params))
      self._chain(d)


class _TemplateFilesTask(Task):
  def __init__(self, build_env, file_pairs, params, *args, **kwargs):
    super(_TemplateFilesTask, self).__init__(build_env, *args, **kwargs)
    self.file_pairs = file_pairs
    self.params = params

  def execute(self):
    for file_pair in self.file_pairs:
      with io.open(file_pair[0], 'rt') as f:
        template_str = f.read()
      template = string.Template(template_str)
      result_str = template.substitute(self.params)
      with io.open(file_pair[1], 'wt') as f:
        f.write(result_str)
    return True
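
# Example (editor's sketch, mirroring the template_files fixtures later in
# this commit): chaining two passes first substitutes '${arg1}' and then
# '${arg2}':
#
#   template_files(
#       name='template_dep_1',
#       srcs=glob('**/*.nfo'),
#       params={'arg1': '${arg2}',})
#   template_files(
#       name='template_dep_2',
#       srcs=':template_dep_1',
#       new_extension='.out',
#       params={'arg2': 'world!',})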
@ -0,0 +1,210 @@
#!/usr/bin/python

# Copyright 2012 Google Inc. All Rights Reserved.

"""Tests for the core_rules module.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import os
import unittest2

from anvil.context import BuildContext, BuildEnvironment, Status
from anvil.project import FileModuleResolver, Project
from anvil.test import FixtureTestCase
from core_rules import *


class RuleTestCase(FixtureTestCase):
  def assertRuleResultsEqual(self, build_ctx, rule_path, expected_file_matches,
                             output_prefix=''):
    results = build_ctx.get_rule_results(rule_path)
    self.assertEqual(results[0], Status.SUCCEEDED)
    output_paths = results[1]

    root_path = os.path.join(build_ctx.build_env.root_path, output_prefix)
    result_file_list = [os.path.relpath(f, root_path) for f in output_paths]
    self.assertEqual(
        set(result_file_list),
        set(expected_file_matches))


class FileSetRuleTest(RuleTestCase):
  """Behavioral tests of the FileSetRule type."""
  fixture = 'core_rules/file_set'

  def setUp(self):
    super(FileSetRuleTest, self).setUp()
    self.build_env = BuildEnvironment(root_path=self.root_path)

  def test(self):
    project = Project(module_resolver=FileModuleResolver(self.root_path))

    with BuildContext(self.build_env, project) as ctx:
      self.assertTrue(ctx.execute_sync([
          ':a',
          ':a_glob',
          ':b_ref',
          ':all_glob',
          ':combo',
          ':dupes',
          'dir:b',
          'dir:b_glob',
          ]))

      self.assertRuleResultsEqual(ctx,
          ':a', ['a.txt',])
      self.assertRuleResultsEqual(ctx,
          ':a_glob', ['a.txt',])
      self.assertRuleResultsEqual(ctx,
          ':b_ref', ['dir/b.txt',])
      self.assertRuleResultsEqual(ctx,
          ':all_glob', ['a.txt', 'dir/b.txt',])
      self.assertRuleResultsEqual(ctx,
          ':combo', ['a.txt', 'dir/b.txt',])
      self.assertRuleResultsEqual(ctx,
          ':dupes', ['a.txt', 'dir/b.txt',])
      self.assertRuleResultsEqual(ctx,
          'dir:b', ['dir/b.txt',])
      self.assertRuleResultsEqual(ctx,
          'dir:b_glob', ['dir/b.txt',])


class CopyFilesRuleTest(RuleTestCase):
  """Behavioral tests of the CopyFilesRule type."""
  fixture = 'core_rules/copy_files'

  def setUp(self):
    super(CopyFilesRuleTest, self).setUp()
    self.build_env = BuildEnvironment(root_path=self.root_path)

  def test(self):
    project = Project(module_resolver=FileModuleResolver(self.root_path))

    with BuildContext(self.build_env, project) as ctx:
      self.assertTrue(ctx.execute_sync([
          ':copy_all_txt',
          'dir:copy_c',
          ]))

      self.assertRuleResultsEqual(ctx,
          ':copy_all_txt', ['a.txt',
                            'dir/b.txt'],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/a.txt'),
          'a\n')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/dir/b.txt'),
          'b\n')

      self.assertRuleResultsEqual(ctx,
          'dir:copy_c', ['dir/c.not-txt',],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/dir/c.not-txt'),
          'c\n')


class ConcatFilesRuleTest(RuleTestCase):
  """Behavioral tests of the ConcatFilesRule type."""
  fixture = 'core_rules/concat_files'

  def setUp(self):
    super(ConcatFilesRuleTest, self).setUp()
    self.build_env = BuildEnvironment(root_path=self.root_path)

  def test(self):
    project = Project(module_resolver=FileModuleResolver(self.root_path))

    with BuildContext(self.build_env, project) as ctx:
      self.assertTrue(ctx.execute_sync([
          ':concat',
          ':concat_out',
          ':concat_template',
          ':templated',
          ]))

      self.assertRuleResultsEqual(ctx,
          ':concat', ['concat',],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/concat'),
          '1\n2\n3\n4\n')

      self.assertRuleResultsEqual(ctx,
          ':concat_out', ['concat.txt',],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/concat.txt'),
          '1\n2\n3\n4\n')

      self.assertRuleResultsEqual(ctx,
          ':concat_template', ['concat_template',],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/concat_template'),
          '1\n2\n3\n4\nx${hello}x\n1\n2\n3\n4\n')
      self.assertRuleResultsEqual(ctx,
          ':templated', ['concat_template.out',],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/concat_template.out'),
          '1\n2\n3\n4\nxworld!x\n1\n2\n3\n4\n')


class TemplateFilesRuleTest(RuleTestCase):
  """Behavioral tests of the TemplateFilesRule type."""
  fixture = 'core_rules/template_files'

  def setUp(self):
    super(TemplateFilesRuleTest, self).setUp()
    self.build_env = BuildEnvironment(root_path=self.root_path)

  def test(self):
    project = Project(module_resolver=FileModuleResolver(self.root_path))

    with BuildContext(self.build_env, project) as ctx:
      self.assertTrue(ctx.execute_sync([
          ':template_all',
          ':template_dep_2',
          ]))

      self.assertRuleResultsEqual(ctx,
          ':template_all', ['a.txt',
                            'dir/b.txt'],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/a.txt'),
          '123world456\n')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/dir/b.txt'),
          'b123world456\n')

      self.assertRuleResultsEqual(ctx,
          ':template_dep_1', ['a.nfo',
                              'dir/b.nfo'],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/a.nfo'),
          '123${arg2}456\n')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/dir/b.nfo'),
          'b123${arg2}456\n')

      self.assertRuleResultsEqual(ctx,
          ':template_dep_2', ['a.out',
                              'dir/b.out'],
          output_prefix='build-out')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/a.out'),
          '123world!456\n')
      self.assertFileContents(
          os.path.join(self.root_path, 'build-out/dir/b.out'),
          'b123world!456\n')


if __name__ == '__main__':
  unittest2.main()
@ -0,0 +1,351 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Task/multiprocessing support.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import multiprocessing
import os
import re
import subprocess
import sys
import time

from async import Deferred


class Task(object):
  """Abstract base type for small tasks.
  A task should be the smallest possible unit of work a Rule may want to
  perform. Examples include copying a set of files, converting an mp3, or
  compiling some code.

  Tasks can execute in parallel with other tasks, and are run in a separate
  process. They must be pickleable and should access no global state.

  TODO(benvanik): add support for logging - a Queue that pushes back
  log/progress messages?
  """

  def __init__(self, build_env, *args, **kwargs):
    """Initializes a task.

    Args:
      build_env: The build environment for state.
    """
    self.build_env = build_env

  def execute(self):
    """Executes the task.
    This method will be called in a separate process and should not use any
    state not accessible from the Task. The Task will have been pickled and
    will not be merged back with the parent.

    The result of this method must be pickleable and will be sent back to the
    deferred callback. If an exception is raised it will be wrapped in the
    deferred's errback.

    Returns:
      A result to pass back to the deferred callback.
    """
    raise NotImplementedError()
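
# Example (editor's sketch, modeled on the SuccessTask in the task tests): a
# minimal Task subclass only carries pickleable state and implements
# execute():
#
#   class EchoTask(Task):  # hypothetical example class
#     def __init__(self, build_env, message, *args, **kwargs):
#       super(EchoTask, self).__init__(build_env, *args, **kwargs)
#       self.message = message
#
#     def execute(self):
#       return self.message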


class ExecutableError(Exception):
  """An exception concerning the execution of a command.
  """

  def __init__(self, return_code, *args, **kwargs):
    """Initializes an executable error.

    Args:
      return_code: The return code of the application.
    """
    super(ExecutableError, self).__init__(*args, **kwargs)
    self.return_code = return_code

  def __str__(self):
    return 'ExecutableError: call returned %s' % (self.return_code)


class ExecutableTask(Task):
  """A task that executes a command in the shell.

  If the call returns an error an ExecutableError is raised.
  """

  def __init__(self, build_env, executable_name, call_args=None,
               *args, **kwargs):
    """Initializes an executable task.

    Args:
      build_env: The build environment for state.
      executable_name: The name (or full path) of an executable.
      call_args: Arguments to pass to the executable.
    """
    super(ExecutableTask, self).__init__(build_env, *args, **kwargs)
    self.executable_name = executable_name
    self.call_args = call_args if call_args else []

  def execute(self):
    p = subprocess.Popen([self.executable_name] + self.call_args,
                         bufsize=-1,  # system default
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

    # TODO(benvanik): would be nice to support a few modes here - enabling
    #     streaming output from the process (for watching progress/etc).
    #     This right now just waits until it exits and grabs everything.
    (stdoutdata, stderrdata) = p.communicate()

    return_code = p.returncode
    if return_code != 0:
      raise ExecutableError(return_code=return_code)

    return (stdoutdata, stderrdata)
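
# Example (editor's sketch, mirroring the task tests): running a shell
# command and capturing its output:
#
#   task = ExecutableTask(build_env, 'cat', ['a.txt'])
#   (stdout, stderr) = task.execute()  # raises ExecutableError on non-zero exit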


class JavaExecutableTask(ExecutableTask):
  """A task that executes a Java class in the shell.
  """

  def __init__(self, build_env, jar_path, call_args=None, *args, **kwargs):
    """Initializes an executable task.

    Args:
      build_env: The build environment for state.
      jar_path: The name (or full path) of a jar to execute.
      call_args: Arguments to pass to the executable.
    """
    executable_name = 'java'
    # Parenthesized so the '-jar' prefix survives when call_args is None
    call_args = ['-jar', jar_path] + (call_args if call_args else [])
    super(JavaExecutableTask, self).__init__(build_env, executable_name,
                                             call_args, *args, **kwargs)

  @classmethod
  def detect_java_version(cls, java_executable='java'):
    """Gets the version number of Java.

    Returns:
      The version in the form of '1.7.0', or None if Java is not found.
    """
    try:
      p = subprocess.Popen([java_executable, '-version'],
                           stderr=subprocess.PIPE)
      line = p.communicate()[1]
      return re.search(r'[0-9\.]+', line).group()
    except:
      return None
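
# Example (editor's sketch; 'compiler.jar' is a hypothetical jar): probing
# for Java before queuing a jar task:
#
#   if JavaExecutableTask.detect_java_version():
#     task = JavaExecutableTask(build_env, 'compiler.jar', ['--help'])
#
# detect_java_version returns a version string such as '1.7.0', or None when
# no usable 'java' is on the PATH.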


# TODO(benvanik): node.js-specific executable task
# class NodeExecutableTask(ExecutableTask):
#   pass


# TODO(benvanik): python-specific executable task
# class PythonExecutableTask(ExecutableTask):
#   pass


class TaskExecutor(object):
  """An abstract queue for task execution.
  """

  def __init__(self, *args, **kwargs):
    """Initializes a task executor.
    """
    self.closed = False
    self._running_count = 0

  def __enter__(self):
    return self

  def __exit__(self, type, value, traceback):
    if not self.closed:
      self.close()

  def has_any_running(self):
    """
    Returns:
      True if there are any tasks still running.
    """
    return self._running_count > 0

  def run_task_async(self, task):
    """Queues a new task for execution.

    Args:
      task: Task object to execute on a worker thread.

    Returns:
      A deferred that signals completion of the task. The results of the task
      will be passed to the callback.

    Raises:
      RuntimeError: Invalid executor state.
    """
    raise NotImplementedError()

  def wait(self, deferreds):
    """Blocks waiting on a list of deferreds until they all complete.
    This should largely be used for testing. The deferreds must have been
    returned from run_task_async.

    Args:
      deferreds: A list of Deferreds (or one).
    """
    raise NotImplementedError()

  def close(self, graceful=True):
    """Closes the executor, waits for all tasks to complete, and joins.
    This will block until tasks complete.

    Args:
      graceful: True to allow outstanding tasks to complete.

    Raises:
      RuntimeError: Invalid executor state.
    """
    raise NotImplementedError()


class InProcessTaskExecutor(TaskExecutor):
  """A simple inline task executor.
  Blocks on task execution, performing all tasks in the running process.
  This makes testing simpler as all deferreds are complete upon callback.
  """

  def __init__(self, *args, **kwargs):
    """Initializes a task executor.
    """
    super(InProcessTaskExecutor, self).__init__(*args, **kwargs)

  def run_task_async(self, task):
    if self.closed:
      raise RuntimeError('Executor has been closed and cannot run new tasks')

    deferred = Deferred()
    try:
      result = task.execute()
      deferred.callback(result)
    except Exception as e:
      deferred.errback(exception=e)
    return deferred

  def wait(self, deferreds):
    pass

  def close(self, graceful=True):
    if self.closed:
      raise RuntimeError(
          'Attempting to close an executor that has already been closed')
    self.closed = True
    self._running_count = 0
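
# Example (editor's sketch, following the executor tests): executors are
# context managers, and wait() blocks until the given deferreds resolve:
#
#   with InProcessTaskExecutor() as executor:
#     d = executor.run_task_async(SomeTask(build_env))  # SomeTask: hypothetical
#     executor.wait(d)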


class MultiProcessTaskExecutor(TaskExecutor):
  """A pool for multiprocess task execution.
  """

  def __init__(self, worker_count=None, *args, **kwargs):
    """Initializes a task executor.
    This may take a bit to run, as the process pool is primed.

    Args:
      worker_count: Number of worker threads to use when building. None to use
          as many processors as are available.
    """
    super(MultiProcessTaskExecutor, self).__init__(*args, **kwargs)
    self.worker_count = worker_count
    try:
      self._pool = multiprocessing.Pool(processes=self.worker_count,
                                        initializer=_task_initializer)
    except OSError as e:  # pragma: no cover
      print e
      print 'Unable to initialize multiprocessing!'
      if sys.platform == 'cygwin':
        print ('Cygwin has known issues with multiprocessing and there\'s no '
               'workaround. Boo!')
      print 'Try running with -j 1 to disable multiprocessing'
      raise
    self._waiting_deferreds = {}

  def run_task_async(self, task):
    if self.closed:
      raise RuntimeError('Executor has been closed and cannot run new tasks')

    # Pass on results to the deferred
    deferred = Deferred()
    def _thunk_callback(*args, **kwargs):
      self._running_count = self._running_count - 1
      del self._waiting_deferreds[deferred]
      if len(args) and isinstance(args[0], Exception):
        deferred.errback(exception=args[0])
      else:
        deferred.callback(*args)

    # Queue
    self._running_count = self._running_count + 1
    async_result = self._pool.apply_async(_task_thunk, [task],
                                          callback=_thunk_callback)
    self._waiting_deferreds[deferred] = async_result

    return deferred

  def wait(self, deferreds):
    try:
      iter(deferreds)
    except:
      deferreds = [deferreds]
    spin_deferreds = []
    for deferred in deferreds:
      if deferred.is_done():
        continue
      if deferred not in self._waiting_deferreds:
        # Not a deferred created by this - queue for a spin wait
        spin_deferreds.append(deferred)
      else:
        async_result = self._waiting_deferreds[deferred]
        async_result.wait()
    for deferred in spin_deferreds:
      while not deferred.is_done():
        time.sleep(0.01)

  def close(self, graceful=True):
    if self.closed:
      raise RuntimeError(
          'Attempting to close an executor that has already been closed')
    self.closed = True
    if graceful:
      self._pool.close()
    else:
      self._pool.terminate()
    self._pool.join()
    self._running_count = 0
    self._waiting_deferreds.clear()
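
# Example (editor's sketch): worker_count caps the size of the process pool;
# None uses as many processes as there are processors:
#
#   with MultiProcessTaskExecutor(worker_count=2) as executor:
#     d = executor.run_task_async(SomeTask(build_env))  # SomeTask: hypothetical
#     executor.wait(d)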


def _task_initializer():  # pragma: no cover
  """Task executor process initializer, used by MultiProcessTaskExecutor.
  Called once on each process the TaskExecutor uses.
  """
  #print 'started! %s' % (multiprocessing.current_process().name)
  pass


def _task_thunk(task):  # pragma: no cover
  """Thunk for executing tasks, used by MultiProcessTaskExecutor.
  This is called from separate processes so do not access any global state.

  Args:
    task: Task to execute.

  Returns:
    The result of the task execution. This is passed to the deferred.
  """
  try:
    return task.execute()
  except Exception as e:
    return e
@ -0,0 +1,139 @@
#!/usr/bin/python

# Copyright 2012 Google Inc. All Rights Reserved.

"""Tests for the task module.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import os
import unittest2

from context import BuildEnvironment
from task import *
from test import AsyncTestCase, FixtureTestCase


class ExecutableTaskTest(FixtureTestCase):
  """Behavioral tests for ExecutableTask."""
  fixture = 'simple'

  def setUp(self):
    super(ExecutableTaskTest, self).setUp()
    self.build_env = BuildEnvironment(root_path=self.root_path)

  def testExecution(self):
    task = ExecutableTask(self.build_env, 'cat', [
        os.path.join(self.root_path, 'a.txt')])
    self.assertEqual(task.execute(),
                     ('hello!\n', ''))

    task = ExecutableTask(self.build_env, 'cat', [
        os.path.join(self.root_path, 'x.txt')])
    with self.assertRaises(ExecutableError):
      task.execute()

  def testJava(self):
    version = JavaExecutableTask.detect_java_version()
    self.assertNotEqual(len(version), 0)
    self.assertIsNone(
        JavaExecutableTask.detect_java_version(java_executable='xxx'))

    # TODO(benvanik): test a JAR somehow
    task = JavaExecutableTask(self.build_env, 'some_jar')


class SuccessTask(Task):
  def __init__(self, build_env, success_result, *args, **kwargs):
    super(SuccessTask, self).__init__(build_env, *args, **kwargs)
    self.success_result = success_result

  def execute(self):
    return self.success_result


class FailureTask(Task):
  def execute(self):
    raise TypeError('Failed!')


class TaskExecutorTest(AsyncTestCase):
  """Behavioral tests of the TaskExecutor type."""

  def runTestsWithExecutorType(self, executor_cls):
    build_env = BuildEnvironment()

    executor = executor_cls()
    executor.close()
    with self.assertRaises(RuntimeError):
      executor.run_task_async(SuccessTask(build_env, True))
    with self.assertRaises(RuntimeError):
      executor.close()

    with executor_cls() as executor:
      d = executor.run_task_async(SuccessTask(build_env, True))
      executor.wait(d)
      self.assertFalse(executor.has_any_running())
      self.assertCallbackEqual(d, True)
      executor.close()
      self.assertFalse(executor.has_any_running())

    with executor_cls() as executor:
      d = executor.run_task_async(FailureTask(build_env))
      executor.wait(d)
      self.assertFalse(executor.has_any_running())
      self.assertErrbackWithError(d, TypeError)

      d = executor.run_task_async(SuccessTask(build_env, True))
      executor.wait(d)
      executor.wait(d)
      self.assertFalse(executor.has_any_running())
      self.assertCallback(d)

      da = executor.run_task_async(SuccessTask(build_env, 'a'))
      executor.wait(da)
      self.assertFalse(executor.has_any_running())
      self.assertCallbackEqual(da, 'a')
      db = executor.run_task_async(SuccessTask(build_env, 'b'))
      executor.wait(db)
      self.assertFalse(executor.has_any_running())
      self.assertCallbackEqual(db, 'b')
      dc = executor.run_task_async(SuccessTask(build_env, 'c'))
      executor.wait(dc)
      self.assertFalse(executor.has_any_running())
      self.assertCallbackEqual(dc, 'c')

      da = executor.run_task_async(SuccessTask(build_env, 'a'))
      db = executor.run_task_async(SuccessTask(build_env, 'b'))
      dc = executor.run_task_async(SuccessTask(build_env, 'c'))
      executor.wait([da, db, dc])
      self.assertFalse(executor.has_any_running())
      self.assertCallbackEqual(dc, 'c')
      self.assertCallbackEqual(db, 'b')
      self.assertCallbackEqual(da, 'a')

      da = executor.run_task_async(SuccessTask(build_env, 'a'))
      db = executor.run_task_async(FailureTask(build_env))
      dc = executor.run_task_async(SuccessTask(build_env, 'c'))
      executor.wait(da)
      self.assertCallbackEqual(da, 'a')
      executor.wait(db)
      self.assertErrbackWithError(db, TypeError)
      executor.wait(dc)
      self.assertCallbackEqual(dc, 'c')
      self.assertFalse(executor.has_any_running())

    # This test is not quite right - it's difficult to test for proper
    # early termination
    with executor_cls() as executor:
      executor.close(graceful=False)
      self.assertFalse(executor.has_any_running())

  def testInProcess(self):
    self.runTestsWithExecutorType(InProcessTaskExecutor)

  def testMultiprocess(self):
    self.runTestsWithExecutorType(MultiProcessTaskExecutor)


if __name__ == '__main__':
  unittest2.main()
@ -0,0 +1,129 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""Base test case for tests that require static file fixtures.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import io
import os
import shutil
import sys
import tempfile
import unittest2

import util


def main():
  """Entry point for running tests.
  """

  # Only find *_test.py files under anvil/
  loader = unittest2.TestLoader()
  tests = loader.discover('anvil',
                          pattern='*_test.py',
                          top_level_dir='.')

  # Run the tests in the default runner
  test_runner = unittest2.runner.TextTestRunner(verbosity=2)
  test_runner.run(tests)


class AsyncTestCase(unittest2.TestCase):
  """Test case adding additional asserts for async results."""

  def assertCallback(self, deferred):
    self.assertTrue(deferred.is_done())
    done = []
    def _callback(*args, **kwargs):
      done.append(True)
    def _errback(*args, **kwargs):
      self.fail('Deferred failed when it should have succeeded')
    deferred.add_errback_fn(_errback)
    deferred.add_callback_fn(_callback)
    if not len(done):
      self.fail('Deferred not called back with success')

  def assertCallbackEqual(self, deferred, value):
    self.assertTrue(deferred.is_done())
    done = []
    def _callback(*args, **kwargs):
      self.assertEqual(args[0], value)
      done.append(True)
    def _errback(*args, **kwargs):
      self.fail('Deferred failed when it should have succeeded')
    deferred.add_errback_fn(_errback)
    deferred.add_callback_fn(_callback)
    if not len(done):
      self.fail('Deferred not called back with success')

  def assertErrback(self, deferred):
    self.assertTrue(deferred.is_done())
    done = []
    def _callback(*args, **kwargs):
      self.fail('Deferred succeeded when it should have failed')
    def _errback(*args, **kwargs):
      done.append(True)
    deferred.add_callback_fn(_callback)
    deferred.add_errback_fn(_errback)
    if not len(done):
      self.fail('Deferred not called back with error')

  def assertErrbackEqual(self, deferred, value):
    self.assertTrue(deferred.is_done())
    done = []
    def _callback(*args, **kwargs):
      self.fail('Deferred succeeded when it should have failed')
    def _errback(*args, **kwargs):
      self.assertEqual(args[0], value)
      done.append(True)
    deferred.add_callback_fn(_callback)
    deferred.add_errback_fn(_errback)
    if not len(done):
      self.fail('Deferred not called back with error')

  def assertErrbackWithError(self, deferred, error_cls):
    self.assertTrue(deferred.is_done())
    done = []
    def _callback(*args, **kwargs):
      self.fail('Deferred succeeded when it should have failed')
    def _errback(exception=None, *args, **kwargs):
      done.append(True)
      self.assertIsInstance(exception, error_cls)
    deferred.add_callback_fn(_callback)
    deferred.add_errback_fn(_errback)
    if not len(done):
      self.fail('Deferred not called back with error')


class FixtureTestCase(AsyncTestCase):
  """Test case supporting static fixture/output support.
  Set self.fixture to a folder name from the test/fixtures/ path.
  """

  fixture = None

  def setUp(self):
    super(FixtureTestCase, self).setUp()

    # Root output path
    self.temp_path = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, self.temp_path)
    self.root_path = self.temp_path

    # Copy fixture files
    if self.fixture:
      self.root_path = os.path.join(self.root_path, self.fixture)
      build_path = util.find_build_path()
      if not build_path:
        raise RuntimeError('Unable to find build path')
      fixture_path = os.path.join(
          build_path, '..', 'test', 'fixtures', self.fixture)
      target_path = os.path.join(self.temp_path, self.fixture)
      shutil.copytree(fixture_path, target_path)

  def assertFileContents(self, path, contents):
    self.assertTrue(os.path.isfile(path))
    with io.open(path, 'rt') as f:
      file_contents = f.read()
    self.assertEqual(file_contents, contents)
@ -0,0 +1,105 @@
# Copyright 2012 Google Inc. All Rights Reserved.

__author__ = 'benvanik@google.com (Ben Vanik)'


import os
import string
import sys
import time


# Unfortunately there is no one-true-timer in python
# This should always be preferred over direct use of the time module
if sys.platform == 'win32' or sys.platform == 'cygwin':
  timer = time.clock  # pragma: no cover
else:
  timer = time.time  # pragma: no cover


def find_build_path():  # pragma: no cover
  """Scans up the current path for the anvil/ folder.

  Returns:
    The 'anvil/' folder, or None if it could not be found.
  """
  path = sys.path[0]
  while True:
    if os.path.exists(os.path.join(path, 'anvil')):
      return os.path.join(path, 'anvil')
    path = os.path.dirname(path)
    if not len(path):
      return None


def is_rule_path(value):
  """Detects whether the given value is a rule name.

  Returns:
    True if the string is a valid rule name.
  """
  # NOTE: in the future this could be made to support modules/etc by looking
  #     for any valid use of ':'
  return isinstance(value, str) and len(value) and string.find(value, ':') >= 0


def validate_names(values, require_semicolon=False):
  """Validates a list of rule names to ensure they are well-defined.

  Args:
    values: A list of values to validate.
    require_semicolon: Whether to require a ':' in each name.

  Raises:
    TypeError: A value is not a string of non-zero length.
    NameError: A rule value is not valid.
  """
  if not values:
    return
  for value in values:
    if not isinstance(value, str) or not len(value):
      raise TypeError('Names must be a string of non-zero length')
    if len(value.strip()) != len(value):
      raise NameError(
          'Names cannot have leading/trailing whitespace: "%s"' % (value))
    if require_semicolon and string.find(value, ':') == -1:
      raise NameError('Names must be a rule (contain a :): "%s"' % (value))
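
# Example (editor's sketch, matching util_test.py below):
#
#   is_rule_path(':a')       # True
#   is_rule_path('/a/b:ab')  # True
#   is_rule_path('a')        # False - no ':'
#   validate_names([':a', 'b'], require_semicolon=True)  # raises NameError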


def underscore_to_pascalcase(value):
  """Converts a string from underscore_case to PascalCase.

  Args:
    value: Source string value.
      Example - hello_world

  Returns:
    The string, converted to PascalCase.
    Example - hello_world -> HelloWorld
  """
  if not value:
    return value
  def __CapWord(seq):
    for word in seq:
      yield word.capitalize()
  return ''.join(__CapWord(word if word else '_' for word in value.split('_')))


def which(executable_name):
  """Gets the full path to the given executable.
  If the given path exists in the CWD or is already absolute it is returned.
  Otherwise this method will look through the system PATH to try to find it.

  Args:
    executable_name: Name or path to the executable.

  Returns:
    The full path to the executable or None if it was not found.
  """
  if (os.path.exists(executable_name) and
      not os.path.isdir(executable_name)):
    return os.path.abspath(executable_name)
  for path in os.environ.get('PATH', '').split(':'):
    if (os.path.exists(os.path.join(path, executable_name)) and
        not os.path.isdir(os.path.join(path, executable_name))):
      return os.path.join(path, executable_name)
  return None
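
# Example (editor's sketch, matching util_test.py below):
#
#   which('/bin/sh')  # '/bin/sh' - existing paths pass through
#   which('cat')      # e.g. '/bin/cat', resolved via $PATH
#   which('xxx')      # None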
@ -0,0 +1,145 @@
#!/usr/bin/python

# Copyright 2012 Google Inc. All Rights Reserved.

"""Tests for the util module.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import unittest2

import util


class IsRulePathTest(unittest2.TestCase):
  """Behavioral tests of the is_rule_path method."""

  def testEmpty(self):
    self.assertFalse(util.is_rule_path(None))
    self.assertFalse(util.is_rule_path(''))

  def testTypes(self):
    self.assertFalse(util.is_rule_path(4))
    self.assertFalse(util.is_rule_path(['a']))
    self.assertFalse(util.is_rule_path({'a': 1}))

  def testNames(self):
    self.assertTrue(util.is_rule_path(':a'))
    self.assertTrue(util.is_rule_path(':ab'))
    self.assertTrue(util.is_rule_path('xx:ab'))
    self.assertTrue(util.is_rule_path('/a/b:ab'))

    self.assertFalse(util.is_rule_path('a'))
    self.assertFalse(util.is_rule_path('/a/b.c'))
    self.assertFalse(util.is_rule_path('a b c'))


class ValidateNamesTest(unittest2.TestCase):
  """Behavioral tests of the validate_names method."""

  def testEmpty(self):
    util.validate_names(None)
    util.validate_names([])

  def testNames(self):
    util.validate_names(['a'])
    util.validate_names([':a'])
    util.validate_names(['xx:a'])
    util.validate_names(['/a/b:a'])
    util.validate_names(['/a/b.c:a'])
    util.validate_names(['/a/b.c/:a'])
    util.validate_names(['a', ':b'])
    with self.assertRaises(TypeError):
      util.validate_names([None])
    with self.assertRaises(TypeError):
      util.validate_names([''])
    with self.assertRaises(TypeError):
      util.validate_names([{}])
    with self.assertRaises(NameError):
      util.validate_names([' a'])
    with self.assertRaises(NameError):
      util.validate_names(['a '])
    with self.assertRaises(NameError):
      util.validate_names([' a '])
    with self.assertRaises(NameError):
      util.validate_names(['a', ' b'])

  def testRequireSemicolon(self):
    util.validate_names([':a'], require_semicolon=True)
    util.validate_names([':a', ':b'], require_semicolon=True)
    with self.assertRaises(NameError):
      util.validate_names(['a'], require_semicolon=True)
    with self.assertRaises(NameError):
      util.validate_names([':a', 'b'], require_semicolon=True)


class UnderscoreToPascalCaseTest(unittest2.TestCase):
  """Behavioral tests of the underscore_to_pascalcase method."""

  def testEmpty(self):
    self.assertEqual(
        util.underscore_to_pascalcase(None),
        None)
    self.assertEqual(
        util.underscore_to_pascalcase(''),
        '')

  def testUnderscores(self):
    self.assertEqual(
        util.underscore_to_pascalcase('ab'),
        'Ab')
    self.assertEqual(
        util.underscore_to_pascalcase('aB'),
        'Ab')
    self.assertEqual(
        util.underscore_to_pascalcase('AB'),
        'Ab')
    self.assertEqual(
        util.underscore_to_pascalcase('a_b'),
        'AB')
    self.assertEqual(
        util.underscore_to_pascalcase('A_b'),
        'AB')
    self.assertEqual(
        util.underscore_to_pascalcase('aa_bb'),
        'AaBb')
    self.assertEqual(
        util.underscore_to_pascalcase('aa1_bb2'),
        'Aa1Bb2')
    self.assertEqual(
        util.underscore_to_pascalcase('1aa_2bb'),
        '1aa2bb')

  def testWhitespace(self):
    self.assertEqual(
        util.underscore_to_pascalcase(' '),
        ' ')
    self.assertEqual(
        util.underscore_to_pascalcase(' a'),
        ' a')
    self.assertEqual(
        util.underscore_to_pascalcase('a '),
        'A ')
    self.assertEqual(
        util.underscore_to_pascalcase(' a '),
        ' a ')
    self.assertEqual(
        util.underscore_to_pascalcase('a b'),
        'A b')
    self.assertEqual(
        util.underscore_to_pascalcase('a b'),
        'A b')


class WhichTest(unittest2.TestCase):
  """Behavioral tests of the which method."""

  def test(self):
    self.assertEqual(util.which('/bin/sh'), '/bin/sh')
    self.assertIsNone(util.which('xxx'))
    self.assertIsNotNone(util.which('cat'))


if __name__ == '__main__':
  unittest2.main()
@ -0,0 +1,11 @@
# Copyright 2012 Google Inc. All Rights Reserved.

"""
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


# TODO(benvanik): pull from somewhere?
VERSION = (0, 0, 0, 1)
VERSION_STR = '0.0.0.1'
@ -0,0 +1,38 @@
#!/bin/bash

# Copyright 2012 Google Inc. All Rights Reserved.

# This script runs the build unit tests with a coverage run and spits out
# the result HTML to scratch/coverage/

# TODO(benvanik): merge with run-tests.py?

# This must currently run from the root of the repo
# TODO(benvanik): make this runnable from anywhere (find git directory?)
if [ ! -d ".git" ]; then
  echo "This script must be run from the root of the repository (the folder containing .git)"
  exit 1
fi

# Get into a known-good initial state by removing everything
# (removes the possibility for confusing old output when runs fail)
if [ -e ".coverage" ]; then
  rm .coverage
fi
if [ -d "scratch/coverage" ]; then
  rm -rf scratch/coverage
fi

# Run all unit tests with coverage
coverage run --branch ./run-tests.py

# Dump to console (so you see *something*)
coverage report -m

# Output HTML report
coverage html -d scratch/coverage/

# Cleanup the coverage temp data, as it's unused and regenerated
if [ -e ".coverage" ]; then
  rm .coverage
fi
@ -0,0 +1,21 @@
#!/usr/bin/python

# Copyright 2012 Google Inc. All Rights Reserved.

"""Python build system test runner.
In order to speed things up (and avoid some platform incompatibilities) this
script should be used instead of unit2 or python -m unittest.
"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import os
import sys

# Add self to the root search path
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))

# Run the tests
import anvil.test
anvil.test.main()
@ -0,0 +1,67 @@
#!/usr/bin/env python

# Copyright 2012 Google Inc. All Rights Reserved.

"""
Anvil
-----

A parallel build system and content pipeline.

"""

__author__ = 'benvanik@google.com (Ben Vanik)'


import sys
from setuptools import setup


# Require Python 2.6+
if sys.hexversion < 0x02060000:
  raise RuntimeError('Python 2.6.0 or higher required')


setup(
    name='Anvil',
    version='0.0.1dev',
    url='https://github.com/benvanik/anvil-build/',
    download_url='https://github.com/benvanik/anvil-build/tarball/master',
    license='Apache',
    author='Ben Vanik',
    author_email='benvanik@google.com',
    description='A parallel build system and content pipeline',
    long_description=__doc__,
    platforms='any',
    install_requires=[
        'argparse>=1.2.1',
        'autobahn>=0.5.1',
        'coverage>=3.5.1',
        'glob2>=0.3',
        'networkx>=1.6',
        'Sphinx>=1.1.3',
        'watchdog>=0.6',
        'unittest2>=0.5.1',
        ],
    packages=['anvil',],
    test_suite='anvil.test',
    include_package_data=True,
    zip_safe=True,
    entry_points={
        'console_scripts': [
            'anvil = anvil.manage:main',
            ],
        },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
        ])
@ -0,0 +1 @@
1

@ -0,0 +1 @@
2

@ -0,0 +1 @@
3

@ -0,0 +1 @@
4

@ -0,0 +1,17 @@
concat_files(
    name='concat',
    srcs=['1.txt', '2.txt', '3.txt', '4.txt'])

concat_files(
    name='concat_out',
    srcs=['1.txt', '2.txt', '3.txt', '4.txt'],
    out='concat.txt')

concat_files(
    name='concat_template',
    srcs=[':concat_out', 't.txt', ':concat_out'])
template_files(
    name='templated',
    srcs=[':concat_template',],
    new_extension='.out',
    params={'hello': 'world!'})

@ -0,0 +1 @@
x${hello}x

@ -0,0 +1,4 @@
file_set('a', srcs='a.txt')
file_set('all_txt', srcs=glob('**/*.txt'))

copy_files('copy_all_txt', srcs=':all_txt')

@ -0,0 +1 @@
a

@ -0,0 +1,4 @@
file_set('b', srcs='b.txt')
file_set('c', srcs='c.not-txt')

copy_files('copy_c', srcs=':c')

@ -0,0 +1 @@
b

@ -0,0 +1 @@
c

@ -0,0 +1,9 @@
file_set('a', srcs='a.txt')
file_set('a_glob', srcs=glob('*.txt'))

file_set('b_ref', srcs='dir:b')

file_set('all_glob', srcs=glob('**/*.txt'))

file_set('combo', srcs=[':a', ':b_ref'])
file_set('dupes', srcs=[':a', 'a.txt', ':b_ref', 'dir:b', ':combo'])

@ -0,0 +1 @@
a

@ -0,0 +1,2 @@
file_set('b', srcs='b.txt')
file_set('b_glob', srcs=glob('*.txt'))

@ -0,0 +1 @@
b

@ -0,0 +1,25 @@
file_set('a', srcs='a.txt')
template_files(
    name='template_a',
    srcs='a.txt',
    params={'hello': 'world_a',})
template_files(
    name='template_a_rule',
    srcs=':a',
    params={'hello': 'world_a_rule',})

file_set('all_glob', srcs=glob('**/*.txt'))
template_files(
    name='template_all',
    srcs=':all_glob',
    params={'hello': 'world',})

template_files(
    name='template_dep_1',
    srcs=glob('**/*.nfo'),
    params={'arg1': '${arg2}',})
template_files(
    name='template_dep_2',
    srcs=':template_dep_1',
    new_extension='.out',
    params={'arg2': 'world!',})

@ -0,0 +1 @@
123${arg1}456

@ -0,0 +1 @@
123${hello}456

@ -0,0 +1,9 @@
file_set('b', srcs='b.txt')
template_files(
    name='template_b',
    srcs='b.txt',
    params={'hello': 'world_b',})
template_files(
    name='template_b_rule',
    srcs=':b',
    params={'hello': 'world_b_rule',})

@ -0,0 +1 @@
b123${arg1}456

@ -0,0 +1 @@
b123${hello}456

@ -0,0 +1,12 @@
from anvil.manage import manage_command


@manage_command('test_command')
def test_command(args, cwd):
  return 0


# Duplicate name
@manage_command('test_command')
def test_command1(args, cwd):
  return 0

@ -0,0 +1,6 @@
from anvil.manage import manage_command


@manage_command('test_command')
def test_command(args, cwd):
  return 123

@ -0,0 +1 @@
file_set('root_rule', deps=['a:rule_a'])

@ -0,0 +1 @@
file_set('rule_a', deps=['../b:rule_b'])

@ -0,0 +1 @@
file_set('rule_b', deps=['c:rule_c'])

@ -0,0 +1 @@
file_set('rule_c', deps=['build_file.py:rule_c_file'])

@ -0,0 +1 @@
file_set('rule_c_file')

@ -0,0 +1 @@
hi

@ -0,0 +1,16 @@
# Dummy rule types for testing rules


from anvil.rule import Rule, build_rule


@build_rule('rule_a')
class RuleA(Rule):
  def __init__(self, name, *args, **kwargs):
    super(RuleA, self).__init__(name, *args, **kwargs)


@build_rule('rule_b')
class RuleB(Rule):
  def __init__(self, name, *args, **kwargs):
    super(RuleB, self).__init__(name, *args, **kwargs)

@ -0,0 +1,16 @@
# File with duplicate rules


from anvil.rule import Rule, build_rule


@build_rule('rule_d')
class RuleD1(Rule):
  def __init__(self, name, *args, **kwargs):
    super(RuleD1, self).__init__(name, *args, **kwargs)


@build_rule('rule_d')
class RuleD2(Rule):
  def __init__(self, name, *args, **kwargs):
    super(RuleD2, self).__init__(name, *args, **kwargs)

@ -0,0 +1,10 @@
# More (nested) rule types for testing rules


from anvil.rule import Rule, build_rule


@build_rule('rule_c')
class RuleC(Rule):
  def __init__(self, name, *args, **kwargs):
    super(RuleC, self).__init__(name, *args, **kwargs)

@ -0,0 +1,10 @@
# Dummy file - this rule should not be discovered


from anvil.rule import Rule, build_rule


@build_rule('rule_x')
class RuleX(Rule):
  def __init__(self, name, *args, **kwargs):
    super(RuleX, self).__init__(name, *args, **kwargs)

@ -0,0 +1,37 @@
# Simple sample build file
# Does nothing but provide some rules

file_set('a',
    srcs=['a.txt'])

file_set('b',
    srcs=['b.txt'])

file_set('c',
    srcs=['c.txt'],
    deps=[':a', ':b'])

file_set('local_txt',
    srcs=glob('*.txt'))
file_set('recursive_txt',
    srcs=glob('**/*.txt'))
file_set('missing_txt',
    srcs='x.txt')
file_set('missing_glob_txt',
    srcs=glob('*.notpresent'))

file_set('local_txt_filter',
    srcs=glob('*'),
    src_filter='*.txt')
file_set('recursive_txt_filter',
    srcs=glob('**/*'),
    src_filter='*.txt')

file_set('file_input',
    srcs='a.txt')
file_set('rule_input',
    srcs=':file_input')
file_set('mixed_input',
    srcs=['b.txt', ':file_input'])
file_set('missing_input',
    srcs=':x')

@ -0,0 +1 @@
hello!

@ -0,0 +1 @@
world!

@ -0,0 +1 @@
!!!

@ -0,0 +1 @@
file_set('d', srcs=['d.txt'])

@ -0,0 +1 @@
!!!

@ -0,0 +1 @@
!!!

@ -0,0 +1 @@
!!!

@ -0,0 +1 @@
!!!