desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
def constrain(self, other):
    """Intersect self's versions with other's versions.

    Returns:
        bool: whether the CompilerSpec changed.

    Raises:
        UnsatisfiableCompilerSpecError: if other cannot satisfy self.
    """
    spec = self._autospec(other)
    if spec.satisfies(self):
        return self.versions.intersect(spec.versions)
    raise UnsatisfiableCompilerSpecError(spec, self)
|
@property
def concrete(self):
    """A CompilerSpec is concrete if its versions are concrete and there
    is an available compiler with the right version."""
    versions = self.versions
    return versions.concrete
|
'Add all flags in other that aren\'t in self to self.
Return whether the spec changed.'
| def constrain(self, other):
| if (other.spec and other.spec._concrete):
for k in self:
if (k not in other):
raise UnsatisfiableCompilerFlagSpecError(self[k], '<absent>')
changed = False
for k in other:
if ((k in self) and (not (set(self[k]) <= set(other[k])))):
raise UnsatisfiableC... |
def __init__(self, attribute_name, default_handler=None):
    """Create a new descriptor.

    Parameters:
        attribute_name (str): name of the attribute to be searched for
            in the Package instance.
        default_handler (callable, optional): default function to be
            called if the attribute was not found in the Package
            instance.
    """
    self.attribute_name = attribute_name

    # Fall back to a handler that always answers None.
    def _none_handler(descriptor, spec, cls):
        return None

    self.default = _none_handler if default_handler is None else default_handler
|
'Retrieves the property from Package using a well defined chain
of responsibility.
The order of call is:
1. if the query was through the name of a virtual package try to
search for the attribute `{virtual_name}_{attribute_name}`
in Package
2. try to search for attribute `{attribute_name}` in Package
3. try to call the ... | def __get__(self, instance, cls):
| pkg = instance.package
try:
query = instance.last_query
except AttributeError:
_ = instance[instance.name]
query = instance.last_query
callbacks_chain = []
if query.isvirtual:
specialized_name = '{0}_{1}'.format(query.name, self.attribute_name)
callbacks_chain... |
def _add_version(self, version):
    """Called by the parser to add an allowable version."""
    versions = self.versions
    versions.add(version)
|
'Called by the parser to add a known flag.
Known flags currently include "arch"'
| def _add_flag(self, name, value):
| valid_flags = FlagMap.valid_compiler_flags()
if ((name == 'arch') or (name == 'architecture')):
parts = tuple(value.split('-'))
(plat, os, tgt) = (parts if (len(parts) == 3) else (None, None, value))
self._set_architecture(platform=plat, platform_os=os, target=tgt)
elif (name == 'pla... |
'Called by the parser to set the architecture.'
| def _set_architecture(self, **kwargs):
| arch_attrs = ['platform', 'platform_os', 'target']
if (self.architecture and self.architecture.concrete):
raise DuplicateArchitectureError(("Spec for '%s' cannot have two architectures." % self.name))
if (not self.architecture):
new_vals = tuple((kwargs.get(arg, None) for a... |
def _set_compiler(self, compiler):
    """Called by the parser to set the compiler (at most once per spec)."""
    if self.compiler:
        msg = "Spec for '%s' cannot have two compilers." % self.name
        raise DuplicateCompilerSpecError(msg)
    self.compiler = compiler
|
def _add_dependency(self, spec, deptypes):
    """Called by the parser to add another spec as a dependency.

    Links the new DependencySpec in both directions: into this spec's
    _dependencies and into the dependency's _dependents.
    """
    if spec.name in self._dependencies:
        raise DuplicateDependencyError("Cannot depend on '%s' twice" % spec)
    edge = DependencySpec(self, spec, deptypes)
    self._dependencies[spec.name] = edge
    spec._dependents[self.name] = edge
|
@property
def root(self):
    """Follow dependent links and find the root of this spec's DAG.

    In spack specs, there should be a single root (the package being
    installed). Raises an AssertionError if that is not the case.
    """
    if not self._dependents:
        return self
    parents = iter(self._dependents.values())
    result = next(parents).parent.root
    # Every remaining dependent must lead to the very same root object.
    assert all(result is dep.parent.root for dep in parents)
    return result
|
@property
def package_class(self):
    """Internal package call gets only the class object for a package.

    Use this to just get package metadata.
    """
    fullname = self.fullname
    return spack.repo.get_pkg_class(fullname)
|
@property
def virtual(self):
    """Right now, a spec is virtual if no package exists with its name.

    TODO: revisit this -- might need to use a separate namespace and
    be more explicit about this.  Possible idea: just use convention
    and make virtual deps all caps, e.g., MPI vs mpi.
    """
    name = self.name
    return Spec.is_virtual(name)
|
@staticmethod
def is_virtual(name):
    """Test if a name is virtual without requiring a Spec."""
    if name is None:
        return False
    return not spack.repo.exists(name)
|
'A spec is concrete if it can describe only ONE build of a package.
If any of the name, version, architecture, compiler,
variants, or depdenencies are ambiguous,then it is not concrete.'
| @property
def concrete(self):
| if self._concrete:
return True
self._concrete = bool(((not self.virtual) and (self.namespace is not None) and self.versions.concrete and self.variants.concrete and self.architecture and self.architecture.concrete and self.compiler and self.compiler.concrete and self.compiler_flags.concrete and self._dep... |
'Generic traversal of the DAG represented by this spec.
This will yield each node in the spec. Options:
order [=pre|post]
Order to traverse spec nodes. Defaults to preorder traversal.
Options are:
\'pre\': Pre-order traversal; each node is yielded before its
children in the dependency DAG.
\'post\': Post-order tr... | def traverse_edges(self, visited=None, d=0, deptype=None, deptype_query=None, dep_spec=None, **kwargs):
| depth = kwargs.get('depth', False)
key_fun = kwargs.get('key', id)
if isinstance(key_fun, string_types):
key_fun = attrgetter(key_fun)
yield_root = kwargs.get('root', True)
cover = kwargs.get('cover', 'nodes')
direction = kwargs.get('direction', 'children')
order = kwargs.get('order'... |
@property
def short_spec(self):
    """Returns a version of the spec with the dependencies hashed
    instead of completely enumerated."""
    fmt = '$_$@$%@$+$=$/'
    return self.format(fmt)
|
@property
def cshort_spec(self):
    """Returns an auto-colorized version of ``self.short_spec``."""
    fmt = '$_$@$%@$+$=$/'
    return self.cformat(fmt)
|
'Return a hash of the entire spec DAG, including connectivity.'
| def dag_hash(self, length=None):
| if self._hash:
return self._hash[:length]
else:
yaml_text = syaml.dump(self.to_node_dict(), default_flow_style=True, width=maxint)
sha = hashlib.sha1(yaml_text.encode('utf-8'))
b32_hash = base64.b32encode(sha.digest()).lower()
if (sys.version_info[0] >= 3):
b3... |
def dag_hash_bit_prefix(self, bits):
    """Get the first <bits> bits of the DAG hash as an integer type."""
    full_hash = self.dag_hash()
    return base32_prefix_bits(full_hash, bits)
|
'Read the DependencySpec portion of a YAML-formatted Spec.
This needs to be backward-compatible with older spack spec
formats so that reindex will work on old specs/databases.'
| @staticmethod
def read_yaml_dep_specs(dependency_dict):
| for (dep_name, elt) in dependency_dict.items():
if isinstance(elt, string_types):
(dag_hash, deptypes) = (elt, ['build', 'link'])
elif isinstance(elt, tuple):
(dag_hash, deptypes) = elt
elif isinstance(elt, dict):
(dag_hash, deptypes) = (elt['hash'], elt['... |
'Construct a spec from YAML.
Parameters:
data -- a nested dict/list data structure read from YAML or JSON.'
| @staticmethod
def from_dict(data):
| nodes = data['spec']
dep_list = [Spec.from_node_dict(node) for node in nodes]
if (not dep_list):
raise SpecError('YAML spec contains no nodes.')
deps = dict(((spec.name, spec) for spec in dep_list))
spec = dep_list[0]
for node in nodes:
name = next(iter(node))
... |
@staticmethod
def from_yaml(stream):
    """Construct a spec from YAML.

    Parameters:
        stream -- string or file object to read from.

    Raises:
        SpackYAMLError: if the YAML cannot be parsed.
    """
    try:
        return Spec.from_dict(syaml.load(stream))
    except MarkedYAMLError as e:
        raise syaml.SpackYAMLError('error parsing YAML spec:', str(e))
|
@staticmethod
def from_json(stream):
    """Construct a spec from JSON.

    Parameters:
        stream -- string or file object to read from.

    Raises:
        SpackJSONError: if the JSON cannot be parsed.
    """
    try:
        return Spec.from_dict(sjson.load(stream))
    except Exception as e:
        raise sjson.SpackJSONError('error parsing JSON spec:', str(e))
|
'Recursive helper function for concretize().
This concretizes everything bottom-up. As things are
concretized, they\'re added to the presets, and ancestors
will prefer the settings of their children.'
| def _concretize_helper(self, presets=None, visited=None):
| if (presets is None):
presets = {}
if (visited is None):
visited = set()
if (self.name in visited):
return False
changed = False
for name in sorted(self._dependencies.keys()):
changed |= self._dependencies[name].spec._concretize_helper(presets, visited)
if (self.n... |
'Replace this virtual spec with a concrete spec.'
| def _replace_with(self, concrete):
| assert self.virtual
for (name, dep_spec) in self._dependents.items():
dependent = dep_spec.parent
deptypes = dep_spec.deptypes
if (self.name in dependent._dependencies):
del dependent._dependencies[self.name]
if (concrete.name not in dependent._dependencies):
... |
'Find virtual packages in this spec, replace them with providers,
and normalize again to include the provider\'s (potentially virtual)
dependencies. Repeat until there are no virtual deps.
Precondition: spec is normalized.
.. todo::
If a provider depends on something that conflicts with
other dependencies in the spec ... | def _expand_virtual_packages(self):
| self_index = ProviderIndex(self.traverse(), restrict=True)
changed = False
done = False
while (not done):
done = True
for spec in list(self.traverse()):
replacement = None
if spec.external:
continue
if spec.virtual:
repl... |
'A spec is concrete if it describes one build of a package uniquely.
This will ensure that this spec is concrete.
If this spec could describe more than one version, variant, or build
of a package, this will add constraints to make it concrete.
Some rigorous validation and checks are also performed on the spec.
Concreti... | def concretize(self):
| if (not self.name):
raise SpecError('Attempting to concretize anonymous spec')
if self._concrete:
return
changed = True
force = False
while changed:
changes = (self.normalize(force), self._expand_virtual_packages(), self._concretize_helper())
changed = any... |
def _mark_concrete(self, value=True):
    """Mark this spec and its dependencies as concrete.

    Only for internal use -- client code should use "concretize"
    unless there is a need to force a spec to be concrete.
    """
    for node in self.traverse(deptype_query=alldeps):
        node._normal = value
        node._concrete = value
|
def concretized(self):
    """This is a non-destructive version of concretize().

    First clones, then returns a concrete version of this package
    without modifying this package.
    """
    duplicate = self.copy()
    duplicate.concretize()
    return duplicate
|
'Return a DependencyMap containing all of this spec\'s
dependencies with their constraints merged.
If copy is True, returns merged copies of its dependencies
without modifying the spec it\'s called on.
If copy is False, clears this spec\'s dependencies and
returns them.'
| def flat_dependencies(self, **kwargs):
| copy = kwargs.get('copy', True)
deptype_query = kwargs.get('deptype_query')
flat_deps = {}
try:
deptree = self.traverse(root=False, deptype_query=deptype_query)
for spec in deptree:
if (spec.name not in flat_deps):
if copy:
spec = spec.copy... |
def index(self, deptype=None):
    """Return a DependencyMap that points to all the dependencies in
    this spec."""
    mapping = DependencyMap(None)
    for node in self.traverse(deptype=deptype):
        mapping[node.name] = node
    return mapping
|
'Evaluate all the conditions on a dependency with this name.
If the package depends on <name> in this configuration, return
the dependency. If no conditions are True (and we don\'t
depend on it), return None.'
| def _evaluate_dependency_conditions(self, name):
| pkg = spack.repo.get(self.fullname)
conditions = pkg.dependencies[name]
substitute_abstract_variants(self)
dep = None
for (when_spec, dep_spec) in conditions.items():
sat = self.satisfies(when_spec, strict=True)
if sat:
if (dep is None):
dep = Spec(name)
... |
'Find provider for a virtual spec in the provider index.
Raise an exception if there is a conflicting virtual
dependency already in this spec.'
| def _find_provider(self, vdep, provider_index):
| assert vdep.virtual
providers = provider_index.providers_for(vdep)
if providers:
for provider in providers:
for spec in providers:
if ((spec is not provider) and provider.satisfies(spec)):
providers.remove(spec)
if (len(providers) > 1):
... |
'Merge the dependency into this spec.
Caller should assume that this routine can owns the dep parameter
(i.e. it needs to be a copy of any internal structures like
dependencies on Package class objects).
This is the core of normalize(). There are some basic steps:
* If dep is virtual, evaluate whether it corresponds t... | def _merge_dependency(self, dep, deptypes, visited, spec_deps, provider_index):
| changed = False
if dep.virtual:
visited.add(dep.name)
provider = self._find_provider(dep, provider_index)
if provider:
dep = provider
else:
index = ProviderIndex([dep], restrict=True)
items = list(spec_deps.items())
for (name, vspec) in items:
... |
'Recursive helper function for _normalize.'
| def _normalize_helper(self, visited, spec_deps, provider_index):
| if (self.name in visited):
return False
visited.add(self.name)
if (self.virtual or self.external):
return False
any_change = False
changed = True
pkg = spack.repo.get(self.fullname)
while changed:
changed = False
for dep_name in pkg.dependencies:
p... |
'When specs are parsed, any dependencies specified are hanging off
the root, and ONLY the ones that were explicitly provided are there.
Normalization turns a partial flat spec into a DAG, where:
1. Known dependencies of the root package are in the DAG.
2. Each node\'s dependencies dict only contains its known direct
de... | def normalize(self, force=False):
| if (not self.name):
raise SpecError('Attempting to normalize anonymous spec')
if (self._normal and (not force)):
return False
if force:
self._mark_concrete(False)
self.validate_or_raise()
spec_deps = self.flat_dependencies(copy=False, deptype_query=alldeps)
pr... |
def normalized(self):
    """Return a normalized copy of this spec without modifying this spec."""
    duplicate = self.copy()
    duplicate.normalize()
    return duplicate
|
'Checks that names and values in this spec are real. If they\'re not,
it will raise an appropriate exception.'
| def validate_or_raise(self):
| for spec in self.traverse():
if ((not spec.virtual) and spec.name):
spack.repo.get(spec.fullname)
if spec.compiler:
if (not compilers.supported(spec.compiler)):
raise UnsupportedCompilerError(spec.compiler.name)
if (not spec.virtual):
pkg_c... |
'Merge the constraints of other with self.
Returns True if the spec changed as a result, False if not.'
| def constrain(self, other, deps=True):
| if self.concrete:
if self.satisfies(other):
return False
else:
raise UnsatisfiableSpecError(self, other, 'constrain a concrete spec')
other = self._autospec(other)
if (not ((self.name == other.name) or (not self.name) or (not other.name))):
raise Unsa... |
'Apply constraints of other spec\'s dependencies to this spec.'
| def _constrain_dependencies(self, other):
| other = self._autospec(other)
if ((not self._dependencies) or (not other._dependencies)):
return False
if (not other.satisfies_dependencies(self)):
raise UnsatisfiableDependencySpecError(other, self)
changed = False
for name in self.common_dependencies(other):
changed |= self... |
def common_dependencies(self, other):
    """Return names of dependencies that self and other have in common."""
    mine = {s.name for s in self.traverse(root=False)}
    theirs = {s.name for s in other.traverse(root=False)}
    return mine & theirs
|
def constrained(self, other, deps=True):
    """Return a constrained copy without modifying this spec."""
    duplicate = self.copy(deps=deps)
    duplicate.constrain(other, deps)
    return duplicate
|
def dep_difference(self, other):
    """Returns names of dependencies in self that are not in other."""
    theirs = {s.name for s in other.traverse(root=False)}
    return {s.name for s in self.traverse(root=False)} - theirs
|
'Used to convert arguments to specs. If spec_like is a spec, returns
it. If it\'s a string, tries to parse a string. If that fails, tries
to parse a local spec from it (i.e. name is assumed to be self\'s name).'
| def _autospec(self, spec_like):
| if isinstance(spec_like, spack.spec.Spec):
return spec_like
try:
spec = spack.spec.Spec(spec_like)
if (not spec.name):
raise SpecError('anonymous package -- this will always be handled')
return spec
except SpecError:
return parse_anony... |
'Determine if this spec satisfies all constraints of another.
There are two senses for satisfies:
* `loose` (default): the absence of a constraint in self
implies that it *could* be satisfied by other, so we only
check that there are no conflicts with other for
constraints that this spec actually has.
* `strict`: stric... | def satisfies(self, other, deps=True, strict=False, strict_deps=False):
| other = self._autospec(other)
if other.concrete:
return (self.concrete and (self.dag_hash() == other.dag_hash()))
if ((not self.virtual) and other.virtual):
try:
pkg = spack.repo.get(self.fullname)
except spack.repository.UnknownEntityError:
return False
... |
'This checks constraints on common dependencies against each other.'
| def satisfies_dependencies(self, other, strict=False):
| other = self._autospec(other)
if strict:
if (other._dependencies and (not self._dependencies)):
return False
selfdeps = self.traverse(root=False)
otherdeps = other.traverse(root=False)
if (not all((any((d.satisfies(dep) for d in selfdeps)) for dep in otherdeps))):
... |
def virtual_dependencies(self):
    """Return list of any virtual deps in this spec."""
    result = []
    for node in self.traverse():
        if node.virtual:
            result.append(node)
    return result
|
'Copy the spec other into self. This is an overwriting
copy. It does not copy any dependents (parents), but by default
copies dependencies.
To duplicate an entire DAG, call _dup() on the root of the DAG.
Options:
dependencies[=True]
Whether deps should be copied too. Set to False to copy a
spec but not its dependenc... | def _dup(self, other, deps=True, cleardeps=True):
| changed = True
if hasattr(self, 'name'):
changed = ((self.name != other.name) and (self.versions != other.versions) and (self.architecture != other.architecture) and (self.compiler != other.compiler) and (self.variants != other.variants) and (self._normal != other._normal) and (self.concrete != other.co... |
def copy(self, deps=True):
    """Return a copy of this spec.

    By default, returns a deep copy. To control how dependencies are
    copied, supply:
        deps=True:  deep copy
        deps=False: shallow copy (no dependencies)
        deps=('link', 'build'):
            only build and link dependencies.  Similar for other deptypes.
    """
    duplicate = Spec.__new__(Spec)
    duplicate._dup(self, deps=deps)
    return duplicate
|
'Get a dependency from the spec by its name. This call implicitly
sets a query state in the package being retrieved. The behavior of
packages may be influenced by additional query parameters that are
passed after a colon symbol.
Note that if a virtual package is queried a copy of the Spec is
returned while for non-virt... | def __getitem__(self, name):
| query_parameters = name.split(':')
if (len(query_parameters) > 2):
msg = "key has more than one ':' symbol."
msg += ' At most one is admitted.'
raise KeyError(msg)
(name, query_parameters) = (query_parameters[0], query_parameters[1:])
if query_par... |
def __contains__(self, spec):
    """True if this spec satisfies the provided spec, or if any
    dependency does. If the spec has no name, then we parse this
    one first."""
    needle = self._autospec(spec)
    return any(node.satisfies(needle, strict=True) for node in self.traverse())
|
def sorted_deps(self):
    """Return a tuple of all dependencies sorted by name."""
    deps = self.flat_dependencies()
    # Dict keys are unique, so sorting items sorts strictly by name.
    return tuple(dep for _, dep in sorted(deps.items()))
|
'Recursive helper for eq_dag and ne_dag. Does the actual DAG
traversal.'
| def _eq_dag(self, other, vs, vo, deptypes):
| vs.add(id(self))
vo.add(id(other))
if self.ne_node(other):
return False
if (len(self._dependencies) != len(other._dependencies)):
return False
ssorted = [self._dependencies[name] for name in sorted(self._dependencies)]
osorted = [other._dependencies[name] for name in sorted(other... |
def eq_dag(self, other, deptypes=True):
    """True if the full dependency DAGs of specs are equal."""
    visited_self, visited_other = set(), set()
    return self._eq_dag(other, visited_self, visited_other, deptypes)
|
def ne_dag(self, other, deptypes=True):
    """True if the full dependency DAGs of specs are not equal.

    Bug fix: this previously called ``self.eq_dag(other, set(), set(),
    deptypes)``, forwarding the visited-set arguments of ``_eq_dag``;
    ``eq_dag`` only accepts ``(other, deptypes)``, so every call raised
    TypeError.  Forward only the arguments eq_dag takes.
    """
    return not self.eq_dag(other, deptypes)
|
def _cmp_node(self):
    """Comparison key for just *this node* and not its deps."""
    return (
        self.name,
        self.namespace,
        self.versions,
        self.variants,
        self.architecture,
        self.compiler,
        self.compiler_flags,
    )
|
def eq_node(self, other):
    """Equality with another spec, not including dependencies."""
    mine, theirs = self._cmp_node(), other._cmp_node()
    return mine == theirs
|
def ne_node(self, other):
    """Inequality with another spec, not including dependencies."""
    mine, theirs = self._cmp_node(), other._cmp_node()
    return mine != theirs
|
def _cmp_key(self):
    """This returns a key for the spec *including* DAG structure.

    The key is the concatenation of:
      1. A tuple describing this node in the DAG.
      2. The hash of each of this node's dependencies' cmp_keys.
    """
    if self._cmp_key_cache:
        return self._cmp_key_cache
    dep_tuple = tuple(
        (edge.spec.name, hash(edge.spec), tuple(sorted(edge.deptypes)))
        for _, edge in sorted(self._dependencies.items())
    )
    key = (self._cmp_node(), dep_tuple)
    # Only concrete specs are stable enough to cache the key.
    if self._concrete:
        self._cmp_key_cache = key
    return key
|
'Prints out particular pieces of a spec, depending on what is
in the format string. The format strings you can provide are::
$_ Package name
$. Full package name (with namespace)
$@ Version with \'@\' prefix
$% Compiler with \'%\' prefix
$%@ Compiler with \'%\' prefix & compiler version with \'@\' prefix
$%+ ... | def format(self, format_string='$_$@$%@+$+$=', **kwargs):
| color = kwargs.get('color', False)
length = len(format_string)
out = StringIO()
named = escape = compiler = False
named_str = fmt = ''
def write(s, c):
f = ((color_formats[c] + cescape(s)) + '@.')
cwrite(f, stream=out, color=color)
iterator = enumerate(format_string)
for ... |
def cformat(self, *args, **kwargs):
    """Same as format, but color defaults to auto instead of False."""
    opts = dict(kwargs)
    opts.setdefault('color', None)
    return self.format(*args, **opts)
|
def _install_status(self):
    """Helper for tree to print DB install status.

    Returns None for non-concrete specs or specs not in the database.
    """
    if not self.concrete:
        return None
    try:
        return spack.store.db.get_record(self).installed
    except KeyError:
        return None
|
def _installed_explicitly(self):
    """Helper for tree to print DB install status (explicit installs).

    Returns None for non-concrete specs or specs not in the database.
    """
    if not self.concrete:
        return None
    try:
        return spack.store.db.get_record(self).explicit
    except KeyError:
        return None
|
'Prints out this spec and its dependencies, tree-formatted
with indentation.'
| def tree(self, **kwargs):
| color = kwargs.pop('color', get_color_when())
depth = kwargs.pop('depth', False)
hashes = kwargs.pop('hashes', False)
hlen = kwargs.pop('hashlen', None)
install_status = kwargs.pop('install_status', False)
cover = kwargs.pop('cover', 'nodes')
indent = kwargs.pop('indent', 0)
fmt = kwargs... |
'Parse a spec out of the input. If a spec is supplied, initialize
and return it instead of creating a new one.'
| def spec(self, name):
| if name:
(spec_namespace, dot, spec_name) = name.rpartition('.')
if (not spec_namespace):
spec_namespace = None
self.check_identifier(spec_name)
else:
spec_namespace = None
spec_name = None
spec = Spec.__new__(Spec)
spec.name = spec_name
spec.versi... |
def check_identifier(self, id=None):
    """The only identifiers that can contain '.' are versions, but
    version ids are context-sensitive so we have to check on a
    case-by-case basis. Call this if we detect a version id where it
    shouldn't be.
    """
    # NOTE: `id` shadows the builtin; kept for interface compatibility.
    name = id if id else self.token.value
    if '.' in name:
        self.last_token_error("{0}: Identifier cannot contain '.'".format(name))
|
def __init__(self, vpkg, providers):
    """Takes the name of the vpkg and the list of conflicting provider specs."""
    names = [str(s) for s in providers]
    msg = "Multiple providers found for '%s': %s" % (vpkg, names)
    super(MultipleProviderError, self).__init__(msg)
    self.vpkg = vpkg
    self.providers = providers
|
'Decorator for Spack directives.
Spack directives allow you to modify a package while it is being
defined, e.g. to add version or dependency information. Directives
are one of the key pieces of Spack\'s package "language", which is
embedded in python.
Here\'s an example directive:
@directive(dicts=\'versions\')
versio... | @staticmethod
def directive(dicts=None):
| global __all__
if isinstance(dicts, string_types):
dicts = (dicts,)
if (not isinstance(dicts, collections.Sequence)):
message = 'dicts arg must be list, tuple, or string. Found {0}'
raise TypeError(message.format(type(dicts)))
DirectiveMetaMixin._direct... |
def clear(self):
    """Empty cached config information."""
    self.sections = dict()
|
def add_target(self, name, target):
    """Used by the platform-specific subclass to register an available
    target.

    Raises:
        ValueError: if the name is a spack-reserved alias.
    """
    if name in Platform.reserved_targets:
        msg = '%s is a spack reserved alias and cannot be the name of a target' % name
        raise ValueError(msg)
    self.targets[name] = target
|
def target(self, name):
    """Getter for the target dictionary that resolves the
    'default_target', 'frontend'/'fe', and 'backend'/'be' aliases
    before lookup. Subclasses may override this to provide further
    aliasing options. Returns None for unknown targets.
    """
    aliases = {
        'default_target': self.default,
        'frontend': self.front_end,
        'fe': self.front_end,
        'backend': self.back_end,
        'be': self.back_end,
    }
    resolved = aliases.get(name, name)
    return self.targets.get(resolved, None)
|
def add_operating_system(self, name, os_class):
    """Add the operating_system class object into the
    platform.operating_sys dictionary.

    Raises:
        ValueError: if the name is a spack-reserved alias.
    """
    if name in Platform.reserved_oss:
        msg = '%s is a spack reserved alias and cannot be the name of an OS' % name
        raise ValueError(msg)
    self.operating_sys[name] = os_class
|
@classmethod
def setup_platform_environment(self, pkg, env):
    """Subclass can override this method if it requires any
    platform-specific build environment modifications."""
    # Base platform: no environment changes needed.
    pass
|
@classmethod
def detect(self):
    """Subclass is responsible for implementing this method.

    Returns True if the Platform class detects that it is the current
    platform, and False if it's not.
    """
    raise NotImplementedError()
|
'Return a list of compilers found in the supplied paths.
This invokes the find() method for each Compiler class,
and appends the compilers detected to a list.'
| def find_compilers(self, *paths):
| if (not paths):
paths = get_path('PATH')
filtered_path = []
for p in paths:
p = os.path.realpath(p)
if (not os.path.isdir(p)):
continue
filtered_path.append(p)
bin = join_path(p, 'bin')
if os.path.isdir(bin):
filtered_path.append(os.pat... |
'Try to find the given type of compiler in the user\'s
environment. For each set of compilers found, this returns
compiler objects with the cc, cxx, f77, fc paths and the
version filled in.
This will search for compilers with the names in cc_names,
cxx_names, etc. and it will group them if they have common
prefixes, su... | def find_compiler(self, cmp_cls, *path):
| dicts = parmap((lambda t: cmp_cls._find_matches_in_path(*t)), [((cmp_cls.cc_names, cmp_cls.cc_version) + tuple(path)), ((cmp_cls.cxx_names, cmp_cls.cxx_version) + tuple(path)), ((cmp_cls.f77_names, cmp_cls.f77_version) + tuple(path)), ((cmp_cls.fc_names, cmp_cls.fc_version) + tuple(path))])
all_keys = set()
... |
'Create a Database for Spack installations under ``root``.
A Database is a cache of Specs data from ``$prefix/spec.yaml``
files in Spack installation directories.
By default, Database files (data and lock files) are stored
under ``root/.spack-db``, which is created if it does not
exist. This is the ``db_dir``.
The Dat... | def __init__(self, root, db_dir=None):
| self.root = root
if (db_dir is None):
self._db_dir = join_path(self.root, _db_dirname)
else:
self._db_dir = db_dir
self._old_yaml_index_path = join_path(self._db_dir, 'index.yaml')
self._index_path = join_path(self._db_dir, 'index.json')
self._lock_path = join_path(self._db_dir, ... |
def write_transaction(self, timeout=_db_lock_timeout):
    """Get a write lock context manager for use in a `with` block."""
    lock = self.lock
    return WriteTransaction(lock, self._read, self._write, timeout)
|
def read_transaction(self, timeout=_db_lock_timeout):
    """Get a read lock context manager for use in a `with` block."""
    lock = self.lock
    return ReadTransaction(lock, self._read, timeout=timeout)
|
def prefix_lock(self, spec):
    """Get a lock on a particular spec's installation directory.

    NOTE: The installation directory **does not** need to exist.

    Prefix lock is a byte-range lock on the nth byte of a file, where
    n is derived from the spec's DAG hash. The lock file is
    ``spack.store.db.prefix_lock`` -- the DB tells us what to call it
    and it lives alongside the install DB.
    """
    prefix = spec.prefix
    if prefix not in self._prefix_locks:
        byte_offset = spec.dag_hash_bit_prefix(bit_length(sys.maxsize))
        self._prefix_locks[prefix] = Lock(self.prefix_lock_path, byte_offset, 1)
    return self._prefix_locks[prefix]
|
def _write_to_file(self, stream):
    """Write out the database to a JSON file.

    This function does not do any locking or transactions.
    """
    installs = {key: rec.to_dict() for key, rec in self._data.items()}
    database = {'database': {'installs': installs, 'version': str(_db_version)}}
    try:
        sjson.dump(database, stream)
    except YAMLError as e:
        # NOTE(review): this dumps JSON via sjson but catches YAMLError
        # and reports a YAML error message -- looks inconsistent; confirm
        # intent before changing the exception type or message.
        raise syaml.SpackYAMLError('error writing YAML database:', str(e))
|
def _read_spec_from_dict(self, hash_key, installs):
    """Recursively construct a spec from a hash in a YAML database.

    Does not do any locking.
    """
    spec_dict = installs[hash_key]['spec']
    # Stamp the hash onto each node dict so the Spec records it.
    for node_name in spec_dict:
        spec_dict[node_name]['hash'] = hash_key
    return spack.spec.Spec.from_node_dict(spec_dict)
|
'Fill database from file, do not maintain old data
Translate the spec portions from node-dict form to spec form
Does not do any locking.'
| def _read_from_file(self, stream, format='json'):
| if (format.lower() == 'json'):
load = sjson.load
elif (format.lower() == 'yaml'):
load = syaml.load
else:
raise ValueError(('Invalid database format: %s' % format))
try:
if isinstance(stream, string_types):
with open(stream, 'r') as f:
... |
'Build database index from scratch based on a directory layout.
Locks the DB if it isn\'t locked already.'
| def reindex(self, directory_layout):
| def _read_suppress_error():
try:
if os.path.isfile(self._index_path):
self._read_from_file(self._index_path)
except CorruptDatabaseError as e:
self._error = e
self._data = {}
transaction = WriteTransaction(self.lock, _read_suppress_error, self.... |
'Ensure consistency of reference counts in the DB.
Raise an AssertionError if something is amiss.
Does no locking.'
| def _check_ref_counts(self):
| counts = {}
for (key, rec) in self._data.items():
counts.setdefault(key, 0)
for dep in rec.spec.dependencies(_tracked_deps):
dep_key = dep.dag_hash()
counts.setdefault(dep_key, 0)
counts[dep_key] += 1
for rec in self._data.values():
key = rec.spec.... |
'Write the in-memory database index to its file path.
This is a helper function called by the WriteTransaction context
manager. If there is an exception while the write lock is active,
nothing will be written to the database file, but the in-memory
database *may* be left in an inconsistent state. It will be consistent... | def _write(self, type, value, traceback):
| if (type is not None):
return
temp_file = (self._index_path + ('.%s.%s.temp' % (socket.getfqdn(), os.getpid())))
try:
with open(temp_file, 'w') as f:
self._write_to_file(f)
os.rename(temp_file, self._index_path)
except:
if os.path.exists(temp_file):
... |
'Re-read Database from the data in the set location.
This does no locking, with one exception: it will automatically
migrate an index.yaml to an index.json if possible. This requires
taking a write lock.'
| def _read(self):
| if os.path.isfile(self._index_path):
self._read_from_file(self._index_path, format='json')
elif os.path.isfile(self._old_yaml_index_path):
if os.access(self._db_dir, (os.R_OK | os.W_OK)):
self._read_from_file(self._old_yaml_index_path, format='yaml')
with WriteTransaction... |
'Add an install record for this spec to the database.
Assumes spec is installed in ``layout.path_for_spec(spec)``.
Also ensures dependencies are present and updated in the DB as
either intsalled or missing.'
| def _add(self, spec, directory_layout=None, explicit=False):
| if (not spec.concrete):
raise NonConcreteSpecAddError('Specs added to DB must be concrete.')
for dep in spec.dependencies(_tracked_deps):
dkey = dep.dag_hash()
if (dkey not in self._data):
self._add(dep, directory_layout, explicit=False)
key = spec.dag_h... |
@_autospec
def add(self, spec, directory_layout, explicit=False):
    """Add spec at path to database, locking and reading DB to sync.

    ``add()`` will lock and read from the DB on disk.
    """
    with self.write_transaction():
        self._add(spec, directory_layout, explicit=explicit)
|
def _get_matching_spec_key(self, spec, **kwargs):
    """Get the exact spec OR get a single spec that matches.

    Returns the spec's own DAG hash when it is already a key in the
    database; otherwise falls back to a query and returns the hash of
    the single matching record.  Raises KeyError when nothing matches.
    """
    key = spec.dag_hash()
    if key in self._data:
        # Fast path: the exact hash is already recorded.
        return key
    # Possibly-abstract spec: delegate to query_one (at most one match).
    match = self.query_one(spec, **kwargs)
    if match:
        return match.dag_hash()
    raise KeyError('No such spec in database! %s' % spec)
def _remove(self, spec):
    """Non-locking version of remove(); does real work.

    If the matching record is still referenced by installed dependents
    it is only marked uninstalled; otherwise the record is deleted and
    the ref counts of its tracked dependencies are decremented.
    """
    key = self._get_matching_spec_key(spec)
    record = self._data[key]

    # Still needed by dependents: keep the record around, just mark
    # it as no longer installed.
    if record.ref_count > 0:
        record.installed = False
        return record.spec

    # No remaining dependents: drop the record and release its deps.
    del self._data[key]
    for dependency in record.spec.dependencies(_tracked_deps):
        self._decrement_ref_count(dependency)
    return record.spec
@_autospec
def remove(self, spec):
    """Removes a spec from the database.  To be called on uninstall.

    Reads the database, then:

      1. Marks the spec as not installed.
      2. Removes the spec if it has no more dependents.
      3. If removed, recursively updates dependencies' ref counts
         and removes them if they are no longer needed.
    """
    # All the real work happens in _remove() under the write lock.
    with self.write_transaction():
        return self._remove(spec)
'Return installed specs related to this one.'
| @_autospec
def installed_relatives(self, spec, direction='children', transitive=True):
| if (direction not in ('parents', 'children')):
raise ValueError(('Invalid direction: %s' % direction))
relatives = set()
for spec in self.query(spec):
if transitive:
to_add = spec.traverse(direction=direction, root=False)
elif (direction == 'parents'):
t... |
@_autospec
def installed_extensions_for(self, extendee_spec):
    """Yield the packages of all installed specs that extend
    the given spec."""
    for candidate in self.query():
        try:
            # Raises NoSuchExtensionError when `candidate` is not an
            # activated extension of `extendee_spec`.
            spack.store.layout.check_activated(extendee_spec, candidate)
            yield candidate.package
        except spack.directory_layout.NoSuchExtensionError:
            continue
'Run a query on the database.
``query_spec``
Queries iterate through specs in the database and return
those that satisfy the supplied ``query_spec``. If
query_spec is `any`, This will match all specs in the
database. If it is a spec, we\'ll evaluate
``spec.satisfies(query_spec)``.
The query can be constrained by two ... | def query(self, query_spec=any, known=any, installed=True, explicit=any):
| with self.read_transaction():
if (isinstance(query_spec, spack.spec.Spec) and query_spec._concrete):
hash_key = query_spec.dag_hash()
if (hash_key in self._data):
return [self._data[hash_key].spec]
else:
return []
results = []
... |
def query_one(self, query_spec, known=any, installed=True):
    """Query for exactly one spec that matches the query spec.

    Returns None if no installed package matches.

    Raises:
        AssertionError: if more than one installed spec matches
            ``query_spec``.
    """
    matches = self.query(query_spec, known, installed)
    # Raise explicitly instead of using `assert`: the bare assert is
    # stripped under `python -O`, which would silently break the
    # documented "raises on multiple matches" contract.  Raising
    # AssertionError keeps backward compatibility for callers that
    # catch it, and adds a useful message.
    if len(matches) > 1:
        raise AssertionError(
            'query_one() expected at most one match for %r, got %d'
            % (query_spec, len(matches)))
    return matches[0] if matches else None
def __init__(self, specs=None, restrict=False):
    """Create a new ProviderIndex.

    Optional arguments:

    specs
        List (or sequence) of specs.  If provided, will call
        `update` on this ProviderIndex with each spec in the list.

    restrict
        "restricts" values to the verbatim input specs; do not
        pre-apply package's constraints.
    """
    self.restrict = restrict
    self.providers = {}
    for item in (specs if specs is not None else []):
        # Accept plain strings as well as Spec objects.
        spec = item if isinstance(item, spack.spec.Spec) else spack.spec.Spec(item)
        # Virtual specs cannot themselves provide anything; skip them.
        if not spec.virtual:
            self.update(spec)
'Gives specs of all packages that provide virtual packages
with the supplied specs.'
| def providers_for(self, *vpkg_specs):
| providers = set()
for vspec in vpkg_specs:
if (type(vspec) == str):
vspec = spack.spec.Spec(vspec)
if (vspec.name in self.providers):
for (p_spec, spec_set) in self.providers[vspec.name].items():
if p_spec.satisfies(vspec, deps=False):
... |
def __contains__(self, name):
    """Whether a particular vpkg name is in the index."""
    known_virtuals = self.providers
    return name in known_virtuals
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.