Python
def check_views(self):
    """Checks if the environment's default view can be activated."""
    try:
        # This is effectively a no-op, but it touches all packages in the
        # default view if they are installed.
        for view_name, view in self.views.items():
            for _, spec in self.concretized_specs():
                if spec in view and spec.package.installed:
                    tty.debug(
                        'Spec %s in view %s' % (spec.name, view_name))
    except (spack.repo.UnknownPackageError,
            spack.repo.UnknownNamespaceError) as e:
        tty.warn(e)
        tty.warn(
            'Environment %s includes out of date packages or repos. '
            'Loading the environment view will require reconcretization.'
            % self.name)
Python
def add_default_view_to_env(self, env_mod):
    """
    Collect the environment modifications to activate an environment using
    the default view. Removes duplicate paths.

    Args:
        env_mod (spack.util.environment.EnvironmentModifications): the
            environment modifications object that is modified.
    """
    if default_view_name not in self.views:
        # No default view to add to shell
        return env_mod

    env_mod.extend(uenv.unconditional_environment_modifications(
        self.default_view))

    mods, errors = self._env_modifications_for_default_view()
    env_mod.extend(mods)
    if errors:
        for err in errors:
            tty.warn(*err)

    # deduplicate paths from specs mapped to the same location
    for env_var in env_mod.group_by_name():
        env_mod.prune_duplicate_paths(env_var)

    return env_mod
Python
def rm_default_view_from_env(self, env_mod):
    """
    Collect the environment modifications to deactivate an environment using
    the default view. Reverses the action of ``add_default_view_to_env``.

    Args:
        env_mod (spack.util.environment.EnvironmentModifications): the
            environment modifications object that is modified.
    """
    if default_view_name not in self.views:
        # No default view to remove from the shell
        return env_mod

    env_mod.extend(uenv.unconditional_environment_modifications(
        self.default_view).reversed())

    mods, _ = self._env_modifications_for_default_view(reverse=True)
    env_mod.extend(mods)

    return env_mod
Python
def uninstalled_specs(self):
    """Return a list of all uninstalled specs; dev specs are always
    included, since they may need to be rebuilt."""
    # Do the installed check across all specs within a single
    # DB read transaction to reduce time spent in lock acquisition.
    uninstalled_specs = []
    with spack.store.db.read_transaction():
        for concretized_hash in self.concretized_order:
            spec = self.specs_by_hash[concretized_hash]
            if not spec.package.installed or (
                    spec.satisfies('dev_path=*') or
                    spec.satisfies('^dev_path=*')
            ):
                uninstalled_specs.append(spec)
    return uninstalled_specs
Python
def install_all(self, **install_args):
    """Install all concretized specs in an environment.

    Note: this does not regenerate the views for the environment;
    that needs to be done separately with a call to write().

    Args:
        install_args (dict): keyword install arguments
    """
    self.install_specs(None, **install_args)
Python
def all_specs(self):
    """Return all specs, even those a user spec would shadow."""
    all_specs = set()
    for h in self.concretized_order:
        all_specs.update(self.specs_by_hash[h].traverse())

    return sorted(all_specs)
Python
def all_hashes(self):
    """Return hashes of all specs.

    Note these hashes exclude build dependencies."""
    return list(set(s.dag_hash() for s in self.all_specs()))
Python
def matching_spec(self, spec):
    """
    Given a spec (likely not concretized), find a matching concretized
    spec in the environment.

    The matching spec does not have to be installed in the environment,
    but must be concrete (specs added with `spack add` without an
    intervening `spack concretize` will not be matched).

    If there is a single root spec that matches the provided spec or a
    single dependency spec that matches the provided spec, then the
    concretized instance of that spec will be returned.

    If multiple root specs match the provided spec, or no root specs match
    and multiple dependency specs match, then this raises an error
    and reports all matching specs.
    """
    # Root specs will be keyed by concrete spec, value abstract
    # Dependency-only specs will have value None
    matches = {}

    if not isinstance(spec, spack.spec.Spec):
        spec = spack.spec.Spec(spec)

    for user_spec, concretized_user_spec in self.concretized_specs():
        # Deal with concrete specs differently
        if spec.concrete:
            # Matching a concrete spec is more restrictive
            # than just matching the dag hash
            is_match = (
                spec in concretized_user_spec and
                concretized_user_spec[spec.name].build_hash() ==
                spec.build_hash()
            )
            if is_match:
                matches[spec] = spec
            continue

        if concretized_user_spec.satisfies(spec):
            matches[concretized_user_spec] = user_spec
        for dep_spec in concretized_user_spec.traverse(root=False):
            if dep_spec.satisfies(spec):
                # Don't overwrite the abstract spec if present
                # If not present already, set to None
                matches[dep_spec] = matches.get(dep_spec, None)

    if not matches:
        return None
    elif len(matches) == 1:
        return list(matches.keys())[0]

    root_matches = dict((concrete, abstract)
                        for concrete, abstract in matches.items()
                        if abstract)

    if len(root_matches) == 1:
        return list(root_matches.items())[0][0]

    # More than one spec matched, and either multiple roots matched or
    # none of the matches were roots
    # If multiple root specs match, it is assumed that the abstract
    # spec will most-succinctly summarize the difference between them
    # (and the user can enter one of these to disambiguate)
    match_strings = []
    fmt_str = '{hash:7} ' + spack.spec.default_format
    for concrete, abstract in matches.items():
        if abstract:
            s = 'Root spec %s\n %s' % (abstract, concrete.format(fmt_str))
        else:
            s = 'Dependency spec\n %s' % concrete.format(fmt_str)
        match_strings.append(s)
    matches_str = '\n'.join(match_strings)

    msg = ("{0} matches multiple specs in the environment {1}: \n"
           "{2}".format(str(spec), self.name, matches_str))
    raise SpackEnvironmentError(msg)
Python
def _to_lockfile_dict(self):
    """Create a dictionary to store a lockfile for this environment."""
    concrete_specs = {}
    for spec in self.specs_by_hash.values():
        for s in spec.traverse():
            build_hash = s.build_hash()
            if build_hash not in concrete_specs:
                spec_dict = s.to_node_dict(hash=ht.build_hash)
                # Assumes no legacy formats, since this was just created.
                spec_dict[ht.dag_hash.name] = s.dag_hash()
                concrete_specs[build_hash] = spec_dict

    hash_spec_list = zip(
        self.concretized_order, self.concretized_user_specs)

    # this is the lockfile we'll write out
    data = {
        # metadata about the format
        '_meta': {
            'file-type': 'spack-lockfile',
            'lockfile-version': lockfile_format_version,
            'specfile-version': spack.spec.specfile_format_version
        },

        # users specs + hashes are the 'roots' of the environment
        'roots': [{
            'hash': h,
            'spec': str(s)
        } for h, s in hash_spec_list],

        # Concrete specs by hash, including dependencies
        'concrete_specs': concrete_specs,
    }

    return data
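For reference, the dictionary built above has roughly the following shape. The hash keys and the spec string are hypothetical placeholders, and the entries under 'concrete_specs' are the full node dicts produced by to_node_dict rather than the stubs shown here.

lockfile_sketch = {
    '_meta': {
        'file-type': 'spack-lockfile',
        'lockfile-version': lockfile_format_version,   # module-level constant
        'specfile-version': spack.spec.specfile_format_version,
    },
    # one entry per user root: its hash plus the abstract spec string
    'roots': [
        {'hash': '<build-hash-of-zlib>', 'spec': 'zlib@1.2.11'},
    ],
    # node dicts for every spec in the DAG, keyed by build hash
    'concrete_specs': {
        '<build-hash-of-zlib>': {'...': '...'},
    },
}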
Python
def _read_lockfile_dict(self, d):
    """Read a lockfile dictionary into this environment."""
    roots = d['roots']
    self.concretized_user_specs = [Spec(r['spec']) for r in roots]
    self.concretized_order = [r['hash'] for r in roots]

    json_specs_by_hash = d['concrete_specs']
    root_hashes = set(self.concretized_order)

    specs_by_hash = {}
    for build_hash, node_dict in json_specs_by_hash.items():
        spec = Spec.from_node_dict(node_dict)
        if d['_meta']['lockfile-version'] > 1:
            # Build hash is stored as a key, but not as part of the node dict
            # To ensure build hashes are not recomputed, we reattach here
            setattr(spec, ht.build_hash.attr, build_hash)
        specs_by_hash[build_hash] = spec

    for build_hash, node_dict in json_specs_by_hash.items():
        for _, dep_hash, deptypes, _ in (
                Spec.dependencies_from_node_dict(node_dict)):
            specs_by_hash[build_hash]._add_dependency(
                specs_by_hash[dep_hash], deptypes)

    # If we are reading an older lockfile format (which uses dag hashes
    # that exclude build deps), we use this to convert the old
    # concretized_order to the full hashes (preserving the order)
    old_hash_to_new = {}
    self.specs_by_hash = {}
    for _, spec in specs_by_hash.items():
        dag_hash = spec.dag_hash()
        build_hash = spec.build_hash()
        if dag_hash in root_hashes:
            old_hash_to_new[dag_hash] = build_hash

        if (dag_hash in root_hashes or build_hash in root_hashes):
            self.specs_by_hash[build_hash] = spec

    if old_hash_to_new:
        # Replace any older hashes in concretized_order with hashes
        # that include build deps
        self.concretized_order = [
            old_hash_to_new.get(h, h) for h in self.concretized_order]
Python
def write(self, regenerate=True):
    """Writes an in-memory environment to its location on disk.

    Write out package files for each newly concretized spec. Also
    regenerate any views associated with the environment and run post-write
    hooks, if regenerate is True.

    Arguments:
        regenerate (bool): regenerate views and run post-write hooks as
            well as writing if True.
    """
    # Intercept environments not using the latest schema format and prevent
    # them from being modified
    manifest_exists = os.path.exists(self.manifest_path)
    if manifest_exists and not is_latest_format(self.manifest_path):
        msg = ('The environment "{0}" needs to be written to disk, but '
               'is currently using a deprecated format. Please update it '
               'using:\n\n'
               '\tspack env update {0}\n\n'
               'Note that previous versions of Spack will not be able to '
               'use the updated configuration.')
        raise RuntimeError(msg.format(self.name))

    # ensure path in var/spack/environments
    fs.mkdirp(self.path)

    yaml_dict = config_dict(self.yaml)
    raw_yaml_dict = config_dict(self.raw_yaml)

    if self.specs_by_hash:
        # ensure the prefix/.env directory exists
        fs.mkdirp(self.env_subdir_path)

        for spec in self.new_specs:
            for dep in spec.traverse():
                if not dep.concrete:
                    raise ValueError('specs passed to environment.write() '
                                     'must be concrete!')

                root = os.path.join(self.repos_path, dep.namespace)
                repo = spack.repo.create_or_construct(root, dep.namespace)
                pkg_dir = repo.dirname_for_package_name(dep.name)

                fs.mkdirp(pkg_dir)
                spack.repo.path.dump_provenance(dep, pkg_dir)

        # write the lock file last
        with fs.write_tmp_and_move(self.lock_path) as f:
            sjson.dump(self._to_lockfile_dict(), stream=f)
        self._update_and_write_manifest(raw_yaml_dict, yaml_dict)
    else:
        with fs.safe_remove(self.lock_path):
            self._update_and_write_manifest(raw_yaml_dict, yaml_dict)

    # TODO: rethink where this needs to happen along with
    # writing. For some of the commands (like install, which write
    # concrete specs AND regen) this might as well be a separate
    # call. But, having it here makes the views consistent with the
    # concretized environment for most operations. Which is the
    # special case?
    if regenerate:
        self.regenerate_views()

        # Run post_env_hooks
        spack.hooks.post_env_write(self)

    # new specs and new installs reset at write time
    self.new_specs = []
    self.new_installs = []
Python
def _update_and_write_manifest(self, raw_yaml_dict, yaml_dict):
    """Update YAML manifest for this environment based on changes to
    spec lists and views and write it.
    """
    # invalidate _repo cache
    self._repo = None
    # put any changes in the definitions in the YAML
    for name, speclist in self.spec_lists.items():
        if name == user_speclist_name:
            # The primary list is handled differently
            continue

        active_yaml_lists = [x for x in yaml_dict.get('definitions', [])
                             if name in x and
                             _eval_conditional(x.get('when', 'True'))]

        # Remove any specs in yaml that are not in internal representation
        for ayl in active_yaml_lists:
            # If it's not a string, it's a matrix. Those can't have changed
            # If it is a string that starts with '$', it's a reference.
            # Those also can't have changed.
            ayl[name][:] = [s for s in ayl.setdefault(name, [])
                            if (not isinstance(s, six.string_types)) or
                            s.startswith('$') or Spec(s) in speclist.specs]

        # Put the new specs into the first active list from the yaml
        new_specs = [entry for entry in speclist.yaml_list
                     if isinstance(entry, six.string_types) and
                     not any(entry in ayl[name]
                             for ayl in active_yaml_lists)]
        list_for_new_specs = active_yaml_lists[0].setdefault(name, [])
        list_for_new_specs[:] = list_for_new_specs + new_specs
    # put the new user specs in the YAML.
    # This can be done directly because there can't be multiple definitions
    # nor when clauses for `specs` list.
    yaml_spec_list = yaml_dict.setdefault(user_speclist_name, [])
    yaml_spec_list[:] = self.user_specs.yaml_list
    # Construct YAML representation of view
    default_name = default_view_name
    if self.views and len(self.views) == 1 and default_name in self.views:
        path = self.default_view.root
        if self.default_view == ViewDescriptor(self.path,
                                               self.view_path_default):
            view = True
        elif self.default_view == ViewDescriptor(self.path, path):
            view = path
        else:
            view = dict((name, view.to_dict())
                        for name, view in self.views.items())
    elif self.views:
        view = dict((name, view.to_dict())
                    for name, view in self.views.items())
    else:
        view = False
    yaml_dict['view'] = view

    if self.dev_specs:
        # Remove entries that are mirroring defaults
        write_dev_specs = copy.deepcopy(self.dev_specs)
        for name, entry in write_dev_specs.items():
            if entry['path'] == name:
                del entry['path']
        yaml_dict['develop'] = write_dev_specs
    else:
        yaml_dict.pop('develop', None)

    # Remove yaml sections that are shadowing defaults
    # construct garbage path to ensure we don't find a manifest by accident
    with fs.temp_cwd() as env_dir:
        bare_env = Environment(env_dir, with_view=self.view_path_default)
        keys_present = list(yaml_dict.keys())
        for key in keys_present:
            if yaml_dict[key] == config_dict(bare_env.yaml).get(key, None):
                if key not in raw_yaml_dict:
                    del yaml_dict[key]
    # if all that worked, write out the manifest file at the top level
    # (we used to check whether the yaml had changed and not write it out
    # if it hadn't. We can't do that anymore because the only thing that
    # changed could be the "override" attribute on a config dict, which
    # would not show up in even a string comparison between the two keys).
    changed = not yaml_equivalent(self.yaml, self.raw_yaml)
    written = os.path.exists(self.manifest_path)
    if changed or not written:
        self.raw_yaml = copy.deepcopy(self.yaml)
        with fs.write_tmp_and_move(os.path.realpath(self.manifest_path)) as f:
            _write_yaml(self.yaml, f)
Python
def yaml_equivalent(first, second):
    """Returns whether two spack yaml items are equivalent, including overrides
    """
    if isinstance(first, dict):
        return isinstance(second, dict) and _equiv_dict(first, second)
    elif isinstance(first, list):
        return isinstance(second, list) and _equiv_list(first, second)
    else:  # it's a string
        return isinstance(second, six.string_types) and first == second
Python
def _equiv_list(first, second):
    """Returns whether two spack yaml lists are equivalent, including overrides
    """
    if len(first) != len(second):
        return False
    return all(yaml_equivalent(f, s) for f, s in zip(first, second))
Python
def _equiv_dict(first, second):
    """Returns whether two spack yaml dicts are equivalent, including overrides
    """
    if len(first) != len(second):
        return False
    same_values = all(yaml_equivalent(fv, sv)
                      for fv, sv in zip(first.values(), second.values()))
    same_keys_with_same_overrides = all(
        fk == sk and getattr(fk, 'override', False) == getattr(
            sk, 'override', False)
        for fk, sk in zip(first.keys(), second.keys()))
    return same_values and same_keys_with_same_overrides
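Because the helpers above compare keys and values pairwise with zip, two dicts with the same entries in a different insertion order are not considered equivalent. A minimal standalone sketch, assuming the three functions above are in scope and six is importable; plain str/dict values stand in for Spack's syaml types, whose keys may additionally carry an 'override' attribute:

a = {'config': {'build_jobs': '4'}, 'packages': {}}
b = {'config': {'build_jobs': '4'}, 'packages': {}}
c = {'packages': {}, 'config': {'build_jobs': '4'}}  # same entries, new order

print(yaml_equivalent(a, b))  # True: keys and values line up pairwise
print(yaml_equivalent(a, c))  # False: zip() pairs 'config' with 'packages'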
Python
def manifest_file(env_name_or_dir):
    """Return the absolute path to a manifest file given the environment
    name or directory.

    Args:
        env_name_or_dir (str): either the name of a valid environment
            or a directory where a manifest file resides

    Raises:
        AssertionError: if the environment is not found
    """
    env_dir = None
    if is_env_dir(env_name_or_dir):
        env_dir = os.path.abspath(env_name_or_dir)
    elif exists(env_name_or_dir):
        env_dir = os.path.abspath(root(env_name_or_dir))

    assert env_dir, "environment not found [env={0}]".format(env_name_or_dir)

    return os.path.join(env_dir, manifest_name)
Python
def update_yaml(manifest, backup_file):
    """Update a manifest file from an old format to the current one.

    Args:
        manifest (str): path to a manifest file
        backup_file (str): file where to copy the original manifest

    Returns:
        True if the manifest was updated, False otherwise.

    Raises:
        AssertionError: in case anything goes wrong during the update
    """
    # Check if the environment needs update
    with open(manifest) as f:
        data = syaml.load(f)

    top_level_key = _top_level_key(data)
    needs_update = spack.schema.env.update(data[top_level_key])
    if not needs_update:
        msg = "No update needed [manifest={0}]".format(manifest)
        tty.debug(msg)
        return False

    # Copy environment to a backup file and update it
    msg = ('backup file "{0}" already exists on disk. Check its content '
           'and remove it before trying to update again.')
    assert not os.path.exists(backup_file), msg.format(backup_file)

    shutil.copy(manifest, backup_file)
    with open(manifest, 'w') as f:
        syaml.dump_config(data, f)
    return True
Python
def _top_level_key(data):
    """Return the top level key used in this environment

    Args:
        data (dict): raw yaml data of the environment

    Returns:
        Either 'spack' or 'env'
    """
    msg = ('cannot find top level attribute "spack" or "env" '
           'in the environment')
    assert any(x in data for x in ('spack', 'env')), msg
    if 'spack' in data:
        return 'spack'
    return 'env'
Python
@contextlib.contextmanager
def no_active_environment():
    """Deactivate the active environment for the duration of the context. Has no
    effect when there is no active environment."""
    env = active_environment()
    try:
        deactivate()
        yield
    finally:
        # TODO: we don't handle `use_env_repo` here.
        if env:
            activate(env)
Python
def kernel_version():
    """Return the kernel version as a Version object.

    Note that the kernel version is distinct from OS and/or
    distribution versions. For instance:
    >>> external.distro.id()
    'centos'
    >>> external.distro.version()
    '7'
    >>> platform.release()
    '5.10.84+'
    """
    # Strip '+' characters just in case we're running a
    # version built from git/etc
    clean_version = re.sub(r'\+', r'', py_platform.release())
    return Version(clean_version)
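A tiny standalone illustration of the '+' stripping above, using only re and the release string from the docstring (Version and py_platform are Spack-side names and are not needed here):

import re

release = '5.10.84+'                        # e.g. platform.release() on a custom kernel
clean_version = re.sub(r'\+', r'', release)
print(clean_version)                        # 5.10.84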
Python
def _setup_dependent_env(self, env, dependent_spec, deptypes):
    """Set PATH and PERL5LIB to include the extension and
    any other perl extensions it depends on,
    assuming they were installed with INSTALL_BASE defined."""
    perl_lib_dirs = []
    for d in dependent_spec.traverse(deptype=deptypes):
        if d.package.extends(self.spec):
            perl_lib_dirs.append(d.prefix.lib.perl5)
    if perl_lib_dirs:
        perl_lib_path = ':'.join(perl_lib_dirs)
        env.prepend_path('PERL5LIB', perl_lib_path)
Python
def filter_config_dot_pm(self):
    """Run after install so that Config.pm records the compiler that Spack
    built the package with. If this isn't done, $Config{cc} will
    be set to Spack's cc wrapper script. These files are read-only, which
    frustrates filter_file on some filesystems (NFSv4), so make them
    temporarily writable.
    """
    kwargs = {'ignore_absent': True, 'backup': False, 'string': False}

    # Find the actual path to the installed Config.pm file.
    perl = self.spec['perl'].command
    config_dot_pm = perl('-MModule::Loaded', '-MConfig', '-e',
                         'print is_loaded(Config)', output=str)

    with self.make_briefly_writable(config_dot_pm):
        match = 'cc *=>.*'
        substitute = "cc => '{cc}',".format(cc=self.compiler.cc)
        filter_file(match, substitute, config_dot_pm, **kwargs)

    # And the same for Config_heavy.pl
    d = os.path.dirname(config_dot_pm)
    config_heavy = join_path(d, 'Config_heavy.pl')

    with self.make_briefly_writable(config_heavy):
        match = '^cc=.*'
        substitute = "cc='{cc}'".format(cc=self.compiler.cc)
        filter_file(match, substitute, config_heavy, **kwargs)

        match = '^ld=.*'
        substitute = "ld='{ld}'".format(ld=self.compiler.cc)
        filter_file(match, substitute, config_heavy, **kwargs)

        match = "^ccflags='"
        substitute = "ccflags='%s " % ' ' \
            .join(self.spec.compiler_flags['cflags'])
        filter_file(match, substitute, config_heavy, **kwargs)
Python
def update(data):
    """Update the data in place to remove deprecated properties.

    Args:
        data (dict): dictionary to be updated

    Returns:
        True if data was changed, False otherwise
    """
    # currently deprecated properties are
    #   install_tree: <string>
    #   install_path_scheme: <string>
    # updated:
    #   install_tree: {root: <string>, projections: <projections_dict>}
    # root replaces install_tree, projections replace install_path_scheme
    changed = False

    install_tree = data.get('install_tree', None)
    if isinstance(install_tree, six.string_types):
        # deprecated short-form install tree
        # add value as `root` in updated install_tree
        data['install_tree'] = {'root': install_tree}
        changed = True

    install_path_scheme = data.pop('install_path_scheme', None)
    if install_path_scheme:
        projections_data = {
            'projections': {
                'all': install_path_scheme
            }
        }

        # update projections with install_scheme
        # whether install_tree was updated or not
        # we merge the yaml to ensure we don't invalidate other projections
        update_data = data.get('install_tree', {})
        update_data = spack.config.merge_yaml(update_data, projections_data)
        data['install_tree'] = update_data
        changed = True

    use_curl = data.pop('use_curl', None)
    if use_curl is not None:
        data['url_fetch_method'] = 'curl' if use_curl else 'urllib'
        changed = True

    return changed
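A hedged illustration of the in-place rewrite performed above, using a hypothetical old-style config dict. Only the install_tree and use_curl branches are exercised here, so spack.config.merge_yaml is never called; six must still be importable for the string check.

old = {
    'install_tree': '/opt/spack/opt',   # deprecated short form (illustrative path)
    'use_curl': True,                   # deprecated fetch toggle
}

changed = update(old)

# changed is True and `old` now reads roughly:
# {
#     'install_tree': {'root': '/opt/spack/opt'},
#     'url_fetch_method': 'curl',
# }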
Python
def determine_number_of_jobs(
        parallel=False, command_line=None, config_default=None, max_cpus=None):
    """
    Packages that require sequential builds need 1 job. Otherwise we use the
    number of jobs set on the command line. If not set, then we use the config
    defaults (which are usually set through the builtin config scope), but we
    cap to the number of CPUs available to avoid oversubscription.

    Parameters:
        parallel (bool or None): true when package supports parallel builds
        command_line (int or None): command line override
        config_default (int or None): config default number of jobs
        max_cpus (int or None): maximum number of CPUs available. When None,
            this value is automatically determined.
    """
    if not parallel:
        return 1

    if command_line is None and 'command_line' in spack.config.scopes():
        command_line = spack.config.get('config:build_jobs',
                                        scope='command_line')

    if command_line is not None:
        return command_line

    max_cpus = max_cpus or cpus_available()

    # in some rare cases _builtin config may not be set, so default to max 16
    config_default = config_default or spack.config.get('config:build_jobs', 16)

    return min(max_cpus, config_default)
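A brief sketch of the precedence implemented above. The first two calls pass everything explicitly and never reach the spack.config lookups; the fallback path described in the final comment does consult spack.config, so it assumes a Spack runtime:

determine_number_of_jobs(parallel=False)                  # -> 1, sequential build

determine_number_of_jobs(parallel=True, command_line=4,
                         config_default=16, max_cpus=8)   # -> 4, command line wins

# With no command-line value, the result is min(max_cpus, config_default),
# e.g. min(8, 16) -> 8.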
Python
def _set_variables_for_single_module(pkg, module):
    """Helper function to set module variables for single module."""
    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    marker = '_set_run_already_called'
    if getattr(module, marker, False):
        return

    jobs = determine_number_of_jobs(parallel=pkg.parallel)

    m = module
    m.make_jobs = jobs

    # TODO: make these build deps that can be installed if not found.
    m.make = MakeExecutable('make', jobs)
    m.gmake = MakeExecutable('gmake', jobs)
    m.scons = MakeExecutable('scons', jobs)
    m.ninja = MakeExecutable('ninja', jobs)

    # easy shortcut to os.environ
    m.env = os.environ

    # Find the configure script in the archive path
    # Don't use which for this; we want to find it in the current dir.
    m.configure = Executable('./configure')
    m.meson = Executable('meson')
    m.cmake = Executable('cmake')
    m.ctest = MakeExecutable('ctest', jobs)

    # Standard CMake arguments
    m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
    m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)

    # Put spack compiler paths in module scope.
    link_dir = spack.paths.build_env_path
    m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
    m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
    m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
    m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])

    # Emulate some shell commands for convenience
    m.pwd = os.getcwd
    m.cd = os.chdir
    m.mkdir = os.mkdir
    m.makedirs = os.makedirs
    m.remove = os.remove
    m.removedirs = os.removedirs

    m.symlink = os.symlink
    m.mkdirp = mkdirp
    m.install = install
    m.install_tree = install_tree
    m.rmtree = shutil.rmtree
    m.move = shutil.move

    # Useful directories within the prefix are encapsulated in
    # a Prefix object.
    m.prefix = pkg.prefix

    # Platform-specific library suffix.
    m.dso_suffix = dso_suffix

    def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
        compiler_path = kwargs.get('compiler', m.spack_cc)
        compiler = Executable(compiler_path)
        return _static_to_shared_library(pkg.spec.architecture, compiler,
                                         static_lib, shared_lib, **kwargs)

    m.static_to_shared_library = static_to_shared_library

    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    setattr(m, marker, True)
Python
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
                              **kwargs):
    """
    Converts a static library to a shared library. The static library has to
    be built with PIC for the conversion to work.

    Parameters:
        static_lib (str): Path to the static library.
        shared_lib (str): Path to the shared library. Default is to derive
                          from the static library's path.

    Keyword arguments:
        compiler (str): Path to the compiler. Default is spack_cc.
        compiler_output: Where to print compiler output to.
        arguments (str list): Additional arguments for the compiler.
        version (str): Library version. Default is unspecified.
        compat_version (str): Library compatibility version. Default is
                              version.
    """
    compiler_output = kwargs.get('compiler_output', None)
    arguments = kwargs.get('arguments', [])
    version = kwargs.get('version', None)
    compat_version = kwargs.get('compat_version', version)

    if not shared_lib:
        shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
                                      dso_suffix)

    compiler_args = []

    # TODO: Compiler arguments should not be hardcoded but provided by
    #       the different compiler classes.
    if 'linux' in arch or 'cray' in arch:
        soname = os.path.basename(shared_lib)

        if compat_version:
            soname += '.{0}'.format(compat_version)

        compiler_args = [
            '-shared',
            '-Wl,-soname,{0}'.format(soname),
            '-Wl,--whole-archive',
            static_lib,
            '-Wl,--no-whole-archive'
        ]
    elif 'darwin' in arch:
        install_name = shared_lib

        if compat_version:
            install_name += '.{0}'.format(compat_version)

        compiler_args = [
            '-dynamiclib',
            '-install_name', '{0}'.format(install_name),
            '-Wl,-force_load,{0}'.format(static_lib)
        ]

        if compat_version:
            compiler_args.extend(['-compatibility_version', '{0}'.format(
                compat_version)])

        if version:
            compiler_args.extend(['-current_version', '{0}'.format(version)])

    if len(arguments) > 0:
        compiler_args.extend(arguments)

    shared_lib_base = shared_lib

    if version:
        shared_lib += '.{0}'.format(version)
    elif compat_version:
        shared_lib += '.{0}'.format(compat_version)

    compiler_args.extend(['-o', shared_lib])

    # Create symlinks for version and compat_version
    shared_lib_link = os.path.basename(shared_lib)

    if version or compat_version:
        os.symlink(shared_lib_link, shared_lib_base)

    if compat_version and compat_version != version:
        os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
                                                     compat_version))

    return compiler(*compiler_args, output=compiler_output)
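For orientation, a hypothetical call through the static_to_shared_library wrapper set up earlier, on a Linux architecture with dso_suffix assumed to be 'so'; file names and version numbers are illustrative only:

static_to_shared_library('libfoo.a', version='1.2.3', compat_version='1')

# Symlinks created before the compiler runs:
#   libfoo.so   -> libfoo.so.1.2.3
#   libfoo.so.1 -> libfoo.so.1.2.3
# Resulting compiler invocation (spack_cc unless a 'compiler' kwarg is given):
#   cc -shared -Wl,-soname,libfoo.so.1 \
#      -Wl,--whole-archive libfoo.a -Wl,--no-whole-archive \
#      -o libfoo.so.1.2.3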
Python
def load_external_modules(pkg):
    """Traverse a package's spec DAG and load any external modules.

    Traverse a package's dependencies and load any external modules
    associated with them.

    Args:
        pkg (spack.package.PackageBase): package to load deps for
    """
    for dep in list(pkg.spec.traverse()):
        external_modules = dep.external_modules or []
        for external_module in external_modules:
            load_module(external_module)
Python
def modifications_from_dependencies(spec, context, custom_mods_only=True):
    """Returns the environment modifications that are required by
    the dependencies of a spec and also applies modifications
    to this spec's package at module scope, if need be.

    Environment modifications include:

    - Updating PATH so that executables can be found
    - Updating CMAKE_PREFIX_PATH and PKG_CONFIG_PATH so that their respective
      tools can find Spack-built dependencies
    - Running custom package environment modifications

    Custom package modifications can conflict with the default PATH changes
    we make (specifically for the PATH, CMAKE_PREFIX_PATH, and PKG_CONFIG_PATH
    environment variables), so this applies changes in a fixed order:

    - All modifications (custom and default) from external deps first
    - All modifications from non-external deps afterwards

    With that order, `PrependPath` actions from non-external default
    environment modifications will take precedence over custom modifications
    from external packages.

    A secondary constraint is that custom and default modifications are
    grouped on a per-package basis: combined with the post-order traversal
    this means that default modifications of dependents can override
    custom modifications of dependencies (again, this would only occur
    for PATH, CMAKE_PREFIX_PATH, or PKG_CONFIG_PATH).

    Args:
        spec (spack.spec.Spec): spec for which we want the modifications
        context (str): either 'build' for build-time modifications or 'run'
            for run-time modifications
    """
    if context not in ['build', 'run', 'test']:
        raise ValueError(
            "Expecting context to be one of ['build', 'run', 'test'], "
            "got: {0}".format(context))

    env = EnvironmentModifications()

    # Note: see computation of 'custom_mod_deps' and 'exe_deps' later in this
    # function; these sets form the building blocks of those collections.
    build_deps = set(spec.dependencies(deptype=('build', 'test')))
    link_deps = set(spec.traverse(root=False, deptype='link'))
    build_link_deps = build_deps | link_deps
    build_and_supporting_deps = set()
    for build_dep in build_deps:
        build_and_supporting_deps.update(build_dep.traverse(deptype='run'))
    run_and_supporting_deps = set(
        spec.traverse(root=False, deptype=('run', 'link')))
    test_and_supporting_deps = set()
    for test_dep in set(spec.dependencies(deptype='test')):
        test_and_supporting_deps.update(test_dep.traverse(deptype='run'))

    # All dependencies that might have environment modifications to apply
    custom_mod_deps = set()
    if context == 'build':
        custom_mod_deps.update(build_and_supporting_deps)
        # Tests may be performed after build
        custom_mod_deps.update(test_and_supporting_deps)
    else:
        # test/run context
        custom_mod_deps.update(run_and_supporting_deps)
        if context == 'test':
            custom_mod_deps.update(test_and_supporting_deps)
    custom_mod_deps.update(link_deps)

    # Determine 'exe_deps': the set of packages with binaries we want to use
    if context == 'build':
        exe_deps = build_and_supporting_deps | test_and_supporting_deps
    elif context == 'run':
        exe_deps = set(spec.traverse(deptype='run'))
    elif context == 'test':
        exe_deps = test_and_supporting_deps

    def default_modifications_for_dep(dep):
        if (dep in build_link_deps and
                not is_system_path(dep.prefix) and
                context == 'build'):
            prefix = dep.prefix

            env.prepend_path('CMAKE_PREFIX_PATH', prefix)

            for directory in ('lib', 'lib64', 'share'):
                pcdir = os.path.join(prefix, directory, 'pkgconfig')
                if os.path.isdir(pcdir):
                    env.prepend_path('PKG_CONFIG_PATH', pcdir)

        if dep in exe_deps and not is_system_path(dep.prefix):
            _make_runnable(dep, env)

    def add_modifications_for_dep(dep):
        # Some callers of this function only want the custom modifications.
        # For callers that want both custom and default modifications, we want
        # to perform the default modifications here (this groups custom
        # and default modifications together on a per-package basis).
        if not custom_mods_only:
            default_modifications_for_dep(dep)

        # Perform custom modifications here (PrependPath actions performed in
        # the custom method override the default environment modifications
        # we do to help the build, namely for PATH, CMAKE_PREFIX_PATH, and
        # PKG_CONFIG_PATH)
        if dep in custom_mod_deps:
            dpkg = dep.package
            set_module_variables_for_package(dpkg)
            # Allow dependencies to modify the module
            dpkg.setup_dependent_package(spec.package.module, spec)
            if context == 'build':
                dpkg.setup_dependent_build_environment(env, spec)
            else:
                dpkg.setup_dependent_run_environment(env, spec)

    # Note that we want to perform environment modifications in a fixed order.
    # The Spec.traverse method provides this: i.e. in addition to
    # the post-order semantics, it also guarantees a fixed traversal order
    # among dependencies which are not constrained by post-order semantics.
    for dspec in spec.traverse(root=False, order='post'):
        if dspec.external:
            add_modifications_for_dep(dspec)

    for dspec in spec.traverse(root=False, order='post'):
        # Default env modifications for non-external packages can override
        # custom modifications of external packages (this can only occur
        # for modifications to PATH, CMAKE_PREFIX_PATH, and PKG_CONFIG_PATH)
        if not dspec.external:
            add_modifications_for_dep(dspec)

    return env
def start_build_process(pkg, function, kwargs): """Create a child process to do part of a spack build. Args: pkg (spack.package.PackageBase): package whose environment we should set up the child process for. function (typing.Callable): argless function to run in the child process. Usage:: def child_fun(): # do stuff build_env.start_build_process(pkg, child_fun) The child process is run with the build environment set up by spack.build_environment. This allows package authors to have full control over the environment, etc. without affecting other builds that might be executed in the same spack call. If something goes wrong, the child process catches the error and passes it to the parent wrapped in a ChildError. The parent is expected to handle (or re-raise) the ChildError. This uses `multiprocessing.Process` to create the child process. The mechanism used to create the process differs on different operating systems and for different versions of Python. In some cases "fork" is used (i.e. the "fork" system call) and some cases it starts an entirely new Python interpreter process (in the docs this is referred to as the "spawn" start method). Breaking it down by OS: - Linux always uses fork. - Mac OS uses fork before Python 3.8 and "spawn" for 3.8 and after. - Windows always uses the "spawn" start method. For more information on `multiprocessing` child process creation mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods """ parent_pipe, child_pipe = multiprocessing.Pipe() input_multiprocess_fd = None serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg) try: # Forward sys.stdin when appropriate, to allow toggling verbosity if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'): input_fd = os.dup(sys.stdin.fileno()) input_multiprocess_fd = MultiProcessFd(input_fd) p = multiprocessing.Process( target=_setup_pkg_and_run, args=(serialized_pkg, function, kwargs, child_pipe, input_multiprocess_fd)) p.start() except InstallError as e: e.pkg = pkg raise finally: # Close the input stream in the parent process if input_multiprocess_fd is not None: input_multiprocess_fd.close() child_result = parent_pipe.recv() p.join() # If returns a StopPhase, raise it if isinstance(child_result, StopPhase): # do not print raise child_result # let the caller know which package went wrong. if isinstance(child_result, InstallError): child_result.pkg = pkg if isinstance(child_result, ChildError): # If the child process raised an error, print its output here rather # than waiting until the call to SpackError.die() in main(). This # allows exception handling output to be logged from within Spack. # see spack.main.SpackCommand. child_result.print_context() raise child_result return child_result
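A brief usage sketch following the docstring's own example; pkg is assumed to
be a spack.package.PackageBase instance, and the empty dict stands in for the
keyword arguments that _setup_pkg_and_run forwards to the child (not shown
here):

def child_fun():
    # work performed inside the build child process, e.g. a build phase
    pass

result = start_build_process(pkg, child_fun, {})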
def save(self, obj, filename): """ Save a monitor json result to the save directory. """ filename = os.path.join(self.save_dir, filename) write_json(obj, filename) return {"message": "Build saved locally to %s" % filename}
def load_build_environment(self, spec):
    """Load a build environment from install_environment.json.

    If we are running an analyze command, we will need to load previously
    used build environment metadata from install_environment.json to capture
    what was done during the build.
    """
    if not hasattr(spec, "package") or not spec.package:
        tty.die("A spec must have a package to load the environment.")

    pkg_dir = os.path.dirname(spec.package.install_log_path)
    env_file = os.path.join(pkg_dir, "install_environment.json")
    build_environment = read_json(env_file)
    if not build_environment:
        tty.warn(
            "install_environment.json not found in package folder. "
            "This means that the current environment metadata will be used."
        )
    else:
        self.build_environment = build_environment
def capture_build_environment(self): """ Capture the environment for the build. This uses spack.util.environment.get_host_environment_metadata to do so. This is important because it's a unique identifier, along with the spec, for a Build. It should look something like this: {'host_os': 'ubuntu20.04', 'platform': 'linux', 'host_target': 'skylake', 'hostname': 'vanessa-ThinkPad-T490s', 'spack_version': '0.16.1-1455-52d5b55b65', 'kernel_version': '#73-Ubuntu SMP Mon Jan 18 17:25:17 UTC 2021'} This is saved to a package install's metadata folder as install_environment.json, and can be loaded by the monitor for uploading data relevant to a later analysis. """ from spack.util.environment import get_host_environment_metadata self.build_environment = get_host_environment_metadata() keys = list(self.build_environment.keys()) # Allow to customize any of these values via the environment for key in keys: envar_name = "SPACKMON_%s" % key.upper() envar = os.environ.get(envar_name) if envar: self.build_environment[key] = envar
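Illustrative sketch of the SPACKMON_* override hook above; monitor stands for
a client instance exposing this method (hypothetical variable name):

import os

os.environ["SPACKMON_HOST_OS"] = "rhel8"  # hypothetical override
monitor.capture_build_environment()
# monitor.build_environment['host_os'] now reports 'rhel8'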
def require_auth(self):
    """Require authentication.

    Both the token and username must be set, unless results are only being
    saved locally.
    """
    if not self.save_local and (not self.token or not self.username):
        tty.die("You are required to export SPACKMON_TOKEN and SPACKMON_USER")
def prepare_request(self, endpoint, data, headers): """ Prepare a request given an endpoint, data, and headers. If data is provided, urllib makes the request a POST """ # Always reset headers for new request. self.reset() # Preserve previously used auth token headers = headers or self.headers # The calling function can provide a full or partial url if not endpoint.startswith("http"): endpoint = "%s/%s" % (self.baseurl, endpoint) # If we have data, the request will be POST if data: if not isinstance(data, str): data = sjson.dump(data) data = data.encode('ascii') return Request(endpoint, data=data, headers=headers)
def issue_request(self, request, retry=True):
    """Given a prepared request, issue it.

    If we get an error, die. If there are times when we don't want to exit
    on error (but instead disable using the monitoring service) we could
    add that here.
    """
    try:
        response = urlopen(request)
    except URLError as e:

        # If we have an authorization request, retry once with auth
        if hasattr(e, "code") and e.code == 401 and retry:
            if self.authenticate_request(e):
                request = self.prepare_request(
                    e.url,
                    sjson.load(request.data.decode('utf-8')),
                    self.headers
                )
                return self.issue_request(request, False)

        # Handle permanent re-directs!
        elif hasattr(e, "code") and e.code == 308:
            location = e.headers.get('Location')

            request_data = None
            if request.data:
                request_data = sjson.load(request.data.decode('utf-8'))[0]

            if location:
                request = self.prepare_request(
                    location,
                    request_data,
                    self.headers
                )
                return self.issue_request(request, True)

        # Otherwise, relay the message and exit on error
        msg = ""
        if hasattr(e, 'reason'):
            msg = e.reason
        elif hasattr(e, 'code'):
            msg = e.code

        # If we can parse the message, try it
        try:
            msg += "\n%s" % e.read().decode("utf8", 'ignore')
        except Exception:
            pass

        if self.allow_fail:
            tty.warn("Request to %s was not successful, but continuing." % e.url)
            return

        tty.die(msg)

    return response
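A sketch of the intended prepare/issue flow, assuming monitor is a client
instance and using the 'builds/new/' endpoint referenced elsewhere in this
module; the payload key shown is purely illustrative:

request = monitor.prepare_request("builds/new/", {"full_hash": "..."}, None)
response = monitor.issue_request(request)
result = sjson.load(response.read().decode("utf-8"))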
def new_configuration(self, specs): """ Given a list of specs, generate a new configuration for each. We return a lookup of specs with their package names. This assumes that we are only installing one version of each package. We aren't starting or creating any builds, so we don't need a build environment. """ configs = {} # There should only be one spec generally (what cases would have >1?) for spec in specs: # Not sure if this is needed here, but I see it elsewhere if spec.name in spack.repo.path or spec.virtual: spec.concretize() # Remove extra level of nesting as_dict = {"spec": spec.to_dict(hash=ht.full_hash)['spec'], "spack_version": self.spack_version} if self.save_local: filename = "spec-%s-%s-config.json" % (spec.name, spec.version) self.save(as_dict, filename) else: response = self.do_request("specs/new/", data=sjson.dump(as_dict)) configs[spec.package.name] = response.get('data', {}) return configs
def failed_concretization(self, specs): """ Given a list of abstract specs, tell spack monitor concretization failed. """ configs = {} # There should only be one spec generally (what cases would have >1?) for spec in specs: # update the spec to have build hash indicating that cannot be built meta = spec.to_dict()['spec'] nodes = [] for node in meta.get("nodes", []): for hashtype in ["build_hash", "full_hash"]: node[hashtype] = "FAILED_CONCRETIZATION" nodes.append(node) meta['nodes'] = nodes # We can't concretize / hash as_dict = {"spec": meta, "spack_version": self.spack_version} if self.save_local: filename = "spec-%s-%s-config.json" % (spec.name, spec.version) self.save(as_dict, filename) else: response = self.do_request("specs/new/", data=sjson.dump(as_dict)) configs[spec.package.name] = response.get('data', {}) return configs
def update_build(self, spec, status="SUCCESS"): """ Update a build with a new status. This typically updates the relevant package to indicate a successful install. This endpoint can take a general status to update. """ data = {"build_id": self.get_build_id(spec), "status": status} if self.save_local: filename = "build-%s-status.json" % data['build_id'] return self.save(data, filename) return self.do_request("builds/update/", data=sjson.dump(data))
def send_phase(self, pkg, phase_name, phase_output_file, status): """ Send the result of a phase during install. Given a package, phase name, and status, update the monitor endpoint to alert of the status of the stage. This includes parsing the package metadata folder for phase output and error files """ data = {"build_id": self.get_build_id(pkg.spec)} # Send output specific to the phase (does this include error?) data.update({"status": status, "output": read_file(phase_output_file), "phase_name": phase_name}) if self.save_local: filename = "build-%s-phase-%s.json" % (data['build_id'], phase_name) return self.save(data, filename) return self.do_request("builds/phases/update/", data=sjson.dump(data))
def upload_specfile(self, filename):
    """Upload a spec file to the spack monitor server.

    Given a spec file (must be json), upload to the UploadSpec endpoint.
    This function is not used in the spack to server workflow, but could
    be useful if Spack Monitor is intended to send an already generated
    file in some kind of separate analysis. For the environment file, we
    parse out SPACK_* variables to include.
    """
    # We load as json just to validate it
    spec = read_json(filename)
    data = {"spec": spec, "spack_version": self.spack_version}

    if self.save_local:
        filename = "spec-%s-%s.json" % (spec.name, spec.version)
        return self.save(data, filename)

    return self.do_request("specs/new/", data=sjson.dump(data))
def iter_read(self, pattern): """ A helper to read json from a directory glob and return it loaded. """ for filename in glob(pattern): basename = os.path.basename(filename) tty.info("Reading %s" % basename) yield read_json(filename)
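Usage sketch, assuming monitor is a client instance and save_dir holds results
written by save() above; the glob pattern mirrors the ones used by
_upload_local_save below:

for spec_data in monitor.iter_read(os.path.join(save_dir, "spec*")):
    print(spec_data.get("spack_version"))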
def upload_local_save(self, dirname): """ Upload results from a locally saved directory to spack monitor. The general workflow will first include an install with save local: spack install --monitor --monitor-save-local And then a request to upload the root or specific directory. spack upload monitor ~/.spack/reports/monitor/<date>/ """ dirname = os.path.abspath(dirname) if not os.path.exists(dirname): tty.die("%s does not exist." % dirname) # We can't be sure the level of nesting the user has provided # So we walk recursively through and look for build metadata for subdir, dirs, files in os.walk(dirname): root = os.path.join(dirname, subdir) # A metadata file signals a monitor export metadata = glob("%s%sbuild-metadata*" % (root, os.sep)) if not metadata or not files or not root or not subdir: continue self._upload_local_save(root) tty.info("Upload complete")
def _upload_local_save(self, dirname): """ Given a found metadata file, upload results to spack monitor. """ # First find all the specs for spec in self.iter_read("%s%sspec*" % (dirname, os.sep)): self.do_request("specs/new/", data=sjson.dump(spec)) # Load build metadata to generate an id metadata = glob("%s%sbuild-metadata*" % (dirname, os.sep)) if not metadata: tty.die("Build metadata file(s) missing in %s" % dirname) # Create a build_id lookup based on hash hashes = {} for metafile in metadata: data = read_json(metafile) build = self.do_request("builds/new/", data=sjson.dump(data)) localhash = os.path.basename(metafile).replace(".json", "") hashes[localhash.replace('build-metadata-', "")] = build # Next upload build phases for phase in self.iter_read("%s%sbuild*phase*" % (dirname, os.sep)): build_id = hashes[phase['build_id']]['data']['build']['build_id'] phase['build_id'] = build_id self.do_request("builds/phases/update/", data=sjson.dump(phase)) # Next find the status objects for status in self.iter_read("%s%sbuild*status*" % (dirname, os.sep)): build_id = hashes[status['build_id']]['data']['build']['build_id'] status['build_id'] = build_id self.do_request("builds/update/", data=sjson.dump(status))
def parse_auth_header(authHeaderRaw):
    """Parse an authentication header into relevant pieces
    """
    regex = re.compile('([a-zA-Z]+)="(.+?)"')
    matches = regex.findall(authHeaderRaw)
    lookup = dict()
    for match in matches:
        lookup[match[0]] = match[1]
    return authHeader(lookup)
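Illustrative call with a made-up WWW-Authenticate challenge; the attributes
exposed by the returned authHeader object are defined elsewhere, so only the
parsing step is shown:

challenge = 'Bearer realm="https://auth.example.com/token",service="spackmon",scope="build"'
header = parse_auth_header(challenge)
# header wraps the parsed key/value pairs: realm, service and scope here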
def on_install_start(spec): """On start of an install, we want to ping the server if it exists """ if not spack.monitor.cli: return tty.debug("Running on_install_start for %s" % spec) build_id = spack.monitor.cli.new_build(spec) tty.verbose("Build created with id %s" % build_id)
def on_install_success(spec): """On the success of an install (after everything is complete) """ if not spack.monitor.cli: return tty.debug("Running on_install_success for %s" % spec) result = spack.monitor.cli.update_build(spec, status="SUCCESS") tty.verbose(result.get('message'))
def on_install_failure(spec): """Triggered on failure of an install """ if not spack.monitor.cli: return tty.debug("Running on_install_failure for %s" % spec) result = spack.monitor.cli.fail_task(spec) tty.verbose(result.get('message'))
def on_install_cancel(spec): """Triggered on cancel of an install """ if not spack.monitor.cli: return tty.debug("Running on_install_cancel for %s" % spec) result = spack.monitor.cli.cancel_task(spec) tty.verbose(result.get('message'))
def on_analyzer_save(pkg, result):
    """Given a package and a result, if we have a spack monitor, upload
    the result to it.
    """
    if not spack.monitor.cli:
        return

    # This hook runs after a save result
    spack.monitor.cli.send_analyze_metadata(pkg, result)
def mplib_content(spec, pre=None): """The mpi settings (from spack) for the OpenFOAM wmake includes, which allows later reuse within OpenFOAM. Optional parameter 'pre' to provide alternative prefix for bin and lib directories. """ mpi_spec = spec['mpi'] bin = mpi_spec.prefix.bin inc = mpi_spec.headers.directories[0] # Currently only need first one lib = pkglib(mpi_spec) libname = 'mpi' if 'mpich' in mpi_spec.name: libname = 'mpich' if pre: bin = join_path(pre, os.path.basename(bin)) inc = join_path(pre, os.path.basename(inc)) lib = join_path(pre, os.path.basename(lib)) else: pre = mpi_spec.prefix info = { 'name': '{0}-{1}'.format(mpi_spec.name, mpi_spec.version), 'prefix': pre, 'include': inc, 'bindir': bin, 'libdir': lib, 'FLAGS': '-DOMPI_SKIP_MPICXX -DMPICH_SKIP_MPICXX', 'PINC': '-I{0}'.format(inc), 'PLIBS': '-L{0} -l{1}'.format(lib, libname), } return info
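Usage sketch matching how configure() below calls it, rendering relocatable
MPI settings keyed on ${MPI_ARCH_PATH}; spec is assumed to be a concretized
spec with an 'mpi' dependency:

info = mplib_content(spec, '${MPI_ARCH_PATH}')
# e.g. info['PINC']  -> '-I${MPI_ARCH_PATH}/include'
#      info['PLIBS'] -> '-L${MPI_ARCH_PATH}/lib -lmpi'   (lib dir name may vary)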
def url_for_version(self, version): """Handles locations for patched and unpatched versions. Patched version (eg '1906_191103') are located in the corresponding unpatched directories (eg '1906'). Older versions (eg, v1612+) had additional '+' in naming """ if version <= Version('1612'): fmt = 'v{0}+/OpenFOAM-v{1}+.tgz' else: fmt = 'v{0}/OpenFOAM-v{1}.tgz' return self.list_url + fmt.format(version.up_to(1), version)
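Illustrative results derived from the branches above (list_url shortened to
'<list_url>'); the patched 1906_191103 release resolves into the unpatched
v1906 directory, as the docstring describes:

# url_for_version(Version('1612'))         -> <list_url>v1612+/OpenFOAM-v1612+.tgz
# url_for_version(Version('1906_191103'))  -> <list_url>v1906/OpenFOAM-v1906_191103.tgz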
def patch(self): """Adjust OpenFOAM build for spack. Where needed, apply filter as an alternative to normal patching.""" add_extra_files(self, self.common, self.assets) # Prior to 1812, required OpenFOAM-v{VER} directory when sourcing projdir = "OpenFOAM-v{0}".format(self.version) if not os.path.exists(join_path(self.stage.path, projdir)): tty.info('Added directory link {0}'.format(projdir)) os.symlink( os.path.relpath( self.stage.source_path, self.stage.path ), join_path(self.stage.path, projdir) ) # Avoid WM_PROJECT_INST_DIR for ThirdParty # This modification is non-critical edits = { 'WM_THIRD_PARTY_DIR': r'$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party', } rewrite_environ_files( # etc/{bashrc,cshrc} edits, posix=join_path('etc', 'bashrc'), cshell=join_path('etc', 'cshrc')) # The following filtering is non-critical. # It simply prevents 'site' dirs at the wrong level # (likely non-existent anyhow) from being added to # PATH, LD_LIBRARY_PATH. for rcdir in ['config.sh', 'config.csh']: rcfile = join_path('etc', rcdir, 'settings') if os.path.isfile(rcfile): filter_file( 'WM_PROJECT_INST_DIR/', 'WM_PROJECT_DIR/', rcfile, backup=False)
def configure_trapFpe_off(self): """Disable trapFpe handling. Seems to be needed for several clang-derivatives. """ # Set 'trapFpe 0' in etc/controlDict controlDict = 'etc/controlDict' if os.path.exists(controlDict): filter_file(r'trapFpe\s+\d+\s*;', 'trapFpe 0;', controlDict, backup=False)
def make_fujitsu_rules(self): """Create Fujitsu rules (clang variant) unless supplied upstream. Implemented for 1812 and later (older rules are too messy to edit). Already included after 1912. """ general_rules = 'wmake/rules/General' arch_rules = 'wmake/rules/linuxARM64' # self.arch src = arch_rules + 'Clang' dst = arch_rules + 'Fujitsu' # self.compiler self.configure_trapFpe_off() # LLVM may falsely trigger FPE if os.path.exists(dst): return # Handle rules/<ARCH><COMP> or rules/<ARCH>/<COMP> if not os.path.exists(src): src = join_path(arch_rules, 'Clang') dst = join_path(arch_rules, 'Fujitsu') # self.compiler if os.path.exists(dst): return tty.info('Add Fujitsu wmake rules') copy_tree(src, dst) if self.spec.version >= Version('1906'): for cfg in ['c', 'c++', 'general']: rule = join_path(dst, cfg) filter_file('Clang', 'Fujitsu', rule, backup=False) else: filter_file('clang', spack_cc, join_path(dst, 'c'), backup=False, string=True) filter_file('clang++', spack_cxx, join_path(dst, 'c++'), backup=False, string=True) src = join_path(general_rules, 'Clang') dst = join_path(general_rules, 'Fujitsu') # self.compiler copy_tree(src, dst) if self.spec.version >= Version('1906'): filter_file('clang', spack_cc, join_path(dst, 'c'), backup=False, string=True) filter_file('clang++', spack_cxx, join_path(dst, 'c++'), backup=False, string=True)
def configure(self, spec, prefix):
    """Make adjustments to the OpenFOAM configuration files in their various
    locations: etc/bashrc, etc/config.sh/FEATURE and customizations that
    don't properly fit get placed in the etc/prefs.sh file (similarly for
    csh).
    """
    # Filtering bashrc, cshrc
    edits = {}
    edits.update(self.foam_arch.foam_dict())
    rewrite_environ_files(  # etc/{bashrc,cshrc}
        edits,
        posix=join_path('etc', 'bashrc'),
        cshell=join_path('etc', 'cshrc'))

    # Content for etc/prefs.{csh,sh}
    self.etc_prefs = {
        # TODO
        # 'CMAKE_ARCH_PATH': spec['cmake'].prefix,
        # 'FLEX_ARCH_PATH': spec['flex'].prefix,
        # 'ZLIB_ARCH_PATH': spec['zlib'].prefix,
    }

    # MPI content, using MPI_ARCH_PATH
    user_mpi = mplib_content(spec, '${MPI_ARCH_PATH}')

    # Content for etc/config.{csh,sh}/ files
    self.etc_config = {
        'CGAL': [
            ('BOOST_ARCH_PATH', spec['boost'].prefix),
            ('CGAL_ARCH_PATH', spec['cgal'].prefix),
            ('LD_LIBRARY_PATH',
             foam_add_lib(
                 pkglib(spec['boost'], '${BOOST_ARCH_PATH}'),
                 pkglib(spec['cgal'], '${CGAL_ARCH_PATH}'))),
        ],
        'FFTW': [
            ('FFTW_ARCH_PATH', spec['fftw-api'].prefix),  # Absolute
            ('LD_LIBRARY_PATH',
             foam_add_lib(
                 pkglib(spec['fftw-api'], '${FFTW_ARCH_PATH}'))),
        ],
        # User-defined MPI
        'mpi-user': [
            ('MPI_ARCH_PATH', spec['mpi'].prefix),  # Absolute
            ('LD_LIBRARY_PATH', foam_add_lib(user_mpi['libdir'])),
            ('PATH', foam_add_path(user_mpi['bindir'])),
        ],
        'adios2': {},
        'scotch': {},
        'kahip': {},
        'metis': {},
        'ensight': {},  # Disable settings
        'paraview': [],
        'gperftools': [],  # Disable settings
        'vtk': [],
    }

    # With adios2 after 1912
    if spec.satisfies('@1912:'):
        self.etc_config['adios2'] = [
            ('ADIOS2_ARCH_PATH', spec['adios2'].prefix),
            ('LD_LIBRARY_PATH',
             foam_add_lib(pkglib(spec['adios2'], '${ADIOS2_ARCH_PATH}'))),
            ('PATH', foam_add_path('${ADIOS2_ARCH_PATH}/bin')),
        ]

    if '+scotch' in spec:
        self.etc_config['scotch'] = {
            'SCOTCH_ARCH_PATH': spec['scotch'].prefix,
            # For src/parallel/decompose/Allwmake
            'SCOTCH_VERSION': 'scotch-{0}'.format(spec['scotch'].version),
        }

    if '+kahip' in spec:
        self.etc_config['kahip'] = {
            'KAHIP_ARCH_PATH': spec['kahip'].prefix,
        }

    if '+metis' in spec:
        self.etc_config['metis'] = {
            'METIS_ARCH_PATH': spec['metis'].prefix,
        }

    # ParaView_INCLUDE_DIR is not used in 1812, but has no ill-effect
    if '+paraview' in spec:
        pvmajor = 'paraview-{0}'.format(spec['paraview'].version.up_to(2))
        self.etc_config['paraview'] = [
            ('ParaView_DIR', spec['paraview'].prefix),
            ('ParaView_INCLUDE_DIR', '${ParaView_DIR}/include/' + pvmajor),
            ('PV_PLUGIN_PATH', '$FOAM_LIBBIN/' + pvmajor),
            ('PATH', foam_add_path('${ParaView_DIR}/bin')),
        ]

    if '+vtk' in spec:
        self.etc_config['vtk'] = [
            ('VTK_DIR', spec['vtk'].prefix),
            ('LD_LIBRARY_PATH',
             foam_add_lib(pkglib(spec['vtk'], '${VTK_DIR}'))),
        ]

    # Optional
    if '+mgridgen' in spec:
        self.etc_config['mgridgen'] = {
            'MGRIDGEN_ARCH_PATH': spec['parmgridgen'].prefix
        }

    # Optional
    if '+zoltan' in spec:
        self.etc_config['zoltan'] = {
            'ZOLTAN_ARCH_PATH': spec['zoltan'].prefix
        }

    # Write prefs files according to the configuration.
    # Only need prefs.sh for building, but install both for end-users
    if self.etc_prefs:
        write_environ(
            self.etc_prefs,
            posix=join_path('etc', 'prefs.sh'),
            cshell=join_path('etc', 'prefs.csh'))

    # Adjust components to use SPACK variants
    for component, subdict in self.etc_config.items():
        write_environ(
            subdict,
            posix=join_path('etc', 'config.sh', component),
            cshell=join_path('etc', 'config.csh', component))
def install_write_location(self): """Set the installation location (projectdir) in bashrc,cshrc.""" mkdirp(self.projectdir) # Filtering: bashrc, cshrc edits = { 'WM_PROJECT_DIR': self.projectdir, } etc_dir = join_path(self.projectdir, 'etc') rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc edits, posix=join_path(etc_dir, 'bashrc'), cshell=join_path(etc_dir, 'cshrc'))
def install_write_location(self): """Set the installation location (projectdir) in bashrc,cshrc. In 1806 and earlier, had WM_PROJECT_INST_DIR as the prefix directory where WM_PROJECT_DIR was installed. """ mkdirp(self.projectdir) projdir = os.path.basename(self.projectdir) # Filtering: bashrc, cshrc edits = { 'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir), 'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir), } etc_dir = join_path(self.projectdir, 'etc') rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc edits, posix=join_path(etc_dir, 'bashrc'), cshell=join_path(etc_dir, 'cshrc'))
def install_links(self): """Add symlinks into bin/, lib/ (eg, for other applications)""" # Make build log visible - it contains OpenFOAM-specific information with working_dir(self.projectdir): os.symlink( join_path(os.path.relpath(self.install_log_path)), join_path('log.' + str(self.foam_arch))) if not self.config['link']: return # ln -s platforms/linux64GccXXX/lib lib with working_dir(self.projectdir): if os.path.isdir(self.archlib): os.symlink(self.archlib, 'lib') # (cd bin && ln -s ../platforms/linux64GccXXX/bin/* .) with working_dir(join_path(self.projectdir, 'bin')): for f in [ f for f in glob.glob(join_path('..', self.archbin, "*")) if os.path.isfile(f) ]: os.symlink(f, os.path.basename(f))
def update_arch(self, spec): """Set WM_ARCH string corresponding to spack platform/target """ # spec.architecture.platform is like `uname -s`, but lower-case platform = str(spec.architecture.platform) # spec.target.family is like `uname -m` target = str(spec.target.family) # No spack platform family for ia64 or armv7l if platform == 'linux': if target == 'x86_64': platform += '64' elif target == 'ia64': platform += 'IA64' elif target == 'armv7l': platform += 'ARM7' elif target == 'aarch64': # overwritten as 'Arm64' in openfoam-org platform += 'ARM64' elif target == 'ppc64': platform += 'PPC64' elif target == 'ppc64le': platform += 'PPC64le' elif platform == 'darwin': platform += '64' # aarch64 or x86_64 # ... and others? self.arch = platform
def update_options(self): """Set WM_OPTIONS string consistent with current settings """ # WM_OPTIONS # ---- # WM_LABEL_OPTION=Int$WM_LABEL_SIZE # WM_OPTIONS_BASE=$WM_ARCH$WM_COMPILER$WM_PRECISION_OPTION # WM_OPTIONS=$WM_OPTIONS_BASE$WM_LABEL_OPTION$WM_COMPILE_OPTION # or # WM_OPTIONS=$WM_OPTIONS_BASE$WM_COMPILE_OPTION # ---- self.options = ''.join([ self.arch, self.compiler, self.precision_option, ('Int' + self.label_size if self.label_size else ''), self.compile_option])
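Worked example with assumed member values, following the comment block above:

# arch='linux64', compiler='Gcc', precision_option='DP',
# label_size='32', compile_option='Opt'   (assumed values)
# => self.options == 'linux64GccDPInt32Opt'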
def foam_dict(self): """Returns a dictionary for OpenFOAM prefs, bashrc, cshrc.""" return dict([ ('WM_COMPILER', self.compiler), ('WM_LABEL_SIZE', self.label_size), ('WM_PRECISION_OPTION', self.precision_option), ('WM_COMPILE_OPTION', self.compile_option), ('WM_MPLIB', self.mplib), ])
def _rule_directory(self, projdir, general=False): """Return the wmake/rules/ General or compiler rules directory. Supports wmake/rules/<ARCH><COMP> and wmake/rules/<ARCH>/<COMP>. """ rules_dir = os.path.join(projdir, 'wmake', 'rules') if general: return os.path.join(rules_dir, 'General') arch_dir = os.path.join(rules_dir, self.arch) comp_rules = arch_dir + self.compiler if os.path.isdir(comp_rules): return comp_rules else: return os.path.join(arch_dir, self.compiler)
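Illustration of the two supported layouts, with assumed values
self.arch='linux64' and self.compiler='Gcc':

# _rule_directory(projdir)               -> <projdir>/wmake/rules/linux64Gcc   (if it exists)
#                                           otherwise <projdir>/wmake/rules/linux64/Gcc
# _rule_directory(projdir, general=True) -> <projdir>/wmake/rules/General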
def has_rule(self, projdir): """Verify that a wmake/rules/ compiler rule exists in the project. """ # Insist on a wmake rule for this architecture/compiler combination rule_dir = self._rule_directory(projdir) if not os.path.isdir(rule_dir): raise InstallError( 'No wmake rule for {0} {1}'.format(self.arch, self.compiler)) return True
def create_rules(self, projdir, foam_pkg):
        """Create {c,c++}-spack and mplib{USERMPI} rules in the specified
        project directory.
        The compiler rules are based on the respective {c,c++}Opt rules
        but with additional rpath information for the OpenFOAM libraries.

        The '-spack' rules channel spack information into OpenFOAM wmake
        rules with minimal modification to OpenFOAM.
        The rpath is used for the installed libpath (continue to use
        LD_LIBRARY_PATH for values during the build).
        """
        # Note: the 'c' rules normally don't need rpath, since they are just
        # used for some statically linked wmake tools, but left in anyhow.

        # rpath for installed OpenFOAM libraries
        rpath = '{0}{1}'.format(
            foam_pkg.compiler.cxx_rpath_arg,
            join_path(foam_pkg.projectdir, foam_pkg.archlib))

        user_mpi = mplib_content(foam_pkg.spec)
        rule_dir = self._rule_directory(projdir)

        with working_dir(rule_dir):
            # Compiler: copy existing cOpt,c++Opt and modify '*DBUG' value
            for lang in ['c', 'c++']:
                src = '{0}Opt'.format(lang)
                dst = '{0}{1}'.format(lang, self.compile_option)
                with open(src, 'r') as infile:
                    with open(dst, 'w') as outfile:
                        for line in infile:
                            line = line.rstrip()
                            outfile.write(line)
                            if re.match(r'^\S+DBUG\s*=', line):
                                outfile.write(' ')
                                outfile.write(rpath)
                            elif re.match(r'^\S+OPT\s*=', line):
                                if self.arch_option:
                                    outfile.write(' ')
                                    outfile.write(self.arch_option)
                            outfile.write('\n')

            # MPI rules
            for mplib in ['mplibUSERMPI']:
                with open(mplib, 'w') as out:
                    out.write("""# MPI from spack ({name})\n
PFLAGS = {FLAGS}
PINC = {PINC}
PLIBS = {PLIBS}
#-------
""".format(**user_mpi))
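The per-line rewrite inside create_rules can be exercised on its own; a sketch with placeholder rpath and architecture flags (not real package values).

import re

rpath = '-Wl,-rpath,/opt/OpenFOAM/lib'   # placeholder rpath
arch_option = '-march=native'            # placeholder arch flags

def rewrite_rule_line(line):
    # Append the rpath to '*DBUG =' lines and the arch flags to '*OPT ='
    # lines, mirroring the matching logic used above.
    line = line.rstrip()
    if re.match(r'^\S+DBUG\s*=', line):
        return line + ' ' + rpath
    if re.match(r'^\S+OPT\s*=', line) and arch_option:
        return line + ' ' + arch_option
    return line

print(rewrite_rule_line('c++DBUG    ='))          # gains the rpath
print(rewrite_rule_line('c++OPT      = -O3'))     # gains the arch flags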
Python
def create_host_config(self, spec, prefix):
        """
        This method creates a 'host-config' file that specifies
        all of the options used to configure and build devil ray.

        For more details about 'host-config' files see:
            https://ascent.readthedocs.io/en/latest/BuildingAscent.html
        """
        #######################
        # Compiler Info
        #######################
        c_compiler = env["SPACK_CC"]
        cpp_compiler = env["SPACK_CXX"]

        #######################################################################
        # By directly fetching the names of the actual compilers we appear
        # to be doing something evil here, but this is necessary to create a
        # 'host config' file that works outside of the spack install env.
        #######################################################################
        sys_type = spec.architecture
        # if on llnl systems, we can use the SYS_TYPE
        if "SYS_TYPE" in env:
            sys_type = env["SYS_TYPE"]

        ##############################################
        # Find and record what CMake is used
        ##############################################
        if "+cmake" in spec:
            cmake_exe = spec['cmake'].command.path
        else:
            cmake_exe = which("cmake")
            if cmake_exe is None:
                msg = 'failed to find CMake (and cmake variant is off)'
                raise RuntimeError(msg)
            cmake_exe = cmake_exe.path

        host_cfg_fname = "%s-%s-%s-devil_ray.cmake" % (socket.gethostname(),
                                                       sys_type,
                                                       spec.compiler)

        cfg = open(host_cfg_fname, "w")
        cfg.write("##################################\n")
        cfg.write("# spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
        cfg.write("##################################\n\n")

        # Include path to cmake for reference
        cfg.write("# cmake from spack \n")
        cfg.write("# cmake executable path: %s\n\n" % cmake_exe)

        #######################
        # Compiler Settings
        #######################
        cfg.write("#######\n")
        cfg.write("# using %s compiler spec\n" % spec.compiler)
        cfg.write("#######\n\n")

        if "+mpi" in spec:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
            mpicc_path = spec['mpi'].mpicc
            mpicxx_path = spec['mpi'].mpicxx
            # if we are using compiler wrappers on cray systems
            # use those for mpi wrappers, b/c spec['mpi'].mpicxx
            # etc may return the spack compiler wrappers
            # which can trip up mpi detection in CMake 3.14
            if cpp_compiler == "CC":
                mpicc_path = "cc"
                mpicxx_path = "CC"
            cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", mpicc_path))
            cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", mpicxx_path))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
            cfg.write("# c compiler used by spack\n")
            cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
            cfg.write("# cpp compiler used by spack\n")
            cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))

        # use global spack compiler flags
        cppflags = ' '.join(spec.compiler_flags['cppflags'])
        if cppflags:
            # avoid always ending up with ' ' with no flags defined
            cppflags += ' '
        cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
        if cflags:
            cfg.write(cmake_cache_entry("CMAKE_C_FLAGS", cflags))

        cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
        if cxxflags:
            cfg.write(cmake_cache_entry("CMAKE_CXX_FLAGS", cxxflags))

        fflags = ' '.join(spec.compiler_flags['fflags'])
        if self.spec.satisfies('%cce'):
            fflags += " -ef"
        if fflags:
            cfg.write(cmake_cache_entry("CMAKE_Fortran_FLAGS", fflags))

        #######################
        # Backends
        #######################
        cfg.write("# CUDA Support\n")

        if "+cuda" in spec:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
            if 'cuda_arch' in spec.variants:
                cuda_value = spec.variants['cuda_arch'].value
                cuda_arch = cuda_value[0]
                cfg.write(cmake_cache_entry('CUDA_ARCH',
                                            'sm_{0}'.format(cuda_arch)))
        else:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))

        if "+openmp" in spec:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))

        # shared vs static libs
        if "+shared" in spec:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
        else:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))

        #######################
        # Unit Tests
        #######################
        if "+test" in spec:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "ON"))
            # we need this to control BLT tests
            cfg.write(cmake_cache_entry("ENABLE_TESTS", "ON"))
        else:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "OFF"))
            # we need this to control BLT tests
            cfg.write(cmake_cache_entry("ENABLE_TESTS", "OFF"))

        #######################
        # Utilities
        #######################
        if "+utils" in spec:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "ON"))
        else:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "OFF"))

        #######################
        # Logging
        #######################
        if "+logging" in spec:
            cfg.write(cmake_cache_entry("ENABLE_LOGGING", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_LOGGING", "OFF"))

        #######################
        # Status
        #######################
        if "+stats" in spec:
            cfg.write(cmake_cache_entry("ENABLE_STATS", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_STATS", "OFF"))

        #######################################################################
        # Core Dependencies
        #######################################################################
        cfg.write("# conduit from spack \n")
        cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))

        cfg.write("# mfem from spack \n")
        cfg.write(cmake_cache_entry("MFEM_DIR", spec['mfem'].prefix))

        cfg.write("# raja from spack \n")
        cfg.write(cmake_cache_entry("RAJA_DIR", spec['raja'].prefix))

        cfg.write("# umpire from spack \n")
        cfg.write(cmake_cache_entry("UMPIRE_DIR", spec['umpire'].prefix))

        cfg.write("# apcompositor from spack \n")
        cfg.write(cmake_cache_entry("APCOMP_DIR", spec['apcomp'].prefix))

        cfg.write("##################################\n")
        cfg.write("# end spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.close()

        host_cfg_fname = os.path.abspath(host_cfg_fname)
        tty.info("spack generated devil ray host-config file: " + host_cfg_fname)
        return host_cfg_fname
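cmake_cache_entry() is defined elsewhere in the package; a minimal stand-in, assuming a (name, value) signature that emits CMake cache 'set' lines, might look roughly like the sketch below. This is not the package's actual helper.

def cmake_cache_entry(name, value, vtype=None):
    # Assumed behaviour: ON/OFF values become BOOL cache entries, everything
    # else is written as a PATH.  Mirrors the call sites above; a sketch only.
    if vtype is None:
        vtype = 'BOOL' if value in ('ON', 'OFF') else 'PATH'
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)

print(cmake_cache_entry('ENABLE_MPI', 'ON'), end='')
print(cmake_cache_entry('CONDUIT_DIR', '/opt/conduit'), end='')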
Python
def _handle_external_and_upstream(pkg, explicit):
    """
    Determine if the package is external or upstream and register it in the
    database if it is an external package.

    Args:
        pkg (spack.package.Package): the package whose installation is under
            consideration
        explicit (bool): the package was explicitly requested by the user
    Return:
        bool: ``True`` if the package is external or upstream (so not to be
            installed locally), otherwise ``False``
    """
    # For external packages the workflow is simplified, and basically
    # consists in module file generation and registration in the DB.
    if pkg.spec.external:
        _process_external_package(pkg, explicit)
        _print_installed_pkg('{0} (external {1})'
                             .format(pkg.prefix, package_id(pkg)))
        return True

    if pkg.installed_upstream:
        tty.verbose('{0} is installed in an upstream Spack instance at {1}'
                    .format(package_id(pkg), pkg.spec.prefix))
        _print_installed_pkg(pkg.prefix)

        # This will result in skipping all post-install hooks. In the case
        # of modules this is considered correct because we want to retrieve
        # the module from the upstream Spack instance.
        return True

    return False
Python
def _packages_needed_to_bootstrap_compiler(compiler, architecture, pkgs):
    """
    Return a list of packages required to bootstrap `pkg`s compiler

    Checks Spack's compiler configuration for a compiler that
    matches the package spec.

    Args:
        compiler (CompilerSpec): the compiler to bootstrap
        architecture (ArchSpec): the architecture for which to bootstrap the
            compiler
        pkgs (list): the packages that may need their compiler installed

    Return:
        list: list of tuples, (PackageBase, bool), for concretized
            compiler-related packages that need to be installed and bool
            values specify whether the package is the bootstrap compiler
            (``True``) or one of its dependencies (``False``).  The list
            will be empty if there are no compilers.
    """
    tty.debug('Bootstrapping {0} compiler'.format(compiler))
    compilers = spack.compilers.compilers_for_spec(
        compiler, arch_spec=architecture)
    if compilers:
        return []

    dep = spack.compilers.pkg_spec_for_compiler(compiler)

    # Set the architecture for the compiler package in a way that allows the
    # concretizer to back off if needed for the older bootstrapping compiler
    dep.constrain('platform=%s' % str(architecture.platform))
    dep.constrain('os=%s' % str(architecture.os))
    dep.constrain('target=%s:' %
                  architecture.target.microarchitecture.family.name)
    # concrete CompilerSpec has less info than concrete Spec
    # concretize as Spec to add that information
    dep.concretize()
    # mark compiler as depended-on by the packages that use it
    for pkg in pkgs:
        dep._dependents[pkg.name] = spack.spec.DependencySpec(
            pkg.spec, dep, ('build',))
    packages = [(s.package, False) for
                s in dep.traverse(order='post', root=False)]
    packages.append((dep.package, True))
    return packages
Python
def _install_from_cache(pkg, cache_only, explicit, unsigned=False,
                        full_hash_match=False):
    """
    Extract the package from binary cache

    Args:
        pkg (spack.package.PackageBase): the package to install from the
            binary cache
        cache_only (bool): only extract from binary cache
        explicit (bool): ``True`` if installing the package was explicitly
            requested by the user, otherwise ``False``
        unsigned (bool): ``True`` if binary package signatures should not be
            checked, otherwise ``False``

    Return:
        bool: ``True`` if the package was extracted from binary cache,
            ``False`` otherwise
    """
    installed_from_cache = _try_install_from_binary_cache(
        pkg, explicit, unsigned=unsigned, full_hash_match=full_hash_match)
    pkg_id = package_id(pkg)
    if not installed_from_cache:
        pre = 'No binary for {0} found'.format(pkg_id)
        if cache_only:
            tty.die('{0} when cache-only specified'.format(pre))

        tty.msg('{0}: installing from source'.format(pre))
        return False

    tty.debug('Successfully extracted {0} from binary cache'.format(pkg_id))
    _print_installed_pkg(pkg.spec.prefix)
    spack.hooks.post_install(pkg.spec)
    return True
Python
def _try_install_from_binary_cache(pkg, explicit, unsigned=False,
                                   full_hash_match=False):
    """
    Try to extract the package from binary cache.

    Args:
        pkg (spack.package.PackageBase): the package to be extracted from
            binary cache
        explicit (bool): the package was explicitly requested by the user
        unsigned (bool): ``True`` if binary package signatures should not be
            checked, otherwise ``False``
    """
    pkg_id = package_id(pkg)
    tty.debug('Searching for binary cache of {0}'.format(pkg_id))
    matches = binary_distribution.get_mirrors_for_spec(
        pkg.spec, full_hash_match=full_hash_match)

    if not matches:
        return False

    # In the absence of guidance from user or some other reason to prefer one
    # mirror over another, any match will suffice, so just pick the first one.
    preferred_mirrors = [match['mirror_url'] for match in matches]
    binary_spec = matches[0]['spec']
    return _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,
                                         preferred_mirrors=preferred_mirrors)
Python
def log(pkg):
    """
    Copy provenance into the install directory on success

    Args:
        pkg (spack.package.Package): the package that was built and installed
    """
    packages_dir = spack.store.layout.build_packages_path(pkg.spec)

    # Remove first if we're overwriting another build
    try:
        # log and env install paths are inside this
        shutil.rmtree(packages_dir)
    except Exception as e:
        # FIXME : this potentially catches too many things...
        tty.debug(e)

    # Archive the whole stdout + stderr for the package
    fs.install(pkg.log_path, pkg.install_log_path)

    # Archive all phase log paths
    for phase_log in pkg.phase_log_files:
        log_file = os.path.basename(phase_log)
        log_file = os.path.join(os.path.dirname(packages_dir), log_file)
        fs.install(phase_log, log_file)

    # Archive the environment modifications for the build.
    fs.install(pkg.env_mods_path, pkg.install_env_path)

    if os.path.exists(pkg.configure_args_path):
        # Archive the args used for the build
        fs.install(pkg.configure_args_path, pkg.install_configure_args_path)

    # Finally, archive files that are specific to each package
    with fs.working_dir(pkg.stage.path):
        errors = six.StringIO()
        target_dir = os.path.join(
            spack.store.layout.metadata_path(pkg.spec), 'archived-files')

        for glob_expr in pkg.archive_files:
            # Check that we are trying to copy things that are
            # in the stage tree (not arbitrary files)
            abs_expr = os.path.realpath(glob_expr)
            if os.path.realpath(pkg.stage.path) not in abs_expr:
                errors.write('[OUTSIDE SOURCE PATH]: {0}\n'.format(glob_expr))
                continue
            # Now that we are sure that the path is within the correct
            # folder, make it relative and check for matches
            if os.path.isabs(glob_expr):
                glob_expr = os.path.relpath(glob_expr, pkg.stage.path)
            files = glob.glob(glob_expr)
            for f in files:
                try:
                    target = os.path.join(target_dir, f)
                    # We must ensure that the directory exists before
                    # copying a file in
                    fs.mkdirp(os.path.dirname(target))
                    fs.install(f, target)
                except Exception as e:
                    tty.debug(e)

                    # Here try to be conservative, and avoid discarding
                    # the whole install procedure because copying a
                    # single file failed
                    errors.write('[FAILED TO ARCHIVE]: {0}'.format(f))

        if errors.getvalue():
            error_file = os.path.join(target_dir, 'errors.txt')
            fs.mkdirp(target_dir)
            with open(error_file, 'w') as err:
                err.write(errors.getvalue())
            tty.warn('Errors occurred when archiving files.\n\t'
                     'See: {0}'.format(error_file))

    dump_packages(pkg.spec, packages_dir)
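The "outside source path" guard above reduces to a realpath substring check; a small standalone sketch with invented paths (the helper name is hypothetical).

import os

def is_inside_stage(stage_path, glob_expr):
    # Resolve symlinks first, then require the resolved stage path to appear
    # inside the resolved expression, as in the archiving loop above.
    abs_expr = os.path.realpath(glob_expr)
    return os.path.realpath(stage_path) in abs_expr

print(is_inside_stage('/tmp/stage', '/tmp/stage/spack-build/config.log'))  # True
print(is_inside_stage('/tmp/stage', '/etc/passwd'))                        # False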
Python
def _check_deps_status(self, request):
        """Check the install status of the dependencies of the requested
        package.

        Args:
            request (BuildRequest): the associated install request
        """
        err = 'Cannot proceed with {0}: {1}'
        for dep in request.traverse_dependencies():
            dep_pkg = dep.package
            dep_id = package_id(dep_pkg)

            # Check for failure since a prefix lock is not required
            if spack.store.db.prefix_failed(dep):
                action = "'spack install' the dependency"
                msg = '{0} is marked as an install failure: {1}' \
                    .format(dep_id, action)
                raise InstallError(err.format(request.pkg_id, msg))

            # Attempt to get a read lock to ensure another process does not
            # uninstall the dependency while the requested spec is being
            # installed
            ltype, lock = self._ensure_locked('read', dep_pkg)
            if lock is None:
                msg = '{0} is write locked by another process'.format(dep_id)
                raise InstallError(err.format(request.pkg_id, msg))

            # Flag external and upstream packages as being installed
            if dep_pkg.spec.external or dep_pkg.installed_upstream:
                self._flag_installed(dep_pkg)
                continue

            # Check the database to see if the dependency has been installed
            # and flag as such if appropriate
            rec, installed_in_db = self._check_db(dep)
            if installed_in_db and (
                    dep.dag_hash() not in request.overwrite or
                    rec.installation_time > request.overwrite_time):
                tty.debug('Flagging {0} as installed per the database'
                          .format(dep_id))
                self._flag_installed(dep_pkg)
            else:
                lock.release_read()
Python
def _install_task(self, task):
        """
        Perform the installation of the requested spec and/or dependency
        represented by the build task.

        Args:
            task (BuildTask): the installation build task for a package
        """
        install_args = task.request.install_args
        cache_only = install_args.get('cache_only')
        explicit = task.explicit
        full_hash_match = install_args.get('full_hash_match')
        tests = install_args.get('tests')
        unsigned = install_args.get('unsigned')
        use_cache = install_args.get('use_cache')

        pkg, pkg_id = task.pkg, task.pkg_id

        tty.msg(install_msg(pkg_id, self.pid))
        task.start = task.start or time.time()
        task.status = STATUS_INSTALLING

        # Use the binary cache if requested
        if use_cache and \
                _install_from_cache(pkg, cache_only, explicit, unsigned,
                                    full_hash_match):
            self._update_installed(task)
            if task.compiler:
                spack.compilers.add_compilers_to_config(
                    spack.compilers.find_compilers([pkg.spec.prefix]))
            return

        pkg.run_tests = (tests is True or tests and pkg.name in tests)

        # hook that allows tests to inspect the Package before installation
        # see unit_test_check() docs.
        if not pkg.unit_test_check():
            return

        try:
            self._setup_install_dir(pkg)

            # Create a child process to do the actual installation.
            # Preserve verbosity settings across installs.
            spack.package.PackageBase._verbose = (
                spack.build_environment.start_build_process(
                    pkg, build_process, install_args)
            )

            # Note: PARENT of the build process adds the new package to
            # the database, so that we don't need to re-read from file.
            spack.store.db.add(pkg.spec, spack.store.layout,
                               explicit=explicit)

            # If a compiler, ensure it is added to the configuration
            if task.compiler:
                spack.compilers.add_compilers_to_config(
                    spack.compilers.find_compilers([pkg.spec.prefix]))
        except spack.build_environment.StopPhase as e:
            # A StopPhase exception means that do_install was asked to
            # stop early from clients, and is not an error at this point
            spack.hooks.on_install_failure(task.request.pkg.spec)
            pid = '{0}: '.format(self.pid) if tty.show_pid() else ''
            tty.debug('{0}{1}'.format(pid, str(e)))
            tty.debug('Package stage directory: {0}'
                      .format(pkg.stage.source_path))
Python
def install(self):
        """
        Install the requested package(s) and/or associated dependencies.

        Args:
            pkg (spack.package.Package): the package to be built and installed
        """
        self._init_queue()
        fail_fast_err = 'Terminating after first install failure'
        single_explicit_spec = len(self.build_requests) == 1
        failed_explicits = []

        term_title = TermTitle(len(self.build_pq))

        while self.build_pq:
            term_title.next_pkg()
            task = self._pop_task()
            if task is None:
                continue

            spack.hooks.on_install_start(task.request.pkg.spec)
            install_args = task.request.install_args
            keep_prefix = install_args.get('keep_prefix')

            pkg, pkg_id, spec = task.pkg, task.pkg_id, task.pkg.spec

            term_title.set('Processing {0}'.format(pkg.name))
            tty.debug('Processing {0}: task={1}'.format(pkg_id, task))
            # Ensure that the current spec has NO uninstalled dependencies,
            # which is assumed to be reflected directly in its priority.
            #
            # If the spec has uninstalled dependencies, then there must be
            # a bug in the code (e.g., priority queue or uninstalled
            # dependencies handling).  So terminate under the assumption that
            # all subsequent tasks will have non-zero priorities or may be
            # dependencies of this task.
            if task.priority != 0:
                tty.error('Detected uninstalled dependencies for {0}: {1}'
                          .format(pkg_id, task.uninstalled_deps))
                left = [dep_id for dep_id in task.uninstalled_deps if
                        dep_id not in self.installed]
                if not left:
                    tty.warn('{0} does NOT actually have any uninstalled deps'
                             ' left'.format(pkg_id))
                dep_str = 'dependencies' if task.priority > 1 else 'dependency'

                # Hook to indicate task failure, but without an exception
                spack.hooks.on_install_failure(task.request.pkg.spec)

                raise InstallError(
                    'Cannot proceed with {0}: {1} uninstalled {2}: {3}'
                    .format(pkg_id, task.priority, dep_str,
                            ','.join(task.uninstalled_deps)))

            # Skip the installation if the spec is not being installed locally
            # (i.e., if external or upstream) BUT flag it as installed since
            # some package likely depends on it.
            if not task.explicit:
                if _handle_external_and_upstream(pkg, False):
                    self._flag_installed(pkg, task.dependents)
                    continue

            # Flag a failed spec.  Do not need an (install) prefix lock since
            # assume using a separate (failed) prefix lock file.
            if pkg_id in self.failed or spack.store.db.prefix_failed(spec):
                tty.warn('{0} failed to install'.format(pkg_id))
                self._update_failed(task)

                # Mark that the package failed
                # TODO: this should also be for the task.pkg, but we don't
                # model transitive yet.
                spack.hooks.on_install_failure(task.request.pkg.spec)

                if self.fail_fast:
                    raise InstallError(fail_fast_err)

                continue

            # Attempt to get a write lock.  If we can't get the lock then
            # another process is likely (un)installing the spec or has
            # determined the spec has already been installed (though the
            # other process may be hung).
            term_title.set('Acquiring lock for {0}'.format(pkg.name))
            ltype, lock = self._ensure_locked('write', pkg)
            if lock is None:
                # Attempt to get a read lock instead.  If this fails then
                # another process has a write lock so must be (un)installing
                # the spec (or that process is hung).
                ltype, lock = self._ensure_locked('read', pkg)

            # Requeue the spec if we cannot get at least a read lock so we
            # can check the status presumably established by another process
            # -- failed, installed, or uninstalled -- on the next pass.
            if lock is None:
                self._requeue_task(task)
                continue

            # Take a timestamp with the overwrite argument to allow checking
            # whether another process has already overridden the package.
            if task.request.overwrite and task.explicit:
                task.request.overwrite_time = time.time()

            # Determine state of installation artifacts and adjust accordingly.
            term_title.set('Preparing {0}'.format(pkg.name))
            self._prepare_for_install(task)

            # Flag an already installed package
            if pkg_id in self.installed:
                # Downgrade to a read lock to preclude other processes from
                # uninstalling the package until we're done installing its
                # dependents.
                ltype, lock = self._ensure_locked('read', pkg)
                if lock is not None:
                    self._update_installed(task)
                    _print_installed_pkg(pkg.prefix)

                    # It's an already installed compiler, add it to the config
                    if task.compiler:
                        spack.compilers.add_compilers_to_config(
                            spack.compilers.find_compilers([pkg.spec.prefix]))
                else:
                    # At this point we've failed to get a write or a read
                    # lock, which means another process has taken a write
                    # lock between our releasing the write and acquiring the
                    # read.
                    #
                    # Requeue the task so we can re-check the status
                    # established by the other process -- failed, installed,
                    # or uninstalled -- on the next pass.
                    self.installed.remove(pkg_id)
                    self._requeue_task(task)
                continue

            # Having a read lock on an uninstalled pkg may mean another
            # process completed an uninstall of the software between the
            # time we failed to acquire the write lock and the time we
            # took the read lock.
            #
            # Requeue the task so we can check the status presumably
            # established by the other process -- failed, installed, or
            # uninstalled -- on the next pass.
            if ltype == 'read':
                lock.release_read()
                self._requeue_task(task)
                continue

            # Proceed with the installation since we have an exclusive write
            # lock on the package.
            term_title.set('Installing {0}'.format(pkg.name))
            try:
                action = self._install_action(task)

                if action == InstallAction.INSTALL:
                    self._install_task(task)
                elif action == InstallAction.OVERWRITE:
                    OverwriteInstall(self, spack.store.db, task).install()

                self._update_installed(task)

                # If we installed then we should keep the prefix
                stop_before_phase = getattr(pkg, 'stop_before_phase', None)
                last_phase = getattr(pkg, 'last_phase', None)
                keep_prefix = keep_prefix or \
                    (stop_before_phase is None and last_phase is None)

            except KeyboardInterrupt as exc:
                # The build has been terminated with a Ctrl-C so terminate
                # regardless of the number of remaining specs.
                err = 'Failed to install {0} due to {1}: {2}'
                tty.error(err.format(pkg.name, exc.__class__.__name__,
                                     str(exc)))
                spack.hooks.on_install_cancel(task.request.pkg.spec)
                raise

            except (Exception, SystemExit) as exc:
                self._update_failed(task, True, exc)
                spack.hooks.on_install_failure(task.request.pkg.spec)

                # Best effort installs suppress the exception and mark the
                # package as a failure.
                if (not isinstance(exc, spack.error.SpackError) or
                        not exc.printed):
                    exc.printed = True
                    # SpackErrors can be printed by the build process or at
                    # lower levels -- skip printing if already printed.
                    # TODO: sort out this and SpackError.print_context()
                    tty.error('Failed to install {0} due to {1}: {2}'
                              .format(pkg.name, exc.__class__.__name__,
                                      str(exc)))

                # Terminate if requested to do so on the first failure.
                if self.fail_fast:
                    raise InstallError('{0}: {1}'
                                       .format(fail_fast_err, str(exc)))

                # Terminate at this point if the single explicit spec has
                # failed to install.
                if single_explicit_spec and task.explicit:
                    raise

                # Track explicit spec id and error to summarize when done
                if task.explicit:
                    failed_explicits.append((pkg_id, str(exc)))

            finally:
                # Remove the install prefix if anything went wrong during
                # install.
                if not keep_prefix and not action == InstallAction.OVERWRITE:
                    pkg.remove_prefix()

                # The subprocess *may* have removed the build stage. Mark it
                # not created so that the next time pkg.stage is invoked, we
                # check the filesystem for it.
                pkg.stage.created = False

            # Perform basic task cleanup for the installed spec to
            # include downgrading the write to a read lock
            self._cleanup_task(pkg)

        # Cleanup, which includes releasing all of the read locks
        self._cleanup_all_tasks()

        # Ensure we properly report if one or more explicit specs failed
        # or were not installed when they should have been.
        missing = [request.pkg_id for request in self.build_requests if
                   request.install_args.get('install_package') and
                   request.pkg_id not in self.installed]
        if failed_explicits or missing:
            for pkg_id, err in failed_explicits:
                tty.error('{0}: {1}'.format(pkg_id, err))

            for pkg_id in missing:
                tty.error('{0}: Package was not installed'.format(pkg_id))

            raise InstallError('Installation request failed.  Refer to '
                               'reported errors for failing package(s).')
Python
def install(self):
        """
        Try to run the install task overwriting the package prefix.
        If this fails, try to recover the original install prefix. If that
        fails too, mark the spec as uninstalled. This function always
        re-raises the original install error if installation fails.
        """
        try:
            with fs.replace_directory_transaction(self.task.pkg.prefix,
                                                  self.tmp_root):
                self.installer._install_task(self.task)
        except fs.CouldNotRestoreDirectoryBackup as e:
            self.database.remove(self.task.pkg.spec)
            tty.error('Recovery of install dir of {0} failed due to '
                      '{1}: {2}. The spec is now uninstalled.'.format(
                          self.task.pkg.name,
                          e.outer_exception.__class__.__name__,
                          str(e.outer_exception)))

            # Unwrap the actual installation exception.
            raise e.inner_exception
Python
def _add_default_args(self):
        """Ensure standard install options are set to at least the default."""
        for arg, default in [('cache_only', False),
                             ('context', 'build'),  # installs *always* build
                             ('dirty', False),
                             ('fail_fast', False),
                             ('fake', False),
                             ('full_hash_match', False),
                             ('install_deps', True),
                             ('install_package', True),
                             ('install_source', False),
                             ('keep_prefix', False),
                             ('keep_stage', False),
                             ('restage', False),
                             ('skip_patch', False),
                             ('tests', False),
                             ('unsigned', False),
                             ('use_cache', True),
                             ('verbose', False), ]:
            _ = self.install_args.setdefault(arg, default)
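Because setdefault() only fills in missing keys, caller-supplied arguments always win over these defaults. A tiny illustration with made-up arguments:

install_args = {'cache_only': True}            # explicitly provided by a caller
for arg, default in [('cache_only', False), ('verbose', False)]:
    install_args.setdefault(arg, default)
print(install_args)  # {'cache_only': True, 'verbose': False}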
Python
def _missing(name, purpose, system_only=True):
    """Message to be printed if an executable is not found"""
    msg = '[{2}] MISSING "{0}": {1}'
    if not system_only:
        return msg.format(name, purpose, '@*y{{B}}')
    return msg.format(name, purpose, '@*y{{-}}')
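How the positional fields in the message template line up; the status token here is a plain placeholder rather than Spack's color markup.

msg = '[{2}] MISSING "{0}": {1}'
# {0} -> executable name, {1} -> its purpose, {2} -> status token
print(msg.format('gpg2', 'sign buildcaches', 'B'))
# [B] MISSING "gpg2": sign buildcaches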
Python
def _required_system_executable(exes, msg):
    """Search for an executable in the system path only."""
    if isinstance(exes, six.string_types):
        exes = (exes,)
    if spack.util.executable.which_string(*exes):
        return True, None
    return False, msg
Python
def _required_python_module(module, query_spec, msg):
    """Check if a Python module is available in the current interpreter or
    if it can be loaded from the bootstrap store
    """
    if _python_import(module) or _try_import_from_store(module, query_spec):
        return True, None
    return False, msg
Python
def _required_executable(exes, query_spec, msg):
    """Search for an executable in the system path or in the bootstrap store."""
    if isinstance(exes, six.string_types):
        exes = (exes,)
    if (spack.util.executable.which_string(*exes) or
            _executables_in_store(exes, query_spec)):
        return True, None
    return False, msg
Python
def status_message(section):
    """Return a status message to be printed to screen that refers to the
    section passed as argument and a bool which is True if there are missing
    dependencies.

    Args:
        section (str): either 'core' or 'buildcache' or 'optional' or 'develop'
    """
    pass_token, fail_token = '@*g{[PASS]}', '@*r{[FAIL]}'

    # Maps each section to its header and its list of requirement checks
    spack_sections = {
        'core': ("{0} @*{{Core Functionalities}}", _core_requirements),
        'buildcache': ("{0} @*{{Binary packages}}", _buildcache_requirements),
        'optional': ("{0} @*{{Optional Features}}", _optional_requirements),
        'develop': ("{0} @*{{Development Dependencies}}",
                    _development_requirements)
    }
    msg, required_software = spack_sections[section]

    with ensure_bootstrap_configuration():
        missing_software = False
        for found, err_msg in required_software():
            if not found:
                missing_software = True
                msg += "\n " + err_msg
        msg += '\n'
    msg = msg.format(pass_token if not missing_software else fail_token)
    return msg, missing_software
Python
def add_metadata_values_to_record(record_message, stream_to_sync):
    """Populate metadata _sdc columns from incoming record message
    The location of the required attributes is fixed in the stream
    """
    extended_record = record_message['record']
    extended_record['_sdc_extracted_at'] = record_message.get('time_extracted')
    extended_record['_sdc_batched_at'] = datetime.now().isoformat()
    extended_record['_sdc_deleted_at'] = record_message.get('record', {}).get('_sdc_deleted_at')

    return extended_record
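A hedged usage sketch with a hand-written Singer-style RECORD message; the field values are invented for illustration, and stream_to_sync is unused by the function so None is passed.

record_message = {
    'type': 'RECORD',
    'stream': 'public-orders',
    'time_extracted': '2021-01-01T00:00:00.000000Z',
    'record': {'id': 1, 'status': 'shipped'},
}

extended = add_metadata_values_to_record(record_message, stream_to_sync=None)
# extended now also carries:
#   _sdc_extracted_at -> '2021-01-01T00:00:00.000000Z'
#   _sdc_batched_at   -> the current timestamp in ISO format
#   _sdc_deleted_at   -> None (the record has no '_sdc_deleted_at' key)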
Python
def flush_streams(
        streams,
        row_count,
        stream_to_sync,
        config,
        state,
        flushed_state,
        filter_streams=None):
    """
    Flushes all buckets, resets record counts to 0 and empties the per-stream
    lists of records to load.

    :param streams: dictionary with records to load per stream
    :param row_count: dictionary with row count per stream
    :param stream_to_sync: Snowflake db sync instance per stream
    :param config: dictionary containing the configuration
    :param state: dictionary containing the original state from tap
    :param flushed_state: dictionary containing updated states only when streams got flushed
    :param filter_streams: Keys of streams to flush from the streams dict. Default is every stream
    :return: State dict with flushed positions
    """
    parallelism = config.get("parallelism", DEFAULT_PARALLELISM)
    max_parallelism = config.get("max_parallelism", DEFAULT_MAX_PARALLELISM)

    # Parallelism 0 means auto parallelism: flush with one thread per stream
    # that needs to be loaded, but never more threads than max_parallelism.
    if parallelism == 0:
        n_streams_to_flush = len(streams.keys())
        if n_streams_to_flush > max_parallelism:
            parallelism = max_parallelism
        else:
            parallelism = n_streams_to_flush

    # Select the required streams to flush
    if filter_streams:
        streams_to_flush = filter_streams
    else:
        streams_to_flush = streams.keys()

    # Single-host, thread-based parallelism
    with parallel_backend('threading', n_jobs=parallelism):
        Parallel()(delayed(load_stream_batch)(
            stream=stream,
            records_to_load=streams[stream],
            row_count=row_count,
            db_sync=stream_to_sync[stream],
            delete_rows=config.get('hard_delete')
        ) for stream in streams_to_flush)

    # Reset flushed stream records to empty to avoid flushing the same records again
    for stream in streams_to_flush:
        streams[stream] = {}

        # Update flushed streams
        if filter_streams:
            # Update flushed_state position if we have state information for the stream
            if state is not None and stream in state.get('bookmarks', {}):
                # Create bookmark key if not exists
                if 'bookmarks' not in flushed_state:
                    flushed_state['bookmarks'] = {}
                # Copy the stream bookmark from the latest state
                flushed_state['bookmarks'][stream] = copy.deepcopy(state['bookmarks'][stream])

        # If we flush every bucket use the latest state
        else:
            flushed_state = copy.deepcopy(state)

    # Return the state message with flushed positions
    return flushed_state
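The auto-parallelism rule above reduces to min(number of streams, max_parallelism) when parallelism is 0. A standalone sketch follows; the default constants are assumptions for the example, not necessarily the target's real values.

DEFAULT_PARALLELISM = 0        # assumed default: 0 means "auto"
DEFAULT_MAX_PARALLELISM = 16   # assumed default cap


def effective_parallelism(config, streams):
    parallelism = config.get('parallelism', DEFAULT_PARALLELISM)
    max_parallelism = config.get('max_parallelism', DEFAULT_MAX_PARALLELISM)
    if parallelism == 0:
        # Auto mode: one thread per stream to flush, capped at max_parallelism.
        parallelism = min(len(streams), max_parallelism)
    return parallelism


print(effective_parallelism({}, {'s1': {}, 's2': {}}))                    # 2
print(effective_parallelism({'max_parallelism': 1}, {'a': {}, 'b': {}}))  # 1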
Python
def subobjs(self):
        """A list of sub-objects"""
        if not hasattr(self, "_subobjs"):
            self._subobjs = []
        return self._subobjs
Python
def generator(self):
        """String giving the way the file was generated; typically the name of
        the generating software package.
        """
        return self._generator
Python
def _centroid(gen):
    """Find the centroid of the coordinates given by the generator. The
    generator should yield pairs (longitude, latitude).

    :return: Pair (longitude, latitude) of the centroid.
    """
    n, lon, lat = 0, 0, 0
    for pair in gen:
        lon += pair[0]
        lat += pair[1]
        n += 1
    if n == 0:
        raise ValueError("No points found to compute centroid from")
    return lon / n, lat / n
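A quick usage example for _centroid with made-up coordinates.

points = [(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0)]
print(_centroid(iter(points)))   # (1.0, 1.0)

try:
    _centroid(iter([]))
except ValueError as error:
    print(error)                 # No points found to compute centroid from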
Python
def complete_nodes(self):
        """Returns an ordered list of :class:`Node` instances which form the
        way.
        """
        return self._full_nodes
Python
def centroid(self):
        """Compute the centroid of this way.

        :return: Pair (longitude, latitude) of the centroid.
        """
        return _centroid(
            ((node.longitude, node.latitude) for node in self.complete_nodes)
        )
Python
def complete_members(self):
        """Returns a list of members, each entry of which is a fully populated
        node, way or relation object.
        """
        return self._full_members
Python
def centroid(self):
        """Compute the centroid of this relation. We recursively find the
        centroid of each member, and then find the centroid of these centroids.

        :return: Pair (longitude, latitude) of the centroid.
        """
        def gen():
            for member in self.complete_members:
                if member.name == "node":
                    yield (member.longitude, member.latitude)
                else:
                    yield member.centroid()
        return _centroid(gen())
Python
def lists_agree_up_to_ordering(l1, l2):
    """Use of dictionaries means that returned lists might be in any order, so
    we need to allow order to vary..."""
    if len(l1) != len(l2):
        return False
    li1, li2 = list(l1), list(l2)
    try:
        for x in li1:
            index = li2.index(x)
            del li2[index]
        return True
    except ValueError:
        return False
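A brief illustration of the multiset-style comparison; the inputs are made up.

print(lists_agree_up_to_ordering([1, 2, 2, 3], [3, 2, 1, 2]))  # True
print(lists_agree_up_to_ordering([1, 2], [2, 2]))              # False (no 1 in the second list)
print(lists_agree_up_to_ordering([1, 2], [1, 2, 3]))           # False (length mismatch)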
Python
def osm(self):
        """Returns an :class:`OSM` object detailing how the XML file was
        generated.
        """
        return self._osm
Python
def _search_all_tags(self, dbname, wanted_tags):
        """Generator of ids which match all wanted_tags"""
        wanted_tags = list(wanted_tags.items())
        if len(wanted_tags) == 0:
            raise ValueError("Must specify at least one tag")
        for osm_id in self._search_tags(dbname, wanted_tags[0][0], wanted_tags[0][1]):
            tags = self._get_tags(dbname, osm_id)
            if all(key in tags and tags[key] == value
                   for (key, value) in wanted_tags):
                yield osm_id
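The strategy above, seed candidates from the first wanted tag and then verify the full set, can be shown with plain in-memory dicts; all_tags and search_all_tags here are hypothetical stand-ins for the database-backed lookups.

# Hypothetical in-memory stand-in for the _search_tags / _get_tags database queries.
all_tags = {
    1: {'amenity': 'pub', 'name': 'The Crown'},
    2: {'amenity': 'pub'},
    3: {'amenity': 'cafe', 'name': 'The Crown'},
}


def search_all_tags(wanted_tags):
    wanted = list(wanted_tags.items())
    first_key, first_value = wanted[0]
    # Seed: ids whose tags contain the first wanted key/value pair.
    candidates = (i for i, tags in all_tags.items() if tags.get(first_key) == first_value)
    for osm_id in candidates:
        tags = all_tags[osm_id]
        if all(tags.get(key) == value for key, value in wanted):
            yield osm_id


print(list(search_all_tags({'amenity': 'pub', 'name': 'The Crown'})))  # [1]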
Python
def _search_all_tag_keys(self, dbname, wanted_keys):
        """Generator of ids which match all wanted_keys"""
        wanted_tags = list(wanted_keys)
        if len(wanted_tags) == 0:
            raise ValueError("Must specify at least one tag key")
        for osm_id in self._search_tag_keys(dbname, wanted_tags[0]):
            tags = self._get_tags(dbname, osm_id)
            if all(key in tags for key in wanted_tags):
                yield osm_id
Python
def search_relation_tags(self, tags):
        """Search all relations for any with matching tags.

        :param tags: A dictionary of key/value pairs. Only relations with all
          these tags are returned.

        :return: A list of matching relations.
        """
        return [self.relation(osm_id)
                for osm_id in self._search_all_tags("relation_tags", tags)]
Python
def search_way_tags(self, tags):
        """Search all ways for any with matching tags.

        :param tags: A dictionary of key/value pairs. Only ways with all these
          tags are returned.

        :return: A list of matching ways.
        """
        return [self.way(osm_id)
                for osm_id in self._search_all_tags("way_tags", tags)]
Python
def search_node_tags(self, tags):
        """Search all nodes for any with matching tags.

        :param tags: A dictionary of key/value pairs. Only nodes with all
          these tags are returned.

        :return: A list of matching nodes.
        """
        return [self.node(osm_id)
                for osm_id in self._search_all_tags("node_tags", tags)]
Python
def search_relation_tag_keys(self, keys):
        """Search all relations for any with matching tag keys.

        :param keys: A set of keys to search for. Any relations which have
          tags for all these keys (and any values) will be returned.

        :return: A generator (for efficiency) of matching relations.
        """
        for osm_id in self._search_all_tag_keys("relation_tags", keys):
            yield self.relation(osm_id)
Python
def search_way_tag_keys(self, keys, just_ids=False):
        """Search all ways for any with matching tag keys.

        :param keys: A set of keys to search for. Any ways which have tags for
          all these keys (and any values) will be returned.
        :param just_ids: If True, then only return the osm_ids of the ways.
          Default is False.

        :return: A generator (for efficiency) of matching ways.
        """
        gen = self._search_all_tag_keys("way_tags", keys)
        if just_ids:
            yield from gen
        else:
            for osm_id in gen:
                yield self.way(osm_id)
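A hedged usage sketch: db is assumed to be an already-constructed instance of the class these search methods belong to, and no attributes of the returned Way objects are assumed.

# Assumes `db` is an instance of the OSM database wrapper defined in this module.
ways_with_names = list(db.search_way_tag_keys({'highway', 'name'}))
way_ids = list(db.search_way_tag_keys({'highway', 'name'}, just_ids=True))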
Python
def search_node_tag_keys(self, keys):
        """Search all nodes for any with matching tag keys.

        :param keys: A set of keys to search for. Any nodes which have tags
          for all these keys (and any values) will be returned.

        :return: A generator (for efficiency) of matching nodes.
        """
        for osm_id in self._search_all_tag_keys("node_tags", keys):
            yield self.node(osm_id)
Python
def node(self, osm_id): """Return details of the node with this id. Raises KeyError on failure to find. :param osm_id: The OSM id of the node. :return: An instance of :class:`Node`. """ result = self._connection.execute("select * from nodes where nodes.osm_id=?", (osm_id,)).fetchone() if result is None: raise KeyError("Node {} not found".format(osm_id)) return self._node_from_obj(result)
def node(self, osm_id):
        """Return details of the node with this id. Raises KeyError on
        failure to find.

        :param osm_id: The OSM id of the node.

        :return: An instance of :class:`Node`.
        """
        result = self._connection.execute("select * from nodes where nodes.osm_id=?",
                                          (osm_id,)).fetchone()
        if result is None:
            raise KeyError("Node {} not found".format(osm_id))
        return self._node_from_obj(result)