def get_mod_subcmds(mod):
    ## Look in module attributes
    subcmds = get_obj_subcmds(mod)
    path = os.path.dirname(os.path.realpath(mod.__file__))
    if mod.__package__ is None:
        sys.path.insert(0, os.path.dirname(path))
        mod.__package__ = kf.basename(path)
    for module_name in get_module_resources(mod):
        try:
            mod = importlib.import_module(".%s" % module_name, mod.__package__)
        except ImportError as e:
            # format the exception directly (e.message is Python 2 only)
            msg.warn("%r could not be loaded: %s" % (module_name, e))
            continue
        except IOError as e:
            print("%s" % module_name)
            raise
        if hasattr(mod, "Command") and is_cmd(mod.Command):
            obj = mod.Command
            if obj.__doc__ is None:
                msg.warn("Missing doc string for command from "
                         "module %s" % module_name)
                continue
            if isinstance(obj, type):
                obj = obj()  ## instantiate it
            name = module_name.split("_", 1)[1]
            if name in subcmds:
                raise ValueError(
                    "Module command %r conflicts with already defined object "
                    "command." % name)
            subcmds[name] = obj
    return subcmds
Fetch actions defined in Python modules in the same directory. Module files loaded are of the form: '%s_*.py' % prefix
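For illustration, a hypothetical command module this loader would pick up (the file name and prefix are assumptions; the `Command` attribute, the docstring requirement, and the name-after-underscore rule come from the code above):

# file: mycli_status.py  ->  discovered as subcommand "status"

class Command(object):
    """Show the current status."""

    def run(self):
        print("all good")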
def initialize(self):
    if not os.path.exists(self.root_dir):
        os.makedirs(self.root_dir)
    assert os.path.isdir(self.root_dir), \
        "%s is not a directory! Please move or remove it." % self.root_dir
    for d in ["bin", "lib", "include"]:
        target_path = os.path.join(self.root_dir, d)
        if not os.path.exists(target_path):
            os.makedirs(target_path)
    if not os.path.exists(self.manifest_path):
        open(self.manifest_path, "w+").close()
    self.new = False
Generate the root directory if it doesn't already exist
def finalize(self):
    if self.rc_file:
        self.rc_file.close()
    if self.env_file:
        self.env_file.close()
finalize any open file handles
def remove(self):
    if self.rc_file:
        self.rc_file.close()
    if self.env_file:
        self.env_file.close()
    shutil.rmtree(self.root_dir)
Removes the sprinter directory, if it exists
def symlink_to_bin(self, name, path):
    self.__symlink_dir("bin", name, path)
    os.chmod(os.path.join(self.root_dir, "bin", name),
             os.stat(path).st_mode | stat.S_IXUSR | stat.S_IRUSR)
Symlink an object at path to name in the bin folder.
def remove_from_bin(self, name):
    self.__remove_path(os.path.join(self.root_dir, "bin", name))
Remove an object from the bin folder.
def remove_from_lib(self, name):
    self.__remove_path(os.path.join(self.root_dir, "lib", name))
Remove an object from the lib folder.
def remove_feature(self, feature_name):
    self.clear_feature_symlinks(feature_name)
    if os.path.exists(self.install_directory(feature_name)):
        self.__remove_path(self.install_directory(feature_name))
Remove a feature from the environment root folder.
def clear_feature_symlinks(self, feature_name):
    logger.debug("Clearing feature symlinks for %s" % feature_name)
    feature_path = self.install_directory(feature_name)
    for d in ('bin', 'lib'):
        if os.path.exists(os.path.join(self.root_dir, d)):
            for link in os.listdir(os.path.join(self.root_dir, d)):
                path = os.path.join(self.root_dir, d, link)
                if feature_path in os.path.realpath(path):
                    getattr(self, 'remove_from_%s' % d)(link)
Clear the symlinks for a feature in the symlinked path
def add_to_env(self, content):
    if not self.rewrite_config:
        raise DirectoryException(
            "Error! Directory was not initialized w/ rewrite_config.")
    if not self.env_file:
        self.env_path, self.env_file = self.__get_env_handle(self.root_dir)
    self.env_file.write(content + '\n')
add content to the env script.
def add_to_rc(self, content):
    if not self.rewrite_config:
        raise DirectoryException(
            "Error! Directory was not initialized w/ rewrite_config.")
    if not self.rc_file:
        self.rc_path, self.rc_file = self.__get_rc_handle(self.root_dir)
    self.rc_file.write(content + '\n')
add content to the rc script.
def add_to_gui(self, content):
    if not self.rewrite_config:
        raise DirectoryException(
            "Error! Directory was not initialized w/ rewrite_config.")
    if not self.gui_file:
        self.gui_path, self.gui_file = self.__get_gui_handle(self.root_dir)
    self.gui_file.write(content + '\n')
add content to the gui script.
def __remove_path(self, path):
    curpath = os.path.abspath(os.curdir)
    if not os.path.exists(path):
        logger.warn("Attempted to remove a non-existent path %s" % path)
        return
    try:
        if os.path.islink(path):
            os.unlink(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.unlink(path)
        # in the case we just deleted ourselves out of a valid directory,
        # we move to a valid directory.
        if curpath == path:
            os.chdir(tempfile.gettempdir())
    except OSError:
        logger.error("Unable to remove object at path %s" % path)
        raise DirectoryException("Unable to remove object at path %s" % path)
Remove the object (symlink, directory, or file) at the given path
def __get_env_handle(self, root_dir):
    env_path = os.path.join(root_dir, '.env')
    gui_path = os.path.join(root_dir, '.gui')
    fh = open(env_path, "w+")
    # .env will source utils.sh if it hasn't already
    fh.write(source_template % (gui_path, gui_path))
    fh.write(source_template % (self.shell_util_path, self.shell_util_path))
    return (env_path, fh)
get the filepath and filehandle to the .env file for the environment
def __get_rc_handle(self, root_dir):
    rc_path = os.path.join(root_dir, '.rc')
    env_path = os.path.join(root_dir, '.env')
    fh = open(rc_path, "w+")
    # .rc will always source .env
    fh.write(source_template % (env_path, env_path))
    return (rc_path, fh)
get the filepath and filehandle to the rc file for the environment
def __get_gui_handle(self, root_dir):
    gui_path = os.path.join(root_dir, '.gui')
    fh = open(gui_path, "w+")
    return (gui_path, fh)
get the filepath and filehandle to the .gui file for the environment
def __symlink_dir(self, dir_name, name, path):
    target_dir = os.path.join(self.root_dir, dir_name)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    target_path = os.path.join(self.root_dir, dir_name, name)
    logger.debug("Attempting to symlink %s to %s..." % (path, target_path))
    if os.path.exists(target_path):
        if os.path.islink(target_path):
            os.remove(target_path)
        else:
            logger.warn("%s is not a symlink! please remove it manually."
                        % target_path)
            return
    os.symlink(path, target_path)
Symlink an object at path to name in the dir_name folder. Remove the existing symlink if one already exists.
def create(self, options=None):
    if options is None:
        raise ValueError("Please pass in an options dict")
    if not _has_content(options):
        raise NoContentError("must supply 'document_content' or 'document_url'")
    default_options = {
        "name": "default",
        "document_type": "pdf",
        "test": False,
        "async": False,
        "raise_exception_on_failure": False,
    }
    options = dict(list(default_options.items()) + list(options.items()))
    raise_exception_on_failure = options.pop("raise_exception_on_failure")
    query = {"user_credentials": self.api_key}
    if options["async"]:
        query["output"] = "json"
    resp = requests.post(
        "%sdocs" % (self._url), json=options, params=query, timeout=self._timeout
    )
    if raise_exception_on_failure and resp.status_code != 200:
        raise DocumentCreationFailure(resp.content, resp.status_code)
    if options["async"]:
        return json.loads(resp.content.decode("utf-8"))
    else:
        return resp
Create a new document job (sync or async).
def list_docs(self, options=None):
    if options is None:
        raise ValueError("Please pass in an options dict")
    default_options = {
        "page": 1,
        "per_page": 100,
        "raise_exception_on_failure": False,
        "user_credentials": self.api_key,
    }
    options = dict(list(default_options.items()) + list(options.items()))
    raise_exception_on_failure = options.pop("raise_exception_on_failure")
    resp = requests.get(
        "%sdocs" % (self._url), params=options, timeout=self._timeout
    )
    if raise_exception_on_failure and resp.status_code != 200:
        raise DocumentListingFailure(resp.content, resp.status_code)
    return resp
Return list of previously created documents.
def status(self, status_id, raise_exception_on_failure=False):
    query = {"output": "json", "user_credentials": self.api_key}
    resp = requests.get(
        "%sstatus/%s" % (self._url, status_id), params=query, timeout=self._timeout
    )
    if raise_exception_on_failure and resp.status_code != 200:
        raise DocumentStatusFailure(resp.content, resp.status_code)
    if resp.status_code == 200:
        as_json = json.loads(resp.content)
        if as_json["status"] == "completed":
            as_json["download_key"] = _get_download_key(as_json["download_url"])
        return as_json
    return resp
Return the status of the generation job.
def download(self, download_key, raise_exception_on_failure=False):
    query = {"output": "json", "user_credentials": self.api_key}
    resp = requests.get(
        "%sdownload/%s" % (self._url, download_key),
        params=query,
        timeout=self._timeout,
    )
    if raise_exception_on_failure and resp.status_code != 200:
        raise DocumentDownloadFailure(resp.content, resp.status_code)
    return resp
Download the file represented by the download_key.
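Taken together, create/status/download form a small job lifecycle. A minimal sketch of the async round trip (the client class name, its constructor, and the "status_id" key in the create response are assumptions; the method calls are the ones documented above):

client = DocumentClient(api_key="YOUR_KEY")  # hypothetical constructor

job = client.create({
    "document_content": "<html><body>Hello</body></html>",
    "async": True,
    "raise_exception_on_failure": True,
})

info = client.status(job["status_id"])  # key name assumed from the API's JSON
if info["status"] == "completed":
    resp = client.download(info["download_key"])
    with open("out.pdf", "wb") as fh:
        fh.write(resp.content)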
def merge_INIConf(a, b):
    for sname in b.sections():
        if a.has_section(sname):
            for oname in b.options(sname):
                a[sname][oname] = b[sname][oname]
        else:
            a[sname] = b[sname]
    return a
Override the contents of a with the contents of b (where section/option names collide), and return a.
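For instance, assuming both arguments are configparser.ConfigParser objects (which support the mapping protocol used above):

import configparser

a = configparser.ConfigParser()
a.read_string("[db]\nhost = localhost\nport = 5432\n")

b = configparser.ConfigParser()
b.read_string("[db]\nhost = db.example.com\n[cache]\nttl = 60\n")

merged = merge_INIConf(a, b)
print(merged["db"]["host"])    # db.example.com -- overridden by b
print(merged["db"]["port"])    # 5432 -- kept from a
print(merged["cache"]["ttl"])  # 60 -- new section copied over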
def copy_from_dict(self, adict, parent=None):
    if not parent:
        parent = self
    for k, v in adict.items():
        if isinstance(v, dict):
            vDict = PYConf(v)
            self.copy_from_dict(v, vDict)
            parent[k] = vDict
        else:
            parent[k] = v
Copy all values from an existing dict.

:param adict: The dict to copy from.
:type adict: dict
:param parent: The parent object to copy into. If None, copy into self.
:type parent: rookout.PYConf
def dump(self, human=False):
    txt = str(self)
    if human:
        txt = txt.replace(", '", ",\n'")
        txt = txt.replace("{", "{\n")
        txt = txt.replace("}", "\n}")
        txt = txt.replace("[", "[\n")
        txt = txt.replace("]", "\n]")
    return txt
Dump the contents of this object as a string.

:param bool human: If True, format the output for human readability.
def save_to_file(self, path, human=True):
    write_file(path, self.dump(human))
    slog.info("Save %s done.", path)
Save the contents of this object to a file.

:param str path: The file path to save to.
:param bool human: See :func:`dump()`.
def read_from_file(self, path):
    if not os.path.exists(path):
        slog.warning("The file %s does not exist.", path)
        return False
    txt = read_file(path)
    dic = eval(txt)
    self.copy_from_dict(dic)
    return True
Read contents from a text file, assumed to be in the same format produced by :func:`dump()`.

:param str path: The file path to read from.
def _set_parameters(self, parameters):
    nr_f = self.f.size
    # sort out parameters
    rho0, m, tau, c = self._sort_parameters(parameters)
    newsize = (nr_f, len(m))
    # rho0_resized = np.resize(rho0, newsize)
    m_resized = np.resize(m, newsize)
    tau_resized = np.resize(tau, newsize)
    c_resized = np.resize(c, newsize)
    omega = np.atleast_2d(2 * np.pi * self.f).T
    self.w = np.resize(omega, (len(m), nr_f)).T
    self.rho0 = rho0
    self.m = m_resized
    self.tau = tau_resized
    self.c = c_resized
    # compute some common terms
    self.otc = (self.w * self.tau) ** self.c
    self.otc2 = (self.w * self.tau) ** (2 * self.c)
    self.ang = self.c * np.pi / 2.0  # rad
    self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2
Sort out the various possible parameter inputs and return a config object (dict).

We have multiple input formats:

1) a list, tuple, or numpy.ndarray, containing the linear parameters in the following order:
    * for a single term: rho0, m1, tau1, c1
    * for multiple terms: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...

2) a dictionary with the entries "rho0", "m", "tau", "c"

2b) if the dictionary entries for "m", "tau", and "c" are lists, the entries correspond to multiple polarization terms
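A sketch of the two accepted formats for a two-term model (the numeric values are arbitrary):

# 1) flat sequence: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...
pars_flat = [100, 0.1, 0.05, 0.04, 0.004, 0.8, 0.6]

# 2) dictionary; list-valued entries describe multiple polarization terms
pars_dict = {
    'rho0': 100,
    'm': [0.1, 0.05],
    'tau': [0.04, 0.004],
    'c': [0.8, 0.6],
}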
def response(self, parameters):
    # get a config object
    self._set_parameters(parameters)
    terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
    # sum up terms
    specs = np.sum(terms, axis=1)
    rcomplex = self.rho0 * (1 - specs)
    response = sip_response.sip_response(self.f, rcomplex=rcomplex)
    return response
Complex response of the Cole-Cole model:

:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j \omega \tau_i)^{c_i}})\right)`

Parameters
----------
parameters: list or tuple or numpy.ndarray
    Cole-Cole model parameters: rho0, m, tau, c (all linear)

Returns
-------
response: :class:`sip_models.sip_response.sip_response`
    model response object
def dre_drho0(self, pars):
    self._set_parameters(pars)
    numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
    term = numerator / self.denom
    specs = np.sum(term, axis=1)
    result = 1 - specs
    return result
Compute partial derivative of real parts with respect to :math:`\rho_0`

:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 - \frac{m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`

Note that partial derivatives towards :math:`\rho_0` are 1D, in contrast to the other parameter derivatives, which usually return 2D arrays!

Returns
-------
dre_drho0: :class:`numpy.ndarray`
    Size N (nr of frequencies) array with the derivatives
def dre_dlog10rho0(self, pars):
    # first call the linear response to set the parameters
    linear_response = self.dre_drho0(pars)
    result = np.log(10) * self.rho0 * linear_response
    return result
Compute partial derivative of real parts with respect to log10(rho0)
def dre_dm(self, pars):
    self._set_parameters(pars)
    numerator = -self.otc * (np.cos(self.ang) + self.otc)
    result = numerator / self.denom
    result *= self.rho0
    return result
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 \frac{(\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
def dim_dm(self, pars):
    self._set_parameters(pars)
    numerator = -self.otc * np.sin(self.ang)
    result = numerator / self.denom
    result *= self.rho0
    return result
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 \frac{(\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
def dim_dtau(self, pars):
    self._set_parameters(pars)
    # term1
    nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
        self.c * self.tau ** (self.c - 1)
    term1 = nom1 / self.denom
    # term2
    nom2 = (self.m * self.otc * np.sin(self.ang)) *\
        (2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
         np.cos(self.ang) +
         2 * self.c * self.w ** (2 * self.c) * self.tau ** (2 * self.c - 1))
    term2 = nom2 / self.denom ** 2
    result = term1 + term2
    result *= self.rho0
    return result
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0 \frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c \pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
def dim_dc(self, pars):
    self._set_parameters(pars)
    # term1
    nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
        np.sin(self.ang)
    nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
    term1 = (nom1a + nom1b) / self.denom
    # term2
    nom2 = (self.m * self.otc * np.sin(self.ang)) *\
        (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
         2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
         2 * np.log(self.w * self.tau) * self.otc2)
    term2 = nom2 / self.denom ** 2
    result = term1 + term2
    result *= self.rho0
    return result
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0 \frac{-m ln(\omega \tau) (\omega \tau)^c sin(\frac{c \pi}{2}) - m (\omega \tau)^c \frac{\pi}{2} cos(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[m (\omega \tau)^c sin(\frac{c \pi}{2}) \right] \cdot \left[ 2 ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) - 2 (\omega \tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
def Jacobian_re_im(self, pars):
    partials = []
    # partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
    partials.append(self.dre_drho0(pars)[:, np.newaxis])
    partials.append(self.dre_dm(pars))
    # partials.append(self.dre_dlog10tau(pars))
    partials.append(self.dre_dtau(pars))
    partials.append(self.dre_dc(pars))
    # partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
    partials.append(self.dim_drho0(pars)[:, np.newaxis])
    partials.append(self.dim_dm(pars))
    # partials.append(self.dim_dlog10tau(pars))
    partials.append(self.dim_dtau(pars))
    partials.append(self.dim_dc(pars))
    print('SHAPES')
    for x in partials:
        print(x.shape)
    J = np.concatenate(partials, axis=1)
    return J
:math:`J`

>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
def read_dict_or_list_from_json(desired_type: Type[dict], file_object: TextIOBase,
                                logger: Logger, conversion_finder: ConversionFinder,
                                **kwargs) -> Dict[str, Any]:
    # local import; json is only needed by this parser
    import json
    res = json.load(file_object)

    # convert if required
    return ConversionFinder.convert_collection_values_according_to_pep(
        res, desired_type, conversion_finder, logger, **kwargs)
Helper method to read a dictionary from a .json file using the json library

:param file_object:
:return:
def get_default_collection_parsers(parser_finder: ParserFinder,
                                   conversion_finder: ConversionFinder) -> List[AnyParser]:
    return [SingleFileParserFunction(parser_function=read_dict_or_list_from_json,
                                     streaming_mode=True,
                                     custom_name='read_dict_or_list_from_json',
                                     supported_exts={'.json'},
                                     supported_types={dict, list},
                                     function_args={'conversion_finder': conversion_finder}),
            MultifileCollectionParser(parser_finder)
            ]
Utility method to return the default parsers able to parse a dictionary from a file. :return:
def get_default_collection_converters(conversion_finder: ConversionFinder) \
        -> List[Union[Converter[Any, dict], Converter[dict, Any]]]:
    return [ConverterFunction(from_type=List, to_type=Set,
                              conversion_method=list_to_set,
                              custom_name='list_to_set',
                              function_args={'conversion_finder': conversion_finder}),
            ConverterFunction(from_type=List, to_type=Tuple,
                              conversion_method=list_to_tuple,
                              custom_name='list_to_tuple',
                              function_args={'conversion_finder': conversion_finder})]
Utility method to return the default converters associated with collections (here: list to set and list to tuple)

:return:
def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject,
                                             desired_type: Type[Any],
                                             logger: Logger) -> Dict[str, Any]:
    # nb of file children
    n_children = len(obj_on_fs.get_multifile_children())

    # first extract base collection type
    subtypes, key_type = _extract_collection_base_type(desired_type)

    if isinstance(subtypes, tuple):
        # -- check the tuple length
        if n_children != len(subtypes):
            raise FolderAndFilesStructureError.create_for_multifile_tuple(
                obj_on_fs, len(subtypes), len(obj_on_fs.get_multifile_children()))
    else:
        # -- repeat the subtype n times
        subtypes = [subtypes] * n_children

    # -- for each child create a plan with the appropriate parser
    children_plan = OrderedDict()
    # use sorting for reproducible results in case of multiple errors
    for (child_name, child_fileobject), child_typ in \
            zip(sorted(obj_on_fs.get_multifile_children().items()), subtypes):
        # -- use the parserfinder to find the plan
        t, child_parser = self.parser_finder.build_parser_for_fileobject_and_desiredtype(
            child_fileobject, child_typ, logger)
        children_plan[child_name] = child_parser.create_parsing_plan(
            t, child_fileobject, logger, _main_call=False)

    return children_plan
Simply inspects the required type to find the base type expected for items of the collection, and relies on the ParserFinder to find the parsing plan :param obj_on_fs: :param desired_type: :param logger: :return:
def dispatch(self, producer=None):
    log.info('@Event.dispatch `{}` with subject `{}`'
             .format(self.name, self.subject))
    producer = producer or Registry.get_producer()
    if not producer:
        raise MissingProducerError('You have not registered a Producer')
    try:
        producer.produce(self.topic, self.name, self.subject, self.data)
    except:
        fallback = Registry.get_fallback()
        fallback(self)
        raise
Dispatch the event, sending a message to the queue using a producer. :param producer: optional `Producer` to replace the default one.
def get_stackdelta(op):
    res = opstackd[op.opname]
    if callable(res):
        res = res(op)
    return res
Returns the number of elements that the instruction *op* adds to the stack. # Arguments op (dis.Instruction): The instruction to retrieve the stackdelta value for. # Raises KeyError: If the instruction *op* is not supported.
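A usage sketch with the standard dis module (opcode names vary across Python versions; the printed values shown are an assumption for an older CPython):

import dis

for op in dis.get_instructions("a = b + c"):
    print(op.opname, get_stackdelta(op))
# e.g. LOAD_NAME 1, LOAD_NAME 1, BINARY_ADD -1, STORE_NAME -1, ...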
def load_actions(spec, group=None, expr_parser=None):
    if expr_parser is None:
        expr_parser = ExpressionParser()
    actions = ActionList()
    for name in spec:
        options = {}
        as_ = None
        decorators = []
        if isinstance(name, dict):
            actionspec = dict(name)
            as_ = actionspec.pop("as", None)
            for dec, dec_cls in action_decorators:
                if dec in actionspec:
                    decorators.append((dec_cls, expr_parser.compile(actionspec.pop(dec))))
            name, options = actionspec.popitem()
            if options:
                options = expr_parser.compile(options)
        if isinstance(name, Action):
            action = name
        elif isinstance(name, ActionFunction):
            action = name.action
        else:
            action = action_resolver.resolve_or_delayed(name, options, group, as_)
        for dec_cls, arg in decorators:
            action = dec_cls(action, arg)
        actions.append(action)
    return actions
Each item can be an action name as a string or a dict. When using a dict, one key/value pair must be the action name and its options; the remaining pairs are action decorator names and their options.

Example:

    load_actions(["login_required",
                  {"flash": {"message": "hello world", "label": "warning"}}])
def load_grouped_actions(spec, default_group=None, key_prefix="actions",
                         pop_keys=False, expr_parser=None):
    actions = ActionList()
    if expr_parser is None:
        expr_parser = ExpressionParser()
    # iterate over a copy of the keys so pop_keys can mutate the dict safely
    for key in list(spec.keys()):
        if key != key_prefix and not key.startswith(key_prefix + "."):
            continue
        group = default_group
        if "." in key:
            (_, group) = key.split(".")
        actions.extend(load_actions(spec[key], group, expr_parser))
        if pop_keys:
            spec.pop(key)
    return actions
Instantiates actions from a dict. Will look for a key named key_prefix and for keys starting with key_prefix followed by a dot and a group name. A group name can be any string and can be used later to filter actions. Values associated with these keys should be lists that will be loaded using load_actions()
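A sketch of such a spec, reusing the action names from the load_actions() example above (the "view"/"admin" group names are arbitrary):

spec = {
    "actions": ["login_required"],
    "actions.admin": [{"flash": {"message": "saved", "label": "info"}}],
}
all_actions = load_grouped_actions(spec, default_group="view")
# "login_required" lands in group "view", "flash" in group "admin"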
def create_action_from_dict(name, spec, base_class=ActionsAction, metaclass=type,
                            pop_keys=False):
    actions = load_grouped_actions(spec, pop_keys=pop_keys)
    attrs = {"actions": actions, "name": name}
    if "as" in spec:
        attrs["as_"] = spec["as"]
        if pop_keys:
            del spec["as"]
    for k in ("requires", "methods", "defaults", "default_option"):
        if k in spec:
            attrs[k] = spec[k]
            if pop_keys:
                del spec[k]
    return metaclass(name, (base_class,), attrs)
Creates an action class based on a dict loaded using load_grouped_actions()
def calculate_inverse_document_frequencies(self):
    for doc in self.processed_corpus:
        for word in doc:
            self.inverse_document_frequencies[word] += 1
    for key, value in self.inverse_document_frequencies.iteritems():
        idf = log((1.0 * len(self.corpus)) / value)
        self.inverse_document_frequencies[key] = idf
Q.calculate_inverse_document_frequencies() -- measures how much information the term provides, i.e. whether the term is common or rare across all documents. This is obtained by dividing the total number of documents by the number of documents containing the term, and then taking the logarithm of that quotient.
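A worked example of the quotient-then-logarithm step over a 3-document corpus:

from math import log

# "data" appears in 2 of 3 documents, "rare" in 1 of 3.
print(log(3.0 / 2))  # ~0.405 -- common term, low information
print(log(3.0 / 1))  # ~1.099 -- rare term, high information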
def calculate_term_frequencies(self):
    for doc in self.processed_corpus:
        term_frequency_doc = defaultdict(int)
        for word in doc:
            term_frequency_doc[word] += 1
        for key, value in term_frequency_doc.iteritems():
            term_frequency_doc[key] = (1.0 * value) / len(doc)
        self.term_frequencies.append(term_frequency_doc)
Q.calculate_term_frequencies() -- calculate the frequency of each term t in document d (occurrence count normalized by document length).
def match_query_to_corpus(self):
    ranking = []
    for i, doc in enumerate(self.processed_corpus):
        rank = 0.0
        for word in self.processed_query:
            if word in doc:
                rank += self.term_frequencies[i][word] * \
                    self.inverse_document_frequencies[word]
        ranking.append((rank, i))
    matching_corpus_index = 0
    max_rank = 0
    for rank, index in ranking:
        if rank > max_rank:
            matching_corpus_index = index
            max_rank = rank
    return matching_corpus_index
Q.match_query_to_corpus() -> index -- return the matched corpus index of the user query
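A toy walk-through of the ranking loop above (documents and query are hypothetical):

# doc 0: ["bitcoin", "price"]      doc 1: ["weather", "today"]
# query ["bitcoin", "price"]:
#   rank(0) = tf[0]["bitcoin"] * idf["bitcoin"] + tf[0]["price"] * idf["price"] > 0
#   rank(1) = 0.0  (no overlapping terms)
# so match_query_to_corpus() returns 0.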
def process_corpus(self):
    for doc in self.corpus_list:
        doc = wt(doc)
        sentence = []
        for word in doc:
            if word not in self.stop_words and word not in self.punctuation:
                word = self.stemmer.stem(word)
                sentence.append(word)
        self.processed_corpus.append(sentence)
Q.process_corpus() -- processes the corpus documents by tokenizing, stemming, and removing stop words.
def process_query(self):
    self.query = wt(self.query)
    self.processed_query = []
    for word in self.query:
        if word not in self.stop_words and word not in self.punctuation:
            self.processed_query.append(self.stemmer.stem(word))
Q.process_query() -- processes the user query, by tokenizing and stemming words.
def query(self, query):
    self.query = query
    self.process_query()
    matching_corpus_index = self.match_query_to_corpus()
    return self.category_list[matching_corpus_index].strip()
Q.query(query string) -> category string -- return the matched category for any user query
def load_manifest(raw_manifest, namespace=None, **kwargs):
    if isinstance(raw_manifest, configparser.RawConfigParser):
        return Manifest(raw_manifest)
    manifest = create_configparser()
    if not manifest.has_section('config'):
        manifest.add_section('config')
    _load_manifest_interpret_source(manifest, raw_manifest, **kwargs)
    return Manifest(manifest, namespace=namespace)
wrapper method which generates the manifest from various sources
def _load_manifest_interpret_source(manifest, source, username=None, password=None,
                                    verify_certificate=True, do_inherit=True):
    try:
        if isinstance(source, string_types):
            if source.startswith("http"):
                # if manifest is a url
                _load_manifest_from_url(manifest, source,
                                        verify_certificate=verify_certificate,
                                        username=username, password=password)
            else:
                _load_manifest_from_file(manifest, source)
            if not manifest.has_option('config', 'source'):
                manifest.set('config', 'source', str(source))
        else:
            # assume source is a file pointer
            manifest.readfp(source)
        if manifest.has_option('config', 'extends') and do_inherit:
            parent_manifest = configparser.RawConfigParser()
            _load_manifest_interpret_source(parent_manifest,
                                            manifest.get('config', 'extends'),
                                            username=username,
                                            password=password,
                                            verify_certificate=verify_certificate)
            for s in parent_manifest.sections():
                for k, v in parent_manifest.items(s):
                    if not manifest.has_option(s, k):
                        manifest.set(s, k, v)
    except configparser.Error:
        logger.debug("", exc_info=True)
        error_message = sys.exc_info()[1]
        raise ManifestException("Unable to parse manifest!: {0}".format(error_message))
Interpret the <source>, and load the results into <manifest>
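A sketch of the inheritance mechanism via the file-pointer branch (the "./base.cfg" path is hypothetical):

from io import StringIO

child = StringIO(
    u"[config]\n"
    u"extends = ./base.cfg\n"
    u"\n"
    u"[git]\n"
    u"depends = ssh\n"
)
manifest = load_manifest(child)
# every section/option base.cfg defines that the child does not is merged in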
def _load_manifest_from_url(manifest, url, verify_certificate=True,
                            username=None, password=None):
    try:
        if username and password:
            manifest_file_handler = StringIO(
                lib.authenticated_get(username, password, url,
                                      verify=verify_certificate).decode("utf-8"))
        else:
            manifest_file_handler = StringIO(
                lib.cleaned_request('get', url, verify=verify_certificate).text)
        manifest.readfp(manifest_file_handler)
    except requests.exceptions.RequestException:
        logger.debug("", exc_info=True)
        error_message = sys.exc_info()[1]
        raise ManifestException(
            "There was an error retrieving {0}!\n {1}".format(url, str(error_message)))
load a url body into a manifest
def _load_manifest_from_file(manifest, path):
    path = os.path.abspath(os.path.expanduser(path))
    if not os.path.exists(path):
        raise ManifestException("Manifest does not exist at {0}!".format(path))
    manifest.read(path)
    if not manifest.has_option('config', 'source'):
        manifest.set('config', 'source', str(path))
load manifest from file
def formula_sections(self):
    if self.dtree is not None:
        return self.dtree.order
    else:
        return [s for s in self.manifest.sections() if s != "config"]
Return all sections related to a formula, re-ordered according to the "depends" section.
def is_affirmative(self, section, option):
    return self.has_option(section, option) and \
        lib.is_affirmative(self.get(section, option))
Return true if the section option combo exists and it is set to a truthy value.
def write(self, file_handle):
    for k, v in self.inputs.write_values().items():
        self.set('config', k, v)
    self.set('config', 'namespace', self.namespace)
    self.manifest.write(file_handle)
write the current state to a file manifest
def get_context_dict(self):
    context_dict = {}
    for s in self.sections():
        for k, v in self.manifest.items(s):
            context_dict["%s:%s" % (s, k)] = v
    for k, v in self.inputs.values().items():
        context_dict["config:{0}".format(k)] = v
    context_dict.update(self.additional_context_variables.items())
    context_dict.update(dict([("%s|escaped" % k, re.escape(str(v) or ""))
                              for k, v in context_dict.items()]))
    return context_dict
return a context dict of the desired state
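An illustration of the key scheme produced above (the section, option, and input values are hypothetical):

# Given a manifest section
#   [git]
#   version = 2.39
# and an input "username" answered as "alice", get_context_dict() yields:
#   {"git:version": "2.39",
#    "config:username": "alice",
#    "git:version|escaped": "2\\.39",
#    "config:username|escaped": "alice",
#    ...}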
def get(self, section, key, default=MANIFEST_NULL_KEY):
    if not self.manifest.has_option(section, key) and default is not MANIFEST_NULL_KEY:
        return default
    return self.manifest.get(section, key)
Returns the value if it exists, or default if default is set
def __parse_namespace(self):
    if self.manifest.has_option('config', 'namespace'):
        return self.manifest.get('config', 'namespace')
    elif self.manifest.has_option('config', 'source'):
        return NAMESPACE_REGEX.search(self.manifest.get('config', 'source')).groups()[0]
    else:
        logger.warn('Could not parse namespace implicitly')
        return None
Parse the namespace from various sources
def __generate_dependency_tree(self):
    dependency_dict = {}
    for s in self.manifest.sections():
        if s != "config":
            if self.manifest.has_option(s, 'depends'):
                dependency_list = [d.strip() for d in
                                   re.split('\n|,', self.manifest.get(s, 'depends'))]
                dependency_dict[s] = dependency_list
            else:
                dependency_dict[s] = []
    try:
        return DependencyTree(dependency_dict)
    except DependencyTreeException:
        dte = sys.exc_info()[1]
        raise ManifestException(
            "Dependency tree for manifest is invalid! %s" % str(dte))
Generate the dependency tree object
def __substitute_objects(self, value, context_dict):
    if type(value) == dict:
        return dict([(k, self.__substitute_objects(v, context_dict))
                     for k, v in value.items()])
    elif type(value) == str:
        try:
            return value % context_dict
        except KeyError:
            e = sys.exc_info()[1]
            logger.warn("Could not specialize %s! Error: %s" % (value, e))
            return value
    else:
        return value
recursively substitute value with the context_dict
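The string branch is plain %-formatting against the context dict, for example:

context = {"config:user": "alice"}
print("home of %(config:user)s" % context)  # 'home of alice'
# a key missing from context raises KeyError, which the helper
# catches, logs, and then returns the original string unchanged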
def __setup_inputs(self):
    input_object = Inputs()
    # populate input schemas
    for s in self.manifest.sections():
        if self.has_option(s, 'inputs'):
            input_object.add_inputs_from_inputstring(self.get(s, 'inputs'))
    # add in values
    for k, v in self.items('config'):
        if input_object.is_input(k):  # was is_input(s); s leaked from the loop above
            input_object.set_input(k, v)
    return input_object
Setup the inputs object
def validate(self):
    if self.target:
        for k in self.target.keys():
            if k in self.deprecated_options:
                self.logger.warn(
                    self.deprecated_options[k].format(option=k,
                                                      feature=self.feature_name))
            elif (k not in self.valid_options and
                  k not in self.required_options and
                  '*' not in self.valid_options):
                self.logger.warn("Unused option %s in %s!" % (k, self.feature_name))
        for k in self.required_options:
            if not self.target.has(k):
                self._log_error(
                    "Required option %s not present in feature %s!" %
                    (k, self.feature_name))
validates the feature configuration, and returns a list of errors (empty list if no error)

validate should:
* check required variables
* warn on unused variables

errors should either be reported via self._log_error(), or raise an exception
def should_run(self):
    should_run = True
    config = self.target or self.source
    if config.has('systems'):
        should_run = False
        valid_systems = [s.lower() for s in config.get('systems').split(",")]
        for system_type, param in [('is_osx', 'osx'), ('is_debian', 'debian')]:
            if param in valid_systems and getattr(system, system_type)():
                should_run = True
    return should_run
Returns true if the feature should run
def resolve(self):
    if self.source and self.target:
        for key in self.source.keys():
            if (key not in self.dont_carry_over_options and
                    not self.target.has(key)):
                self.target.set(key, self.source.get(key))
Resolve differences between the target and the source configuration
def _log_error(self, message):
    key = (self.feature_name, self.target.get('formula'))
    self.environment.log_feature_error(key, "ERROR: " + message)
Log an error for the feature
def _prompt_value(self, key, prompt_string, default=None, only_if_empty=True):
    main_manifest = self.target or self.source
    if only_if_empty and main_manifest.has(key):
        return main_manifest.get(key)
    prompt_default = default
    if self.source and self.source.has(key):
        prompt_default = self.source.get(key)
    main_manifest.set(key, lib.prompt(prompt_string, default=prompt_default))
prompts the user for a value, and saves it to either the target or source manifest (whichever is appropriate for the phase)

this method will default to the original value passed by the user in the case one exists. e.g. if a user already answered 'yes' to a question, it will use 'yes' as the default instead of the one passed into this method.
def jinja_fragment_extension(tag, endtag=None, name=None, tag_only=False,
                             allow_args=True, callblock_args=None):
    if endtag is None:
        endtag = "end" + tag

    def decorator(f):
        def parse(self, parser):
            lineno = parser.stream.next().lineno
            args = []
            kwargs = []
            if allow_args:
                args, kwargs = parse_block_signature(parser)
            call = self.call_method("support_method", args, kwargs, lineno=lineno)
            if tag_only:
                return nodes.Output([call], lineno=lineno)
            call_args = []
            if callblock_args is not None:
                for arg in callblock_args:
                    call_args.append(nodes.Name(arg, 'param', lineno=lineno))
            body = parser.parse_statements(['name:' + endtag], drop_needle=True)
            return nodes.CallBlock(call, call_args, [], body, lineno=lineno)

        def support_method(self, *args, **kwargs):
            return f(*args, **kwargs)

        attrs = {"tags": set([tag]),
                 "parse": parse,
                 "support_method": support_method}
        return type(name or f.__name__, (Extension,), attrs)
    return decorator
Decorator to easily create a jinja extension which acts as a fragment.
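A minimal usage sketch, assuming standard Jinja2 CallBlock semantics (the wrapped body arrives as the `caller` keyword argument; the "card" tag is hypothetical):

@jinja_fragment_extension("card")
def card(title, caller=None):
    return u'<div class="card"><h4>{0}</h4>{1}</div>'.format(title, caller())

# env = jinja2.Environment(extensions=[card])
# template: {% card "Greetings" %}Hello!{% endcard %}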
def jinja_block_as_fragment_extension(name, tagname=None, classname=None):
    if tagname is None:
        tagname = name
    if classname is None:
        classname = "%sBlockFragmentExtension" % name.capitalize()
    return type(classname, (BaseJinjaBlockAsFragmentExtension,), {
        "tags": set([tagname]),
        "end_tag": "end" + tagname,
        "block_name": name})
Creates a fragment extension which will just act as a replacement of the block statement.
def dir_visitor(dirname, visitor):
    visitor(dirname)
    for obj in os.listdir(dirname):
        obj_path = os.path.join(dirname, obj)
        if os.path.isdir(obj_path):
            dir_visitor(obj_path, visitor)
_dir_visitor_ walk through all files in dirname, find directories and call the callable on them. :param dirname: Name of directory to start visiting, all subdirs will be visited :param visitor: Callable invoked on each dir visited
def replicate_directory_tree(input_dir, output_dir):
    def transplant_dir(target, dirname):
        x = dirname.replace(input_dir, target)
        if not os.path.exists(x):
            LOGGER.info('Creating: {}'.format(x))
            os.makedirs(x)

    dir_visitor(
        input_dir,
        functools.partial(transplant_dir, output_dir)
    )
_replicate_directory_tree_ clone dir structure under input_dir into output dir All subdirs beneath input_dir will be created under output_dir :param input_dir: path to dir tree to be cloned :param output_dir: path to new dir where dir structure will be created
def find_templates(input_dir):
    templates = []

    def template_finder(result, dirname):
        for obj in os.listdir(dirname):
            if obj.endswith('.mustache'):
                result.append(os.path.join(dirname, obj))

    dir_visitor(
        input_dir,
        functools.partial(template_finder, templates)
    )
    return templates
_find_templates_ traverse the input_dir structure and return a list of template files ending with .mustache :param input_dir: Path to start recursive search for mustache templates :returns: List of file paths corresponding to templates
def find_copies(input_dir, exclude_list):
    copies = []

    def copy_finder(copies, dirname):
        for obj in os.listdir(dirname):
            pathname = os.path.join(dirname, obj)
            if os.path.isdir(pathname):
                continue
            if obj in exclude_list:
                continue
            if obj.endswith('.mustache'):
                continue
            copies.append(os.path.join(dirname, obj))

    dir_visitor(
        input_dir,
        functools.partial(copy_finder, copies)
    )
    return copies
find files that are not templates and not in the exclude_list for copying from template to image
def render_template(template_in, file_out, context):
    renderer = pystache.Renderer()
    result = renderer.render_path(template_in, context)
    with open(file_out, 'w') as handle:
        LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
        handle.write(result)
    shutil.copymode(template_in, file_out)
_render_template_ Render a single template file, using the context provided and write the file out to the location specified #TODO: verify the template is completely rendered, no missing values
def copy_file(src, target):
    LOGGER.info("Copying {} to {}".format(src, target))
    shutil.copyfile(src, target)
    shutil.copymode(src, target)
copy_file copy source to target
def process_templates(input_dir, target_dir, context):
    if not target_dir.endswith('/'):
        target_dir = "{}/".format(target_dir)
    if not os.path.exists(target_dir):
        LOGGER.info('Creating: {}'.format(target_dir))
        os.makedirs(target_dir)
    replicate_directory_tree(input_dir, target_dir)
    templates = find_templates(input_dir)
    for templ in templates:
        output_file = templ.replace(input_dir, target_dir)
        output_file = output_file[:-len('.mustache')]
        render_template(templ, output_file, context)
_process_templates_ Given the input dir containing a set of template, clone the structure under that directory into the target dir using the context to process any mustache templates that are encountered
def process_copies(input_dir, target_dir, excludes):
    copies = find_copies(input_dir, excludes)
    for c in copies:
        output_file = c.replace(input_dir, target_dir)
        copy_file(c, output_file)
_process_copies_ Handles files to be copied across, assumes that dir structure has already been replicated
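A hypothetical end-to-end invocation of the two entry points above (the paths, context values, and exclude list are assumptions):

context = {"project": "demo", "owner": "alice"}
process_templates("./skel", "./build", context)   # render every *.mustache
process_copies("./skel", "./build", ["secrets.txt"])  # copy the plain files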
def newDevice(deviceJson, lupusec):
    type_tag = deviceJson.get('type')
    if not type_tag:
        _LOGGER.info('Device has no type')
    if type_tag in CONST.TYPE_OPENING:
        return LupusecBinarySensor(deviceJson, lupusec)
    elif type_tag in CONST.TYPE_SENSOR:
        return LupusecBinarySensor(deviceJson, lupusec)
    elif type_tag in CONST.TYPE_SWITCH:
        return LupusecSwitch(deviceJson, lupusec)
    else:
        _LOGGER.info('Device is not known')
        return None
Create new device object for the given type.
def get_devices(self, refresh=False, generic_type=None):
    _LOGGER.info("Updating all devices...")
    if refresh or self._devices is None:
        if self._devices is None:
            self._devices = {}

        responseObject = self.get_sensors()
        if (responseObject and
                not isinstance(responseObject, (tuple, list))):
            responseObject = [responseObject]  # wrap a single device dict in a list

        for deviceJson in responseObject:
            # Attempt to reuse an existing device
            device = self._devices.get(deviceJson['name'])

            # No existing device, create a new one
            if device:
                device.update(deviceJson)
            else:
                device = newDevice(deviceJson, self)
                if not device:
                    _LOGGER.info('Device is unknown')
                    continue
                self._devices[device.device_id] = device

        # We will be treating the Lupusec panel itself as an armable device.
        panelJson = self.get_panel()
        _LOGGER.debug("Get the panel in get_devices: %s", panelJson)
        self._panel.update(panelJson)

        alarmDevice = self._devices.get('0')
        if alarmDevice:
            alarmDevice.update(panelJson)
        else:
            alarmDevice = ALARM.create_alarm(panelJson, self)
            self._devices['0'] = alarmDevice

        # Now we will handle the power switches
        switches = self.get_power_switches()
        _LOGGER.debug('Get active the power switches in get_devices: %s', switches)

        for deviceJson in switches:
            # Attempt to reuse an existing device
            device = self._devices.get(deviceJson['name'])

            # No existing device, create a new one
            if device:
                device.update(deviceJson)
            else:
                device = newDevice(deviceJson, self)
                if not device:
                    _LOGGER.info('Device is unknown')
                    continue
                self._devices[device.device_id] = device

    if generic_type:
        devices = []
        for device in self._devices.values():
            if (device.type is not None and
                    device.type in generic_type[0]):
                devices.append(device)
        return devices

    return list(self._devices.values())
Get all devices from Lupusec.
def parse_from_dict(json_dict):
    history_columns = json_dict['columns']
    history_list = MarketHistoryList(
        upload_keys=json_dict['uploadKeys'],
        history_generator=json_dict['generator'],
    )
    for rowset in json_dict['rowsets']:
        generated_at = parse_datetime(rowset['generatedAt'])
        region_id = rowset['regionID']
        type_id = rowset['typeID']
        history_list.set_empty_region(region_id, type_id, generated_at)
        for row in rowset['rows']:
            history_kwargs = _columns_to_kwargs(
                SPEC_TO_KWARG_CONVERSION, history_columns, row)
            historical_date = parse_datetime(history_kwargs['historical_date'])
            history_kwargs.update({
                'type_id': type_id,
                'region_id': region_id,
                'historical_date': historical_date,
                'generated_at': generated_at,
            })
            history_list.add_entry(MarketHistoryEntry(**history_kwargs))
    return history_list
Given a Unified Uploader message, parse the contents and return a MarketHistoryList instance.

:param dict json_dict: A Unified Uploader message as a dict.
:rtype: MarketHistoryList
:returns: An instance of MarketHistoryList, containing the history entries within.
def encode_to_json(history_list):
    rowsets = []
    for items_in_region_list in history_list._history.values():
        region_id = items_in_region_list.region_id
        type_id = items_in_region_list.type_id
        generated_at = gen_iso_datetime_str(items_in_region_list.generated_at)
        rows = []
        for entry in items_in_region_list.entries:
            historical_date = gen_iso_datetime_str(entry.historical_date)
            # The order in which these values are added is crucial. It must
            # match STANDARD_ENCODED_COLUMNS.
            rows.append([
                historical_date,
                entry.num_orders,
                entry.total_quantity,
                entry.low_price,
                entry.high_price,
                entry.average_price,
            ])
        rowsets.append(dict(
            generatedAt=generated_at,
            regionID=region_id,
            typeID=type_id,
            rows=rows,
        ))
    json_dict = {
        'resultType': 'history',
        'version': '0.1',
        'uploadKeys': history_list.upload_keys,
        'generator': history_list.history_generator,
        'currentTime': gen_iso_datetime_str(now_dtime_in_utc()),
        # This must match the order of the values in the row assembling portion
        # above this.
        'columns': STANDARD_ENCODED_COLUMNS,
        'rowsets': rowsets,
    }
    return json.dumps(json_dict)
Encodes this MarketHistoryList instance to a JSON string. :param MarketHistoryList history_list: The history instance to serialize. :rtype: str
def load(self, configuration):
    try:
        self.config = yaml.load(open(configuration, "rb"))
    except IOError:
        try:
            self.config = yaml.load(configuration)
        except ParserError, e:
            raise ParserError('Error parsing config: %s' % e)

    # put customer data into self.customer
    if isinstance(self.config, dict):
        self.customer = self.config.get('customer', {})
        self.instances_dict = self.config.get('instances', {})
        self.web2py_dir = self.config.get('web2py', None)
        self.api_type = self.config.get('api_type', 'jsonrpc')
        self.valid = True
    else:
        self.customer = {}
        self.instances_dict = {}
        self.web2py_dir = None
        self.valid = False
Load a YAML configuration file. :param configuration: Configuration filename or YAML string
def instances(self, test_type=".*"): import re data = {} for k, v in self.instances_dict.iteritems(): if re.match(test_type, v.get('test_type'), re.IGNORECASE): if 'filter_type' in v: hostfilter = { 'filtertype': v['filter_type'], 'content': v['filter_value'] } else: hostfilter = {} data[k] = { 'name': v.get('name'), 'start': v.get('start'), 'end': v.get('end'), 'url': v.get('url'), 'hostfilter': hostfilter, 'test_type': v.get('test_type') } return data
Returns a dict of all instances defined using a regex :param test_type: Regular expression to match for self.instance['test_type'] value names
def none_to_blank(s, exchange=''):
    if isinstance(s, list):
        return [none_to_blank(z) for y, z in enumerate(s)]
    return exchange if s is None else unicode(s)
Replaces NoneType with ''

>>> none_to_blank(None, '')
''
>>> none_to_blank(None)
''
>>> none_to_blank('something', '')
u'something'
>>> none_to_blank(['1', None])
[u'1', '']

:param s: String to replace
:param exchange: Character to return for None, default is blank ('')
:return: If s is None, returns exchange
def make_good_url(url=None, addition="/"): if url is None: return None if isinstance(url, str) and isinstance(addition, str): return "%s/%s" % (url.rstrip('/'), addition.lstrip('/')) else: return None
Appends addition to url, ensuring the right number of slashes exist and the path doesn't get clobbered. >>> make_good_url('http://www.server.com/anywhere', 'else') 'http://www.server.com/anywhere/else' >>> make_good_url('http://test.com/', '/somewhere/over/the/rainbow/') 'http://test.com/somewhere/over/the/rainbow/' >>> make_good_url('None') 'None/' >>> make_good_url() >>> make_good_url({}) >>> make_good_url(addition='{}') :param url: URL :param addition: Something to add to the URL :return: New URL with addition
def build_kvasir_url(
        proto="https", server="localhost", port="8443",
        base="Kvasir", user="test", password="test",
        path=KVASIR_JSONRPC_PATH):
    uri = proto + '://' + user + '@' + password + '/' + server + ':' + port + '/' + base
    return make_good_url(uri, path)
Creates a full URL to reach Kvasir given specific data

>>> build_kvasir_url('https', 'localhost', '8443', 'Kvasir', 'test', 'test')
'https://test@test/localhost:8443/Kvasir/api/call/jsonrpc'
>>> build_kvasir_url()
'https://test@test/localhost:8443/Kvasir/api/call/jsonrpc'
>>> build_kvasir_url(server='localhost', port='443', password='password', path='bad/path')
'https://test@password/localhost:443/Kvasir/bad/path'

:param proto: Protocol type - http or https
:param server: Hostname or IP address of Web2py server
:param port: Port to reach server
:param base: Base application name
:param user: Username for basic auth
:param password: Password for basic auth
:param path: Full path to JSONRPC (/api/call/jsonrpc)
:return: A full URL that can reach Kvasir's JSONRPC interface
def get_default(parser, section, option, default):
    try:
        result = parser.get(section, option)
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        result = default
    return result
helper to get config settings with a default if not present
def set_db_application_prefix(prefix, sep=None):
    global _APPLICATION_PREFIX, _APPLICATION_SEP
    _APPLICATION_PREFIX = prefix
    if sep is not None:
        _APPLICATION_SEP = sep
Set the global app prefix and separator.
def find_by_index(self, cls, index_name, value):
    return self.backend.find_by_index(cls, index_name, value)
Find records matching index query - defer to backend.
def humanTime(seconds):
    '''
    Convert seconds to something more human-friendly
    '''
    intervals = ['days', 'hours', 'minutes', 'seconds']
    x = deltaTime(seconds=seconds)
    return ' '.join('{} {}'.format(getattr(x, k), k)
                    for k in intervals if getattr(x, k))
Convert seconds to something more human-friendly
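Assuming deltaTime returns a relativedelta-like object with days/hours/minutes/seconds attributes, the output looks like:

>>> humanTime(93784)   # 1 day + 2 hours + 3 minutes + 4 seconds
'1 days 2 hours 3 minutes 4 seconds'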
def humanTimeConverter():
    '''
    Cope whether we're passed a time in seconds on the command line or via stdin
    '''
    if len(sys.argv) == 2:
        print humanFriendlyTime(seconds=int(sys.argv[1]))
    else:
        for line in sys.stdin:
            print humanFriendlyTime(int(line))
    sys.exit(0)
Cope whether we're passed a time in seconds on the command line or via stdin
def train(self, data, **kwargs):
    self.data = data
    for i in xrange(0, data.shape[1]):
        column_mean = np.mean(data.icol(i))
        column_stdev = np.std(data.icol(i))
        # Have to do += or "list" type will fail (ie with append)
        self.column_means += [column_mean]
        self.column_stdevs += [column_stdev]
    self.data = self.predict(data)
Calculate the standard deviations and means in the training data
def predict(self, test_data, **kwargs):
    if test_data.shape[1] != self.data.shape[1]:
        raise Exception("Test data has different number of columns "
                        "than training data.")
    for i in xrange(0, test_data.shape[1]):
        test_data.loc[:, i] = test_data.icol(i) - self.column_means[i]
        if int(self.column_stdevs[i]) != 0:
            test_data.loc[:, i] = test_data.icol(i) / self.column_stdevs[i]
    return test_data
Adjust new input by the values in the training data
def action_decorator(name):
    def decorator(cls):
        action_decorators.append((name, cls))
        return cls
    return decorator
Decorator to register an action decorator
def load_global_config(config_path):
    config = configparser.RawConfigParser()
    if os.path.exists(config_path):
        logger.debug("Checking and setting global parameters...")
        config.read(config_path)
    else:
        _initial_run()
        logger.info("Unable to find a global sprinter configuration!")
        logger.info("Creating one now. Please answer some questions" +
                    " about what you would like sprinter to do.")
        logger.info("")
    # checks and sets sections
    if not config.has_section('global'):
        config.add_section('global')
    configure_config(config)
    write_config(config, config_path)
    return config
Load a global configuration object, and query for any required variables along the way
def print_global_config(global_config):
    if global_config.has_section('shell'):
        print("\nShell configurations:")
        for shell_type, set_value in global_config.items('shell'):
            print("{0}: {1}".format(shell_type, set_value))
    if global_config.has_option('global', 'env_source_rc'):
        print("\nHave sprinter env source rc: {0}".format(
            global_config.get('global', 'env_source_rc')))
print the global configuration
def create_default_config():
    config = configparser.RawConfigParser()
    config.add_section('global')
    config.set('global', 'env_source_rc', False)
    config.add_section('shell')
    config.set('shell', 'bash', "true")
    config.set('shell', 'zsh', "true")
    config.set('shell', 'gui', "true")
    return config
Create a default configuration object, with all parameters filled
def _initial_run():
    if not system.is_officially_supported():
        logger.warn(
            warning_template +
            "===========================================================\n" +
            "Sprinter is not officially supported on {0}! Please use at your own risk.\n\n".format(system.operating_system()) +
            "You can find the supported platforms here:\n" +
            "(http://sprinter.readthedocs.org/en/latest/index.html#compatible-systems)\n\n" +
            "Conversely, please help us support your system by reporting on issues\n" +
            "(http://sprinter.readthedocs.org/en/latest/faq.html#i-need-help-who-do-i-talk-to)\n" +
            "===========================================================")
    else:
        logger.info(
            "\nThanks for using \n" +
            "=" * 60 +
            sprinter_template +
            "=" * 60
        )
Check things during the initial setting of sprinter's global config
def _configure_shell(config):
    config.has_section('shell') or config.add_section('shell')
    logger.info(
        "What shells or environments would you like sprinter to work with?\n"
        "(Sprinter will not try to inject into environments not specified here.)\n"
        "If you specify 'gui', sprinter will attempt to inject its state into graphical programs as well,\n"
        "i.e. environment variables sprinter sets will affect GUI programs too, not just shells.\n"
        "WARNING: injecting into the GUI can be very dangerous. It usually requires a restart\n"
        "         to modify any environmental configuration."
    )
    environments = list(enumerate(sorted(SHELL_CONFIG), start=1))
    logger.info("[0]: All, " + ", ".join(["[%d]: %s" % (index, val)
                                          for index, val in environments]))
    desired_environments = lib.prompt("type the environment, comma-separated",
                                      default="0")
    for index, val in environments:
        if str(index) in desired_environments or "0" in desired_environments:
            config.set('shell', val, 'true')
        else:
            config.set('shell', val, 'false')
Checks and queries values for the shell