Dataset columns (min/max taken from the viewer header):

column         dtype     min    max
text           string    75     104k    (length in characters)
code_tokens    sequence  -      -       (token-level split of text, docstrings dropped)
avg_line_len   float64   7.91   980
score          float64   0      0.18
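The numeric columns appear to be derived from text. Below is a minimal sketch of plausible definitions; both are assumptions, since the dump does not state them: avg_line_len is taken as the mean character count per source line, and code_tokens as a lexer-level token list. The formula behind score is not given here, so it is not reproduced.

import io
import tokenize

def avg_line_len(code: str) -> float:
    """Mean character count per source line (assumed definition)."""
    lines = code.splitlines()
    return sum(len(line) for line in lines) / len(lines) if lines else 0.0

def code_tokens(code: str) -> list:
    """Lexer-level token strings, skipping pure layout tokens.

    The dataset's own tokenizer additionally strips docstrings, which this
    sketch does not attempt to reproduce.
    """
    skip = (tokenize.NEWLINE, tokenize.NL, tokenize.INDENT,
            tokenize.DEDENT, tokenize.ENDMARKER)
    toks = tokenize.generate_tokens(io.StringIO(code).readline)
    return [tok.string for tok in toks if tok.type not in skip]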
text:

def raw_row_generator(self):
    """Like rowgenerator, but does not try to create a row processor table"""
    from rowgenerators import get_generator

    self.doc.set_sys_path()  # Set sys path to package 'lib' dir in case of python function generator

    ru = self.resolved_url

    try:
        resource = ru.resource  # For Metapack urls
        return resource.row_generator
    except AttributeError:
        pass

    ut = ru.get_resource().get_target()

    # Encoding is supposed to be preserved in the URL but isn't
    source_url = parse_app_url(self.url)

    # source_url will be None for Sql terms.
    ut.encoding = self.get_value('encoding') or (source_url.encoding if source_url else None)

    g = get_generator(ut, resource=self, doc=self._doc,
                      working_dir=self._doc.doc_dir, env=self.env)

    assert g, ut

    return g

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 30.129032
score: 0.006224
text:

def push_images():
    # type: () -> None
    """ Push all project docker images to a remote registry. """
    registry = conf.get('docker.registry')
    docker_images = conf.get('docker.images', [])

    if registry is None:
        log.err("You must define docker.registry conf variable to push images")
        sys.exit(-1)

    for image in docker_images:
        push_image(registry, image)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 32
score: 0.002532
text:

def import_attribute(name):
    """ Return an attribute from a dotted path name (e.g. "path.to.func").

    Copied from nvie's rq
    https://github.com/nvie/rq/blob/master/rq/utils.py
    """
    if hasattr(name, '__call__'):
        return name

    module_name, attribute = name.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, attribute)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 37.3
score: 0.002618
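Based purely on the definition above, a call resolves a dotted path at import time and passes callables through unchanged; a minimal usage sketch:

import importlib  # required by import_attribute

sqrt = import_attribute('math.sqrt')   # imports `math`, returns math.sqrt
print(sqrt(9.0))                       # -> 3.0

# Objects that are already callable are returned as-is:
assert import_attribute(sqrt) is sqrt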
text:

def countedArray( expr, intExpr=None ):
    """Helper to define a counted list of expressions.
       This helper defines a pattern of the form::
           integer expr expr expr...
       where the leading integer tells how many expr expressions follow.
       The matched tokens returns the array of expr tokens as a list - the
       leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        n = t[0]
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        return []
    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    else:
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return ( intExpr + arrayExpr )

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 42.421053
score: 0.01335
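Since this is pyparsing's counted-list helper, a short usage sketch, assuming pyparsing's Word and alphas are in scope as they are in the module this was taken from:

from pyparsing import Word, alphas

# "3 ab cd ef" parses as a group of exactly three words; the leading
# count token is consumed but suppressed from the results.
item = Word(alphas)
print(countedArray(item).parseString("3 ab cd ef"))  # -> [['ab', 'cd', 'ef']]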
text:

def get_projects(self, **kwargs):
    """Get a user's project.

    :param str login: User's login (Default: self._login)

    :return: JSON
    """
    _login = kwargs.get('login', self._login)
    search_url = SEARCH_URL.format(login=_login)
    return self._request_api(url=search_url).json()

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 31.4
score: 0.006192
text:

def auto_watering(self):
    """Return if zone is configured to automatic watering."""
    value = "zone{}".format(self.id)
    return find_program_status(self._parent.html['home'], value)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 49.25
score: 0.01
text:

def validate_tempi(tempi, reference=True):
    """Checks that there are two non-negative tempi.
    For a reference value, at least one tempo has to be greater than zero.

    Parameters
    ----------
    tempi : np.ndarray
        length-2 array of tempo, in bpm
    reference : bool
        indicates a reference value
    """
    if tempi.size != 2:
        raise ValueError('tempi must have exactly two values')

    if not np.all(np.isfinite(tempi)) or np.any(tempi < 0):
        raise ValueError('tempi={} must be non-negative numbers'.format(tempi))

    if reference and np.all(tempi == 0):
        raise ValueError('reference tempi={} must have one'
                         ' value greater than zero'.format(tempi))

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 30.73913
score: 0.001372
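A usage sketch grounded in the checks above (numpy as np, as the function assumes):

import numpy as np

validate_tempi(np.array([60.0, 120.0]))      # two finite, non-negative values: passes
try:
    validate_tempi(np.array([0.0, 0.0]))     # a reference needs one tempo > 0
except ValueError as err:
    print(err)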
text:

def _contentful_user_agent(self):
    """
    Sets the X-Contentful-User-Agent header.
    """
    header = {}
    from . import __version__
    header['sdk'] = {
        'name': 'contentful-management.py',
        'version': __version__
    }
    header['app'] = {
        'name': self.application_name,
        'version': self.application_version
    }
    header['integration'] = {
        'name': self.integration_name,
        'version': self.integration_version
    }
    header['platform'] = {
        'name': 'python',
        'version': platform.python_version()
    }

    os_name = platform.system()
    if os_name == 'Darwin':
        os_name = 'macOS'
    elif not os_name or os_name == 'Java':
        os_name = None
    elif os_name and os_name not in ['macOS', 'Windows']:
        os_name = 'Linux'
    header['os'] = {
        'name': os_name,
        'version': platform.release()
    }

    def format_header(key, values):
        header = "{0} {1}".format(key, values['name'])
        if values['version'] is not None:
            header = "{0}/{1}".format(header, values['version'])
        return "{0};".format(header)

    result = []
    for k, values in header.items():
        if not values['name']:
            continue
        result.append(format_header(k, values))
    return ' '.join(result)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 30.166667
score: 0.001338
text:

def create(self, request, *args, **kwargs):
    """
    Run **POST** against */api/alerts/* to create or update alert.
    If alert with posted scope and alert_type already exists - it will be updated.

    Only users with staff privileges can create alerts.

    Request example:

    .. code-block:: javascript

        POST /api/alerts/
        Accept: application/json
        Content-Type: application/json
        Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
        Host: example.com

        {
            "scope": "http://testserver/api/projects/b9e8a102b5ff4469b9ac03253fae4b95/",
            "message": "message#1",
            "alert_type": "first_alert",
            "severity": "Debug"
        }
    """
    return super(AlertViewSet, self).create(request, *args, **kwargs)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 36.913043
score: 0.005741
text:

def Floor(input_vertex: vertex_constructor_param_types,
          label: Optional[str]=None) -> Vertex:
    """
    Applies the Floor operator to a vertex.
    This maps a vertex to the biggest integer less than or equal to its value

    :param input_vertex: the vertex to be floor'd
    """
    return Double(context.jvm_view().FloorVertex, label,
                  cast_to_double_vertex(input_vertex))

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 46.625
score: 0.015789
text:

def timeid(self, data: ['SASdata', str] = None,
           by: str = None,
           id: str = None,
           out: [str, 'SASdata'] = None,
           procopts: str = None,
           stmtpassthrough: str = None,
           **kwargs: dict) -> 'SASresults':
    """
    Python method to call the TIMEID procedure

    Documentation link:
    http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_timeid_syntax.htm

    :param data: SASdata object or string. This parameter is required.
    :param by: The by variable can only be a string type.
    :param id: The id variable can only be a string type.
    :param out: The out variable can be a string or SASdata type.
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """

code_tokens: (token-level split of the signature above, docstring dropped)
avg_line_len: 49.714286
score: 0.010338
text:

def yoffset(self, value):
    """gets/sets the yoffset"""
    if self._yoffset != value and \
            isinstance(value, (int, float, long)):
        self._yoffset = value

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 36.2
score: 0.010811
text:

def exec_cluster(config_file, cmd, docker, screen, tmux, stop, start,
                 override_cluster_name, port_forward):
    """Runs a command on the specified cluster.

    Arguments:
        config_file: path to the cluster yaml
        cmd: command to run
        docker: whether to run command in docker container of config
        screen: whether to run in a screen
        tmux: whether to run in a tmux session
        stop: whether to stop the cluster after command run
        start: whether to start the cluster if it isn't up
        override_cluster_name: set the name of the cluster
        port_forward: port to forward
    """
    assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."

    config = yaml.load(open(config_file).read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)

    head_node = _get_head_node(
        config, config_file, override_cluster_name, create_if_needed=start)
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        updater = NodeUpdaterThread(
            node_id=head_node,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=[],
            setup_commands=[],
            runtime_hash="",
        )

        def wrap_docker(command):
            container_name = config["docker"]["container_name"]
            if not container_name:
                raise ValueError("Docker container not specified in config.")
            return with_docker_exec(
                [command], container_name=container_name)[0]

        cmd = wrap_docker(cmd) if docker else cmd

        if stop:
            shutdown_cmd = (
                "ray stop; ray teardown ~/ray_bootstrap_config.yaml "
                "--yes --workers-only")
            if docker:
                shutdown_cmd = wrap_docker(shutdown_cmd)
            cmd += ("; {}; sudo shutdown -h now".format(shutdown_cmd))

        _exec(
            updater,
            cmd,
            screen,
            tmux,
            expect_error=stop,
            port_forward=port_forward)

        if tmux or screen:
            attach_command_parts = ["ray attach", config_file]
            if override_cluster_name is not None:
                attach_command_parts.append(
                    "--cluster-name={}".format(override_cluster_name))
            if tmux:
                attach_command_parts.append("--tmux")
            elif screen:
                attach_command_parts.append("--screen")

            attach_command = " ".join(attach_command_parts)
            attach_info = "Use `{}` to check on command status.".format(
                attach_command)
            logger.info(attach_info)
    finally:
        provider.cleanup()

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 36.35
score: 0.000335
text:

def swap(self):
    '''Swap stereo channels. If the input is not stereo, pairs of channels
    are swapped, and a possible odd last channel passed through. E.g., for
    seven channels, the output order will be 2, 1, 4, 3, 6, 5, 7.

    See Also
    ----------
    remix

    '''
    effect_args = ['swap']
    self.effects.extend(effect_args)
    self.effects_log.append('swap')

    return self

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 26.625
score: 0.004535
text:

def set_sensor_thresholds(self, sensor_number, lun=0,
                          unr=None, ucr=None, unc=None,
                          lnc=None, lcr=None, lnr=None):
    """Set the sensor thresholds that are not 'None'

    `sensor_number`
    `unr` for upper non-recoverable
    `ucr` for upper critical
    `unc` for upper non-critical
    `lnc` for lower non-critical
    `lcr` for lower critical
    `lnr` for lower non-recoverable
    """
    req = create_request_by_name('SetSensorThresholds')
    req.sensor_number = sensor_number
    req.lun = lun

    thresholds = dict(unr=unr, ucr=ucr, unc=unc, lnc=lnc, lcr=lcr, lnr=lnr)
    for key, value in thresholds.items():
        if value is not None:
            setattr(req.set_mask, key, 1)
            setattr(req.threshold, key, value)

    rsp = self.send_message(req)
    check_completion_code(rsp.completion_code)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 35.961538
score: 0.004167
text:

def __get_features_for_observation(self, data_frame=None, observation='LA-LL',
                                   skip_id=None, last_column_is_id=False):
    """
    Extract the features for a given observation from a data frame

    :param data_frame: data frame to get features from
    :type data_frame: pandas.DataFrame
    :param observation: observation name
    :type observation: string
    :param skip_id: skip any test with a given id (optional)
    :type skip_id: int
    :param last_column_is_id: skip the last column of the data frame (useful when id is last column - optional)
    :type last_column_is_id: bool
    :return features: the features
    :rtype features: np.array
    """
    try:
        features = np.array([])
        if data_frame is None:
            data_frame = self.data_frame
        for index, row in data_frame.iterrows():
            if not skip_id == row['id']:
                features_row = np.nan_to_num(row[row.keys().str.contains(observation)].values)
                features_row = np.append(features_row, row['id'])
                features = np.vstack([features, features_row]) if features.size else features_row

        # not the same when getting a single point
        if last_column_is_id:
            if np.ndim(features) > 1:
                to_return = features[:, :-1]
            else:
                to_return = features[:-1]
        else:
            to_return = features

        return to_return, data_frame['id'].values
    except:
        logging.error(" observation not found in data frame")

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 42.45
score: 0.004606
text:

def pow2_quantized_convolution(inp, outmaps, kernel,
                               pad=None, stride=None, dilation=None, group=1,
                               w_init=None, b_init=None,
                               base_axis=1, fix_parameters=False, rng=None,
                               with_bias=True,
                               quantize_w=True, with_zero_w=False, sign_w=True,
                               n_w=8, m_w=2, ste_fine_grained_w=True,
                               quantize_b=True, with_zero_b=False, sign_b=True,
                               n_b=8, m_b=2, ste_fine_grained_b=True):
    """Pow2 Quantized Convolution.

    Pow2 Quantized Convolution is the convolution function, except the
    definition of the inner product is modified. The input-output relation
    of this function is as follows:

    .. math::

        y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},

    where :math:`Q(w_{n, m, i, j})` is the power-of-2 quantization function.

    .. note::

        1) if you would like to share weights between some layers, please
        make sure to share the standard, floating value weights (`weight`)
        and not the quantized weights (`quantized weight`)

        2) The weights and the quantized weights become synced only after
        :func:`~nnabla._variable.Variable.forward` is called, and not after
        a call to :func:`~nnabla._variable.Variable.backward`. To access the
        parameters of the network, remember to call
        :func:`~nnabla._variable.Variable.forward` once before doing so,
        otherwise the float weights and the quantized weights will not be
        in sync.

        3) Quantized values are stored as floating point number for
        `quantized weight`, since this function is only for simulation
        purposes.

    Args:
        inp (~nnabla.Variable): N-D array.
        outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
        pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
        stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
        dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
        group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
        w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
        b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.
        quantize_w (bool): Quantize weights if `True`.
        sign_w (bool): Use signed quantization if `True`.
        n_w (int): Bit width used for weight.
        m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
        ste_fine_grained_w (bool): STE is fine-grained if `True`.
        quantize_b (bool): Quantize bias if `True`.
        sign_b (bool): Use signed quantization if `True`.
        n_b (int): Bit width used for bias.
        m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
        ste_fine_grained_b (bool): STE is fine-grained if `True`.

    Returns:
        :class:`~nnabla.Variable`: N-D array.

    """
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps,
                                    tuple(kernel)), rng=rng)
    if with_bias and b_init is None:
        b_init = ConstantInitializer()

    # Floating Weight
    w = get_parameter_or_create(
        "W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
        w_init, True, not fix_parameters)

    # Quantized Weight
    if quantize_w:
        w_q = get_parameter_or_create(
            "W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
            w_init, False)
        # Link computation graph
        real_w_q = F.pow2_quantize(w, quantize=quantize_w,
                                   sign=sign_w, with_zero=with_zero_w,
                                   n=n_w, m=m_w,
                                   ste_fine_grained=ste_fine_grained_w,
                                   outputs=[w_q.data])
        real_w_q.persistent = True
    else:
        real_w_q = w

    # Bias
    # Floating
    b = None
    b_q = None
    real_b_q = None
    if with_bias:
        b = get_parameter_or_create(
            "b", (outmaps,), b_init, True, not fix_parameters)
        if quantize_b:
            b_q = get_parameter_or_create(
                "b_q", (outmaps,), b_init, False)
            # Link computation graph
            real_b_q = F.pow2_quantize(b, quantize=quantize_b,
                                       sign=sign_b, with_zero=with_zero_b,
                                       n=n_b, m=m_b,
                                       ste_fine_grained=ste_fine_grained_b,
                                       outputs=[b_q.data])
            real_b_q.persistent = True
        else:
            real_b_q = b

    return F.convolution(inp, real_w_q, real_b_q, base_axis,
                         pad, stride, dilation, group)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 51.899083
score: 0.004337
text:

def descr_prototype(self, buf):
    """
    Describe the prototype ("head") of the function.
    """
    state = "define" if self.blocks else "declare"
    ret = self.return_value
    args = ", ".join(str(a) for a in self.args)
    name = self.get_reference()
    attrs = self.attributes
    if any(self.args):
        vararg = ', ...' if self.ftype.var_arg else ''
    else:
        vararg = '...' if self.ftype.var_arg else ''
    linkage = self.linkage
    cconv = self.calling_convention
    prefix = " ".join(str(x) for x in [state, linkage, cconv, ret] if x)
    metadata = self._stringify_metadata()
    prototype = "{prefix} {name}({args}{vararg}) {attrs}{metadata}\n".format(
        prefix=prefix, name=name, args=args, vararg=vararg, attrs=attrs,
        metadata=metadata)
    buf.append(prototype)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 41.428571
score: 0.003371
text:

def _get_first_import(node, context, name, base, level, alias):
    """return the node where [base.]<name> is imported or None if not found
    """
    fullname = "%s.%s" % (base, name) if base else name

    first = None
    found = False
    for first in context.body:
        if first is node:
            continue
        if first.scope() is node.scope() and first.fromlineno > node.fromlineno:
            continue
        if isinstance(first, astroid.Import):
            if any(fullname == iname[0] for iname in first.names):
                found = True
                break
        elif isinstance(first, astroid.ImportFrom):
            if level == first.level:
                for imported_name, imported_alias in first.names:
                    if fullname == "%s.%s" % (first.modname, imported_name):
                        found = True
                        break
                    if (
                        name != "*"
                        and name == imported_name
                        and not (alias or imported_alias)
                    ):
                        found = True
                        break
                if found:
                    break
    if found and not astroid.are_exclusive(first, node):
        return first
    return None

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 36.882353
score: 0.001554
text:

def _getPastEvents(self, request):
    """Return the past events in this site."""
    home = request.site.root_page
    return getAllPastEvents(request, home=home)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 43
score: 0.011429
text:

def dump_to_string(self, cnf, **kwargs):
    """
    Dump config 'cnf' to a string.

    :param cnf: Configuration data to dump
    :param kwargs: optional keyword parameters to be sanitized :: dict

    :return: String holding the dumped config parameters
    """
    stream = anyconfig.compat.StringIO()
    self.dump_to_stream(cnf, stream, **kwargs)
    return stream.getvalue()

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 33.75
score: 0.004808
text:

def initial(self, request, *args, **kwargs):
    """
    Custom initial method:
        * ensure node exists and store it in an instance attribute
        * change queryset to return only devices of current node
    """
    super(NodeDeviceList, self).initial(request, *args, **kwargs)

    # ensure node exists
    try:
        self.node = Node.objects.published()\
                        .accessible_to(request.user)\
                        .get(slug=self.kwargs.get('slug', None))
    except Node.DoesNotExist:
        raise Http404(_('Node not found.'))

    # check permissions on node (for device creation)
    self.check_object_permissions(request, self.node)

    # return only devices of current node
    self.queryset = Device.objects.filter(node_id=self.node.id)\
                          .accessible_to(self.request.user)\
                          .select_related('node')

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 40.043478
score: 0.003181
text:

def main(argString=None):
    """Check for duplicated samples in a tfam/tped file.

    :param argString: the options

    :type argString: list

    Here are the steps for the duplicated samples step.

    1.  Prints the options.
    2.  Reads the ``tfam`` file (:py:func:`readTFAM`).
    3.  Separate the duplicated samples from the unique samples
        (:py:func:`findDuplicates`).
    4.  Writes the unique samples into a file named
        ``prefix.unique_samples.tfam`` (:py:func:`printUniqueTFAM`).
    5.  Reads the ``tped`` file and write into
        ``prefix.unique_samples.tped`` the pedigree file for the unique
        samples (:py:func:`processTPED`). Saves in memory the pedigree for
        the duplicated samples. Updates the indexes of the duplicated
        samples.
    6.  If there are no duplicated samples, simply copies the files
        ``prefix.unique_samples`` (``tped`` and ``tfam``) to
        ``prefix.final.tfam`` and ``prefix.final.tped``, respectively.
    7.  Computes the completion (for each of the duplicated samples) and
        the concordance of each sample pairs (:py:func:`computeStatistics`).
    8.  Prints statistics (concordance and completion)
        (:py:func:`printStatistics`).
    9.  We print the concordance matrix for each duplicated samples
        (:py:func:`printConcordance`).
    10. We print the ``tped`` and the ``tfam`` file for the duplicated
        samples (``prefix.duplicated_samples``)
        (:py:func:`printDuplicatedTPEDandTFAM`).
    11. Choose the best of each duplicates (to keep and to complete)
        according to completion and concordance
        (:py:func:`chooseBestDuplicates`).
    12. Creates a unique ``tped`` and ``tfam`` from the duplicated samples
        by completing the best chosen one with the other samples
        (:py:func:`createAndCleanTPED`).
    13. Merge the two tfiles together (``prefix.unique_samples`` and
        ``prefix.chosen_samples``) to create the final dataset
        (``prefix.final``) (:py:func:`addToTPEDandTFAM`).
    """
    # Getting and checking the options
    args = parseArgs(argString)
    checkArgs(args)

    logger.info("Options used:")
    for key, value in vars(args).iteritems():
        logger.info("  --{} {}".format(key.replace("_", "-"), value))

    # Reading the tfam file
    logger.info("Reading TFAM")
    tfam = readTFAM(args.tfile + ".tfam")

    # Find duplicated samples
    logger.info("Finding duplicated samples")
    uniqueSamples, duplicatedSamples = findDuplicates(tfam)

    # Prints the unique tfam
    logger.info("Creating TFAM for unique samples")
    printUniqueTFAM(tfam, uniqueSamples, args.out)

    # Process the TPED file
    logger.info("Reading TPED (and creating TPED for unique samples)")
    tped, tpedSamples = processTPED(uniqueSamples, duplicatedSamples,
                                    args.tfile + ".tped", args.out)

    if len(duplicatedSamples) == 0:
        logger.info("There are no duplicates in {}.tfam".format(args.tfile))
        # There are no duplicated samples
        try:
            shutil.copy(args.out + ".unique_samples.tfam",
                        args.out + ".final.tfam")
        except IOError:
            msg = "%s.unique_samples.tfam: can't copy to " \
                  "%s.final.tfam" % (args.out, args.out)
            raise ProgramError(msg)
        try:
            shutil.copy(args.out + ".unique_samples.tped",
                        args.out + ".final.tped")
        except IOError:
            msg = "%s.unique_samples.tped: can't copy to " \
                  "%s.final.tped" % (args.out, args.out)
            raise ProgramError(msg)
    else:
        # We continue
        # Compute statistics
        logger.info("Computing the completion and concordance of duplicated "
                    "samples")
        completion, concordance = computeStatistics(tped, tfam, tpedSamples,
                                                    duplicatedSamples,
                                                    args.out)

        # Print the statistics
        logger.info("Printing the statistics")
        completion_percentage = printStatistics(completion, concordance,
                                                tpedSamples,
                                                duplicatedSamples, args.out)

        # Print the concordance file
        logger.info("Printing the concordance file")
        concordance_percentage = printConcordance(concordance, args.out)

        # Print the duplicated TFAM and TPED
        logger.info("Creating TPED and TFAM for duplicated samples")
        printDuplicatedTPEDandTFAM(tped, tfam, tpedSamples,
                                   duplicatedSamples, args.out)

        # Choose the best duplicates
        logger.info("Choosing the best duplicates")
        chosenSamples, comp, conc = chooseBestDuplicates(
            tped, tpedSamples, duplicatedSamples, completion_percentage,
            concordance_percentage, args.out,
        )

        # Clean the genotype of the chosen samples
        logger.info("Cleaning and creating unique TPED and TFAM from "
                    "duplicated samples")
        newTPED, newTFAM = createAndCleanTPED(
            tped, tfam, tpedSamples, duplicatedSamples, chosenSamples,
            args.out, comp, args.sample_completion_threshold, conc,
            args.sample_concordance_threshold,
        )

        # Add the chosen TPED and TFAM
        logger.info("Creating final TPED and TFAM file")
        addToTPEDandTFAM(newTPED, newTFAM, args.out,
                         args.out + ".unique_samples")

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 40.266187
score: 0.000174
text:

def citedReferencesRetrieve(self, queryId, count=100, offset=1,
                            retrieveParameters=None):
    """The citedReferencesRetrieve operation submits a query returned by a
    previous citedReferences operation.

    This operation is useful for overcoming the retrieval limit of 100
    records per query. For example, a citedReferences operation may find
    106 cited references, as revealed by the content of the recordsFound
    element, but it returns only records 1-100. You could perform a
    subsequent citedReferencesretrieve operation to obtain records 101-106.

    :queryId: The query ID from a previous citedReferences operation

    :count: Number of records to display in the result. Cannot be less
            than 0 and cannot be greater than 100. If count is 0 then only
            the summary information will be returned.

    :offset: First record in results to return. Must be greater than zero

    :retrieveParameters: Retrieve parameters. If omitted the result of
                         make_retrieveParameters(offset, count, 'RS', 'D')
                         is used.
    """
    return self._search.service.citedReferencesRetrieve(
        queryId=queryId,
        retrieveParameters=(retrieveParameters or
                            self.make_retrieveParameters(offset, count))
    )

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 50.035714
score: 0.002101
text:

def load_library(self, libname):
    """Given the name of a library, load it."""
    paths = self.getpaths(libname)

    for path in paths:
        if os.path.exists(path):
            return self.load(path)

    raise ImportError("%s not found." % libname)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 30.222222
score: 0.010714
text:

def on_fork():
    """
    Should be called by any program integrating Mitogen each time the
    process is forked, in the context of the new child.
    """
    reset_logging_framework()  # Must be first!
    fixup_prngs()
    mitogen.core.Latch._on_fork()
    mitogen.core.Side._on_fork()
    mitogen.core.ExternalContext.service_stub_lock = threading.Lock()

    mitogen__service = sys.modules.get('mitogen.service')
    if mitogen__service:
        mitogen__service._pool_lock = threading.Lock()

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 34.642857
score: 0.002008
text:

def finish_parse(self, last_lineno_seen):
    """Clean-up/summary tasks run at the end of parsing."""
    if self.state == self.STATES['step_in_progress']:
        # We've reached the end of the log without seeing the final "step finish"
        # marker, which would normally have triggered updating the step. As such we
        # must manually close out the current step, so things like result, finish
        # time are set for it. This ensures that the error summary for Taskcluster
        # infra failures actually lists the error that occurs at the
        # end of the log.
        self.end_step(last_lineno_seen)

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 64.8
score: 0.009132
text:

def p_Exception(p):
    """Exception : exception IDENTIFIER Inheritance "{" ExceptionMembers "}" ";" """
    p[0] = model.Exception(name=p[2], parent=p[3], members=p[5])

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 41
score: 0.017964
text:

def runCommandSplits(splits, silent=False, shell=False):
    """
    Run a shell command given the command's parsed command line
    """
    try:
        if silent:
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(
                    splits, stdout=devnull, stderr=devnull, shell=shell)
        else:
            subprocess.check_call(splits, shell=shell)
    except OSError as exception:
        if exception.errno == 2:  # cmd not found
            raise Exception(
                "Can't find command while trying to run {}".format(splits))
        else:
            raise

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 35.294118
score: 0.001623
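A usage sketch based only on the definition above; the function relies on module-level os and subprocess imports, which are assumed here:

import os
import subprocess

runCommandSplits(["echo", "hello"])               # runs and echoes to stdout
runCommandSplits(["echo", "quiet"], silent=True)  # output discarded via os.devnull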
text:

def get_range_response(self, range_start, range_end, sort_order=None,
                       sort_target='key', **kwargs):
    """Get a range of keys."""
    range_request = self._build_get_range_request(
        key=range_start,
        range_end=range_end,
        sort_order=sort_order,
        sort_target=sort_target,
        **kwargs
    )

    return self.kvstub.Range(
        range_request,
        self.timeout,
        credentials=self.call_credentials,
        metadata=self.metadata
    )

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 32.058824
score: 0.005348
text:

def detect_erc20_unindexed_event_params(contract):
    """
    Detect un-indexed ERC20 event parameters in a given contract.
    :param contract: The contract to check ERC20 events for un-indexed parameters in.
    :return: A list of tuple(event, parameter) of parameters which should be indexed.
    """
    # Create our result array
    results = []

    # If this contract isn't an ERC20 token, we return our empty results.
    if not contract.is_erc20():
        return results

    # Loop through all events to look for poor form.
    for event in contract.events:
        # Only handle events which are declared in this contract.
        if event.contract != contract:
            continue

        # If this is transfer/approval events, expect the first two parameters to be indexed.
        if event.full_name in ["Transfer(address,address,uint256)",
                               "Approval(address,address,uint256)"]:
            if not event.elems[0].indexed:
                results.append((event, event.elems[0]))
            if not event.elems[1].indexed:
                results.append((event, event.elems[1]))

    # Return the results.
    return results

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 41.3
score: 0.003943
text:

def result_report_class_wise(self):
    """Report class-wise results

    Returns
    -------
    str
        result report in string format

    """
    results = self.results_class_wise_metrics()

    output = self.ui.section_header('Class-wise metrics', indent=2) + '\n'

    output += self.ui.row(
        'Scene label', 'Ncorr', 'Nref', 'Accuracy',
        widths=[20, 12, 12, 12],
        separators=[True, False, True, False],
        indent=4
    ) + '\n'
    output += self.ui.row('-', '-', '-', '-') + '\n'

    for scene_label in self.scene_label_list:
        output += self.ui.row(
            scene_label,
            results[scene_label]['count']['Ncorr'],
            results[scene_label]['count']['Nref'],
            results[scene_label]['accuracy']['accuracy'] * 100,
            types=['str', 'int', 'int', 'float1_percentage']
        ) + '\n'

    return output

code_tokens: (token-level split of the code above, docstring dropped)
avg_line_len: 28.757576
score: 0.002039
def _import(klass): '''1) Get a reference to the module 2) Check the file that module's imported from 3) If that file's been updated, force a reload of that module and return it''' mod = __import__(klass.rpartition('.')[0]) for segment in klass.split('.')[1:-1]: mod = getattr(mod, segment) # Alright, now check the file associated with it. Note that classes # defined in __main__ don't have a __file__ attribute if klass not in BaseJob._loaded: BaseJob._loaded[klass] = time.time() if hasattr(mod, '__file__'): try: mtime = os.stat(mod.__file__).st_mtime if BaseJob._loaded[klass] < mtime: mod = reload_module(mod) except OSError: logger.warn('Could not check modification time of %s', mod.__file__) return getattr(mod, klass.rpartition('.')[2])
[ "def", "_import", "(", "klass", ")", ":", "mod", "=", "__import__", "(", "klass", ".", "rpartition", "(", "'.'", ")", "[", "0", "]", ")", "for", "segment", "in", "klass", ".", "split", "(", "'.'", ")", "[", "1", ":", "-", "1", "]", ":", "mod", "=", "getattr", "(", "mod", ",", "segment", ")", "# Alright, now check the file associated with it. Note that clases", "# defined in __main__ don't have a __file__ attribute", "if", "klass", "not", "in", "BaseJob", ".", "_loaded", ":", "BaseJob", ".", "_loaded", "[", "klass", "]", "=", "time", ".", "time", "(", ")", "if", "hasattr", "(", "mod", ",", "'__file__'", ")", ":", "try", ":", "mtime", "=", "os", ".", "stat", "(", "mod", ".", "__file__", ")", ".", "st_mtime", "if", "BaseJob", ".", "_loaded", "[", "klass", "]", "<", "mtime", ":", "mod", "=", "reload_module", "(", "mod", ")", "except", "OSError", ":", "logger", ".", "warn", "(", "'Could not check modification time of %s'", ",", "mod", ".", "__file__", ")", "return", "getattr", "(", "mod", ",", "klass", ".", "rpartition", "(", "'.'", ")", "[", "2", "]", ")" ]
41.695652
0.003058
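The record above caches a first-seen timestamp per class path and reloads the defining module when its source file is newer. A minimal standalone sketch of the same reload-on-mtime idea using only the standard library (`fresh_import` and the module-level cache are illustrative names, not part of the original codebase):

import importlib
import os
import time

_loaded = {}  # module name -> time we first imported it

def fresh_import(name):
    # Import (or fetch the cached) module, then reload it if its source
    # file was modified after we first saw it.
    mod = importlib.import_module(name)
    _loaded.setdefault(name, time.time())
    if hasattr(mod, '__file__'):
        try:
            if os.stat(mod.__file__).st_mtime > _loaded[name]:
                mod = importlib.reload(mod)
                _loaded[name] = time.time()
        except OSError:
            pass  # source not statable; keep the cached module
    return mod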
def overview(self, tag=None, fromdate=None, todate=None): """ Gets a brief overview of statistics for all of your outbound email. """ return self.call("GET", "/stats/outbound", tag=tag, fromdate=fromdate, todate=todate)
[ "def", "overview", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
49.4
0.011952
def get_hook_model(): """ Returns the Custom Hook model if defined in settings, otherwise the default Hook model. """ from rest_hooks.models import Hook HookModel = Hook if getattr(settings, 'HOOK_CUSTOM_MODEL', None): HookModel = get_module(settings.HOOK_CUSTOM_MODEL) return HookModel
[ "def", "get_hook_model", "(", ")", ":", "from", "rest_hooks", ".", "models", "import", "Hook", "HookModel", "=", "Hook", "if", "getattr", "(", "settings", ",", "'HOOK_CUSTOM_MODEL'", ",", "None", ")", ":", "HookModel", "=", "get_module", "(", "settings", ".", "HOOK_CUSTOM_MODEL", ")", "return", "HookModel" ]
31.7
0.003067
def subkeys(self, path): """ A generalized form that can return multiple subkeys. """ for _ in subpaths_for_path_range(path, hardening_chars="'pH"): yield self.subkey(_)
[ "def", "subkeys", "(", "self", ",", "path", ")", ":", "for", "_", "in", "subpaths_for_path_range", "(", "path", ",", "hardening_chars", "=", "\"'pH\"", ")", ":", "yield", "self", ".", "subkey", "(", "_", ")" ]
34.666667
0.00939
def img_src_finder(pipeline_index, soup, finder_image_urls=[], *args, **kwargs): """ Find image URL in <img>'s src attribute """ now_finder_image_urls = [] for img in soup.find_all('img'): src = img.get('src', None) if src: src = str(src) if (src not in finder_image_urls) and \ (src not in now_finder_image_urls): now_finder_image_urls.append(src) output = {} output['finder_image_urls'] = finder_image_urls + now_finder_image_urls return output
[ "def", "img_src_finder", "(", "pipeline_index", ",", "soup", ",", "finder_image_urls", "=", "[", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "now_finder_image_urls", "=", "[", "]", "for", "img", "in", "soup", ".", "find_all", "(", "'img'", ")", ":", "src", "=", "img", ".", "get", "(", "'src'", ",", "None", ")", "if", "src", ":", "src", "=", "str", "(", "src", ")", "if", "(", "src", "not", "in", "finder_image_urls", ")", "and", "(", "src", "not", "in", "now_finder_image_urls", ")", ":", "now_finder_image_urls", ".", "append", "(", "src", ")", "output", "=", "{", "}", "output", "[", "'finder_image_urls'", "]", "=", "finder_image_urls", "+", "now_finder_image_urls", "return", "output" ]
26.772727
0.001639
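A self-contained check of the dedup logic above, assuming BeautifulSoup 4 is installed (the pipeline wrapper and its unused pipeline_index argument are omitted; the HTML sample is made up):

from bs4 import BeautifulSoup

html = '<img src="a.png"><img src="a.png"><img src="b.png"><img>'
soup = BeautifulSoup(html, 'html.parser')

seen = ['a.png']          # URLs already found by earlier pipeline stages
found = []
for img in soup.find_all('img'):
    src = img.get('src')  # None for the bare <img> tag
    if src and src not in seen and src not in found:
        found.append(str(src))

print(seen + found)       # ['a.png', 'b.png']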
def addPolicyURI(self, policy_uri): """Add an authentication policy to this response. This method is intended to be used by the provider to add a policy that the provider conformed to when authenticating the user. @param policy_uri: The identifier for the preferred type of authentication. @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies """ if policy_uri not in self.auth_policies: self.auth_policies.append(policy_uri)
[ "def", "addPolicyURI", "(", "self", ",", "policy_uri", ")", ":", "if", "policy_uri", "not", "in", "self", ".", "auth_policies", ":", "self", ".", "auth_policies", ".", "append", "(", "policy_uri", ")" ]
45.416667
0.005396
def filter_desc(self, graintype=None, group=None, reference=None, size=None, phase=None): ''' This routine filters on description elements. You can check what is available in the description by running, >>> i.header_desc() where i is the instance you loaded. You can run the filter multiple times! You can filter for the following types: Parameters ---------- graintype : string or list Give graintypes as either 'M' for only mainstream or more than one ['M','Z']. group : integer or list Group of graintypes, important for oxides and silicates, since they are split into groups and not into types. Example 1, or give a list [1,3]. reference : string or list Give the reference you want to filter for, try an i.info() to pick the right name! You can select a single reference as a string or multiple references as a list. size : string Filter for grain sizes, give '<5.0' or '>5.0' as a string for smaller or larger than a given grainsize in um. Only data with known grainsizes are chosen. Often grain sizes are given in a times b, where a and b are the minimum and maximum measurements from an image. If you give a >5.0 then grains with the smaller dimension >5um are taken into account. If you want <5.0 then grains with the upper dimension <5um are taken into account. ''' # filter for graintype if graintype != None: indexing = [] # index file on which lines to pick if type(graintype) == str: graintype = [graintype] # filter for typ in graintype: for i in range(len(self.desc)): if self.desc[i][self.descdict['Type']] == typ: indexing.append(i) # filter: self._filter_desc(indexing) # filter for phase if phase != None: indexing = [] # index file on which lines to pick if type(phase) == str: phase = [phase] # filter for typ in phase: for i in range(len(self.desc)): if self.desc[i][self.descdict['Phase']] == typ: indexing.append(i) # filter: self._filter_desc(indexing) # filter for group (oxides and silicates) if group != None: indexing = [] # index file on which lines to pick if type(group) != list: group = [group] # filter for grp in group: for i in range(len(self.desc)): if self.desc[i][self.descdict['Group']] == str(int(grp)): indexing.append(i) # filter: self._filter_desc(indexing) # filter for reference if reference != None: indexing = [] # index file on which lines to pick if type(reference) != list: reference = [reference] # filter for ri in range(len(reference)): for i in range(len(self.desc)): if self.desc[i][self.descdict['Reference']] == reference[ri]: indexing.append(i) # filter: self._filter_desc(indexing) # filter for grainsize if size != None: indexing = [] # index file on which lines to pick # filter operator = size[0:1] size = float(size[1:len(size)]) for i in range(len(self.desc)): if self.desc[i][self.descdict['Size (microns)']] != '': try: # print self.desc[i][self.descdict['Size (microns)']] comperator1 = self.desc[i][self.descdict['Size (microns)']].split('x')[0] comperator2 = self.desc[i][self.descdict['Size (microns)']].split('x')[1] comperator = [float(comperator1),float(comperator2)] if operator == '<': comperator = np.min(comperator) else: comperator = np.max(comperator) except (IndexError, AttributeError): try: comperator = float(self.desc[i][self.descdict['Size (microns)']]) except ValueError: continue if operator == '>': if comperator > size: indexing.append(i) elif operator == '<': if comperator < size: indexing.append(i) else: continue # filter: self._filter_desc(indexing)
[ "def", "filter_desc", "(", "self", ",", "graintype", "=", "None", ",", "group", "=", "None", ",", "reference", "=", "None", ",", "size", "=", "None", ",", "phase", "=", "None", ")", ":", "# filter for graintype", "if", "graintype", "!=", "None", ":", "indexing", "=", "[", "]", "# index file on which lines to pick", "if", "type", "(", "graintype", ")", "==", "str", ":", "graintype", "=", "[", "graintype", "]", "# filter", "for", "typ", "in", "graintype", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "desc", ")", ")", ":", "if", "self", ".", "desc", "[", "i", "]", "[", "self", ".", "descdict", "[", "'Type'", "]", "]", "==", "typ", ":", "indexing", ".", "append", "(", "i", ")", "# filter:", "self", ".", "_filter_desc", "(", "indexing", ")", "# filter for graintype", "if", "phase", "!=", "None", ":", "indexing", "=", "[", "]", "# index file on which lines to pick", "if", "type", "(", "phase", ")", "==", "str", ":", "phase", "=", "[", "phase", "]", "# filter", "for", "typ", "in", "phase", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "desc", ")", ")", ":", "if", "self", ".", "desc", "[", "i", "]", "[", "self", ".", "descdict", "[", "'Phase'", "]", "]", "==", "typ", ":", "indexing", ".", "append", "(", "i", ")", "# filter:", "self", ".", "_filter_desc", "(", "indexing", ")", "# filter for group (oxides and silicates)", "if", "group", "!=", "None", ":", "indexing", "=", "[", "]", "# index file on which lines to pick", "if", "type", "(", "group", ")", "!=", "list", ":", "group", "=", "[", "group", "]", "# filter", "for", "grp", "in", "group", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "desc", ")", ")", ":", "if", "self", ".", "desc", "[", "i", "]", "[", "self", ".", "descdict", "[", "'Group'", "]", "]", "==", "str", "(", "int", "(", "grp", ")", ")", ":", "indexing", ".", "append", "(", "i", ")", "# filter:", "self", ".", "_filter_desc", "(", "indexing", ")", "# filter for reference", "if", "reference", "!=", "None", ":", "indexing", "=", "[", "]", "# index file on which lines to pick", "if", "type", "(", "reference", ")", "!=", "list", ":", "reference", "=", "[", "reference", "]", "# filter", "for", "ri", "in", "range", "(", "len", "(", "reference", ")", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "desc", ")", ")", ":", "if", "self", ".", "desc", "[", "i", "]", "[", "self", ".", "descdict", "[", "'Reference'", "]", "]", "==", "reference", "[", "ri", "]", ":", "indexing", ".", "append", "(", "i", ")", "# filter:", "self", ".", "_filter_desc", "(", "indexing", ")", "# filter for grainzise", "if", "size", "!=", "None", ":", "indexing", "=", "[", "]", "# index file on which lines to pick", "# filter", "operator", "=", "size", "[", "0", ":", "1", "]", "size", "=", "float", "(", "size", "[", "1", ":", "len", "(", "size", ")", "]", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "desc", ")", ")", ":", "if", "self", ".", "desc", "[", "i", "]", "[", "self", ".", "descdict", "[", "'Size (microns)'", "]", "]", "!=", "''", ":", "try", ":", "# print self.desc[i][self.descdict['Size (microns)']]", "comperator1", "=", "self", ".", "desc", "[", "i", "]", "[", "self", ".", "descdict", "[", "'Size (microns)'", "]", "]", ".", "split", "(", "'x'", ")", "[", "0", "]", "comperator2", "=", "self", ".", "desc", "[", "i", "]", "[", "self", ".", "descdict", "[", "'Size (microns)'", "]", "]", ".", "split", "(", "'x'", ")", "[", "1", "]", "comperator", "=", "[", "float", "(", "comperator1", ")", ",", "float", "(", "comperator2", ")", 
"]", "if", "operator", "==", "'<'", ":", "comperator", "=", "np", ".", "min", "(", "comperator", ")", "else", ":", "comperator", "=", "np", ".", "max", "(", "comperator", ")", "except", "IndexError", "or", "AttributeError", ":", "try", ":", "comperator", "=", "float", "(", "self", ".", "desc", "[", "i", "]", "[", "self", ".", "descdict", "[", "'Size (microns)'", "]", "]", ")", "except", "ValueError", ":", "continue", "if", "operator", "==", "'>'", ":", "if", "comperator", ">", "size", ":", "indexing", ".", "append", "(", "i", ")", "elif", "operator", "==", "'<'", ":", "if", "comperator", "<", "size", ":", "indexing", ".", "append", "(", "i", ")", "else", ":", "continue", "# filter:", "self", ".", "_filter_desc", "(", "indexing", ")" ]
38.159091
0.003289
def set_property_filter(filter_proto, name, op, value): """Set property filter constraint in the given datastore.Filter proto message. Args: filter_proto: datastore.Filter proto message name: property name op: datastore.PropertyFilter.Operation value: property value Returns: the same datastore.Filter. Usage: >>> set_property_filter(filter_proto, 'foo', ... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a' """ filter_proto.Clear() pf = filter_proto.property_filter pf.property.name = name pf.op = op set_value(pf.value, value) return filter_proto
[ "def", "set_property_filter", "(", "filter_proto", ",", "name", ",", "op", ",", "value", ")", ":", "filter_proto", ".", "Clear", "(", ")", "pf", "=", "filter_proto", ".", "property_filter", "pf", ".", "property", ".", "name", "=", "name", "pf", ".", "op", "=", "op", "set_value", "(", "pf", ".", "value", ",", "value", ")", "return", "filter_proto" ]
26.818182
0.013093
def _font_name(self, ufo): """Generate a postscript-style font name.""" family_name = ( ufo.info.familyName.replace(" ", "") if ufo.info.familyName is not None else "None" ) style_name = ( ufo.info.styleName.replace(" ", "") if ufo.info.styleName is not None else "None" ) return "{}-{}".format(family_name, style_name)
[ "def", "_font_name", "(", "self", ",", "ufo", ")", ":", "family_name", "=", "(", "ufo", ".", "info", ".", "familyName", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "if", "ufo", ".", "info", ".", "familyName", "is", "not", "None", "else", "\"None\"", ")", "style_name", "=", "(", "ufo", ".", "info", ".", "styleName", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "if", "ufo", ".", "info", ".", "styleName", "is", "not", "None", "else", "\"None\"", ")", "return", "\"{}-{}\"", ".", "format", "(", "family_name", ",", "style_name", ")" ]
32.846154
0.004556
def wv45(msg): """Wake vortex. Args: msg (String): 28-character hexadecimal message string Returns: int: Wake vortex level. 0=NIL, 1=Light, 2=Moderate, 3=Severe """ d = hex2bin(data(msg)) if d[12] == '0': return None ws = bin2int(d[13:15]) return ws
[ "def", "wv45", "(", "msg", ")", ":", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "if", "d", "[", "12", "]", "==", "'0'", ":", "return", "None", "ws", "=", "bin2int", "(", "d", "[", "13", ":", "15", "]", ")", "return", "ws" ]
18.125
0.003279
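The helpers hex2bin, bin2int and data come from the surrounding decoder library. A pure-Python sketch of the same bit extraction with minimal stand-ins for those helpers (the stand-ins are assumptions, not the library's exact implementations, and the sketch takes the payload hex directly rather than the full message):

def hex2bin(hexstr):
    # Zero-padded binary string, 4 bits per hex digit.
    return bin(int(hexstr, 16))[2:].zfill(len(hexstr) * 4)

def bin2int(binstr):
    return int(binstr, 2)

def wake_vortex(data_hex):
    d = hex2bin(data_hex)
    if d[12] == '0':          # status bit: field not available
        return None
    return bin2int(d[13:15])  # 2-bit level: 0=NIL .. 3=Severe

# 56-bit (14 hex digit) payload with the status bit set and level bits '11':
print(wake_vortex('000E0000000000'))  # 3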
def fake2db_couchdb_initiator(self, number_of_rows, name=None, custom=None): '''Main handler for the operation ''' rows = number_of_rows db = self.database_caller_creator(name) if custom: self.custom_db_creator(rows, db, custom) sys.exit(0) self.data_filler_simple_registration(rows, db) self.data_filler_detailed_registration(rows, db) self.data_filler_company(rows, db) self.data_filler_user_agent(rows, db) self.data_filler_customer(rows, db)
[ "def", "fake2db_couchdb_initiator", "(", "self", ",", "number_of_rows", ",", "name", "=", "None", ",", "custom", "=", "None", ")", ":", "rows", "=", "number_of_rows", "db", "=", "self", ".", "database_caller_creator", "(", "name", ")", "if", "custom", ":", "self", ".", "custom_db_creator", "(", "rows", ",", "db", ",", "custom", ")", "sys", ".", "exit", "(", "0", ")", "self", ".", "data_filler_simple_registration", "(", "rows", ",", "db", ")", "self", ".", "data_filler_detailed_registration", "(", "rows", ",", "db", ")", "self", ".", "data_filler_company", "(", "rows", ",", "db", ")", "self", ".", "data_filler_user_agent", "(", "rows", ",", "db", ")", "self", ".", "data_filler_customer", "(", "rows", ",", "db", ")" ]
36.6
0.005329
async def get_all_tracks(self) -> List[PlaylistTrack]: """Get all playlist tracks from the playlist. Returns ------- tracks : List[PlaylistTrack] The playlists tracks. """ if isinstance(self._tracks, PartialTracks): return await self._tracks.build() _tracks = [] offset = 0 while len(self.tracks) < self.total_tracks: data = await self.__client.http.get_playlist_tracks(self.owner.id, self.id, limit=50, offset=offset) _tracks += [PlaylistTrack(self.__client, item) for item in data['items']] offset += 50 self.total_tracks = len(self._tracks) return list(self._tracks)
[ "async", "def", "get_all_tracks", "(", "self", ")", "->", "List", "[", "PlaylistTrack", "]", ":", "if", "isinstance", "(", "self", ".", "_tracks", ",", "PartialTracks", ")", ":", "return", "await", "self", ".", "_tracks", ".", "build", "(", ")", "_tracks", "=", "[", "]", "offset", "=", "0", "while", "len", "(", "self", ".", "tracks", ")", "<", "self", ".", "total_tracks", ":", "data", "=", "await", "self", ".", "__client", ".", "http", ".", "get_playlist_tracks", "(", "self", ".", "owner", ".", "id", ",", "self", ".", "id", ",", "limit", "=", "50", ",", "offset", "=", "offset", ")", "_tracks", "+=", "[", "PlaylistTrack", "(", "self", ".", "__client", ",", "item", ")", "for", "item", "in", "data", "[", "'items'", "]", "]", "offset", "+=", "50", "self", ".", "total_tracks", "=", "len", "(", "self", ".", "_tracks", ")", "return", "list", "(", "self", ".", "_tracks", ")" ]
33.380952
0.005548
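Note the method above mixes the local `_tracks` list with the `self.tracks`/`self._tracks` attributes, so the loop condition never sees the items it collects. A generic sketch of the offset-pagination pattern presumably intended, with a stubbed fetcher standing in for the HTTP call (all names here are illustrative):

import asyncio

PAGE = 50
DATA = [f'track-{i}' for i in range(120)]  # pretend remote collection

async def fetch_page(limit, offset):
    # Stand-in for an HTTP call returning one page of items.
    await asyncio.sleep(0)
    return DATA[offset:offset + limit]

async def get_all(total):
    items = []
    offset = 0
    while len(items) < total:
        page = await fetch_page(PAGE, offset)
        if not page:          # defensive: server returned fewer than promised
            break
        items.extend(page)
        offset += PAGE
    return items

print(len(asyncio.run(get_all(len(DATA)))))  # 120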
def ekrcec(handle, segno, recno, column, lenout, nelts=_SPICE_EK_EKRCEX_ROOM_DEFAULT): """ Read data from a character column in a specified EK record. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekrcec_c.html :param handle: Handle attached to EK file. :type handle: int :param segno: Index of segment containing record. :type segno: int :param recno: Record from which data is to be read. :type recno: int :param column: Column name. :type column: str :param lenout: Maximum length of output strings. :type lenout: int :param nelts: Number of elements to allow for (default=100) :type nelts: int :return: Number of values in column entry, Character values in column entry, Flag indicating whether column entry is null. :rtype: tuple """ handle = ctypes.c_int(handle) segno = ctypes.c_int(segno) recno = ctypes.c_int(recno) column = stypes.stringToCharP(column) lenout = ctypes.c_int(lenout) nvals = ctypes.c_int() cvals = stypes.emptyCharArray(yLen=nelts, xLen=lenout) isnull = ctypes.c_int() libspice.ekrcec_c(handle, segno, recno, column, lenout, ctypes.byref(nvals), ctypes.byref(cvals), ctypes.byref(isnull)) assert failed() or (nvals.value <= nelts) return nvals.value, stypes.cVectorToPython(cvals)[:nvals.value], bool(isnull.value)
[ "def", "ekrcec", "(", "handle", ",", "segno", ",", "recno", ",", "column", ",", "lenout", ",", "nelts", "=", "_SPICE_EK_EKRCEX_ROOM_DEFAULT", ")", ":", "handle", "=", "ctypes", ".", "c_int", "(", "handle", ")", "segno", "=", "ctypes", ".", "c_int", "(", "segno", ")", "recno", "=", "ctypes", ".", "c_int", "(", "recno", ")", "column", "=", "stypes", ".", "stringToCharP", "(", "column", ")", "lenout", "=", "ctypes", ".", "c_int", "(", "lenout", ")", "nvals", "=", "ctypes", ".", "c_int", "(", ")", "cvals", "=", "stypes", ".", "emptyCharArray", "(", "yLen", "=", "nelts", ",", "xLen", "=", "lenout", ")", "isnull", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "ekrcec_c", "(", "handle", ",", "segno", ",", "recno", ",", "column", ",", "lenout", ",", "ctypes", ".", "byref", "(", "nvals", ")", ",", "ctypes", ".", "byref", "(", "cvals", ")", ",", "ctypes", ".", "byref", "(", "isnull", ")", ")", "assert", "failed", "(", ")", "or", "(", "nvals", ".", "value", "<=", "nelts", ")", "return", "nvals", ".", "value", ",", "stypes", ".", "cVectorToPython", "(", "cvals", ")", "[", ":", "nvals", ".", "value", "]", ",", "bool", "(", "isnull", ".", "value", ")" ]
39.142857
0.002849
def setPageCount( self, pageCount ): """ Sets the number of pages that this widget holds. :param pageCount | <int> """ if ( pageCount == self._pageCount ): return pageCount = max(1, pageCount) self._pageCount = pageCount self._pagesSpinner.setMaximum(pageCount) self._pagesLabel.setText('of %i for ' % pageCount) if ( pageCount and self.currentPage() <= 0 ): self.setCurrentPage(1) elif ( pageCount < self.currentPage() ): self.setCurrentPage(pageCount) if ( not self.signalsBlocked() ): self.pageCountChanged.emit(pageCount) self._prevButton.setEnabled(self.currentPage() > 1) self._nextButton.setEnabled(self.currentPage() < pageCount)
[ "def", "setPageCount", "(", "self", ",", "pageCount", ")", ":", "if", "(", "pageCount", "==", "self", ".", "_pageCount", ")", ":", "return", "pageCount", "=", "max", "(", "1", ",", "pageCount", ")", "self", ".", "_pageCount", "=", "pageCount", "self", ".", "_pagesSpinner", ".", "setMaximum", "(", "pageCount", ")", "self", ".", "_pagesLabel", ".", "setText", "(", "'of %i for '", "%", "pageCount", ")", "if", "(", "pageCount", "and", "self", ".", "currentPage", "(", ")", "<=", "0", ")", ":", "self", ".", "setCurrentPage", "(", "1", ")", "elif", "(", "pageCount", "<", "self", ".", "currentPage", "(", ")", ")", ":", "self", ".", "setCurrentPage", "(", "pageCount", ")", "if", "(", "not", "self", ".", "signalsBlocked", "(", ")", ")", ":", "self", ".", "pageCountChanged", ".", "emit", "(", "pageCount", ")", "self", ".", "_prevButton", ".", "setEnabled", "(", "self", ".", "currentPage", "(", ")", ">", "1", ")", "self", ".", "_nextButton", ".", "setEnabled", "(", "self", ".", "currentPage", "(", ")", "<", "pageCount", ")" ]
33.269231
0.021348
def determine_reference_direction_fast(nickname, server, possible_autonomous_profiles=None, possible_component_profiles=None): """ Determine CIM_ReferenceProfile Antecedent/Dependent direction from server data and a list of known autonomous and/or component profiles using the algorithm defined for the server. This is the prototype for the code that was reimplemented in pywbem. Parameters: nickname, server, possible_autonomous_profiles, possible_component_profiles """ def _determine_type(profilepaths, v0_dict, v1_dict, autonomous): """ Determine type from data in the two dictionaries and the profile_list. Returns string defining type ('snia' or 'dmtf'). Returns None if the profile list is None or none of the profilepaths exist in either v0_dict or v1_dict """ if not profilepaths: return None t = ['snia', 'dmtf'] if not autonomous: t.reverse() dir_type = None v0_paths = [] v1_paths = [] for ppath in profilepaths: if ppath in v0_dict: v0_paths.append(ppath) if VERBOSE: print('DETERMINED_TYPE v0 %s %s' % (ppath, t[0])) elif ppath in v1_dict: v1_paths.append(ppath) if VERBOSE: print('DETERMINED_TYPE v1 %s %s' % (ppath, t[1])) if v0_paths and not v1_paths: dir_type = t[0] elif v1_paths and not v0_paths: dir_type = t[1] elif not v0_paths and not v1_paths: dir_type = None else: ps = 'possible %s' % ('autonomous' if autonomous else 'component') print('ERROR VALUERROR %s\n%s:%s\n%s: %s' % (ps, t[0], v0_paths, t[1], v1_paths)) raise ValueError("Cannot determine type. " "determine_cimreference_direction shows " "conflicts in %s profile list. %s; %s\n%s; %s" % (ps, t[0], v0_paths, t[1], v1_paths)) return dir_type if VERBOSE: print('POSSIBLE_AUTONOMOUS_PROFILES:\n%s' % possible_autonomous_profiles) if not possible_autonomous_profiles and not possible_component_profiles: raise ValueError("Either possible_autonomous_profiles or " "possible_component_profiles must have a value") assoc_dict = fast_count_associators(server) # returns dictionary where key is profile name and value is dict of # ant: dep: with value count # Reduce to dictionary where ant/dep are 0 and non-zero, i.e. top and bottom new_dict = {} for key, value in assoc_dict.items(): if (not value['dep'] and value['ant']) \ or (value['dep'] and not value['ant']): new_dict[key] = (value['dep'], value['ant']) if not value['dep'] and not value['ant']: print('ERROR key %s value %s' % (key, value)) # print('NEW_DICT %s' % new_dict) # create a dictionary with entry for each new_dict item that has data in # one of the value items. v0_dict = {key: value for key, value in new_dict.items() if value[0]} v1_dict = {key: value for key, value in new_dict.items() if value[1]} if VERBOSE: print('V0_DICT %s' % v0_dict) print('V1_DICT %s' % v1_dict) print('POSSIBLE_AUTONOMOUS_PROFILES %s' % possible_autonomous_profiles) auto_dir_type = _determine_type(possible_autonomous_profiles, v0_dict, v1_dict, True) comp_dir_type = _determine_type(possible_component_profiles, v0_dict, v1_dict, False) if VERBOSE: print('AUTO_DIR %s %s' % (auto_dir_type, comp_dir_type)) if auto_dir_type and comp_dir_type: if auto_dir_type == comp_dir_type: return auto_dir_type elif not auto_dir_type and not comp_dir_type: return None else: if auto_dir_type: return auto_dir_type elif comp_dir_type: return comp_dir_type if VERBOSE: print('RAISE VALUERR %s %s' % (auto_dir_type, comp_dir_type)) raise ValueError('Name: %s; Cannot determine ' 'possible CIM_ReferencedProfile direction. ' 'Autonomous and componentTests do not match. ' 'auto_dir_type=%s, ' 'comp_dir_type=%s\nServer=%s; ' % (nickname, auto_dir_type, comp_dir_type, server))
[ "def", "determine_reference_direction_fast", "(", "nickname", ",", "server", ",", "possible_autonomous_profiles", "=", "None", ",", "possible_component_profiles", "=", "None", ")", ":", "def", "_determine_type", "(", "profilepaths", ",", "v0_dict", ",", "v1_dict", ",", "autonomous", ")", ":", "\"\"\"\n Determine type from data in the two dictionaries and the profile_list.\n Returns string defining type ('snia' or 'dmtf'). Returns None if\n the profile list is None or None of the profilepaths exist in either\n v0_dict or v1_dict\n \"\"\"", "if", "not", "profilepaths", ":", "return", "None", "t", "=", "[", "'snia'", ",", "'dmtf'", "]", "if", "not", "autonomous", ":", "t", ".", "reverse", "(", ")", "dir_type", "=", "None", "v0_paths", "=", "[", "]", "v1_paths", "=", "[", "]", "for", "ppath", "in", "profilepaths", ":", "if", "ppath", "in", "v0_dict", ":", "v0_paths", ".", "append", "(", "ppath", ")", "if", "VERBOSE", ":", "print", "(", "'DETERMINED_TYPE v0 %s %s'", "%", "(", "ppath", ",", "t", "[", "0", "]", ")", ")", "elif", "ppath", "in", "v1_dict", ":", "v1_paths", ".", "append", "(", "ppath", ")", "if", "VERBOSE", ":", "print", "(", "'DETERMINED_TYPE v1 %s %s'", "%", "(", "ppath", ",", "t", "[", "1", "]", ")", ")", "if", "v0_paths", "and", "not", "v1_paths", ":", "dir_type", "=", "t", "[", "0", "]", "elif", "v1_paths", "and", "not", "v0_paths", ":", "dir_type", "=", "t", "[", "1", "]", "elif", "not", "v0_paths", "and", "not", "v1_paths", ":", "dir_type", "=", "None", "else", ":", "ps", "=", "'possible %s'", "%", "(", "'autonomous'", "if", "autonomous", "else", "'component'", ")", "print", "(", "'ERROR VALUERROR %s\\n%s:%s\\n%s: %s'", "%", "(", "ps", ",", "t", "[", "0", "]", ",", "v0_paths", ",", "t", "[", "1", "]", ",", "v1_paths", ")", ")", "raise", "ValueError", "(", "\"Cannot determine type. \"", "\"determine_cimrefrence_direction shows \"", "\"conflicts in %s profile list. %s; %s\\n%s; %s\"", "%", "(", "ps", ",", "t", "[", "0", "]", ",", "v0_paths", ",", "t", "[", "1", "]", ",", "v1_paths", ")", ")", "return", "dir_type", "if", "VERBOSE", ":", "print", "(", "'POSSIBLE_AUTONOMOUS_PROFILES:\\n%s'", "%", "possible_autonomous_profiles", ")", "if", "not", "possible_autonomous_profiles", "and", "not", "possible_component_profiles", ":", "raise", "ValueError", "(", "\"Either possible_autonomous_profiles or \"", "\"possible_component_profiles must have a value\"", ")", "assoc_dict", "=", "fast_count_associators", "(", "server", ")", "# returns dictionary where key is profile name and value is dict of", "# ant: dep: with value count", "# Reduce to dictionary where ant/dep are 0 and non-zero, i.e. 
top and bottom", "new_dict", "=", "{", "}", "for", "key", ",", "value", "in", "assoc_dict", ".", "items", "(", ")", ":", "if", "(", "not", "value", "[", "'dep'", "]", "and", "value", "[", "'ant'", "]", ")", "or", "(", "value", "[", "'dep'", "]", "and", "not", "value", "[", "'ant'", "]", ")", ":", "new_dict", "[", "key", "]", "=", "(", "value", "[", "'dep'", "]", ",", "value", "[", "'ant'", "]", ")", "if", "not", "value", "[", "'dep'", "]", "and", "not", "value", "[", "'ant'", "]", ":", "print", "(", "'ERROR key %s value %s'", "%", "(", "key", ",", "value", ")", ")", "# print('NEW_DICT %s' % new_dict)", "# create a dictionary with entry for each new_dict itme that has data in", "# one of the value items.", "v0_dict", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "new_dict", ".", "items", "(", ")", "if", "value", "[", "0", "]", "}", "v1_dict", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "new_dict", ".", "items", "(", ")", "if", "value", "[", "1", "]", "}", "if", "VERBOSE", ":", "print", "(", "'V0_DICT %s'", "%", "v0_dict", ")", "print", "(", "'V1_DICT %s'", "%", "v1_dict", ")", "print", "(", "'POSSIBLE_AUTONOMOUS_PROFILES %s'", "%", "possible_autonomous_profiles", ")", "auto_dir_type", "=", "_determine_type", "(", "possible_autonomous_profiles", ",", "v0_dict", ",", "v1_dict", ",", "True", ")", "comp_dir_type", "=", "_determine_type", "(", "possible_component_profiles", ",", "v0_dict", ",", "v1_dict", ",", "False", ")", "if", "VERBOSE", ":", "print", "(", "'AUTO_DIR %s %s'", "%", "(", "auto_dir_type", ",", "comp_dir_type", ")", ")", "if", "auto_dir_type", "and", "comp_dir_type", ":", "if", "auto_dir_type", "==", "comp_dir_type", ":", "return", "auto_dir_type", "elif", "not", "auto_dir_type", "and", "not", "comp_dir_type", ":", "return", "None", "else", ":", "if", "auto_dir_type", ":", "return", "auto_dir_type", "elif", "comp_dir_type", ":", "return", "comp_dir_type", "if", "VERBOSE", ":", "print", "(", "'RAISE VALUERR %s %s'", "%", "(", "auto_dir_type", ",", "comp_dir_type", ")", ")", "raise", "ValueError", "(", "'Name: %s; Cannot determine '", "'possible CIM_ReferencedProfile direction. '", "'Autonomous and componentTests do not match. '", "'auto_dir_type=%s, '", "'comp_dir_type=%s\\nServer=%s; '", "%", "(", "nickname", ",", "auto_dir_type", ",", "comp_dir_type", ",", "server", ")", ")" ]
41.306306
0.000426
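The heart of `_determine_type` above is a partition test: known profiles landing only on the v0 side get one label, only on the v1 side the other, on both sides an error. A pure-Python sketch of that rule (the simplified dictionary shapes and the function name classify are assumptions for illustration):

def classify(known_paths, v0_keys, v1_keys, autonomous):
    # Returns 'snia' or 'dmtf' depending on which partition the known
    # profiles land in; the labels swap when testing component profiles.
    labels = ['snia', 'dmtf'] if autonomous else ['dmtf', 'snia']
    in_v0 = [p for p in known_paths if p in v0_keys]
    in_v1 = [p for p in known_paths if p in v1_keys]
    if in_v0 and not in_v1:
        return labels[0]
    if in_v1 and not in_v0:
        return labels[1]
    if not in_v0 and not in_v1:
        return None
    raise ValueError('known profiles appear on both sides: %s / %s'
                     % (in_v0, in_v1))

print(classify(['ProfileA'], {'ProfileA'}, {'ProfileB'}, autonomous=True))  # snia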
def summary_permutation(context_counts, context_to_mut, seq_context, gene_seq, score_dir, num_permutations=10000, min_frac=0.0, min_recur=2, drop_silent=False): """Performs null-permutations and summarizes the results as features over the gene. Parameters ---------- context_counts : pd.Series number of mutations for each context context_to_mut : dict dictionary mapping nucleotide context to a list of observed somatic base changes. seq_context : SequenceContext Sequence context for the entire gene sequence (regardless of where mutations occur). The nucleotide contexts are identified at positions along the gene. gene_seq : GeneSequence Sequence of gene of interest num_permutations : int, default: 10000 number of permutations to create for null drop_silent : bool, default=False Flag for whether to drop all silent mutations. Some data sources do not report silent mutations, and the simulations should match this. Returns ------- summary_info_list : list of lists list of non-silent and silent mutation counts under the null along with information on recurrent missense counts and missense positional entropy. """ mycontexts = context_counts.index.tolist() somatic_base = [base for one_context in mycontexts for base in context_to_mut[one_context]] # get random positions determined by sequence context tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(), num_permutations) tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos) # determine result of random positions gene_name = gene_seq.bed.gene_name gene_len = gene_seq.bed.cds_len summary_info_list = [] for i, row in enumerate(tmp_mut_pos): # get info about mutations tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq) # Get all metrics summarizing each gene tmp_summary = cutils.calc_summary_info(tmp_mut_info['Reference AA'], tmp_mut_info['Somatic AA'], tmp_mut_info['Codon Pos'], gene_name, score_dir, min_frac=min_frac, min_recur=min_recur) # drop silent if needed if drop_silent: # silent mutation count is index 1 tmp_summary[1] = 0 # limit the precision of floats #pos_ent = tmp_summary[-1] #tmp_summary[-1] = '{0:.5f}'.format(pos_ent) summary_info_list.append([gene_name, i+1, gene_len]+tmp_summary) return summary_info_list
[ "def", "summary_permutation", "(", "context_counts", ",", "context_to_mut", ",", "seq_context", ",", "gene_seq", ",", "score_dir", ",", "num_permutations", "=", "10000", ",", "min_frac", "=", "0.0", ",", "min_recur", "=", "2", ",", "drop_silent", "=", "False", ")", ":", "mycontexts", "=", "context_counts", ".", "index", ".", "tolist", "(", ")", "somatic_base", "=", "[", "base", "for", "one_context", "in", "mycontexts", "for", "base", "in", "context_to_mut", "[", "one_context", "]", "]", "# get random positions determined by sequence context", "tmp_contxt_pos", "=", "seq_context", ".", "random_pos", "(", "context_counts", ".", "iteritems", "(", ")", ",", "num_permutations", ")", "tmp_mut_pos", "=", "np", ".", "hstack", "(", "pos_array", "for", "base", ",", "pos_array", "in", "tmp_contxt_pos", ")", "# determine result of random positions", "gene_name", "=", "gene_seq", ".", "bed", ".", "gene_name", "gene_len", "=", "gene_seq", ".", "bed", ".", "cds_len", "summary_info_list", "=", "[", "]", "for", "i", ",", "row", "in", "enumerate", "(", "tmp_mut_pos", ")", ":", "# get info about mutations", "tmp_mut_info", "=", "mc", ".", "get_aa_mut_info", "(", "row", ",", "somatic_base", ",", "gene_seq", ")", "# Get all metrics summarizing each gene", "tmp_summary", "=", "cutils", ".", "calc_summary_info", "(", "tmp_mut_info", "[", "'Reference AA'", "]", ",", "tmp_mut_info", "[", "'Somatic AA'", "]", ",", "tmp_mut_info", "[", "'Codon Pos'", "]", ",", "gene_name", ",", "score_dir", ",", "min_frac", "=", "min_frac", ",", "min_recur", "=", "min_recur", ")", "# drop silent if needed", "if", "drop_silent", ":", "# silent mutation count is index 1", "tmp_summary", "[", "1", "]", "=", "0", "# limit the precision of floats", "#pos_ent = tmp_summary[-1]", "#tmp_summary[-1] = '{0:.5f}'.format(pos_ent)", "summary_info_list", ".", "append", "(", "[", "gene_name", ",", "i", "+", "1", ",", "gene_len", "]", "+", "tmp_summary", ")", "return", "summary_info_list" ]
40.012821
0.000938
def patch_namespaced_pod_template(self, name, namespace, body, **kwargs): # noqa: E501 """patch_namespaced_pod_template # noqa: E501 partially update the specified PodTemplate # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_pod_template(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodTemplate (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1PodTemplate If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_pod_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.patch_namespaced_pod_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
[ "def", "patch_namespaced_pod_template", "(", "self", ",", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "patch_namespaced_pod_template_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "patch_namespaced_pod_template_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
61.68
0.001277
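This is generated OpenAPI client code; a hedged sketch of calling it through the official kubernetes Python client, assuming a reachable cluster, an existing PodTemplate, and the standard kubeconfig (the object name and label below are hypothetical):

from kubernetes import client, config

config.load_kube_config()          # reads ~/.kube/config; assumes a live cluster
v1 = client.CoreV1Api()

# Strategic-merge patch: only the fields present here are changed.
patch = {'metadata': {'labels': {'tier': 'canary'}}}
result = v1.patch_namespaced_pod_template(
    name='my-template',            # hypothetical object name
    namespace='default',
    body=patch,
)
print(result.metadata.labels)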
def get_hist(rfile, histname, get_overflow=False): """Read a 1D Histogram.""" import root_numpy as rnp rfile = open_rfile(rfile) hist = rfile[histname] xlims = np.array(list(hist.xedges())) bin_values = rnp.hist2array(hist, include_overflow=get_overflow) rfile.close() return bin_values, xlims
[ "def", "get_hist", "(", "rfile", ",", "histname", ",", "get_overflow", "=", "False", ")", ":", "import", "root_numpy", "as", "rnp", "rfile", "=", "open_rfile", "(", "rfile", ")", "hist", "=", "rfile", "[", "histname", "]", "xlims", "=", "np", ".", "array", "(", "list", "(", "hist", ".", "xedges", "(", ")", ")", ")", "bin_values", "=", "rnp", ".", "hist2array", "(", "hist", ",", "include_overflow", "=", "get_overflow", ")", "rfile", ".", "close", "(", ")", "return", "bin_values", ",", "xlims" ]
31.7
0.003067
def subcmd_bootstrap_parser(subcmd): """ bootstrap subcommand """ subcmd.add_argument( '--broker', action='store', dest='broker', help=u'Route to the Ansible Service Broker' ) subcmd.add_argument( '--secure', action='store_true', dest='verify', help=u'Verify SSL connection to Ansible Service Broker', default=False ) subcmd.add_argument( '--ca-path', action='store', dest='cert', help=u'CA cert to use for verifying SSL connection to Ansible Service Broker', default=None ) subcmd.add_argument( '--no-relist', action='store_true', dest='no_relist', help=u'Do not relist the catalog after bootstrapping the broker', default=False ) subcmd.add_argument( '--username', '-u', action='store', default=None, dest='basic_auth_username', help=u'Specify the basic auth username to be used' ) subcmd.add_argument( '--password', '-p', action='store', default=None, dest='basic_auth_password', help=u'Specify the basic auth password to be used' ) subcmd.add_argument( '--broker-name', action='store', dest='broker_name', help=u'Name of the ServiceBroker k8s resource', default=u'ansible-service-broker' ) return
[ "def", "subcmd_bootstrap_parser", "(", "subcmd", ")", ":", "subcmd", ".", "add_argument", "(", "'--broker'", ",", "action", "=", "'store'", ",", "dest", "=", "'broker'", ",", "help", "=", "u'Route to the Ansible Service Broker'", ")", "subcmd", ".", "add_argument", "(", "'--secure'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'verify'", ",", "help", "=", "u'Verify SSL connection to Ansible Service Broker'", ",", "default", "=", "False", ")", "subcmd", ".", "add_argument", "(", "'--ca-path'", ",", "action", "=", "'store'", ",", "dest", "=", "'cert'", ",", "help", "=", "u'CA cert to use for verifying SSL connection to Ansible Service Broker'", ",", "default", "=", "None", ")", "subcmd", ".", "add_argument", "(", "'--no-relist'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'no_relist'", ",", "help", "=", "u'Do not relist the catalog after bootstrapping the broker'", ",", "default", "=", "False", ")", "subcmd", ".", "add_argument", "(", "'--username'", ",", "'-u'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "dest", "=", "'basic_auth_username'", ",", "help", "=", "u'Specify the basic auth username to be used'", ")", "subcmd", ".", "add_argument", "(", "'--password'", ",", "'-p'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "dest", "=", "'basic_auth_password'", ",", "help", "=", "u'Specify the basic auth password to be used'", ")", "subcmd", ".", "add_argument", "(", "'--broker-name'", ",", "action", "=", "'store'", ",", "dest", "=", "'broker_name'", ",", "help", "=", "u'Name of the ServiceBroker k8s resource'", ",", "default", "=", "u'ansible-service-broker'", ")", "return" ]
26.415094
0.001377
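The parser wiring above is plain argparse; a runnable miniature showing the same subcommand-plus-flags pattern (the program name and flag values are made up):

import argparse

parser = argparse.ArgumentParser(prog='apb')
subcmds = parser.add_subparsers(dest='subcmd')

bootstrap = subcmds.add_parser('bootstrap', help='bootstrap the broker')
bootstrap.add_argument('--broker', action='store', dest='broker',
                       help='Route to the Ansible Service Broker')
bootstrap.add_argument('--no-relist', action='store_true', dest='no_relist',
                       default=False)

args = parser.parse_args(['bootstrap', '--broker', 'http://broker', '--no-relist'])
print(args.broker, args.no_relist)   # http://broker True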
def parse_html(html): """ Create an lxml.html.HtmlElement from a string with html. XXX: mostly copy-pasted from parsel.selector.create_root_node """ body = html.strip().replace('\x00', '').encode('utf8') or b'<html/>' parser = lxml.html.HTMLParser(recover=True, encoding='utf8') root = lxml.etree.fromstring(body, parser=parser) if root is None: root = lxml.etree.fromstring(b'<html/>', parser=parser) return root
[ "def", "parse_html", "(", "html", ")", ":", "body", "=", "html", ".", "strip", "(", ")", ".", "replace", "(", "'\\x00'", ",", "''", ")", ".", "encode", "(", "'utf8'", ")", "or", "b'<html/>'", "parser", "=", "lxml", ".", "html", ".", "HTMLParser", "(", "recover", "=", "True", ",", "encoding", "=", "'utf8'", ")", "root", "=", "lxml", ".", "etree", ".", "fromstring", "(", "body", ",", "parser", "=", "parser", ")", "if", "root", "is", "None", ":", "root", "=", "lxml", ".", "etree", ".", "fromstring", "(", "b'<html/>'", ",", "parser", "=", "parser", ")", "return", "root" ]
44.4
0.002208
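Since parse_html uses only public lxml APIs, its recovery behavior is easy to demonstrate; with recover=True the parser closes open tags and wraps fragments instead of raising (assumes lxml is installed; the exact serialized output may vary by lxml version):

import lxml.etree
import lxml.html

parser = lxml.html.HTMLParser(recover=True, encoding='utf8')
root = lxml.etree.fromstring(b'<p>broken <b>markup', parser=parser)
print(lxml.etree.tostring(root))
# The fragment comes back wrapped and closed, e.g.
# b'<html><body><p>broken <b>markup</b></p></body></html>'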
def _get_metadata(cls, request, response, head=None): """Parses out the head and link properties based on the HTTP Request from the client, and the Protobuf response from the validator. """ head = response.get('head_id', head) metadata = {'link': cls._build_url(request, head=head)} if head is not None: metadata['head'] = head return metadata
[ "def", "_get_metadata", "(", "cls", ",", "request", ",", "response", ",", "head", "=", "None", ")", ":", "head", "=", "response", ".", "get", "(", "'head_id'", ",", "head", ")", "metadata", "=", "{", "'link'", ":", "cls", ".", "_build_url", "(", "request", ",", "head", "=", "head", ")", "}", "if", "head", "is", "not", "None", ":", "metadata", "[", "'head'", "]", "=", "head", "return", "metadata" ]
40.3
0.004854
def make(parser): """DEPRECATED: prepare OpenStack basic environment""" s = parser.add_subparsers( title='commands', metavar='COMMAND', help='description', ) def gen_pass_f(args): gen_pass() gen_pass_parser = s.add_parser('gen-pass', help='generate the password') gen_pass_parser.set_defaults(func=gen_pass_f) def cmd_f(args): cmd(args.user, args.hosts.split(','), args.key_filename, args.password, args.run) cmd_parser = s.add_parser('cmd', help='run command line on the target host') cmd_parser.add_argument('--run', help='the command running on the remote node', action='store', default=None, dest='run') cmd_parser.set_defaults(func=cmd_f)
[ "def", "make", "(", "parser", ")", ":", "s", "=", "parser", ".", "add_subparsers", "(", "title", "=", "'commands'", ",", "metavar", "=", "'COMMAND'", ",", "help", "=", "'description'", ",", ")", "def", "gen_pass_f", "(", "args", ")", ":", "gen_pass", "(", ")", "gen_pass_parser", "=", "s", ".", "add_parser", "(", "'gen-pass'", ",", "help", "=", "'generate the password'", ")", "gen_pass_parser", ".", "set_defaults", "(", "func", "=", "gen_pass_f", ")", "def", "cmd_f", "(", "args", ")", ":", "cmd", "(", "args", ".", "user", ",", "args", ".", "hosts", ".", "split", "(", "','", ")", ",", "args", ".", "key_filename", ",", "args", ".", "password", ",", "args", ".", "run", ")", "cmd_parser", "=", "s", ".", "add_parser", "(", "'cmd'", ",", "help", "=", "'run command line on the target host'", ")", "cmd_parser", ".", "add_argument", "(", "'--run'", ",", "help", "=", "'the command running on the remote node'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "dest", "=", "'run'", ")", "cmd_parser", ".", "set_defaults", "(", "func", "=", "cmd_f", ")" ]
37.578947
0.005464
def update(self, vrfs): """ Method to update VRFs. :param vrfs: List of VRFs to be updated. :return: None """ data = {'vrfs': vrfs} vrfs_ids = [str(vrf.get('id')) for vrf in vrfs] return super(ApiVrf, self).put('api/v3/vrf/%s/' % ';'.join(vrfs_ids), data)
[ "def", "update", "(", "self", ",", "vrfs", ")", ":", "data", "=", "{", "'vrfs'", ":", "vrfs", "}", "vrfs_ids", "=", "[", "str", "(", "vrf", ".", "get", "(", "'id'", ")", ")", "for", "vrf", "in", "vrfs", "]", "return", "super", "(", "ApiVrf", ",", "self", ")", ".", "put", "(", "'api/v3/vrf/%s/'", "%", "';'", ".", "join", "(", "vrfs_ids", ")", ",", "data", ")" ]
27.846154
0.005348
def make_log_record_output(category, level, message, format=None, datefmt=None, **kwargs): """ Create the output for a log record, as performed by the :mod:`logging` module. :param category: Name of the logger (as string or None). :param level: Log level (as number). :param message: Log message to use. :returns: Log record output (as string) """ if not category or (category == "__ROOT__"): category = "root" levelname = logging.getLevelName(level) record_data = dict(name=category, levelname=levelname, msg=message) record_data.update(kwargs) record = logging.makeLogRecord(record_data) formatter = logging.Formatter(format, datefmt=datefmt) return formatter.format(record)
[ "def", "make_log_record_output", "(", "category", ",", "level", ",", "message", ",", "format", "=", "None", ",", "datefmt", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "category", "or", "(", "category", "==", "\"__ROOT__\"", ")", ":", "category", "=", "\"root\"", "levelname", "=", "logging", ".", "getLevelName", "(", "level", ")", "record_data", "=", "dict", "(", "name", "=", "category", ",", "levelname", "=", "levelname", ",", "msg", "=", "message", ")", "record_data", ".", "update", "(", "kwargs", ")", "record", "=", "logging", ".", "makeLogRecord", "(", "record_data", ")", "formatter", "=", "logging", ".", "Formatter", "(", "format", ",", "datefmt", "=", "datefmt", ")", "return", "formatter", ".", "format", "(", "record", ")" ]
42.333333
0.002567
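A runnable stdlib example of the same record-to-string path: build a record dict, materialize it with logging.makeLogRecord, and render it with a Formatter (the logger name and message are made up):

import logging

record_data = dict(name='my.logger',
                   levelname=logging.getLevelName(logging.WARNING),
                   msg='disk %s is %d%% full', args=('sda1', 93))
record = logging.makeLogRecord(record_data)

formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s',
                              datefmt='%H:%M:%S')
print(formatter.format(record))
# e.g. "12:34:56 my.logger WARNING: disk sda1 is 93% full"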
def update_contact(self, um_from_user, um_to_user, message): """ Get or update a contact's information """ contact, created = self.get_or_create(um_from_user, um_to_user, message) # If the contact already existed, update the message if not created: contact.latest_message = message contact.save() return contact
[ "def", "update_contact", "(", "self", ",", "um_from_user", ",", "um_to_user", ",", "message", ")", ":", "contact", ",", "created", "=", "self", ".", "get_or_create", "(", "um_from_user", ",", "um_to_user", ",", "message", ")", "# If the contact already existed, update the message", "if", "not", "created", ":", "contact", ".", "latest_message", "=", "message", "contact", ".", "save", "(", ")", "return", "contact" ]
41.545455
0.004283
def controlprompt_cmd(self, cmd): """Perform a "controlpromptentry" command.""" data = tags.string_tag('cmbe', cmd) + tags.uint8_tag('cmcc', 0) return self.daap.post(_CTRL_PROMPT_CMD, data=data)
[ "def", "controlprompt_cmd", "(", "self", ",", "cmd", ")", ":", "data", "=", "tags", ".", "string_tag", "(", "'cmbe'", ",", "cmd", ")", "+", "tags", ".", "uint8_tag", "(", "'cmcc'", ",", "0", ")", "return", "self", ".", "daap", ".", "post", "(", "_CTRL_PROMPT_CMD", ",", "data", "=", "data", ")" ]
53.75
0.009174
def _apply_filters(self, p_todos): """ Applies the filters to the list of todo items. """ result = p_todos for _filter in sorted(self._filters, key=lambda f: f.order): result = _filter.filter(result) return result
[ "def", "_apply_filters", "(", "self", ",", "p_todos", ")", ":", "result", "=", "p_todos", "for", "_filter", "in", "sorted", "(", "self", ".", "_filters", ",", "key", "=", "lambda", "f", ":", "f", ".", "order", ")", ":", "result", "=", "_filter", ".", "filter", "(", "result", ")", "return", "result" ]
31.5
0.007722
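A self-contained illustration of the sorted filter chain above, with a toy filter class standing in for the real filter objects (the Substr class and sample todos are illustrative):

class Substr:
    def __init__(self, needle, order):
        self.needle, self.order = needle, order
    def filter(self, todos):
        return [t for t in todos if self.needle in t]

filters = [Substr('home', order=2), Substr('@', order=1)]
todos = ['call mum @phone', 'clean @home', 'water plants @home']

result = todos
for f in sorted(filters, key=lambda f: f.order):   # apply in order 1, then 2
    result = f.filter(result)
print(result)  # ['clean @home', 'water plants @home']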
def uninstall(path, restart=False): ''' Uninstall a specific KB. Args: path (str): The full path to the msu file to uninstall. This can also be just the name of the KB to uninstall restart (bool): ``True`` to force a restart if required by the installation. Adds the ``/forcerestart`` switch to the ``wusa.exe`` command. ``False`` will add the ``/norestart`` switch instead. Default is ``False`` Returns: bool: ``True`` if successful, otherwise ``False`` Raises: CommandExecutionError: If an error is encountered CLI Example: .. code-block:: bash salt '*' wusa.uninstall KB123456 # or salt '*' wusa.uninstall C:/temp/KB123456.msu ''' # Build the command cmd = ['wusa.exe', '/uninstall', '/quiet'] kb = os.path.splitext(os.path.basename(path))[0] if os.path.exists(path): cmd.append(path) else: cmd.append( '/kb:{0}'.format(kb[2:] if kb.lower().startswith('kb') else kb)) if restart: cmd.append('/forcerestart') else: cmd.append('/norestart') # Run the command ret_code = __salt__['cmd.retcode'](cmd, ignore_retcode=True) # Check the ret_code # If you pass /quiet and specify /kb, you'll always get retcode 87 if there # is an error. Use the actual file to get a more descriptive error errors = {-2145116156: '{0} does not support uninstall'.format(kb), 2359303: '{0} not installed'.format(kb), 87: 'Unknown error. Try specifying an .msu file'} if ret_code in errors: raise CommandExecutionError(errors[ret_code]) elif ret_code: raise CommandExecutionError('Unknown error: {0}'.format(ret_code)) return True
[ "def", "uninstall", "(", "path", ",", "restart", "=", "False", ")", ":", "# Build the command", "cmd", "=", "[", "'wusa.exe'", ",", "'/uninstall'", ",", "'/quiet'", "]", "kb", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "path", ")", ")", "[", "0", "]", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "cmd", ".", "append", "(", "path", ")", "else", ":", "cmd", ".", "append", "(", "'/kb:{0}'", ".", "format", "(", "kb", "[", "2", ":", "]", "if", "kb", ".", "lower", "(", ")", ".", "startswith", "(", "'kb'", ")", "else", "kb", ")", ")", "if", "restart", ":", "cmd", ".", "append", "(", "'/forcerestart'", ")", "else", ":", "cmd", ".", "append", "(", "'/norestart'", ")", "# Run the command", "ret_code", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "ignore_retcode", "=", "True", ")", "# Check the ret_code", "# If you pass /quiet and specify /kb, you'll always get retcode 87 if there", "# is an error. Use the actual file to get a more descriptive error", "errors", "=", "{", "-", "2145116156", ":", "'{0} does not support uninstall'", ".", "format", "(", "kb", ")", ",", "2359303", ":", "'{0} not installed'", ".", "format", "(", "kb", ")", ",", "87", ":", "'Unknown error. Try specifying an .msu file'", "}", "if", "ret_code", "in", "errors", ":", "raise", "CommandExecutionError", "(", "errors", "[", "ret_code", "]", ")", "elif", "ret_code", ":", "raise", "CommandExecutionError", "(", "'Unknown error: {0}'", ".", "format", "(", "ret_code", ")", ")", "return", "True" ]
29.847458
0.00055
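The command construction in uninstall is pure string handling and can be exercised anywhere; a sketch of just that branch logic (the wrapper function name is illustrative):

import os

def build_uninstall_cmd(path, restart=False):
    cmd = ['wusa.exe', '/uninstall', '/quiet']
    kb = os.path.splitext(os.path.basename(path))[0]
    if os.path.exists(path):
        cmd.append(path)  # an actual .msu file on disk
    else:
        # Bare KB name: strip the 'KB' prefix for the /kb: switch.
        cmd.append('/kb:{0}'.format(kb[2:] if kb.lower().startswith('kb') else kb))
    cmd.append('/forcerestart' if restart else '/norestart')
    return cmd

print(build_uninstall_cmd('KB123456'))
# ['wusa.exe', '/uninstall', '/quiet', '/kb:123456', '/norestart']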
def _get_data_bounds(data_bounds, pos=None, length=None): """Prepare data bounds, possibly using min/max of the data.""" if data_bounds is None or (isinstance(data_bounds, string_types) and data_bounds == 'auto'): if pos is not None and len(pos): m, M = pos.min(axis=0), pos.max(axis=0) data_bounds = [m[0], m[1], M[0], M[1]] else: data_bounds = NDC data_bounds = np.atleast_2d(data_bounds) ind_x = data_bounds[:, 0] == data_bounds[:, 2] ind_y = data_bounds[:, 1] == data_bounds[:, 3] if np.sum(ind_x): data_bounds[ind_x, 0] -= 1 data_bounds[ind_x, 2] += 1 if np.sum(ind_y): data_bounds[ind_y, 1] -= 1 data_bounds[ind_y, 3] += 1 # Extend the data_bounds if needed. if length is None: length = pos.shape[0] if pos is not None else 1 if data_bounds.shape[0] == 1: data_bounds = np.tile(data_bounds, (length, 1)) # Check the shape of data_bounds. assert data_bounds.shape == (length, 4) _check_data_bounds(data_bounds) return data_bounds
[ "def", "_get_data_bounds", "(", "data_bounds", ",", "pos", "=", "None", ",", "length", "=", "None", ")", ":", "if", "data_bounds", "is", "None", "or", "(", "isinstance", "(", "data_bounds", ",", "string_types", ")", "and", "data_bounds", "==", "'auto'", ")", ":", "if", "pos", "is", "not", "None", "and", "len", "(", "pos", ")", ":", "m", ",", "M", "=", "pos", ".", "min", "(", "axis", "=", "0", ")", ",", "pos", ".", "max", "(", "axis", "=", "0", ")", "data_bounds", "=", "[", "m", "[", "0", "]", ",", "m", "[", "1", "]", ",", "M", "[", "0", "]", ",", "M", "[", "1", "]", "]", "else", ":", "data_bounds", "=", "NDC", "data_bounds", "=", "np", ".", "atleast_2d", "(", "data_bounds", ")", "ind_x", "=", "data_bounds", "[", ":", ",", "0", "]", "==", "data_bounds", "[", ":", ",", "2", "]", "ind_y", "=", "data_bounds", "[", ":", ",", "1", "]", "==", "data_bounds", "[", ":", ",", "3", "]", "if", "np", ".", "sum", "(", "ind_x", ")", ":", "data_bounds", "[", "ind_x", ",", "0", "]", "-=", "1", "data_bounds", "[", "ind_x", ",", "2", "]", "+=", "1", "if", "np", ".", "sum", "(", "ind_y", ")", ":", "data_bounds", "[", "ind_y", ",", "1", "]", "-=", "1", "data_bounds", "[", "ind_y", ",", "3", "]", "+=", "1", "# Extend the data_bounds if needed.", "if", "length", "is", "None", ":", "length", "=", "pos", ".", "shape", "[", "0", "]", "if", "pos", "is", "not", "None", "else", "1", "if", "data_bounds", ".", "shape", "[", "0", "]", "==", "1", ":", "data_bounds", "=", "np", ".", "tile", "(", "data_bounds", ",", "(", "length", ",", "1", ")", ")", "# Check the shape of data_bounds.", "assert", "data_bounds", ".", "shape", "==", "(", "length", ",", "4", ")", "_check_data_bounds", "(", "data_bounds", ")", "return", "data_bounds" ]
35.354839
0.000888
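A short numpy walk-through of the two fix-ups the function applies: padding a degenerate axis so the box has nonzero extent, and tiling one bounding box per vertex (sample positions are made up):

import numpy as np

pos = np.array([[0., 5.], [2., 5.], [1., 5.]])   # all y equal: degenerate extent
m, M = pos.min(axis=0), pos.max(axis=0)
data_bounds = np.atleast_2d([m[0], m[1], M[0], M[1]])

# Pad any axis whose min equals its max so the box has nonzero area.
ind_y = data_bounds[:, 1] == data_bounds[:, 3]
data_bounds[ind_y, 1] -= 1
data_bounds[ind_y, 3] += 1

# Tile one box per vertex, as the function does when a single box is given.
data_bounds = np.tile(data_bounds, (len(pos), 1))
print(data_bounds)   # three rows of [0. 4. 2. 6.]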
def validate(self): '''Validate required headers and validate notification headers''' for header in self._requiredHeaders: if not self.headers.get(header, False): raise errors.ParseError('Missing Registration Header: ' + header) for notice in self.notifications: for header in self._requiredNotificationHeaders: if not notice.get(header, False): raise errors.ParseError('Missing Notification Header: ' + header)
[ "def", "validate", "(", "self", ")", ":", "for", "header", "in", "self", ".", "_requiredHeaders", ":", "if", "not", "self", ".", "headers", ".", "get", "(", "header", ",", "False", ")", ":", "raise", "errors", ".", "ParseError", "(", "'Missing Registration Header: '", "+", "header", ")", "for", "notice", "in", "self", ".", "notifications", ":", "for", "header", "in", "self", ".", "_requiredNotificationHeaders", ":", "if", "not", "notice", ".", "get", "(", "header", ",", "False", ")", ":", "raise", "errors", ".", "ParseError", "(", "'Missing Notification Header: '", "+", "header", ")" ]
47.555556
0.022936
def signed_token_generator(private_pem, **kwargs): """ :param private_pem: """ def signed_token_generator(request): request.claims = kwargs return common.generate_signed_token(private_pem, request) return signed_token_generator
[ "def", "signed_token_generator", "(", "private_pem", ",", "*", "*", "kwargs", ")", ":", "def", "signed_token_generator", "(", "request", ")", ":", "request", ".", "claims", "=", "kwargs", "return", "common", ".", "generate_signed_token", "(", "private_pem", ",", "request", ")", "return", "signed_token_generator" ]
28.444444
0.003788
def join_right_in(self, *objs): """ Create a join condition, connect B and C """ if not objs: return self.table.c[self.fielda]!=self.table.c[self.fielda] else: keys = get_objs_columns(objs, self.reference_fieldname) return (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & (self.table.c[self.fielda].in_(keys))
[ "def", "join_right_in", "(", "self", ",", "*", "objs", ")", ":", "if", "not", "objs", ":", "return", "self", ".", "table", ".", "c", "[", "self", ".", "fielda", "]", "!=", "self", ".", "table", ".", "c", "[", "self", ".", "fielda", "]", "else", ":", "keys", "=", "get_objs_columns", "(", "objs", ",", "self", ".", "reference_fieldname", ")", "return", "(", "self", ".", "table", ".", "c", "[", "self", ".", "fieldb", "]", "==", "self", ".", "reference_class", ".", "c", "[", "self", ".", "reference_fieldname", "]", ")", "&", "(", "self", ".", "table", ".", "c", "[", "self", ".", "fielda", "]", ".", "in_", "(", "keys", ")", ")" ]
45.555556
0.009569
def selectize_tags_media(media_type='css',name=''): """ Usage: ------ To include css media: selectize_tags_media 'css' <theme> To include Selectize Scripts: selectize_tags_media 'js' To include Selectize Scripts and Jquery: selectize_tags_media 'js' 'jquery' """ if media_type=='js': str_script='<script src="{url}"></script>\n' html=str_script.format(url=static('selectize/selectize.min.js')) if name=='jquery': html=str_script.format(url=static('selectize/jquery.min.js'))+html return html if name:name+='.' fpath='selectize/css/selectize.{name}css'.format(name=name) return '<link rel="stylesheet" href="{url}">'.format(url=static(fpath))
[ "def", "selectize_tags_media", "(", "media_type", "=", "'css'", ",", "name", "=", "''", ")", ":", "if", "media_type", "==", "'js'", ":", "str_script", "=", "'<script src=\"{url}\"></script>\\n'", "html", "=", "str_script", ".", "format", "(", "url", "=", "static", "(", "'selectize/selectize.min.js'", ")", ")", "if", "name", "==", "'jquery'", ":", "html", "=", "str_script", ".", "format", "(", "url", "=", "static", "(", "'selectize/jquery.min.js'", ")", ")", "+", "html", "return", "html", "if", "name", ":", "name", "+=", "'.'", "fpath", "=", "'selectize/css/selectize.{name}css'", ".", "format", "(", "name", "=", "name", ")", "return", "'<link rel=\"stylesheet\" href=\"{url}\">'", ".", "format", "(", "url", "=", "static", "(", "fpath", ")", ")" ]
28.130435
0.044843
def get_service_regex(base_url, service_url, sub_service):
    """Get the regex for a given service.

    :param base_url: string - Base URI
    :param service_url: string - Service URI under the Base URI
    :param sub_service: boolean - is the Service URI for a sub-service?

    :returns: Python Regex object containing the regex for the Service
    """
    # if the specified service_url is already a regex
    # then just use it; otherwise create what we need
    if StackInABoxService.is_regex(service_url):
        logger.debug('StackInABoxService: Received regex {0} for use...'
                     .format(service_url.pattern))

        # Validate the regex against StackInABoxService requirement
        StackInABoxService.validate_regex(service_url, sub_service)

        return service_url
    else:
        regex = '^{0}{1}$'.format('', service_url)

        logger.debug('StackInABoxService: {0} + {1} -> {2}'
                     .format(base_url, service_url, regex))

        return re.compile(regex)
[ "def", "get_service_regex", "(", "base_url", ",", "service_url", ",", "sub_service", ")", ":", "# if the specified service_url is already a regex", "# then just use. Otherwise create what we need", "if", "StackInABoxService", ".", "is_regex", "(", "service_url", ")", ":", "logger", ".", "debug", "(", "'StackInABoxService: Received regex {0} for use...'", ".", "format", "(", "service_url", ".", "pattern", ")", ")", "# Validate the regex against StackInABoxService requirement", "StackInABoxService", ".", "validate_regex", "(", "service_url", ",", "sub_service", ")", "return", "service_url", "else", ":", "regex", "=", "'^{0}{1}$'", ".", "format", "(", "''", ",", "service_url", ")", "logger", ".", "debug", "(", "'StackInABoxService: {0} + {1} -> {2}'", ".", "format", "(", "base_url", ",", "service_url", ",", "regex", ")", ")", "return", "re", ".", "compile", "(", "regex", ")" ]
44.416667
0.001837
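A sketch of the two branches above; the base and service paths are made up, and the call form assumes the method is usable as a plain function on the class:

import re

r1 = StackInABoxService.get_service_regex('/base', '/widgets', False)
assert r1.pattern == '^/widgets$'      # plain strings are anchored and compiled

r2 = StackInABoxService.get_service_regex('/base', re.compile('^/w/.*$'), False)
# precompiled patterns are validated and returned unchanged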
def fasta(self): """Generates sequence data for the protein in FASTA format.""" max_line_length = 79 fasta_str = '>{0}:{1}|PDBID|CHAIN|SEQUENCE\n'.format( self.parent.id.upper(), self.id) seq = self.sequence split_seq = [seq[i: i + max_line_length] for i in range(0, len(seq), max_line_length)] for seq_part in split_seq: fasta_str += '{0}\n'.format(seq_part) return fasta_str
[ "def", "fasta", "(", "self", ")", ":", "max_line_length", "=", "79", "fasta_str", "=", "'>{0}:{1}|PDBID|CHAIN|SEQUENCE\\n'", ".", "format", "(", "self", ".", "parent", ".", "id", ".", "upper", "(", ")", ",", "self", ".", "id", ")", "seq", "=", "self", ".", "sequence", "split_seq", "=", "[", "seq", "[", "i", ":", "i", "+", "max_line_length", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "seq", ")", ",", "max_line_length", ")", "]", "for", "seq_part", "in", "split_seq", ":", "fasta_str", "+=", "'{0}\\n'", ".", "format", "(", "seq_part", ")", "return", "fasta_str" ]
42.454545
0.004193
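The 79-character wrapping idiom used above, reduced to plain Python (runnable as-is):

max_line_length = 79
seq = 'M' * 100
split_seq = [seq[i: i + max_line_length]
             for i in range(0, len(seq), max_line_length)]
# -> one 79-character line followed by one 21-character line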
async def run(self): """Entrypoint to route messages between plugins.""" logging.info('Starting message router...') coroutines = set() while True: coro = self._poll_channel() coroutines.add(coro) _, coroutines = await asyncio.wait(coroutines, timeout=0.1)
[ "async", "def", "run", "(", "self", ")", ":", "logging", ".", "info", "(", "'Starting message router...'", ")", "coroutines", "=", "set", "(", ")", "while", "True", ":", "coro", "=", "self", ".", "_poll_channel", "(", ")", "coroutines", ".", "add", "(", "coro", ")", "_", ",", "coroutines", "=", "await", "asyncio", ".", "wait", "(", "coroutines", ",", "timeout", "=", "0.1", ")" ]
35.111111
0.006173
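A self-contained sketch of the wait-with-timeout pattern the router relies on; the channel poll is faked with a short sleep:

import asyncio

async def fake_poll():
    await asyncio.sleep(0.05)

async def main():
    pending = {asyncio.ensure_future(fake_poll())}
    done, pending = await asyncio.wait(pending, timeout=0.1)
    # unfinished coroutines stay in `pending` and are awaited on the next pass

asyncio.run(main())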
def _terminate_procs(procs): """ Terminate all processes in the process dictionary """ logging.warn("Stopping all remaining processes") for proc, g in procs.values(): logging.debug("[%s] SIGTERM", proc.pid) try: proc.terminate() except OSError as e: # we don't care if the process we tried to kill didn't exist. if e.errno != errno.ESRCH: raise sys.exit(1)
[ "def", "_terminate_procs", "(", "procs", ")", ":", "logging", ".", "warn", "(", "\"Stopping all remaining processes\"", ")", "for", "proc", ",", "g", "in", "procs", ".", "values", "(", ")", ":", "logging", ".", "debug", "(", "\"[%s] SIGTERM\"", ",", "proc", ".", "pid", ")", "try", ":", "proc", ".", "terminate", "(", ")", "except", "OSError", "as", "e", ":", "# we don't care if the process we tried to kill didn't exist.", "if", "e", ".", "errno", "!=", "errno", ".", "ESRCH", ":", "raise", "sys", ".", "exit", "(", "1", ")" ]
31.642857
0.002193
def values(self, predicate=None): """ Returns a list clone of the values contained in this map or values of the entries which are filtered with the predicate if provided. **Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa.** :param predicate: (Predicate), predicate to filter the entries (optional). :return: (Sequence), a list of clone of the values contained in this map. .. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates. """ if predicate: predicate_data = self._to_data(predicate) return self._encode_invoke(map_values_with_predicate_codec, predicate=predicate_data) else: return self._encode_invoke(map_values_codec)
[ "def", "values", "(", "self", ",", "predicate", "=", "None", ")", ":", "if", "predicate", ":", "predicate_data", "=", "self", ".", "_to_data", "(", "predicate", ")", "return", "self", ".", "_encode_invoke", "(", "map_values_with_predicate_codec", ",", "predicate", "=", "predicate_data", ")", "else", ":", "return", "self", ".", "_encode_invoke", "(", "map_values_codec", ")" ]
44.684211
0.009227
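A hedged client-side sketch; `my_map` is assumed to be a distributed-map proxy obtained from a Hazelcast client, and the invocation returns a future, so `.result()` blocks for the reply:

all_values = my_map.values().result()
# with a predicate (how `my_predicate` is built depends on the client version):
# filtered = my_map.values(my_predicate).result()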
def clear_state(self, activity, agent, registration=None): """Clear state(s) with specified activity and agent :param activity: Activity object of state(s) to be deleted :type activity: :class:`tincan.activity.Activity` :param agent: Agent object of state(s) to be deleted :type agent: :class:`tincan.agent.Agent` :param registration: registration UUID of state(s) to be deleted :type registration: str | unicode :return: LRS Response object :rtype: :class:`tincan.lrs_response.LRSResponse` """ return self._delete_state( activity=activity, agent=agent, registration=registration )
[ "def", "clear_state", "(", "self", ",", "activity", ",", "agent", ",", "registration", "=", "None", ")", ":", "return", "self", ".", "_delete_state", "(", "activity", "=", "activity", ",", "agent", "=", "agent", ",", "registration", "=", "registration", ")" ]
41.058824
0.002801
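A hedged usage sketch; the endpoint, credentials, activity id, and mailbox below are all invented:

from tincan import RemoteLRS, Activity, Agent

lrs = RemoteLRS(endpoint='https://lrs.example.com/xapi/',
                username='user', password='pass')
response = lrs.clear_state(
    activity=Activity(id='http://example.com/activities/quiz-1'),
    agent=Agent(mbox='mailto:learner@example.com'),
)
# response.success tells whether the delete went through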
def register_rpc(name=None): """Decorator. Allows registering a function for RPC. * http://uwsgi.readthedocs.io/en/latest/RPC.html Example: .. code-block:: python @register_rpc() def expose_me(): do() :param str|unicode name: RPC function name to associate with decorated function. :rtype: callable """ def wrapper(func): func_name = func.__name__ rpc_name = name or func_name uwsgi.register_rpc(rpc_name, func) _LOG.debug("Registering '%s' for RPC under '%s' alias ...", func_name, rpc_name) return func return wrapper
[ "def", "register_rpc", "(", "name", "=", "None", ")", ":", "def", "wrapper", "(", "func", ")", ":", "func_name", "=", "func", ".", "__name__", "rpc_name", "=", "name", "or", "func_name", "uwsgi", ".", "register_rpc", "(", "rpc_name", ",", "func", ")", "_LOG", ".", "debug", "(", "\"Registering '%s' for RPC under '%s' alias ...\"", ",", "func_name", ",", "rpc_name", ")", "return", "func", "return", "wrapper" ]
20.966667
0.00304
def find_project(self): # type: () -> List[str] """ Get all candidate projects :return: """ folders = [f for f in os.listdir(".") if os.path.isdir(f)] candidates = [] setup = self.setup_py_source() for folder in folders: if os.path.isfile(folder + "/__init__.py"): dunder_source = self._read_file(folder + "/__init__.py") project = folder if setup: # prevents test folders & other junk in_setup = ( "'{0}".format(project) not in setup and '"{0}"'.format(project) not in setup ) in_dunder = ( "'{0}".format(dunder_source) not in setup and '"{0}"'.format(dunder_source) not in setup ) if not in_setup and not in_dunder: continue candidates.append(folder) # TODO: parse setup.cfg if not candidates: candidates = candidates + self.find_single_file_project() if not candidates: candidates = candidates + self.find_malformed_single_file_project() candidates = list(set([x for x in candidates if x])) # too many if not candidates: candidates.extend(self.via_find_packages()) candidates = list(set([x for x in candidates if x])) if len(candidates) > 1: for unlikely in [ "test", "tests", "example", "examples", "demo", "demos", "test_files", ]: if unlikely in candidates: logger.warning("Assuming {0} is not the project".format(unlikely)) candidates.remove(unlikely) if len(candidates) == 1: break # too few or too many if len(candidates) != 1: likely_name = self.name_from_setup_py() if likely_name in candidates: return [likely_name] return list(set([x for x in candidates if x]))
[ "def", "find_project", "(", "self", ")", ":", "# type: () -> List[str]", "folders", "=", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "\".\"", ")", "if", "os", ".", "path", ".", "isdir", "(", "f", ")", "]", "candidates", "=", "[", "]", "setup", "=", "self", ".", "setup_py_source", "(", ")", "for", "folder", "in", "folders", ":", "if", "os", ".", "path", ".", "isfile", "(", "folder", "+", "\"/__init__.py\"", ")", ":", "dunder_source", "=", "self", ".", "_read_file", "(", "folder", "+", "\"/__init__.py\"", ")", "project", "=", "folder", "if", "setup", ":", "# prevents test folders & other junk", "in_setup", "=", "(", "\"'{0}\"", ".", "format", "(", "project", ")", "not", "in", "setup", "and", "'\"{0}\"'", ".", "format", "(", "project", ")", "not", "in", "setup", ")", "in_dunder", "=", "(", "\"'{0}\"", ".", "format", "(", "dunder_source", ")", "not", "in", "setup", "and", "'\"{0}\"'", ".", "format", "(", "dunder_source", ")", "not", "in", "setup", ")", "if", "not", "in_setup", "and", "not", "in_dunder", ":", "continue", "candidates", ".", "append", "(", "folder", ")", "# TODO: parse setup.cfg", "if", "not", "candidates", ":", "candidates", "=", "candidates", "+", "self", ".", "find_single_file_project", "(", ")", "if", "not", "candidates", ":", "candidates", "=", "candidates", "+", "self", ".", "find_malformed_single_file_project", "(", ")", "candidates", "=", "list", "(", "set", "(", "[", "x", "for", "x", "in", "candidates", "if", "x", "]", ")", ")", "# too many", "if", "not", "candidates", ":", "candidates", ".", "extend", "(", "self", ".", "via_find_packages", "(", ")", ")", "candidates", "=", "list", "(", "set", "(", "[", "x", "for", "x", "in", "candidates", "if", "x", "]", ")", ")", "if", "len", "(", "candidates", ")", ">", "1", ":", "for", "unlikely", "in", "[", "\"test\"", ",", "\"tests\"", ",", "\"example\"", ",", "\"examples\"", ",", "\"demo\"", ",", "\"demos\"", ",", "\"test_files\"", ",", "]", ":", "if", "unlikely", "in", "candidates", ":", "logger", ".", "warning", "(", "\"Assuming {0} is not the project\"", ".", "format", "(", "unlikely", ")", ")", "candidates", ".", "remove", "(", "unlikely", ")", "if", "len", "(", "candidates", ")", "==", "1", ":", "break", "# too few or too many", "if", "len", "(", "candidates", ")", "!=", "1", ":", "likely_name", "=", "self", ".", "name_from_setup_py", "(", ")", "if", "likely_name", "in", "candidates", ":", "return", "[", "likely_name", "]", "return", "list", "(", "set", "(", "[", "x", "for", "x", "in", "candidates", "if", "x", "]", ")", ")" ]
34.3125
0.001328
def filter(self, query: Query, entity: type) -> Tuple[Query, Any]:
    """Apply `_method` to all children of the node.

    :param query: The sqlalchemy query.
    :type query: Query
    :param entity: The entity model of the query.
    :type entity: type
    :return: A tuple with the updated query in first place and, in
        second place, the combined filter clause (``_method`` applied to
        the children's filters) to apply to the query.
    :rtype: Tuple[Query, Any]
    """
    new_query = query
    c_filter_list = []
    for child in self._childs:
        new_query, f_list = child.filter(new_query, entity)
        c_filter_list.append(f_list)
    return (
        new_query,
        self._method(*c_filter_list)
    )
[ "def", "filter", "(", "self", ",", "query", ":", "Query", ",", "entity", ":", "type", ")", "->", "Tuple", "[", "Query", ",", "Any", "]", ":", "new_query", "=", "query", "c_filter_list", "=", "[", "]", "for", "child", "in", "self", ".", "_childs", ":", "new_query", ",", "f_list", "=", "child", ".", "filter", "(", "new_query", ",", "entity", ")", "c_filter_list", ".", "append", "(", "f_list", ")", "return", "(", "new_query", ",", "self", ".", "_method", "(", "*", "c_filter_list", ")", ")" ]
33.318182
0.003979
def get_modified_date(parsed, raw):
    'Return the best possible guess at the post modification timestamp.'
    if parsed: return feedparser_ts(parsed)
    if not raw: return None
    # Parse weird timestamps that feedparser can't handle, e.g.: July 30, 2013
    ts, val = None, raw.replace('_', ' ')
    if not ts: # coreutils' "date" parses virtually everything, but is more expensive to use
        from subprocess import Popen, PIPE
        with open(os.devnull, 'w') as devnull:
            proc = Popen(['date', '+%s', '-d', val], stdout=PIPE, stderr=devnull)
            val = proc.stdout.read()
        if not proc.wait():
            ts = datetime.fromtimestamp(int(val.strip()), tz=timezone.utc)
    if ts: return ts
    raise ValueError('Unrecognized raw value format: {0!r}'.format(val))
[ "def", "get_modified_date", "(", "parsed", ",", "raw", ")", ":", "if", "parsed", ":", "return", "feedparser_ts", "(", "parsed", ")", "if", "not", "raw", ":", "return", "None", "# Parse weird timestamps that feedparser can't handle, e.g.: July 30, 2013", "ts", ",", "val", "=", "None", ",", "raw", ".", "replace", "(", "'_'", ",", "' '", ")", "if", "not", "ts", ":", "# coreutils' \"date\" parses virtually everything, but is more expensive to use", "from", "subprocess", "import", "Popen", ",", "PIPE", "with", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "as", "devnull", ":", "proc", "=", "Popen", "(", "[", "'date'", ",", "'+%s'", ",", "'-d'", ",", "val", "]", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "devnull", ")", "val", "=", "proc", ".", "stdout", ".", "read", "(", ")", "if", "not", "proc", ".", "wait", "(", ")", ":", "ts", "=", "datetime", ".", "fromtimestamp", "(", "int", "(", "val", ".", "strip", "(", ")", ")", ",", "tz", "=", "timezone", ".", "utc", ")", "if", "ts", ":", "return", "ts", "raise", "ValueError", "(", "'Unrecognized raw value format: {0!r}'", ".", "format", "(", "val", ")", ")" ]
41.882353
0.026099
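The coreutils fallback in isolation; this needs GNU date on $PATH and is therefore Unix-specific:

import os
from subprocess import Popen, PIPE

with open(os.devnull, 'w') as devnull:
    proc = Popen(['date', '+%s', '-d', 'July 30, 2013'],
                 stdout=PIPE, stderr=devnull)
    out = proc.stdout.read()
if not proc.wait():
    print(int(out.strip()))   # Unix timestamp for that date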
def np(numpy_feval, name=None, allow_extra_outputs=False): """Creates a custom evaluation metric that receives its inputs as numpy arrays. Parameters ---------- numpy_feval : callable(label, pred) Custom evaluation function that receives labels and predictions for a minibatch as numpy arrays and returns the corresponding custom metric as a floating point number. name : str, optional Name of the custom metric. allow_extra_outputs : bool, optional Whether prediction output is allowed to have extra outputs. This is useful in cases like RNN where states are also part of output which can then be fed back to the RNN in the next step. By default, extra outputs are not allowed. Returns ------- float Custom metric corresponding to the provided labels and predictions. Example ------- >>> def custom_metric(label, pred): ... return np.mean(np.abs(label-pred)) ... >>> metric = mx.metric.np(custom_metric) """ def feval(label, pred): """Internal eval function.""" return numpy_feval(label, pred) feval.__name__ = numpy_feval.__name__ return CustomMetric(feval, name, allow_extra_outputs)
[ "def", "np", "(", "numpy_feval", ",", "name", "=", "None", ",", "allow_extra_outputs", "=", "False", ")", ":", "def", "feval", "(", "label", ",", "pred", ")", ":", "\"\"\"Internal eval function.\"\"\"", "return", "numpy_feval", "(", "label", ",", "pred", ")", "feval", ".", "__name__", "=", "numpy_feval", ".", "__name__", "return", "CustomMetric", "(", "feval", ",", "name", ",", "allow_extra_outputs", ")" ]
37.8125
0.004835
def _set_hundredgigabitethernet(self, v, load=False):
  """
  Setter method for hundredgigabitethernet, mapped from YANG variable /interface/hundredgigabitethernet (list)
  If this variable is read-only (config: false) in the source YANG file, then _set_hundredgigabitethernet is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_hundredgigabitethernet() directly.

  YANG Description: The list of HundredGigabitEthernet interfaces in the managed device. Each row represents a HundredGigabitEthernet interface. The list provides a way to discover all the 100G physical interfaces in a managed device. In case of logical-switch (VCS cluster), this list comprises of all the 100G physical interfaces across all the rbridges in the cluster.
  """
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGListType("name",hundredgigabitethernet.hundredgigabitethernet, yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}), is_container='list', yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """hundredgigabitethernet must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("name",hundredgigabitethernet.hundredgigabitethernet, yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}), is_container='list', yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
      })
  self.__hundredgigabitethernet = t
  if hasattr(self, '_set'):
    self._set()
[ "def", "_set_hundredgigabitethernet", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"name\"", ",", "hundredgigabitethernet", ".", "hundredgigabitethernet", ",", "yang_name", "=", "\"hundredgigabitethernet\"", ",", "rest_name", "=", "\"HundredGigabitEthernet\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "True", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'name'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'The list of HundredGigabitEthernet interfaces.'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'alt-name'", ":", "u'HundredGigabitEthernet'", ",", "u'sort-priority'", ":", "u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL'", ",", "u'cli-suppress-no'", ":", "None", ",", "u'cli-suppress-show-path'", ":", "None", ",", "u'cli-custom-range-actionpoint'", ":", "u'NsmRangeCliActionpoint'", ",", "u'cli-custom-range-enumerator'", ":", "u'NsmRangeCliActionpoint'", ",", "u'cli-no-match-completion'", ":", "None", ",", "u'cli-full-command'", ":", "None", ",", "u'callpoint'", ":", "u'interface_hundredgigabit'", ",", "u'cli-mode-name'", ":", "u'conf-if-hu-$(name)'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"hundredgigabitethernet\"", ",", "rest_name", "=", "\"HundredGigabitEthernet\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'The list of HundredGigabitEthernet interfaces.'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'alt-name'", ":", "u'HundredGigabitEthernet'", ",", "u'sort-priority'", ":", "u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL'", ",", "u'cli-suppress-no'", ":", "None", ",", "u'cli-suppress-show-path'", ":", "None", ",", "u'cli-custom-range-actionpoint'", ":", "u'NsmRangeCliActionpoint'", ",", "u'cli-custom-range-enumerator'", ":", "u'NsmRangeCliActionpoint'", ",", "u'cli-no-match-completion'", ":", "None", ",", "u'cli-full-command'", ":", "None", ",", "u'callpoint'", ":", "u'interface_hundredgigabit'", ",", "u'cli-mode-name'", ":", "u'conf-if-hu-$(name)'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-interface'", ",", "defining_module", "=", "'brocade-interface'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"hundredgigabitethernet must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"name\",hundredgigabitethernet.hundredgigabitethernet, yang_name=\"hundredgigabitethernet\", rest_name=\"HundredGigabitEthernet\", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': 
u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}), is_container='list', yang_name=\"hundredgigabitethernet\", rest_name=\"HundredGigabitEthernet\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__hundredgigabitethernet", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
145.870968
0.002636
def tokenize_words(self, text): """Tokenize an input string into a list of words (with punctuation removed).""" return [ self.strip_punctuation(word) for word in text.split(' ') if self.strip_punctuation(word) ]
[ "def", "tokenize_words", "(", "self", ",", "text", ")", ":", "return", "[", "self", ".", "strip_punctuation", "(", "word", ")", "for", "word", "in", "text", ".", "split", "(", "' '", ")", "if", "self", ".", "strip_punctuation", "(", "word", ")", "]" ]
42.333333
0.011583
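Assuming `strip_punctuation` trims leading/trailing punctuation (returning '' for punctuation-only tokens), the method behaves like this stand-alone sketch:

import string

def strip_punctuation(word):          # stand-in implementation
    return word.strip(string.punctuation)

text = 'Hello, world - again!'
words = [strip_punctuation(w) for w in text.split(' ') if strip_punctuation(w)]
# -> ['Hello', 'world', 'again']   ('-' strips down to '' and is dropped)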
def get(self): """ :return: response stats dict """ stats = {} if self.start_date is not None: stats["start_date"] = self.start_date if self.end_date is not None: stats["end_date"] = self.end_date if self.aggregated_by is not None: stats["aggregated_by"] = self.aggregated_by if self.sort_by_metric is not None: stats["sort_by_metric"] = self.sort_by_metric if self.sort_by_direction is not None: stats["sort_by_direction"] = self.sort_by_direction if self.limit is not None: stats["limit"] = self.limit if self.offset is not None: stats["offset"] = self.offset if self.categories is not None: stats['categories'] = [category.get() for category in self.categories] return stats
[ "def", "get", "(", "self", ")", ":", "stats", "=", "{", "}", "if", "self", ".", "start_date", "is", "not", "None", ":", "stats", "[", "\"start_date\"", "]", "=", "self", ".", "start_date", "if", "self", ".", "end_date", "is", "not", "None", ":", "stats", "[", "\"end_date\"", "]", "=", "self", ".", "end_date", "if", "self", ".", "aggregated_by", "is", "not", "None", ":", "stats", "[", "\"aggregated_by\"", "]", "=", "self", ".", "aggregated_by", "if", "self", ".", "sort_by_metric", "is", "not", "None", ":", "stats", "[", "\"sort_by_metric\"", "]", "=", "self", ".", "sort_by_metric", "if", "self", ".", "sort_by_direction", "is", "not", "None", ":", "stats", "[", "\"sort_by_direction\"", "]", "=", "self", ".", "sort_by_direction", "if", "self", ".", "limit", "is", "not", "None", ":", "stats", "[", "\"limit\"", "]", "=", "self", ".", "limit", "if", "self", ".", "offset", "is", "not", "None", ":", "stats", "[", "\"offset\"", "]", "=", "self", ".", "offset", "if", "self", ".", "categories", "is", "not", "None", ":", "stats", "[", "'categories'", "]", "=", "[", "category", ".", "get", "(", ")", "for", "category", "in", "self", ".", "categories", "]", "return", "stats" ]
38.695652
0.002193
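A hedged sketch of the resulting payload; `Stats` is a hypothetical name for the class this method belongs to, and plain attribute assignment is assumed to be allowed:

stats = Stats()
stats.start_date = '2024-01-01'
stats.limit = 10
stats.get()   # -> {'start_date': '2024-01-01', 'limit': 10}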
def submit_registration_form(self, form): """Submit a registration form. [client only] :Parameters: - `form`: the filled-in form. When form is `None` or its type is "cancel" the registration is to be canceled. :Types: - `form`: `pyxmpp.jabber.dataforms.Form`""" self.lock.acquire() try: if form and form.type!="cancel": self.registration_form = form iq = Iq(stanza_type = "set") iq.set_content(self.__register.submit_form(form)) self.set_response_handlers(iq, self.registration_success, self.registration_error) self.send(iq) else: self.__register = None finally: self.lock.release()
[ "def", "submit_registration_form", "(", "self", ",", "form", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "if", "form", "and", "form", ".", "type", "!=", "\"cancel\"", ":", "self", ".", "registration_form", "=", "form", "iq", "=", "Iq", "(", "stanza_type", "=", "\"set\"", ")", "iq", ".", "set_content", "(", "self", ".", "__register", ".", "submit_form", "(", "form", ")", ")", "self", ".", "set_response_handlers", "(", "iq", ",", "self", ".", "registration_success", ",", "self", ".", "registration_error", ")", "self", ".", "send", "(", "iq", ")", "else", ":", "self", ".", "__register", "=", "None", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
34.347826
0.007389
def sendall(self, s): """ Send data to the channel, without allowing partial results. Unlike `send`, this method continues to send data from the given string until either all data has been sent or an error occurs. Nothing is returned. :param str s: data to send. :raises socket.timeout: if sending stalled for longer than the timeout set by `settimeout`. :raises socket.error: if an error occurred before the entire string was sent. .. note:: If the channel is closed while only part of the data has been sent, there is no way to determine how much data (if any) was sent. This is irritating, but identically follows Python's API. """ while s: sent = self.send(s) s = s[sent:] return None
[ "def", "sendall", "(", "self", ",", "s", ")", ":", "while", "s", ":", "sent", "=", "self", ".", "send", "(", "s", ")", "s", "=", "s", "[", "sent", ":", "]", "return", "None" ]
38.363636
0.002312
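The same drain loop written with a memoryview so the per-iteration slice does not copy the remaining payload; a sketch of an alternative, not what the method above does:

def sendall_view(channel, data):
    view = memoryview(data)
    while view:                 # an exhausted memoryview is falsy
        sent = channel.send(view)
        view = view[sent:]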
def parse_coach_bsites_inf(infile): """Parse the Bsites.inf output file of COACH and return a list of rank-ordered binding site predictions Bsites.inf contains the summary of COACH clustering results after all other prediction algorithms have finished For each site (cluster), there are three lines: - Line 1: site number, c-score of coach prediction, cluster size - Line 2: algorithm, PDB ID, ligand ID, center of binding site (cartesian coordinates), c-score of the algorithm's prediction, binding residues from single template - Line 3: Statistics of ligands in the cluster C-score information: - "In our training data, a prediction with C-score>0.35 has average false positive and false negative rates below 0.16 and 0.13, respectively." (https://zhanglab.ccmb.med.umich.edu/COACH/COACH.pdf) Args: infile (str): Path to Bsites.inf Returns: list: Ranked list of dictionaries, keys defined below - ``site_num``: cluster which is the consensus binding site - ``c_score``: confidence score of the cluster prediction - ``cluster_size``: number of predictions within this cluster - ``algorithm``: main? algorithm used to make the prediction - ``pdb_template_id``: PDB ID of the template used to make the prediction - ``pdb_template_chain``: chain of the PDB which has the ligand - ``pdb_ligand``: predicted ligand to bind - ``binding_location_coords``: centroid of the predicted ligand position in the homology model - ``c_score_method``: confidence score for the main algorithm - ``binding_residues``: predicted residues to bind the ligand - ``ligand_cluster_counts``: number of predictions per ligand """ bsites_results = [] with open(infile) as pp: lines = list(filter(None, (line.rstrip() for line in pp))) for i in range(len(lines) // 3): bsites_site_dict = {} line1 = lines[i * 3].split('\t') line2 = lines[i * 3 + 1].split('\t') line3 = lines[i * 3 + 2] bsites_site_dict['site_num'] = line1[0] bsites_site_dict['c_score'] = float(line1[1]) bsites_site_dict['cluster_size'] = line1[2] bsites_site_dict['algorithm'] = line2[0] bsites_site_dict['pdb_template_id'] = line2[1][:4] bsites_site_dict['pdb_template_chain'] = line2[1][4] bsites_site_dict['pdb_ligand'] = line2[2] bsites_site_dict['binding_location_coords'] = tuple(float(x) for x in line2[3].split()) # TODO: what's the difference between this c-score and the cluster's c-score? # how is the cluster's c-score computed? it's not the average c-score of all methods # also why are some COFACTOR c-scores >1? # 160411 - seems like the COFACTOR "BS-score" is being reported here, not its c-score... tmp_split = line2[4].split(' :') bsites_site_dict['c_score_method'] = tmp_split[0] bsites_site_dict['binding_residues'] = tmp_split[1] bsites_site_dict['ligand_cluster_counts'] = line3 bsites_results.append(bsites_site_dict) return bsites_results
[ "def", "parse_coach_bsites_inf", "(", "infile", ")", ":", "bsites_results", "=", "[", "]", "with", "open", "(", "infile", ")", "as", "pp", ":", "lines", "=", "list", "(", "filter", "(", "None", ",", "(", "line", ".", "rstrip", "(", ")", "for", "line", "in", "pp", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "lines", ")", "//", "3", ")", ":", "bsites_site_dict", "=", "{", "}", "line1", "=", "lines", "[", "i", "*", "3", "]", ".", "split", "(", "'\\t'", ")", "line2", "=", "lines", "[", "i", "*", "3", "+", "1", "]", ".", "split", "(", "'\\t'", ")", "line3", "=", "lines", "[", "i", "*", "3", "+", "2", "]", "bsites_site_dict", "[", "'site_num'", "]", "=", "line1", "[", "0", "]", "bsites_site_dict", "[", "'c_score'", "]", "=", "float", "(", "line1", "[", "1", "]", ")", "bsites_site_dict", "[", "'cluster_size'", "]", "=", "line1", "[", "2", "]", "bsites_site_dict", "[", "'algorithm'", "]", "=", "line2", "[", "0", "]", "bsites_site_dict", "[", "'pdb_template_id'", "]", "=", "line2", "[", "1", "]", "[", ":", "4", "]", "bsites_site_dict", "[", "'pdb_template_chain'", "]", "=", "line2", "[", "1", "]", "[", "4", "]", "bsites_site_dict", "[", "'pdb_ligand'", "]", "=", "line2", "[", "2", "]", "bsites_site_dict", "[", "'binding_location_coords'", "]", "=", "tuple", "(", "float", "(", "x", ")", "for", "x", "in", "line2", "[", "3", "]", ".", "split", "(", ")", ")", "# TODO: what's the difference between this c-score and the cluster's c-score?", "# how is the cluster's c-score computed? it's not the average c-score of all methods", "# also why are some COFACTOR c-scores >1?", "# 160411 - seems like the COFACTOR \"BS-score\" is being reported here, not its c-score...", "tmp_split", "=", "line2", "[", "4", "]", ".", "split", "(", "' :'", ")", "bsites_site_dict", "[", "'c_score_method'", "]", "=", "tmp_split", "[", "0", "]", "bsites_site_dict", "[", "'binding_residues'", "]", "=", "tmp_split", "[", "1", "]", "bsites_site_dict", "[", "'ligand_cluster_counts'", "]", "=", "line3", "bsites_results", ".", "append", "(", "bsites_site_dict", ")", "return", "bsites_results" ]
44.859155
0.004916
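The three-line grouping idiom from the parser, reduced to plain Python (runnable as-is):

lines = ['s1-a', 's1-b', 's1-c', 's2-a', 's2-b', 's2-c']
for i in range(len(lines) // 3):
    line1, line2, line3 = lines[i * 3], lines[i * 3 + 1], lines[i * 3 + 2]
    # each iteration sees one complete three-line record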
def SampleSum(dists, n): """Draws a sample of sums from a list of distributions. dists: sequence of Pmf or Cdf objects n: sample size returns: new Pmf of sums """ pmf = MakePmfFromList(RandomSum(dists) for i in xrange(n)) return pmf
[ "def", "SampleSum", "(", "dists", ",", "n", ")", ":", "pmf", "=", "MakePmfFromList", "(", "RandomSum", "(", "dists", ")", "for", "i", "in", "xrange", "(", "n", ")", ")", "return", "pmf" ]
25.3
0.003817
def download_s3(bucket_name, file_key, file_path, force=False):
    """Download a remote file from S3.
    """
    file_path = path(file_path)
    bucket = open_s3(bucket_name)

    file_dir = file_path.dirname()
    file_dir.makedirs()

    s3_key = bucket.get_key(file_key)
    if file_path.exists():
        file_data = file_path.bytes()
        file_md5, file_md5_64 = s3_key.get_md5_from_hexdigest(hashlib.md5(file_data).hexdigest())

        # Check the hash.
        try:
            s3_md5 = s3_key.etag.replace('"', '')
        except KeyError:
            pass
        else:
            if s3_md5 == file_md5:
                info('Hash is the same. Skipping %s' % file_path)
                return

            elif not force:
                # Check if file on S3 is older than local file.
                s3_datetime = datetime.datetime(*time.strptime(
                    s3_key.last_modified, '%a, %d %b %Y %H:%M:%S %Z')[0:6])
                local_datetime = datetime.datetime.utcfromtimestamp(file_path.stat().st_mtime)
                if s3_datetime < local_datetime:
                    info("File at %s is less recent than the local version." % (file_key))
                    return

    # The S3 copy is newer; download it
    info("Downloading %s..." % (file_key))

    try:
        with open(file_path, 'w') as fo:
            s3_key.get_contents_to_file(fo)
    except Exception as e:
        error("Failed: %s" % e)
        raise
[ "def", "download_s3", "(", "bucket_name", ",", "file_key", ",", "file_path", ",", "force", "=", "False", ")", ":", "file_path", "=", "path", "(", "file_path", ")", "bucket", "=", "open_s3", "(", "bucket_name", ")", "file_dir", "=", "file_path", ".", "dirname", "(", ")", "file_dir", ".", "makedirs", "(", ")", "s3_key", "=", "bucket", ".", "get_key", "(", "file_key", ")", "if", "file_path", ".", "exists", "(", ")", ":", "file_data", "=", "file_path", ".", "bytes", "(", ")", "file_md5", ",", "file_md5_64", "=", "s3_key", ".", "get_md5_from_hexdigest", "(", "hashlib", ".", "md5", "(", "file_data", ")", ".", "hexdigest", "(", ")", ")", "# Check the hash.", "try", ":", "s3_md5", "=", "s3_key", ".", "etag", ".", "replace", "(", "'\"'", ",", "''", ")", "except", "KeyError", ":", "pass", "else", ":", "if", "s3_md5", "==", "file_md5", ":", "info", "(", "'Hash is the same. Skipping %s'", "%", "file_path", ")", "return", "elif", "not", "force", ":", "# Check if file on S3 is older than local file.", "s3_datetime", "=", "datetime", ".", "datetime", "(", "*", "time", ".", "strptime", "(", "s3_key", ".", "last_modified", ",", "'%a, %d %b %Y %H:%M:%S %Z'", ")", "[", "0", ":", "6", "]", ")", "local_datetime", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "file_path", ".", "stat", "(", ")", ".", "st_mtime", ")", "if", "s3_datetime", "<", "local_datetime", ":", "info", "(", "\"File at %s is less recent than the local version.\"", "%", "(", "file_key", ")", ")", "return", "# If it is newer, let's process and upload", "info", "(", "\"Downloading %s...\"", "%", "(", "file_key", ")", ")", "try", ":", "with", "open", "(", "file_path", ",", "'w'", ")", "as", "fo", ":", "s3_key", ".", "get_contents_to_file", "(", "fo", ")", "except", "Exception", "as", "e", ":", "error", "(", "\"Failed: %s\"", "%", "e", ")", "raise" ]
33.833333
0.002736
def quick_url(comment): """Return the URL for the comment without fetching its submission.""" def to_id(fullname): return fullname.split('_', 1)[1] return ('http://www.reddit.com/r/{}/comments/{}/_/{}?context=3' .format(comment.subreddit.display_name, to_id(comment.link_id), comment.id))
[ "def", "quick_url", "(", "comment", ")", ":", "def", "to_id", "(", "fullname", ")", ":", "return", "fullname", ".", "split", "(", "'_'", ",", "1", ")", "[", "1", "]", "return", "(", "'http://www.reddit.com/r/{}/comments/{}/_/{}?context=3'", ".", "format", "(", "comment", ".", "subreddit", ".", "display_name", ",", "to_id", "(", "comment", ".", "link_id", ")", ",", "comment", ".", "id", ")", ")" ]
47.714286
0.002941
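The output is fully determined by the format string above; for a comment with id 'def34' whose submission fullname is 't3_abc12' in r/python:

def to_id(fullname):
    return fullname.split('_', 1)[1]

url = ('http://www.reddit.com/r/{}/comments/{}/_/{}?context=3'
       .format('python', to_id('t3_abc12'), 'def34'))
# -> 'http://www.reddit.com/r/python/comments/abc12/_/def34?context=3'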
def safe_joinall(greenlets, timeout=None, raise_error=False):
    """ Wrapper for gevent.joinall.
    If the greenlet that waits for the joins is killed, it kills all the
    greenlets it was joining. """
    greenlets = list(greenlets)
    try:
        gevent.joinall(greenlets, timeout=timeout, raise_error=raise_error)
    except gevent.GreenletExit:
        [greenlet.kill() for greenlet in greenlets if not greenlet.ready()]
        raise
    return greenlets
[ "def", "safe_joinall", "(", "greenlets", ",", "timeout", "=", "None", ",", "raise_error", "=", "False", ")", ":", "greenlets", "=", "list", "(", "greenlets", ")", "try", ":", "gevent", ".", "joinall", "(", "greenlets", ",", "timeout", "=", "timeout", ",", "raise_error", "=", "raise_error", ")", "except", "gevent", ".", "GreenletExit", ":", "[", "greenlet", ".", "kill", "(", ")", "for", "greenlet", "in", "greenlets", "if", "not", "greenlet", ".", "ready", "(", ")", "]", "raise", "return", "greenlets" ]
37.833333
0.004301
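A hedged gevent sketch of the behaviour the wrapper adds: killing the waiter propagates a kill to every greenlet it was still joining:

import gevent

workers = [gevent.spawn(gevent.sleep, 10) for _ in range(3)]
waiter = gevent.spawn(safe_joinall, workers)
gevent.sleep(0.1)           # let the join start
waiter.kill()               # GreenletExit inside safe_joinall kills the workers
assert all(w.dead for w in workers)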
def setup_versioneer():
    """
    Temporarily generate a versioneer.py file in the project root directory
    :return:
    """
    try:
        # assume versioneer.py was generated using "versioneer install" command
        import versioneer
        versioneer.get_version()
    except ImportError:
        # it looks like versioneer.py is missing
        # let's assume that the versioneer package is installed
        # and the versioneer binary is present in $PATH
        import subprocess
        try:
            # call versioneer install to generate versioneer.py
            subprocess.check_output(["versioneer", "install"])
        except OSError:
            # it looks like versioneer is missing from $PATH
            # probably versioneer is installed in some user directory
            # query pip for list of files in versioneer package
            # line below is equivalent to putting result of
            # "pip show -f versioneer" command to string output
            output = pip_command_output(["show", "-f", "versioneer"])
            # now we parse the results
            import os
            # find absolute path where *versioneer package* was installed
            # and store it in main_path
            main_path = [x[len("Location: "):] for x in output.splitlines()
                         if x.startswith("Location")][0]
            # find path relative to main_path where
            # *versioneer binary* was installed
            bin_path = [x[len("  "):] for x in output.splitlines()
                        if x.endswith(os.path.sep + "versioneer")][0]
            # exe_path is absolute path to *versioneer binary*
            exe_path = os.path.join(main_path, bin_path)
            # call versioneer install to generate versioneer.py
            # line below is equivalent to running in terminal
            # "python versioneer install"
            subprocess.check_output(["python", exe_path, "install"])
[ "def", "setup_versioneer", "(", ")", ":", "try", ":", "# assume versioneer.py was generated using \"versioneer install\" command", "import", "versioneer", "versioneer", ".", "get_version", "(", ")", "except", "ImportError", ":", "# it looks versioneer.py is missing", "# lets assume that versioneer package is installed", "# and versioneer binary is present in $PATH", "import", "subprocess", "try", ":", "# call versioneer install to generate versioneer.py", "subprocess", ".", "check_output", "(", "[", "\"versioneer\"", ",", "\"install\"", "]", ")", "except", "OSError", ":", "# it looks versioneer is missing from $PATH", "# probably versioneer is installed in some user directory", "# query pip for list of files in versioneer package", "# line below is equivalen to putting result of", "# \"pip show -f versioneer\" command to string output", "output", "=", "pip_command_output", "(", "[", "\"show\"", ",", "\"-f\"", ",", "\"versioneer\"", "]", ")", "# now we parse the results", "import", "os", "# find absolute path where *versioneer package* was installed", "# and store it in main_path", "main_path", "=", "[", "x", "[", "len", "(", "\"Location: \"", ")", ":", "]", "for", "x", "in", "output", ".", "splitlines", "(", ")", "if", "x", ".", "startswith", "(", "\"Location\"", ")", "]", "[", "0", "]", "# find path relative to main_path where", "# *versioneer binary* was installed", "bin_path", "=", "[", "x", "[", "len", "(", "\" \"", ")", ":", "]", "for", "x", "in", "output", ".", "splitlines", "(", ")", "if", "x", ".", "endswith", "(", "os", ".", "path", ".", "sep", "+", "\"versioneer\"", ")", "]", "[", "0", "]", "# exe_path is absolute path to *versioneer binary*", "exe_path", "=", "os", ".", "path", ".", "join", "(", "main_path", ",", "bin_path", ")", "# call versioneer install to generate versioneer.py", "# line below is equivalent to running in terminal", "# \"python versioneer install\"", "subprocess", ".", "check_output", "(", "[", "\"python\"", ",", "exe_path", ",", "\"install\"", "]", ")" ]
43.906977
0.000518
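The `pip show -f` parsing in isolation; `output` is a fabricated stand-in for the command's real output (assuming a Unix os.path.sep), so paths here are illustrative only:

import os

output = ("Name: versioneer\n"
          "Location: /usr/lib/python3/site-packages\n"
          "Files:\n"
          "  ../../../bin/versioneer\n")

main_path = [x[len("Location: "):] for x in output.splitlines()
             if x.startswith("Location")][0]
bin_path = [x[len("  "):] for x in output.splitlines()
            if x.endswith(os.path.sep + "versioneer")][0]
exe_path = os.path.join(main_path, bin_path)   # absolute path to the script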
def update(self, dtrain, iteration, fobj=None): """ Update for one iteration, with objective function calculated internally. Parameters ---------- dtrain : DMatrix Training data. iteration : int Current iteration number. fobj : function Customized objective function. """ if not isinstance(dtrain, DMatrix): raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__)) self._validate_features(dtrain) if fobj is None: _check_call(_LIB.XGBoosterUpdateOneIter(self.handle, iteration, dtrain.handle)) else: pred = self.predict(dtrain) grad, hess = fobj(pred, dtrain) self.boost(dtrain, grad, hess)
[ "def", "update", "(", "self", ",", "dtrain", ",", "iteration", ",", "fobj", "=", "None", ")", ":", "if", "not", "isinstance", "(", "dtrain", ",", "DMatrix", ")", ":", "raise", "TypeError", "(", "'invalid training matrix: {}'", ".", "format", "(", "type", "(", "dtrain", ")", ".", "__name__", ")", ")", "self", ".", "_validate_features", "(", "dtrain", ")", "if", "fobj", "is", "None", ":", "_check_call", "(", "_LIB", ".", "XGBoosterUpdateOneIter", "(", "self", ".", "handle", ",", "iteration", ",", "dtrain", ".", "handle", ")", ")", "else", ":", "pred", "=", "self", ".", "predict", "(", "dtrain", ")", "grad", ",", "hess", "=", "fobj", "(", "pred", ",", "dtrain", ")", "self", ".", "boost", "(", "dtrain", ",", "grad", ",", "hess", ")" ]
34
0.006219
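A hedged sketch of a custom objective for `fobj`; it must return per-row gradient and hessian, and `bst`/`dtrain` are assumed to already exist:

import numpy as np

def squared_error_obj(pred, dtrain):
    label = dtrain.get_label()
    grad = pred - label           # d/dpred of 0.5 * (pred - label)**2
    hess = np.ones_like(pred)     # second derivative is constant 1
    return grad, hess

# bst.update(dtrain, iteration=0, fobj=squared_error_obj)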
def _get_encrypted_credentials(self, context): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 5 https://msdn.microsoft.com/en-us/library/cc226791.aspx After the client has verified the server's authenticity, it encrypts the user's credentials with the authentication protocol's encryption services. The resulting value is encapsulated in the authInfo field of the TSRequest structure and sent over the encrypted TLS channel to the server :param context: The authenticated security context :return: The encrypted TSRequest that contains the user's credentials """ ts_password = TSPasswordCreds() ts_password['domainName'] = context.domain.encode('utf-16-le') ts_password['userName'] = context.username.encode('utf-16-le') ts_password['password'] = context.password.encode('utf-16-le') ts_credentials = TSCredentials() ts_credentials['credType'] = ts_password.CRED_TYPE ts_credentials['credentials'] = encoder.encode(ts_password) ts_request = TSRequest() enc_credentials = context.wrap(encoder.encode(ts_credentials)) ts_request['authInfo'] = enc_credentials return encoder.encode(ts_request)
[ "def", "_get_encrypted_credentials", "(", "self", ",", "context", ")", ":", "ts_password", "=", "TSPasswordCreds", "(", ")", "ts_password", "[", "'domainName'", "]", "=", "context", ".", "domain", ".", "encode", "(", "'utf-16-le'", ")", "ts_password", "[", "'userName'", "]", "=", "context", ".", "username", ".", "encode", "(", "'utf-16-le'", ")", "ts_password", "[", "'password'", "]", "=", "context", ".", "password", ".", "encode", "(", "'utf-16-le'", ")", "ts_credentials", "=", "TSCredentials", "(", ")", "ts_credentials", "[", "'credType'", "]", "=", "ts_password", ".", "CRED_TYPE", "ts_credentials", "[", "'credentials'", "]", "=", "encoder", ".", "encode", "(", "ts_password", ")", "ts_request", "=", "TSRequest", "(", ")", "enc_credentials", "=", "context", ".", "wrap", "(", "encoder", ".", "encode", "(", "ts_credentials", ")", ")", "ts_request", "[", "'authInfo'", "]", "=", "enc_credentials", "return", "encoder", ".", "encode", "(", "ts_request", ")" ]
45.107143
0.00155
def _exponentiate_general_case(pauli_term, param): """ Returns a Quil (Program()) object corresponding to the exponential of the pauli_term object, i.e. exp[-1.0j * param * pauli_term] :param PauliTerm pauli_term: A PauliTerm to exponentiate :param float param: scalar, non-complex, value :returns: A Quil program object :rtype: Program """ def reverse_hack(p): # A hack to produce a *temporary* program which reverses p. revp = Program() revp.inst(list(reversed(p.instructions))) return revp quil_prog = Program() change_to_z_basis = Program() change_to_original_basis = Program() cnot_seq = Program() prev_index = None highest_target_index = None for index, op in pauli_term: if 'X' == op: change_to_z_basis.inst(H(index)) change_to_original_basis.inst(H(index)) elif 'Y' == op: change_to_z_basis.inst(RX(np.pi / 2.0, index)) change_to_original_basis.inst(RX(-np.pi / 2.0, index)) elif 'I' == op: continue if prev_index is not None: cnot_seq.inst(CNOT(prev_index, index)) prev_index = index highest_target_index = index # building rotation circuit quil_prog += change_to_z_basis quil_prog += cnot_seq quil_prog.inst(RZ(2.0 * pauli_term.coefficient * param, highest_target_index)) quil_prog += reverse_hack(cnot_seq) quil_prog += change_to_original_basis return quil_prog
[ "def", "_exponentiate_general_case", "(", "pauli_term", ",", "param", ")", ":", "def", "reverse_hack", "(", "p", ")", ":", "# A hack to produce a *temporary* program which reverses p.", "revp", "=", "Program", "(", ")", "revp", ".", "inst", "(", "list", "(", "reversed", "(", "p", ".", "instructions", ")", ")", ")", "return", "revp", "quil_prog", "=", "Program", "(", ")", "change_to_z_basis", "=", "Program", "(", ")", "change_to_original_basis", "=", "Program", "(", ")", "cnot_seq", "=", "Program", "(", ")", "prev_index", "=", "None", "highest_target_index", "=", "None", "for", "index", ",", "op", "in", "pauli_term", ":", "if", "'X'", "==", "op", ":", "change_to_z_basis", ".", "inst", "(", "H", "(", "index", ")", ")", "change_to_original_basis", ".", "inst", "(", "H", "(", "index", ")", ")", "elif", "'Y'", "==", "op", ":", "change_to_z_basis", ".", "inst", "(", "RX", "(", "np", ".", "pi", "/", "2.0", ",", "index", ")", ")", "change_to_original_basis", ".", "inst", "(", "RX", "(", "-", "np", ".", "pi", "/", "2.0", ",", "index", ")", ")", "elif", "'I'", "==", "op", ":", "continue", "if", "prev_index", "is", "not", "None", ":", "cnot_seq", ".", "inst", "(", "CNOT", "(", "prev_index", ",", "index", ")", ")", "prev_index", "=", "index", "highest_target_index", "=", "index", "# building rotation circuit", "quil_prog", "+=", "change_to_z_basis", "quil_prog", "+=", "cnot_seq", "quil_prog", ".", "inst", "(", "RZ", "(", "2.0", "*", "pauli_term", ".", "coefficient", "*", "param", ",", "highest_target_index", ")", ")", "quil_prog", "+=", "reverse_hack", "(", "cnot_seq", ")", "quil_prog", "+=", "change_to_original_basis", "return", "quil_prog" ]
29.54
0.001311
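A hedged pyquil sketch; `PauliTerm` multiplication composes operators on different qubits, so exponentiating 0.5 * X0 * Z1 with param 0.25 might look like:

from pyquil.paulis import PauliTerm

term = PauliTerm('X', 0, 0.5) * PauliTerm('Z', 1)
prog = _exponentiate_general_case(term, 0.25)
# prog conjugates an RZ(2 * 0.5 * 0.25) on qubit 1 with H and CNOT gates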
def _check_disabled(self): """Check if health check is disabled. It logs a message if health check is disabled and it also adds an item to the action queue based on 'on_disabled' setting. Returns: True if check is disabled otherwise False. """ if self.config['check_disabled']: if self.config['on_disabled'] == 'withdraw': self.log.info("Check is disabled and ip_prefix will be " "withdrawn") self.log.info("adding %s in the queue", self.ip_with_prefixlen) self.action.put(self.del_operation) self.log.info("Check is now permanently disabled") elif self.config['on_disabled'] == 'advertise': self.log.info("check is disabled, ip_prefix wont be withdrawn") self.log.info("adding %s in the queue", self.ip_with_prefixlen) self.action.put(self.add_operation) self.log.info('check is now permanently disabled') return True return False
[ "def", "_check_disabled", "(", "self", ")", ":", "if", "self", ".", "config", "[", "'check_disabled'", "]", ":", "if", "self", ".", "config", "[", "'on_disabled'", "]", "==", "'withdraw'", ":", "self", ".", "log", ".", "info", "(", "\"Check is disabled and ip_prefix will be \"", "\"withdrawn\"", ")", "self", ".", "log", ".", "info", "(", "\"adding %s in the queue\"", ",", "self", ".", "ip_with_prefixlen", ")", "self", ".", "action", ".", "put", "(", "self", ".", "del_operation", ")", "self", ".", "log", ".", "info", "(", "\"Check is now permanently disabled\"", ")", "elif", "self", ".", "config", "[", "'on_disabled'", "]", "==", "'advertise'", ":", "self", ".", "log", ".", "info", "(", "\"check is disabled, ip_prefix wont be withdrawn\"", ")", "self", ".", "log", ".", "info", "(", "\"adding %s in the queue\"", ",", "self", ".", "ip_with_prefixlen", ")", "self", ".", "action", ".", "put", "(", "self", ".", "add_operation", ")", "self", ".", "log", ".", "info", "(", "'check is now permanently disabled'", ")", "return", "True", "return", "False" ]
41.269231
0.001821
def set_(key, value, setting=None, conf_file=_DEFAULT_CONF): ''' Set a new value for a specific configuration line. :param str key: The command or block to configure. :param str value: The command value or command of the block specified by the key parameter. :param str setting: The command value for the command specified by the value parameter. :param str conf_file: The logrotate configuration file. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' logrotate.set rotate 2 Can also be used to set a single value inside a multiline configuration block. For instance, to change rotate in the following block: .. code-block:: text /var/log/wtmp { monthly create 0664 root root rotate 1 } Use the following command: .. code-block:: bash salt '*' logrotate.set /var/log/wtmp rotate 2 This module also has the ability to scan files inside an include directory, and make changes in the appropriate file. ''' conf = _parse_conf(conf_file) for include in conf['include files']: if key in conf['include files'][include]: conf_file = os.path.join(conf['include'], include) new_line = six.text_type() kwargs = { 'flags': 8, 'backup': False, 'path': conf_file, 'pattern': '^{0}.*'.format(key), 'show_changes': False } if setting is None: current_value = conf.get(key, False) if isinstance(current_value, dict): error_msg = ('Error: {0} includes a dict, and a specific setting inside the ' 'dict was not declared').format(key) raise SaltInvocationError(error_msg) if value == current_value: _LOG.debug("Command '%s' already has: %s", key, value) return True # This is the new config line that will be set if value is True: new_line = key elif value: new_line = '{0} {1}'.format(key, value) kwargs.update({'prepend_if_not_found': True}) else: stanza = conf.get(key, dict()) if stanza and not isinstance(stanza, dict): error_msg = ('Error: A setting for a dict was declared, but the ' 'configuration line given is not a dict') raise SaltInvocationError(error_msg) if setting == stanza.get(value, False): _LOG.debug("Command '%s' already has: %s", value, setting) return True # We're going to be rewriting an entire stanza if setting: stanza[value] = setting else: del stanza[value] new_line = _dict_to_stanza(key, stanza) kwargs.update({ 'pattern': '^{0}.*?{{.*?}}'.format(key), 'flags': 24, 'append_if_not_found': True }) kwargs.update({'repl': new_line}) _LOG.debug("Setting file '%s' line: %s", conf_file, new_line) return __salt__['file.replace'](**kwargs)
[ "def", "set_", "(", "key", ",", "value", ",", "setting", "=", "None", ",", "conf_file", "=", "_DEFAULT_CONF", ")", ":", "conf", "=", "_parse_conf", "(", "conf_file", ")", "for", "include", "in", "conf", "[", "'include files'", "]", ":", "if", "key", "in", "conf", "[", "'include files'", "]", "[", "include", "]", ":", "conf_file", "=", "os", ".", "path", ".", "join", "(", "conf", "[", "'include'", "]", ",", "include", ")", "new_line", "=", "six", ".", "text_type", "(", ")", "kwargs", "=", "{", "'flags'", ":", "8", ",", "'backup'", ":", "False", ",", "'path'", ":", "conf_file", ",", "'pattern'", ":", "'^{0}.*'", ".", "format", "(", "key", ")", ",", "'show_changes'", ":", "False", "}", "if", "setting", "is", "None", ":", "current_value", "=", "conf", ".", "get", "(", "key", ",", "False", ")", "if", "isinstance", "(", "current_value", ",", "dict", ")", ":", "error_msg", "=", "(", "'Error: {0} includes a dict, and a specific setting inside the '", "'dict was not declared'", ")", ".", "format", "(", "key", ")", "raise", "SaltInvocationError", "(", "error_msg", ")", "if", "value", "==", "current_value", ":", "_LOG", ".", "debug", "(", "\"Command '%s' already has: %s\"", ",", "key", ",", "value", ")", "return", "True", "# This is the new config line that will be set", "if", "value", "is", "True", ":", "new_line", "=", "key", "elif", "value", ":", "new_line", "=", "'{0} {1}'", ".", "format", "(", "key", ",", "value", ")", "kwargs", ".", "update", "(", "{", "'prepend_if_not_found'", ":", "True", "}", ")", "else", ":", "stanza", "=", "conf", ".", "get", "(", "key", ",", "dict", "(", ")", ")", "if", "stanza", "and", "not", "isinstance", "(", "stanza", ",", "dict", ")", ":", "error_msg", "=", "(", "'Error: A setting for a dict was declared, but the '", "'configuration line given is not a dict'", ")", "raise", "SaltInvocationError", "(", "error_msg", ")", "if", "setting", "==", "stanza", ".", "get", "(", "value", ",", "False", ")", ":", "_LOG", ".", "debug", "(", "\"Command '%s' already has: %s\"", ",", "value", ",", "setting", ")", "return", "True", "# We're going to be rewriting an entire stanza", "if", "setting", ":", "stanza", "[", "value", "]", "=", "setting", "else", ":", "del", "stanza", "[", "value", "]", "new_line", "=", "_dict_to_stanza", "(", "key", ",", "stanza", ")", "kwargs", ".", "update", "(", "{", "'pattern'", ":", "'^{0}.*?{{.*?}}'", ".", "format", "(", "key", ")", ",", "'flags'", ":", "24", ",", "'append_if_not_found'", ":", "True", "}", ")", "kwargs", ".", "update", "(", "{", "'repl'", ":", "new_line", "}", ")", "_LOG", ".", "debug", "(", "\"Setting file '%s' line: %s\"", ",", "conf_file", ",", "new_line", ")", "return", "__salt__", "[", "'file.replace'", "]", "(", "*", "*", "kwargs", ")" ]
30.089109
0.001274
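A note on the numeric flags passed to file.replace above, assuming the integers are forwarded unchanged to Python's re module (8 == re.MULTILINE, 24 == re.MULTILINE | re.DOTALL): DOTALL is what lets the stanza pattern's non-greedy .*? cross the newlines between the braces. A minimal sketch of that substitution:

import re

conf = '''/var/log/wtmp {
    monthly
    rotate 1
}'''

# 8 == re.MULTILINE and 24 == re.MULTILINE | re.DOTALL; with DOTALL the
# pattern '^<key>.*?{.*?}' matches the whole stanza, not a single line.
pattern = '^{0}.*?{{.*?}}'.format('/var/log/wtmp')
replacement = '/var/log/wtmp {\n    monthly\n    rotate 2\n}'
print(re.sub(pattern, replacement, conf, flags=re.MULTILINE | re.DOTALL))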
def sort(self, *args, **kwargs):
        """Sort the MultiMap.

        Takes the same arguments as list.sort, and operates on the
        (key, value) pairs.

        >>> m = MutableMultiMap()
        >>> m['c'] = 1
        >>> m['b'] = 3
        >>> m['a'] = 2

        >>> m.sort()
        >>> m.keys()
        ['a', 'b', 'c']

        >>> m.sort(key=lambda x: x[1])
        >>> m.keys()
        ['c', 'a', 'b']

        """
        self._pairs.sort(*args, **kwargs)
        self._rebuild_key_ids()
[ "def", "sort", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_pairs", ".", "sort", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_rebuild_key_ids", "(", ")" ]
25.5
0.009452
def uninstall(**kwargs): """Uninstall the current pre-commit hook.""" force = kwargs.get('force') restore_legacy = kwargs.get('restore_legacy') colorama.init(strip=kwargs.get('no_color')) git_dir = current_git_dir() if git_dir is None: output(NOT_GIT_REPO_MSG) exit(1) hook_path = os.path.join(git_dir, 'hooks', 'pre-commit') if not os.path.isfile(hook_path): output(NO_HOOK_INSTALLED_MSG) exit(0) hook_hash = identify_hook(hook_path) if hook_hash: if not force: if not click.confirm(CONFIRM_UNINSTALL_HOOK_MSG, default=False): output(UNINSTALL_ABORTED_MSG) exit(1) else: output(CURRENT_HOOK_NOT_THERAPIST_MSG) exit(1) legacy_hook_path = os.path.join(git_dir, 'hooks', 'pre-commit.legacy') if os.path.isfile(legacy_hook_path): if not force and not restore_legacy: output(LEGACY_HOOK_EXISTS_MSG) restore_legacy = click.confirm(CONFIRM_RESTORE_LEGACY_HOOK_MSG, default=True) if restore_legacy: output(COPYING_LEGACY_HOOK_MSG, end='') shutil.copy2(legacy_hook_path, hook_path) os.remove(legacy_hook_path) output(DONE_COPYING_LEGACY_HOOK_MSG) exit(0) else: if force or click.confirm('Would you like to remove the legacy hook?', default=False): output(REMOVING_LEGACY_HOOK_MSG, end='') os.remove(legacy_hook_path) output(DONE_REMOVING_LEGACY_HOOK_MSG) output(UNINSTALLING_HOOK_MSG, end='') os.remove(hook_path) output(DONE_UNINSTALLING_HOOK_MSG)
[ "def", "uninstall", "(", "*", "*", "kwargs", ")", ":", "force", "=", "kwargs", ".", "get", "(", "'force'", ")", "restore_legacy", "=", "kwargs", ".", "get", "(", "'restore_legacy'", ")", "colorama", ".", "init", "(", "strip", "=", "kwargs", ".", "get", "(", "'no_color'", ")", ")", "git_dir", "=", "current_git_dir", "(", ")", "if", "git_dir", "is", "None", ":", "output", "(", "NOT_GIT_REPO_MSG", ")", "exit", "(", "1", ")", "hook_path", "=", "os", ".", "path", ".", "join", "(", "git_dir", ",", "'hooks'", ",", "'pre-commit'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "hook_path", ")", ":", "output", "(", "NO_HOOK_INSTALLED_MSG", ")", "exit", "(", "0", ")", "hook_hash", "=", "identify_hook", "(", "hook_path", ")", "if", "hook_hash", ":", "if", "not", "force", ":", "if", "not", "click", ".", "confirm", "(", "CONFIRM_UNINSTALL_HOOK_MSG", ",", "default", "=", "False", ")", ":", "output", "(", "UNINSTALL_ABORTED_MSG", ")", "exit", "(", "1", ")", "else", ":", "output", "(", "CURRENT_HOOK_NOT_THERAPIST_MSG", ")", "exit", "(", "1", ")", "legacy_hook_path", "=", "os", ".", "path", ".", "join", "(", "git_dir", ",", "'hooks'", ",", "'pre-commit.legacy'", ")", "if", "os", ".", "path", ".", "isfile", "(", "legacy_hook_path", ")", ":", "if", "not", "force", "and", "not", "restore_legacy", ":", "output", "(", "LEGACY_HOOK_EXISTS_MSG", ")", "restore_legacy", "=", "click", ".", "confirm", "(", "CONFIRM_RESTORE_LEGACY_HOOK_MSG", ",", "default", "=", "True", ")", "if", "restore_legacy", ":", "output", "(", "COPYING_LEGACY_HOOK_MSG", ",", "end", "=", "''", ")", "shutil", ".", "copy2", "(", "legacy_hook_path", ",", "hook_path", ")", "os", ".", "remove", "(", "legacy_hook_path", ")", "output", "(", "DONE_COPYING_LEGACY_HOOK_MSG", ")", "exit", "(", "0", ")", "else", ":", "if", "force", "or", "click", ".", "confirm", "(", "'Would you like to remove the legacy hook?'", ",", "default", "=", "False", ")", ":", "output", "(", "REMOVING_LEGACY_HOOK_MSG", ",", "end", "=", "''", ")", "os", ".", "remove", "(", "legacy_hook_path", ")", "output", "(", "DONE_REMOVING_LEGACY_HOOK_MSG", ")", "output", "(", "UNINSTALLING_HOOK_MSG", ",", "end", "=", "''", ")", "os", ".", "remove", "(", "hook_path", ")", "output", "(", "DONE_UNINSTALLING_HOOK_MSG", ")" ]
32
0.001784
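identify_hook is not shown in this snippet; presumably it fingerprints the installed file and returns a truthy hash only when the file is a Therapist-managed hook. A hedged sketch of that kind of check (the helper body and the hash set are hypothetical, not the package's actual implementation):

import hashlib

# Hypothetical set of digests for hook scripts Therapist has shipped.
KNOWN_HOOK_HASHES = set()

def identify_hook(path):
    # Hash the installed hook; return the digest when it matches a known
    # Therapist hook, and None otherwise (mirroring how uninstall uses it).
    with open(path, 'rb') as fd:
        digest = hashlib.sha256(fd.read()).hexdigest()
    return digest if digest in KNOWN_HOOK_HASHES else None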
def main():
    """
    The main function of the Andes command-line tool.

    This function executes the following workflow:

    * Parse the command line inputs
    * Show the tool preamble
    * Output the requested help, edit/save configs, or remove outputs.
      Exit the main program if any of the above is executed
    * Process the input files and call ``main.run()`` using single- or
      multi-processing
    * Show the execution time and exit

    Returns
    -------
    None
    """
    t0, s = elapsed()

    # parse command line arguments
    args = vars(cli_new())

    # configure stream handler verbose level
    config_logger(log_path=misc.get_log_dir(), stream_level=args['verbose'])
    # show preamble
    preamble()

    logger.debug('command line arguments:')
    logger.debug(pprint.pformat(args))

    if andeshelp(**args) or search(**args) or edit_conf(**args) or remove_output(**args) \
            or save_config(**args):
        return

    # process input files
    if len(args['filename']) == 0:
        logger.info('error: no input file. Try \'andes -h\' for help.')

    # preprocess cli args
    path = args.get('input_path', os.getcwd())

    ncpu = args['ncpu']
    if ncpu == 0 or ncpu > os.cpu_count():
        ncpu = os.cpu_count()

    cases = []

    for file in args['filename']:
        # use absolute path for cases which will be respected by FileMan
        full_paths = os.path.abspath(os.path.join(path, file))
        found = glob.glob(full_paths)
        if len(found) == 0:
            logger.info('error: file {} does not exist.'.format(full_paths))
        else:
            cases += found

    # remove folders and make cases unique
    cases = list(set(cases))
    valid_cases = []
    for case in cases:
        if os.path.isfile(case):
            valid_cases.append(case)

    logger.debug('Found files: ' + pprint.pformat(valid_cases))

    if len(valid_cases) <= 0:
        pass
    elif len(valid_cases) == 1:
        run(valid_cases[0], **args)
    else:
        # set verbose level for multiprocessing
        logger.info('Processing {} jobs on {} CPUs'.format(len(valid_cases), ncpu))
        logger.handlers[1].setLevel(logging.WARNING)

        # start processes
        jobs = []
        for idx, file in enumerate(valid_cases):
            args['pid'] = idx
            job = Process(
                name='Process {0:d}'.format(idx),
                target=run,
                args=(file, ),
                kwargs=args)
            jobs.append(job)
            job.start()

            start_msg = 'Process {:d} <{:s}> started.'.format(idx, file)
            print(start_msg)
            logger.debug(start_msg)

            if (idx % ncpu == ncpu - 1) or (idx == len(valid_cases) - 1):
                sleep(0.1)
                for job in jobs:
                    job.join()
                jobs = []

        # restore command line output when all jobs are done
        logger.handlers[1].setLevel(logging.INFO)

    t0, s0 = elapsed(t0)

    if len(valid_cases) == 1:
        logger.info('-> Single process finished in {:s}.'.format(s0))
    elif len(valid_cases) >= 2:
        logger.info('-> Multiple processes finished in {:s}.'.format(s0))

    return
[ "def", "main", "(", ")", ":", "t0", ",", "s", "=", "elapsed", "(", ")", "# parser command line arguments", "args", "=", "vars", "(", "cli_new", "(", ")", ")", "# configure stream handler verbose level", "config_logger", "(", "log_path", "=", "misc", ".", "get_log_dir", "(", ")", ",", "stream_level", "=", "args", "[", "'verbose'", "]", ")", "# show preamble", "preamble", "(", ")", "logger", ".", "debug", "(", "'command line arguments:'", ")", "logger", ".", "debug", "(", "pprint", ".", "pformat", "(", "args", ")", ")", "if", "andeshelp", "(", "*", "*", "args", ")", "or", "search", "(", "*", "*", "args", ")", "or", "edit_conf", "(", "*", "*", "args", ")", "or", "remove_output", "(", "*", "*", "args", ")", "or", "save_config", "(", "*", "*", "args", ")", ":", "return", "# process input files", "if", "len", "(", "args", "[", "'filename'", "]", ")", "==", "0", ":", "logger", ".", "info", "(", "'error: no input file. Try \\'andes -h\\' for help.'", ")", "# preprocess cli args", "path", "=", "args", ".", "get", "(", "'input_path'", ",", "os", ".", "getcwd", "(", ")", ")", "ncpu", "=", "args", "[", "'ncpu'", "]", "if", "ncpu", "==", "0", "or", "ncpu", ">", "os", ".", "cpu_count", "(", ")", ":", "ncpu", "=", "os", ".", "cpu_count", "(", ")", "cases", "=", "[", "]", "for", "file", "in", "args", "[", "'filename'", "]", ":", "# use absolute path for cases which will be respected by FileMan", "full_paths", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "file", ")", ")", "found", "=", "glob", ".", "glob", "(", "full_paths", ")", "if", "len", "(", "found", ")", "==", "0", ":", "logger", ".", "info", "(", "'error: file {} does not exist.'", ".", "format", "(", "full_paths", ")", ")", "else", ":", "cases", "+=", "found", "# remove folders and make cases unique", "cases", "=", "list", "(", "set", "(", "cases", ")", ")", "valid_cases", "=", "[", "]", "for", "case", "in", "cases", ":", "if", "os", ".", "path", ".", "isfile", "(", "case", ")", ":", "valid_cases", ".", "append", "(", "case", ")", "logger", ".", "debug", "(", "'Found files: '", "+", "pprint", ".", "pformat", "(", "valid_cases", ")", ")", "if", "len", "(", "valid_cases", ")", "<=", "0", ":", "pass", "elif", "len", "(", "valid_cases", ")", "==", "1", ":", "run", "(", "valid_cases", "[", "0", "]", ",", "*", "*", "args", ")", "else", ":", "# set verbose level for multi processing", "logger", ".", "info", "(", "'Processing {} jobs on {} CPUs'", ".", "format", "(", "len", "(", "valid_cases", ")", ",", "ncpu", ")", ")", "logger", ".", "handlers", "[", "1", "]", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "# start processes", "jobs", "=", "[", "]", "for", "idx", ",", "file", "in", "enumerate", "(", "valid_cases", ")", ":", "args", "[", "'pid'", "]", "=", "idx", "job", "=", "Process", "(", "name", "=", "'Process {0:d}'", ".", "format", "(", "idx", ")", ",", "target", "=", "run", ",", "args", "=", "(", "file", ",", ")", ",", "kwargs", "=", "args", ")", "jobs", ".", "append", "(", "job", ")", "job", ".", "start", "(", ")", "start_msg", "=", "'Process {:d} <{:s}> started.'", ".", "format", "(", "idx", ",", "file", ")", "print", "(", "start_msg", ")", "logger", ".", "debug", "(", "start_msg", ")", "if", "(", "idx", "%", "ncpu", "==", "ncpu", "-", "1", ")", "or", "(", "idx", "==", "len", "(", "valid_cases", ")", "-", "1", ")", ":", "sleep", "(", "0.1", ")", "for", "job", "in", "jobs", ":", "job", ".", "join", "(", ")", "jobs", "=", "[", "]", "# restore command line output when all jobs are done", 
"logger", ".", "handlers", "[", "1", "]", ".", "setLevel", "(", "logging", ".", "INFO", ")", "t0", ",", "s0", "=", "elapsed", "(", "t0", ")", "if", "len", "(", "valid_cases", ")", "==", "1", ":", "logger", ".", "info", "(", "'-> Single process finished in {:s}.'", ".", "format", "(", "s0", ")", ")", "elif", "len", "(", "valid_cases", ")", ">=", "2", ":", "logger", ".", "info", "(", "'-> Multiple processes finished in {:s}.'", ".", "format", "(", "s0", ")", ")", "return" ]
29.027778
0.000925
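The process management above runs jobs in batches: it starts up to ncpu processes, then joins the whole batch before launching more. A stripped-down sketch of the same pattern, with a hypothetical work function standing in for run:

import os
from multiprocessing import Process
from time import sleep

def work(case):
    # hypothetical stand-in for the per-file run() call
    print('processing', case)

if __name__ == '__main__':
    cases = ['a.dm', 'b.dm', 'c.dm', 'd.dm', 'e.dm']
    ncpu = os.cpu_count() or 1

    jobs = []
    for idx, case in enumerate(cases):
        job = Process(target=work, args=(case,))
        jobs.append(job)
        job.start()
        # join after each full batch of ncpu jobs, and after the last job
        if (idx % ncpu == ncpu - 1) or (idx == len(cases) - 1):
            sleep(0.1)
            for job in jobs:
                job.join()
            jobs = []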
def _move_here(self):
        """Move the cursor to this item."""
        cu = self.scraper.current_item

        # Already here?
        if self is cu:
            return

        # A child?
        if cu.items and self in cu.items:
            self.scraper.move_to(self)
            return

        # A parent?
        if self is cu.parent:
            self.scraper.move_up()
            return

        # A sibling?
        if self.parent and self in self.parent.items:
            self.scraper.move_up()
            self.scraper.move_to(self)
            return

        # Last resort: Move to top and all the way down again
        self.scraper.move_to_top()
        for step in self.path:
            self.scraper.move_to(step)
[ "def", "_move_here", "(", "self", ")", ":", "cu", "=", "self", ".", "scraper", ".", "current_item", "# Already here?", "if", "self", "is", "cu", ":", "return", "# A child?", "if", "cu", ".", "items", "and", "self", "in", "cu", ".", "items", ":", "self", ".", "scraper", ".", "move_to", "(", "self", ")", "return", "# A parent?", "if", "self", "is", "cu", ".", "parent", ":", "self", ".", "scraper", ".", "move_up", "(", ")", "return", "# A sibling?", "if", "self", ".", "parent", "and", "self", "in", "self", ".", "parent", ".", "items", ":", "self", ".", "scraper", ".", "move_up", "(", ")", "self", ".", "scraper", ".", "move_to", "(", "self", ")", "return", "# Last resort: Move to top and all the way down again", "self", ".", "scraper", ".", "move_to_top", "(", ")", "for", "step", "in", "self", ".", "path", ":", "self", ".", "scraper", ".", "move_to", "(", "step", ")" ]
31.272727
0.002821
def do_up(self, arg):
        """Run the up migration whose name or numeric id matches arg"""
        print("running up migration")
        self.manager.run(arg, Direction.UP)
[ "def", "do_up", "(", "self", ",", "arg", ")", ":", "print", "(", "\"running up migration\"", ")", "self", ".", "manager", ".", "run", "(", "arg", ",", "Direction", ".", "UP", ")" ]
41.75
0.011765
def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \
            -> Iterator[str]:
        """
        Find the paths of modified files.

        There is no option to include intermediate directories, as all files
        and directories exist in both the left and right trees.

        :param base: The base directory to prepend to the right entity's
         name when building each yielded path.
        :return: An iterable of paths of modified files.
        """
        # N.B. this method will only ever return files, as directories cannot
        # be "modified"
        if self.is_modified:
            yield str(base / self.right.name)
[ "def", "modified", "(", "self", ",", "base", ":", "pathlib", ".", "PurePath", "=", "pathlib", ".", "PurePath", "(", ")", ")", "->", "Iterator", "[", "str", "]", ":", "# N.B. this method will only ever return files, as directories cannot", "# be \"modified\"", "if", "self", ".", "is_modified", ":", "yield", "str", "(", "base", "/", "self", ".", "right", ".", "name", ")" ]
41.8
0.00312
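A minimal usage sketch of that generator; the object here is a stand-in exposing only the two attributes modified() reads (is_modified and right.name), since the real diff-node class is not shown:

import pathlib
from types import SimpleNamespace

# Stand-in diff node: a modified file named config.yml on the right side.
node = SimpleNamespace(is_modified=True,
                       right=SimpleNamespace(name='config.yml'))

def modified(self, base=pathlib.PurePath()):
    # Same body as above, written as a free function for the sketch.
    if self.is_modified:
        yield str(base / self.right.name)

print(list(modified(node, base=pathlib.PurePath('etc'))))
# -> ['etc/config.yml'] on POSIX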
def get_all(
            self, target_resource=None, target_resource_group=None, target_resource_type=None, monitor_service=None,
            monitor_condition=None, severity=None, smart_group_state=None, time_range=None, page_count=None,
            sort_by=None, sort_order=None, custom_headers=None, raw=False, **operation_config):
        """Get all smartGroups within the subscription.

        List all the smartGroups within the specified subscription.

        :param target_resource: Filter by target resource (which is the full
         ARM ID). Default value is select all.
        :type target_resource: str
        :param target_resource_group: Filter by target resource group name.
         Default value is select all.
        :type target_resource_group: str
        :param target_resource_type: Filter by target resource type. Default
         value is select all.
        :type target_resource_type: str
        :param monitor_service: Filter by the monitor service which is the
         source of the alert instance. Default value is select all. Possible
         values include: 'Application Insights', 'ActivityLog
         Administrative', 'ActivityLog Security', 'ActivityLog
         Recommendation', 'ActivityLog Policy', 'ActivityLog Autoscale', 'Log
         Analytics', 'Nagios', 'Platform', 'SCOM', 'ServiceHealth',
         'SmartDetector', 'VM Insights', 'Zabbix'
        :type monitor_service: str or
         ~azure.mgmt.alertsmanagement.models.MonitorService
        :param monitor_condition: Filter by monitor condition, which is the
         state of the monitor (alertRule) at the monitor service. Default
         value is to select all. Possible values include: 'Fired', 'Resolved'
        :type monitor_condition: str or
         ~azure.mgmt.alertsmanagement.models.MonitorCondition
        :param severity: Filter by severity. Default value is select all.
         Possible values include: 'Sev0', 'Sev1', 'Sev2', 'Sev3', 'Sev4'
        :type severity: str or ~azure.mgmt.alertsmanagement.models.Severity
        :param smart_group_state: Filter by state of the smart group. Default
         value is to select all. Possible values include: 'New',
         'Acknowledged', 'Closed'
        :type smart_group_state: str or
         ~azure.mgmt.alertsmanagement.models.AlertState
        :param time_range: Filter by time range using the values listed
         below. Default value is 1 day. Possible values include: '1h', '1d',
         '7d', '30d'
        :type time_range: str or
         ~azure.mgmt.alertsmanagement.models.TimeRange
        :param page_count: Determines the number of alerts returned per page
         in the response. Permissible values are between 1 and 250. When the
         "includeContent" filter is selected, the maximum value allowed is
         25. Default value is 25.
        :type page_count: int
        :param sort_by: Sort the query results by the input field. Default
         value is sort by 'lastModifiedDateTime'. Possible values include:
         'alertsCount', 'state', 'severity', 'startDateTime',
         'lastModifiedDateTime'
        :type sort_by: str or
         ~azure.mgmt.alertsmanagement.models.SmartGroupsSortByFields
        :param sort_order: Sort the query results in either ascending or
         descending order. Default value is 'desc' for time fields and 'asc'
         for others. Possible values include: 'asc', 'desc'
        :type sort_order: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
:return: SmartGroupsList or ClientRawResponse if raw=true :rtype: ~azure.mgmt.alertsmanagement.models.SmartGroupsList or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.alertsmanagement.models.ErrorResponseException>` """ # Construct URL url = self.get_all.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if target_resource is not None: query_parameters['targetResource'] = self._serialize.query("target_resource", target_resource, 'str') if target_resource_group is not None: query_parameters['targetResourceGroup'] = self._serialize.query("target_resource_group", target_resource_group, 'str') if target_resource_type is not None: query_parameters['targetResourceType'] = self._serialize.query("target_resource_type", target_resource_type, 'str') if monitor_service is not None: query_parameters['monitorService'] = self._serialize.query("monitor_service", monitor_service, 'str') if monitor_condition is not None: query_parameters['monitorCondition'] = self._serialize.query("monitor_condition", monitor_condition, 'str') if severity is not None: query_parameters['severity'] = self._serialize.query("severity", severity, 'str') if smart_group_state is not None: query_parameters['smartGroupState'] = self._serialize.query("smart_group_state", smart_group_state, 'str') if time_range is not None: query_parameters['timeRange'] = self._serialize.query("time_range", time_range, 'str') if page_count is not None: query_parameters['pageCount'] = self._serialize.query("page_count", page_count, 'int') if sort_by is not None: query_parameters['sortBy'] = self._serialize.query("sort_by", sort_by, 'str') if sort_order is not None: query_parameters['sortOrder'] = self._serialize.query("sort_order", sort_order, 'str') query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('SmartGroupsList', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
[ "def", "get_all", "(", "self", ",", "target_resource", "=", "None", ",", "target_resource_group", "=", "None", ",", "target_resource_type", "=", "None", ",", "monitor_service", "=", "None", ",", "monitor_condition", "=", "None", ",", "severity", "=", "None", ",", "smart_group_state", "=", "None", ",", "time_range", "=", "None", ",", "page_count", "=", "None", ",", "sort_by", "=", "None", ",", "sort_order", "=", "None", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "*", "*", "operation_config", ")", ":", "# Construct URL", "url", "=", "self", ".", "get_all", ".", "metadata", "[", "'url'", "]", "path_format_arguments", "=", "{", "'subscriptionId'", ":", "self", ".", "_serialize", ".", "url", "(", "\"self.config.subscription_id\"", ",", "self", ".", "config", ".", "subscription_id", ",", "'str'", ")", "}", "url", "=", "self", ".", "_client", ".", "format_url", "(", "url", ",", "*", "*", "path_format_arguments", ")", "# Construct parameters", "query_parameters", "=", "{", "}", "if", "target_resource", "is", "not", "None", ":", "query_parameters", "[", "'targetResource'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"target_resource\"", ",", "target_resource", ",", "'str'", ")", "if", "target_resource_group", "is", "not", "None", ":", "query_parameters", "[", "'targetResourceGroup'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"target_resource_group\"", ",", "target_resource_group", ",", "'str'", ")", "if", "target_resource_type", "is", "not", "None", ":", "query_parameters", "[", "'targetResourceType'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"target_resource_type\"", ",", "target_resource_type", ",", "'str'", ")", "if", "monitor_service", "is", "not", "None", ":", "query_parameters", "[", "'monitorService'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"monitor_service\"", ",", "monitor_service", ",", "'str'", ")", "if", "monitor_condition", "is", "not", "None", ":", "query_parameters", "[", "'monitorCondition'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"monitor_condition\"", ",", "monitor_condition", ",", "'str'", ")", "if", "severity", "is", "not", "None", ":", "query_parameters", "[", "'severity'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"severity\"", ",", "severity", ",", "'str'", ")", "if", "smart_group_state", "is", "not", "None", ":", "query_parameters", "[", "'smartGroupState'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"smart_group_state\"", ",", "smart_group_state", ",", "'str'", ")", "if", "time_range", "is", "not", "None", ":", "query_parameters", "[", "'timeRange'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"time_range\"", ",", "time_range", ",", "'str'", ")", "if", "page_count", "is", "not", "None", ":", "query_parameters", "[", "'pageCount'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"page_count\"", ",", "page_count", ",", "'int'", ")", "if", "sort_by", "is", "not", "None", ":", "query_parameters", "[", "'sortBy'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"sort_by\"", ",", "sort_by", ",", "'str'", ")", "if", "sort_order", "is", "not", "None", ":", "query_parameters", "[", "'sortOrder'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"sort_order\"", ",", "sort_order", ",", "'str'", ")", "query_parameters", "[", "'api-version'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"self.api_version\"", ",", "self", ".", "api_version", ",", "'str'", ")", "# Construct headers", 
"header_parameters", "=", "{", "}", "header_parameters", "[", "'Accept'", "]", "=", "'application/json'", "if", "self", ".", "config", ".", "generate_client_request_id", ":", "header_parameters", "[", "'x-ms-client-request-id'", "]", "=", "str", "(", "uuid", ".", "uuid1", "(", ")", ")", "if", "custom_headers", ":", "header_parameters", ".", "update", "(", "custom_headers", ")", "if", "self", ".", "config", ".", "accept_language", "is", "not", "None", ":", "header_parameters", "[", "'accept-language'", "]", "=", "self", ".", "_serialize", ".", "header", "(", "\"self.config.accept_language\"", ",", "self", ".", "config", ".", "accept_language", ",", "'str'", ")", "# Construct and send request", "request", "=", "self", ".", "_client", ".", "get", "(", "url", ",", "query_parameters", ",", "header_parameters", ")", "response", "=", "self", ".", "_client", ".", "send", "(", "request", ",", "stream", "=", "False", ",", "*", "*", "operation_config", ")", "if", "response", ".", "status_code", "not", "in", "[", "200", "]", ":", "raise", "models", ".", "ErrorResponseException", "(", "self", ".", "_deserialize", ",", "response", ")", "deserialized", "=", "None", "if", "response", ".", "status_code", "==", "200", ":", "deserialized", "=", "self", ".", "_deserialize", "(", "'SmartGroupsList'", ",", "response", ")", "if", "raw", ":", "client_raw_response", "=", "ClientRawResponse", "(", "deserialized", ",", "response", ")", "return", "client_raw_response", "return", "deserialized" ]
55.874016
0.002354
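For context, a hedged sketch of calling this operation through the generated client; the client class, the smart_groups operations attribute, and the credential type follow the usual azure-mgmt-alertsmanagement / azure-common conventions and are assumptions, as none of them appear in the snippet itself:

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.alertsmanagement import AlertsManagementClient

# Placeholder credentials; substitute real tenant/app/subscription values.
credentials = ServicePrincipalCredentials(client_id='<app-id>',
                                          secret='<app-secret>',
                                          tenant='<tenant-id>')
client = AlertsManagementClient(credentials, '<subscription-id>')

# Sev1 smart groups from the last day, newest first.
groups = client.smart_groups.get_all(time_range='1d',
                                     severity='Sev1',
                                     sort_by='lastModifiedDateTime',
                                     sort_order='desc')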
def random_outdir(): # type: () -> Text """ Return the random directory name chosen to use for tool / workflow output """ # compute this once and store it as a function attribute - each subsequent call will return the same value if not hasattr(random_outdir, 'outdir'): random_outdir.outdir = '/' + ''.join([random.choice(string.ascii_letters) for _ in range(6)]) # type: ignore # nosec return random_outdir.outdir
[ "def", "random_outdir", "(", ")", ":", "# type: () -> Text", "# compute this once and store it as a function attribute - each subsequent call will return the same value", "if", "not", "hasattr", "(", "random_outdir", ",", "'outdir'", ")", ":", "random_outdir", ".", "outdir", "=", "'/'", "+", "''", ".", "join", "(", "[", "random", ".", "choice", "(", "string", ".", "ascii_letters", ")", "for", "_", "in", "range", "(", "6", ")", "]", ")", "# type: ignore # nosec", "return", "random_outdir", ".", "outdir" ]
72.666667
0.00907
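The hasattr guard above is a general idiom for one-shot, process-wide memoization on the function object itself; a minimal standalone sketch of the same trick:

import random
import string

def session_token():
    # Computed once on first call; every later call returns the cached value.
    if not hasattr(session_token, 'cached'):
        session_token.cached = ''.join(
            random.choice(string.ascii_letters) for _ in range(6))
    return session_token.cached

assert session_token() == session_token()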