Dataset columns:
    text            string     (lengths 75 to 104k)
    code_tokens     sequence
    avg_line_len    float64    (7.91 to 980)
    score           float64    (0 to 0.18)
def from_fortran_file(cls, fortran_file: str, tmpdir: str = "."):
    """Builds GrFN object from a Fortran program."""
    stem = Path(fortran_file).stem
    if tmpdir == "." and "/" in fortran_file:
        tmpdir = Path(fortran_file).parent
    preprocessed_fortran_file = f"{tmpdir}/{stem}_preprocessed.f"
    lambdas_path = f"{tmpdir}/{stem}_lambdas.py"
    json_filename = stem + ".json"
    with open(fortran_file, "r") as f:
        inputLines = f.readlines()
    with open(preprocessed_fortran_file, "w") as f:
        f.write(preprocessor.process(inputLines))
    xml_string = sp.run(
        [
            "java",
            "fortran.ofp.FrontEnd",
            "--class",
            "fortran.ofp.XMLPrinter",
            "--verbosity",
            "0",
            preprocessed_fortran_file,
        ],
        stdout=sp.PIPE,
    ).stdout
    trees = [ET.fromstring(xml_string)]
    comments = get_comments.get_comments(preprocessed_fortran_file)
    os.remove(preprocessed_fortran_file)
    xml_to_json_translator = translate.XMLToJSONTranslator()
    outputDict = xml_to_json_translator.analyze(trees, comments)
    pySrc = pyTranslate.create_python_source_list(outputDict)[0][0]
    G = cls.from_python_src(pySrc, lambdas_path, json_filename, stem)
    return G
[ "def", "from_fortran_file", "(", "cls", ",", "fortran_file", ":", "str", ",", "tmpdir", ":", "str", "=", "\".\"", ")", ":", "stem", "=", "Path", "(", "fortran_file", ")", ".", "stem", "if", "tmpdir", "==", "\".\"", "and", "\"/\"", "in", "fortran_file", ":", "tmpdir", "=", "Path", "(", "fortran_file", ")", ".", "parent", "preprocessed_fortran_file", "=", "f\"{tmpdir}/{stem}_preprocessed.f\"", "lambdas_path", "=", "f\"{tmpdir}/{stem}_lambdas.py\"", "json_filename", "=", "stem", "+", "\".json\"", "with", "open", "(", "fortran_file", ",", "\"r\"", ")", "as", "f", ":", "inputLines", "=", "f", ".", "readlines", "(", ")", "with", "open", "(", "preprocessed_fortran_file", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "preprocessor", ".", "process", "(", "inputLines", ")", ")", "xml_string", "=", "sp", ".", "run", "(", "[", "\"java\"", ",", "\"fortran.ofp.FrontEnd\"", ",", "\"--class\"", ",", "\"fortran.ofp.XMLPrinter\"", ",", "\"--verbosity\"", ",", "\"0\"", ",", "preprocessed_fortran_file", ",", "]", ",", "stdout", "=", "sp", ".", "PIPE", ",", ")", ".", "stdout", "trees", "=", "[", "ET", ".", "fromstring", "(", "xml_string", ")", "]", "comments", "=", "get_comments", ".", "get_comments", "(", "preprocessed_fortran_file", ")", "os", ".", "remove", "(", "preprocessed_fortran_file", ")", "xml_to_json_translator", "=", "translate", ".", "XMLToJSONTranslator", "(", ")", "outputDict", "=", "xml_to_json_translator", ".", "analyze", "(", "trees", ",", "comments", ")", "pySrc", "=", "pyTranslate", ".", "create_python_source_list", "(", "outputDict", ")", "[", "0", "]", "[", "0", "]", "G", "=", "cls", ".", "from_python_src", "(", "pySrc", ",", "lambdas_path", ",", "json_filename", ",", "stem", ")", "return", "G" ]
avg_line_len: 38.055556    score: 0.001423
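A minimal usage sketch for the classmethod above; `GrFN` is a placeholder for whatever class defines it, which the source does not show:

    # `GrFN` stands in for the (unshown) owner class. The call preprocesses
    # the Fortran file, runs the OFP Java front end to get an XML AST,
    # translates that to Python source, and builds the network from it.
    G = GrFN.from_fortran_file("crop_yield.f", tmpdir="/tmp")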
def get_serializer(context):
    """Returns a serializer for a given context"""
    cluster_config = context.get_cluster_config()
    serializer_clsname = cluster_config.get(constants.TOPOLOGY_SERIALIZER_CLASSNAME, None)
    if serializer_clsname is None:
        return PythonSerializer()
    else:
        try:
            topo_pex_path = context.get_topology_pex_path()
            pex_loader.load_pex(topo_pex_path)
            serializer_cls = pex_loader.import_and_get_class(topo_pex_path, serializer_clsname)
            serializer = serializer_cls()
            return serializer
        except Exception as e:
            raise RuntimeError("Error with loading custom serializer class: %s, with error message: %s"
                               % (serializer_clsname, str(e)))
[ "def", "get_serializer", "(", "context", ")", ":", "cluster_config", "=", "context", ".", "get_cluster_config", "(", ")", "serializer_clsname", "=", "cluster_config", ".", "get", "(", "constants", ".", "TOPOLOGY_SERIALIZER_CLASSNAME", ",", "None", ")", "if", "serializer_clsname", "is", "None", ":", "return", "PythonSerializer", "(", ")", "else", ":", "try", ":", "topo_pex_path", "=", "context", ".", "get_topology_pex_path", "(", ")", "pex_loader", ".", "load_pex", "(", "topo_pex_path", ")", "serializer_cls", "=", "pex_loader", ".", "import_and_get_class", "(", "topo_pex_path", ",", "serializer_clsname", ")", "serializer", "=", "serializer_cls", "(", ")", "return", "serializer", "except", "Exception", "as", "e", ":", "raise", "RuntimeError", "(", "\"Error with loading custom serializer class: %s, with error message: %s\"", "%", "(", "serializer_clsname", ",", "str", "(", "e", ")", ")", ")" ]
avg_line_len: 46    score: 0.009321
def reset_parameter(**kwargs):
    """Create a callback that resets the parameter after the first iteration.

    Note
    ----
    The initial parameter will still take effect on the first iteration.

    Parameters
    ----------
    **kwargs : value should be list or function
        List of parameters for each boosting round
        or a customized function that calculates the parameter
        in terms of current number of round
        (e.g. yields learning rate decay).
        If list lst, parameter = lst[current_round].
        If function func, parameter = func(current_round).

    Returns
    -------
    callback : function
        The callback that resets the parameter after the first iteration.
    """
    def _callback(env):
        new_parameters = {}
        for key, value in kwargs.items():
            if key in ['num_class', 'num_classes', 'boosting', 'boost', 'boosting_type',
                       'metric', 'metrics', 'metric_types']:
                raise RuntimeError("cannot reset {} during training".format(repr(key)))
            if isinstance(value, list):
                if len(value) != env.end_iteration - env.begin_iteration:
                    raise ValueError("Length of list {} has to equal to 'num_boost_round'."
                                     .format(repr(key)))
                new_param = value[env.iteration - env.begin_iteration]
            else:
                new_param = value(env.iteration - env.begin_iteration)
            if new_param != env.params.get(key, None):
                new_parameters[key] = new_param
        if new_parameters:
            env.model.reset_parameter(new_parameters)
            env.params.update(new_parameters)
    _callback.before_iteration = True
    _callback.order = 10
    return _callback
[ "def", "reset_parameter", "(", "*", "*", "kwargs", ")", ":", "def", "_callback", "(", "env", ")", ":", "new_parameters", "=", "{", "}", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", "in", "[", "'num_class'", ",", "'num_classes'", ",", "'boosting'", ",", "'boost'", ",", "'boosting_type'", ",", "'metric'", ",", "'metrics'", ",", "'metric_types'", "]", ":", "raise", "RuntimeError", "(", "\"cannot reset {} during training\"", ".", "format", "(", "repr", "(", "key", ")", ")", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "if", "len", "(", "value", ")", "!=", "env", ".", "end_iteration", "-", "env", ".", "begin_iteration", ":", "raise", "ValueError", "(", "\"Length of list {} has to equal to 'num_boost_round'.\"", ".", "format", "(", "repr", "(", "key", ")", ")", ")", "new_param", "=", "value", "[", "env", ".", "iteration", "-", "env", ".", "begin_iteration", "]", "else", ":", "new_param", "=", "value", "(", "env", ".", "iteration", "-", "env", ".", "begin_iteration", ")", "if", "new_param", "!=", "env", ".", "params", ".", "get", "(", "key", ",", "None", ")", ":", "new_parameters", "[", "key", "]", "=", "new_param", "if", "new_parameters", ":", "env", ".", "model", ".", "reset_parameter", "(", "new_parameters", ")", "env", ".", "params", ".", "update", "(", "new_parameters", ")", "_callback", ".", "before_iteration", "=", "True", "_callback", ".", "order", "=", "10", "return", "_callback" ]
avg_line_len: 41.023256    score: 0.001661
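This follows LightGBM's callback protocol (`env.model.reset_parameter`, the `before_iteration` flag); a usage sketch, with the dataset construction elided:

    import lightgbm as lgb

    # `train_set` is an lgb.Dataset, assumed to exist. The callback fires
    # before each round and decays the learning rate multiplicatively.
    booster = lgb.train(
        {"objective": "regression", "learning_rate": 0.1},
        train_set,
        num_boost_round=50,
        callbacks=[reset_parameter(learning_rate=lambda i: 0.1 * (0.99 ** i))],
    )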
def _filter(self, filename):
    """
    return 'true' if filename doesn't match name_filter regex and should be
    filtered out of the list.
    @param filename:
    @return:
    """
    return self.name_filter is not None and re.search(self.name_filter, filename) is None
[ "def", "_filter", "(", "self", ",", "filename", ")", ":", "return", "self", ".", "name_filter", "is", "not", "None", "and", "re", ".", "search", "(", "self", ".", "name_filter", ",", "filename", ")", "is", "None" ]
avg_line_len: 41.142857    score: 0.013605
def load_source(source):
    """
    Common entry point for loading some form of raw swagger schema.

    Supports:
        - python object (dictionary-like)
        - path to yaml file
        - path to json file
        - file object (json or yaml).
        - json string.
        - yaml string.
    """
    if isinstance(source, collections.Mapping):
        return deepcopy(source)
    elif hasattr(source, 'read') and callable(source.read):
        raw_source = source.read()
    elif os.path.exists(os.path.expanduser(str(source))):
        with open(os.path.expanduser(str(source)), 'r') as source_file:
            raw_source = source_file.read()
    elif isinstance(source, six.string_types):
        parts = urlparse.urlparse(source)
        if parts.scheme and parts.netloc:
            response = requests.get(source)
            if isinstance(response.content, six.binary_type):
                raw_source = six.text_type(response.content, encoding='utf-8')
            else:
                raw_source = response.content
        else:
            raw_source = source

    try:
        try:
            return json.loads(raw_source)
        except ValueError:
            pass
        try:
            return yaml.safe_load(raw_source)
        except (yaml.scanner.ScannerError, yaml.parser.ParserError):
            pass
    except NameError:
        pass

    raise ValueError(
        "Unable to parse `{0}`. Tried yaml and json.".format(source),
    )
[ "def", "load_source", "(", "source", ")", ":", "if", "isinstance", "(", "source", ",", "collections", ".", "Mapping", ")", ":", "return", "deepcopy", "(", "source", ")", "elif", "hasattr", "(", "source", ",", "'read'", ")", "and", "callable", "(", "source", ".", "read", ")", ":", "raw_source", "=", "source", ".", "read", "(", ")", "elif", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "expanduser", "(", "str", "(", "source", ")", ")", ")", ":", "with", "open", "(", "os", ".", "path", ".", "expanduser", "(", "str", "(", "source", ")", ")", ",", "'r'", ")", "as", "source_file", ":", "raw_source", "=", "source_file", ".", "read", "(", ")", "elif", "isinstance", "(", "source", ",", "six", ".", "string_types", ")", ":", "parts", "=", "urlparse", ".", "urlparse", "(", "source", ")", "if", "parts", ".", "scheme", "and", "parts", ".", "netloc", ":", "response", "=", "requests", ".", "get", "(", "source", ")", "if", "isinstance", "(", "response", ".", "content", ",", "six", ".", "binary_type", ")", ":", "raw_source", "=", "six", ".", "text_type", "(", "response", ".", "content", ",", "encoding", "=", "'utf-8'", ")", "else", ":", "raw_source", "=", "response", ".", "content", "else", ":", "raw_source", "=", "source", "try", ":", "try", ":", "return", "json", ".", "loads", "(", "raw_source", ")", "except", "ValueError", ":", "pass", "try", ":", "return", "yaml", ".", "safe_load", "(", "raw_source", ")", "except", "(", "yaml", ".", "scanner", ".", "ScannerError", ",", "yaml", ".", "parser", ".", "ParserError", ")", ":", "pass", "except", "NameError", ":", "pass", "raise", "ValueError", "(", "\"Unable to parse `{0}`. Tried yaml and json.\"", ".", "format", "(", "source", ")", ",", ")" ]
avg_line_len: 30.978261    score: 0.00068
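A usage sketch; the file path and URL below are placeholders:

    schema = load_source({"swagger": "2.0"})                  # mapping: returned as a deep copy
    schema = load_source("swagger.yaml")                      # local file path
    schema = load_source('{"swagger": "2.0"}')                # raw JSON string
    schema = load_source("https://example.com/swagger.json")  # fetched over HTTP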
def metric(self, slug, num=1, category=None, expire=None, date=None):
    """Records a metric, creating it if it doesn't exist or incrementing it
    if it does. All metrics are prefixed with 'm', and automatically
    aggregate for Seconds, Minutes, Hours, Day, Week, Month, and Year.

    Parameters:

    * ``slug`` -- a unique value to identify the metric; used in
      construction of redis keys (see below).
    * ``num`` -- Set or Increment the metric by this number; default is 1.
    * ``category`` -- (optional) Assign the metric to a Category (a string)
    * ``expire`` -- (optional) Specify the number of seconds in which the
      metric will expire.
    * ``date`` -- (optional) Specify the timestamp for the metric; default
      used to build the keys will be the current date and time in UTC form.

    Redis keys for each metric (slug) take the form:

        m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
        m:<slug>:i:<yyyy-mm-dd-hh-mm>    # Minute
        m:<slug>:h:<yyyy-mm-dd-hh>       # Hour
        m:<slug>:<yyyy-mm-dd>            # Day
        m:<slug>:w:<yyyy-num>            # Week (year - week number)
        m:<slug>:m:<yyyy-mm>             # Month
        m:<slug>:y:<yyyy>                # Year
    """
    # Add the slug to the set of metric slugs
    self.r.sadd(self._metric_slugs_key, slug)

    if category:
        self._categorize(slug, category)

    # Increment keys. NOTE: current redis-py (2.7.2) doesn't include an
    # incrby method; .incr accepts a second ``amount`` parameter.
    keys = self._build_keys(slug, date=date)

    # Use a pipeline to speed up incrementing multiple keys
    pipe = self.r.pipeline()
    for key in keys:
        pipe.incr(key, num)
        if expire:
            pipe.expire(key, expire)
    pipe.execute()
[ "def", "metric", "(", "self", ",", "slug", ",", "num", "=", "1", ",", "category", "=", "None", ",", "expire", "=", "None", ",", "date", "=", "None", ")", ":", "# Add the slug to the set of metric slugs", "self", ".", "r", ".", "sadd", "(", "self", ".", "_metric_slugs_key", ",", "slug", ")", "if", "category", ":", "self", ".", "_categorize", "(", "slug", ",", "category", ")", "# Increment keys. NOTE: current redis-py (2.7.2) doesn't include an", "# incrby method; .incr accepts a second ``amount`` parameter.", "keys", "=", "self", ".", "_build_keys", "(", "slug", ",", "date", "=", "date", ")", "# Use a pipeline to speed up incrementing multiple keys", "pipe", "=", "self", ".", "r", ".", "pipeline", "(", ")", "for", "key", "in", "keys", ":", "pipe", ".", "incr", "(", "key", ",", "num", ")", "if", "expire", ":", "pipe", ".", "expire", "(", "key", ",", "expire", ")", "pipe", ".", "execute", "(", ")" ]
avg_line_len: 42.886364    score: 0.001036
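A usage sketch, assuming `metrics` is an instance of the (unshown) class this method belongs to:

    # One call bumps every granularity key (second through year) in a
    # single pipelined round trip, and tags the slug with a category.
    metrics.metric("page-views", num=1, category="site")

    # Short-lived counter: all of its keys expire an hour after this write.
    metrics.metric("login-attempts", expire=3600)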
def weather_from_dictionary(d):
    """
    Builds a *Weather* object out of a data dictionary. Only certain
    properties of the dictionary are used: if these properties are not
    found or cannot be read, an error is issued.

    :param d: a data dictionary
    :type d: dict
    :returns: a *Weather* instance
    :raises: *KeyError* if it is impossible to find or read the data
        needed to build the instance
    """
    # -- times
    if 'dt' in d:
        reference_time = d['dt']
    elif 'dt' in d['last']:
        reference_time = d['last']['dt']
    if 'sys' in d and 'sunset' in d['sys']:
        sunset_time = d['sys']['sunset']
    else:
        sunset_time = 0
    if 'sys' in d and 'sunrise' in d['sys']:
        sunrise_time = d['sys']['sunrise']
    else:
        sunrise_time = 0
    # -- calc
    if 'calc' in d:
        if 'dewpoint' in d['calc']:
            dewpoint = d['calc']['dewpoint']
        else:
            dewpoint = None
        if 'humidex' in d['calc']:
            humidex = d['calc']['humidex']
        else:
            humidex = None
        if 'heatindex' in d['calc']:
            heat_index = d['calc']['heatindex']
        else:
            heat_index = None
    elif 'last' in d:
        if 'calc' in d['last']:
            if 'dewpoint' in d['last']['calc']:
                dewpoint = d['last']['calc']['dewpoint']
            else:
                dewpoint = None
            if 'humidex' in d['last']['calc']:
                humidex = d['last']['calc']['humidex']
            else:
                humidex = None
            if 'heatindex' in d['last']['calc']:
                heat_index = d['last']['calc']['heatindex']
            else:
                heat_index = None
    else:
        dewpoint = None
        humidex = None
        heat_index = None
    # -- visibility
    if 'visibility' in d:
        if isinstance(d['visibility'], int):
            visibility_distance = d['visibility']
        elif 'distance' in d['visibility']:
            visibility_distance = d['visibility']['distance']
        else:
            visibility_distance = None
    elif 'last' in d and 'visibility' in d['last']:
        if isinstance(d['last']['visibility'], int):
            visibility_distance = d['last']['visibility']
        elif 'distance' in d['last']['visibility']:
            visibility_distance = d['last']['visibility']['distance']
        else:
            visibility_distance = None
    else:
        visibility_distance = None
    # -- clouds
    if 'clouds' in d:
        if isinstance(d['clouds'], int) or isinstance(d['clouds'], float):
            clouds = d['clouds']
        elif 'all' in d['clouds']:
            clouds = d['clouds']['all']
        else:
            clouds = 0
    else:
        clouds = 0
    # -- rain
    if 'rain' in d:
        if isinstance(d['rain'], int) or isinstance(d['rain'], float):
            rain = {'all': d['rain']}
        else:
            if d['rain'] is not None:
                rain = d['rain'].copy()
            else:
                rain = dict()
    else:
        rain = dict()
    # -- wind
    if 'wind' in d and d['wind'] is not None:
        wind = d['wind'].copy()
    elif 'last' in d:
        if 'wind' in d['last'] and d['last']['wind'] is not None:
            wind = d['last']['wind'].copy()
        else:
            wind = dict()
    else:
        wind = dict()
        if 'speed' in d:
            wind['speed'] = d['speed']
        if 'deg' in d:
            wind['deg'] = d['deg']
    # -- humidity
    if 'humidity' in d:
        humidity = d['humidity']
    elif 'main' in d and 'humidity' in d['main']:
        humidity = d['main']['humidity']
    elif 'last' in d and 'main' in d['last'] and 'humidity' in d['last']['main']:
        humidity = d['last']['main']['humidity']
    else:
        humidity = 0
    # -- snow
    if 'snow' in d:
        if isinstance(d['snow'], int) or isinstance(d['snow'], float):
            snow = {'all': d['snow']}
        else:
            if d['snow'] is not None:
                snow = d['snow'].copy()
            else:
                snow = dict()
    else:
        snow = dict()
    # -- pressure
    if 'pressure' in d:
        atm_press = d['pressure']
    elif 'main' in d and 'pressure' in d['main']:
        atm_press = d['main']['pressure']
    elif 'last' in d:
        if 'main' in d['last']:
            atm_press = d['last']['main']['pressure']
    else:
        atm_press = None
    if 'main' in d and 'sea_level' in d['main']:
        sea_level_press = d['main']['sea_level']
    else:
        sea_level_press = None
    pressure = {'press': atm_press, 'sea_level': sea_level_press}
    # -- temperature
    if 'temp' in d:
        if d['temp'] is not None:
            temperature = d['temp'].copy()
        else:
            temperature = dict()
    elif 'main' in d and 'temp' in d['main']:
        temp = d['main']['temp']
        if 'temp_kf' in d['main']:
            temp_kf = d['main']['temp_kf']
        else:
            temp_kf = None
        if 'temp_max' in d['main']:
            temp_max = d['main']['temp_max']
        else:
            temp_max = None
        if 'temp_min' in d['main']:
            temp_min = d['main']['temp_min']
        else:
            temp_min = None
        temperature = {'temp': temp,
                       'temp_kf': temp_kf,
                       'temp_max': temp_max,
                       'temp_min': temp_min}
    elif 'last' in d:
        if 'main' in d['last']:
            temperature = dict(temp=d['last']['main']['temp'])
    else:
        temperature = dict()
    # -- weather status info
    if 'weather' in d:
        status = d['weather'][0]['main']
        detailed_status = d['weather'][0]['description']
        weather_code = d['weather'][0]['id']
        weather_icon_name = d['weather'][0]['icon']
    else:
        status = ''
        detailed_status = ''
        weather_code = 0
        weather_icon_name = ''
    return Weather(reference_time, sunset_time, sunrise_time, clouds, rain,
                   snow, wind, humidity, pressure, temperature, status,
                   detailed_status, weather_code, weather_icon_name,
                   visibility_distance, dewpoint, humidex, heat_index)
[ "def", "weather_from_dictionary", "(", "d", ")", ":", "# -- times", "if", "'dt'", "in", "d", ":", "reference_time", "=", "d", "[", "'dt'", "]", "elif", "'dt'", "in", "d", "[", "'last'", "]", ":", "reference_time", "=", "d", "[", "'last'", "]", "[", "'dt'", "]", "if", "'sys'", "in", "d", "and", "'sunset'", "in", "d", "[", "'sys'", "]", ":", "sunset_time", "=", "d", "[", "'sys'", "]", "[", "'sunset'", "]", "else", ":", "sunset_time", "=", "0", "if", "'sys'", "in", "d", "and", "'sunrise'", "in", "d", "[", "'sys'", "]", ":", "sunrise_time", "=", "d", "[", "'sys'", "]", "[", "'sunrise'", "]", "else", ":", "sunrise_time", "=", "0", "# -- calc", "if", "'calc'", "in", "d", ":", "if", "'dewpoint'", "in", "d", "[", "'calc'", "]", ":", "dewpoint", "=", "d", "[", "'calc'", "]", "[", "'dewpoint'", "]", "else", ":", "dewpoint", "=", "None", "if", "'humidex'", "in", "d", "[", "'calc'", "]", ":", "humidex", "=", "d", "[", "'calc'", "]", "[", "'humidex'", "]", "else", ":", "humidex", "=", "None", "if", "'heatindex'", "in", "d", "[", "'calc'", "]", ":", "heat_index", "=", "d", "[", "'calc'", "]", "[", "'heatindex'", "]", "else", ":", "heat_index", "=", "None", "elif", "'last'", "in", "d", ":", "if", "'calc'", "in", "d", "[", "'last'", "]", ":", "if", "'dewpoint'", "in", "d", "[", "'last'", "]", "[", "'calc'", "]", ":", "dewpoint", "=", "d", "[", "'last'", "]", "[", "'calc'", "]", "[", "'dewpoint'", "]", "else", ":", "dewpoint", "=", "None", "if", "'humidex'", "in", "d", "[", "'last'", "]", "[", "'calc'", "]", ":", "humidex", "=", "d", "[", "'last'", "]", "[", "'calc'", "]", "[", "'humidex'", "]", "else", ":", "humidex", "=", "None", "if", "'heatindex'", "in", "d", "[", "'last'", "]", "[", "'calc'", "]", ":", "heat_index", "=", "d", "[", "'last'", "]", "[", "'calc'", "]", "[", "'heatindex'", "]", "else", ":", "heat_index", "=", "None", "else", ":", "dewpoint", "=", "None", "humidex", "=", "None", "heat_index", "=", "None", "# -- visibility", "if", "'visibility'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'visibility'", "]", ",", "int", ")", ":", "visibility_distance", "=", "d", "[", "'visibility'", "]", "elif", "'distance'", "in", "d", "[", "'visibility'", "]", ":", "visibility_distance", "=", "d", "[", "'visibility'", "]", "[", "'distance'", "]", "else", ":", "visibility_distance", "=", "None", "elif", "'last'", "in", "d", "and", "'visibility'", "in", "d", "[", "'last'", "]", ":", "if", "isinstance", "(", "d", "[", "'last'", "]", "[", "'visibility'", "]", ",", "int", ")", ":", "visibility_distance", "=", "d", "[", "'last'", "]", "[", "'visibility'", "]", "elif", "'distance'", "in", "d", "[", "'last'", "]", "[", "'visibility'", "]", ":", "visibility_distance", "=", "d", "[", "'last'", "]", "[", "'visibility'", "]", "[", "'distance'", "]", "else", ":", "visibility_distance", "=", "None", "else", ":", "visibility_distance", "=", "None", "# -- clouds", "if", "'clouds'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'clouds'", "]", ",", "int", ")", "or", "isinstance", "(", "d", "[", "'clouds'", "]", ",", "float", ")", ":", "clouds", "=", "d", "[", "'clouds'", "]", "elif", "'all'", "in", "d", "[", "'clouds'", "]", ":", "clouds", "=", "d", "[", "'clouds'", "]", "[", "'all'", "]", "else", ":", "clouds", "=", "0", "else", ":", "clouds", "=", "0", "# -- rain", "if", "'rain'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'rain'", "]", ",", "int", ")", "or", "isinstance", "(", "d", "[", "'rain'", "]", ",", "float", ")", ":", "rain", "=", "{", "'all'", ":", "d", "[", "'rain'", "]", "}", "else", 
":", "if", "d", "[", "'rain'", "]", "is", "not", "None", ":", "rain", "=", "d", "[", "'rain'", "]", ".", "copy", "(", ")", "else", ":", "rain", "=", "dict", "(", ")", "else", ":", "rain", "=", "dict", "(", ")", "# -- wind", "if", "'wind'", "in", "d", "and", "d", "[", "'wind'", "]", "is", "not", "None", ":", "wind", "=", "d", "[", "'wind'", "]", ".", "copy", "(", ")", "elif", "'last'", "in", "d", ":", "if", "'wind'", "in", "d", "[", "'last'", "]", "and", "d", "[", "'last'", "]", "[", "'wind'", "]", "is", "not", "None", ":", "wind", "=", "d", "[", "'last'", "]", "[", "'wind'", "]", ".", "copy", "(", ")", "else", ":", "wind", "=", "dict", "(", ")", "else", ":", "wind", "=", "dict", "(", ")", "if", "'speed'", "in", "d", ":", "wind", "[", "'speed'", "]", "=", "d", "[", "'speed'", "]", "if", "'deg'", "in", "d", ":", "wind", "[", "'deg'", "]", "=", "d", "[", "'deg'", "]", "# -- humidity", "if", "'humidity'", "in", "d", ":", "humidity", "=", "d", "[", "'humidity'", "]", "elif", "'main'", "in", "d", "and", "'humidity'", "in", "d", "[", "'main'", "]", ":", "humidity", "=", "d", "[", "'main'", "]", "[", "'humidity'", "]", "elif", "'last'", "in", "d", "and", "'main'", "in", "d", "[", "'last'", "]", "and", "'humidity'", "in", "d", "[", "'last'", "]", "[", "'main'", "]", ":", "humidity", "=", "d", "[", "'last'", "]", "[", "'main'", "]", "[", "'humidity'", "]", "else", ":", "humidity", "=", "0", "# -- snow", "if", "'snow'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'snow'", "]", ",", "int", ")", "or", "isinstance", "(", "d", "[", "'snow'", "]", ",", "float", ")", ":", "snow", "=", "{", "'all'", ":", "d", "[", "'snow'", "]", "}", "else", ":", "if", "d", "[", "'snow'", "]", "is", "not", "None", ":", "snow", "=", "d", "[", "'snow'", "]", ".", "copy", "(", ")", "else", ":", "snow", "=", "dict", "(", ")", "else", ":", "snow", "=", "dict", "(", ")", "# -- pressure", "if", "'pressure'", "in", "d", ":", "atm_press", "=", "d", "[", "'pressure'", "]", "elif", "'main'", "in", "d", "and", "'pressure'", "in", "d", "[", "'main'", "]", ":", "atm_press", "=", "d", "[", "'main'", "]", "[", "'pressure'", "]", "elif", "'last'", "in", "d", ":", "if", "'main'", "in", "d", "[", "'last'", "]", ":", "atm_press", "=", "d", "[", "'last'", "]", "[", "'main'", "]", "[", "'pressure'", "]", "else", ":", "atm_press", "=", "None", "if", "'main'", "in", "d", "and", "'sea_level'", "in", "d", "[", "'main'", "]", ":", "sea_level_press", "=", "d", "[", "'main'", "]", "[", "'sea_level'", "]", "else", ":", "sea_level_press", "=", "None", "pressure", "=", "{", "'press'", ":", "atm_press", ",", "'sea_level'", ":", "sea_level_press", "}", "# -- temperature", "if", "'temp'", "in", "d", ":", "if", "d", "[", "'temp'", "]", "is", "not", "None", ":", "temperature", "=", "d", "[", "'temp'", "]", ".", "copy", "(", ")", "else", ":", "temperature", "=", "dict", "(", ")", "elif", "'main'", "in", "d", "and", "'temp'", "in", "d", "[", "'main'", "]", ":", "temp", "=", "d", "[", "'main'", "]", "[", "'temp'", "]", "if", "'temp_kf'", "in", "d", "[", "'main'", "]", ":", "temp_kf", "=", "d", "[", "'main'", "]", "[", "'temp_kf'", "]", "else", ":", "temp_kf", "=", "None", "if", "'temp_max'", "in", "d", "[", "'main'", "]", ":", "temp_max", "=", "d", "[", "'main'", "]", "[", "'temp_max'", "]", "else", ":", "temp_max", "=", "None", "if", "'temp_min'", "in", "d", "[", "'main'", "]", ":", "temp_min", "=", "d", "[", "'main'", "]", "[", "'temp_min'", "]", "else", ":", "temp_min", "=", "None", "temperature", "=", "{", "'temp'", ":", "temp", ",", "'temp_kf'", ":", 
"temp_kf", ",", "'temp_max'", ":", "temp_max", ",", "'temp_min'", ":", "temp_min", "}", "elif", "'last'", "in", "d", ":", "if", "'main'", "in", "d", "[", "'last'", "]", ":", "temperature", "=", "dict", "(", "temp", "=", "d", "[", "'last'", "]", "[", "'main'", "]", "[", "'temp'", "]", ")", "else", ":", "temperature", "=", "dict", "(", ")", "# -- weather status info", "if", "'weather'", "in", "d", ":", "status", "=", "d", "[", "'weather'", "]", "[", "0", "]", "[", "'main'", "]", "detailed_status", "=", "d", "[", "'weather'", "]", "[", "0", "]", "[", "'description'", "]", "weather_code", "=", "d", "[", "'weather'", "]", "[", "0", "]", "[", "'id'", "]", "weather_icon_name", "=", "d", "[", "'weather'", "]", "[", "0", "]", "[", "'icon'", "]", "else", ":", "status", "=", "''", "detailed_status", "=", "''", "weather_code", "=", "0", "weather_icon_name", "=", "''", "return", "Weather", "(", "reference_time", ",", "sunset_time", ",", "sunrise_time", ",", "clouds", ",", "rain", ",", "snow", ",", "wind", ",", "humidity", ",", "pressure", ",", "temperature", ",", "status", ",", "detailed_status", ",", "weather_code", ",", "weather_icon_name", ",", "visibility_distance", ",", "dewpoint", ",", "humidex", ",", "heat_index", ")" ]
avg_line_len: 31.874346    score: 0.000796
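A usage sketch with a minimal OpenWeatherMap-style payload (values invented for illustration); fields that are absent fall back to the defaults visible above (0, None, or an empty dict):

    data = {
        'dt': 1378459200,
        'clouds': {'all': 92},
        'main': {'temp': 296.15, 'pressure': 1013, 'humidity': 57},
        'weather': [{'main': 'Clouds', 'description': 'overcast clouds',
                     'id': 804, 'icon': '04d'}],
    }
    w = weather_from_dictionary(data)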
def run(self, agent_host):
    """run the agent on the world"""
    total_reward = 0
    current_r = 0
    tol = 0.01

    self.prev_s = None
    self.prev_a = None

    # wait for a valid observation
    world_state = agent_host.peekWorldState()
    while world_state.is_mission_running and all(e.text == '{}' for e in world_state.observations):
        world_state = agent_host.peekWorldState()
    # wait for a frame to arrive after that
    num_frames_seen = world_state.number_of_video_frames_since_last_state
    while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:
        world_state = agent_host.peekWorldState()
    world_state = agent_host.getWorldState()
    for err in world_state.errors:
        print(err)

    if not world_state.is_mission_running:
        return 0  # mission already ended

    assert len(world_state.video_frames) > 0, 'No video frames!?'

    obs = json.loads(world_state.observations[-1].text)
    prev_x = obs[u'XPos']
    prev_z = obs[u'ZPos']
    print('Initial position:', prev_x, ',', prev_z)

    if save_images:
        # save the frame, for debugging
        frame = world_state.video_frames[-1]
        image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels))
        iFrame = 0
        self.rep = self.rep + 1
        image.save('rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '.png')

    # take first action
    total_reward += self.act(world_state, agent_host, current_r)

    require_move = True
    check_expected_position = True

    # main loop:
    while world_state.is_mission_running:

        # wait for the position to have changed and a reward received
        print('Waiting for data...', end=' ')
        while True:
            world_state = agent_host.peekWorldState()
            if not world_state.is_mission_running:
                print('mission ended.')
                break
            if len(world_state.rewards) > 0 and not all(e.text == '{}' for e in world_state.observations):
                obs = json.loads(world_state.observations[-1].text)
                curr_x = obs[u'XPos']
                curr_z = obs[u'ZPos']
                if require_move:
                    if math.hypot(curr_x - prev_x, curr_z - prev_z) > tol:
                        print('received.')
                        break
                else:
                    print('received.')
                    break
        # wait for a frame to arrive after that
        num_frames_seen = world_state.number_of_video_frames_since_last_state
        while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:
            world_state = agent_host.peekWorldState()

        num_frames_before_get = len(world_state.video_frames)
        world_state = agent_host.getWorldState()
        for err in world_state.errors:
            print(err)
        current_r = sum(r.getValue() for r in world_state.rewards)

        if save_images:
            # save the frame, for debugging
            if world_state.is_mission_running:
                assert len(world_state.video_frames) > 0, 'No video frames!?'
                frame = world_state.video_frames[-1]
                image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels))
                iFrame = iFrame + 1
                image.save('rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '_after_' + self.actions[self.prev_a] + '.png')

        if world_state.is_mission_running:
            assert len(world_state.video_frames) > 0, 'No video frames!?'
            num_frames_after_get = len(world_state.video_frames)
            assert num_frames_after_get >= num_frames_before_get, 'Fewer frames after getWorldState!?'
            frame = world_state.video_frames[-1]
            obs = json.loads(world_state.observations[-1].text)
            curr_x = obs[u'XPos']
            curr_z = obs[u'ZPos']
            print('New position from observation:', curr_x, ',', curr_z, 'after action:', self.actions[self.prev_a], end=' ')  # NSWE
            if check_expected_position:
                expected_x = prev_x + [0, 0, -1, 1][self.prev_a]
                expected_z = prev_z + [-1, 1, 0, 0][self.prev_a]
                if math.hypot(curr_x - expected_x, curr_z - expected_z) > tol:
                    print(' - ERROR DETECTED! Expected:', expected_x, ',', expected_z)
                    input("Press Enter to continue...")
                else:
                    print('as expected.')
                curr_x_from_render = frame.xPos
                curr_z_from_render = frame.zPos
                print('New position from render:', curr_x_from_render, ',', curr_z_from_render, 'after action:', self.actions[self.prev_a], end=' ')  # NSWE
                if math.hypot(curr_x_from_render - expected_x, curr_z_from_render - expected_z) > tol:
                    print(' - ERROR DETECTED! Expected:', expected_x, ',', expected_z)
                    input("Press Enter to continue...")
                else:
                    print('as expected.')
            else:
                print()
            prev_x = curr_x
            prev_z = curr_z
            # act
            total_reward += self.act(world_state, agent_host, current_r)

    # process final reward
    self.logger.debug("Final reward: %d" % current_r)
    total_reward += current_r

    # update Q values
    if self.training and self.prev_s is not None and self.prev_a is not None:
        old_q = self.q_table[self.prev_s][self.prev_a]
        self.q_table[self.prev_s][self.prev_a] = old_q + self.alpha * (current_r - old_q)

    self.drawQ()

    return total_reward
[ "def", "run", "(", "self", ",", "agent_host", ")", ":", "total_reward", "=", "0", "current_r", "=", "0", "tol", "=", "0.01", "self", ".", "prev_s", "=", "None", "self", ".", "prev_a", "=", "None", "# wait for a valid observation", "world_state", "=", "agent_host", ".", "peekWorldState", "(", ")", "while", "world_state", ".", "is_mission_running", "and", "all", "(", "e", ".", "text", "==", "'{}'", "for", "e", "in", "world_state", ".", "observations", ")", ":", "world_state", "=", "agent_host", ".", "peekWorldState", "(", ")", "# wait for a frame to arrive after that", "num_frames_seen", "=", "world_state", ".", "number_of_video_frames_since_last_state", "while", "world_state", ".", "is_mission_running", "and", "world_state", ".", "number_of_video_frames_since_last_state", "==", "num_frames_seen", ":", "world_state", "=", "agent_host", ".", "peekWorldState", "(", ")", "world_state", "=", "agent_host", ".", "getWorldState", "(", ")", "for", "err", "in", "world_state", ".", "errors", ":", "print", "(", "err", ")", "if", "not", "world_state", ".", "is_mission_running", ":", "return", "0", "# mission already ended", "assert", "len", "(", "world_state", ".", "video_frames", ")", ">", "0", ",", "'No video frames!?'", "obs", "=", "json", ".", "loads", "(", "world_state", ".", "observations", "[", "-", "1", "]", ".", "text", ")", "prev_x", "=", "obs", "[", "u'XPos'", "]", "prev_z", "=", "obs", "[", "u'ZPos'", "]", "print", "(", "'Initial position:'", ",", "prev_x", ",", "','", ",", "prev_z", ")", "if", "save_images", ":", "# save the frame, for debugging", "frame", "=", "world_state", ".", "video_frames", "[", "-", "1", "]", "image", "=", "Image", ".", "frombytes", "(", "'RGB'", ",", "(", "frame", ".", "width", ",", "frame", ".", "height", ")", ",", "bytes", "(", "frame", ".", "pixels", ")", ")", "iFrame", "=", "0", "self", ".", "rep", "=", "self", ".", "rep", "+", "1", "image", ".", "save", "(", "'rep_'", "+", "str", "(", "self", ".", "rep", ")", ".", "zfill", "(", "3", ")", "+", "'_saved_frame_'", "+", "str", "(", "iFrame", ")", ".", "zfill", "(", "4", ")", "+", "'.png'", ")", "# take first action", "total_reward", "+=", "self", ".", "act", "(", "world_state", ",", "agent_host", ",", "current_r", ")", "require_move", "=", "True", "check_expected_position", "=", "True", "# main loop:", "while", "world_state", ".", "is_mission_running", ":", "# wait for the position to have changed and a reward received", "print", "(", "'Waiting for data...'", ",", "end", "=", "' '", ")", "while", "True", ":", "world_state", "=", "agent_host", ".", "peekWorldState", "(", ")", "if", "not", "world_state", ".", "is_mission_running", ":", "print", "(", "'mission ended.'", ")", "break", "if", "len", "(", "world_state", ".", "rewards", ")", ">", "0", "and", "not", "all", "(", "e", ".", "text", "==", "'{}'", "for", "e", "in", "world_state", ".", "observations", ")", ":", "obs", "=", "json", ".", "loads", "(", "world_state", ".", "observations", "[", "-", "1", "]", ".", "text", ")", "curr_x", "=", "obs", "[", "u'XPos'", "]", "curr_z", "=", "obs", "[", "u'ZPos'", "]", "if", "require_move", ":", "if", "math", ".", "hypot", "(", "curr_x", "-", "prev_x", ",", "curr_z", "-", "prev_z", ")", ">", "tol", ":", "print", "(", "'received.'", ")", "break", "else", ":", "print", "(", "'received.'", ")", "break", "# wait for a frame to arrive after that", "num_frames_seen", "=", "world_state", ".", "number_of_video_frames_since_last_state", "while", "world_state", ".", "is_mission_running", "and", "world_state", ".", 
"number_of_video_frames_since_last_state", "==", "num_frames_seen", ":", "world_state", "=", "agent_host", ".", "peekWorldState", "(", ")", "num_frames_before_get", "=", "len", "(", "world_state", ".", "video_frames", ")", "world_state", "=", "agent_host", ".", "getWorldState", "(", ")", "for", "err", "in", "world_state", ".", "errors", ":", "print", "(", "err", ")", "current_r", "=", "sum", "(", "r", ".", "getValue", "(", ")", "for", "r", "in", "world_state", ".", "rewards", ")", "if", "save_images", ":", "# save the frame, for debugging", "if", "world_state", ".", "is_mission_running", ":", "assert", "len", "(", "world_state", ".", "video_frames", ")", ">", "0", ",", "'No video frames!?'", "frame", "=", "world_state", ".", "video_frames", "[", "-", "1", "]", "image", "=", "Image", ".", "frombytes", "(", "'RGB'", ",", "(", "frame", ".", "width", ",", "frame", ".", "height", ")", ",", "bytes", "(", "frame", ".", "pixels", ")", ")", "iFrame", "=", "iFrame", "+", "1", "image", ".", "save", "(", "'rep_'", "+", "str", "(", "self", ".", "rep", ")", ".", "zfill", "(", "3", ")", "+", "'_saved_frame_'", "+", "str", "(", "iFrame", ")", ".", "zfill", "(", "4", ")", "+", "'_after_'", "+", "self", ".", "actions", "[", "self", ".", "prev_a", "]", "+", "'.png'", ")", "if", "world_state", ".", "is_mission_running", ":", "assert", "len", "(", "world_state", ".", "video_frames", ")", ">", "0", ",", "'No video frames!?'", "num_frames_after_get", "=", "len", "(", "world_state", ".", "video_frames", ")", "assert", "num_frames_after_get", ">=", "num_frames_before_get", ",", "'Fewer frames after getWorldState!?'", "frame", "=", "world_state", ".", "video_frames", "[", "-", "1", "]", "obs", "=", "json", ".", "loads", "(", "world_state", ".", "observations", "[", "-", "1", "]", ".", "text", ")", "curr_x", "=", "obs", "[", "u'XPos'", "]", "curr_z", "=", "obs", "[", "u'ZPos'", "]", "print", "(", "'New position from observation:'", ",", "curr_x", ",", "','", ",", "curr_z", ",", "'after action:'", ",", "self", ".", "actions", "[", "self", ".", "prev_a", "]", ",", "end", "=", "' '", ")", "#NSWE", "if", "check_expected_position", ":", "expected_x", "=", "prev_x", "+", "[", "0", ",", "0", ",", "-", "1", ",", "1", "]", "[", "self", ".", "prev_a", "]", "expected_z", "=", "prev_z", "+", "[", "-", "1", ",", "1", ",", "0", ",", "0", "]", "[", "self", ".", "prev_a", "]", "if", "math", ".", "hypot", "(", "curr_x", "-", "expected_x", ",", "curr_z", "-", "expected_z", ")", ">", "tol", ":", "print", "(", "' - ERROR DETECTED! Expected:'", ",", "expected_x", ",", "','", ",", "expected_z", ")", "input", "(", "\"Press Enter to continue...\"", ")", "else", ":", "print", "(", "'as expected.'", ")", "curr_x_from_render", "=", "frame", ".", "xPos", "curr_z_from_render", "=", "frame", ".", "zPos", "print", "(", "'New position from render:'", ",", "curr_x_from_render", ",", "','", ",", "curr_z_from_render", ",", "'after action:'", ",", "self", ".", "actions", "[", "self", ".", "prev_a", "]", ",", "end", "=", "' '", ")", "#NSWE", "if", "math", ".", "hypot", "(", "curr_x_from_render", "-", "expected_x", ",", "curr_z_from_render", "-", "expected_z", ")", ">", "tol", ":", "print", "(", "' - ERROR DETECTED! 
Expected:'", ",", "expected_x", ",", "','", ",", "expected_z", ")", "input", "(", "\"Press Enter to continue...\"", ")", "else", ":", "print", "(", "'as expected.'", ")", "else", ":", "print", "(", ")", "prev_x", "=", "curr_x", "prev_z", "=", "curr_z", "# act", "total_reward", "+=", "self", ".", "act", "(", "world_state", ",", "agent_host", ",", "current_r", ")", "# process final reward", "self", ".", "logger", ".", "debug", "(", "\"Final reward: %d\"", "%", "current_r", ")", "total_reward", "+=", "current_r", "# update Q values", "if", "self", ".", "training", "and", "self", ".", "prev_s", "is", "not", "None", "and", "self", ".", "prev_a", "is", "not", "None", ":", "old_q", "=", "self", ".", "q_table", "[", "self", ".", "prev_s", "]", "[", "self", ".", "prev_a", "]", "self", ".", "q_table", "[", "self", ".", "prev_s", "]", "[", "self", ".", "prev_a", "]", "=", "old_q", "+", "self", ".", "alpha", "*", "(", "current_r", "-", "old_q", ")", "self", ".", "drawQ", "(", ")", "return", "total_reward" ]
avg_line_len: 47.090909    score: 0.014337
def autodiscover(module_name=None):
    """
    Autodiscover INSTALLED_APPS perms.py modules and fail silently when
    not present. This forces an import on them to register any permissions
    bits they may want.
    """
    from django.utils.module_loading import module_has_submodule
    from permission.compat import import_module
    from permission.conf import settings

    module_name = module_name or settings.PERMISSION_AUTODISCOVER_MODULE_NAME
    app_names = (app.name for app in apps.app_configs.values())

    for app in app_names:
        mod = import_module(app)
        # Attempt to import the app's perms module
        try:
            # discover the permission module
            discover(app, module_name=module_name)
        except:
            # Decide whether to bubble up this error. If the app just doesn't
            # have a perms module, we can just ignore the error attempting
            # to import it, otherwise we want it to bubble up.
            if module_has_submodule(mod, module_name):
                raise
[ "def", "autodiscover", "(", "module_name", "=", "None", ")", ":", "from", "django", ".", "utils", ".", "module_loading", "import", "module_has_submodule", "from", "permission", ".", "compat", "import", "import_module", "from", "permission", ".", "conf", "import", "settings", "module_name", "=", "module_name", "or", "settings", ".", "PERMISSION_AUTODISCOVER_MODULE_NAME", "app_names", "=", "(", "app", ".", "name", "for", "app", "in", "apps", ".", "app_configs", ".", "values", "(", ")", ")", "for", "app", "in", "app_names", ":", "mod", "=", "import_module", "(", "app", ")", "# Attempt to import the app's perms module", "try", ":", "# discover the permission module", "discover", "(", "app", ",", "module_name", "=", "module_name", ")", "except", ":", "# Decide whether to bubble up this error. If the app just doesn't", "# have an perms module, we can just ignore the error attempting", "# to import it, otherwise we want it to bubble up.", "if", "module_has_submodule", "(", "mod", ",", "module_name", ")", ":", "raise" ]
avg_line_len: 41    score: 0.001907
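A usage sketch; in a Django project this would typically run once at startup, e.g. from an AppConfig.ready hook (the app label and the import location of autodiscover are assumptions):

    from django.apps import AppConfig

    class CoreConfig(AppConfig):
        name = "core"  # placeholder app label

        def ready(self):
            # Imports each installed app's perms.py (or a custom module
            # name) so its permission logic gets registered.
            autodiscover()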
def json2py(json_obj):
    """
    Converts the inputted JSON object to a python value.

    :param json_obj | <variant>
    """
    for key, value in json_obj.items():
        if type(value) not in (str, unicode):
            continue

        # restore a datetime
        if re.match('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}:\d+$', value):
            value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S:%f')
        elif re.match('^\d{4}-\d{2}-\d{2}$', value):
            year, month, day = map(int, value.split('-'))
            value = datetime.date(year, month, day)
        elif re.match('^\d{2}:\d{2}:\d{2}:\d+$', value):
            hour, minute, second, micro = map(int, value.split(':'))
            value = datetime.time(hour, minute, second, micro)
        else:
            found = False
            for decoder in _decoders:
                success, new_value = decoder(value)
                if success:
                    value = new_value
                    found = True
                    break
            if not found:
                continue

        json_obj[key] = value
    return json_obj
[ "def", "json2py", "(", "json_obj", ")", ":", "for", "key", ",", "value", "in", "json_obj", ".", "items", "(", ")", ":", "if", "type", "(", "value", ")", "not", "in", "(", "str", ",", "unicode", ")", ":", "continue", "# restore a datetime", "if", "re", ".", "match", "(", "'^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}:\\d+$'", ",", "value", ")", ":", "value", "=", "datetime", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%d %H:%M:%S:%f'", ")", "elif", "re", ".", "match", "(", "'^\\d{4}-\\d{2}-\\d{2}$'", ",", "value", ")", ":", "year", ",", "month", ",", "day", "=", "map", "(", "int", ",", "value", ".", "split", "(", "'-'", ")", ")", "value", "=", "datetime", ".", "date", "(", "year", ",", "month", ",", "day", ")", "elif", "re", ".", "match", "(", "'^\\d{2}:\\d{2}:\\d{2}:\\d+$'", ",", "value", ")", ":", "hour", ",", "minute", ",", "second", ",", "micro", "=", "map", "(", "int", ",", "value", ".", "split", "(", "':'", ")", ")", "value", "=", "datetime", ".", "time", "(", "hour", ",", "minute", ",", "second", ",", "micro", ")", "else", ":", "found", "=", "False", "for", "decoder", "in", "_decoders", ":", "success", ",", "new_value", "=", "decoder", "(", "value", ")", "if", "success", ":", "value", "=", "new_value", "found", "=", "True", "break", "if", "not", "found", ":", "continue", "json_obj", "[", "key", "]", "=", "value", "return", "json_obj" ]
avg_line_len: 33.424242    score: 0.014097
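A usage sketch (Python 2, given the `unicode` check above); the string values follow the colon-separated microsecond formats the regexes expect:

    payload = {
        'created': '2018-04-02 13:45:10:000123',  # -> datetime.datetime
        'day': '2018-04-02',                      # -> datetime.date
        'at': '13:45:10:000123',                  # -> datetime.time
        'name': 'report',                         # left as a string
    }
    converted = json2py(payload)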
def join_multiline_pairs(source, pair="()"):
    """
    Finds and removes newlines in multiline matching pairs of characters in
    *source*.

    By default it joins parens () but it will join any two characters given
    via the *pair* variable.

    .. note::

        Doesn't remove extraneous whitespace that ends up between the pair.
        Use `reduce_operators()` for that.

    Example::

        test = (
            "This is inside a multi-line pair of parentheses"
        )

    Will become::

        test = (            "This is inside a multi-line pair of parentheses"        )

    """
    opener = pair[0]
    closer = pair[1]
    io_obj = io.StringIO(source)
    out_tokens = []
    open_count = 0
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.OP and token_string in pair:
            if token_string == opener:
                open_count += 1
            elif token_string == closer:
                open_count -= 1
            out_tokens.append(tok)
        elif token_type in (tokenize.NL, tokenize.NEWLINE):
            if open_count == 0:
                out_tokens.append(tok)
        else:
            out_tokens.append(tok)
    return token_utils.untokenize(out_tokens)
[ "def", "join_multiline_pairs", "(", "source", ",", "pair", "=", "\"()\"", ")", ":", "opener", "=", "pair", "[", "0", "]", "closer", "=", "pair", "[", "1", "]", "io_obj", "=", "io", ".", "StringIO", "(", "source", ")", "out_tokens", "=", "[", "]", "open_count", "=", "0", "for", "tok", "in", "tokenize", ".", "generate_tokens", "(", "io_obj", ".", "readline", ")", ":", "token_type", "=", "tok", "[", "0", "]", "token_string", "=", "tok", "[", "1", "]", "if", "token_type", "==", "tokenize", ".", "OP", "and", "token_string", "in", "pair", ":", "if", "token_string", "==", "opener", ":", "open_count", "+=", "1", "elif", "token_string", "==", "closer", ":", "open_count", "-=", "1", "out_tokens", ".", "append", "(", "tok", ")", "elif", "token_type", "in", "(", "tokenize", ".", "NL", ",", "tokenize", ".", "NEWLINE", ")", ":", "if", "open_count", "==", "0", ":", "out_tokens", ".", "append", "(", "tok", ")", "else", ":", "out_tokens", ".", "append", "(", "tok", ")", "return", "token_utils", ".", "untokenize", "(", "out_tokens", ")" ]
avg_line_len: 28.613636    score: 0.001536
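A usage sketch:

    src = 'test = (\n    "inside a multi-line pair"\n)\n'
    flat = join_multiline_pairs(src)              # newlines inside (...) are dropped
    same = join_multiline_pairs(src, pair="{}")   # unchanged: no braces in src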
def do_watch(self, params):
    """
    \x1b[1mNAME\x1b[0m
            watch - Recursively watch for all changes under a path.

    \x1b[1mSYNOPSIS\x1b[0m
            watch <start|stop|stats> <path> [options]

    \x1b[1mDESCRIPTION\x1b[0m
            watch start <path> [debug] [depth]

            with debug=true, print watches as they fire. depth is
            the level for recursively setting watches:

              * -1: recurse all the way
              * 0: don't recurse, only watch the given path
              * > 0: recurse up to <level> children

            watch stats <path> [repeat] [sleep]

            with repeat=0 this command will loop until interrupted.
            sleep sets the pause duration in between each iteration.

            watch stop <path>

    \x1b[1mEXAMPLES\x1b[0m
            > watch start /foo/bar
            > watch stop /foo/bar
            > watch stats /foo/bar
    """
    wm = get_watch_manager(self._zk)
    if params.command == "start":
        debug = to_bool(params.debug)
        children = to_int(params.sleep, -1)
        wm.add(params.path, debug, children)
    elif params.command == "stop":
        wm.remove(params.path)
    elif params.command == "stats":
        repeat = to_int(params.debug, 1)
        sleep = to_int(params.sleep, 1)
        if repeat == 0:
            while True:
                wm.stats(params.path)
                time.sleep(sleep)
        else:
            for _ in range(0, repeat):
                wm.stats(params.path)
                time.sleep(sleep)
    else:
        self.show_output("watch <start|stop|stats> <path> [verbose]")
[ "def", "do_watch", "(", "self", ",", "params", ")", ":", "wm", "=", "get_watch_manager", "(", "self", ".", "_zk", ")", "if", "params", ".", "command", "==", "\"start\"", ":", "debug", "=", "to_bool", "(", "params", ".", "debug", ")", "children", "=", "to_int", "(", "params", ".", "sleep", ",", "-", "1", ")", "wm", ".", "add", "(", "params", ".", "path", ",", "debug", ",", "children", ")", "elif", "params", ".", "command", "==", "\"stop\"", ":", "wm", ".", "remove", "(", "params", ".", "path", ")", "elif", "params", ".", "command", "==", "\"stats\"", ":", "repeat", "=", "to_int", "(", "params", ".", "debug", ",", "1", ")", "sleep", "=", "to_int", "(", "params", ".", "sleep", ",", "1", ")", "if", "repeat", "==", "0", ":", "while", "True", ":", "wm", ".", "stats", "(", "params", ".", "path", ")", "time", ".", "sleep", "(", "sleep", ")", "else", ":", "for", "_", "in", "range", "(", "0", ",", "repeat", ")", ":", "wm", ".", "stats", "(", "params", ".", "path", ")", "time", ".", "sleep", "(", "sleep", ")", "else", ":", "self", ".", "show_output", "(", "\"watch <start|stop|stats> <path> [verbose]\"", ")" ]
avg_line_len: 31.254902    score: 0.001217
def from_archive(cls, archive: Archive, predictor_name: str = None) -> 'Predictor':
    """
    Instantiate a :class:`Predictor` from an :class:`~allennlp.models.archival.Archive`;
    that is, from the result of training a model. Optionally specify which `Predictor`
    subclass; otherwise, the default one for the model will be used.
    """
    # Duplicate the config so that the config inside the archive doesn't get consumed
    config = archive.config.duplicate()

    if not predictor_name:
        model_type = config.get("model").get("type")
        if model_type not in DEFAULT_PREDICTORS:
            raise ConfigurationError(f"No default predictor for model type {model_type}.\n"
                                     f"Please specify a predictor explicitly.")
        predictor_name = DEFAULT_PREDICTORS[model_type]

    dataset_reader_params = config["dataset_reader"]
    dataset_reader = DatasetReader.from_params(dataset_reader_params)

    model = archive.model
    model.eval()

    return Predictor.by_name(predictor_name)(model, dataset_reader)
[ "def", "from_archive", "(", "cls", ",", "archive", ":", "Archive", ",", "predictor_name", ":", "str", "=", "None", ")", "->", "'Predictor'", ":", "# Duplicate the config so that the config inside the archive doesn't get consumed", "config", "=", "archive", ".", "config", ".", "duplicate", "(", ")", "if", "not", "predictor_name", ":", "model_type", "=", "config", ".", "get", "(", "\"model\"", ")", ".", "get", "(", "\"type\"", ")", "if", "not", "model_type", "in", "DEFAULT_PREDICTORS", ":", "raise", "ConfigurationError", "(", "f\"No default predictor for model type {model_type}.\\n\"", "f\"Please specify a predictor explicitly.\"", ")", "predictor_name", "=", "DEFAULT_PREDICTORS", "[", "model_type", "]", "dataset_reader_params", "=", "config", "[", "\"dataset_reader\"", "]", "dataset_reader", "=", "DatasetReader", ".", "from_params", "(", "dataset_reader_params", ")", "model", "=", "archive", ".", "model", "model", ".", "eval", "(", ")", "return", "Predictor", ".", "by_name", "(", "predictor_name", ")", "(", "model", ",", "dataset_reader", ")" ]
avg_line_len: 48.521739    score: 0.008787
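A usage sketch against the AllenNLP API this method is written for; the archive path and predictor name are placeholders:

    from allennlp.models.archival import load_archive
    from allennlp.predictors import Predictor

    archive = load_archive("model.tar.gz")  # placeholder path
    predictor = Predictor.from_archive(archive, "sentence-tagger")
    result = predictor.predict_json({"sentence": "Time flies."})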
def block(context_name, parent_block_func, view_func=None):
    """A decorator that is used for inserting the decorated block function
    in the block template hierarchy.

    The :func:`block` decorator accepts the following arguments:

    :param context_name: key in the `g.blocks` dictionary in which the result
        of the decorated block function will be stored for further processing
        by the parent block function `parent_block_func`.

    :param parent_block_func: parent block function in the template hierarchy
        which will use the stored result.

    :param view_func: the decorated block will take an effect only in the
        execution context of the specified view function. If the default value
        of `None` is used, then the block will be used as default for the
        specified `context_name`. Internally this parameter is converted to a
        Werkzeug endpoint in the same way Flask is doing that with the
        `Flask.route` decorator.
    """
    def decorator(block_func):
        block = Block(block_func, view_func)
        parent_block = Block.block_mapping[parent_block_func]
        parent_block.append_context_block(context_name, block)
        return block_func
    return decorator
[ "def", "block", "(", "context_name", ",", "parent_block_func", ",", "view_func", "=", "None", ")", ":", "def", "decorator", "(", "block_func", ")", ":", "block", "=", "Block", "(", "block_func", ",", "view_func", ")", "parent_block", "=", "Block", ".", "block_mapping", "[", "parent_block_func", "]", "parent_block", ".", "append_context_block", "(", "context_name", ",", "block", ")", "return", "block_func", "return", "decorator" ]
avg_line_len: 53.307692    score: 0.000709
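A usage sketch; `page` stands in for an already-registered parent block function and `index` for a Flask view function (both placeholders):

    # Stores sidebar()'s result under g.blocks['sidebar'] for processing
    # by page(); active only when the index view handles the request.
    @block('sidebar', page, view_func=index)
    def sidebar():
        return ['About', 'Contact']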
def keys(self):
    """Create an ordered dict of the names and values of key fields."""
    keys = OrderedDict()

    def order_key(_):
        (k, v) = _
        cache_key = getattr(type(self), k)
        return cache_key.order

    items = [(k, getattr(type(self), k)) for k in dir(type(self))]
    items = [(k, v) for (k, v) in items if isinstance(v, Key)]
    for k, v in sorted(items, key=order_key):
        keys[k] = getattr(self, k)
    return keys
[ "def", "keys", "(", "self", ")", ":", "keys", "=", "OrderedDict", "(", ")", "def", "order_key", "(", "_", ")", ":", "(", "k", ",", "v", ")", "=", "_", "cache_key", "=", "getattr", "(", "type", "(", "self", ")", ",", "k", ")", "return", "cache_key", ".", "order", "items", "=", "[", "(", "k", ",", "getattr", "(", "type", "(", "self", ")", ",", "k", ")", ")", "for", "k", "in", "dir", "(", "type", "(", "self", ")", ")", "]", "items", "=", "[", "(", "k", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "items", "if", "isinstance", "(", "v", ",", "Key", ")", "]", "for", "k", ",", "v", "in", "sorted", "(", "items", ",", "key", "=", "order_key", ")", ":", "keys", "[", "k", "]", "=", "getattr", "(", "self", ",", "k", ")", "return", "keys" ]
avg_line_len: 25.52381    score: 0.01259
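A minimal sketch of the surrounding pattern this method implies: class attributes that are `Key` descriptors carrying an `order`, resolved to concrete values on instances (all names below are hypothetical):

    class UserKey(Model):        # Model is assumed to mix in keys()
        namespace = Key(order=0)
        user_id = Key(order=1)

    UserKey(namespace='app', user_id=42).keys()
    # -> OrderedDict([('namespace', 'app'), ('user_id', 42)])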
def update(self, points, pointvol=0., vol_dec=0.5, vol_check=2.,
           rstate=None, bootstrap=0, pool=None, mc_integrate=False):
    """
    Update the set of ellipsoids to bound the collection of points.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        The set of points to bound.

    pointvol : float, optional
        The minimum volume associated with each point. Default is `0.`.

    vol_dec : float, optional
        The required fractional reduction in volume after splitting
        an ellipsoid in order to accept the split. Default is `0.5`.

    vol_check : float, optional
        The factor used when checking if the volume of the original
        bounding ellipsoid is large enough to warrant `> 2` splits
        via `ell.vol > vol_check * nlive * pointvol`. Default is `2.0`.

    rstate : `~numpy.random.RandomState`, optional
        `~numpy.random.RandomState` instance.

    bootstrap : int, optional
        The number of bootstrapped realizations of the ellipsoids. The
        maximum distance to the set of points "left out" during each
        iteration is used to enlarge the resulting volumes.
        Default is `0`.

    pool : user-provided pool, optional
        Use this pool of workers to execute operations in parallel.

    mc_integrate : bool, optional
        Whether to use Monte Carlo methods to compute the effective
        volume and fractional overlap of the final union of ellipsoids
        with the unit cube. Default is `False`.
    """
    if rstate is None:
        rstate = np.random

    if not HAVE_KMEANS:
        raise ValueError("scipy.cluster.vq.kmeans2 is required "
                         "to compute ellipsoid decompositions.")

    npoints, ndim = points.shape

    # Calculate the bounding ellipsoid for the points, possibly
    # enlarged to a minimum volume.
    firstell = bounding_ellipsoid(points, pointvol=pointvol)

    # Recursively split the bounding ellipsoid using `vol_check`
    # until the volume of each split no longer decreases by a
    # factor of `vol_dec`.
    ells = _bounding_ellipsoids(points, firstell, pointvol=pointvol,
                                vol_dec=vol_dec, vol_check=vol_check)

    # Update the set of ellipsoids.
    self.nells = len(ells)
    self.ells = ells
    self.ctrs = np.array([ell.ctr for ell in self.ells])
    self.covs = np.array([ell.cov for ell in self.ells])
    self.ams = np.array([ell.am for ell in self.ells])
    self.vols = np.array([ell.vol for ell in self.ells])
    self.vol_tot = sum(self.vols)

    # Compute expansion factor.
    expands = np.array([ell.expand for ell in self.ells])
    vols_orig = self.vols / expands
    vol_tot_orig = sum(vols_orig)
    self.expand_tot = self.vol_tot / vol_tot_orig

    # Use bootstrapping to determine the volume expansion factor.
    if bootstrap > 0:

        # If provided, compute bootstraps in parallel using a pool.
        if pool is None:
            M = map
        else:
            M = pool.map
        ps = [points for it in range(bootstrap)]
        pvs = [pointvol for it in range(bootstrap)]
        vds = [vol_dec for it in range(bootstrap)]
        vcs = [vol_check for it in range(bootstrap)]
        args = zip(ps, pvs, vds, vcs)
        expands = list(M(_ellipsoids_bootstrap_expand, args))

        # Conservatively set the expansion factor to be the maximum
        # factor derived from our set of bootstraps.
        expand = max(expands)

        # If our ellipsoids are overly constrained, expand them.
        if expand > 1.:
            vs = self.vols * expand**ndim
            self.scale_to_vols(vs)

    # Estimate the volume and fractional overlap with the unit cube
    # using Monte Carlo integration.
    if mc_integrate:
        self.vol, self.funit = self.monte_carlo_vol(return_overlap=True)
[ "def", "update", "(", "self", ",", "points", ",", "pointvol", "=", "0.", ",", "vol_dec", "=", "0.5", ",", "vol_check", "=", "2.", ",", "rstate", "=", "None", ",", "bootstrap", "=", "0", ",", "pool", "=", "None", ",", "mc_integrate", "=", "False", ")", ":", "if", "rstate", "is", "None", ":", "rstate", "=", "np", ".", "random", "if", "not", "HAVE_KMEANS", ":", "raise", "ValueError", "(", "\"scipy.cluster.vq.kmeans2 is required \"", "\"to compute ellipsoid decompositions.\"", ")", "npoints", ",", "ndim", "=", "points", ".", "shape", "# Calculate the bounding ellipsoid for the points, possibly", "# enlarged to a minimum volume.", "firstell", "=", "bounding_ellipsoid", "(", "points", ",", "pointvol", "=", "pointvol", ")", "# Recursively split the bounding ellipsoid using `vol_check`", "# until the volume of each split no longer decreases by a", "# factor of `vol_dec`.", "ells", "=", "_bounding_ellipsoids", "(", "points", ",", "firstell", ",", "pointvol", "=", "pointvol", ",", "vol_dec", "=", "vol_dec", ",", "vol_check", "=", "vol_check", ")", "# Update the set of ellipsoids.", "self", ".", "nells", "=", "len", "(", "ells", ")", "self", ".", "ells", "=", "ells", "self", ".", "ctrs", "=", "np", ".", "array", "(", "[", "ell", ".", "ctr", "for", "ell", "in", "self", ".", "ells", "]", ")", "self", ".", "covs", "=", "np", ".", "array", "(", "[", "ell", ".", "cov", "for", "ell", "in", "self", ".", "ells", "]", ")", "self", ".", "ams", "=", "np", ".", "array", "(", "[", "ell", ".", "am", "for", "ell", "in", "self", ".", "ells", "]", ")", "self", ".", "vols", "=", "np", ".", "array", "(", "[", "ell", ".", "vol", "for", "ell", "in", "self", ".", "ells", "]", ")", "self", ".", "vol_tot", "=", "sum", "(", "self", ".", "vols", ")", "# Compute expansion factor.", "expands", "=", "np", ".", "array", "(", "[", "ell", ".", "expand", "for", "ell", "in", "self", ".", "ells", "]", ")", "vols_orig", "=", "self", ".", "vols", "/", "expands", "vol_tot_orig", "=", "sum", "(", "vols_orig", ")", "self", ".", "expand_tot", "=", "self", ".", "vol_tot", "/", "vol_tot_orig", "# Use bootstrapping to determine the volume expansion factor.", "if", "bootstrap", ">", "0", ":", "# If provided, compute bootstraps in parallel using a pool.", "if", "pool", "is", "None", ":", "M", "=", "map", "else", ":", "M", "=", "pool", ".", "map", "ps", "=", "[", "points", "for", "it", "in", "range", "(", "bootstrap", ")", "]", "pvs", "=", "[", "pointvol", "for", "it", "in", "range", "(", "bootstrap", ")", "]", "vds", "=", "[", "vol_dec", "for", "it", "in", "range", "(", "bootstrap", ")", "]", "vcs", "=", "[", "vol_check", "for", "it", "in", "range", "(", "bootstrap", ")", "]", "args", "=", "zip", "(", "ps", ",", "pvs", ",", "vds", ",", "vcs", ")", "expands", "=", "list", "(", "M", "(", "_ellipsoids_bootstrap_expand", ",", "args", ")", ")", "# Conservatively set the expansion factor to be the maximum", "# factor derived from our set of bootstraps.", "expand", "=", "max", "(", "expands", ")", "# If our ellipsoids are overly constrained, expand them.", "if", "expand", ">", "1.", ":", "vs", "=", "self", ".", "vols", "*", "expand", "**", "ndim", "self", ".", "scale_to_vols", "(", "vs", ")", "# Estimate the volume and fractional overlap with the unit cube", "# using Monte Carlo integration.", "if", "mc_integrate", ":", "self", ".", "vol", ",", "self", ".", "funit", "=", "self", ".", "monte_carlo_vol", "(", "return_overlap", "=", "True", ")" ]
39.07619
0.000713
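The expansion bookkeeping above is easier to see in isolation. Below is a minimal, self-contained sketch (not the implementation above; `bounding_ellipsoid_sketch` is a hypothetical helper) that bounds points with a covariance-based ellipsoid, scales it so every point lies inside, and reports the resulting volume expansion factor:

import numpy as np

def bounding_ellipsoid_sketch(points):
    """Return (center, scaled covariance, volume expansion factor)."""
    ctr = points.mean(axis=0)
    cov = np.cov(points, rowvar=False)
    am = np.linalg.inv(cov)  # precision matrix defining the ellipsoid
    # Mahalanobis distance of every point from the center.
    delta = points - ctr
    d2 = np.einsum('ij,jk,ik->i', delta, am, delta)
    fmax = d2.max()  # expand so the farthest point sits on the surface
    # Scaling cov by fmax scales each axis by sqrt(fmax), so the
    # volume grows by fmax**(ndim / 2).
    return ctr, cov * fmax, fmax ** (points.shape[1] / 2.0)

rstate = np.random.RandomState(42)
pts = rstate.randn(100, 3)
ctr, cov, expand = bounding_ellipsoid_sketch(pts)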
def get(self, name, return_json=False, quiet=False):
    '''get returns the listing for a single named instance. The instance
       is assumed to be running; we look up its PID and other metadata.
    '''
    from spython.utils import check_install
    check_install()

    # Ensure compatible for singularity prior to 3.0, and after 3.0
    subgroup = "instance.list"

    if 'version 3' in self.version():
        subgroup = ["instance", "list"]

    cmd = self._init_command(subgroup)
    cmd.append(name)
    output = run_command(cmd, quiet=True)
    instances = []

    # Success, we have instances
    if output['return_code'] == 0:

        # Print the table unless quiet was requested
        if quiet is False:
            print(''.join(output['message']))

        # Prepare json result from table
        header = ['daemon_name','pid','container_image']
        instances = parse_table(output['message'][0], header)

        # Does the user want instance objects instead?
        listing = []
        if return_json is False:
            for i in instances:
                new_instance = Instance(pid=i['pid'],
                                        name=i['daemon_name'],
                                        image=i['container_image'],
                                        start=False)

                listing.append(new_instance)
            instances = listing

    # Couldn't get UID
    elif output['return_code'] == 255:
        bot.error("Couldn't get UID")

    # Any other return code means no instances were found
    else:
        bot.info('No instances found.')

    # If we are given a name, return just one
    if name is not None and len(instances) == 1:
        instances = instances[0]

    return instances
[ "def", "get", "(", "self", ",", "name", ",", "return_json", "=", "False", ",", "quiet", "=", "False", ")", ":", "from", "spython", ".", "utils", "import", "check_install", "check_install", "(", ")", "# Ensure compatible for singularity prior to 3.0, and after 3.0", "subgroup", "=", "\"instance.list\"", "if", "'version 3'", "in", "self", ".", "version", "(", ")", ":", "subgroup", "=", "[", "\"instance\"", ",", "\"list\"", "]", "cmd", "=", "self", ".", "_init_command", "(", "subgroup", ")", "cmd", ".", "append", "(", "name", ")", "output", "=", "run_command", "(", "cmd", ",", "quiet", "=", "True", ")", "# Success, we have instances", "if", "output", "[", "'return_code'", "]", "==", "0", ":", "# Only print the table if we are returning json", "if", "quiet", "is", "False", ":", "print", "(", "''", ".", "join", "(", "output", "[", "'message'", "]", ")", ")", "# Prepare json result from table", "header", "=", "[", "'daemon_name'", ",", "'pid'", ",", "'container_image'", "]", "instances", "=", "parse_table", "(", "output", "[", "'message'", "]", "[", "0", "]", ",", "header", ")", "# Does the user want instance objects instead?", "listing", "=", "[", "]", "if", "return_json", "is", "False", ":", "for", "i", "in", "instances", ":", "new_instance", "=", "Instance", "(", "pid", "=", "i", "[", "'pid'", "]", ",", "name", "=", "i", "[", "'daemon_name'", "]", ",", "image", "=", "i", "[", "'container_image'", "]", ",", "start", "=", "False", ")", "listing", ".", "append", "(", "new_instance", ")", "instances", "=", "listing", "# Couldn't get UID", "elif", "output", "[", "'return_code'", "]", "==", "255", ":", "bot", ".", "error", "(", "\"Couldn't get UID\"", ")", "# Return code of 0", "else", ":", "bot", ".", "info", "(", "'No instances found.'", ")", "# If we are given a name, return just one", "if", "name", "is", "not", "None", "and", "len", "(", "instances", ")", "==", "1", ":", "instances", "=", "instances", "[", "0", "]", "return", "instances" ]
28.473684
0.002382
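The interesting step above is `parse_table`, which turns the CLI table held in `output['message']` into dicts keyed by a header. A rough, hypothetical stand-in (the real helper lives in spython and may differ) looks like:

import re

def parse_table_sketch(lines, header):
    """Split whitespace-delimited table rows into dicts keyed by header."""
    rows = []
    for line in lines:
        cols = re.split(r'\s{2,}|\t', line.strip())  # naive column split
        if len(cols) == len(header):
            rows.append(dict(zip(header, cols)))
    return rows

output = ["web1            4344    /tmp/containers/app.sif"]
print(parse_table_sketch(output, ['daemon_name', 'pid', 'container_image']))
# [{'daemon_name': 'web1', 'pid': '4344', 'container_image': '/tmp/containers/app.sif'}]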
def removed(name, updates=None):
    '''
    Ensure Microsoft Updates are uninstalled.

    Args:

        name (str):
            The identifier of a single update to uninstall.

        updates (list):
            A list of identifiers for updates to be removed. Overrides
            ``name``. Default is None.

    .. note:: Identifiers can be the GUID, the KB number, or any part of the
       Title of the Microsoft update. GUIDs and KBs are the preferred method
       to ensure you're uninstalling the correct update.

    .. warning:: Using a partial KB number or a partial Title could result in
       more than one update being removed.

    Returns:
        dict: A dictionary containing the results of the removal

    CLI Example:

    .. code-block:: yaml

        # using a GUID
        uninstall_update:
          wua.removed:
            - name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211

        # using a KB
        uninstall_update:
          wua.removed:
            - name: KB3194343

        # using the full Title
        uninstall_update:
          wua.removed:
            - name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)

        # Uninstall multiple updates
        uninstall_updates:
          wua.removed:
            - updates:
              - KB3194343
              - 28cf1b09-2b1a-458c-9bd1-971d1b26b211
    '''
    if isinstance(updates, six.string_types):
        updates = [updates]

    if not updates:
        updates = name

    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    wua = salt.utils.win_update.WindowsUpdateAgent()

    # Search for updates
    updates = wua.search(updates)

    # No updates found
    if updates.count() == 0:
        ret['comment'] = 'No updates found'
        return ret

    # List of updates to uninstall
    uninstall = salt.utils.win_update.Updates()
    removed_updates = []
    for item in updates.updates:
        if salt.utils.data.is_true(item.IsInstalled):
            uninstall.updates.Add(item)
        else:
            removed_updates.extend('KB' + kb for kb in item.KBArticleIDs)

    if uninstall.count() == 0:
        ret['comment'] = 'Updates already removed: '
        ret['comment'] += '\n - '.join(removed_updates)
        return ret

    # Return comment of changes if test.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Updates will be removed:'
        for update in uninstall.updates:
            ret['comment'] += '\n'
            ret['comment'] += ': '.join(
                [update.Identity.UpdateID, update.Title])
        return ret

    # Uninstall updates
    wua.uninstall(uninstall)

    # Refresh windows update info
    wua.refresh()
    post_info = wua.updates().list()

    # Verify the removal
    for item in uninstall.list():
        if salt.utils.data.is_true(post_info[item]['Installed']):
            ret['changes']['failed'] = {
                item: {'Title': post_info[item]['Title'][:40] + '...',
                       'KBs': post_info[item]['KBs']}
            }
            ret['result'] = False
        else:
            ret['changes']['removed'] = {
                item: {'Title': post_info[item]['Title'][:40] + '...',
                       'NeedsReboot': post_info[item]['NeedsReboot'],
                       'KBs': post_info[item]['KBs']}
            }

    if ret['changes'].get('failed', False):
        ret['comment'] = 'Updates failed'
    else:
        ret['comment'] = 'Updates removed successfully'

    return ret
[ "def", "removed", "(", "name", ",", "updates", "=", "None", ")", ":", "if", "isinstance", "(", "updates", ",", "six", ".", "string_types", ")", ":", "updates", "=", "[", "updates", "]", "if", "not", "updates", ":", "updates", "=", "name", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "wua", "=", "salt", ".", "utils", ".", "win_update", ".", "WindowsUpdateAgent", "(", ")", "# Search for updates", "updates", "=", "wua", ".", "search", "(", "updates", ")", "# No updates found", "if", "updates", ".", "count", "(", ")", "==", "0", ":", "ret", "[", "'comment'", "]", "=", "'No updates found'", "return", "ret", "# List of updates to uninstall", "uninstall", "=", "salt", ".", "utils", ".", "win_update", ".", "Updates", "(", ")", "removed_updates", "=", "[", "]", "for", "item", "in", "updates", ".", "updates", ":", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "item", ".", "IsInstalled", ")", ":", "uninstall", ".", "updates", ".", "Add", "(", "item", ")", "else", ":", "removed_updates", ".", "extend", "(", "'KB'", "+", "kb", "for", "kb", "in", "item", ".", "KBArticleIDs", ")", "if", "uninstall", ".", "count", "(", ")", "==", "0", ":", "ret", "[", "'comment'", "]", "=", "'Updates already removed: '", "ret", "[", "'comment'", "]", "+=", "'\\n - '", ".", "join", "(", "removed_updates", ")", "return", "ret", "# Return comment of changes if test.", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Updates will be removed:'", "for", "update", "in", "uninstall", ".", "updates", ":", "ret", "[", "'comment'", "]", "+=", "'\\n'", "ret", "[", "'comment'", "]", "+=", "': '", ".", "join", "(", "[", "update", ".", "Identity", ".", "UpdateID", ",", "update", ".", "Title", "]", ")", "return", "ret", "# Install updates", "wua", ".", "uninstall", "(", "uninstall", ")", "# Refresh windows update info", "wua", ".", "refresh", "(", ")", "post_info", "=", "wua", ".", "updates", "(", ")", ".", "list", "(", ")", "# Verify the installation", "for", "item", "in", "uninstall", ".", "list", "(", ")", ":", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "post_info", "[", "item", "]", "[", "'Installed'", "]", ")", ":", "ret", "[", "'changes'", "]", "[", "'failed'", "]", "=", "{", "item", ":", "{", "'Title'", ":", "post_info", "[", "item", "]", "[", "'Title'", "]", "[", ":", "40", "]", "+", "'...'", ",", "'KBs'", ":", "post_info", "[", "item", "]", "[", "'KBs'", "]", "}", "}", "ret", "[", "'result'", "]", "=", "False", "else", ":", "ret", "[", "'changes'", "]", "[", "'removed'", "]", "=", "{", "item", ":", "{", "'Title'", ":", "post_info", "[", "item", "]", "[", "'Title'", "]", "[", ":", "40", "]", "+", "'...'", ",", "'NeedsReboot'", ":", "post_info", "[", "item", "]", "[", "'NeedsReboot'", "]", ",", "'KBs'", ":", "post_info", "[", "item", "]", "[", "'KBs'", "]", "}", "}", "if", "ret", "[", "'changes'", "]", ".", "get", "(", "'failed'", ",", "False", ")", ":", "ret", "[", "'comment'", "]", "=", "'Updates failed'", "else", ":", "ret", "[", "'comment'", "]", "=", "'Updates removed successfully'", "return", "ret" ]
28.52459
0.000833
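Salt state functions all follow the return protocol seen here: a dict with name/changes/result/comment, with result=None signalling a dry run under test mode. A stripped-down sketch of that protocol (hypothetical names; `test` passed as a plain argument instead of reading `__opts__['test']`):

def example_removed(name, pending, test=False):
    """Minimal sketch of the Salt state return protocol used above."""
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    if not pending:
        ret['comment'] = 'No updates found'
        return ret
    if test:  # dry run: describe the change and set result to None
        ret['result'] = None
        ret['comment'] = 'Updates will be removed: ' + ', '.join(pending)
        return ret
    ret['changes']['removed'] = list(pending)
    ret['comment'] = 'Updates removed successfully'
    return ret

print(example_removed('uninstall_update', ['KB3194343'], test=True))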
def alter_edge(self, from_index, to_index, to_jimage=None, new_weight=None, new_edge_properties=None): """ Alters either the weight or the edge_properties of an edge in the StructureGraph. :param from_index: int :param to_index: int :param to_jimage: tuple :param new_weight: alter_edge does not require that weight be altered. As such, by default, this is None. If weight is to be changed, it should be a float. :param new_edge_properties: alter_edge does not require that edge_properties be altered. As such, by default, this is None. If any edge properties are to be changed, it should be a dictionary of edge properties to be changed. :return: """ existing_edges = self.graph.get_edge_data(from_index, to_index) # ensure that edge exists before attempting to change it if not existing_edges: raise ValueError("Edge between {} and {} cannot be altered;\ no edge exists between those sites.".format( from_index, to_index )) if to_jimage is None: edge_index = 0 else: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i if new_weight is not None: self.graph[from_index][to_index][edge_index]['weight'] = new_weight if new_edge_properties is not None: for prop in list(new_edge_properties.keys()): self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
[ "def", "alter_edge", "(", "self", ",", "from_index", ",", "to_index", ",", "to_jimage", "=", "None", ",", "new_weight", "=", "None", ",", "new_edge_properties", "=", "None", ")", ":", "existing_edges", "=", "self", ".", "graph", ".", "get_edge_data", "(", "from_index", ",", "to_index", ")", "# ensure that edge exists before attempting to change it", "if", "not", "existing_edges", ":", "raise", "ValueError", "(", "\"Edge between {} and {} cannot be altered;\\\n no edge exists between those sites.\"", ".", "format", "(", "from_index", ",", "to_index", ")", ")", "if", "to_jimage", "is", "None", ":", "edge_index", "=", "0", "else", ":", "for", "i", ",", "properties", "in", "existing_edges", ".", "items", "(", ")", ":", "if", "properties", "[", "\"to_jimage\"", "]", "==", "to_jimage", ":", "edge_index", "=", "i", "if", "new_weight", "is", "not", "None", ":", "self", ".", "graph", "[", "from_index", "]", "[", "to_index", "]", "[", "edge_index", "]", "[", "'weight'", "]", "=", "new_weight", "if", "new_edge_properties", "is", "not", "None", ":", "for", "prop", "in", "list", "(", "new_edge_properties", ".", "keys", "(", ")", ")", ":", "self", ".", "graph", "[", "from_index", "]", "[", "to_index", "]", "[", "edge_index", "]", "[", "prop", "]", "=", "new_edge_properties", "[", "prop", "]" ]
40.5
0.002296
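The keyed `[from_index][to_index][edge_index]` access above implies the underlying graph is a networkx MultiDiGraph, so the same lookup-and-mutate pattern can be tried directly on a bare graph. A small sketch with toy data (not pymatgen):

import networkx as nx

g = nx.MultiDiGraph()
g.add_edge(0, 1, to_jimage=(0, 0, 0), weight=1.0)
g.add_edge(0, 1, to_jimage=(1, 0, 0), weight=2.0)

# Locate the parallel edge whose to_jimage matches, then mutate in place.
for key, attrs in g.get_edge_data(0, 1).items():
    if attrs['to_jimage'] == (1, 0, 0):
        g[0][1][key]['weight'] = 2.5   # alter an existing property
        g[0][1][key]['order'] = 1      # add a new edge property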
def render_registration(self): ''' Render pinned points on video frame as red rectangle. ''' surface = self.get_surface() if self.canvas is None or self.df_canvas_corners.shape[0] == 0: return surface corners = self.df_canvas_corners.copy() corners['w'] = 1 transform = self.canvas.shapes_to_canvas_transform canvas_corners = corners.values.dot(transform.T.values).T points_x = canvas_corners[0] points_y = canvas_corners[1] cairo_context = cairo.Context(surface) cairo_context.move_to(points_x[0], points_y[0]) for x, y in zip(points_x[1:], points_y[1:]): cairo_context.line_to(x, y) cairo_context.line_to(points_x[0], points_y[0]) cairo_context.set_source_rgb(1, 0, 0) cairo_context.stroke() return surface
[ "def", "render_registration", "(", "self", ")", ":", "surface", "=", "self", ".", "get_surface", "(", ")", "if", "self", ".", "canvas", "is", "None", "or", "self", ".", "df_canvas_corners", ".", "shape", "[", "0", "]", "==", "0", ":", "return", "surface", "corners", "=", "self", ".", "df_canvas_corners", ".", "copy", "(", ")", "corners", "[", "'w'", "]", "=", "1", "transform", "=", "self", ".", "canvas", ".", "shapes_to_canvas_transform", "canvas_corners", "=", "corners", ".", "values", ".", "dot", "(", "transform", ".", "T", ".", "values", ")", ".", "T", "points_x", "=", "canvas_corners", "[", "0", "]", "points_y", "=", "canvas_corners", "[", "1", "]", "cairo_context", "=", "cairo", ".", "Context", "(", "surface", ")", "cairo_context", ".", "move_to", "(", "points_x", "[", "0", "]", ",", "points_y", "[", "0", "]", ")", "for", "x", ",", "y", "in", "zip", "(", "points_x", "[", "1", ":", "]", ",", "points_y", "[", "1", ":", "]", ")", ":", "cairo_context", ".", "line_to", "(", "x", ",", "y", ")", "cairo_context", ".", "line_to", "(", "points_x", "[", "0", "]", ",", "points_y", "[", "0", "]", ")", "cairo_context", ".", "set_source_rgb", "(", "1", ",", "0", ",", "0", ")", "cairo_context", ".", "stroke", "(", ")", "return", "surface" ]
34.24
0.002273
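The cairo calls above follow the standard move_to/line_to/stroke pattern. A self-contained sketch that draws the same kind of red outline onto a plain image surface (standalone pycairo, no canvas transform; corner coordinates are made up):

import cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 320, 240)
ctx = cairo.Context(surface)

xs, ys = [10, 300, 300, 10], [10, 10, 230, 230]
ctx.move_to(xs[0], ys[0])
for x, y in zip(xs[1:], ys[1:]):
    ctx.line_to(x, y)
ctx.line_to(xs[0], ys[0])      # close the outline back at the first corner
ctx.set_source_rgb(1, 0, 0)    # red, matching the docstring above
ctx.stroke()
surface.write_to_png('registration.png')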
def completed_number(prefix, length):
    """
    'prefix' is the start of the CC number as a string, any number of digits.
    'length' is the length of the CC number to generate. Typically 13 or 16
    """

    ccnumber = list(prefix)

    # generate digits

    while len(ccnumber) < (length - 1):
        digit = random.choice(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
        ccnumber.append(digit)

    # Calculate sum

    sum = 0
    pos = 0

    reversedCCnumber = []
    reversedCCnumber.extend(ccnumber)
    reversedCCnumber.reverse()

    while pos < length - 1:

        odd = int( reversedCCnumber[pos] ) * 2
        if odd > 9:
            odd -= 9

        sum += odd

        if pos != (length - 2):
            sum += int( reversedCCnumber[pos+1] )

        pos += 2

    # Calculate check digit
    checkdigit = ((sum // 10 + 1) * 10 - sum) % 10
    ccnumber.append( str(int(checkdigit)) )

    return ''.join(ccnumber)
[ "def", "completed_number", "(", "prefix", ",", "length", ")", ":", "ccnumber", "=", "list", "(", "prefix", ")", "# generate digits", "while", "len", "(", "ccnumber", ")", "<", "(", "length", "-", "1", ")", ":", "digit", "=", "random", ".", "choice", "(", "[", "'0'", ",", "'1'", ",", "'2'", ",", "'3'", ",", "'4'", ",", "'5'", ",", "'6'", ",", "'7'", ",", "'8'", ",", "'9'", "]", ")", "ccnumber", ".", "append", "(", "digit", ")", "# Calculate sum ", "sum", "=", "0", "pos", "=", "0", "reversedCCnumber", "=", "[", "]", "reversedCCnumber", ".", "extend", "(", "ccnumber", ")", "reversedCCnumber", ".", "reverse", "(", ")", "while", "pos", "<", "length", "-", "1", ":", "odd", "=", "int", "(", "reversedCCnumber", "[", "pos", "]", ")", "*", "2", "if", "odd", ">", "9", ":", "odd", "-=", "9", "sum", "+=", "odd", "if", "pos", "!=", "(", "length", "-", "2", ")", ":", "sum", "+=", "int", "(", "reversedCCnumber", "[", "pos", "+", "1", "]", ")", "pos", "+=", "2", "# Calculate check digit", "checkdigit", "=", "(", "(", "sum", "//", "10", "+", "1", ")", "*", "10", "-", "sum", ")", "%", "10", "ccnumber", ".", "append", "(", "str", "(", "int", "(", "checkdigit", ")", ")", ")", "return", "''", ".", "join", "(", "ccnumber", ")" ]
32.25
0.009677
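The while-loop implements the Luhn checksum. A compact validator makes the rule explicit and can sanity-check generated numbers (a standalone sketch, not part of the original module):

def luhn_is_valid(number):
    """Luhn check: double every second digit from the right, subtract 9
    from results above 9, and require the total to be divisible by 10."""
    digits = [int(c) for c in str(number)]
    total = 0
    for i, d in enumerate(reversed(digits)):
        if i % 2 == 1:       # every second digit from the right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0

assert luhn_is_valid('79927398713')      # classic Luhn test number
assert not luhn_is_valid('79927398710')  # wrong check digit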
def get_field_info(wrapper, entity_type):
    ':type wrapper: atws.Wrapper'
    fields = wrapper.new('GetFieldInfo')
    fields.psObjectType = entity_type
    return wrapper.GetFieldInfo(fields)
[ "def", "get_field_info", "(", "wrapper", ",", "entity_type", ")", ":", "fields", "=", "wrapper", ".", "new", "(", "'GetFieldInfo'", ")", "fields", ".", "psObjectType", "=", "entity_type", "return", "wrapper", ".", "GetFieldInfo", "(", "fields", ")" ]
37.8
0.010363
def graph_from_seeds(seeds, cell_source):
    """
    This creates/updates a networkx graph from a list of cells.

    The graph is created when the cell_source is an instance of ExcelCompiler.
    The graph is updated when the cell_source is an instance of Spreadsheet.
    """

    # when called from Spreadsheet instance, use the Spreadsheet cellmap and graph
    if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet
        cellmap = cell_source.cellmap
        cells = cellmap
        G = cell_source.G
        for c in seeds:
            G.add_node(c)
            cellmap[c.address()] = c
    # when called from ExcelCompiler instance, construct cellmap and graph from seeds
    else: # ~ cell_source is an ExcelCompiler
        cellmap = dict([(x.address(),x) for x in seeds])
        cells = cell_source.cells
        # directed graph
        G = networkx.DiGraph()
        # match the info in cellmap
        for c in cellmap.values():
            G.add_node(c)

    # cells to analyze: only formulas
    todo = [s for s in seeds if s.formula]
    steps = [i for i,s in enumerate(todo)]
    names = cell_source.named_ranges

    while todo:
        c1 = todo.pop()
        step = steps.pop()
        cursheet = c1.sheet

        ###### 1) looking for cell c1 dependencies ####################
        # print 'C1', c1.address()
        # in case a formula, get all cells that are arguments
        pystr, ast = cell2code(c1, names)
        # set the code & compile it (will flag problems sooner rather than later)
        c1.python_expression = pystr.replace('"', "'") # compilation is done later

        if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:
            if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler
                cell_source.pointers.add(c1.address())

        # get all the cells/ranges this formula refers to
        deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]
        # remove dupes
        deps = uniqueify(deps)

        ###### 2) connect dependencies in cells in graph ####################

        # ### LOG
        # tmp = []
        # for dep in deps:
        #     if dep not in names:
        #         if "!" not in dep and cursheet != None:
        #             dep = cursheet + "!" + dep
        #     if dep not in cellmap:
        #         tmp.append(dep)
        # #deps = tmp
        # logStep = "%s %s = %s " % ('|'*step, c1.address(), '',)
        # print logStep
        # if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):
        #     print logStep, "[%s...%s]" % (deps[0], deps[-1])
        # elif len(deps) > 0:
        #     print logStep, "->", deps
        # else:
        #     print logStep, "done"

        for dep in deps:
            dep_name = dep.tvalue.replace('$','')

            # this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError
            if dep_name.startswith(':') or dep_name.endswith(':'):
                dep_name = dep_name.replace(':', '')

            # if not pointer, we need an absolute address
            if dep.tsubtype != 'pointer' and dep_name not in names and "!" not in dep_name and cursheet != None:
                dep_name = cursheet + "!" + dep_name

            # Named_ranges + ranges already parsed (previous iterations)
            if dep_name in cellmap:
                origins = [cellmap[dep_name]]
                target = cellmap[c1.address()]
            # if the dep_name is a multi-cell range, create a range object
            elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):
                if dep_name in names:
                    reference = names[dep_name]
                else:
                    reference = dep_name

                if 'OFFSET' in reference or 'INDEX' in reference:
                    start_end = prepare_pointer(reference, names, ref_cell = c1)
                    rng = cell_source.Range(start_end)

                    if dep_name in names: # dep is a pointer range
                        address = dep_name
                    else:
                        if c1.address() in names: # c1 holds is a pointer range
                            address = c1.address()
                        else: # a pointer range with no name, its address will be its name
                            address = '%s:%s' % (start_end["start"], start_end["end"])
                            cell_source.pointers.add(address)
                else:
                    address = dep_name

                    # get a list of the addresses in this range that are not yet in the graph
                    range_addresses = list(resolve_range(reference, should_flatten=True)[0])
                    cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]

                    if len(cellmap_add_addresses) > 0:
                        # this means there are cells to be added

                        # get row and col dimensions for the sheet, assuming the whole range is in one sheet
                        sheet_initial = split_address(cellmap_add_addresses[0])[0]
                        max_rows, max_cols = max_dimension(cellmap, sheet_initial)

                        # create empty cells that aren't in the cellmap
                        for addr in cellmap_add_addresses:
                            sheet_new, col_new, row_new = split_address(addr)

                            # if somehow a new sheet comes up in the range, get the new dimensions
                            if sheet_new != sheet_initial:
                                sheet_initial = sheet_new
                                max_rows, max_cols = max_dimension(cellmap, sheet_new)

                            # add the empty cells
                            if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:
                                # only add cells within the maximum bounds of the sheet to avoid too many evaluations
                                # for A:A or 1:1 ranges

                                cell_new = Cell(addr, sheet_new, value="", should_eval='False') # create new cell object
                                cellmap[addr] = cell_new # add it to the cellmap
                                G.add_node(cell_new) # add it to the graph
                                cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function

                    rng = cell_source.Range(reference)

                if address in cellmap:
                    virtual_cell = cellmap[address]
                else:
                    virtual_cell = Cell(address, None, value = rng,
                                        formula = reference, is_range = True, is_named_range = True )
                    # save the range
                    cellmap[address] = virtual_cell

                # add an edge from the range to the parent
                G.add_node(virtual_cell)
                # Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1
                G.add_edge(virtual_cell, c1)
                # cells in the range should point to the range as their parent
                target = virtual_cell
                origins = []

                if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...
for child in rng.addresses: if child not in cellmap: origins.append(cells[child]) else: origins.append(cellmap[child]) else: # not a range if dep_name in names: reference = names[dep_name] else: reference = dep_name if reference in cells: if dep_name in names: virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True ) G.add_node(virtual_cell) G.add_edge(cells[reference], virtual_cell) origins = [virtual_cell] else: cell = cells[reference] origins = [cell] cell = origins[0] if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula): cell_source.pointers.add(cell.address()) else: virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True ) origins = [virtual_cell] target = c1 # process each cell for c2 in flatten(origins): # if we havent treated this cell allready if c2.address() not in cellmap: if c2.formula: # cell with a formula, needs to be added to the todo list todo.append(c2) steps.append(step+1) else: # constant cell, no need for further processing, just remember to set the code pystr,ast = cell2code(c2, names) c2.python_expression = pystr c2.compile() # save in the cellmap cellmap[c2.address()] = c2 # add to the graph G.add_node(c2) # add an edge from the cell to the parent (range or cell) if(target != []): # print "Adding edge %s --> %s" % (c2.address(), target.address()) G.add_edge(c2,target) c1.compile() # cell compilation is done here because pointer ranges might update python_expressions return (cellmap, G)
[ "def", "graph_from_seeds", "(", "seeds", ",", "cell_source", ")", ":", "# when called from Spreadsheet instance, use the Spreadsheet cellmap and graph", "if", "hasattr", "(", "cell_source", ",", "'G'", ")", ":", "# ~ cell_source is a Spreadsheet", "cellmap", "=", "cell_source", ".", "cellmap", "cells", "=", "cellmap", "G", "=", "cell_source", ".", "G", "for", "c", "in", "seeds", ":", "G", ".", "add_node", "(", "c", ")", "cellmap", "[", "c", ".", "address", "(", ")", "]", "=", "c", "# when called from ExcelCompiler instance, construct cellmap and graph from seeds", "else", ":", "# ~ cell_source is a ExcelCompiler", "cellmap", "=", "dict", "(", "[", "(", "x", ".", "address", "(", ")", ",", "x", ")", "for", "x", "in", "seeds", "]", ")", "cells", "=", "cell_source", ".", "cells", "# directed graph", "G", "=", "networkx", ".", "DiGraph", "(", ")", "# match the info in cellmap", "for", "c", "in", "cellmap", ".", "values", "(", ")", ":", "G", ".", "add_node", "(", "c", ")", "# cells to analyze: only formulas", "todo", "=", "[", "s", "for", "s", "in", "seeds", "if", "s", ".", "formula", "]", "steps", "=", "[", "i", "for", "i", ",", "s", "in", "enumerate", "(", "todo", ")", "]", "names", "=", "cell_source", ".", "named_ranges", "while", "todo", ":", "c1", "=", "todo", ".", "pop", "(", ")", "step", "=", "steps", ".", "pop", "(", ")", "cursheet", "=", "c1", ".", "sheet", "###### 1) looking for cell c1 dependencies ####################", "# print 'C1', c1.address()", "# in case a formula, get all cells that are arguments", "pystr", ",", "ast", "=", "cell2code", "(", "c1", ",", "names", ")", "# set the code & compile it (will flag problems sooner rather than later)", "c1", ".", "python_expression", "=", "pystr", ".", "replace", "(", "'\"'", ",", "\"'\"", ")", "# compilation is done later", "if", "'OFFSET'", "in", "c1", ".", "formula", "or", "'INDEX'", "in", "c1", ".", "formula", ":", "if", "c1", ".", "address", "(", ")", "not", "in", "cell_source", ".", "named_ranges", ":", "# pointers names already treated in ExcelCompiler", "cell_source", ".", "pointers", ".", "add", "(", "c1", ".", "address", "(", ")", ")", "# get all the cells/ranges this formula refers to", "deps", "=", "[", "x", "for", "x", "in", "ast", ".", "nodes", "(", ")", "if", "isinstance", "(", "x", ",", "RangeNode", ")", "]", "# remove dupes", "deps", "=", "uniqueify", "(", "deps", ")", "###### 2) connect dependencies in cells in graph ####################", "# ### LOG", "# tmp = []", "# for dep in deps:", "# if dep not in names:", "# if \"!\" not in dep and cursheet != None:", "# dep = cursheet + \"!\" + dep", "# if dep not in cellmap:", "# tmp.append(dep)", "# #deps = tmp", "# logStep = \"%s %s = %s \" % ('|'*step, c1.address(), '',)", "# print logStep", "# if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):", "# print logStep, \"[%s...%s]\" % (deps[0], deps[-1])", "# elif len(deps) > 0:", "# print logStep, \"->\", deps", "# else:", "# print logStep, \"done\"", "for", "dep", "in", "deps", ":", "dep_name", "=", "dep", ".", "tvalue", ".", "replace", "(", "'$'", ",", "''", ")", "# this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError", "if", "dep_name", ".", "startswith", "(", "':'", ")", "or", "dep_name", ".", "endswith", "(", "':'", ")", ":", "dep_name", "=", "dep_name", ".", "replace", "(", "':'", ",", "''", ")", "# if not pointer, we need an absolute address", "if", "dep", ".", "tsubtype", "!=", "'pointer'", "and", "dep_name", "not", "in", "names", "and", "\"!\"", "not", "in", 
"dep_name", "and", "cursheet", "!=", "None", ":", "dep_name", "=", "cursheet", "+", "\"!\"", "+", "dep_name", "# Named_ranges + ranges already parsed (previous iterations)", "if", "dep_name", "in", "cellmap", ":", "origins", "=", "[", "cellmap", "[", "dep_name", "]", "]", "target", "=", "cellmap", "[", "c1", ".", "address", "(", ")", "]", "# if the dep_name is a multi-cell range, create a range object", "elif", "is_range", "(", "dep_name", ")", "or", "(", "dep_name", "in", "names", "and", "is_range", "(", "names", "[", "dep_name", "]", ")", ")", ":", "if", "dep_name", "in", "names", ":", "reference", "=", "names", "[", "dep_name", "]", "else", ":", "reference", "=", "dep_name", "if", "'OFFSET'", "in", "reference", "or", "'INDEX'", "in", "reference", ":", "start_end", "=", "prepare_pointer", "(", "reference", ",", "names", ",", "ref_cell", "=", "c1", ")", "rng", "=", "cell_source", ".", "Range", "(", "start_end", ")", "if", "dep_name", "in", "names", ":", "# dep is a pointer range", "address", "=", "dep_name", "else", ":", "if", "c1", ".", "address", "(", ")", "in", "names", ":", "# c1 holds is a pointer range", "address", "=", "c1", ".", "address", "(", ")", "else", ":", "# a pointer range with no name, its address will be its name", "address", "=", "'%s:%s'", "%", "(", "start_end", "[", "\"start\"", "]", ",", "start_end", "[", "\"end\"", "]", ")", "cell_source", ".", "pointers", ".", "add", "(", "address", ")", "else", ":", "address", "=", "dep_name", "# get a list of the addresses in this range that are not yet in the graph", "range_addresses", "=", "list", "(", "resolve_range", "(", "reference", ",", "should_flatten", "=", "True", ")", "[", "0", "]", ")", "cellmap_add_addresses", "=", "[", "addr", "for", "addr", "in", "range_addresses", "if", "addr", "not", "in", "cellmap", ".", "keys", "(", ")", "]", "if", "len", "(", "cellmap_add_addresses", ")", ">", "0", ":", "# this means there are cells to be added", "# get row and col dimensions for the sheet, assuming the whole range is in one sheet", "sheet_initial", "=", "split_address", "(", "cellmap_add_addresses", "[", "0", "]", ")", "[", "0", "]", "max_rows", ",", "max_cols", "=", "max_dimension", "(", "cellmap", ",", "sheet_initial", ")", "# create empty cells that aren't in the cellmap", "for", "addr", "in", "cellmap_add_addresses", ":", "sheet_new", ",", "col_new", ",", "row_new", "=", "split_address", "(", "addr", ")", "# if somehow a new sheet comes up in the range, get the new dimensions", "if", "sheet_new", "!=", "sheet_initial", ":", "sheet_initial", "=", "sheet_new", "max_rows", ",", "max_cols", "=", "max_dimension", "(", "cellmap", ",", "sheet_new", ")", "# add the empty cells", "if", "int", "(", "row_new", ")", "<=", "max_rows", "and", "int", "(", "col2num", "(", "col_new", ")", ")", "<=", "max_cols", ":", "# only add cells within the maximum bounds of the sheet to avoid too many evaluations", "# for A:A or 1:1 ranges", "cell_new", "=", "Cell", "(", "addr", ",", "sheet_new", ",", "value", "=", "\"\"", ",", "should_eval", "=", "'False'", ")", "# create new cell object", "cellmap", "[", "addr", "]", "=", "cell_new", "# add it to the cellmap", "G", ".", "add_node", "(", "cell_new", ")", "# add it to the graph", "cell_source", ".", "cells", "[", "addr", "]", "=", "cell_new", "# add it to the cell_source, used in this function", "rng", "=", "cell_source", ".", "Range", "(", "reference", ")", "if", "address", "in", "cellmap", ":", "virtual_cell", "=", "cellmap", "[", "address", "]", "else", ":", "virtual_cell", "=", "Cell", "(", 
"address", ",", "None", ",", "value", "=", "rng", ",", "formula", "=", "reference", ",", "is_range", "=", "True", ",", "is_named_range", "=", "True", ")", "# save the range", "cellmap", "[", "address", "]", "=", "virtual_cell", "# add an edge from the range to the parent", "G", ".", "add_node", "(", "virtual_cell", ")", "# Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1", "G", ".", "add_edge", "(", "virtual_cell", ",", "c1", ")", "# cells in the range should point to the range as their parent", "target", "=", "virtual_cell", "origins", "=", "[", "]", "if", "len", "(", "list", "(", "rng", ".", "keys", "(", ")", ")", ")", "!=", "0", ":", "# could be better, but can't check on Exception types here...", "for", "child", "in", "rng", ".", "addresses", ":", "if", "child", "not", "in", "cellmap", ":", "origins", ".", "append", "(", "cells", "[", "child", "]", ")", "else", ":", "origins", ".", "append", "(", "cellmap", "[", "child", "]", ")", "else", ":", "# not a range", "if", "dep_name", "in", "names", ":", "reference", "=", "names", "[", "dep_name", "]", "else", ":", "reference", "=", "dep_name", "if", "reference", "in", "cells", ":", "if", "dep_name", "in", "names", ":", "virtual_cell", "=", "Cell", "(", "dep_name", ",", "None", ",", "value", "=", "cells", "[", "reference", "]", ".", "value", ",", "formula", "=", "reference", ",", "is_range", "=", "False", ",", "is_named_range", "=", "True", ")", "G", ".", "add_node", "(", "virtual_cell", ")", "G", ".", "add_edge", "(", "cells", "[", "reference", "]", ",", "virtual_cell", ")", "origins", "=", "[", "virtual_cell", "]", "else", ":", "cell", "=", "cells", "[", "reference", "]", "origins", "=", "[", "cell", "]", "cell", "=", "origins", "[", "0", "]", "if", "cell", ".", "formula", "is", "not", "None", "and", "(", "'OFFSET'", "in", "cell", ".", "formula", "or", "'INDEX'", "in", "cell", ".", "formula", ")", ":", "cell_source", ".", "pointers", ".", "add", "(", "cell", ".", "address", "(", ")", ")", "else", ":", "virtual_cell", "=", "Cell", "(", "dep_name", ",", "None", ",", "value", "=", "None", ",", "formula", "=", "None", ",", "is_range", "=", "False", ",", "is_named_range", "=", "True", ")", "origins", "=", "[", "virtual_cell", "]", "target", "=", "c1", "# process each cell", "for", "c2", "in", "flatten", "(", "origins", ")", ":", "# if we havent treated this cell allready", "if", "c2", ".", "address", "(", ")", "not", "in", "cellmap", ":", "if", "c2", ".", "formula", ":", "# cell with a formula, needs to be added to the todo list", "todo", ".", "append", "(", "c2", ")", "steps", ".", "append", "(", "step", "+", "1", ")", "else", ":", "# constant cell, no need for further processing, just remember to set the code", "pystr", ",", "ast", "=", "cell2code", "(", "c2", ",", "names", ")", "c2", ".", "python_expression", "=", "pystr", "c2", ".", "compile", "(", ")", "# save in the cellmap", "cellmap", "[", "c2", ".", "address", "(", ")", "]", "=", "c2", "# add to the graph", "G", ".", "add_node", "(", "c2", ")", "# add an edge from the cell to the parent (range or cell)", "if", "(", "target", "!=", "[", "]", ")", ":", "# print \"Adding edge %s --> %s\" % (c2.address(), target.address())", "G", ".", "add_edge", "(", "c2", ",", "target", ")", "c1", ".", "compile", "(", ")", "# cell compilation is done here because pointer ranges might update python_expressions", "return", "(", "cellmap", ",", "G", ")" ]
43.918552
0.008966
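Stripped of the Excel details, the core of the routine is: add a node per cell and an edge from every dependency to the formula that uses it, which then yields a safe evaluation order. A toy networkx sketch with made-up cell names:

import networkx as nx

# Formula cells and the cells they reference, as in the traversal above.
deps = {'C1': ['A1', 'B1'], 'B1': ['A1'], 'D1': ['C1', 'B1']}

g = nx.DiGraph()
for cell, refs in deps.items():
    for ref in refs:
        g.add_edge(ref, cell)  # edge points from dependency to dependent

# A topological order gives a safe evaluation sequence for the formulas.
print(list(nx.topological_sort(g)))  # e.g. ['A1', 'B1', 'C1', 'D1']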
def port(alias_name, default=None, allow_none=False): """Get the port from the docker link alias or return the default. Args: alias_name: The docker link alias default: The default value if the link isn't available allow_none: If the return value can be `None` (i.e. optional) Examples: Assuming a Docker link was created with ``docker --link postgres:db`` and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``. >>> envitro.docker.port('DB') 5432 """ warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2) try: return int(_split_docker_link(alias_name)[2]) except KeyError as err: if default or allow_none: return default else: raise err
[ "def", "port", "(", "alias_name", ",", "default", "=", "None", ",", "allow_none", "=", "False", ")", ":", "warnings", ".", "warn", "(", "'Will be removed in v1.0'", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "try", ":", "return", "int", "(", "_split_docker_link", "(", "alias_name", ")", "[", "2", "]", ")", "except", "KeyError", "as", "err", ":", "if", "default", "or", "allow_none", ":", "return", "default", "else", ":", "raise", "err" ]
34.478261
0.002454
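The `DB_PORT=tcp://172.17.0.82:5432` convention from the docstring means the alias lookup reduces to a URL split on an environment variable. A hypothetical stand-in for `_split_docker_link` (the real helper may differ):

import os
from urllib.parse import urlsplit

os.environ['DB_PORT'] = 'tcp://172.17.0.82:5432'  # what `--link postgres:db` sets

def split_docker_link_sketch(alias_name):
    """Return (scheme, host, port) parsed from the <ALIAS>_PORT variable."""
    parts = urlsplit(os.environ['{0}_PORT'.format(alias_name.upper())])
    return parts.scheme, parts.hostname, parts.port

print(split_docker_link_sketch('db'))  # ('tcp', '172.17.0.82', 5432)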
def value_to_db(self, value): """ Returns field's single value prepared for saving into a database. """ assert isinstance(value, six.integer_types) return str(value).encode("utf_8")
[ "def", "value_to_db", "(", "self", ",", "value", ")", ":", "assert", "isinstance", "(", "value", ",", "six", ".", "integer_types", ")", "return", "str", "(", "value", ")", ".", "encode", "(", "\"utf_8\"", ")" ]
50.5
0.014634
def get(self, sid): """ Constructs a OriginationUrlContext :param sid: The unique string that identifies the resource :returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext :rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext """ return OriginationUrlContext(self._version, trunk_sid=self._solution['trunk_sid'], sid=sid, )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "OriginationUrlContext", "(", "self", ".", "_version", ",", "trunk_sid", "=", "self", ".", "_solution", "[", "'trunk_sid'", "]", ",", "sid", "=", "sid", ",", ")" ]
41.8
0.01171
def from_header(self, header):
    """Generate a SpanContext object using the trace context header.
    The value of `enabled` parsed from the header is an int and needs
    to be converted to bool.

    :type header: str
    :param header: Trace context header which was extracted from the HTTP
                   request headers.

    :rtype: :class:`~opencensus.trace.span_context.SpanContext`
    :returns: SpanContext generated from the trace context header.
    """
    if header is None:
        return SpanContext()

    try:
        match = re.search(_TRACE_CONTEXT_HEADER_RE, header)
    except TypeError:
        logging.warning(
            'Header should be str, got {}. Cannot parse the header.'
            .format(header.__class__.__name__))
        raise

    if match:
        trace_id = match.group(1)
        span_id = match.group(3)
        trace_options = match.group(5)

        if trace_options is None:
            trace_options = 1

        span_context = SpanContext(
            trace_id=trace_id,
            span_id=span_id,
            trace_options=TraceOptions(trace_options),
            from_header=True)
        return span_context
    else:
        logging.warning(
            'Cannot parse the header {}, generate a new context instead.'
            .format(header))
        return SpanContext()
[ "def", "from_header", "(", "self", ",", "header", ")", ":", "if", "header", "is", "None", ":", "return", "SpanContext", "(", ")", "try", ":", "match", "=", "re", ".", "search", "(", "_TRACE_CONTEXT_HEADER_RE", ",", "header", ")", "except", "TypeError", ":", "logging", ".", "warning", "(", "'Header should be str, got {}. Cannot parse the header.'", ".", "format", "(", "header", ".", "__class__", ".", "__name__", ")", ")", "raise", "if", "match", ":", "trace_id", "=", "match", ".", "group", "(", "1", ")", "span_id", "=", "match", ".", "group", "(", "3", ")", "trace_options", "=", "match", ".", "group", "(", "5", ")", "if", "trace_options", "is", "None", ":", "trace_options", "=", "1", "span_context", "=", "SpanContext", "(", "trace_id", "=", "trace_id", ",", "span_id", "=", "span_id", ",", "trace_options", "=", "TraceOptions", "(", "trace_options", ")", ",", "from_header", "=", "True", ")", "return", "span_context", "else", ":", "logging", ".", "warning", "(", "'Cannot parse the header {}, generate a new context instead.'", ".", "format", "(", "header", ")", ")", "return", "SpanContext", "(", ")" ]
33.690476
0.001374
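For reference, the header this parses has the shape `TRACE_ID/SPAN_ID;o=OPTIONS`. A sketch with a stand-in pattern (the real `_TRACE_CONTEXT_HEADER_RE` may differ) shows why groups 1, 3 and 5 are read above:

import re

# Hypothetical pattern: "<32 hex trace id>[/<span id>][;o=<options>]".
PATTERN = re.compile(r'([0-9a-f]{32})(/(\d+))?(;o=(\d+))?')

header = '6e0c63257de34c92bf9efcd03927272e/67667974448284343;o=1'
match = PATTERN.search(header)
trace_id, span_id, options = match.group(1), match.group(3), match.group(5)
print(trace_id, span_id, options)  # groups 2 and 4 are just the wrappers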
def apply_multicolor_transit(self, band, depth):
    """
    Applies the constraint corresponding to measuring a transit in a
    different photometric band.

    This is not implemented yet.
    """
    if '{} band transit'.format(band) not in self.constraints:
        self.constraints.append('{} band transit'.format(band))
    for pop in self.poplist:
        pop.apply_multicolor_transit(band, depth)
[ "def", "apply_multicolor_transit", "(", "self", ",", "band", ",", "depth", ")", ":", "if", "'{} band transit'", ".", "format", "(", "band", ")", "not", "in", "self", ".", "constraints", ":", "self", ".", "constraints", ".", "append", "(", "'{} band transit'", ".", "format", "(", "band", ")", ")", "for", "pop", "in", "self", ".", "poplist", ":", "pop", ".", "apply_multicolor_transit", "(", "band", ",", "depth", ")" ]
40
0.012225
def add_view(
    self,
    baseview,
    name,
    href="",
    icon="",
    label="",
    category="",
    category_icon="",
    category_label="",
):
    """
        Add your views associated with menus using this method.

        :param baseview:
            A BaseView type class instantiated or not.
            This method will instantiate the class for you if needed.
        :param name:
            The string name that identifies the menu.
        :param href:
            Override the generated href for the menu.
            You can use an url string or an endpoint name;
            if none is provided, the view's default_view will be set as href.
        :param icon:
            Font-Awesome icon name, optional.
        :param label:
            The label that will be displayed on the menu,
            if absent param name will be used
        :param category:
            The menu category where the menu will be included;
            if none is provided the view will be accessible as a top menu.
        :param category_icon:
            Font-Awesome icon name for the category, optional.
        :param category_label:
            The label that will be displayed on the menu,
            if absent param name will be used

        Examples::

            appbuilder = AppBuilder(app, db)
            # Register a view, rendering a top menu without icon.
            appbuilder.add_view(MyModelView(), "My View")
            # or not instantiated
            appbuilder.add_view(MyModelView, "My View")
            # Register a view, a submenu "Other View" from "Other" with a phone icon.
            appbuilder.add_view(
                MyOtherModelView,
                "Other View",
                icon='fa-phone',
                category="Others"
            )
            # Register a view, with category icon and translation.
            appbuilder.add_view(
                YetOtherModelView,
                "Other View",
                icon='fa-phone',
                label=_('Other View'),
                category="Others",
                category_icon='fa-envelop',
                category_label=_('Other View')
            )
            # Add a link
            appbuilder.add_link("google", href="www.google.com", icon = "fa-google-plus")
    """
    baseview = self._check_and_init(baseview)
    log.info(LOGMSG_INF_FAB_ADD_VIEW.format(baseview.__class__.__name__, name))

    if not self._view_exists(baseview):
        baseview.appbuilder = self
        self.baseviews.append(baseview)
        self._process_inner_views()
        if self.app:
            self.register_blueprint(baseview)
            self._add_permission(baseview)
    self.add_link(
        name=name,
        href=href,
        icon=icon,
        label=label,
        category=category,
        category_icon=category_icon,
        category_label=category_label,
        baseview=baseview,
    )
    return baseview
[ "def", "add_view", "(", "self", ",", "baseview", ",", "name", ",", "href", "=", "\"\"", ",", "icon", "=", "\"\"", ",", "label", "=", "\"\"", ",", "category", "=", "\"\"", ",", "category_icon", "=", "\"\"", ",", "category_label", "=", "\"\"", ",", ")", ":", "baseview", "=", "self", ".", "_check_and_init", "(", "baseview", ")", "log", ".", "info", "(", "LOGMSG_INF_FAB_ADD_VIEW", ".", "format", "(", "baseview", ".", "__class__", ".", "__name__", ",", "name", ")", ")", "if", "not", "self", ".", "_view_exists", "(", "baseview", ")", ":", "baseview", ".", "appbuilder", "=", "self", "self", ".", "baseviews", ".", "append", "(", "baseview", ")", "self", ".", "_process_inner_views", "(", ")", "if", "self", ".", "app", ":", "self", ".", "register_blueprint", "(", "baseview", ")", "self", ".", "_add_permission", "(", "baseview", ")", "self", ".", "add_link", "(", "name", "=", "name", ",", "href", "=", "href", ",", "icon", "=", "icon", ",", "label", "=", "label", ",", "category", "=", "category", ",", "category_icon", "=", "category_icon", ",", "category_label", "=", "category_label", ",", "baseview", "=", "baseview", ",", ")", "return", "baseview" ]
34.858824
0.001969
def do_some_work( self, work_dict): """do_some_work :param work_dict: dictionary for key/values """ label = "do_some_work" log.info(("task - {} - start " "work_dict={}") .format(label, work_dict)) ret_data = { "job_results": ("some response key={}").format( str(uuid.uuid4())) } log.info(("task - {} - result={} done") .format( ret_data, label)) return ret_data
[ "def", "do_some_work", "(", "self", ",", "work_dict", ")", ":", "label", "=", "\"do_some_work\"", "log", ".", "info", "(", "(", "\"task - {} - start \"", "\"work_dict={}\"", ")", ".", "format", "(", "label", ",", "work_dict", ")", ")", "ret_data", "=", "{", "\"job_results\"", ":", "(", "\"some response key={}\"", ")", ".", "format", "(", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ")", "}", "log", ".", "info", "(", "(", "\"task - {} - result={} done\"", ")", ".", "format", "(", "ret_data", ",", "label", ")", ")", "return", "ret_data" ]
20.192308
0.001818
def active_vectors_info(self):
    """Return the active vectors' field and name: [field, name]"""
    if not hasattr(self, '_active_vectors_info'):
        self._active_vectors_info = [POINT_DATA_FIELD, None] # field and name
    _, name = self._active_vectors_info

    # rare case where the stored name isn't a valid array name
    if name not in self.point_arrays:
        if name not in self.cell_arrays:
            name = None

    return self._active_vectors_info
[ "def", "active_vectors_info", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_active_vectors_info'", ")", ":", "self", ".", "_active_vectors_info", "=", "[", "POINT_DATA_FIELD", ",", "None", "]", "# field and name", "_", ",", "name", "=", "self", ".", "_active_vectors_info", "# rare error where scalar name isn't a valid scalar", "if", "name", "not", "in", "self", ".", "point_arrays", ":", "if", "name", "not", "in", "self", ".", "cell_arrays", ":", "name", "=", "None", "return", "self", ".", "_active_vectors_info" ]
40.666667
0.008016
def prepare_lineage(func):
    """
    Prepares the lineage inlets and outlets. Inlets can be:

    * "auto" -> picks up any outlets from direct upstream tasks that have
      outlets defined, such that if A -> B -> C and B does not have outlets
      but A does, these are provided as inlets.

    * "list of task_ids" -> picks up outlets from the upstream task_ids

    * "list of datasets" -> manually defined list of DataSet
    """
    @wraps(func)
    def wrapper(self, context, *args, **kwargs):
        self.log.debug("Preparing lineage inlets and outlets")

        task_ids = set(self._inlets['task_ids']).intersection(
            self.get_flat_relative_ids(upstream=True)
        )
        if task_ids:
            inlets = self.xcom_pull(context,
                                    task_ids=task_ids,
                                    dag_id=self.dag_id,
                                    key=PIPELINE_OUTLETS)
            inlets = [item for sublist in inlets if sublist for item in sublist]
            inlets = [DataSet.map_type(i['typeName'])(data=i['attributes'])
                      for i in inlets]
            self.inlets.extend(inlets)

        if self._inlets['auto']:
            # don't append twice
            task_ids = set(self._inlets['task_ids']).symmetric_difference(
                self.upstream_task_ids
            )
            inlets = self.xcom_pull(context,
                                    task_ids=task_ids,
                                    dag_id=self.dag_id,
                                    key=PIPELINE_OUTLETS)
            inlets = [item for sublist in inlets if sublist for item in sublist]
            inlets = [DataSet.map_type(i['typeName'])(data=i['attributes'])
                      for i in inlets]
            self.inlets.extend(inlets)

        if len(self._inlets['datasets']) > 0:
            self.inlets.extend(self._inlets['datasets'])

        # outlets
        if len(self._outlets['datasets']) > 0:
            self.outlets.extend(self._outlets['datasets'])

        self.log.debug("inlets: %s, outlets: %s", self.inlets, self.outlets)

        for dataset in chain(self.inlets, self.outlets):
            dataset.set_context(context)

        return func(self, context, *args, **kwargs)

    return wrapper
[ "def", "prepare_lineage", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "context", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Preparing lineage inlets and outlets\"", ")", "task_ids", "=", "set", "(", "self", ".", "_inlets", "[", "'task_ids'", "]", ")", ".", "intersection", "(", "self", ".", "get_flat_relative_ids", "(", "upstream", "=", "True", ")", ")", "if", "task_ids", ":", "inlets", "=", "self", ".", "xcom_pull", "(", "context", ",", "task_ids", "=", "task_ids", ",", "dag_id", "=", "self", ".", "dag_id", ",", "key", "=", "PIPELINE_OUTLETS", ")", "inlets", "=", "[", "item", "for", "sublist", "in", "inlets", "if", "sublist", "for", "item", "in", "sublist", "]", "inlets", "=", "[", "DataSet", ".", "map_type", "(", "i", "[", "'typeName'", "]", ")", "(", "data", "=", "i", "[", "'attributes'", "]", ")", "for", "i", "in", "inlets", "]", "self", ".", "inlets", ".", "extend", "(", "inlets", ")", "if", "self", ".", "_inlets", "[", "'auto'", "]", ":", "# dont append twice", "task_ids", "=", "set", "(", "self", ".", "_inlets", "[", "'task_ids'", "]", ")", ".", "symmetric_difference", "(", "self", ".", "upstream_task_ids", ")", "inlets", "=", "self", ".", "xcom_pull", "(", "context", ",", "task_ids", "=", "task_ids", ",", "dag_id", "=", "self", ".", "dag_id", ",", "key", "=", "PIPELINE_OUTLETS", ")", "inlets", "=", "[", "item", "for", "sublist", "in", "inlets", "if", "sublist", "for", "item", "in", "sublist", "]", "inlets", "=", "[", "DataSet", ".", "map_type", "(", "i", "[", "'typeName'", "]", ")", "(", "data", "=", "i", "[", "'attributes'", "]", ")", "for", "i", "in", "inlets", "]", "self", ".", "inlets", ".", "extend", "(", "inlets", ")", "if", "len", "(", "self", ".", "_inlets", "[", "'datasets'", "]", ")", ">", "0", ":", "self", ".", "inlets", ".", "extend", "(", "self", ".", "_inlets", "[", "'datasets'", "]", ")", "# outlets", "if", "len", "(", "self", ".", "_outlets", "[", "'datasets'", "]", ")", ">", "0", ":", "self", ".", "outlets", ".", "extend", "(", "self", ".", "_outlets", "[", "'datasets'", "]", ")", "self", ".", "log", ".", "debug", "(", "\"inlets: %s, outlets: %s\"", ",", "self", ".", "inlets", ",", "self", ".", "outlets", ")", "for", "dataset", "in", "chain", "(", "self", ".", "inlets", ",", "self", ".", "outlets", ")", ":", "dataset", ".", "set_context", "(", "context", ")", "return", "func", "(", "self", ",", "context", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
39.75
0.002192
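The decorator shape here -- `@wraps(func)` around a wrapper that inspects `context` before delegating -- is reusable on its own. A minimal standalone sketch (hypothetical names, no Airflow):

from functools import wraps

def log_context(func):
    """Minimal shape of the decorator above: wrap, inspect, delegate."""
    @wraps(func)  # keep func.__name__/__doc__ for later introspection
    def wrapper(self, context, *args, **kwargs):
        print('pre-execute hook for', func.__name__, 'with', sorted(context))
        return func(self, context, *args, **kwargs)
    return wrapper

class Task:
    @log_context
    def execute(self, context):
        return context['ds']

print(Task().execute({'ds': '2019-01-01'}))  # hook fires, then delegates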
def pixel_scale_angle_at_skycoord(skycoord, wcs, offset=1. * u.arcsec):
    """
    Calculate the pixel scale and WCS rotation angle at the position of
    a SkyCoord coordinate.

    Parameters
    ----------
    skycoord : `~astropy.coordinates.SkyCoord`
        The SkyCoord coordinate.

    wcs : `~astropy.wcs.WCS`
        The world coordinate system (WCS) transformation to use.

    offset : `~astropy.units.Quantity`
        A small angular offset to use to compute the pixel scale and
        position angle.

    Returns
    -------
    scale : `~astropy.units.Quantity`
        The pixel scale in arcsec/pixel.

    angle : `~astropy.units.Quantity`
        The angle (in degrees) measured counterclockwise from the
        positive x axis to the "North" axis of the celestial coordinate
        system.

    Notes
    -----
    If distortions are present in the image, the x and y pixel scales
    likely differ.  This function computes a single pixel scale along
    the North/South axis.
    """
    # We take a point directly "above" (in latitude) the input position
    # and convert it to pixel coordinates, then we use the pixel deltas
    # between the input and offset point to calculate the pixel scale and
    # angle.

    # Find the coordinates as a representation object
    coord = skycoord.represent_as('unitspherical')

    # Add a small perturbation in the latitude direction (since longitude
    # is more difficult because it is not directly an angle)
    coord_new = UnitSphericalRepresentation(coord.lon, coord.lat + offset)
    coord_offset = skycoord.realize_frame(coord_new)

    # Find pixel coordinates of offset coordinates and pixel deltas
    x_offset, y_offset = skycoord_to_pixel(coord_offset, wcs, mode='all')
    x, y = skycoord_to_pixel(skycoord, wcs, mode='all')

    dx = x_offset - x
    dy = y_offset - y

    scale = offset.to(u.arcsec) / (np.hypot(dx, dy) * u.pixel)
    angle = (np.arctan2(dy, dx) * u.radian).to(u.deg)

    return scale, angle
[ "def", "pixel_scale_angle_at_skycoord", "(", "skycoord", ",", "wcs", ",", "offset", "=", "1.", "*", "u", ".", "arcsec", ")", ":", "# We take a point directly \"above\" (in latitude) the input position", "# and convert it to pixel coordinates, then we use the pixel deltas", "# between the input and offset point to calculate the pixel scale and", "# angle.", "# Find the coordinates as a representation object", "coord", "=", "skycoord", ".", "represent_as", "(", "'unitspherical'", ")", "# Add a a small perturbation in the latitude direction (since longitude", "# is more difficult because it is not directly an angle)", "coord_new", "=", "UnitSphericalRepresentation", "(", "coord", ".", "lon", ",", "coord", ".", "lat", "+", "offset", ")", "coord_offset", "=", "skycoord", ".", "realize_frame", "(", "coord_new", ")", "# Find pixel coordinates of offset coordinates and pixel deltas", "x_offset", ",", "y_offset", "=", "skycoord_to_pixel", "(", "coord_offset", ",", "wcs", ",", "mode", "=", "'all'", ")", "x", ",", "y", "=", "skycoord_to_pixel", "(", "skycoord", ",", "wcs", ",", "mode", "=", "'all'", ")", "dx", "=", "x_offset", "-", "x", "dy", "=", "y_offset", "-", "y", "scale", "=", "offset", ".", "to", "(", "u", ".", "arcsec", ")", "/", "(", "np", ".", "hypot", "(", "dx", ",", "dy", ")", "*", "u", ".", "pixel", ")", "angle", "=", "(", "np", ".", "arctan2", "(", "dy", ",", "dx", ")", "*", "u", ".", "radian", ")", ".", "to", "(", "u", ".", "deg", ")", "return", "scale", ",", "angle" ]
35.981481
0.000501
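The scale/angle arithmetic at the end can be checked against a toy linear WCS where the answer is known in closed form. Purely illustrative numbers, no astropy:

import numpy as np

# Toy linear WCS: a CD matrix mapping pixel offsets to sky offsets
# (0.25 arcsec per pixel, rotated 30 degrees).
theta = np.deg2rad(30.0)
cd = 0.25 * np.array([[np.cos(theta), -np.sin(theta)],
                      [np.sin(theta),  np.cos(theta)]])

offset = 1.0                                  # arcsec step "north"
dx, dy = np.linalg.solve(cd, [0.0, offset])   # pixel delta for that step
scale = offset / np.hypot(dx, dy)             # arcsec / pixel
angle = np.degrees(np.arctan2(dy, dx))        # CCW from +x to north
print(scale, angle)                           # ~0.25, ~60.0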
def som_simulate(som_pointer, pattern):
    """!
    @brief Processes input pattern (no learning) and returns index of neuron-winner.
    @details Using the index of the neuron-winner, captured objects can be obtained
              via the capture_objects property.

    @param[in] som_pointer (c_pointer): pointer to object of self-organized map.
    @param[in] pattern (list): input pattern.

    @return Returns index of neuron-winner.

    """
    pointer_data = package_builder(pattern, c_double).create()

    ccore = ccore_library.get()
    ccore.som_simulate.restype = c_size_t
    return ccore.som_simulate(som_pointer, pointer_data)
[ "def", "som_simulate", "(", "som_pointer", ",", "pattern", ")", ":", "pointer_data", "=", "package_builder", "(", "pattern", ",", "c_double", ")", ".", "create", "(", ")", "ccore", "=", "ccore_library", ".", "get", "(", ")", "ccore", ".", "som_simulate", ".", "restype", "=", "c_size_t", "return", "ccore", ".", "som_simulate", "(", "som_pointer", ",", "pointer_data", ")" ]
37.470588
0.013783
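Semantically, the C call returns the index of the best-matching unit: the neuron whose weight vector is closest to the input pattern. The same computation in plain numpy (a sketch of the semantics only, not the ccore binding):

import numpy as np

weights = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])  # one row per neuron
pattern = np.array([0.9, 1.2])

# Index of the neuron-winner: smallest Euclidean distance to the pattern.
winner = int(np.argmin(np.linalg.norm(weights - pattern, axis=1)))
print(winner)  # 1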
def bb_pad_collate(samples:BatchSamples, pad_idx:int=0) -> Tuple[FloatTensor, Tuple[LongTensor, LongTensor]]:
    "Function that collects `samples` of labelled bboxes and adds padding with `pad_idx`."
    if isinstance(samples[0][1], int): return data_collate(samples)
    max_len = max([len(s[1].data[1]) for s in samples])
    bboxes = torch.zeros(len(samples), max_len, 4)
    labels = torch.zeros(len(samples), max_len).long() + pad_idx
    imgs = []
    for i,s in enumerate(samples):
        imgs.append(s[0].data[None])
        bbs, lbls = s[1].data
        if not (bbs.nelement() == 0):
            bboxes[i,-len(lbls):] = bbs
            labels[i,-len(lbls):] = tensor(lbls)
    return torch.cat(imgs,0), (bboxes,labels)
[ "def", "bb_pad_collate", "(", "samples", ":", "BatchSamples", ",", "pad_idx", ":", "int", "=", "0", ")", "->", "Tuple", "[", "FloatTensor", ",", "Tuple", "[", "LongTensor", ",", "LongTensor", "]", "]", ":", "if", "isinstance", "(", "samples", "[", "0", "]", "[", "1", "]", ",", "int", ")", ":", "return", "data_collate", "(", "samples", ")", "max_len", "=", "max", "(", "[", "len", "(", "s", "[", "1", "]", ".", "data", "[", "1", "]", ")", "for", "s", "in", "samples", "]", ")", "bboxes", "=", "torch", ".", "zeros", "(", "len", "(", "samples", ")", ",", "max_len", ",", "4", ")", "labels", "=", "torch", ".", "zeros", "(", "len", "(", "samples", ")", ",", "max_len", ")", ".", "long", "(", ")", "+", "pad_idx", "imgs", "=", "[", "]", "for", "i", ",", "s", "in", "enumerate", "(", "samples", ")", ":", "imgs", ".", "append", "(", "s", "[", "0", "]", ".", "data", "[", "None", "]", ")", "bbs", ",", "lbls", "=", "s", "[", "1", "]", ".", "data", "if", "not", "(", "bbs", ".", "nelement", "(", ")", "==", "0", ")", ":", "bboxes", "[", "i", ",", "-", "len", "(", "lbls", ")", ":", "]", "=", "bbs", "labels", "[", "i", ",", "-", "len", "(", "lbls", ")", ":", "]", "=", "tensor", "(", "lbls", ")", "return", "torch", ".", "cat", "(", "imgs", ",", "0", ")", ",", "(", "bboxes", ",", "labels", ")" ]
51.071429
0.017857
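The right-aligned padding trick (`labels[i,-len(lbls):] = ...`) is worth seeing on its own. A tiny torch-only demo of padding variable-length label sets to a common width:

import torch

label_sets = [torch.tensor([3, 7]), torch.tensor([5])]
pad_idx, max_len = 0, max(len(l) for l in label_sets)

labels = torch.zeros(len(label_sets), max_len).long() + pad_idx
for i, lbls in enumerate(label_sets):
    labels[i, -len(lbls):] = lbls   # right-align, padding on the left

print(labels)  # tensor([[3, 7], [0, 5]])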
def gen_search_article_url(keyword, page=1,
                           timesn=WechatSogouConst.search_article_time.anytime,
                           article_type=WechatSogouConst.search_article_type.all,
                           ft=None, et=None):
    """Build the article search URL

    Parameters
    ----------
    keyword : str or unicode
        the text to search for
    page : int, optional
        page number, the default is 1
    timesn : WechatSogouConst.search_article_time
        time window
        anytime (no limit) / day (one day) / week (one week) / month (one month) /
        year (one year) / specific (user-defined)
        the default is anytime
    article_type : WechatSogouConst.search_article_type
        type of content the article must contain
        image (has images) / video (has video) / rich (has images and video) / all (anything)
        the default is all
    ft, et : datetime.date
        when tsn is specific, ft is the start date, e.g. 2017-07-01
        when tsn is specific, et is the end date, e.g. 2017-07-15

    Returns
    -------
    str
        search_article_url
    """
    assert isinstance(page, int) and page > 0
    assert timesn in [WechatSogouConst.search_article_time.anytime,
                      WechatSogouConst.search_article_time.day,
                      WechatSogouConst.search_article_time.week,
                      WechatSogouConst.search_article_time.month,
                      WechatSogouConst.search_article_time.year,
                      WechatSogouConst.search_article_time.specific]

    if timesn == WechatSogouConst.search_article_time.specific:
        assert isinstance(ft, datetime.date)
        assert isinstance(et, datetime.date)
        assert ft <= et
    else:
        ft = ''
        et = ''

    interation_image = 458754
    interation_video = 458756
    if article_type == WechatSogouConst.search_article_type.rich:
        interation = '{},{}'.format(interation_image, interation_video)
    elif article_type == WechatSogouConst.search_article_type.image:
        interation = interation_image
    elif article_type == WechatSogouConst.search_article_type.video:
        interation = interation_video
    else:
        interation = ''

    qs_dict = OrderedDict()
    qs_dict['type'] = _search_type_article
    qs_dict['page'] = page
    qs_dict['ie'] = 'utf8'
    qs_dict['query'] = keyword
    qs_dict['interation'] = interation
    if timesn != 0:
        qs_dict['tsn'] = timesn
        qs_dict['ft'] = str(ft)
        qs_dict['et'] = str(et)

    # TODO in-account search
    # 'in-account: http://weixin.sogou.com/weixin?type=2&ie=utf8&query=%E9%AB%98%E8%80%83&tsn=3&ft=&et=&interation=458754
    # &wxid=oIWsFt1tmWoG6vO6BcsS7St61bRE&usip=nanhangqinggong'
    # qs['wxid'] = wxid
    # qs['usip'] = usip

    return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))
[ "def", "gen_search_article_url", "(", "keyword", ",", "page", "=", "1", ",", "timesn", "=", "WechatSogouConst", ".", "search_article_time", ".", "anytime", ",", "article_type", "=", "WechatSogouConst", ".", "search_article_type", ".", "all", ",", "ft", "=", "None", ",", "et", "=", "None", ")", ":", "assert", "isinstance", "(", "page", ",", "int", ")", "and", "page", ">", "0", "assert", "timesn", "in", "[", "WechatSogouConst", ".", "search_article_time", ".", "anytime", ",", "WechatSogouConst", ".", "search_article_time", ".", "day", ",", "WechatSogouConst", ".", "search_article_time", ".", "week", ",", "WechatSogouConst", ".", "search_article_time", ".", "month", ",", "WechatSogouConst", ".", "search_article_time", ".", "year", ",", "WechatSogouConst", ".", "search_article_time", ".", "specific", "]", "if", "timesn", "==", "WechatSogouConst", ".", "search_article_time", ".", "specific", ":", "assert", "isinstance", "(", "ft", ",", "datetime", ".", "date", ")", "assert", "isinstance", "(", "et", ",", "datetime", ".", "date", ")", "assert", "ft", "<=", "et", "else", ":", "ft", "=", "''", "et", "=", "''", "interation_image", "=", "458754", "interation_video", "=", "458756", "if", "article_type", "==", "WechatSogouConst", ".", "search_article_type", ".", "rich", ":", "interation", "=", "'{},{}'", ".", "format", "(", "interation_image", ",", "interation_video", ")", "elif", "article_type", "==", "WechatSogouConst", ".", "search_article_type", ".", "image", ":", "interation", "=", "interation_image", "elif", "article_type", "==", "WechatSogouConst", ".", "search_article_type", ".", "video", ":", "interation", "=", "interation_video", "else", ":", "interation", "=", "''", "qs_dict", "=", "OrderedDict", "(", ")", "qs_dict", "[", "'type'", "]", "=", "_search_type_article", "qs_dict", "[", "'page'", "]", "=", "page", "qs_dict", "[", "'ie'", "]", "=", "'utf8'", "qs_dict", "[", "'query'", "]", "=", "keyword", "qs_dict", "[", "'interation'", "]", "=", "interation", "if", "timesn", "!=", "0", ":", "qs_dict", "[", "'tsn'", "]", "=", "timesn", "qs_dict", "[", "'ft'", "]", "=", "str", "(", "ft", ")", "qs_dict", "[", "'et'", "]", "=", "str", "(", "et", ")", "# TODO 账号内搜索", "# '账号内 http://weixin.sogou.com/weixin?type=2&ie=utf8&query=%E9%AB%98%E8%80%83&tsn=3&ft=&et=&interation=458754", "# &wxid=oIWsFt1tmWoG6vO6BcsS7St61bRE&usip=nanhangqinggong'", "# qs['wxid'] = wxid", "# qs['usip'] = usip", "return", "'http://weixin.sogou.com/weixin?{}'", ".", "format", "(", "urlencode", "(", "qs_dict", ")", ")" ]
39.128571
0.002493
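For context, a minimal Python 3 sketch of the OrderedDict-plus-urlencode pattern the function above relies on; the parameter values are illustrative, with type=2 taken from the commented example URL:

from collections import OrderedDict
from urllib.parse import urlencode

qs = OrderedDict()
qs['type'] = 2          # article search, as in the commented example URL
qs['page'] = 1
qs['ie'] = 'utf8'
qs['query'] = 'python'
print('http://weixin.sogou.com/weixin?{}'.format(urlencode(qs)))
# -> http://weixin.sogou.com/weixin?type=2&page=1&ie=utf8&query=python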
def querypath(self, block, path): """An XPath-like interface to `query`.""" class BadPath(Exception): """Bad path exception thrown when path cannot be found.""" pass results = self.query(block) ROOT, SEP, WORD, FINAL = six.moves.range(4) # pylint: disable=C0103 state = ROOT lexer = RegexLexer( ("dotdot", r"\.\."), ("dot", r"\."), ("slashslash", r"//"), ("slash", r"/"), ("atword", r"@\w+"), ("word", r"\w+"), ("err", r"."), ) for tokname, toktext in lexer.lex(path): if state == FINAL: # Shouldn't be any tokens after a last token. raise BadPath() if tokname == "dotdot": # .. (parent) if state == WORD: raise BadPath() results = results.parent() state = WORD elif tokname == "dot": # . (current node) if state == WORD: raise BadPath() state = WORD elif tokname == "slashslash": # // (descendants) if state == SEP: raise BadPath() if state == ROOT: raise NotImplementedError() results = results.descendants() state = SEP elif tokname == "slash": # / (here) if state == SEP: raise BadPath() if state == ROOT: raise NotImplementedError() state = SEP elif tokname == "atword": # @xxx (attribute access) if state != SEP: raise BadPath() results = results.attr(toktext[1:]) state = FINAL elif tokname == "word": # xxx (tag selection) if state != SEP: raise BadPath() results = results.children().tagged(toktext) state = WORD else: raise BadPath("Invalid thing: %r" % toktext) return results
[ "def", "querypath", "(", "self", ",", "block", ",", "path", ")", ":", "class", "BadPath", "(", "Exception", ")", ":", "\"\"\"Bad path exception thrown when path cannot be found.\"\"\"", "pass", "results", "=", "self", ".", "query", "(", "block", ")", "ROOT", ",", "SEP", ",", "WORD", ",", "FINAL", "=", "six", ".", "moves", ".", "range", "(", "4", ")", "# pylint: disable=C0103", "state", "=", "ROOT", "lexer", "=", "RegexLexer", "(", "(", "\"dotdot\"", ",", "r\"\\.\\.\"", ")", ",", "(", "\"dot\"", ",", "r\"\\.\"", ")", ",", "(", "\"slashslash\"", ",", "r\"//\"", ")", ",", "(", "\"slash\"", ",", "r\"/\"", ")", ",", "(", "\"atword\"", ",", "r\"@\\w+\"", ")", ",", "(", "\"word\"", ",", "r\"\\w+\"", ")", ",", "(", "\"err\"", ",", "r\".\"", ")", ",", ")", "for", "tokname", ",", "toktext", "in", "lexer", ".", "lex", "(", "path", ")", ":", "if", "state", "==", "FINAL", ":", "# Shouldn't be any tokens after a last token.", "raise", "BadPath", "(", ")", "if", "tokname", "==", "\"dotdot\"", ":", "# .. (parent)", "if", "state", "==", "WORD", ":", "raise", "BadPath", "(", ")", "results", "=", "results", ".", "parent", "(", ")", "state", "=", "WORD", "elif", "tokname", "==", "\"dot\"", ":", "# . (current node)", "if", "state", "==", "WORD", ":", "raise", "BadPath", "(", ")", "state", "=", "WORD", "elif", "tokname", "==", "\"slashslash\"", ":", "# // (descendants)", "if", "state", "==", "SEP", ":", "raise", "BadPath", "(", ")", "if", "state", "==", "ROOT", ":", "raise", "NotImplementedError", "(", ")", "results", "=", "results", ".", "descendants", "(", ")", "state", "=", "SEP", "elif", "tokname", "==", "\"slash\"", ":", "# / (here)", "if", "state", "==", "SEP", ":", "raise", "BadPath", "(", ")", "if", "state", "==", "ROOT", ":", "raise", "NotImplementedError", "(", ")", "state", "=", "SEP", "elif", "tokname", "==", "\"atword\"", ":", "# @xxx (attribute access)", "if", "state", "!=", "SEP", ":", "raise", "BadPath", "(", ")", "results", "=", "results", ".", "attr", "(", "toktext", "[", "1", ":", "]", ")", "state", "=", "FINAL", "elif", "tokname", "==", "\"word\"", ":", "# xxx (tag selection)", "if", "state", "!=", "SEP", ":", "raise", "BadPath", "(", ")", "results", "=", "results", ".", "children", "(", ")", ".", "tagged", "(", "toktext", ")", "state", "=", "WORD", "else", ":", "raise", "BadPath", "(", "\"Invalid thing: %r\"", "%", "toktext", ")", "return", "results" ]
35.612903
0.001322
def to_string(self, style=None, utcoffset=None): """Return a |str| object representing the actual date in accordance with the given style and the eventually given UTC offset (in minutes). Without any input arguments, the actual |Date.style| is used to return a date string in your local time zone: >>> from hydpy import Date >>> date = Date('01.11.1997 00:00:00') >>> date.to_string() '01.11.1997 00:00:00' Passing a style string affects the returned |str| object, but not the |Date.style| property: >>> date.style 'din1' >>> date.to_string(style='iso2') '1997-11-01 00:00:00' >>> date.style 'din1' When passing the `utcoffset` in minutes, the offset string is appended: >>> date.to_string(style='iso2', utcoffset=60) '1997-11-01 00:00:00+01:00' If the given offset does not correspond to your local offset defined by |Options.utcoffset| (which defaults to UTC+01:00), the date string is adapted: >>> date.to_string(style='iso1', utcoffset=0) '1997-10-31T23:00:00+00:00' """ if not style: style = self.style if utcoffset is None: string = '' date = self.datetime else: sign = '+' if utcoffset >= 0 else '-' hours = abs(utcoffset // 60) minutes = abs(utcoffset % 60) string = f'{sign}{hours:02d}:{minutes:02d}' offset = utcoffset-hydpy.pub.options.utcoffset date = self.datetime + datetime.timedelta(minutes=offset) return date.strftime(self._formatstrings[style]) + string
[ "def", "to_string", "(", "self", ",", "style", "=", "None", ",", "utcoffset", "=", "None", ")", ":", "if", "not", "style", ":", "style", "=", "self", ".", "style", "if", "utcoffset", "is", "None", ":", "string", "=", "''", "date", "=", "self", ".", "datetime", "else", ":", "sign", "=", "'+'", "if", "utcoffset", ">=", "0", "else", "'-'", "hours", "=", "abs", "(", "utcoffset", "//", "60", ")", "minutes", "=", "abs", "(", "utcoffset", "%", "60", ")", "string", "=", "f'{sign}{hours:02d}:{minutes:02d}'", "offset", "=", "utcoffset", "-", "hydpy", ".", "pub", ".", "options", ".", "utcoffset", "date", "=", "self", ".", "datetime", "+", "datetime", ".", "timedelta", "(", "minutes", "=", "offset", ")", "return", "date", ".", "strftime", "(", "self", ".", "_formatstrings", "[", "style", "]", ")", "+", "string" ]
34.44898
0.001152
def convert_double_to_two_registers(doubleValue):
    """
    Convert a 32-bit value into two 16-bit values to send as Modbus registers
    doubleValue: Value to be converted
    return: 16-bit register values, int[]
    """
    myList = list()
    myList.append(int(doubleValue & 0x0000FFFF))   #Append Least Significant Word
    myList.append(int((doubleValue & 0xFFFF0000)>>16))   #Append Most Significant Word
    return myList
[ "def", "convert_double_to_two_registers", "(", "doubleValue", ")", ":", "myList", "=", "list", "(", ")", "myList", ".", "append", "(", "int", "(", "doubleValue", "&", "0x0000FFFF", ")", ")", "#Append Least Significant Word ", "myList", ".", "append", "(", "int", "(", "(", "doubleValue", "&", "0xFFFF0000", ")", ">>", "16", ")", ")", "#Append Most Significant Word ", "return", "myList" ]
43.6
0.020225
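A quick self-contained round-trip of the register split above; the reassembly expression in the assert is my own addition, not part of the record:

def convert_double_to_two_registers(doubleValue):
    # low word first, then high word
    return [int(doubleValue & 0x0000FFFF), int((doubleValue & 0xFFFF0000) >> 16)]

low, high = convert_double_to_two_registers(0x12345678)
assert (high << 16) | low == 0x12345678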
def delete_group_dampening(self, group_id, dampening_id):
    """
    Delete an existing group dampening

    :param group_id: id of the Group Trigger the dampening is attached to
    :param dampening_id: id of the Dampening to be deleted
    """
    self._delete(self._service_url(['triggers', 'groups', group_id, 'dampenings', dampening_id]))
[ "def", "delete_group_dampening", "(", "self", ",", "group_id", ",", "dampening_id", ")", ":", "self", ".", "_delete", "(", "self", ".", "_service_url", "(", "[", "'triggers'", ",", "'groups'", ",", "group_id", ",", "'dampenings'", ",", "dampening_id", "]", ")", ")" ]
42.625
0.008621
def find_matching(cls, message, channel): """ Yield ``cls`` subclasses that match message and channel """ return ( handler for handler in cls._registry if isinstance(handler, cls) and handler.match(message, channel) )
[ "def", "find_matching", "(", "cls", ",", "message", ",", "channel", ")", ":", "return", "(", "handler", "for", "handler", "in", "cls", ".", "_registry", "if", "isinstance", "(", "handler", ",", "cls", ")", "and", "handler", ".", "match", "(", "message", ",", "channel", ")", ")" ]
23
0.046025
def create_event_hub(self, hub_name, hub=None, fail_on_exist=False): ''' Creates a new Event Hub. hub_name: Name of event hub. hub: Optional. Event hub properties. Instance of EventHub class. hub.message_retention_in_days: Number of days to retain the events for this Event Hub. hub.status: Status of the Event Hub (enabled or disabled). hub.user_metadata: User metadata. hub.partition_count: Number of shards on the Event Hub. fail_on_exist: Specify whether to throw an exception when the event hub exists. ''' _validate_not_none('hub_name', hub_name) request = HTTPRequest() request.method = 'PUT' request.host = self._get_host() request.path = '/' + _str(hub_name) + '?api-version=2014-01' request.body = _get_request_body(_convert_event_hub_to_xml(hub)) request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access request.headers = self._update_service_bus_header(request) if not fail_on_exist: try: self._perform_request(request) return True except AzureHttpError as ex: _dont_fail_on_exist(ex) return False else: self._perform_request(request) return True
[ "def", "create_event_hub", "(", "self", ",", "hub_name", ",", "hub", "=", "None", ",", "fail_on_exist", "=", "False", ")", ":", "_validate_not_none", "(", "'hub_name'", ",", "hub_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'PUT'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "'/'", "+", "_str", "(", "hub_name", ")", "+", "'?api-version=2014-01'", "request", ".", "body", "=", "_get_request_body", "(", "_convert_event_hub_to_xml", "(", "hub", ")", ")", "request", ".", "path", ",", "request", ".", "query", "=", "self", ".", "_httpclient", ".", "_update_request_uri_query", "(", "request", ")", "# pylint: disable=protected-access", "request", ".", "headers", "=", "self", ".", "_update_service_bus_header", "(", "request", ")", "if", "not", "fail_on_exist", ":", "try", ":", "self", ".", "_perform_request", "(", "request", ")", "return", "True", "except", "AzureHttpError", "as", "ex", ":", "_dont_fail_on_exist", "(", "ex", ")", "return", "False", "else", ":", "self", ".", "_perform_request", "(", "request", ")", "return", "True" ]
38.918919
0.002033
def get_url_file_name(url): """Get the file name from an url Parameters ---------- url : str Returns ------- str The file name """ assert isinstance(url, (str, _oldstr)) return urlparse.urlparse(url).path.split('/')[-1]
[ "def", "get_url_file_name", "(", "url", ")", ":", "assert", "isinstance", "(", "url", ",", "(", "str", ",", "_oldstr", ")", ")", "return", "urlparse", ".", "urlparse", "(", "url", ")", ".", "path", ".", "split", "(", "'/'", ")", "[", "-", "1", "]" ]
17.4
0.010909
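The same extraction in plain Python 3, dropping the record's _oldstr Python 2 compatibility check:

from urllib.parse import urlparse

def get_url_file_name(url):
    # last segment of the URL path
    return urlparse(url).path.split('/')[-1]

assert get_url_file_name('https://example.com/a/b/data.csv') == 'data.csv'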
async def api_call(self, verb, action, params=None, add_authorization_token=True, retry=False): """Send api call.""" if add_authorization_token and not self.token: await self.refresh_token() try: return await self._api_call_impl(verb, action, params, add_authorization_token) except InvalidToken: if not retry and add_authorization_token: await self.refresh_token() # Recursive call of api_call return await self.api_call(verb, action, params, add_authorization_token, True) raise
[ "async", "def", "api_call", "(", "self", ",", "verb", ",", "action", ",", "params", "=", "None", ",", "add_authorization_token", "=", "True", ",", "retry", "=", "False", ")", ":", "if", "add_authorization_token", "and", "not", "self", ".", "token", ":", "await", "self", ".", "refresh_token", "(", ")", "try", ":", "return", "await", "self", ".", "_api_call_impl", "(", "verb", ",", "action", ",", "params", ",", "add_authorization_token", ")", "except", "InvalidToken", ":", "if", "not", "retry", "and", "add_authorization_token", ":", "await", "self", ".", "refresh_token", "(", ")", "# Recursive call of api_call", "return", "await", "self", ".", "api_call", "(", "verb", ",", "action", ",", "params", ",", "add_authorization_token", ",", "True", ")", "raise" ]
45.923077
0.00821
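The refresh-then-retry-once shape of the method above, as a framework-free asyncio sketch; Client, InvalidToken and the token values are stand-ins invented for this demo:

import asyncio

class InvalidToken(Exception):
    pass

class Client:
    def __init__(self):
        self.token = 'stale'        # forces one InvalidToken on first use

    async def refresh_token(self):
        self.token = 'fresh'

    async def _api_call_impl(self, action):
        if self.token != 'fresh':
            raise InvalidToken()
        return '{}: ok'.format(action)

    async def api_call(self, action, retry=False):
        if not self.token:
            await self.refresh_token()
        try:
            return await self._api_call_impl(action)
        except InvalidToken:
            if not retry:
                await self.refresh_token()
                # recursive call, exactly one retry
                return await self.api_call(action, retry=True)
            raise

print(asyncio.run(Client().api_call('status')))   # status: ok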
def georadius(self, key, longitude, latitude, radius, unit='m', *, with_dist=False, with_hash=False, with_coord=False, count=None, sort=None, encoding=_NOTSET): """Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point. Return value follows Redis convention: * if none of ``WITH*`` flags are set -- list of strings returned: >>> await redis.georadius('Sicily', 15, 37, 200, 'km') [b"Palermo", b"Catania"] * if any flag (or all) is set -- list of named tuples returned: >>> await redis.georadius('Sicily', 15, 37, 200, 'km', ... with_dist=True) [GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None), GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)] :raises TypeError: radius is not float or int :raises TypeError: count is not int :raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft`` :raises ValueError: if sort not equal ``ASC`` or ``DESC`` :rtype: list[str] or list[GeoMember] """ args = validate_georadius_options( radius, unit, with_dist, with_hash, with_coord, count, sort ) fut = self.execute( b'GEORADIUS', key, longitude, latitude, radius, unit, *args, encoding=encoding ) if with_dist or with_hash or with_coord: return wait_convert(fut, make_geomember, with_dist=with_dist, with_hash=with_hash, with_coord=with_coord) return fut
[ "def", "georadius", "(", "self", ",", "key", ",", "longitude", ",", "latitude", ",", "radius", ",", "unit", "=", "'m'", ",", "*", ",", "with_dist", "=", "False", ",", "with_hash", "=", "False", ",", "with_coord", "=", "False", ",", "count", "=", "None", ",", "sort", "=", "None", ",", "encoding", "=", "_NOTSET", ")", ":", "args", "=", "validate_georadius_options", "(", "radius", ",", "unit", ",", "with_dist", ",", "with_hash", ",", "with_coord", ",", "count", ",", "sort", ")", "fut", "=", "self", ".", "execute", "(", "b'GEORADIUS'", ",", "key", ",", "longitude", ",", "latitude", ",", "radius", ",", "unit", ",", "*", "args", ",", "encoding", "=", "encoding", ")", "if", "with_dist", "or", "with_hash", "or", "with_coord", ":", "return", "wait_convert", "(", "fut", ",", "make_geomember", ",", "with_dist", "=", "with_dist", ",", "with_hash", "=", "with_hash", ",", "with_coord", "=", "with_coord", ")", "return", "fut" ]
41.97561
0.002271
def bz2_compress_stream(src, level=9): """Compress data from `src`. Args: src (iterable): iterable that yields blocks of data to compress level (int): compression level (1-9) default is 9 Yields: blocks of compressed data """ compressor = bz2.BZ2Compressor(level) for block in src: encoded = compressor.compress(block) if encoded: yield encoded yield compressor.flush()
[ "def", "bz2_compress_stream", "(", "src", ",", "level", "=", "9", ")", ":", "compressor", "=", "bz2", ".", "BZ2Compressor", "(", "level", ")", "for", "block", "in", "src", ":", "encoded", "=", "compressor", ".", "compress", "(", "block", ")", "if", "encoded", ":", "yield", "encoded", "yield", "compressor", ".", "flush", "(", ")" ]
25.647059
0.002212
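A self-contained check of the streaming pattern above: feed blocks through the generator and verify the concatenated output decompresses back to the input.

import bz2

def bz2_compress_stream(src, level=9):
    compressor = bz2.BZ2Compressor(level)
    for block in src:
        encoded = compressor.compress(block)
        if encoded:
            yield encoded
    yield compressor.flush()

blocks = [b'hello ' * 100, b'world ' * 100]
compressed = b''.join(bz2_compress_stream(blocks))
assert bz2.decompress(compressed) == b''.join(blocks)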
def on(self, *args): """ If no arguments are specified, turn all the LEDs on. If arguments are specified, they must be the indexes of the LEDs you wish to turn on. For example:: from gpiozero import LEDBoard leds = LEDBoard(2, 3, 4, 5) leds.on(0) # turn on the first LED (pin 2) leds.on(-1) # turn on the last LED (pin 5) leds.on(1, 2) # turn on the middle LEDs (pins 3 and 4) leds.off() # turn off all LEDs leds.on() # turn on all LEDs If :meth:`blink` is currently active, it will be stopped first. :param int args: The index(es) of the LED(s) to turn on. If no indexes are specified turn on all LEDs. """ self._stop_blink() if args: for index in args: self[index].on() else: super(LEDBoard, self).on()
[ "def", "on", "(", "self", ",", "*", "args", ")", ":", "self", ".", "_stop_blink", "(", ")", "if", "args", ":", "for", "index", "in", "args", ":", "self", "[", "index", "]", ".", "on", "(", ")", "else", ":", "super", "(", "LEDBoard", ",", "self", ")", ".", "on", "(", ")" ]
34.148148
0.00211
def get_querydict(self):
    """
    This depends on self.method; self.method is unused for now,
    and the querydict is always taken from POST
    """
    if self.method:
        querydict = getattr(self.request, self.method.upper())
    else:
        querydict = getattr(self.request, 'POST'.upper())
    # copy to make the querydict mutable
    query_dict = dict(querydict.items())
    return query_dict
[ "def", "get_querydict", "(", "self", ")", ":", "if", "self", ".", "method", ":", "querydict", "=", "getattr", "(", "self", ".", "request", ",", "self", ".", "method", ".", "upper", "(", ")", ")", "else", ":", "querydict", "=", "getattr", "(", "self", ".", "request", ",", "'POST'", ".", "upper", "(", ")", ")", "# copy make querydict mutable", "query_dict", "=", "dict", "(", "querydict", ".", "items", "(", ")", ")", "return", "query_dict" ]
29.846154
0.015
def makevFunc(self,solution): ''' Creates the value function for this period, defined over market resources m and persistent income p. self must have the attribute EndOfPrdvFunc in order to execute. Parameters ---------- solution : ConsumerSolution The solution to this single period problem, which must include the consumption function. Returns ------- vFuncNow : ValueFunc A representation of the value function for this period, defined over market resources m and persistent income p: v = vFuncNow(m,p). ''' mSize = self.aXtraGrid.size pSize = self.pLvlGrid.size # Compute expected value and marginal value on a grid of market resources pLvl_temp = np.tile(self.pLvlGrid,(mSize,1)) # Tile pLvl across m values mLvl_temp = np.tile(self.mLvlMinNow(self.pLvlGrid),(mSize,1)) + np.tile(np.reshape(self.aXtraGrid,(mSize,1)),(1,pSize))*pLvl_temp cLvlNow = solution.cFunc(mLvl_temp,pLvl_temp) aLvlNow = mLvl_temp - cLvlNow vNow = self.u(cLvlNow) + self.EndOfPrdvFunc(aLvlNow,pLvl_temp) vPnow = self.uP(cLvlNow) # Calculate pseudo-inverse value and its first derivative (wrt mLvl) vNvrs = self.uinv(vNow) # value transformed through inverse utility vNvrsP = vPnow*self.uinvP(vNow) # Add data at the lower bound of m mLvl_temp = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pSize)),mLvl_temp),axis=0) vNvrs = np.concatenate((np.zeros((1,pSize)),vNvrs),axis=0) vNvrsP = np.concatenate((np.reshape(vNvrsP[0,:],(1,vNvrsP.shape[1])),vNvrsP),axis=0) # Add data at the lower bound of p MPCminNvrs = self.MPCminNow**(-self.CRRA/(1.0-self.CRRA)) m_temp = np.reshape(mLvl_temp[:,0],(mSize+1,1)) mLvl_temp = np.concatenate((m_temp,mLvl_temp),axis=1) vNvrs = np.concatenate((MPCminNvrs*m_temp,vNvrs),axis=1) vNvrsP = np.concatenate((MPCminNvrs*np.ones((mSize+1,1)),vNvrsP),axis=1) # Construct the pseudo-inverse value function vNvrsFunc_list = [] for j in range(pSize+1): pLvl = np.insert(self.pLvlGrid,0,0.0)[j] vNvrsFunc_list.append(CubicInterp(mLvl_temp[:,j]-self.mLvlMinNow(pLvl),vNvrs[:,j],vNvrsP[:,j],MPCminNvrs*self.hLvlNow(pLvl),MPCminNvrs)) vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_list,np.insert(self.pLvlGrid,0,0.0)) # Value function "shifted" vNvrsFuncNow = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow) # "Re-curve" the pseudo-inverse value function into the value function vFuncNow = ValueFunc2D(vNvrsFuncNow,self.CRRA) return vFuncNow
[ "def", "makevFunc", "(", "self", ",", "solution", ")", ":", "mSize", "=", "self", ".", "aXtraGrid", ".", "size", "pSize", "=", "self", ".", "pLvlGrid", ".", "size", "# Compute expected value and marginal value on a grid of market resources", "pLvl_temp", "=", "np", ".", "tile", "(", "self", ".", "pLvlGrid", ",", "(", "mSize", ",", "1", ")", ")", "# Tile pLvl across m values", "mLvl_temp", "=", "np", ".", "tile", "(", "self", ".", "mLvlMinNow", "(", "self", ".", "pLvlGrid", ")", ",", "(", "mSize", ",", "1", ")", ")", "+", "np", ".", "tile", "(", "np", ".", "reshape", "(", "self", ".", "aXtraGrid", ",", "(", "mSize", ",", "1", ")", ")", ",", "(", "1", ",", "pSize", ")", ")", "*", "pLvl_temp", "cLvlNow", "=", "solution", ".", "cFunc", "(", "mLvl_temp", ",", "pLvl_temp", ")", "aLvlNow", "=", "mLvl_temp", "-", "cLvlNow", "vNow", "=", "self", ".", "u", "(", "cLvlNow", ")", "+", "self", ".", "EndOfPrdvFunc", "(", "aLvlNow", ",", "pLvl_temp", ")", "vPnow", "=", "self", ".", "uP", "(", "cLvlNow", ")", "# Calculate pseudo-inverse value and its first derivative (wrt mLvl)", "vNvrs", "=", "self", ".", "uinv", "(", "vNow", ")", "# value transformed through inverse utility", "vNvrsP", "=", "vPnow", "*", "self", ".", "uinvP", "(", "vNow", ")", "# Add data at the lower bound of m", "mLvl_temp", "=", "np", ".", "concatenate", "(", "(", "np", ".", "reshape", "(", "self", ".", "mLvlMinNow", "(", "self", ".", "pLvlGrid", ")", ",", "(", "1", ",", "pSize", ")", ")", ",", "mLvl_temp", ")", ",", "axis", "=", "0", ")", "vNvrs", "=", "np", ".", "concatenate", "(", "(", "np", ".", "zeros", "(", "(", "1", ",", "pSize", ")", ")", ",", "vNvrs", ")", ",", "axis", "=", "0", ")", "vNvrsP", "=", "np", ".", "concatenate", "(", "(", "np", ".", "reshape", "(", "vNvrsP", "[", "0", ",", ":", "]", ",", "(", "1", ",", "vNvrsP", ".", "shape", "[", "1", "]", ")", ")", ",", "vNvrsP", ")", ",", "axis", "=", "0", ")", "# Add data at the lower bound of p", "MPCminNvrs", "=", "self", ".", "MPCminNow", "**", "(", "-", "self", ".", "CRRA", "/", "(", "1.0", "-", "self", ".", "CRRA", ")", ")", "m_temp", "=", "np", ".", "reshape", "(", "mLvl_temp", "[", ":", ",", "0", "]", ",", "(", "mSize", "+", "1", ",", "1", ")", ")", "mLvl_temp", "=", "np", ".", "concatenate", "(", "(", "m_temp", ",", "mLvl_temp", ")", ",", "axis", "=", "1", ")", "vNvrs", "=", "np", ".", "concatenate", "(", "(", "MPCminNvrs", "*", "m_temp", ",", "vNvrs", ")", ",", "axis", "=", "1", ")", "vNvrsP", "=", "np", ".", "concatenate", "(", "(", "MPCminNvrs", "*", "np", ".", "ones", "(", "(", "mSize", "+", "1", ",", "1", ")", ")", ",", "vNvrsP", ")", ",", "axis", "=", "1", ")", "# Construct the pseudo-inverse value function", "vNvrsFunc_list", "=", "[", "]", "for", "j", "in", "range", "(", "pSize", "+", "1", ")", ":", "pLvl", "=", "np", ".", "insert", "(", "self", ".", "pLvlGrid", ",", "0", ",", "0.0", ")", "[", "j", "]", "vNvrsFunc_list", ".", "append", "(", "CubicInterp", "(", "mLvl_temp", "[", ":", ",", "j", "]", "-", "self", ".", "mLvlMinNow", "(", "pLvl", ")", ",", "vNvrs", "[", ":", ",", "j", "]", ",", "vNvrsP", "[", ":", ",", "j", "]", ",", "MPCminNvrs", "*", "self", ".", "hLvlNow", "(", "pLvl", ")", ",", "MPCminNvrs", ")", ")", "vNvrsFuncBase", "=", "LinearInterpOnInterp1D", "(", "vNvrsFunc_list", ",", "np", ".", "insert", "(", "self", ".", "pLvlGrid", ",", "0", ",", "0.0", ")", ")", "# Value function \"shifted\"", "vNvrsFuncNow", "=", "VariableLowerBoundFunc2D", "(", "vNvrsFuncBase", ",", "self", ".", "mLvlMinNow", ")", "# \"Re-curve\" the pseudo-inverse value function 
into the value function", "vFuncNow", "=", "ValueFunc2D", "(", "vNvrsFuncNow", ",", "self", ".", "CRRA", ")", "return", "vFuncNow" ]
49.946429
0.028401
def get_deploy_data(self): ''' Gets any default data attached to the current deploy, if any. ''' if self.state and self.state.deploy_data: return self.state.deploy_data return {}
[ "def", "get_deploy_data", "(", "self", ")", ":", "if", "self", ".", "state", "and", "self", ".", "state", ".", "deploy_data", ":", "return", "self", ".", "state", ".", "deploy_data", "return", "{", "}" ]
24.888889
0.008621
def _seconds_as_string(seconds): """ Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s' """ TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)] unit_strings = [] cur = max(int(seconds), 1) for suffix, size in TIME_UNITS: if size is not None: cur, rest = divmod(cur, size) else: rest = cur if rest > 0: unit_strings.insert(0, '%d%s' % (rest, suffix)) return ' '.join(unit_strings)
[ "def", "_seconds_as_string", "(", "seconds", ")", ":", "TIME_UNITS", "=", "[", "(", "'s'", ",", "60", ")", ",", "(", "'m'", ",", "60", ")", ",", "(", "'h'", ",", "24", ")", ",", "(", "'d'", ",", "None", ")", "]", "unit_strings", "=", "[", "]", "cur", "=", "max", "(", "int", "(", "seconds", ")", ",", "1", ")", "for", "suffix", ",", "size", "in", "TIME_UNITS", ":", "if", "size", "is", "not", "None", ":", "cur", ",", "rest", "=", "divmod", "(", "cur", ",", "size", ")", "else", ":", "rest", "=", "cur", "if", "rest", ">", "0", ":", "unit_strings", ".", "insert", "(", "0", ",", "'%d%s'", "%", "(", "rest", ",", "suffix", ")", ")", "return", "' '", ".", "join", "(", "unit_strings", ")" ]
31.933333
0.002028
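The docstring's example, verified end-to-end with a self-contained copy of the formatter:

def _seconds_as_string(seconds):
    TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)]
    unit_strings = []
    cur = max(int(seconds), 1)
    for suffix, size in TIME_UNITS:
        if size is not None:
            cur, rest = divmod(cur, size)
        else:
            rest = cur
        if rest > 0:
            unit_strings.insert(0, '%d%s' % (rest, suffix))
    return ' '.join(unit_strings)

assert _seconds_as_string(103661) == '1d 4h 47m 41s'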
def _get_branches(self): """Get branches from org/repo.""" if self.offline: local_path = Path(LOCAL_PATH).expanduser() / self.org / self.repo get_refs = f"git -C {shlex.quote(str(local_path))} show-ref --heads" else: get_refs = f"git ls-remote --heads https://github.com/{self.org}/{self.repo}" try: # Parse get_refs output for the actual branch names return (line.split()[1].replace("refs/heads/", "") for line in _run(get_refs, timeout=3).split("\n")) except Error: return []
[ "def", "_get_branches", "(", "self", ")", ":", "if", "self", ".", "offline", ":", "local_path", "=", "Path", "(", "LOCAL_PATH", ")", ".", "expanduser", "(", ")", "/", "self", ".", "org", "/", "self", ".", "repo", "get_refs", "=", "f\"git -C {shlex.quote(str(local_path))} show-ref --heads\"", "else", ":", "get_refs", "=", "f\"git ls-remote --heads https://github.com/{self.org}/{self.repo}\"", "try", ":", "# Parse get_refs output for the actual branch names", "return", "(", "line", ".", "split", "(", ")", "[", "1", "]", ".", "replace", "(", "\"refs/heads/\"", ",", "\"\"", ")", "for", "line", "in", "_run", "(", "get_refs", ",", "timeout", "=", "3", ")", ".", "split", "(", "\"\\n\"", ")", ")", "except", "Error", ":", "return", "[", "]" ]
48.166667
0.008489
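The parsing step above in isolation, applied to hypothetical git ls-remote output:

output = "abc123\trefs/heads/main\ndef456\trefs/heads/dev"
branches = [line.split()[1].replace("refs/heads/", "")
            for line in output.split("\n")]
assert branches == ["main", "dev"]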
def partitions(collection): """Generate all set partitions of a collection. Example: >>> list(partitions(range(3))) # doctest: +NORMALIZE_WHITESPACE [[[0, 1, 2]], [[0], [1, 2]], [[0, 1], [2]], [[1], [0, 2]], [[0], [1], [2]]] """ collection = list(collection) # Special cases if not collection: return if len(collection) == 1: yield [collection] return first = collection[0] for smaller in partitions(collection[1:]): for n, subset in enumerate(smaller): yield smaller[:n] + [[first] + subset] + smaller[n+1:] yield [[first]] + smaller
[ "def", "partitions", "(", "collection", ")", ":", "collection", "=", "list", "(", "collection", ")", "# Special cases", "if", "not", "collection", ":", "return", "if", "len", "(", "collection", ")", "==", "1", ":", "yield", "[", "collection", "]", "return", "first", "=", "collection", "[", "0", "]", "for", "smaller", "in", "partitions", "(", "collection", "[", "1", ":", "]", ")", ":", "for", "n", ",", "subset", "in", "enumerate", "(", "smaller", ")", ":", "yield", "smaller", "[", ":", "n", "]", "+", "[", "[", "first", "]", "+", "subset", "]", "+", "smaller", "[", "n", "+", "1", ":", "]", "yield", "[", "[", "first", "]", "]", "+", "smaller" ]
25.153846
0.001473
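A self-contained copy of the generator with a Bell-number sanity check; the expected counts 1, 2, 5, 15 are standard combinatorics, not taken from the record:

def partitions(collection):
    collection = list(collection)
    if not collection:
        return
    if len(collection) == 1:
        yield [collection]
        return
    first = collection[0]
    for smaller in partitions(collection[1:]):
        # insert `first` into each existing subset, or as its own subset
        for n, subset in enumerate(smaller):
            yield smaller[:n] + [[first] + subset] + smaller[n + 1:]
        yield [[first]] + smaller

assert [len(list(partitions(range(n)))) for n in (1, 2, 3, 4)] == [1, 2, 5, 15]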
def write(self, filehandle, file_format): """Write :class:`~ctfile.ctfile.CTfile` data into file. :param filehandle: File-like object. :param str file_format: Format to use to write data: ``ctfile`` or ``json``. :return: None. :rtype: :py:obj:`None`. """ try: filehandle.write(self.writestr(file_format=file_format)) except IOError: raise IOError('"filehandle" parameter must be writable.')
[ "def", "write", "(", "self", ",", "filehandle", ",", "file_format", ")", ":", "try", ":", "filehandle", ".", "write", "(", "self", ".", "writestr", "(", "file_format", "=", "file_format", ")", ")", "except", "IOError", ":", "raise", "IOError", "(", "'\"filehandle\" parameter must be writable.'", ")" ]
39.083333
0.008333
def find_vlans(
        self,
        number,
        name,
        iexact,
        environment,
        net_type,
        network,
        ip_version,
        subnet,
        acl,
        pagination):
    """
    Find vlans by all search parameters

    :param number: Filter by vlan number column
    :param name: Filter by vlan name column
    :param iexact: Whether the name filter must be an exact match
    :param environment: Filter by related environment ID
    :param net_type: Filter by related network_type ID
    :param network: Filter by each octet in network
    :param ip_version: Get only version (0:ipv4, 1:ipv6, 2:all)
    :param subnet: Whether the octet filter should search by subnets
    :param acl: Filter by vlan acl column
    :param pagination: Class with all data needed to paginate

    :return: Following dictionary:

    ::

        {'vlan': {'id': < id_vlan >,
        'nome': < nome_vlan >,
        'num_vlan': < num_vlan >,
        'id_ambiente': < id_ambiente >,
        'descricao': < descricao >,
        'acl_file_name': < acl_file_name >,
        'acl_valida': < acl_valida >,
        'acl_file_name_v6': < acl_file_name_v6 >,
        'acl_valida_v6': < acl_valida_v6 >,
        'ativada': < ativada >,
        'ambiente_name': < divisao_dc-ambiente_logico-grupo_l3 >
        'redeipv4': [ { all networkipv4 related } ],
        'redeipv6': [ { all networkipv6 related } ] },
        'total': {< total_registros >} }

    :raise InvalidParameterError: Some parameter was invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """

    if not isinstance(pagination, Pagination):
        raise InvalidParameterError(
            u"Invalid parameter: pagination must be a class of type 'Pagination'.")

    vlan_map = dict()
    vlan_map['start_record'] = pagination.start_record
    vlan_map['end_record'] = pagination.end_record
    vlan_map['asorting_cols'] = pagination.asorting_cols
    vlan_map['searchable_columns'] = pagination.searchable_columns
    vlan_map['custom_search'] = pagination.custom_search

    vlan_map['numero'] = number
    vlan_map['nome'] = name
    vlan_map['exato'] = iexact
    vlan_map['ambiente'] = environment
    vlan_map['tipo_rede'] = net_type
    vlan_map['rede'] = network
    vlan_map['versao'] = ip_version
    vlan_map['subrede'] = subnet
    vlan_map['acl'] = acl

    url = 'vlan/find/'

    code, xml = self.submit({'vlan': vlan_map}, 'POST', url)

    key = 'vlan'
    return get_list_map(
        self.response(
            code, xml, [
                key, 'redeipv4', 'redeipv6', 'equipamentos']), key)
[ "def", "find_vlans", "(", "self", ",", "number", ",", "name", ",", "iexact", ",", "environment", ",", "net_type", ",", "network", ",", "ip_version", ",", "subnet", ",", "acl", ",", "pagination", ")", ":", "if", "not", "isinstance", "(", "pagination", ",", "Pagination", ")", ":", "raise", "InvalidParameterError", "(", "u\"Invalid parameter: pagination must be a class of type 'Pagination'.\"", ")", "vlan_map", "=", "dict", "(", ")", "vlan_map", "[", "'start_record'", "]", "=", "pagination", ".", "start_record", "vlan_map", "[", "'end_record'", "]", "=", "pagination", ".", "end_record", "vlan_map", "[", "'asorting_cols'", "]", "=", "pagination", ".", "asorting_cols", "vlan_map", "[", "'searchable_columns'", "]", "=", "pagination", ".", "searchable_columns", "vlan_map", "[", "'custom_search'", "]", "=", "pagination", ".", "custom_search", "vlan_map", "[", "'numero'", "]", "=", "number", "vlan_map", "[", "'nome'", "]", "=", "name", "vlan_map", "[", "'exato'", "]", "=", "iexact", "vlan_map", "[", "'ambiente'", "]", "=", "environment", "vlan_map", "[", "'tipo_rede'", "]", "=", "net_type", "vlan_map", "[", "'rede'", "]", "=", "network", "vlan_map", "[", "'versao'", "]", "=", "ip_version", "vlan_map", "[", "'subrede'", "]", "=", "subnet", "vlan_map", "[", "'acl'", "]", "=", "acl", "url", "=", "'vlan/find/'", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'vlan'", ":", "vlan_map", "}", ",", "'POST'", ",", "url", ")", "key", "=", "'vlan'", "return", "get_list_map", "(", "self", ".", "response", "(", "code", ",", "xml", ",", "[", "key", ",", "'redeipv4'", ",", "'redeipv6'", ",", "'equipamentos'", "]", ")", ",", "key", ")" ]
34.765432
0.001036
def _finite_well_energy(P, n=1, atol=1e-6): ''' Returns the nth bound-state energy for a finite-potential quantum well with the given well-strength parameter, `P`. ''' assert n > 0 and n <= _finite_well_states(P) pi_2 = pi / 2. r = (1 / (P + pi_2)) * (n * pi_2) eta = n * pi_2 - arcsin(r) - r * P w = 1 # relaxation parameter (for succesive relaxation) while True: assert r <= 1 if abs(eta) < atol: break r2 = r ** 2. sqrt_1mr2 = sqrt(1. - r2) denom = (1. + P * sqrt_1mr2) t1 = P * sqrt_1mr2 / denom * eta # t2 = -r * P / (2 * (1. + P * sqrt_1mr2) ** 3) * eta ** 2 while True: next_r = (1 - w) * r + w * (r + t1) # next_r = (1 - w) * r + w * (r + t1 + t2) next_eta = n * pi_2 - arcsin(next_r) - next_r * P # decrease w until eta is converging if abs(next_eta / eta) < 1: r = next_r eta = next_eta break else: w *= 0.5 alpha = P * r E = 2 * (alpha) ** 2 # hbar**2 / (m * L**2) return E
[ "def", "_finite_well_energy", "(", "P", ",", "n", "=", "1", ",", "atol", "=", "1e-6", ")", ":", "assert", "n", ">", "0", "and", "n", "<=", "_finite_well_states", "(", "P", ")", "pi_2", "=", "pi", "/", "2.", "r", "=", "(", "1", "/", "(", "P", "+", "pi_2", ")", ")", "*", "(", "n", "*", "pi_2", ")", "eta", "=", "n", "*", "pi_2", "-", "arcsin", "(", "r", ")", "-", "r", "*", "P", "w", "=", "1", "# relaxation parameter (for succesive relaxation)", "while", "True", ":", "assert", "r", "<=", "1", "if", "abs", "(", "eta", ")", "<", "atol", ":", "break", "r2", "=", "r", "**", "2.", "sqrt_1mr2", "=", "sqrt", "(", "1.", "-", "r2", ")", "denom", "=", "(", "1.", "+", "P", "*", "sqrt_1mr2", ")", "t1", "=", "P", "*", "sqrt_1mr2", "/", "denom", "*", "eta", "# t2 = -r * P / (2 * (1. + P * sqrt_1mr2) ** 3) * eta ** 2", "while", "True", ":", "next_r", "=", "(", "1", "-", "w", ")", "*", "r", "+", "w", "*", "(", "r", "+", "t1", ")", "# next_r = (1 - w) * r + w * (r + t1 + t2)", "next_eta", "=", "n", "*", "pi_2", "-", "arcsin", "(", "next_r", ")", "-", "next_r", "*", "P", "# decrease w until eta is converging", "if", "abs", "(", "next_eta", "/", "eta", ")", "<", "1", ":", "r", "=", "next_r", "eta", "=", "next_eta", "break", "else", ":", "w", "*=", "0.5", "alpha", "=", "P", "*", "r", "E", "=", "2", "*", "(", "alpha", ")", "**", "2", "# hbar**2 / (m * L**2)", "return", "E" ]
32.882353
0.000869
def sample(problem, N, seed=None): """Generate model inputs using Latin hypercube sampling (LHS). Returns a NumPy matrix containing the model inputs generated by Latin hypercube sampling. The resulting matrix contains N rows and D columns, where D is the number of parameters. Parameters ---------- problem : dict The problem definition N : int The number of samples to generate """ if seed: np.random.seed(seed) D = problem['num_vars'] result = np.zeros([N, D]) temp = np.zeros([N]) d = 1.0 / N for i in range(D): for j in range(N): temp[j] = np.random.uniform( low=j * d, high=(j + 1) * d, size=1)[0] np.random.shuffle(temp) for j in range(N): result[j, i] = temp[j] scale_samples(result, problem['bounds']) return result
[ "def", "sample", "(", "problem", ",", "N", ",", "seed", "=", "None", ")", ":", "if", "seed", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "D", "=", "problem", "[", "'num_vars'", "]", "result", "=", "np", ".", "zeros", "(", "[", "N", ",", "D", "]", ")", "temp", "=", "np", ".", "zeros", "(", "[", "N", "]", ")", "d", "=", "1.0", "/", "N", "for", "i", "in", "range", "(", "D", ")", ":", "for", "j", "in", "range", "(", "N", ")", ":", "temp", "[", "j", "]", "=", "np", ".", "random", ".", "uniform", "(", "low", "=", "j", "*", "d", ",", "high", "=", "(", "j", "+", "1", ")", "*", "d", ",", "size", "=", "1", ")", "[", "0", "]", "np", ".", "random", ".", "shuffle", "(", "temp", ")", "for", "j", "in", "range", "(", "N", ")", ":", "result", "[", "j", ",", "i", "]", "=", "temp", "[", "j", "]", "scale_samples", "(", "result", ",", "problem", "[", "'bounds'", "]", ")", "return", "result" ]
25.371429
0.001085
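The stratified core of the sampler above, restated without SALib's scale_samples helper; this sketch uses numpy's newer Generator API and keeps samples in the unit hypercube:

import numpy as np

def lhs_unit(n_samples, n_vars, seed=None):
    rng = np.random.default_rng(seed)
    d = 1.0 / n_samples
    result = np.zeros((n_samples, n_vars))
    strata = np.arange(n_samples)
    for i in range(n_vars):
        # one uniform draw per stratum, then shuffle the strata
        col = rng.uniform(low=strata * d, high=(strata + 1) * d)
        rng.shuffle(col)
        result[:, i] = col
    return result

samples = lhs_unit(5, 2, seed=0)
assert samples.shape == (5, 2) and (samples >= 0).all() and (samples <= 1).all()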
def sumMerge(dict1, dict2): """ Adds two dictionaries together, and merges into the first, dict1. Returns first dict. """ for key in dict2: dict1[key] = list(map(lambda a,b: a + b, dict1.get(key, [0,0,0,0]), dict2[key])) return dict1
[ "def", "sumMerge", "(", "dict1", ",", "dict2", ")", ":", "for", "key", "in", "dict2", ":", "dict1", "[", "key", "]", "=", "list", "(", "map", "(", "lambda", "a", ",", "b", ":", "a", "+", "b", ",", "dict1", ".", "get", "(", "key", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ")", ",", "dict2", "[", "key", "]", ")", ")", "return", "dict1" ]
32.25
0.022642
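A self-contained run of the element-wise merge above:

def sumMerge(dict1, dict2):
    for key in dict2:
        dict1[key] = list(map(lambda a, b: a + b,
                              dict1.get(key, [0, 0, 0, 0]), dict2[key]))
    return dict1

totals = sumMerge({'a': [1, 2, 3, 4]}, {'a': [10, 0, 0, 0], 'b': [5, 5, 5, 5]})
assert totals == {'a': [11, 2, 3, 4], 'b': [5, 5, 5, 5]}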
def get_kapur_threshold(image, mask=None): """The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space.""" cropped_image = np.array(image.flat) if mask is None else image[mask] if np.product(cropped_image.shape)<3: return 0 if np.min(cropped_image) == np.max(cropped_image): return cropped_image[0] log_image = np.log2(smooth_with_noise(cropped_image, 8)) min_log_image = np.min(log_image) max_log_image = np.max(log_image) histogram = scipy.ndimage.histogram(log_image, min_log_image, max_log_image, 256) histogram_values = (min_log_image + (max_log_image - min_log_image)* np.arange(256, dtype=float) / 255) # drop any zero bins keep = histogram != 0 histogram = histogram[keep] histogram_values = histogram_values[keep] # check for corner cases if np.product(histogram_values)==1: return 2**histogram_values[0] # Normalize to probabilities p = histogram.astype(float) / float(np.sum(histogram)) # Find the probabilities totals up to and above each possible threshold. lo_sum = np.cumsum(p); hi_sum = lo_sum[-1] - lo_sum; lo_e = np.cumsum(p * np.log2(p)); hi_e = lo_e[-1] - lo_e; # compute the entropies lo_entropy = lo_e / lo_sum - np.log2(lo_sum); hi_entropy = hi_e / hi_sum - np.log2(hi_sum); sum_entropy = lo_entropy[:-1] + hi_entropy[:-1]; sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.Inf entry = np.argmin(sum_entropy); return 2**((histogram_values[entry] + histogram_values[entry+1]) / 2);
[ "def", "get_kapur_threshold", "(", "image", ",", "mask", "=", "None", ")", ":", "cropped_image", "=", "np", ".", "array", "(", "image", ".", "flat", ")", "if", "mask", "is", "None", "else", "image", "[", "mask", "]", "if", "np", ".", "product", "(", "cropped_image", ".", "shape", ")", "<", "3", ":", "return", "0", "if", "np", ".", "min", "(", "cropped_image", ")", "==", "np", ".", "max", "(", "cropped_image", ")", ":", "return", "cropped_image", "[", "0", "]", "log_image", "=", "np", ".", "log2", "(", "smooth_with_noise", "(", "cropped_image", ",", "8", ")", ")", "min_log_image", "=", "np", ".", "min", "(", "log_image", ")", "max_log_image", "=", "np", ".", "max", "(", "log_image", ")", "histogram", "=", "scipy", ".", "ndimage", ".", "histogram", "(", "log_image", ",", "min_log_image", ",", "max_log_image", ",", "256", ")", "histogram_values", "=", "(", "min_log_image", "+", "(", "max_log_image", "-", "min_log_image", ")", "*", "np", ".", "arange", "(", "256", ",", "dtype", "=", "float", ")", "/", "255", ")", "# drop any zero bins", "keep", "=", "histogram", "!=", "0", "histogram", "=", "histogram", "[", "keep", "]", "histogram_values", "=", "histogram_values", "[", "keep", "]", "# check for corner cases", "if", "np", ".", "product", "(", "histogram_values", ")", "==", "1", ":", "return", "2", "**", "histogram_values", "[", "0", "]", "# Normalize to probabilities", "p", "=", "histogram", ".", "astype", "(", "float", ")", "/", "float", "(", "np", ".", "sum", "(", "histogram", ")", ")", "# Find the probabilities totals up to and above each possible threshold.", "lo_sum", "=", "np", ".", "cumsum", "(", "p", ")", "hi_sum", "=", "lo_sum", "[", "-", "1", "]", "-", "lo_sum", "lo_e", "=", "np", ".", "cumsum", "(", "p", "*", "np", ".", "log2", "(", "p", ")", ")", "hi_e", "=", "lo_e", "[", "-", "1", "]", "-", "lo_e", "# compute the entropies", "lo_entropy", "=", "lo_e", "/", "lo_sum", "-", "np", ".", "log2", "(", "lo_sum", ")", "hi_entropy", "=", "hi_e", "/", "hi_sum", "-", "np", ".", "log2", "(", "hi_sum", ")", "sum_entropy", "=", "lo_entropy", "[", ":", "-", "1", "]", "+", "hi_entropy", "[", ":", "-", "1", "]", "sum_entropy", "[", "np", ".", "logical_not", "(", "np", ".", "isfinite", "(", "sum_entropy", ")", ")", "]", "=", "np", ".", "Inf", "entry", "=", "np", ".", "argmin", "(", "sum_entropy", ")", "return", "2", "**", "(", "(", "histogram_values", "[", "entry", "]", "+", "histogram_values", "[", "entry", "+", "1", "]", ")", "/", "2", ")" ]
42.923077
0.008762
def describe(self, pid, vendorSpecific=None): """Note: If the server returns a status code other than 200 OK, a ServiceFailure will be raised, as this method is based on a HEAD request, which cannot carry exception information.""" response = self.describeResponse(pid, vendorSpecific=vendorSpecific) return self._read_header_response(response)
[ "def", "describe", "(", "self", ",", "pid", ",", "vendorSpecific", "=", "None", ")", ":", "response", "=", "self", ".", "describeResponse", "(", "pid", ",", "vendorSpecific", "=", "vendorSpecific", ")", "return", "self", ".", "_read_header_response", "(", "response", ")" ]
63
0.010444
def is_valid_program(self,p):
    """checks whether program p makes a syntactically valid tree.

    checks that the accumulated program length is always greater than the
    accumulated arities, indicating that the appropriate number of arguments is
    always present for functions. It then checks that the sum of arities +1
    exactly equals the length of the stack, indicating that there are no
    missing arguments.
    """
    # print("p:",p)
    arities = list(a.arity[a.in_type] for a in p)
    accu_arities = list(accumulate(arities))
    accu_len = list(np.arange(len(p))+1)
    check = list(a < b for a,b in zip(accu_arities,accu_len))
    # print("accu_arities:",accu_arities)
    # print("accu_len:",accu_len)
    # print("accu_arities < accu_len:",accu_arities<accu_len)
    return all(check) and sum(a.arity[a.in_type] for a in p) +1 == len(p) and len(p)>0
[ "def", "is_valid_program", "(", "self", ",", "p", ")", ":", "# print(\"p:\",p)", "arities", "=", "list", "(", "a", ".", "arity", "[", "a", ".", "in_type", "]", "for", "a", "in", "p", ")", "accu_arities", "=", "list", "(", "accumulate", "(", "arities", ")", ")", "accu_len", "=", "list", "(", "np", ".", "arange", "(", "len", "(", "p", ")", ")", "+", "1", ")", "check", "=", "list", "(", "a", "<", "b", "for", "a", ",", "b", "in", "zip", "(", "accu_arities", ",", "accu_len", ")", ")", "# print(\"accu_arities:\",accu_arities)", "# print(\"accu_len:\",accu_len)", "# print(\"accu_arities < accu_len:\",accu_arities<accu_len)", "return", "all", "(", "check", ")", "and", "sum", "(", "a", ".", "arity", "[", "a", ".", "in_type", "]", "for", "a", "in", "p", ")", "+", "1", "==", "len", "(", "p", ")", "and", "len", "(", "p", ")", ">", "0" ]
51
0.009626
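The arity bookkeeping above, restated over plain arity lists; the node objects, a.arity and a.in_type are replaced by integers, so this is a sketch of the same postfix-validity check rather than the record's exact code:

from itertools import accumulate

def is_valid_postfix(arities):
    if not arities:
        return False
    # the running arity total must stay below the running length,
    # i.e. every operator finds enough operands already on the stack
    running = list(accumulate(arities))
    if any(a >= i + 1 for i, a in enumerate(running)):
        return False
    # exactly one value is left on the stack at the end
    return sum(arities) + 1 == len(arities)

assert is_valid_postfix([0, 0, 2])       # x y +
assert not is_valid_postfix([0, 2])      # operator missing an operand
assert not is_valid_postfix([0, 0])      # two values left on the stack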
def send(self, command):
    """send function sends the command to the oxd server and receives the
    response.

    Parameters:
    * **command (dict):** Dict representation of the JSON command string

    Returns:
    **response (dict):** The JSON response from the oxd Server as a dict
    """
    cmd = json.dumps(command)
    cmd = "{:04d}".format(len(cmd)) + cmd
    msg_length = len(cmd)

    # make the first time connection
    if not self.firstDone:
        logger.info('Initiating first time socket connection.')
        self.__connect()
        self.firstDone = True

    # Send the message to the server
    totalsent = 0
    while totalsent < msg_length:
        try:
            logger.debug("Sending: %s", cmd[totalsent:])
            sent = self.sock.send(cmd[totalsent:])
            totalsent = totalsent + sent
        except socket.error as e:
            logger.exception("Reconnecting due to socket error. %s", e)
            self.__connect()
            logger.info("Reconnected to socket.")

    # Check and receive the response if available
    parts = []
    resp_length = 0
    received = 0
    done = False
    while not done:
        part = self.sock.recv(1024)
        if part == "":
            logger.error("Socket connection broken, read empty.")
            self.__connect()
            logger.info("Reconnected to socket.")

        # Find out the length of the response
        if len(part) > 0 and resp_length == 0:
            resp_length = int(part[0:4])
            part = part[4:]

        # Set Done flag
        received = received + len(part)
        if received >= resp_length:
            done = True

        parts.append(part)

    response = "".join(parts)
    # return the JSON as a namedtuple object
    return json.loads(response)
[ "def", "send", "(", "self", ",", "command", ")", ":", "cmd", "=", "json", ".", "dumps", "(", "command", ")", "cmd", "=", "\"{:04d}\"", ".", "format", "(", "len", "(", "cmd", ")", ")", "+", "cmd", "msg_length", "=", "len", "(", "cmd", ")", "# make the first time connection", "if", "not", "self", ".", "firstDone", ":", "logger", ".", "info", "(", "'Initiating first time socket connection.'", ")", "self", ".", "__connect", "(", ")", "self", ".", "firstDone", "=", "True", "# Send the message the to the server", "totalsent", "=", "0", "while", "totalsent", "<", "msg_length", ":", "try", ":", "logger", ".", "debug", "(", "\"Sending: %s\"", ",", "cmd", "[", "totalsent", ":", "]", ")", "sent", "=", "self", ".", "sock", ".", "send", "(", "cmd", "[", "totalsent", ":", "]", ")", "totalsent", "=", "totalsent", "+", "sent", "except", "socket", ".", "error", "as", "e", ":", "logger", ".", "exception", "(", "\"Reconneting due to socket error. %s\"", ",", "e", ")", "self", ".", "__connect", "(", ")", "logger", ".", "info", "(", "\"Reconnected to socket.\"", ")", "# Check and receive the response if available", "parts", "=", "[", "]", "resp_length", "=", "0", "received", "=", "0", "done", "=", "False", "while", "not", "done", ":", "part", "=", "self", ".", "sock", ".", "recv", "(", "1024", ")", "if", "part", "==", "\"\"", ":", "logger", ".", "error", "(", "\"Socket connection broken, read empty.\"", ")", "self", ".", "__connect", "(", ")", "logger", ".", "info", "(", "\"Reconnected to socket.\"", ")", "# Find out the length of the response", "if", "len", "(", "part", ")", ">", "0", "and", "resp_length", "==", "0", ":", "resp_length", "=", "int", "(", "part", "[", "0", ":", "4", "]", ")", "part", "=", "part", "[", "4", ":", "]", "# Set Done flag", "received", "=", "received", "+", "len", "(", "part", ")", "if", "received", ">=", "resp_length", ":", "done", "=", "True", "parts", ".", "append", "(", "part", ")", "response", "=", "\"\"", ".", "join", "(", "parts", ")", "# return the JSON as a namedtuple object", "return", "json", ".", "loads", "(", "response", ")" ]
32.762712
0.002009
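The four-digit length-prefix framing used above, isolated from the socket plumbing:

import json

def frame(command):
    payload = json.dumps(command)
    # four-digit, zero-padded length, then the JSON body
    return "{:04d}".format(len(payload)) + payload

msg = frame({"command": "ping"})
assert msg[:4] == "{:04d}".format(len(msg) - 4)
assert json.loads(msg[4:]) == {"command": "ping"}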
def is_active(self): """Determines whether this plugin is active. This plugin is active if any health pills information is present for any run. Returns: A boolean. Whether this plugin is active. """ return bool( self._grpc_port is not None and self._event_multiplexer and self._event_multiplexer.PluginRunToTagToContent( constants.DEBUGGER_PLUGIN_NAME))
[ "def", "is_active", "(", "self", ")", ":", "return", "bool", "(", "self", ".", "_grpc_port", "is", "not", "None", "and", "self", ".", "_event_multiplexer", "and", "self", ".", "_event_multiplexer", ".", "PluginRunToTagToContent", "(", "constants", ".", "DEBUGGER_PLUGIN_NAME", ")", ")" ]
29.142857
0.002375
def present(name, ip, clean=False): # pylint: disable=C0103 ''' Ensures that the named host is present with the given ip name The host to assign an ip to ip The ip addr(s) to apply to the host. Can be a single IP or a list of IP addresses. clean : False Remove any entries which don't match those configured in the ``ip`` option. .. versionadded:: 2018.3.4 ''' ret = {'name': name, 'changes': {}, 'result': None if __opts__['test'] else True, 'comment': ''} if not isinstance(ip, list): ip = [ip] all_hosts = __salt__['hosts.list_hosts']() comments = [] to_add = set() to_remove = set() # First check for IPs not currently in the hosts file to_add.update([(addr, name) for addr in ip if addr not in all_hosts]) # Now sweep through the hosts file and look for entries matching either the # IP address(es) or hostname. for addr, aliases in six.iteritems(all_hosts): if addr not in ip: if name in aliases: # Found match for hostname, but the corresponding IP is not in # our list, so we need to remove it. if clean: to_remove.add((addr, name)) else: ret.setdefault('warnings', []).append( 'Host {0} present for IP address {1}. To get rid of ' 'this warning, either run this state with \'clean\' ' 'set to True to remove {0} from {1}, or add {1} to ' 'the \'ip\' argument.'.format(name, addr) ) else: if name in aliases: # No changes needed for this IP address and hostname comments.append( 'Host {0} ({1}) already present'.format(name, addr) ) else: # IP address listed in hosts file, but hostname is not present. # We will need to add it. if salt.utils.validate.net.ip_addr(addr): to_add.add((addr, name)) else: ret['result'] = False comments.append( 'Invalid IP Address for {0} ({1})'.format(name, addr) ) for addr, name in to_add: if __opts__['test']: comments.append( 'Host {0} ({1}) would be added'.format(name, addr) ) else: if __salt__['hosts.add_host'](addr, name): comments.append('Added host {0} ({1})'.format(name, addr)) else: ret['result'] = False comments.append('Failed to add host {0} ({1})'.format(name, addr)) continue ret['changes'].setdefault('added', {}).setdefault(addr, []).append(name) for addr, name in to_remove: if __opts__['test']: comments.append( 'Host {0} ({1}) would be removed'.format(name, addr) ) else: if __salt__['hosts.rm_host'](addr, name): comments.append('Removed host {0} ({1})'.format(name, addr)) else: ret['result'] = False comments.append('Failed to remove host {0} ({1})'.format(name, addr)) continue ret['changes'].setdefault('removed', {}).setdefault(addr, []).append(name) ret['comment'] = '\n'.join(comments) return ret
[ "def", "present", "(", "name", ",", "ip", ",", "clean", "=", "False", ")", ":", "# pylint: disable=C0103", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", "if", "__opts__", "[", "'test'", "]", "else", "True", ",", "'comment'", ":", "''", "}", "if", "not", "isinstance", "(", "ip", ",", "list", ")", ":", "ip", "=", "[", "ip", "]", "all_hosts", "=", "__salt__", "[", "'hosts.list_hosts'", "]", "(", ")", "comments", "=", "[", "]", "to_add", "=", "set", "(", ")", "to_remove", "=", "set", "(", ")", "# First check for IPs not currently in the hosts file", "to_add", ".", "update", "(", "[", "(", "addr", ",", "name", ")", "for", "addr", "in", "ip", "if", "addr", "not", "in", "all_hosts", "]", ")", "# Now sweep through the hosts file and look for entries matching either the", "# IP address(es) or hostname.", "for", "addr", ",", "aliases", "in", "six", ".", "iteritems", "(", "all_hosts", ")", ":", "if", "addr", "not", "in", "ip", ":", "if", "name", "in", "aliases", ":", "# Found match for hostname, but the corresponding IP is not in", "# our list, so we need to remove it.", "if", "clean", ":", "to_remove", ".", "add", "(", "(", "addr", ",", "name", ")", ")", "else", ":", "ret", ".", "setdefault", "(", "'warnings'", ",", "[", "]", ")", ".", "append", "(", "'Host {0} present for IP address {1}. To get rid of '", "'this warning, either run this state with \\'clean\\' '", "'set to True to remove {0} from {1}, or add {1} to '", "'the \\'ip\\' argument.'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "if", "name", "in", "aliases", ":", "# No changes needed for this IP address and hostname", "comments", ".", "append", "(", "'Host {0} ({1}) already present'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "# IP address listed in hosts file, but hostname is not present.", "# We will need to add it.", "if", "salt", ".", "utils", ".", "validate", ".", "net", ".", "ip_addr", "(", "addr", ")", ":", "to_add", ".", "add", "(", "(", "addr", ",", "name", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "comments", ".", "append", "(", "'Invalid IP Address for {0} ({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "for", "addr", ",", "name", "in", "to_add", ":", "if", "__opts__", "[", "'test'", "]", ":", "comments", ".", "append", "(", "'Host {0} ({1}) would be added'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "if", "__salt__", "[", "'hosts.add_host'", "]", "(", "addr", ",", "name", ")", ":", "comments", ".", "append", "(", "'Added host {0} ({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "comments", ".", "append", "(", "'Failed to add host {0} ({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "continue", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'added'", ",", "{", "}", ")", ".", "setdefault", "(", "addr", ",", "[", "]", ")", ".", "append", "(", "name", ")", "for", "addr", ",", "name", "in", "to_remove", ":", "if", "__opts__", "[", "'test'", "]", ":", "comments", ".", "append", "(", "'Host {0} ({1}) would be removed'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "if", "__salt__", "[", "'hosts.rm_host'", "]", "(", "addr", ",", "name", ")", ":", "comments", ".", "append", "(", "'Removed host {0} ({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "comments", ".", "append", "(", "'Failed to remove host {0} 
({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "continue", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'removed'", ",", "{", "}", ")", ".", "setdefault", "(", "addr", ",", "[", "]", ")", ".", "append", "(", "name", ")", "ret", "[", "'comment'", "]", "=", "'\\n'", ".", "join", "(", "comments", ")", "return", "ret" ]
36.135417
0.001403
def set_training(model, mode): """ A context manager to temporarily set the training mode of 'model' to 'mode', resetting it when we exit the with-block. A no-op if mode is None. """ if mode is None: yield return old_mode = model.training if old_mode != mode: model.train(mode) try: yield finally: if old_mode != mode: model.train(old_mode)
[ "def", "set_training", "(", "model", ",", "mode", ")", ":", "if", "mode", "is", "None", ":", "yield", "return", "old_mode", "=", "model", ".", "training", "if", "old_mode", "!=", "mode", ":", "model", ".", "train", "(", "mode", ")", "try", ":", "yield", "finally", ":", "if", "old_mode", "!=", "mode", ":", "model", ".", "train", "(", "old_mode", ")" ]
24.529412
0.002309
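The function above is a generator and is presumably wrapped with contextlib.contextmanager where it is defined; a framework-free sketch of the same save-set-restore pattern, with Model as a stand-in for a torch module:

import contextlib

class Model:
    training = True
    def train(self, mode=True):
        self.training = mode

@contextlib.contextmanager
def set_training(model, mode):
    if mode is None:
        yield
        return
    old_mode = model.training
    if old_mode != mode:
        model.train(mode)
    try:
        yield
    finally:
        if old_mode != mode:
            model.train(old_mode)

m = Model()
with set_training(m, False):
    assert m.training is False
assert m.training is True          # restored on exit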
def find_path(target, from_path=None, direction='both', depth_first=False):
    """
    Finds a file or subdirectory from the given path, defaulting to a
    breadth-first search.
    :param target: str of file or subdirectory to be found
    :param from_path: str of path from which to search (defaults to relative)
    :param direction: str enum of up, down, both
    :param depth_first: bool that changes the search to depth-first
    :return: str of path to desired file or subdirectory
    """
    from_path = from_path if from_path else relative_path('', 2)
    if direction == 'up' or direction == 'both':
        path = from_path
        for i in range(100):
            try:
                file_path = os.path.abspath(os.path.join(path, target))
                if os.path.exists(file_path):
                    return file_path
                path = os.path.split(path)[0]
                if len(path) <= 1:
                    break
            except Exception:
                break
        if os.path.exists(os.path.join(path, target)):
            return os.path.join(path, target)
    if direction == 'down' or direction == 'both':
        check = ['']
        while len(check) != 0:
            dir = check.pop(0)
            try:
                roster = os.listdir(os.path.join(from_path, dir))
            except Exception:
                continue  # ignore directories that are inaccessible
            if target in roster:
                return os.path.join(from_path, dir, target)
            else:
                stack = [os.path.join(from_path, dir, i) for i in roster
                         if '.' not in i]
                if depth_first:
                    check = stack + check
                else:
                    check += stack
    raise FileNotFoundError("Failed to find file: %s from %s" % (target, from_path))
[ "def", "find_path", "(", "target", ",", "from_path", "=", "None", ",", "direction", "=", "'both'", ",", "depth_first", "=", "False", ")", ":", "from_path", "=", "from_path", "if", "from_path", "else", "relative_path", "(", "''", ",", "2", ")", "if", "direction", "==", "'up'", "or", "direction", "==", "'both'", ":", "path", "=", "from_path", "for", "i", "in", "range", "(", "100", ")", ":", "try", ":", "file_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "target", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "return", "file_path", "path", "=", "os", ".", "path", ".", "split", "(", "path", ")", "[", "0", "]", "if", "len", "(", "path", ")", "<=", "1", ":", "break", "except", "Exception", ":", "break", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "target", ")", ")", ":", "return", "os", ".", "path", ".", "join", "(", "path", ",", "target", ")", "if", "direction", "==", "'down'", "or", "direction", "==", "'both'", ":", "check", "=", "[", "''", "]", "while", "len", "(", "check", ")", "!=", "0", ":", "dir", "=", "check", ".", "pop", "(", "0", ")", "try", ":", "roster", "=", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "from_path", ",", "dir", ")", ")", "except", "Exception", ":", "continue", "# ignore directories that are inaccessible", "if", "target", "in", "roster", ":", "return", "os", ".", "path", ".", "join", "(", "from_path", ",", "dir", ",", "target", ")", "else", ":", "stack", "=", "[", "os", ".", "path", ".", "join", "(", "from_path", ",", "dir", ",", "i", ")", "for", "i", "in", "roster", "if", "'.'", "not", "in", "i", "]", "if", "depth_first", ":", "check", "=", "stack", "+", "check", "else", ":", "check", "+=", "stack", "raise", "FileNotFoundError", "(", "\"Failed to find file: %s from %s\"", ",", "file", ",", "from_path", ")" ]
39.673913
0.000535
def run(self): """Run command.""" command = ['npm', 'install'] self.announce( 'Running command: %s' % str(command), level=INFO) subprocess.check_call(command)
[ "def", "run", "(", "self", ")", ":", "command", "=", "[", "'npm'", ",", "'install'", "]", "self", ".", "announce", "(", "'Running command: %s'", "%", "str", "(", "command", ")", ",", "level", "=", "INFO", ")", "subprocess", ".", "check_call", "(", "command", ")" ]
29.714286
0.009346
