Dataset columns (with the value ranges reported for this dump):
    text          string    (lengths 75 to 104k)
    code_tokens   sequence
    avg_line_len  float64   (7.91 to 980)
    score         float64   (0 to 0.18)
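Each record below lists its four fields in this order: the function source (text), its token sequence (code_tokens), the average line length (avg_line_len), and the score. As a minimal sketch of how one such record might be read back and sanity-checked, assuming the rows are exported as a JSON Lines file named code_rows.jsonl with exactly these field names (the file name, the loader, and the idea that avg_line_len is the mean character length of the source lines are assumptions, not something the dump itself states):

import json


def mean_line_length(source):
    """Average character length of the non-empty lines in a code snippet (assumed definition)."""
    lines = [line for line in source.splitlines() if line.strip()]
    return sum(len(line) for line in lines) / len(lines) if lines else 0.0


with open("code_rows.jsonl") as handle:  # hypothetical export of the rows below
    for raw in handle:
        row = json.loads(raw)
        # Fields assumed per the schema above: text, code_tokens, avg_line_len, score.
        recomputed = mean_line_length(row["text"])
        print(row["score"], row["avg_line_len"], round(recomputed, 2), len(row["code_tokens"]))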
def rotation(ATTITUDE):
    '''return the current DCM rotation matrix'''
    r = Matrix3()
    r.from_euler(ATTITUDE.roll, ATTITUDE.pitch, ATTITUDE.yaw)
    return r
[ "def", "rotation", "(", "ATTITUDE", ")", ":", "r", "=", "Matrix3", "(", ")", "r", ".", "from_euler", "(", "ATTITUDE", ".", "roll", ",", "ATTITUDE", ".", "pitch", ",", "ATTITUDE", ".", "yaw", ")", "return", "r" ]
32.2
0.006061
def t(self):
    """:obj:`numpy.ndarray` : The 3x1 translation matrix for this projection
    """
    t = np.array([self._plane_width / 2, self._plane_height / 2, self._depth_scale / 2])
    return t
[ "def", "t", "(", "self", ")", ":", "t", "=", "np", ".", "array", "(", "[", "self", ".", "_plane_width", "/", "2", ",", "self", ".", "_plane_height", "/", "2", ",", "self", ".", "_depth_scale", "/", "2", "]", ")", "return", "t" ]
36.142857
0.011583
def items(self):
    """
    ITERATE THROUGH ALL coord, value PAIRS
    """
    for c in self._all_combos():
        _, value = _getitem(self.cube, c)
        yield c, value
[ "def", "items", "(", "self", ")", ":", "for", "c", "in", "self", ".", "_all_combos", "(", ")", ":", "_", ",", "value", "=", "_getitem", "(", "self", ".", "cube", ",", "c", ")", "yield", "c", ",", "value" ]
27.285714
0.010152
def get_monitoring_problems(self):
    """Get the current scheduler livesynthesis

    :return: live synthesis and problems dictionary
    :rtype: dict
    """
    res = {}
    if not self.sched:
        return res

    # Get statistics from the scheduler
    scheduler_stats = self.sched.get_scheduler_stats(details=True)
    if 'livesynthesis' in scheduler_stats:
        res['livesynthesis'] = scheduler_stats['livesynthesis']
    if 'problems' in scheduler_stats:
        res['problems'] = scheduler_stats['problems']

    return res
[ "def", "get_monitoring_problems", "(", "self", ")", ":", "res", "=", "{", "}", "if", "not", "self", ".", "sched", ":", "return", "res", "# Get statistics from the scheduler", "scheduler_stats", "=", "self", ".", "sched", ".", "get_scheduler_stats", "(", "details", "=", "True", ")", "if", "'livesynthesis'", "in", "scheduler_stats", ":", "res", "[", "'livesynthesis'", "]", "=", "scheduler_stats", "[", "'livesynthesis'", "]", "if", "'problems'", "in", "scheduler_stats", ":", "res", "[", "'problems'", "]", "=", "scheduler_stats", "[", "'problems'", "]", "return", "res" ]
32
0.003373
def add_labels(self, objects, count=1):
    """Add multiple labels to the sheet.

    Parameters
    ----------
    objects: iterable
        An iterable of the objects to add. Each of these will be passed to
        the add_label method. Note that if this is a generator it will be
        consumed.
    count: positive integer or iterable of positive integers, default 1
        The number of copies of each label to add. If a single integer,
        that many copies of every label are added. If an iterable, then
        each value specifies how many copies of the corresponding label to
        add. The iterables are advanced in parallel until one is exhausted;
        extra values in the other one are ignored. This means that if there
        are fewer count entries than objects, the objects corresponding to
        the missing counts will not be added to the sheet. Note that if
        this is a generator it will be consumed. Also note that the drawing
        function will only be called once for each label and the results
        copied for the repeats. If the drawing function maintains any state
        internally then using this parameter may break it.

    """
    # If we can convert it to an int, do so and use the itertools.repeat()
    # method to create an infinite iterator from it. Otherwise, assume it
    # is an iterable or sequence.
    try:
        count = int(count)
    except TypeError:
        pass
    else:
        count = repeat(count)

    # If it is not an iterable (e.g., a list or range object),
    # create an iterator over it.
    if not hasattr(count, 'next') and not hasattr(count, '__next__'):
        count = iter(count)

    # Go through the objects.
    for obj in objects:
        # Check we have a count for this one.
        try:
            thiscount = next(count)
        except StopIteration:
            break

        # Draw it.
        self._draw_label(obj, thiscount)
[ "def", "add_labels", "(", "self", ",", "objects", ",", "count", "=", "1", ")", ":", "# If we can convert it to an int, do so and use the itertools.repeat()", "# method to create an infinite iterator from it. Otherwise, assume it", "# is an iterable or sequence.", "try", ":", "count", "=", "int", "(", "count", ")", "except", "TypeError", ":", "pass", "else", ":", "count", "=", "repeat", "(", "count", ")", "# If it is not an iterable (e.g., a list or range object),", "# create an iterator over it.", "if", "not", "hasattr", "(", "count", ",", "'next'", ")", "and", "not", "hasattr", "(", "count", ",", "'__next__'", ")", ":", "count", "=", "iter", "(", "count", ")", "# Go through the objects.", "for", "obj", "in", "objects", ":", "# Check we have a count for this one.", "try", ":", "thiscount", "=", "next", "(", "count", ")", "except", "StopIteration", ":", "break", "# Draw it.", "self", ".", "_draw_label", "(", "obj", ",", "thiscount", ")" ]
41.28
0.000947
def snap(self, path=None):
    """Get a snapshot and save it to disk."""
    if path is None:
        path = "/tmp"
    else:
        path = path.rstrip("/")
    day_dir = datetime.datetime.now().strftime("%d%m%Y")
    hour_dir = datetime.datetime.now().strftime("%H%M")
    ensure_snapshot_dir(path+"/"+self.cam_id+"/"+day_dir+"/"+hour_dir)
    f_path = "{0}/{1}/{2}/{3}/{4}.jpg".format(
        path,
        self.cam_id,
        day_dir,
        hour_dir,
        datetime.datetime.now().strftime("%S"),
    )
    urllib.urlretrieve(
        'http://{0}/snapshot.cgi?user={1}&pwd={2}'.format(
            self.address,
            self.user,
            self.pswd,
        ),
        f_path,
    )
[ "def", "snap", "(", "self", ",", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "\"/tmp\"", "else", ":", "path", "=", "path", ".", "rstrip", "(", "\"/\"", ")", "day_dir", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%d%m%Y\"", ")", "hour_dir", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%H%M\"", ")", "ensure_snapshot_dir", "(", "path", "+", "\"/\"", "+", "self", ".", "cam_id", "+", "\"/\"", "+", "day_dir", "+", "\"/\"", "+", "hour_dir", ")", "f_path", "=", "\"{0}/{1}/{2}/{3}/{4}.jpg\"", ".", "format", "(", "path", ",", "self", ".", "cam_id", ",", "day_dir", ",", "hour_dir", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%S\"", ")", ",", ")", "urllib", ".", "urlretrieve", "(", "'http://{0}/snapshot.cgi?user={1}&pwd={2}'", ".", "format", "(", "self", ".", "address", ",", "self", ".", "user", ",", "self", ".", "pswd", ",", ")", ",", "f_path", ",", ")" ]
35
0.004449
def _GetAPFSVolumeIdentifiers(self, scan_node):
    """Determines the APFS volume identifiers.

    Args:
        scan_node (dfvfs.SourceScanNode): scan node.

    Returns:
        list[str]: APFS volume identifiers.

    Raises:
        SourceScannerError: if the format of or within the source is not
            supported or the the scan node is invalid.
        UserAbort: if the user requested to abort.
    """
    if not scan_node or not scan_node.path_spec:
        raise errors.SourceScannerError('Invalid scan node.')

    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
        return []

    # TODO: refactor self._volumes to use scan options.
    if self._volumes:
        if self._volumes == 'all':
            volumes = range(1, volume_system.number_of_volumes + 1)
        else:
            volumes = self._volumes

        selected_volume_identifiers = self._NormalizedVolumeIdentifiers(
            volume_system, volumes, prefix='apfs')

        if not set(selected_volume_identifiers).difference(volume_identifiers):
            return selected_volume_identifiers

    if len(volume_identifiers) > 1:
        try:
            volume_identifiers = self._PromptUserForAPFSVolumeIdentifiers(
                volume_system, volume_identifiers)
        except KeyboardInterrupt:
            raise errors.UserAbort('File system scan aborted.')

    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='apfs')
[ "def", "_GetAPFSVolumeIdentifiers", "(", "self", ",", "scan_node", ")", ":", "if", "not", "scan_node", "or", "not", "scan_node", ".", "path_spec", ":", "raise", "errors", ".", "SourceScannerError", "(", "'Invalid scan node.'", ")", "volume_system", "=", "apfs_volume_system", ".", "APFSVolumeSystem", "(", ")", "volume_system", ".", "Open", "(", "scan_node", ".", "path_spec", ")", "volume_identifiers", "=", "self", ".", "_source_scanner", ".", "GetVolumeIdentifiers", "(", "volume_system", ")", "if", "not", "volume_identifiers", ":", "return", "[", "]", "# TODO: refactor self._volumes to use scan options.", "if", "self", ".", "_volumes", ":", "if", "self", ".", "_volumes", "==", "'all'", ":", "volumes", "=", "range", "(", "1", ",", "volume_system", ".", "number_of_volumes", "+", "1", ")", "else", ":", "volumes", "=", "self", ".", "_volumes", "selected_volume_identifiers", "=", "self", ".", "_NormalizedVolumeIdentifiers", "(", "volume_system", ",", "volumes", ",", "prefix", "=", "'apfs'", ")", "if", "not", "set", "(", "selected_volume_identifiers", ")", ".", "difference", "(", "volume_identifiers", ")", ":", "return", "selected_volume_identifiers", "if", "len", "(", "volume_identifiers", ")", ">", "1", ":", "try", ":", "volume_identifiers", "=", "self", ".", "_PromptUserForAPFSVolumeIdentifiers", "(", "volume_system", ",", "volume_identifiers", ")", "except", "KeyboardInterrupt", ":", "raise", "errors", ".", "UserAbort", "(", "'File system scan aborted.'", ")", "return", "self", ".", "_NormalizedVolumeIdentifiers", "(", "volume_system", ",", "volume_identifiers", ",", "prefix", "=", "'apfs'", ")" ]
32.702128
0.005685
def make_url(invocation, args=[], kwargs={}):
    """
    >>> make_url('some/path/program', ['arg1', 'arg2'], {'arg3': 4})
    '/some/path/program/arg1/arg2?arg3=4'
    """
    if not invocation.endswith('/'):
        invocation += '/'
    if not invocation.startswith('/'):
        invocation = '/' + invocation
    url = invocation
    for arg in args:
        url += str(arg) + "/"
    if kwargs:
        url = url[:-1]
        url += "?" + urlencode(kwargs)
    return url
[ "def", "make_url", "(", "invocation", ",", "args", "=", "[", "]", ",", "kwargs", "=", "{", "}", ")", ":", "if", "not", "invocation", ".", "endswith", "(", "'/'", ")", ":", "invocation", "+=", "'/'", "if", "not", "invocation", ".", "startswith", "(", "'/'", ")", ":", "invocation", "=", "'/'", "+", "invocation", "url", "=", "invocation", "for", "arg", "in", "args", ":", "url", "+=", "str", "(", "arg", ")", "+", "\"/\"", "if", "kwargs", ":", "url", "=", "url", "[", ":", "-", "1", "]", "url", "+=", "\"?\"", "+", "urlencode", "(", "kwargs", ")", "return", "url" ]
23.25
0.004132
def read_tex_file(root_filepath, root_dir=None):
    r"""Read a TeX file, automatically processing and normalizing it
    (including other input files, removing comments, and deleting trailing
    whitespace).

    Parameters
    ----------
    root_filepath : `str`
        Filepath to a TeX file.
    root_dir : `str`
        Root directory of the TeX project. This only needs to be set when
        recursively reading in ``\input`` or ``\include`` files.

    Returns
    -------
    tex_source : `str`
        TeX source.
    """
    with open(root_filepath, 'r') as f:
        tex_source = f.read()

    if root_dir is None:
        root_dir = os.path.dirname(root_filepath)

    # Text processing pipline
    tex_source = remove_comments(tex_source)
    tex_source = remove_trailing_whitespace(tex_source)
    tex_source = process_inputs(tex_source, root_dir=root_dir)

    return tex_source
[ "def", "read_tex_file", "(", "root_filepath", ",", "root_dir", "=", "None", ")", ":", "with", "open", "(", "root_filepath", ",", "'r'", ")", "as", "f", ":", "tex_source", "=", "f", ".", "read", "(", ")", "if", "root_dir", "is", "None", ":", "root_dir", "=", "os", ".", "path", ".", "dirname", "(", "root_filepath", ")", "# Text processing pipline", "tex_source", "=", "remove_comments", "(", "tex_source", ")", "tex_source", "=", "remove_trailing_whitespace", "(", "tex_source", ")", "tex_source", "=", "process_inputs", "(", "tex_source", ",", "root_dir", "=", "root_dir", ")", "return", "tex_source" ]
28.966667
0.001114
def execute(self, expr, params=None, limit='default', **kwargs):
    """
    Compile and execute Ibis expression using this backend client
    interface, returning results in-memory in the appropriate object type

    Parameters
    ----------
    expr : Expr
    limit : int, default None
        For expressions yielding result yets; retrieve at most this number
        of values/rows. Overrides any limit already set on the expression.
    params : not yet implemented

    Returns
    -------
    output : input type dependent
        Table expressions: pandas.DataFrame
        Array expressions: pandas.Series
        Scalar expressions: Python scalar value
    """
    query_ast = self._build_ast_ensure_limit(expr, limit, params=params)
    result = self._execute_query(query_ast, **kwargs)
    return result
[ "def", "execute", "(", "self", ",", "expr", ",", "params", "=", "None", ",", "limit", "=", "'default'", ",", "*", "*", "kwargs", ")", ":", "query_ast", "=", "self", ".", "_build_ast_ensure_limit", "(", "expr", ",", "limit", ",", "params", "=", "params", ")", "result", "=", "self", ".", "_execute_query", "(", "query_ast", ",", "*", "*", "kwargs", ")", "return", "result" ]
37.608696
0.002255
def _subscribe(self, sub_id, channel, callback, **kwargs):
    """Listen to a queue, notify via callback function.

    :param sub_id: ID for this subscription in the transport layer
    :param channel: Queue name to subscribe to
    :param callback: Function to be called when messages are received
    :param **kwargs: Further parameters for the transport layer. For example
        acknowledgement: If true receipt of each message needs to be acknowledged.
        exclusive: Attempt to become exclusive subscriber to the queue.
        ignore_namespace: Do not apply namespace to the destination name
        priority: Consumer priority, messages are sent to higher priority
            consumers whenever possible.
        selector: Only receive messages filtered by a selector. See
            https://activemq.apache.org/activemq-message-properties.html
            for potential filter criteria. Uses SQL 92 syntax.
        transformation: Transform messages into different format. If set to
            True, will use 'jms-object-json' formatting.
    """
    headers = {}
    if kwargs.get("exclusive"):
        headers["activemq.exclusive"] = "true"
    if kwargs.get("ignore_namespace"):
        destination = "/queue/" + channel
    else:
        destination = "/queue/" + self._namespace + channel
    if kwargs.get("priority"):
        headers["activemq.priority"] = kwargs["priority"]
    if kwargs.get("retroactive"):
        headers["activemq.retroactive"] = "true"
    if kwargs.get("selector"):
        headers["selector"] = kwargs["selector"]
    if kwargs.get("transformation"):
        if kwargs["transformation"] == True:
            headers["transformation"] = "jms-object-json"
        else:
            headers["transformation"] = kwargs["transformation"]
    if kwargs.get("acknowledgement"):
        ack = "client-individual"
    else:
        ack = "auto"
    self._conn.subscribe(destination, sub_id, headers=headers, ack=ack)
[ "def", "_subscribe", "(", "self", ",", "sub_id", ",", "channel", ",", "callback", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "{", "}", "if", "kwargs", ".", "get", "(", "\"exclusive\"", ")", ":", "headers", "[", "\"activemq.exclusive\"", "]", "=", "\"true\"", "if", "kwargs", ".", "get", "(", "\"ignore_namespace\"", ")", ":", "destination", "=", "\"/queue/\"", "+", "channel", "else", ":", "destination", "=", "\"/queue/\"", "+", "self", ".", "_namespace", "+", "channel", "if", "kwargs", ".", "get", "(", "\"priority\"", ")", ":", "headers", "[", "\"activemq.priority\"", "]", "=", "kwargs", "[", "\"priority\"", "]", "if", "kwargs", ".", "get", "(", "\"retroactive\"", ")", ":", "headers", "[", "\"activemq.retroactive\"", "]", "=", "\"true\"", "if", "kwargs", ".", "get", "(", "\"selector\"", ")", ":", "headers", "[", "\"selector\"", "]", "=", "kwargs", "[", "\"selector\"", "]", "if", "kwargs", ".", "get", "(", "\"transformation\"", ")", ":", "if", "kwargs", "[", "\"transformation\"", "]", "==", "True", ":", "headers", "[", "\"transformation\"", "]", "=", "\"jms-object-json\"", "else", ":", "headers", "[", "\"transformation\"", "]", "=", "kwargs", "[", "\"transformation\"", "]", "if", "kwargs", ".", "get", "(", "\"acknowledgement\"", ")", ":", "ack", "=", "\"client-individual\"", "else", ":", "ack", "=", "\"auto\"", "self", ".", "_conn", ".", "subscribe", "(", "destination", ",", "sub_id", ",", "headers", "=", "headers", ",", "ack", "=", "ack", ")" ]
51.47619
0.00227
def process_error_labels(value):
    """
    Process the error labels of a dependent variable 'value' to ensure uniqueness.
    """
    observed_error_labels = {}
    for error in value.get('errors', []):
        label = error.get('label', 'error')

        if label not in observed_error_labels:
            observed_error_labels[label] = 0
        observed_error_labels[label] += 1

        if observed_error_labels[label] > 1:
            error['label'] = label + '_' + str(observed_error_labels[label])

            # append "_1" to first error label that has a duplicate
            if observed_error_labels[label] == 2:
                for error1 in value.get('errors', []):
                    error1_label = error1.get('label', 'error')
                    if error1_label == label:
                        error1['label'] = label + "_1"
                        break
[ "def", "process_error_labels", "(", "value", ")", ":", "observed_error_labels", "=", "{", "}", "for", "error", "in", "value", ".", "get", "(", "'errors'", ",", "[", "]", ")", ":", "label", "=", "error", ".", "get", "(", "'label'", ",", "'error'", ")", "if", "label", "not", "in", "observed_error_labels", ":", "observed_error_labels", "[", "label", "]", "=", "0", "observed_error_labels", "[", "label", "]", "+=", "1", "if", "observed_error_labels", "[", "label", "]", ">", "1", ":", "error", "[", "'label'", "]", "=", "label", "+", "'_'", "+", "str", "(", "observed_error_labels", "[", "label", "]", ")", "# append \"_1\" to first error label that has a duplicate", "if", "observed_error_labels", "[", "label", "]", "==", "2", ":", "for", "error1", "in", "value", ".", "get", "(", "'errors'", ",", "[", "]", ")", ":", "error1_label", "=", "error1", ".", "get", "(", "'label'", ",", "'error'", ")", "if", "error1_label", "==", "label", ":", "error1", "[", "'label'", "]", "=", "label", "+", "\"_1\"", "break" ]
42.095238
0.004425
def calc_prediction(registered_map, preregistration_mesh, native_mesh, model):
    '''
    calc_registration_prediction is a pimms calculator that creates the both the prediction and the
    registration_prediction, both of which are pimms itables including the fields 'polar_angle',
    'eccentricity', and 'visual_area'. The registration_prediction data describe the vertices for the
    registered_map, not necessarily of the native_mesh, while the prediction describes the native mesh.

    Provided efferent values:
      @ registered_mesh Will be a mesh object that is equivalent to the preregistration_mesh but with
        the coordinates and predicted fields (from the registration) filled in. Note that this mesh is
        still in the resampled configuration is resampling was performed.
      @ registration_prediction Will be a pimms ITable object with columns 'polar_angle',
        'eccentricity', and 'visual_area'. For values outside of the model region, visual_area will be
        0 and other values will be undefined (but are typically 0). The registration_prediction
        describes the values on the registrered_mesh.
      @ prediction will be a pimms ITable object with columns 'polar_angle', 'eccentricity', and
        'visual_area'. For values outside of the model region, visual_area will be 0 and other values
        will be undefined (but are typically 0). The prediction describes the values on the
        native_mesh and the predicted_mesh.
    '''
    # invert the map projection to make the registration map into a mesh
    coords3d = np.array(preregistration_mesh.coordinates)
    idcs = registered_map.labels
    coords3d[:,idcs] = registered_map.meta('projection').inverse(registered_map.coordinates)
    rmesh = preregistration_mesh.copy(coordinates=coords3d)
    # go ahead and get the model predictions...
    d = model.cortex_to_angle(registered_map.coordinates)
    id2n = model.area_id_to_name
    (ang, ecc) = d[0:2]
    lbl = np.asarray(d[2], dtype=np.int)
    rad = np.asarray([predict_pRF_radius(e, id2n[l]) if l > 0 else 0 for (e,l) in zip(ecc,lbl)])
    d = {'polar_angle':ang, 'eccentricity':ecc, 'visual_area':lbl, 'radius':rad}
    # okay, put these on the mesh
    rpred = {}
    for (k,v) in six.iteritems(d):
        v.setflags(write=False)
        tmp = np.zeros(rmesh.vertex_count, dtype=v.dtype)
        tmp[registered_map.labels] = v
        tmp.setflags(write=False)
        rpred[k] = tmp
    rpred = pyr.pmap(rpred)
    rmesh = rmesh.with_prop(rpred)
    # next, do all of this for the native mesh..
    if native_mesh is preregistration_mesh:
        pred = rpred
        pmesh = rmesh
    else:
        # we need to address the native coordinates in the prereg coordinates then unaddress them
        # in the registered coordinates; this will let us make a native-registered-map and repeat
        # the exercise above
        addr = preregistration_mesh.address(native_mesh.coordinates)
        natreg_mesh = native_mesh.copy(coordinates=rmesh.unaddress(addr))
        d = model.cortex_to_angle(natreg_mesh)
        (ang,ecc) = d[0:2]
        lbl = np.asarray(d[2], dtype=np.int)
        rad = np.asarray([predict_pRF_radius(e, id2n[l]) if l > 0 else 0 for (e,l) in zip(ecc,lbl)])
        pred = pyr.m(polar_angle=ang, eccentricity=ecc, radius=rad, visual_area=lbl)
        pmesh = natreg_mesh.with_prop(pred)
    return {'registered_mesh'        : rmesh,
            'registration_prediction': rpred,
            'prediction'             : pred,
            'predicted_mesh'         : pmesh}
[ "def", "calc_prediction", "(", "registered_map", ",", "preregistration_mesh", ",", "native_mesh", ",", "model", ")", ":", "# invert the map projection to make the registration map into a mesh", "coords3d", "=", "np", ".", "array", "(", "preregistration_mesh", ".", "coordinates", ")", "idcs", "=", "registered_map", ".", "labels", "coords3d", "[", ":", ",", "idcs", "]", "=", "registered_map", ".", "meta", "(", "'projection'", ")", ".", "inverse", "(", "registered_map", ".", "coordinates", ")", "rmesh", "=", "preregistration_mesh", ".", "copy", "(", "coordinates", "=", "coords3d", ")", "# go ahead and get the model predictions...", "d", "=", "model", ".", "cortex_to_angle", "(", "registered_map", ".", "coordinates", ")", "id2n", "=", "model", ".", "area_id_to_name", "(", "ang", ",", "ecc", ")", "=", "d", "[", "0", ":", "2", "]", "lbl", "=", "np", ".", "asarray", "(", "d", "[", "2", "]", ",", "dtype", "=", "np", ".", "int", ")", "rad", "=", "np", ".", "asarray", "(", "[", "predict_pRF_radius", "(", "e", ",", "id2n", "[", "l", "]", ")", "if", "l", ">", "0", "else", "0", "for", "(", "e", ",", "l", ")", "in", "zip", "(", "ecc", ",", "lbl", ")", "]", ")", "d", "=", "{", "'polar_angle'", ":", "ang", ",", "'eccentricity'", ":", "ecc", ",", "'visual_area'", ":", "lbl", ",", "'radius'", ":", "rad", "}", "# okay, put these on the mesh", "rpred", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "d", ")", ":", "v", ".", "setflags", "(", "write", "=", "False", ")", "tmp", "=", "np", ".", "zeros", "(", "rmesh", ".", "vertex_count", ",", "dtype", "=", "v", ".", "dtype", ")", "tmp", "[", "registered_map", ".", "labels", "]", "=", "v", "tmp", ".", "setflags", "(", "write", "=", "False", ")", "rpred", "[", "k", "]", "=", "tmp", "rpred", "=", "pyr", ".", "pmap", "(", "rpred", ")", "rmesh", "=", "rmesh", ".", "with_prop", "(", "rpred", ")", "# next, do all of this for the native mesh..", "if", "native_mesh", "is", "preregistration_mesh", ":", "pred", "=", "rpred", "pmesh", "=", "rmesh", "else", ":", "# we need to address the native coordinates in the prereg coordinates then unaddress them", "# in the registered coordinates; this will let us make a native-registered-map and repeat", "# the exercise above", "addr", "=", "preregistration_mesh", ".", "address", "(", "native_mesh", ".", "coordinates", ")", "natreg_mesh", "=", "native_mesh", ".", "copy", "(", "coordinates", "=", "rmesh", ".", "unaddress", "(", "addr", ")", ")", "d", "=", "model", ".", "cortex_to_angle", "(", "natreg_mesh", ")", "(", "ang", ",", "ecc", ")", "=", "d", "[", "0", ":", "2", "]", "lbl", "=", "np", ".", "asarray", "(", "d", "[", "2", "]", ",", "dtype", "=", "np", ".", "int", ")", "rad", "=", "np", ".", "asarray", "(", "[", "predict_pRF_radius", "(", "e", ",", "id2n", "[", "l", "]", ")", "if", "l", ">", "0", "else", "0", "for", "(", "e", ",", "l", ")", "in", "zip", "(", "ecc", ",", "lbl", ")", "]", ")", "pred", "=", "pyr", ".", "m", "(", "polar_angle", "=", "ang", ",", "eccentricity", "=", "ecc", ",", "radius", "=", "rad", ",", "visual_area", "=", "lbl", ")", "pmesh", "=", "natreg_mesh", ".", "with_prop", "(", "pred", ")", "return", "{", "'registered_mesh'", ":", "rmesh", ",", "'registration_prediction'", ":", "rpred", ",", "'prediction'", ":", "pred", ",", "'predicted_mesh'", ":", "pmesh", "}" ]
55.650794
0.01009
def detectMeegoPhone(self):
    """Return detection of a Meego phone

    Detects a phone running the Meego OS.
    """
    return UAgentInfo.deviceMeego in self.__userAgent \
        and UAgentInfo.mobi in self.__userAgent
[ "def", "detectMeegoPhone", "(", "self", ")", ":", "return", "UAgentInfo", ".", "deviceMeego", "in", "self", ".", "__userAgent", "and", "UAgentInfo", ".", "mobi", "in", "self", ".", "__userAgent" ]
33.857143
0.00823
def init_config(cls):
    """ Initialize Gandi CLI configuration.

    Create global configuration directory with API credentials
    """
    try:
        # first load current conf and only overwrite needed params
        # we don't want to reset everything
        config_file = os.path.expanduser(cls.home_config)
        config = cls.load(config_file, 'global')
        cls._del('global', 'api.env')

        hidden_apikey = '%s...' % cls.get('api.key', '')[:6]
        apikey = click.prompt('Api key (xmlrpc)', default=hidden_apikey)
        if apikey == hidden_apikey:
            # if default value then use actual value not hidden one
            apikey = cls.get('api.key')
        env_choice = click.Choice(list(cls.apienvs.keys()))
        apienv = click.prompt('Environnment [production]/ote',
                              default=cls.default_apienv,
                              type=env_choice,
                              show_default=False)
        sshkey = click.prompt('SSH keyfile', default='~/.ssh/id_rsa.pub')

        hidden_apikeyrest = '%s...' % cls.get('apirest.key', '')[:6]
        apikeyrest = click.prompt('Api key (REST)', default=hidden_apikeyrest)
        if apikeyrest == hidden_apikeyrest:
            # if default value then use actual value not hidden one
            apikeyrest = cls.get('apirest.key')

        config.update({
            'api': {'key': apikey,
                    'host': cls.apienvs[apienv]},
        })

        if apikeyrest:
            config.update({
                'apirest': {'key': apikeyrest},
            })

        if sshkey is not None:
            sshkey_file = os.path.expanduser(sshkey)
            if os.path.exists(sshkey_file):
                config['sshkey'] = [sshkey_file]

        directory = os.path.expanduser(os.path.dirname(config_file))
        if not os.path.exists(directory):
            mkpath(directory, 0o700)

        # save to disk
        cls.save(config_file, config)
        # load in memory
        cls.load(config_file, 'global')
    except (KeyboardInterrupt, click.exceptions.Abort):
        cls.echo('Aborted.')
        sys.exit(1)
[ "def", "init_config", "(", "cls", ")", ":", "try", ":", "# first load current conf and only overwrite needed params", "# we don't want to reset everything", "config_file", "=", "os", ".", "path", ".", "expanduser", "(", "cls", ".", "home_config", ")", "config", "=", "cls", ".", "load", "(", "config_file", ",", "'global'", ")", "cls", ".", "_del", "(", "'global'", ",", "'api.env'", ")", "hidden_apikey", "=", "'%s...'", "%", "cls", ".", "get", "(", "'api.key'", ",", "''", ")", "[", ":", "6", "]", "apikey", "=", "click", ".", "prompt", "(", "'Api key (xmlrpc)'", ",", "default", "=", "hidden_apikey", ")", "if", "apikey", "==", "hidden_apikey", ":", "# if default value then use actual value not hidden one", "apikey", "=", "cls", ".", "get", "(", "'api.key'", ")", "env_choice", "=", "click", ".", "Choice", "(", "list", "(", "cls", ".", "apienvs", ".", "keys", "(", ")", ")", ")", "apienv", "=", "click", ".", "prompt", "(", "'Environnment [production]/ote'", ",", "default", "=", "cls", ".", "default_apienv", ",", "type", "=", "env_choice", ",", "show_default", "=", "False", ")", "sshkey", "=", "click", ".", "prompt", "(", "'SSH keyfile'", ",", "default", "=", "'~/.ssh/id_rsa.pub'", ")", "hidden_apikeyrest", "=", "'%s...'", "%", "cls", ".", "get", "(", "'apirest.key'", ",", "''", ")", "[", ":", "6", "]", "apikeyrest", "=", "click", ".", "prompt", "(", "'Api key (REST)'", ",", "default", "=", "hidden_apikeyrest", ")", "if", "apikeyrest", "==", "hidden_apikeyrest", ":", "# if default value then use actual value not hidden one", "apikeyrest", "=", "cls", ".", "get", "(", "'apirest.key'", ")", "config", ".", "update", "(", "{", "'api'", ":", "{", "'key'", ":", "apikey", ",", "'host'", ":", "cls", ".", "apienvs", "[", "apienv", "]", "}", ",", "}", ")", "if", "apikeyrest", ":", "config", ".", "update", "(", "{", "'apirest'", ":", "{", "'key'", ":", "apikeyrest", "}", ",", "}", ")", "if", "sshkey", "is", "not", "None", ":", "sshkey_file", "=", "os", ".", "path", ".", "expanduser", "(", "sshkey", ")", "if", "os", ".", "path", ".", "exists", "(", "sshkey_file", ")", ":", "config", "[", "'sshkey'", "]", "=", "[", "sshkey_file", "]", "directory", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "dirname", "(", "config_file", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "mkpath", "(", "directory", ",", "0o700", ")", "# save to disk", "cls", ".", "save", "(", "config_file", ",", "config", ")", "# load in memory", "cls", ".", "load", "(", "config_file", ",", "'global'", ")", "except", "(", "KeyboardInterrupt", ",", "click", ".", "exceptions", ".", "Abort", ")", ":", "cls", ".", "echo", "(", "'Aborted.'", ")", "sys", ".", "exit", "(", "1", ")" ]
40.913793
0.000823
def setChecked(self, column, state):
    """
    Sets the check state of the inputed column based on the given bool state.
    This is a convenience method on top of the setCheckState method.

    :param      column | <int>
                state | <bool>
    """
    self.setCheckState(column,
                       QtCore.Qt.Checked if state else QtCore.Qt.Unchecked)
[ "def", "setChecked", "(", "self", ",", "column", ",", "state", ")", ":", "self", ".", "setCheckState", "(", "column", ",", "QtCore", ".", "Qt", ".", "Checked", "if", "state", "else", "QtCore", ".", "Qt", ".", "Unchecked", ")" ]
39.2
0.009975
def _run_command_in_extended_path(syslog_ng_sbin_dir, command, params):
    '''
    Runs the specified command with the syslog_ng_sbin_dir in the PATH
    '''
    orig_path = os.environ.get('PATH', '')
    env = None
    if syslog_ng_sbin_dir:
        # Custom environment variables should be str types. This code
        # normalizes the paths to unicode to join them together, and then
        # converts back to a str type.
        env = {
            str('PATH'): salt.utils.stringutils.to_str(  # future lint: disable=blacklisted-function
                os.pathsep.join(
                    salt.utils.data.decode(
                        (orig_path, syslog_ng_sbin_dir)
                    )
                )
            )
        }
    return _run_command(command, options=params, env=env)
[ "def", "_run_command_in_extended_path", "(", "syslog_ng_sbin_dir", ",", "command", ",", "params", ")", ":", "orig_path", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", "env", "=", "None", "if", "syslog_ng_sbin_dir", ":", "# Custom environment variables should be str types. This code", "# normalizes the paths to unicode to join them together, and then", "# converts back to a str type.", "env", "=", "{", "str", "(", "'PATH'", ")", ":", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "# future lint: disable=blacklisted-function", "os", ".", "pathsep", ".", "join", "(", "salt", ".", "utils", ".", "data", ".", "decode", "(", "(", "orig_path", ",", "syslog_ng_sbin_dir", ")", ")", ")", ")", "}", "return", "_run_command", "(", "command", ",", "options", "=", "params", ",", "env", "=", "env", ")" ]
38.95
0.002506
def fit(self, X, y=None, **fit_params):
    """Fits the inverse covariance model according to the given training
    data and parameters.

    Parameters
    -----------
    X : 2D ndarray, shape (n_features, n_features)
        Input data.

    Returns
    -------
    self
    """
    # quic-specific outputs
    self.opt_ = None
    self.cputime_ = None
    self.iters_ = None
    self.duality_gap_ = None

    # these must be updated upon self.fit()
    self.sample_covariance_ = None
    self.lam_scale_ = None
    self.is_fitted_ = False

    self.path_ = _validate_path(self.path)
    X = check_array(X, ensure_min_features=2, estimator=self)
    X = as_float_array(X, copy=False, force_all_finite=False)
    self.init_coefs(X)
    if self.method == "quic":
        (
            self.precision_,
            self.covariance_,
            self.opt_,
            self.cputime_,
            self.iters_,
            self.duality_gap_,
        ) = quic(
            self.sample_covariance_,
            self.lam * self.lam_scale_,
            mode=self.mode,
            tol=self.tol,
            max_iter=self.max_iter,
            Theta0=self.Theta0,
            Sigma0=self.Sigma0,
            path=self.path_,
            msg=self.verbose,
        )
    else:
        raise NotImplementedError("Only method='quic' has been implemented.")

    self.is_fitted_ = True
    return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "fit_params", ")", ":", "# quic-specific outputs", "self", ".", "opt_", "=", "None", "self", ".", "cputime_", "=", "None", "self", ".", "iters_", "=", "None", "self", ".", "duality_gap_", "=", "None", "# these must be updated upon self.fit()", "self", ".", "sample_covariance_", "=", "None", "self", ".", "lam_scale_", "=", "None", "self", ".", "is_fitted_", "=", "False", "self", ".", "path_", "=", "_validate_path", "(", "self", ".", "path", ")", "X", "=", "check_array", "(", "X", ",", "ensure_min_features", "=", "2", ",", "estimator", "=", "self", ")", "X", "=", "as_float_array", "(", "X", ",", "copy", "=", "False", ",", "force_all_finite", "=", "False", ")", "self", ".", "init_coefs", "(", "X", ")", "if", "self", ".", "method", "==", "\"quic\"", ":", "(", "self", ".", "precision_", ",", "self", ".", "covariance_", ",", "self", ".", "opt_", ",", "self", ".", "cputime_", ",", "self", ".", "iters_", ",", "self", ".", "duality_gap_", ",", ")", "=", "quic", "(", "self", ".", "sample_covariance_", ",", "self", ".", "lam", "*", "self", ".", "lam_scale_", ",", "mode", "=", "self", ".", "mode", ",", "tol", "=", "self", ".", "tol", ",", "max_iter", "=", "self", ".", "max_iter", ",", "Theta0", "=", "self", ".", "Theta0", ",", "Sigma0", "=", "self", ".", "Sigma0", ",", "path", "=", "self", ".", "path_", ",", "msg", "=", "self", ".", "verbose", ",", ")", "else", ":", "raise", "NotImplementedError", "(", "\"Only method='quic' has been implemented.\"", ")", "self", ".", "is_fitted_", "=", "True", "return", "self" ]
29.269231
0.001907
def query_all(**kwargs):
    '''
    query all the posts.
    '''
    kind = kwargs.get('kind', '1')
    limit = kwargs.get('limit', 10)
    return TabPost.select().where(
        (TabPost.kind == kind) &
        (TabPost.valid == 1)
    ).order_by(
        TabPost.time_update.desc()
    ).limit(limit)
[ "def", "query_all", "(", "*", "*", "kwargs", ")", ":", "kind", "=", "kwargs", ".", "get", "(", "'kind'", ",", "'1'", ")", "limit", "=", "kwargs", ".", "get", "(", "'limit'", ",", "10", ")", "return", "TabPost", ".", "select", "(", ")", ".", "where", "(", "(", "TabPost", ".", "kind", "==", "kind", ")", "&", "(", "TabPost", ".", "valid", "==", "1", ")", ")", ".", "order_by", "(", "TabPost", ".", "time_update", ".", "desc", "(", ")", ")", ".", "limit", "(", "limit", ")" ]
25.846154
0.005747
def get_option(self, key):
    """Return the current value of the option `key` (string).

    Instance method, only refers to current instance."""
    return self._options.get(key, self._default_options[key])
[ "def", "get_option", "(", "self", ",", "key", ")", ":", "return", "self", ".", "_options", ".", "get", "(", "key", ",", "self", ".", "_default_options", "[", "key", "]", ")" ]
43.2
0.009091
def transform_image(self, content_metadata_item):
    """
    Return the image URI of the content item.
    """
    image_url = ''
    if content_metadata_item['content_type'] in ['course', 'program']:
        image_url = content_metadata_item.get('card_image_url')
    elif content_metadata_item['content_type'] == 'courserun':
        image_url = content_metadata_item.get('image_url')

    return image_url
[ "def", "transform_image", "(", "self", ",", "content_metadata_item", ")", ":", "image_url", "=", "''", "if", "content_metadata_item", "[", "'content_type'", "]", "in", "[", "'course'", ",", "'program'", "]", ":", "image_url", "=", "content_metadata_item", ".", "get", "(", "'card_image_url'", ")", "elif", "content_metadata_item", "[", "'content_type'", "]", "==", "'courserun'", ":", "image_url", "=", "content_metadata_item", ".", "get", "(", "'image_url'", ")", "return", "image_url" ]
39.545455
0.004494
def refresh(self):
    """!
    \~english
    Update current view content to display
    Supported: JMRPiDisplay_SSD1306 and Adafruit SSD1306 driver

    \~chinese
    更新当前视图内容到显示屏
    支持: JMRPiDisplay_SSD1306 和 Adafruit SSD1306 driver
    """
    try:
        # suport for RPiDisplay SSD1306 driver
        self.Display.setImage( self._catchCurrentViewContent() )
    except:
        try:
            # suport for Adafruit SSD1306 driver
            self.Display.image( self._catchCurrentViewContent() )
        except:
            raise "Can not update image to buffer."

    self.Display.display()
[ "def", "refresh", "(", "self", ")", ":", "try", ":", "# suport for RPiDisplay SSD1306 driver", "self", ".", "Display", ".", "setImage", "(", "self", ".", "_catchCurrentViewContent", "(", ")", ")", "except", ":", "try", ":", "# suport for Adafruit SSD1306 driver", "self", ".", "Display", ".", "image", "(", "self", ".", "_catchCurrentViewContent", "(", ")", ")", "except", ":", "raise", "\"Can not update image to buffer.\"", "self", ".", "Display", ".", "display", "(", ")" ]
31.047619
0.014881
def _prepare_deprecation_data(self):
    """
    Cycles through the list of AppSettingDeprecation instances set on
    ``self.deprecations`` and prepulates two new dictionary attributes:

    ``self._deprecated_settings``:
        Uses the deprecated setting names themselves as the keys. Used to
        check whether a request is for a deprecated setting.

    ``self._renamed_settings``:
        Uses the 'replacement setting' names as keys (where supplied). Used
        to allow the helper to temporarily support override settings defined
        using the old name, when the values for the new setting are requested.
    """
    if not isinstance(self.deprecations, (list, tuple)):
        raise IncorrectDeprecationsValueType(
            "'deprecations' must be a list or tuple, not a {}."
            .format(type(self.deprecations).__name__)
        )

    self._deprecated_settings = {}
    self._replacement_settings = defaultdict(list)

    for item in self.deprecations:
        item.prefix = self.get_prefix()

        if not self.in_defaults(item.setting_name):
            raise InvalidDeprecationDefinition(
                "There is an issue with one of your setting deprecation "
                "definitions. '{setting_name}' could not be found in "
                "{defaults_module_path}. Please ensure a default value "
                "remains there until the end of the setting's deprecation "
                "period.".format(
                    setting_name=item.setting_name,
                    defaults_module_path=self._defaults_module_path,
                )
            )

        if item.setting_name in self._deprecated_settings:
            raise DuplicateDeprecationError(
                "The setting name for each deprecation definition must be "
                "unique, but '{setting_name}' has been used more than once "
                "for {helper_class}.".format(
                    setting_name=item.setting_name,
                    helper_class=self.__class__.__name__,
                )
            )

        self._deprecated_settings[item.setting_name] = item

        if item.replacement_name:
            if not self.in_defaults(item.replacement_name):
                raise InvalidDeprecationDefinition(
                    "There is an issue with one of your settings "
                    "deprecation definitions. '{replacement_name}' is not "
                    "a valid replacement for '{setting_name}', as no such "
                    "value can be found in {defaults_module_path}."
                    .format(
                        replacement_name=item.replacement_name,
                        setting_name=item.setting_name,
                        defaults_module_path=self._defaults_module_path,
                    )
                )
            self._replacement_settings[item.replacement_name].append(item)
[ "def", "_prepare_deprecation_data", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "deprecations", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "IncorrectDeprecationsValueType", "(", "\"'deprecations' must be a list or tuple, not a {}.\"", ".", "format", "(", "type", "(", "self", ".", "deprecations", ")", ".", "__name__", ")", ")", "self", ".", "_deprecated_settings", "=", "{", "}", "self", ".", "_replacement_settings", "=", "defaultdict", "(", "list", ")", "for", "item", "in", "self", ".", "deprecations", ":", "item", ".", "prefix", "=", "self", ".", "get_prefix", "(", ")", "if", "not", "self", ".", "in_defaults", "(", "item", ".", "setting_name", ")", ":", "raise", "InvalidDeprecationDefinition", "(", "\"There is an issue with one of your setting deprecation \"", "\"definitions. '{setting_name}' could not be found in \"", "\"{defaults_module_path}. Please ensure a default value \"", "\"remains there until the end of the setting's deprecation \"", "\"period.\"", ".", "format", "(", "setting_name", "=", "item", ".", "setting_name", ",", "defaults_module_path", "=", "self", ".", "_defaults_module_path", ",", ")", ")", "if", "item", ".", "setting_name", "in", "self", ".", "_deprecated_settings", ":", "raise", "DuplicateDeprecationError", "(", "\"The setting name for each deprecation definition must be \"", "\"unique, but '{setting_name}' has been used more than once \"", "\"for {helper_class}.\"", ".", "format", "(", "setting_name", "=", "item", ".", "setting_name", ",", "helper_class", "=", "self", ".", "__class__", ".", "__name__", ",", ")", ")", "self", ".", "_deprecated_settings", "[", "item", ".", "setting_name", "]", "=", "item", "if", "item", ".", "replacement_name", ":", "if", "not", "self", ".", "in_defaults", "(", "item", ".", "replacement_name", ")", ":", "raise", "InvalidDeprecationDefinition", "(", "\"There is an issue with one of your settings \"", "\"deprecation definitions. '{replacement_name}' is not \"", "\"a valid replacement for '{setting_name}', as no such \"", "\"value can be found in {defaults_module_path}.\"", ".", "format", "(", "replacement_name", "=", "item", ".", "replacement_name", ",", "setting_name", "=", "item", ".", "setting_name", ",", "defaults_module_path", "=", "self", ".", "_defaults_module_path", ",", ")", ")", "self", ".", "_replacement_settings", "[", "item", ".", "replacement_name", "]", ".", "append", "(", "item", ")" ]
45.552239
0.000962
def warnpy3k(message, category=None, stacklevel=1):
    """Issue a deprecation warning for Python 3.x related changes.

    Warnings are omitted unless Python is started with the -3 option.
    """
    if sys.py3kwarning:
        if category is None:
            category = DeprecationWarning
        warn(message, category, stacklevel+1)
[ "def", "warnpy3k", "(", "message", ",", "category", "=", "None", ",", "stacklevel", "=", "1", ")", ":", "if", "sys", ".", "py3kwarning", ":", "if", "category", "is", "None", ":", "category", "=", "DeprecationWarning", "warn", "(", "message", ",", "category", ",", "stacklevel", "+", "1", ")" ]
36.666667
0.002959
def stop_service(conn, service='ceph'):
    """
    Stop a service on a remote host depending on the type of init system.
    Obviously, this should be done for RHEL/Fedora/CentOS systems.

    This function does not do any kind of detection.
    """
    if is_systemd(conn):
        # Without the check, an error is raised trying to stop an
        # already stopped service
        if is_systemd_service_active(conn, service):
            remoto.process.run(
                conn,
                [
                    'systemctl',
                    'stop',
                    '{service}'.format(service=service),
                ]
            )
[ "def", "stop_service", "(", "conn", ",", "service", "=", "'ceph'", ")", ":", "if", "is_systemd", "(", "conn", ")", ":", "# Without the check, an error is raised trying to stop an", "# already stopped service", "if", "is_systemd_service_active", "(", "conn", ",", "service", ")", ":", "remoto", ".", "process", ".", "run", "(", "conn", ",", "[", "'systemctl'", ",", "'stop'", ",", "'{service}'", ".", "format", "(", "service", "=", "service", ")", ",", "]", ")" ]
33.263158
0.001538
def _read_gtf(gtf):
    """
    Load GTF file with precursor positions on genome
    """
    if not gtf:
        return gtf
    db = defaultdict(list)
    with open(gtf) as in_handle:
        for line in in_handle:
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            name = [n.split("=")[1] for n in cols[-1].split(";")
                    if n.startswith("Name")]
            chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]
            if cols[2] == "miRNA_primary_transcript":
                db[name[0]].append([chrom, int(start), int(end), strand])
    return db
[ "def", "_read_gtf", "(", "gtf", ")", ":", "if", "not", "gtf", ":", "return", "gtf", "db", "=", "defaultdict", "(", "list", ")", "with", "open", "(", "gtf", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "cols", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "name", "=", "[", "n", ".", "split", "(", "\"=\"", ")", "[", "1", "]", "for", "n", "in", "cols", "[", "-", "1", "]", ".", "split", "(", "\";\"", ")", "if", "n", ".", "startswith", "(", "\"Name\"", ")", "]", "chrom", ",", "start", ",", "end", ",", "strand", "=", "cols", "[", "0", "]", ",", "cols", "[", "3", "]", ",", "cols", "[", "4", "]", ",", "cols", "[", "6", "]", "if", "cols", "[", "2", "]", "==", "\"miRNA_primary_transcript\"", ":", "db", "[", "name", "[", "0", "]", "]", ".", "append", "(", "[", "chrom", ",", "int", "(", "start", ")", ",", "int", "(", "end", ")", ",", "strand", "]", ")", "return", "db" ]
35.941176
0.00319
def bind(self, handler, argspec):
    """

    :param handler: a function with
    :param argspec:
    :return:
    """
    self.handlers[argspec.key].append((handler, argspec))
[ "def", "bind", "(", "self", ",", "handler", ",", "argspec", ")", ":", "self", ".", "handlers", "[", "argspec", ".", "key", "]", ".", "append", "(", "(", "handler", ",", "argspec", ")", ")" ]
27.714286
0.01
def __CheckAndUnifyQueryFormat(self, query_body):
    """Checks and unifies the format of the query body.

    :raises TypeError: If query_body is not of expected type (depending on the query compatibility mode).
    :raises ValueError: If query_body is a dict but doesn\'t have valid query text.
    :raises SystemError: If the query compatibility mode is undefined.

    :param (str or dict) query_body:

    :return:
        The formatted query body.
    :rtype:
        dict or string
    """
    if (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Default or
            self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Query):
        if not isinstance(query_body, dict) and not isinstance(query_body, six.string_types):
            raise TypeError('query body must be a dict or string.')
        if isinstance(query_body, dict) and not query_body.get('query'):
            raise ValueError('query body must have valid query text with key "query".')
        if isinstance(query_body, six.string_types):
            return {'query': query_body}
    elif (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.SqlQuery and
          not isinstance(query_body, six.string_types)):
        raise TypeError('query body must be a string.')
    else:
        raise SystemError('Unexpected query compatibility mode.')

    return query_body
[ "def", "__CheckAndUnifyQueryFormat", "(", "self", ",", "query_body", ")", ":", "if", "(", "self", ".", "_query_compatibility_mode", "==", "CosmosClient", ".", "_QueryCompatibilityMode", ".", "Default", "or", "self", ".", "_query_compatibility_mode", "==", "CosmosClient", ".", "_QueryCompatibilityMode", ".", "Query", ")", ":", "if", "not", "isinstance", "(", "query_body", ",", "dict", ")", "and", "not", "isinstance", "(", "query_body", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "'query body must be a dict or string.'", ")", "if", "isinstance", "(", "query_body", ",", "dict", ")", "and", "not", "query_body", ".", "get", "(", "'query'", ")", ":", "raise", "ValueError", "(", "'query body must have valid query text with key \"query\".'", ")", "if", "isinstance", "(", "query_body", ",", "six", ".", "string_types", ")", ":", "return", "{", "'query'", ":", "query_body", "}", "elif", "(", "self", ".", "_query_compatibility_mode", "==", "CosmosClient", ".", "_QueryCompatibilityMode", ".", "SqlQuery", "and", "not", "isinstance", "(", "query_body", ",", "six", ".", "string_types", ")", ")", ":", "raise", "TypeError", "(", "'query body must be a string.'", ")", "else", ":", "raise", "SystemError", "(", "'Unexpected query compatibility mode.'", ")", "return", "query_body" ]
50.586207
0.006689
def match(self, expression=None, xpath=None, namespaces=None):
    """decorator that allows us to match by expression or by xpath for each transformation method"""
    class MatchObject(Dict):
        pass

    def _match(function):
        self.matches.append(
            MatchObject(expression=expression, xpath=xpath, function=function, namespaces=namespaces))

        def wrapper(self, *args, **params):
            return function(self, *args, **params)

        return wrapper

    return _match
[ "def", "match", "(", "self", ",", "expression", "=", "None", ",", "xpath", "=", "None", ",", "namespaces", "=", "None", ")", ":", "class", "MatchObject", "(", "Dict", ")", ":", "pass", "def", "_match", "(", "function", ")", ":", "self", ".", "matches", ".", "append", "(", "MatchObject", "(", "expression", "=", "expression", ",", "xpath", "=", "xpath", ",", "function", "=", "function", ",", "namespaces", "=", "namespaces", ")", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "params", ")", ":", "return", "function", "(", "self", ",", "*", "args", ",", "*", "*", "params", ")", "return", "wrapper", "return", "_match" ]
48.181818
0.012963
def find_window_for_buffer_name(cli, buffer_name):
    """
    Look for a :class:`~prompt_toolkit.layout.containers.Window` in the Layout
    that contains the :class:`~prompt_toolkit.layout.controls.BufferControl`
    for the given buffer and return it. If no such Window is found, return None.
    """
    from prompt_toolkit.interface import CommandLineInterface
    assert isinstance(cli, CommandLineInterface)

    from .containers import Window
    from .controls import BufferControl

    for l in cli.layout.walk(cli):
        if isinstance(l, Window) and isinstance(l.content, BufferControl):
            if l.content.buffer_name == buffer_name:
                return l
[ "def", "find_window_for_buffer_name", "(", "cli", ",", "buffer_name", ")", ":", "from", "prompt_toolkit", ".", "interface", "import", "CommandLineInterface", "assert", "isinstance", "(", "cli", ",", "CommandLineInterface", ")", "from", ".", "containers", "import", "Window", "from", ".", "controls", "import", "BufferControl", "for", "l", "in", "cli", ".", "layout", ".", "walk", "(", "cli", ")", ":", "if", "isinstance", "(", "l", ",", "Window", ")", "and", "isinstance", "(", "l", ".", "content", ",", "BufferControl", ")", ":", "if", "l", ".", "content", ".", "buffer_name", "==", "buffer_name", ":", "return", "l" ]
41.5
0.004418
def deserialize(stream_or_string, **options):
    '''
    Deserialize any string or stream like object into a Python data structure.

    :param stream_or_string: stream or string to deserialize.
    :param options: options given to lower configparser module.
    '''
    if six.PY3:
        cp = configparser.ConfigParser(**options)
    else:
        cp = configparser.SafeConfigParser(**options)

    try:
        if not isinstance(stream_or_string, (bytes, six.string_types)):
            if six.PY3:
                cp.read_file(stream_or_string)
            else:
                cp.readfp(stream_or_string)
        else:
            if six.PY3:
                cp.read_file(six.moves.StringIO(stream_or_string))
            else:
                # python2's ConfigParser cannot parse a config from a string
                cp.readfp(six.moves.StringIO(stream_or_string))

        data = {}
        for section_name in cp.sections():
            section = {}
            for k, v in cp.items(section_name):
                section[k] = v
            data[section_name] = section
        return data
    except Exception as error:
        raise DeserializationError(error)
[ "def", "deserialize", "(", "stream_or_string", ",", "*", "*", "options", ")", ":", "if", "six", ".", "PY3", ":", "cp", "=", "configparser", ".", "ConfigParser", "(", "*", "*", "options", ")", "else", ":", "cp", "=", "configparser", ".", "SafeConfigParser", "(", "*", "*", "options", ")", "try", ":", "if", "not", "isinstance", "(", "stream_or_string", ",", "(", "bytes", ",", "six", ".", "string_types", ")", ")", ":", "if", "six", ".", "PY3", ":", "cp", ".", "read_file", "(", "stream_or_string", ")", "else", ":", "cp", ".", "readfp", "(", "stream_or_string", ")", "else", ":", "if", "six", ".", "PY3", ":", "cp", ".", "read_file", "(", "six", ".", "moves", ".", "StringIO", "(", "stream_or_string", ")", ")", "else", ":", "# python2's ConfigParser cannot parse a config from a string", "cp", ".", "readfp", "(", "six", ".", "moves", ".", "StringIO", "(", "stream_or_string", ")", ")", "data", "=", "{", "}", "for", "section_name", "in", "cp", ".", "sections", "(", ")", ":", "section", "=", "{", "}", "for", "k", ",", "v", "in", "cp", ".", "items", "(", "section_name", ")", ":", "section", "[", "k", "]", "=", "v", "data", "[", "section_name", "]", "=", "section", "return", "data", "except", "Exception", "as", "error", ":", "raise", "DeserializationError", "(", "error", ")" ]
33.617647
0.00085
def parse_event(data, attendees=None, photos=None):
    """
    Parse a ``MeetupEvent`` from the given response data.

    Returns
    -------
    A ``pythonkc_meetups.types.MeetupEvent``.
    """
    return MeetupEvent(
        id=data.get('id', None),
        name=data.get('name', None),
        description=data.get('description', None),
        time=parse_datetime(data.get('time', None),
                            data.get('utc_offset', None)),
        status=data.get('status', None),
        yes_rsvp_count=data.get('yes_rsvp_count', None),
        maybe_rsvp_count=data.get('maybe_rsvp_count', None),
        event_url=data.get('event_url', None),
        photo_url=data.get('photo_url', None),
        venue=parse_venue(data['venue']) if 'venue' in data else None,
        attendees=attendees,
        photos=photos
    )
[ "def", "parse_event", "(", "data", ",", "attendees", "=", "None", ",", "photos", "=", "None", ")", ":", "return", "MeetupEvent", "(", "id", "=", "data", ".", "get", "(", "'id'", ",", "None", ")", ",", "name", "=", "data", ".", "get", "(", "'name'", ",", "None", ")", ",", "description", "=", "data", ".", "get", "(", "'description'", ",", "None", ")", ",", "time", "=", "parse_datetime", "(", "data", ".", "get", "(", "'time'", ",", "None", ")", ",", "data", ".", "get", "(", "'utc_offset'", ",", "None", ")", ")", ",", "status", "=", "data", ".", "get", "(", "'status'", ",", "None", ")", ",", "yes_rsvp_count", "=", "data", ".", "get", "(", "'yes_rsvp_count'", ",", "None", ")", ",", "maybe_rsvp_count", "=", "data", ".", "get", "(", "'maybe_rsvp_count'", ",", "None", ")", ",", "event_url", "=", "data", ".", "get", "(", "'event_url'", ",", "None", ")", ",", "photo_url", "=", "data", ".", "get", "(", "'photo_url'", ",", "None", ")", ",", "venue", "=", "parse_venue", "(", "data", "[", "'venue'", "]", ")", "if", "'venue'", "in", "data", "else", "None", ",", "attendees", "=", "attendees", ",", "photos", "=", "photos", ")" ]
33.791667
0.001199
def plotIndividual(self):
    '''
    Plot the individual
    '''
    pl.plot(self.x_int, self.y_int)
    pl.grid(True)
    pl.show()
[ "def", "plotIndividual", "(", "self", ")", ":", "pl", ".", "plot", "(", "self", ".", "x_int", ",", "self", ".", "y_int", ")", "pl", ".", "grid", "(", "True", ")", "pl", ".", "show", "(", ")" ]
16.428571
0.066116
def __shxRecords(self):
    """Writes the shx records."""
    f = self.__getFileObj(self.shx)
    f.seek(100)
    for i in range(len(self._shapes)):
        f.write(pack(">i", self._offsets[i] // 2))
        f.write(pack(">i", self._lengths[i]))
[ "def", "__shxRecords", "(", "self", ")", ":", "f", "=", "self", ".", "__getFileObj", "(", "self", ".", "shx", ")", "f", ".", "seek", "(", "100", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_shapes", ")", ")", ":", "f", ".", "write", "(", "pack", "(", "\">i\"", ",", "self", ".", "_offsets", "[", "i", "]", "//", "2", ")", ")", "f", ".", "write", "(", "pack", "(", "\">i\"", ",", "self", ".", "_lengths", "[", "i", "]", ")", ")" ]
38.428571
0.007273
def encrypt(self, sa, esp, key):
    """
    Encrypt an ESP packet

    @param sa: the SecurityAssociation associated with the ESP packet.
    @param esp: an unencrypted _ESPPlain packet with valid padding
    @param key: the secret key used for encryption

    @return: a valid ESP packet encrypted with this algorithm
    """
    data = esp.data_for_encryption()

    if self.cipher:
        mode_iv = self._format_mode_iv(algo=self, sa=sa, iv=esp.iv)
        cipher = self.new_cipher(key, mode_iv)
        encryptor = cipher.encryptor()

        if self.is_aead:
            aad = struct.pack('!LL', esp.spi, esp.seq)
            encryptor.authenticate_additional_data(aad)
            data = encryptor.update(data) + encryptor.finalize()
            data += encryptor.tag[:self.icv_size]
        else:
            data = encryptor.update(data) + encryptor.finalize()

    return ESP(spi=esp.spi, seq=esp.seq, data=esp.iv + data)
[ "def", "encrypt", "(", "self", ",", "sa", ",", "esp", ",", "key", ")", ":", "data", "=", "esp", ".", "data_for_encryption", "(", ")", "if", "self", ".", "cipher", ":", "mode_iv", "=", "self", ".", "_format_mode_iv", "(", "algo", "=", "self", ",", "sa", "=", "sa", ",", "iv", "=", "esp", ".", "iv", ")", "cipher", "=", "self", ".", "new_cipher", "(", "key", ",", "mode_iv", ")", "encryptor", "=", "cipher", ".", "encryptor", "(", ")", "if", "self", ".", "is_aead", ":", "aad", "=", "struct", ".", "pack", "(", "'!LL'", ",", "esp", ".", "spi", ",", "esp", ".", "seq", ")", "encryptor", ".", "authenticate_additional_data", "(", "aad", ")", "data", "=", "encryptor", ".", "update", "(", "data", ")", "+", "encryptor", ".", "finalize", "(", ")", "data", "+=", "encryptor", ".", "tag", "[", ":", "self", ".", "icv_size", "]", "else", ":", "data", "=", "encryptor", ".", "update", "(", "data", ")", "+", "encryptor", ".", "finalize", "(", ")", "return", "ESP", "(", "spi", "=", "esp", ".", "spi", ",", "seq", "=", "esp", ".", "seq", ",", "data", "=", "esp", ".", "iv", "+", "data", ")" ]
38.230769
0.001963
def make_dynamic_fields(pattern_module, dynamic_field_patterns, attrs):
    """Add some Salesforce fields from a pattern_module models.py

    Parameters:
        pattern_module: Module where to search additional fields settings.
            It is an imported module created by introspection (inspectdb),
            usually named `models_template.py`. (You will probably not add it
            to version control for you because the diffs are frequent and huge.)
        dynamic_field_patterns: List of regular expression for Salesforce
            field names that should be included automatically into the model.
        attrs: Input/Output dictionary of model attributes. (no need to worry,
            added automatically)

    The patterns are applied sequentionally. If the pattern starts with "-"
    then the matched names are excluded. The search stops after the first
    match. A normal field that exists directly in a class is never rewritten
    by a dynamic field.. All ForeingKey fields should be created explicitely.
    (For now to prevent possible issues and also for better readibility of
    the model. The automatic "dynamic" fields are intended especially for
    "maybe can be useful" fields and will work with ForeignKey in simple
    cases, e.g. without Proxy models etc. Works good for me.)

    This is useful for development: Many fields or all fields can be easily
    accessed by the model without a huge code. Finally all wildcard fields
    except the explicit names can be removed when the development is ready
    or . If you create migrations, you probably want to disable
    "dynamic_field_patterns" by setting them empty.

    Example:
        Meta:
            db_table = 'Contact'
            dynamic_patterns = exported.models, ['Last', '.*Date$']
    """
    # pylint:disable=invalid-name,too-many-branches,too-many-locals
    import re
    attr_meta = attrs['Meta']
    db_table = getattr(attr_meta, 'db_table', None)
    if not db_table:
        raise RuntimeError('The "db_table" must be set in Meta if "dynamic_field_patterns" is used.')
    is_custom_model = getattr(attr_meta, 'custom', False)

    patterns = []
    for pat in dynamic_field_patterns:
        enabled = True
        if pat.startswith('-'):
            enabled = False
            pat = pat[1:]
        patterns.append((enabled, re.compile(r'^(?:{})$'.format(pat), re.I)))

    used_columns = []
    for name, attr in attrs.items():
        if isinstance(attr, SfField):
            field = attr
            if field.sf_custom is None and is_custom_model:
                field.sf_custom = True
            if not field.name:
                field.name = name
            attname, column = field.get_attname_column()  # pylint:disable=unused-variable
            used_columns.append(column)

    if not pattern_module:
        raise RuntimeError("a pattern_module is required for dynamic fields.")
    for name, obj in vars(pattern_module).items():
        if not name.startswith('_') and isclass(obj) and issubclass(obj, ModelTemplate):
            default_table = obj.__name__
            if getattr(getattr(obj, 'Meta', None), 'db_table', default_table) == db_table:
                cls = obj
                break
    else:
        # not found db_table model, but decide between warning or exception
        if any(not x.startswith('__') for x in dir(pattern_module)):
            raise RuntimeError("No Model for table '%s' found in the module '%s'"
                               % (db_table, pattern_module.__name__))
        warnings.warn("The module '%s' is empty. (It is OK if you are "
                      "rewriting new Models by pipe from inspectdb command.)"
                      % pattern_module.__name__)
        return
    lazy_fields = [(name, obj) for name, obj in vars(cls).items()
                   if isinstance(obj, LazyField) and issubclass(obj.klass, SfField)
                   ]
    for name, obj in sorted(lazy_fields, key=lambda name_obj: name_obj[1].counter):
        for enabled, pat in patterns:
            if pat.match(name):
                break
        else:
            enabled = False
        if enabled:
            if issubclass(obj.klass, ForeignKey):
                to = obj.kw['to']
                if isclass(to) and issubclass(to, ModelTemplate):
                    obj.kw['to'] = to.__name__
            field = obj.create()
            attrs[name] = field
    assert pattern_module
[ "def", "make_dynamic_fields", "(", "pattern_module", ",", "dynamic_field_patterns", ",", "attrs", ")", ":", "# pylint:disable=invalid-name,too-many-branches,too-many-locals", "import", "re", "attr_meta", "=", "attrs", "[", "'Meta'", "]", "db_table", "=", "getattr", "(", "attr_meta", ",", "'db_table'", ",", "None", ")", "if", "not", "db_table", ":", "raise", "RuntimeError", "(", "'The \"db_table\" must be set in Meta if \"dynamic_field_patterns\" is used.'", ")", "is_custom_model", "=", "getattr", "(", "attr_meta", ",", "'custom'", ",", "False", ")", "patterns", "=", "[", "]", "for", "pat", "in", "dynamic_field_patterns", ":", "enabled", "=", "True", "if", "pat", ".", "startswith", "(", "'-'", ")", ":", "enabled", "=", "False", "pat", "=", "pat", "[", "1", ":", "]", "patterns", ".", "append", "(", "(", "enabled", ",", "re", ".", "compile", "(", "r'^(?:{})$'", ".", "format", "(", "pat", ")", ",", "re", ".", "I", ")", ")", ")", "used_columns", "=", "[", "]", "for", "name", ",", "attr", "in", "attrs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "attr", ",", "SfField", ")", ":", "field", "=", "attr", "if", "field", ".", "sf_custom", "is", "None", "and", "is_custom_model", ":", "field", ".", "sf_custom", "=", "True", "if", "not", "field", ".", "name", ":", "field", ".", "name", "=", "name", "attname", ",", "column", "=", "field", ".", "get_attname_column", "(", ")", "# pylint:disable=unused-variable", "used_columns", ".", "append", "(", "column", ")", "if", "not", "pattern_module", ":", "raise", "RuntimeError", "(", "\"a pattern_module is required for dynamic fields.\"", ")", "for", "name", ",", "obj", "in", "vars", "(", "pattern_module", ")", ".", "items", "(", ")", ":", "if", "not", "name", ".", "startswith", "(", "'_'", ")", "and", "isclass", "(", "obj", ")", "and", "issubclass", "(", "obj", ",", "ModelTemplate", ")", ":", "default_table", "=", "obj", ".", "__name__", "if", "getattr", "(", "getattr", "(", "obj", ",", "'Meta'", ",", "None", ")", ",", "'db_table'", ",", "default_table", ")", "==", "db_table", ":", "cls", "=", "obj", "break", "else", ":", "# not found db_table model, but decide between warning or exception", "if", "any", "(", "not", "x", ".", "startswith", "(", "'__'", ")", "for", "x", "in", "dir", "(", "pattern_module", ")", ")", ":", "raise", "RuntimeError", "(", "\"No Model for table '%s' found in the module '%s'\"", "%", "(", "db_table", ",", "pattern_module", ".", "__name__", ")", ")", "warnings", ".", "warn", "(", "\"The module '%s' is empty. 
(It is OK if you are \"", "\"rewriting new Models by pipe from inspectdb command.)\"", "%", "pattern_module", ".", "__name__", ")", "return", "lazy_fields", "=", "[", "(", "name", ",", "obj", ")", "for", "name", ",", "obj", "in", "vars", "(", "cls", ")", ".", "items", "(", ")", "if", "isinstance", "(", "obj", ",", "LazyField", ")", "and", "issubclass", "(", "obj", ".", "klass", ",", "SfField", ")", "]", "for", "name", ",", "obj", "in", "sorted", "(", "lazy_fields", ",", "key", "=", "lambda", "name_obj", ":", "name_obj", "[", "1", "]", ".", "counter", ")", ":", "for", "enabled", ",", "pat", "in", "patterns", ":", "if", "pat", ".", "match", "(", "name", ")", ":", "break", "else", ":", "enabled", "=", "False", "if", "enabled", ":", "if", "issubclass", "(", "obj", ".", "klass", ",", "ForeignKey", ")", ":", "to", "=", "obj", ".", "kw", "[", "'to'", "]", "if", "isclass", "(", "to", ")", "and", "issubclass", "(", "to", ",", "ModelTemplate", ")", ":", "obj", ".", "kw", "[", "'to'", "]", "=", "to", ".", "__name__", "field", "=", "obj", ".", "create", "(", ")", "attrs", "[", "name", "]", "=", "field", "assert", "pattern_module" ]
45.010309
0.002017
def exchange_code(self, code): """Exchange one-use code for an access_token and request_token.""" params = {'client_id': self.client_id, 'client_secret': self.client_secret, 'grant_type': 'authorization_code', 'code': code} result = self._send_request(EXCHANGE_URL.format(self._base_url), params=params, method='POST', data_field=None) self.access_token = result['access_token'] self.refresh_token = result['refresh_token'] return self.access_token, self.refresh_token
[ "def", "exchange_code", "(", "self", ",", "code", ")", ":", "params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'client_secret'", ":", "self", ".", "client_secret", ",", "'grant_type'", ":", "'authorization_code'", ",", "'code'", ":", "code", "}", "result", "=", "self", ".", "_send_request", "(", "EXCHANGE_URL", ".", "format", "(", "self", ".", "_base_url", ")", ",", "params", "=", "params", ",", "method", "=", "'POST'", ",", "data_field", "=", "None", ")", "self", ".", "access_token", "=", "result", "[", "'access_token'", "]", "self", ".", "refresh_token", "=", "result", "[", "'refresh_token'", "]", "return", "self", ".", "access_token", ",", "self", ".", "refresh_token" ]
52.583333
0.003115
async def close(self): """Close this SAConnection. This results in a release of the underlying database resources, that is, the underlying connection referenced internally. The underlying connection is typically restored back to the connection-holding Pool referenced by the Engine that produced this SAConnection. Any transactional state present on the underlying connection is also unconditionally released via calling Transaction.rollback() method. After .close() is called, the SAConnection is permanently in a closed state, and will allow no further operations. """ if self._connection is None: return if self._transaction is not None: await self._transaction.rollback() self._transaction = None # don't close underlying connection, it can be reused by pool # conn.close() self._engine.release(self) self._connection = None self._engine = None
[ "async", "def", "close", "(", "self", ")", ":", "if", "self", ".", "_connection", "is", "None", ":", "return", "if", "self", ".", "_transaction", "is", "not", "None", ":", "await", "self", ".", "_transaction", ".", "rollback", "(", ")", "self", ".", "_transaction", "=", "None", "# don't close underlying connection, it can be reused by pool", "# conn.close()", "self", ".", "_engine", ".", "release", "(", "self", ")", "self", ".", "_connection", "=", "None", "self", ".", "_engine", "=", "None" ]
40.28
0.00194
def _parse_global_section(self, cfg_handler): """Parse global ([versionner]) section :param cfg_handler: :return: """ # global configuration if 'versionner' in cfg_handler: cfg = cfg_handler['versionner'] if 'file' in cfg: self.version_file = cfg['file'] if 'date_format' in cfg: self.date_format = cfg['date_format'] if 'up_part' in cfg: self.up_part = cfg['up_part'] if 'default_init_version' in cfg: self.default_init_version = cfg['default_init_version'] if 'default_increase_value' in cfg: self.default_increase_value = cfg.getint('default_increase_value')
[ "def", "_parse_global_section", "(", "self", ",", "cfg_handler", ")", ":", "# global configuration", "if", "'versionner'", "in", "cfg_handler", ":", "cfg", "=", "cfg_handler", "[", "'versionner'", "]", "if", "'file'", "in", "cfg", ":", "self", ".", "version_file", "=", "cfg", "[", "'file'", "]", "if", "'date_format'", "in", "cfg", ":", "self", ".", "date_format", "=", "cfg", "[", "'date_format'", "]", "if", "'up_part'", "in", "cfg", ":", "self", ".", "up_part", "=", "cfg", "[", "'up_part'", "]", "if", "'default_init_version'", "in", "cfg", ":", "self", ".", "default_init_version", "=", "cfg", "[", "'default_init_version'", "]", "if", "'default_increase_value'", "in", "cfg", ":", "self", ".", "default_increase_value", "=", "cfg", ".", "getint", "(", "'default_increase_value'", ")" ]
39.157895
0.003937
def print_all(self, out=sys.stdout): """ Prints all of the thread profiler results to a given file. (stdout by default) """ THREAD_FUNC_NAME_LEN = 25 THREAD_NAME_LEN = 13 THREAD_ID_LEN = 15 THREAD_SCHED_CNT_LEN = 10 out.write(CRLF) out.write("name tid ttot scnt") out.write(CRLF) for stat in self: out.write(StatString(stat.name).ltrim(THREAD_NAME_LEN)) out.write(" " * COLUMN_GAP) out.write(StatString(stat.id).rtrim(THREAD_ID_LEN)) out.write(" " * COLUMN_GAP) out.write(StatString(_fft(stat.ttot)).rtrim(TIME_COLUMN_LEN)) out.write(" " * COLUMN_GAP) out.write(StatString(stat.sched_count).rtrim(THREAD_SCHED_CNT_LEN)) out.write(CRLF)
[ "def", "print_all", "(", "self", ",", "out", "=", "sys", ".", "stdout", ")", ":", "THREAD_FUNC_NAME_LEN", "=", "25", "THREAD_NAME_LEN", "=", "13", "THREAD_ID_LEN", "=", "15", "THREAD_SCHED_CNT_LEN", "=", "10", "out", ".", "write", "(", "CRLF", ")", "out", ".", "write", "(", "\"name tid ttot scnt\"", ")", "out", ".", "write", "(", "CRLF", ")", "for", "stat", "in", "self", ":", "out", ".", "write", "(", "StatString", "(", "stat", ".", "name", ")", ".", "ltrim", "(", "THREAD_NAME_LEN", ")", ")", "out", ".", "write", "(", "\" \"", "*", "COLUMN_GAP", ")", "out", ".", "write", "(", "StatString", "(", "stat", ".", "id", ")", ".", "rtrim", "(", "THREAD_ID_LEN", ")", ")", "out", ".", "write", "(", "\" \"", "*", "COLUMN_GAP", ")", "out", ".", "write", "(", "StatString", "(", "_fft", "(", "stat", ".", "ttot", ")", ")", ".", "rtrim", "(", "TIME_COLUMN_LEN", ")", ")", "out", ".", "write", "(", "\" \"", "*", "COLUMN_GAP", ")", "out", ".", "write", "(", "StatString", "(", "stat", ".", "sched_count", ")", ".", "rtrim", "(", "THREAD_SCHED_CNT_LEN", ")", ")", "out", ".", "write", "(", "CRLF", ")" ]
40
0.004651
def item_link(self, item): """ Generates a link for a specific item of the feed. """ return reverse_lazy( 'forum_conversation:topic', kwargs={ 'forum_slug': item.forum.slug, 'forum_pk': item.forum.pk, 'slug': item.slug, 'pk': item.id, }, )
[ "def", "item_link", "(", "self", ",", "item", ")", ":", "return", "reverse_lazy", "(", "'forum_conversation:topic'", ",", "kwargs", "=", "{", "'forum_slug'", ":", "item", ".", "forum", ".", "slug", ",", "'forum_pk'", ":", "item", ".", "forum", ".", "pk", ",", "'slug'", ":", "item", ".", "slug", ",", "'pk'", ":", "item", ".", "id", ",", "}", ",", ")" ]
32.090909
0.00551
def apply_boundary_conditions(self, **kwargs): """Applies any boundary conditions to the given values (e.g., applying cyclic conditions, and/or reflecting values off of boundaries). This is done by running `apply_conditions` of each bounds in self on the corresponding value. See `boundaries.Bounds.apply_conditions` for details. Parameters ---------- \**kwargs : The keyword args should be the name of a parameter and value to apply its boundary conditions to. The arguments need not include all of the parameters in self. Any unrecognized arguments are ignored. Returns ------- dict A dictionary of the parameter names and the conditioned values. """ return dict([[p, self._bounds[p].apply_conditions(val)] for p,val in kwargs.items() if p in self._bounds])
[ "def", "apply_boundary_conditions", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "dict", "(", "[", "[", "p", ",", "self", ".", "_bounds", "[", "p", "]", ".", "apply_conditions", "(", "val", ")", "]", "for", "p", ",", "val", "in", "kwargs", ".", "items", "(", ")", "if", "p", "in", "self", ".", "_bounds", "]", ")" ]
42.045455
0.004228
def received_message(self, m): """Push upstream messages to downstream.""" # TODO: No support for binary messages m = str(m) logger.debug("Incoming upstream WS: %s", m) uwsgi.websocket_send(m) logger.debug("Send ok")
[ "def", "received_message", "(", "self", ",", "m", ")", ":", "# TODO: No support for binary messages", "m", "=", "str", "(", "m", ")", "logger", ".", "debug", "(", "\"Incoming upstream WS: %s\"", ",", "m", ")", "uwsgi", ".", "websocket_send", "(", "m", ")", "logger", ".", "debug", "(", "\"Send ok\"", ")" ]
32.25
0.007547
def join_conn_groups( conns, descs, mat_ids, concat = False ): """Join groups of the same element type.""" el = dict_from_keys_init( descs, list ) for ig, desc in enumerate( descs ): el[desc].append( ig ) groups = [ii for ii in el.values() if ii] ## print el, groups descs_out, conns_out, mat_ids_out = [], [], [] for group in groups: n_ep = conns[group[0]].shape[1] conn = nm.zeros( (0, n_ep), nm.int32 ) mat_id = nm.zeros( (0,), nm.int32 ) for ig in group: conn = nm.concatenate( (conn, conns[ig]) ) mat_id = nm.concatenate( (mat_id, mat_ids[ig]) ) if concat: conn = nm.concatenate( (conn, mat_id[:,nm.newaxis]), 1 ) else: mat_ids_out.append( mat_id ) conns_out.append( conn ) descs_out.append( descs[group[0]] ) if concat: return conns_out, descs_out else: return conns_out, descs_out, mat_ids_out
[ "def", "join_conn_groups", "(", "conns", ",", "descs", ",", "mat_ids", ",", "concat", "=", "False", ")", ":", "el", "=", "dict_from_keys_init", "(", "descs", ",", "list", ")", "for", "ig", ",", "desc", "in", "enumerate", "(", "descs", ")", ":", "el", "[", "desc", "]", ".", "append", "(", "ig", ")", "groups", "=", "[", "ii", "for", "ii", "in", "el", ".", "values", "(", ")", "if", "ii", "]", "## print el, groups", "descs_out", ",", "conns_out", ",", "mat_ids_out", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "group", "in", "groups", ":", "n_ep", "=", "conns", "[", "group", "[", "0", "]", "]", ".", "shape", "[", "1", "]", "conn", "=", "nm", ".", "zeros", "(", "(", "0", ",", "n_ep", ")", ",", "nm", ".", "int32", ")", "mat_id", "=", "nm", ".", "zeros", "(", "(", "0", ",", ")", ",", "nm", ".", "int32", ")", "for", "ig", "in", "group", ":", "conn", "=", "nm", ".", "concatenate", "(", "(", "conn", ",", "conns", "[", "ig", "]", ")", ")", "mat_id", "=", "nm", ".", "concatenate", "(", "(", "mat_id", ",", "mat_ids", "[", "ig", "]", ")", ")", "if", "concat", ":", "conn", "=", "nm", ".", "concatenate", "(", "(", "conn", ",", "mat_id", "[", ":", ",", "nm", ".", "newaxis", "]", ")", ",", "1", ")", "else", ":", "mat_ids_out", ".", "append", "(", "mat_id", ")", "conns_out", ".", "append", "(", "conn", ")", "descs_out", ".", "append", "(", "descs", "[", "group", "[", "0", "]", "]", ")", "if", "concat", ":", "return", "conns_out", ",", "descs_out", "else", ":", "return", "conns_out", ",", "descs_out", ",", "mat_ids_out" ]
31.6
0.029683
def get_instruction(self, idx, off=None): """ Get a particular instruction by using (default) the index of the address if specified :param idx: index of the instruction (the position in the list of the instruction) :type idx: int :param off: address of the instruction :type off: int :rtype: an :class:`Instruction` object """ if self.code is not None: return self.code.get_bc().get_instruction(idx, off) return None
[ "def", "get_instruction", "(", "self", ",", "idx", ",", "off", "=", "None", ")", ":", "if", "self", ".", "code", "is", "not", "None", ":", "return", "self", ".", "code", ".", "get_bc", "(", ")", ".", "get_instruction", "(", "idx", ",", "off", ")", "return", "None" ]
35.5
0.007843
def calc_changes(db, ignore_tables=None): migrator = None # expose eventually? if migrator is None: migrator = auto_detect_migrator(db) existing_tables = [unicode(t) for t in db.get_tables()] existing_indexes = {table:get_indexes_by_table(db, table) for table in existing_tables} existing_columns_by_table = get_columns_by_table(db) foreign_keys_by_table = get_foreign_keys_by_table(db) table_names_to_models = {_table_name(cls): cls for cls in all_models.keys()} to_run = [] table_adds, add_fks, table_deletes, table_renames = calc_table_changes(existing_tables, ignore_tables=ignore_tables) table_renamed_from = {v: k for k, v in table_renames.items()} for tbl in table_adds: to_run += create_table(table_names_to_models[tbl]) for field in add_fks: if hasattr(field, '__pwdbev__not_deferred') and field.__pwdbev__not_deferred: field.deferred = False to_run += create_foreign_key(field) for k, v in table_renames.items(): to_run += rename_table(migrator, k, v) rename_cols_by_table = {} deleted_cols_by_table = {} for etn, ecols in existing_columns_by_table.items(): if etn in table_deletes: continue ntn = table_renames.get(etn, etn) model = table_names_to_models.get(ntn) if not model: continue defined_fields = model._meta.sorted_fields defined_column_name_to_field = {unicode(_column_name(f)):f for f in defined_fields} existing_fks_by_column = {fk.column:fk for fk in foreign_keys_by_table[etn]} adds, deletes, renames, alter_statements = calc_column_changes(db, migrator, etn, ntn, ecols, defined_fields, existing_fks_by_column) for column_name in adds: field = defined_column_name_to_field[column_name] to_run += alter_add_column(db, migrator, ntn, column_name, field) if not field.null: # alter_add_column strips null constraints # add them back after setting any defaults if field.default is not None: to_run += set_default(db, migrator, ntn, column_name, field) else: to_run.append(('-- adding a not null column without a default will fail if the table is not empty',[])) to_run += add_not_null(db, migrator, ntn, column_name, field) for column_name in deletes: fk = existing_fks_by_column.get(column_name) if fk: to_run += drop_foreign_key(db, migrator, ntn, fk.name) to_run += drop_column(db, migrator, ntn, column_name) for ocn, ncn in renames.items(): field = defined_column_name_to_field[ncn] to_run += rename_column(db, migrator, ntn, ocn, ncn, field) to_run += alter_statements rename_cols_by_table[ntn] = renames deleted_cols_by_table[ntn] = deletes for ntn, model in table_names_to_models.items(): etn = table_renamed_from.get(ntn, ntn) deletes = deleted_cols_by_table.get(ntn,set()) existing_indexes_for_table = [i for i in existing_indexes.get(etn, []) if not any([(c in deletes) for c in i.columns])] to_run += calc_index_changes(db, migrator, existing_indexes_for_table, model, rename_cols_by_table.get(ntn, {})) ''' to_run += calc_perms_changes($schema_tables, noop) unless $check_perms_for.empty? ''' for tbl in table_deletes: to_run += drop_table(migrator, tbl) return to_run
[ "def", "calc_changes", "(", "db", ",", "ignore_tables", "=", "None", ")", ":", "migrator", "=", "None", "# expose eventually?", "if", "migrator", "is", "None", ":", "migrator", "=", "auto_detect_migrator", "(", "db", ")", "existing_tables", "=", "[", "unicode", "(", "t", ")", "for", "t", "in", "db", ".", "get_tables", "(", ")", "]", "existing_indexes", "=", "{", "table", ":", "get_indexes_by_table", "(", "db", ",", "table", ")", "for", "table", "in", "existing_tables", "}", "existing_columns_by_table", "=", "get_columns_by_table", "(", "db", ")", "foreign_keys_by_table", "=", "get_foreign_keys_by_table", "(", "db", ")", "table_names_to_models", "=", "{", "_table_name", "(", "cls", ")", ":", "cls", "for", "cls", "in", "all_models", ".", "keys", "(", ")", "}", "to_run", "=", "[", "]", "table_adds", ",", "add_fks", ",", "table_deletes", ",", "table_renames", "=", "calc_table_changes", "(", "existing_tables", ",", "ignore_tables", "=", "ignore_tables", ")", "table_renamed_from", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "table_renames", ".", "items", "(", ")", "}", "for", "tbl", "in", "table_adds", ":", "to_run", "+=", "create_table", "(", "table_names_to_models", "[", "tbl", "]", ")", "for", "field", "in", "add_fks", ":", "if", "hasattr", "(", "field", ",", "'__pwdbev__not_deferred'", ")", "and", "field", ".", "__pwdbev__not_deferred", ":", "field", ".", "deferred", "=", "False", "to_run", "+=", "create_foreign_key", "(", "field", ")", "for", "k", ",", "v", "in", "table_renames", ".", "items", "(", ")", ":", "to_run", "+=", "rename_table", "(", "migrator", ",", "k", ",", "v", ")", "rename_cols_by_table", "=", "{", "}", "deleted_cols_by_table", "=", "{", "}", "for", "etn", ",", "ecols", "in", "existing_columns_by_table", ".", "items", "(", ")", ":", "if", "etn", "in", "table_deletes", ":", "continue", "ntn", "=", "table_renames", ".", "get", "(", "etn", ",", "etn", ")", "model", "=", "table_names_to_models", ".", "get", "(", "ntn", ")", "if", "not", "model", ":", "continue", "defined_fields", "=", "model", ".", "_meta", ".", "sorted_fields", "defined_column_name_to_field", "=", "{", "unicode", "(", "_column_name", "(", "f", ")", ")", ":", "f", "for", "f", "in", "defined_fields", "}", "existing_fks_by_column", "=", "{", "fk", ".", "column", ":", "fk", "for", "fk", "in", "foreign_keys_by_table", "[", "etn", "]", "}", "adds", ",", "deletes", ",", "renames", ",", "alter_statements", "=", "calc_column_changes", "(", "db", ",", "migrator", ",", "etn", ",", "ntn", ",", "ecols", ",", "defined_fields", ",", "existing_fks_by_column", ")", "for", "column_name", "in", "adds", ":", "field", "=", "defined_column_name_to_field", "[", "column_name", "]", "to_run", "+=", "alter_add_column", "(", "db", ",", "migrator", ",", "ntn", ",", "column_name", ",", "field", ")", "if", "not", "field", ".", "null", ":", "# alter_add_column strips null constraints", "# add them back after setting any defaults", "if", "field", ".", "default", "is", "not", "None", ":", "to_run", "+=", "set_default", "(", "db", ",", "migrator", ",", "ntn", ",", "column_name", ",", "field", ")", "else", ":", "to_run", ".", "append", "(", "(", "'-- adding a not null column without a default will fail if the table is not empty'", ",", "[", "]", ")", ")", "to_run", "+=", "add_not_null", "(", "db", ",", "migrator", ",", "ntn", ",", "column_name", ",", "field", ")", "for", "column_name", "in", "deletes", ":", "fk", "=", "existing_fks_by_column", ".", "get", "(", "column_name", ")", "if", "fk", ":", "to_run", "+=", "drop_foreign_key", 
"(", "db", ",", "migrator", ",", "ntn", ",", "fk", ".", "name", ")", "to_run", "+=", "drop_column", "(", "db", ",", "migrator", ",", "ntn", ",", "column_name", ")", "for", "ocn", ",", "ncn", "in", "renames", ".", "items", "(", ")", ":", "field", "=", "defined_column_name_to_field", "[", "ncn", "]", "to_run", "+=", "rename_column", "(", "db", ",", "migrator", ",", "ntn", ",", "ocn", ",", "ncn", ",", "field", ")", "to_run", "+=", "alter_statements", "rename_cols_by_table", "[", "ntn", "]", "=", "renames", "deleted_cols_by_table", "[", "ntn", "]", "=", "deletes", "for", "ntn", ",", "model", "in", "table_names_to_models", ".", "items", "(", ")", ":", "etn", "=", "table_renamed_from", ".", "get", "(", "ntn", ",", "ntn", ")", "deletes", "=", "deleted_cols_by_table", ".", "get", "(", "ntn", ",", "set", "(", ")", ")", "existing_indexes_for_table", "=", "[", "i", "for", "i", "in", "existing_indexes", ".", "get", "(", "etn", ",", "[", "]", ")", "if", "not", "any", "(", "[", "(", "c", "in", "deletes", ")", "for", "c", "in", "i", ".", "columns", "]", ")", "]", "to_run", "+=", "calc_index_changes", "(", "db", ",", "migrator", ",", "existing_indexes_for_table", ",", "model", ",", "rename_cols_by_table", ".", "get", "(", "ntn", ",", "{", "}", ")", ")", "for", "tbl", "in", "table_deletes", ":", "to_run", "+=", "drop_table", "(", "migrator", ",", "tbl", ")", "return", "to_run" ]
43.849315
0.015582
def create_disk(kwargs=None, call=None): ''' Create a new persistent disk. Must specify `disk_name` and `location`, and optionally can specify 'disk_type' as pd-standard or pd-ssd, which defaults to pd-standard. Can also specify an `image` or `snapshot` but if neither of those are specified, a `size` (in GB) is required. CLI Example: .. code-block:: bash salt-cloud -f create_disk gce disk_name=pd size=300 location=us-central1-b ''' if call != 'function': raise SaltCloudSystemExit( 'The create_disk function must be called with -f or --function.' ) if kwargs is None: kwargs = {} name = kwargs.get('disk_name', None) image = kwargs.get('image', None) location = kwargs.get('location', None) size = kwargs.get('size', None) snapshot = kwargs.get('snapshot', None) disk_type = kwargs.get('type', 'pd-standard') if location is None: log.error( 'A location (zone) must be specified when creating a disk.' ) return False if name is None: log.error( 'A disk_name must be specified when creating a disk.' ) return False if size is None and image is None and snapshot is None: log.error( 'Must specify image, snapshot, or size.' ) return False conn = get_conn() location = conn.ex_get_zone(kwargs['location']) use_existing = True __utils__['cloud.fire_event']( 'event', 'create disk', 'salt/cloud/disk/creating', args={ 'name': name, 'location': location.name, 'image': image, 'snapshot': snapshot, }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) disk = conn.create_volume( size, name, location, snapshot, image, use_existing, disk_type ) __utils__['cloud.fire_event']( 'event', 'created disk', 'salt/cloud/disk/created', args={ 'name': name, 'location': location.name, 'image': image, 'snapshot': snapshot, }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return _expand_disk(disk)
[ "def", "create_disk", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The create_disk function must be called with -f or --function.'", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "name", "=", "kwargs", ".", "get", "(", "'disk_name'", ",", "None", ")", "image", "=", "kwargs", ".", "get", "(", "'image'", ",", "None", ")", "location", "=", "kwargs", ".", "get", "(", "'location'", ",", "None", ")", "size", "=", "kwargs", ".", "get", "(", "'size'", ",", "None", ")", "snapshot", "=", "kwargs", ".", "get", "(", "'snapshot'", ",", "None", ")", "disk_type", "=", "kwargs", ".", "get", "(", "'type'", ",", "'pd-standard'", ")", "if", "location", "is", "None", ":", "log", ".", "error", "(", "'A location (zone) must be specified when creating a disk.'", ")", "return", "False", "if", "name", "is", "None", ":", "log", ".", "error", "(", "'A disk_name must be specified when creating a disk.'", ")", "return", "False", "if", "size", "is", "None", "and", "image", "is", "None", "and", "snapshot", "is", "None", ":", "log", ".", "error", "(", "'Must specify image, snapshot, or size.'", ")", "return", "False", "conn", "=", "get_conn", "(", ")", "location", "=", "conn", ".", "ex_get_zone", "(", "kwargs", "[", "'location'", "]", ")", "use_existing", "=", "True", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'create disk'", ",", "'salt/cloud/disk/creating'", ",", "args", "=", "{", "'name'", ":", "name", ",", "'location'", ":", "location", ".", "name", ",", "'image'", ":", "image", ",", "'snapshot'", ":", "snapshot", ",", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ")", "disk", "=", "conn", ".", "create_volume", "(", "size", ",", "name", ",", "location", ",", "snapshot", ",", "image", ",", "use_existing", ",", "disk_type", ")", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'created disk'", ",", "'salt/cloud/disk/created'", ",", "args", "=", "{", "'name'", ":", "name", ",", "'location'", ":", "location", ".", "name", ",", "'image'", ":", "image", ",", "'snapshot'", ":", "snapshot", ",", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ")", "return", "_expand_disk", "(", "disk", ")" ]
26.571429
0.000864
def _migrate_ledger(data_directory, old_ledger_file, new_ledger_file, serializer: MappingSerializer = None): """ Test for the directory, open old and new ledger, migrate data, rename directories """ # we should have ChunkedFileStorage implementation of the Ledger if not os.path.isdir(os.path.join(data_directory, old_ledger_file)): msg = 'Could not find directory {} for migration.'.format( old_ledger_file) logger.error(msg) raise Exception(msg) # open the old ledger using the specified serializer old_ledger_file_backup = old_ledger_file + "_new" old_txn_log_store = ChunkedFileStore(data_directory, old_ledger_file_backup, isLineNoKey=True, storeContentHash=False) old_ledger = Ledger(CompactMerkleTree(), dataDir=data_directory, txn_serializer=serializer, hash_serializer=serializer, fileName=old_ledger_file_backup, transactionLogStore=old_txn_log_store) # open the new ledger with new serialization new_ledger = Ledger(CompactMerkleTree(), dataDir=data_directory, fileName=new_ledger_file) logger.info("new size for {}: {}".format( old_ledger_file_backup, str(new_ledger.size))) # add all txns into the old ledger for _, txn in new_ledger.getAllTxn(): old_ledger.add(txn) logger.info("old size for {}: {}".format( new_ledger_file, str(old_ledger.size))) old_ledger.stop() new_ledger.stop() # now that everything succeeded, remove the new files and move the old # files into place shutil.rmtree( os.path.join(data_directory, new_ledger_file)) os.rename( os.path.join(data_directory, old_ledger_file_backup), os.path.join(data_directory, old_ledger_file))
[ "def", "_migrate_ledger", "(", "data_directory", ",", "old_ledger_file", ",", "new_ledger_file", ",", "serializer", ":", "MappingSerializer", "=", "None", ")", ":", "# we should have ChunkedFileStorage implementation of the Ledger", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "data_directory", ",", "old_ledger_file", ")", ")", ":", "msg", "=", "'Could not find directory {} for migration.'", ".", "format", "(", "old_ledger_file", ")", "logger", ".", "error", "(", "msg", ")", "raise", "Exception", "(", "msg", ")", "# open the old ledger using the specified serializer", "old_ledger_file_backup", "=", "old_ledger_file", "+", "\"_new\"", "old_txn_log_store", "=", "ChunkedFileStore", "(", "data_directory", ",", "old_ledger_file_backup", ",", "isLineNoKey", "=", "True", ",", "storeContentHash", "=", "False", ")", "old_ledger", "=", "Ledger", "(", "CompactMerkleTree", "(", ")", ",", "dataDir", "=", "data_directory", ",", "txn_serializer", "=", "serializer", ",", "hash_serializer", "=", "serializer", ",", "fileName", "=", "old_ledger_file_backup", ",", "transactionLogStore", "=", "old_txn_log_store", ")", "# open the new ledger with new serialization", "new_ledger", "=", "Ledger", "(", "CompactMerkleTree", "(", ")", ",", "dataDir", "=", "data_directory", ",", "fileName", "=", "new_ledger_file", ")", "logger", ".", "info", "(", "\"new size for {}: {}\"", ".", "format", "(", "old_ledger_file_backup", ",", "str", "(", "new_ledger", ".", "size", ")", ")", ")", "# add all txns into the old ledger", "for", "_", ",", "txn", "in", "new_ledger", ".", "getAllTxn", "(", ")", ":", "old_ledger", ".", "add", "(", "txn", ")", "logger", ".", "info", "(", "\"old size for {}: {}\"", ".", "format", "(", "new_ledger_file", ",", "str", "(", "old_ledger", ".", "size", ")", ")", ")", "old_ledger", ".", "stop", "(", ")", "new_ledger", ".", "stop", "(", ")", "# now that everything succeeded, remove the new files and move the old", "# files into place", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "data_directory", ",", "new_ledger_file", ")", ")", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "data_directory", ",", "old_ledger_file_backup", ")", ",", "os", ".", "path", ".", "join", "(", "data_directory", ",", "old_ledger_file", ")", ")" ]
40.34
0.000968
def add_peer(self, peer_addr): "Build a connection to the Hub at a given ``(host, port)`` address" peer = connection.Peer( self._ident, self._dispatcher, peer_addr, backend.Socket()) peer.start() self._started_peers[peer_addr] = peer
[ "def", "add_peer", "(", "self", ",", "peer_addr", ")", ":", "peer", "=", "connection", ".", "Peer", "(", "self", ".", "_ident", ",", "self", ".", "_dispatcher", ",", "peer_addr", ",", "backend", ".", "Socket", "(", ")", ")", "peer", ".", "start", "(", ")", "self", ".", "_started_peers", "[", "peer_addr", "]", "=", "peer" ]
46
0.007117
def check_owners(self, request, **resources): """ Check parents of current resource. Recursive scanning of the fact that the child has FK to the parent and in resources we have right objects. We check that in request like /author/1/book/2/page/3 Page object with pk=3 has ForeignKey field linked to Book object with pk=2 and Book with pk=2 has ForeignKey field linked to Author object with pk=1. :return bool: If success else raise Exception """ if self._meta.allow_public_access or not self._meta.parent: return True self.parent.check_owners(request, **resources) objects = resources.get(self._meta.name) if self._meta.model and self._meta.parent._meta.model and objects: pr = resources.get(self._meta.parent._meta.name) check = all( pr.pk == getattr( o, "%s_id" % self._meta.parent._meta.name, None) for o in as_tuple(objects)) if not pr or not check: # 403 Error if there is error in parent-children relationship raise HttpError( "Access forbidden.", status=status.HTTP_403_FORBIDDEN) return True
[ "def", "check_owners", "(", "self", ",", "request", ",", "*", "*", "resources", ")", ":", "if", "self", ".", "_meta", ".", "allow_public_access", "or", "not", "self", ".", "_meta", ".", "parent", ":", "return", "True", "self", ".", "parent", ".", "check_owners", "(", "request", ",", "*", "*", "resources", ")", "objects", "=", "resources", ".", "get", "(", "self", ".", "_meta", ".", "name", ")", "if", "self", ".", "_meta", ".", "model", "and", "self", ".", "_meta", ".", "parent", ".", "_meta", ".", "model", "and", "objects", ":", "pr", "=", "resources", ".", "get", "(", "self", ".", "_meta", ".", "parent", ".", "_meta", ".", "name", ")", "check", "=", "all", "(", "pr", ".", "pk", "==", "getattr", "(", "o", ",", "\"%s_id\"", "%", "self", ".", "_meta", ".", "parent", ".", "_meta", ".", "name", ",", "None", ")", "for", "o", "in", "as_tuple", "(", "objects", ")", ")", "if", "not", "pr", "or", "not", "check", ":", "# 403 Error if there is error in parent-children relationship", "raise", "HttpError", "(", "\"Access forbidden.\"", ",", "status", "=", "status", ".", "HTTP_403_FORBIDDEN", ")", "return", "True" ]
35.4
0.001571
async def write_registers(self, address, values, skip_encode=False): """Write modbus registers. The Modbus protocol doesn't allow requests longer than 250 bytes (ie. 125 registers, 62 DF addresses), which this function manages by chunking larger requests. """ while len(values) > 62: await self._request('write_registers', address, values, skip_encode=skip_encode) address, values = address + 124, values[62:] await self._request('write_registers', address, values, skip_encode=skip_encode)
[ "async", "def", "write_registers", "(", "self", ",", "address", ",", "values", ",", "skip_encode", "=", "False", ")", ":", "while", "len", "(", "values", ")", ">", "62", ":", "await", "self", ".", "_request", "(", "'write_registers'", ",", "address", ",", "values", ",", "skip_encode", "=", "skip_encode", ")", "address", ",", "values", "=", "address", "+", "124", ",", "values", "[", "62", ":", "]", "await", "self", ".", "_request", "(", "'write_registers'", ",", "address", ",", "values", ",", "skip_encode", "=", "skip_encode", ")" ]
47.615385
0.00317
def index(value, array): """ Array search that behaves like I want it to. Totally dumb, I know. """ i = array.searchsorted(value) if i == len(array): return -1 else: return i
[ "def", "index", "(", "value", ",", "array", ")", ":", "i", "=", "array", ".", "searchsorted", "(", "value", ")", "if", "i", "==", "len", "(", "array", ")", ":", "return", "-", "1", "else", ":", "return", "i" ]
29.428571
0.018868
def _addProteinIdsToGroupMapping(self, proteinIds, groupId): """Add a groupId to one or multiple entries of the internal proteinToGroupId mapping. :param proteinIds: a proteinId or a list of proteinIds, a proteinId must be a string. :param groupId: str, a groupId """ for proteinId in AUX.toList(proteinIds): self._proteinToGroupIds[proteinId].add(groupId)
[ "def", "_addProteinIdsToGroupMapping", "(", "self", ",", "proteinIds", ",", "groupId", ")", ":", "for", "proteinId", "in", "AUX", ".", "toList", "(", "proteinIds", ")", ":", "self", ".", "_proteinToGroupIds", "[", "proteinId", "]", ".", "add", "(", "groupId", ")" ]
42
0.004662
def _assemble_autophosphorylation(self, stmt): """Example: complex(p(HGNC:MAPK14), p(HGNC:TAB1)) => p(HGNC:MAPK14, pmod(Ph, Tyr, 100))""" sub_agent = deepcopy(stmt.enz) mc = stmt._get_mod_condition() sub_agent.mods.append(mc) # FIXME Ignore any bound conditions on the substrate!!! # This is because if they are included, a complex node will be returned, # which (at least currently) won't incorporate any protein # modifications. sub_agent.bound_conditions = [] # FIXME self._add_nodes_edges(stmt.enz, sub_agent, pc.DIRECTLY_INCREASES, stmt.evidence)
[ "def", "_assemble_autophosphorylation", "(", "self", ",", "stmt", ")", ":", "sub_agent", "=", "deepcopy", "(", "stmt", ".", "enz", ")", "mc", "=", "stmt", ".", "_get_mod_condition", "(", ")", "sub_agent", ".", "mods", ".", "append", "(", "mc", ")", "# FIXME Ignore any bound conditions on the substrate!!!", "# This is because if they are included, a complex node will be returned,", "# which (at least currently) won't incorporate any protein", "# modifications.", "sub_agent", ".", "bound_conditions", "=", "[", "]", "# FIXME", "self", ".", "_add_nodes_edges", "(", "stmt", ".", "enz", ",", "sub_agent", ",", "pc", ".", "DIRECTLY_INCREASES", ",", "stmt", ".", "evidence", ")" ]
49.714286
0.004231
def is_zombie(self, path): '''Is the node pointed to by @ref path a zombie object?''' node = self.get_node(path) if not node: return False return node.is_zombie
[ "def", "is_zombie", "(", "self", ",", "path", ")", ":", "node", "=", "self", ".", "get_node", "(", "path", ")", "if", "not", "node", ":", "return", "False", "return", "node", ".", "is_zombie" ]
33.166667
0.009804
def partial_transform(self, traj): """Featurize an MD trajectory into a vector space via pairwise atom-atom distances Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, n_features) A featurized trajectory is a 2D array of shape `(length_of_trajectory x n_features)` where each `features[i]` vector is computed by applying the featurization function to the `i`th snapshot of the input trajectory. See Also -------- transform : simultaneously featurize a collection of MD trajectories """ d = md.geometry.compute_distances(traj, self.pair_indices, periodic=self.periodic) return d ** self.exponent
[ "def", "partial_transform", "(", "self", ",", "traj", ")", ":", "d", "=", "md", ".", "geometry", ".", "compute_distances", "(", "traj", ",", "self", ".", "pair_indices", ",", "periodic", "=", "self", ".", "periodic", ")", "return", "d", "**", "self", ".", "exponent" ]
37.541667
0.002165
def lookup_reverse(ip_address): """Perform a reverse lookup of IP address.""" try: type(ipaddress.ip_address(ip_address)) except ValueError: return {} record = reversename.from_address(ip_address) hostname = str(resolver.query(record, "PTR")[0])[:-1] return {'hostname': hostname}
[ "def", "lookup_reverse", "(", "ip_address", ")", ":", "try", ":", "type", "(", "ipaddress", ".", "ip_address", "(", "ip_address", ")", ")", "except", "ValueError", ":", "return", "{", "}", "record", "=", "reversename", ".", "from_address", "(", "ip_address", ")", "hostname", "=", "str", "(", "resolver", ".", "query", "(", "record", ",", "\"PTR\"", ")", "[", "0", "]", ")", "[", ":", "-", "1", "]", "return", "{", "'hostname'", ":", "hostname", "}" ]
31.2
0.003115
def get_dataset(self, key, info): """Load a dataset.""" logger.debug('Reading in get_dataset %s.', key.name) var_name = info.get('file_key', self.filetype_info.get('file_key')) if var_name: data = self[var_name] elif 'Sectorized_CMI' in self.nc: data = self['Sectorized_CMI'] elif 'data' in self.nc: data = self['data'] # NetCDF doesn't support multi-threaded reading, trick it by opening # as one whole chunk then split it up before we do any calculations data = data.chunk({'x': CHUNK_SIZE, 'y': CHUNK_SIZE}) # convert to satpy standard units factor = data.attrs.pop('scale_factor', 1) offset = data.attrs.pop('add_offset', 0) units = data.attrs.get('units', 1) # the '*1' unit is some weird convention added/needed by AWIPS if units in ['1', '*1'] and key.calibration == 'reflectance': data *= 100 factor *= 100 # used for valid_min/max data.attrs['units'] = '%' # set up all the attributes that might be useful to the user/satpy data.attrs.update({'platform_name': self.platform_name, 'sensor': data.attrs.get('sensor', self.sensor), }) if 'satellite_longitude' in self.nc.attrs: data.attrs['satellite_longitude'] = self.nc.attrs['satellite_longitude'] data.attrs['satellite_latitude'] = self.nc.attrs['satellite_latitude'] data.attrs['satellite_altitude'] = self.nc.attrs['satellite_altitude'] scene_id = self.nc.attrs.get('scene_id') if scene_id is not None: data.attrs['scene_id'] = scene_id data.attrs.update(key.to_dict()) data.attrs.pop('_FillValue', None) if 'valid_min' in data.attrs: vmin = data.attrs.pop('valid_min') vmax = data.attrs.pop('valid_max') vmin = vmin * factor + offset vmax = vmax * factor + offset data.attrs['valid_min'] = vmin data.attrs['valid_max'] = vmax return data
[ "def", "get_dataset", "(", "self", ",", "key", ",", "info", ")", ":", "logger", ".", "debug", "(", "'Reading in get_dataset %s.'", ",", "key", ".", "name", ")", "var_name", "=", "info", ".", "get", "(", "'file_key'", ",", "self", ".", "filetype_info", ".", "get", "(", "'file_key'", ")", ")", "if", "var_name", ":", "data", "=", "self", "[", "var_name", "]", "elif", "'Sectorized_CMI'", "in", "self", ".", "nc", ":", "data", "=", "self", "[", "'Sectorized_CMI'", "]", "elif", "'data'", "in", "self", ".", "nc", ":", "data", "=", "self", "[", "'data'", "]", "# NetCDF doesn't support multi-threaded reading, trick it by opening", "# as one whole chunk then split it up before we do any calculations", "data", "=", "data", ".", "chunk", "(", "{", "'x'", ":", "CHUNK_SIZE", ",", "'y'", ":", "CHUNK_SIZE", "}", ")", "# convert to satpy standard units", "factor", "=", "data", ".", "attrs", ".", "pop", "(", "'scale_factor'", ",", "1", ")", "offset", "=", "data", ".", "attrs", ".", "pop", "(", "'add_offset'", ",", "0", ")", "units", "=", "data", ".", "attrs", ".", "get", "(", "'units'", ",", "1", ")", "# the '*1' unit is some weird convention added/needed by AWIPS", "if", "units", "in", "[", "'1'", ",", "'*1'", "]", "and", "key", ".", "calibration", "==", "'reflectance'", ":", "data", "*=", "100", "factor", "*=", "100", "# used for valid_min/max", "data", ".", "attrs", "[", "'units'", "]", "=", "'%'", "# set up all the attributes that might be useful to the user/satpy", "data", ".", "attrs", ".", "update", "(", "{", "'platform_name'", ":", "self", ".", "platform_name", ",", "'sensor'", ":", "data", ".", "attrs", ".", "get", "(", "'sensor'", ",", "self", ".", "sensor", ")", ",", "}", ")", "if", "'satellite_longitude'", "in", "self", ".", "nc", ".", "attrs", ":", "data", ".", "attrs", "[", "'satellite_longitude'", "]", "=", "self", ".", "nc", ".", "attrs", "[", "'satellite_longitude'", "]", "data", ".", "attrs", "[", "'satellite_latitude'", "]", "=", "self", ".", "nc", ".", "attrs", "[", "'satellite_latitude'", "]", "data", ".", "attrs", "[", "'satellite_altitude'", "]", "=", "self", ".", "nc", ".", "attrs", "[", "'satellite_altitude'", "]", "scene_id", "=", "self", ".", "nc", ".", "attrs", ".", "get", "(", "'scene_id'", ")", "if", "scene_id", "is", "not", "None", ":", "data", ".", "attrs", "[", "'scene_id'", "]", "=", "scene_id", "data", ".", "attrs", ".", "update", "(", "key", ".", "to_dict", "(", ")", ")", "data", ".", "attrs", ".", "pop", "(", "'_FillValue'", ",", "None", ")", "if", "'valid_min'", "in", "data", ".", "attrs", ":", "vmin", "=", "data", ".", "attrs", ".", "pop", "(", "'valid_min'", ")", "vmax", "=", "data", ".", "attrs", ".", "pop", "(", "'valid_max'", ")", "vmin", "=", "vmin", "*", "factor", "+", "offset", "vmax", "=", "vmax", "*", "factor", "+", "offset", "data", ".", "attrs", "[", "'valid_min'", "]", "=", "vmin", "data", ".", "attrs", "[", "'valid_max'", "]", "=", "vmax", "return", "data" ]
45.586957
0.002334
def return_data(self, data, format=None): """Format and return data appropriate to the requested API format. data: The data retured by the api request """ if format is None: format = self.format if format == "json": formatted_data = json.loads(data) else: formatted_data = data return formatted_data
[ "def", "return_data", "(", "self", ",", "data", ",", "format", "=", "None", ")", ":", "if", "format", "is", "None", ":", "format", "=", "self", ".", "format", "if", "format", "==", "\"json\"", ":", "formatted_data", "=", "json", ".", "loads", "(", "data", ")", "else", ":", "formatted_data", "=", "data", "return", "formatted_data" ]
29.307692
0.005089
def negotiate_sasl(transport, xmlstream, sasl_providers, negotiation_timeout, jid, features): """ Perform SASL authentication on the given :class:`.protocol.XMLStream` `stream`. `transport` must be the :class:`asyncio.Transport` over which the `stream` runs. It is used to detect whether TLS is used and may be required by some SASL mechanisms. `sasl_providers` must be an iterable of :class:`SASLProvider` objects. They will be tried in iteration order to authenticate against the server. If one of the `sasl_providers` fails with a :class:`aiosasl.AuthenticationFailure` exception, the other providers are still tried; only if all providers fail, the last :class:`aiosasl.AuthenticationFailure` exception is re-raised. If no mechanism was able to authenticate but not due to authentication failures (other failures include no matching mechanism on the server side), :class:`aiosasl.SASLUnavailable` is raised. Return the :class:`.nonza.StreamFeatures` obtained after resetting the stream after successful SASL authentication. .. versionadded:: 0.6 .. deprecated:: 0.10 The `negotiation_timeout` argument is ignored. The timeout is controlled using the :attr:`~.XMLStream.deadtime_hard_limit` timeout of the stream. The argument will be removed in version 1.0. To prepare for this, please pass `jid` and `features` as keyword arguments. """ if not transport.get_extra_info("sslcontext"): transport = None last_auth_error = None for sasl_provider in sasl_providers: try: result = yield from sasl_provider.execute( jid, features, xmlstream, transport) except ValueError as err: raise errors.StreamNegotiationFailure( "invalid credentials: {}".format(err) ) from err except aiosasl.AuthenticationFailure as err: last_auth_error = err continue if result: features = yield from protocol.reset_stream_and_get_features( xmlstream ) break else: if last_auth_error: raise last_auth_error else: raise errors.SASLUnavailable("No common mechanisms") return features
[ "def", "negotiate_sasl", "(", "transport", ",", "xmlstream", ",", "sasl_providers", ",", "negotiation_timeout", ",", "jid", ",", "features", ")", ":", "if", "not", "transport", ".", "get_extra_info", "(", "\"sslcontext\"", ")", ":", "transport", "=", "None", "last_auth_error", "=", "None", "for", "sasl_provider", "in", "sasl_providers", ":", "try", ":", "result", "=", "yield", "from", "sasl_provider", ".", "execute", "(", "jid", ",", "features", ",", "xmlstream", ",", "transport", ")", "except", "ValueError", "as", "err", ":", "raise", "errors", ".", "StreamNegotiationFailure", "(", "\"invalid credentials: {}\"", ".", "format", "(", "err", ")", ")", "from", "err", "except", "aiosasl", ".", "AuthenticationFailure", "as", "err", ":", "last_auth_error", "=", "err", "continue", "if", "result", ":", "features", "=", "yield", "from", "protocol", ".", "reset_stream_and_get_features", "(", "xmlstream", ")", "break", "else", ":", "if", "last_auth_error", ":", "raise", "last_auth_error", "else", ":", "raise", "errors", ".", "SASLUnavailable", "(", "\"No common mechanisms\"", ")", "return", "features" ]
36.761905
0.000421
def install_brew(target_path): """ Install brew to the target path """ if not os.path.exists(target_path): try: os.makedirs(target_path) except OSError: logger.warn("Unable to create directory %s for brew." % target_path) logger.warn("Skipping...") return extract_targz(HOMEBREW_URL, target_path, remove_common_prefix=True)
[ "def", "install_brew", "(", "target_path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "target_path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "target_path", ")", "except", "OSError", ":", "logger", ".", "warn", "(", "\"Unable to create directory %s for brew.\"", "%", "target_path", ")", "logger", ".", "warn", "(", "\"Skipping...\"", ")", "return", "extract_targz", "(", "HOMEBREW_URL", ",", "target_path", ",", "remove_common_prefix", "=", "True", ")" ]
39
0.005013
def new(): """Create new group.""" form = GroupForm(request.form) if form.validate_on_submit(): try: group = Group.create(admins=[current_user], **form.data) flash(_('Group "%(name)s" created', name=group.name), 'success') return redirect(url_for(".index")) except IntegrityError: flash(_('Group creation failure'), 'error') return render_template( "invenio_groups/new.html", form=form, )
[ "def", "new", "(", ")", ":", "form", "=", "GroupForm", "(", "request", ".", "form", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "try", ":", "group", "=", "Group", ".", "create", "(", "admins", "=", "[", "current_user", "]", ",", "*", "*", "form", ".", "data", ")", "flash", "(", "_", "(", "'Group \"%(name)s\" created'", ",", "name", "=", "group", ".", "name", ")", ",", "'success'", ")", "return", "redirect", "(", "url_for", "(", "\".index\"", ")", ")", "except", "IntegrityError", ":", "flash", "(", "_", "(", "'Group creation failure'", ")", ",", "'error'", ")", "return", "render_template", "(", "\"invenio_groups/new.html\"", ",", "form", "=", "form", ",", ")" ]
27.941176
0.002037
def create_transfer_config( self, parent, transfer_config, authorization_code=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a new data transfer configuration. Example: >>> from google.cloud import bigquery_datatransfer_v1 >>> >>> client = bigquery_datatransfer_v1.DataTransferServiceClient() >>> >>> parent = client.project_path('[PROJECT]') >>> >>> # TODO: Initialize `transfer_config`: >>> transfer_config = {} >>> >>> response = client.create_transfer_config(parent, transfer_config) Args: parent (str): The BigQuery project id where the transfer configuration should be created. Must be in the format /projects/{project\_id}/locations/{location\_id} If specified location and location of the destination bigquery dataset do not match - the request will fail. transfer_config (Union[dict, ~google.cloud.bigquery_datatransfer_v1.types.TransferConfig]): Data transfer configuration to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigquery_datatransfer_v1.types.TransferConfig` authorization_code (str): Optional OAuth2 authorization code to use with this transfer configuration. This is required if new credentials are needed, as indicated by ``CheckValidCreds``. In order to obtain authorization\_code, please make a request to https://www.gstatic.com/bigquerydatatransfer/oauthz/auth?client\_id=&scope=<data\_source\_scopes>&redirect\_uri=<redirect\_uri> - client\_id should be OAuth client\_id of BigQuery DTS API for the given data source returned by ListDataSources method. - data\_source\_scopes are the scopes returned by ListDataSources method. - redirect\_uri is an optional parameter. If not specified, then authorization code is posted to the opener of authorization flow window. Otherwise it will be sent to the redirect uri. A special value of urn:ietf:wg:oauth:2.0:oob means that authorization code should be returned in the title bar of the browser, with the page text prompting the user to copy the code and paste it in the application. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigquery_datatransfer_v1.types.TransferConfig` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "create_transfer_config" not in self._inner_api_calls: self._inner_api_calls[ "create_transfer_config" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_transfer_config, default_retry=self._method_configs["CreateTransferConfig"].retry, default_timeout=self._method_configs["CreateTransferConfig"].timeout, client_info=self._client_info, ) request = datatransfer_pb2.CreateTransferConfigRequest( parent=parent, transfer_config=transfer_config, authorization_code=authorization_code, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["create_transfer_config"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "create_transfer_config", "(", "self", ",", "parent", ",", "transfer_config", ",", "authorization_code", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"create_transfer_config\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"create_transfer_config\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "create_transfer_config", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"CreateTransferConfig\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"CreateTransferConfig\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "datatransfer_pb2", ".", "CreateTransferConfigRequest", "(", "parent", "=", "parent", ",", "transfer_config", "=", "transfer_config", ",", "authorization_code", "=", "authorization_code", ",", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"parent\"", ",", "parent", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"create_transfer_config\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
48.04902
0.007197
def import_object(object_name): """Import an object from its Fully Qualified Name.""" package, name = object_name.rsplit('.', 1) return getattr(importlib.import_module(package), name)
[ "def", "import_object", "(", "object_name", ")", ":", "package", ",", "name", "=", "object_name", ".", "rsplit", "(", "'.'", ",", "1", ")", "return", "getattr", "(", "importlib", ".", "import_module", "(", "package", ")", ",", "name", ")" ]
48
0.005128
def get_calltip(project, source_code, offset, resource=None, maxfixes=1, ignore_unknown=False, remove_self=False): """Get the calltip of a function The format of the returned string is ``module_name.holding_scope_names.function_name(arguments)``. For classes `__init__()` and for normal objects `__call__()` function is used. Note that the offset is on the function itself *not* after the its open parenthesis. (Actually it used to be the other way but it was easily confused when string literals were involved. So I decided it is better for it not to try to be too clever when it cannot be clever enough). You can use a simple search like:: offset = source_code.rindex('(', 0, offset) - 1 to handle simple situations. If `ignore_unknown` is `True`, `None` is returned for functions without source-code like builtins and extensions. If `remove_self` is `True`, the first parameter whose name is self will be removed for methods. """ fixer = fixsyntax.FixSyntax(project, source_code, resource, maxfixes) pyname = fixer.pyname_at(offset) if pyname is None: return None pyobject = pyname.get_object() return PyDocExtractor().get_calltip(pyobject, ignore_unknown, remove_self)
[ "def", "get_calltip", "(", "project", ",", "source_code", ",", "offset", ",", "resource", "=", "None", ",", "maxfixes", "=", "1", ",", "ignore_unknown", "=", "False", ",", "remove_self", "=", "False", ")", ":", "fixer", "=", "fixsyntax", ".", "FixSyntax", "(", "project", ",", "source_code", ",", "resource", ",", "maxfixes", ")", "pyname", "=", "fixer", ".", "pyname_at", "(", "offset", ")", "if", "pyname", "is", "None", ":", "return", "None", "pyobject", "=", "pyname", ".", "get_object", "(", ")", "return", "PyDocExtractor", "(", ")", ".", "get_calltip", "(", "pyobject", ",", "ignore_unknown", ",", "remove_self", ")" ]
40.903226
0.00077
def to_dict(self, model): """Create a dictionary serialization for a model. Parameters ---------- model : ModelHandle Returns ------- dict Dictionary serialization for a model """ # Get the basic Json object from the super class obj = super(ModelRegistry, self).to_dict(model) # Add model parameter obj['parameters'] = [ para.to_dict() for para in model.parameters ] obj['outputs'] = model.outputs.to_dict() obj['connector'] = model.connector return obj
[ "def", "to_dict", "(", "self", ",", "model", ")", ":", "# Get the basic Json object from the super class", "obj", "=", "super", "(", "ModelRegistry", ",", "self", ")", ".", "to_dict", "(", "model", ")", "# Add model parameter", "obj", "[", "'parameters'", "]", "=", "[", "para", ".", "to_dict", "(", ")", "for", "para", "in", "model", ".", "parameters", "]", "obj", "[", "'outputs'", "]", "=", "model", ".", "outputs", ".", "to_dict", "(", ")", "obj", "[", "'connector'", "]", "=", "model", ".", "connector", "return", "obj" ]
27.952381
0.003295
def invert_dict(dict_, unique_vals=True): """ Reverses the keys and values in a dictionary. Set unique_vals to False if the values in the dict are not unique. Args: dict_ (dict_): dictionary unique_vals (bool): if False, inverted keys are returned in a list. Returns: dict: inverted_dict CommandLine: python -m utool.util_dict --test-invert_dict Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict_ = {'a': 1, 'b': 2} >>> inverted_dict = invert_dict(dict_) >>> result = ut.repr4(inverted_dict, nl=False) >>> print(result) {1: 'a', 2: 'b'} Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict_ = OrderedDict([(2, 'good',), (1, 'ok',), (0, 'junk',), (None, 'UNKNOWN',)]) >>> inverted_dict = invert_dict(dict_) >>> result = ut.repr4(inverted_dict, nl=False) >>> print(result) {'good': 2, 'ok': 1, 'junk': 0, 'UNKNOWN': None} Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 2} >>> inverted_dict = invert_dict(dict_, unique_vals=False) >>> inverted_dict = ut.map_dict_vals(sorted, inverted_dict) >>> result = ut.repr4(inverted_dict, nl=False) >>> print(result) {0: ['b', 'c', 'd', 'e'], 1: ['a'], 2: ['f']} """ if unique_vals: inverted_items = [(val, key) for key, val in six.iteritems(dict_)] inverted_dict = type(dict_)(inverted_items) else: inverted_dict = group_items(dict_.keys(), dict_.values()) return inverted_dict
[ "def", "invert_dict", "(", "dict_", ",", "unique_vals", "=", "True", ")", ":", "if", "unique_vals", ":", "inverted_items", "=", "[", "(", "val", ",", "key", ")", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "dict_", ")", "]", "inverted_dict", "=", "type", "(", "dict_", ")", "(", "inverted_items", ")", "else", ":", "inverted_dict", "=", "group_items", "(", "dict_", ".", "keys", "(", ")", ",", "dict_", ".", "values", "(", ")", ")", "return", "inverted_dict" ]
34.384615
0.000544
def split(self, only_watertight=True, adjacency=None, **kwargs): """ Returns a list of Trimesh objects, based on face connectivity. Splits into individual components, sometimes referred to as 'bodies' Parameters --------- only_watertight : bool Only return watertight meshes and discard remainder adjacency : None or (n, 2) int Override face adjacency with custom values Returns --------- meshes : (n,) trimesh.Trimesh Separate bodies from original mesh """ meshes = graph.split(self, only_watertight=only_watertight, adjacency=adjacency, **kwargs) return meshes
[ "def", "split", "(", "self", ",", "only_watertight", "=", "True", ",", "adjacency", "=", "None", ",", "*", "*", "kwargs", ")", ":", "meshes", "=", "graph", ".", "split", "(", "self", ",", "only_watertight", "=", "only_watertight", ",", "adjacency", "=", "adjacency", ",", "*", "*", "kwargs", ")", "return", "meshes" ]
34.727273
0.002548
async def preProcessForComparison(results, target_size, size_tolerance_prct): """ Process results to prepare them for future comparison and sorting. """ # find reference (=image most likely to match target cover ignoring factors like size and format) reference = None for result in results: if result.source_quality is CoverSourceQuality.REFERENCE: if ((reference is None) or (CoverSourceResult.compare(result, reference, target_size=target_size, size_tolerance_prct=size_tolerance_prct) > 0)): reference = result # remove results that are only refs results = list(itertools.filterfalse(operator.attrgetter("is_only_reference"), results)) # remove duplicates no_dup_results = [] for result in results: is_dup = False for result_comp in results: if ((result_comp is not result) and (result_comp.urls == result.urls) and (__class__.compare(result, result_comp, target_size=target_size, size_tolerance_prct=size_tolerance_prct) < 0)): is_dup = True break if not is_dup: no_dup_results.append(result) dup_count = len(results) - len(no_dup_results) if dup_count > 0: logging.getLogger("Cover").info("Removed %u duplicate results" % (dup_count)) results = no_dup_results if reference is not None: logging.getLogger("Cover").info("Reference is: %s" % (reference)) reference.is_similar_to_reference = True # calculate sigs futures = [] for result in results: coroutine = result.updateSignature() future = asyncio.ensure_future(coroutine) futures.append(future) if reference.is_only_reference: assert(reference not in results) coroutine = reference.updateSignature() future = asyncio.ensure_future(coroutine) futures.append(future) if futures: await asyncio.wait(futures) for future in futures: future.result() # raise pending exception if any # compare other results to reference for result in results: if ((result is not reference) and (result.thumbnail_sig is not None) and (reference.thumbnail_sig is not None)): result.is_similar_to_reference = __class__.areImageSigsSimilar(result.thumbnail_sig, reference.thumbnail_sig) if result.is_similar_to_reference: logging.getLogger("Cover").debug("%s is similar to reference" % (result)) else: logging.getLogger("Cover").debug("%s is NOT similar to reference" % (result)) else: logging.getLogger("Cover").warning("No reference result found") return results
[ "async", "def", "preProcessForComparison", "(", "results", ",", "target_size", ",", "size_tolerance_prct", ")", ":", "# find reference (=image most likely to match target cover ignoring factors like size and format)", "reference", "=", "None", "for", "result", "in", "results", ":", "if", "result", ".", "source_quality", "is", "CoverSourceQuality", ".", "REFERENCE", ":", "if", "(", "(", "reference", "is", "None", ")", "or", "(", "CoverSourceResult", ".", "compare", "(", "result", ",", "reference", ",", "target_size", "=", "target_size", ",", "size_tolerance_prct", "=", "size_tolerance_prct", ")", ">", "0", ")", ")", ":", "reference", "=", "result", "# remove results that are only refs", "results", "=", "list", "(", "itertools", ".", "filterfalse", "(", "operator", ".", "attrgetter", "(", "\"is_only_reference\"", ")", ",", "results", ")", ")", "# remove duplicates", "no_dup_results", "=", "[", "]", "for", "result", "in", "results", ":", "is_dup", "=", "False", "for", "result_comp", "in", "results", ":", "if", "(", "(", "result_comp", "is", "not", "result", ")", "and", "(", "result_comp", ".", "urls", "==", "result", ".", "urls", ")", "and", "(", "__class__", ".", "compare", "(", "result", ",", "result_comp", ",", "target_size", "=", "target_size", ",", "size_tolerance_prct", "=", "size_tolerance_prct", ")", "<", "0", ")", ")", ":", "is_dup", "=", "True", "break", "if", "not", "is_dup", ":", "no_dup_results", ".", "append", "(", "result", ")", "dup_count", "=", "len", "(", "results", ")", "-", "len", "(", "no_dup_results", ")", "if", "dup_count", ">", "0", ":", "logging", ".", "getLogger", "(", "\"Cover\"", ")", ".", "info", "(", "\"Removed %u duplicate results\"", "%", "(", "dup_count", ")", ")", "results", "=", "no_dup_results", "if", "reference", "is", "not", "None", ":", "logging", ".", "getLogger", "(", "\"Cover\"", ")", ".", "info", "(", "\"Reference is: %s\"", "%", "(", "reference", ")", ")", "reference", ".", "is_similar_to_reference", "=", "True", "# calculate sigs", "futures", "=", "[", "]", "for", "result", "in", "results", ":", "coroutine", "=", "result", ".", "updateSignature", "(", ")", "future", "=", "asyncio", ".", "ensure_future", "(", "coroutine", ")", "futures", ".", "append", "(", "future", ")", "if", "reference", ".", "is_only_reference", ":", "assert", "(", "reference", "not", "in", "results", ")", "coroutine", "=", "reference", ".", "updateSignature", "(", ")", "future", "=", "asyncio", ".", "ensure_future", "(", "coroutine", ")", "futures", ".", "append", "(", "future", ")", "if", "futures", ":", "await", "asyncio", ".", "wait", "(", "futures", ")", "for", "future", "in", "futures", ":", "future", ".", "result", "(", ")", "# raise pending exception if any", "# compare other results to reference", "for", "result", "in", "results", ":", "if", "(", "(", "result", "is", "not", "reference", ")", "and", "(", "result", ".", "thumbnail_sig", "is", "not", "None", ")", "and", "(", "reference", ".", "thumbnail_sig", "is", "not", "None", ")", ")", ":", "result", ".", "is_similar_to_reference", "=", "__class__", ".", "areImageSigsSimilar", "(", "result", ".", "thumbnail_sig", ",", "reference", ".", "thumbnail_sig", ")", "if", "result", ".", "is_similar_to_reference", ":", "logging", ".", "getLogger", "(", "\"Cover\"", ")", ".", "debug", "(", "\"%s is similar to reference\"", "%", "(", "result", ")", ")", "else", ":", "logging", ".", "getLogger", "(", "\"Cover\"", ")", ".", "debug", "(", "\"%s is NOT similar to reference\"", "%", "(", "result", ")", ")", "else", ":", "logging", 
".", "getLogger", "(", "\"Cover\"", ")", ".", "warning", "(", "\"No reference result found\"", ")", "return", "results" ]
41.253521
0.01067
def set_course_timetable(self, course_id, timetables_course_section_id=None, timetables_course_section_id_end_time=None, timetables_course_section_id_location_name=None, timetables_course_section_id_start_time=None, timetables_course_section_id_weekdays=None): """ Set a course timetable. Creates and updates "timetable" events for a course. Can automaticaly generate a series of calendar events based on simple schedules (e.g. "Monday and Wednesday at 2:00pm" ) Existing timetable events for the course and course sections will be updated if they still are part of the timetable. Otherwise, they will be deleted. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - timetables[course_section_id] """An array of timetable objects for the course section specified by course_section_id. If course_section_id is set to "all", events will be created for the entire course.""" if timetables_course_section_id is not None: data["timetables[course_section_id]"] = timetables_course_section_id # OPTIONAL - timetables[course_section_id][weekdays] """A comma-separated list of abbreviated weekdays (Mon-Monday, Tue-Tuesday, Wed-Wednesday, Thu-Thursday, Fri-Friday, Sat-Saturday, Sun-Sunday)""" if timetables_course_section_id_weekdays is not None: data["timetables[course_section_id][weekdays]"] = timetables_course_section_id_weekdays # OPTIONAL - timetables[course_section_id][start_time] """Time to start each event at (e.g. "9:00 am")""" if timetables_course_section_id_start_time is not None: data["timetables[course_section_id][start_time]"] = timetables_course_section_id_start_time # OPTIONAL - timetables[course_section_id][end_time] """Time to end each event at (e.g. "9:00 am")""" if timetables_course_section_id_end_time is not None: data["timetables[course_section_id][end_time]"] = timetables_course_section_id_end_time # OPTIONAL - timetables[course_section_id][location_name] """A location name to set for each event""" if timetables_course_section_id_location_name is not None: data["timetables[course_section_id][location_name]"] = timetables_course_section_id_location_name self.logger.debug("POST /api/v1/courses/{course_id}/calendar_events/timetable with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/calendar_events/timetable".format(**path), data=data, params=params, no_data=True)
[ "def", "set_course_timetable", "(", "self", ",", "course_id", ",", "timetables_course_section_id", "=", "None", ",", "timetables_course_section_id_end_time", "=", "None", ",", "timetables_course_section_id_location_name", "=", "None", ",", "timetables_course_section_id_start_time", "=", "None", ",", "timetables_course_section_id_weekdays", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_id", "# OPTIONAL - timetables[course_section_id]\r", "\"\"\"An array of timetable objects for the course section specified by course_section_id.\r\n If course_section_id is set to \"all\", events will be created for the entire course.\"\"\"", "if", "timetables_course_section_id", "is", "not", "None", ":", "data", "[", "\"timetables[course_section_id]\"", "]", "=", "timetables_course_section_id", "# OPTIONAL - timetables[course_section_id][weekdays]\r", "\"\"\"A comma-separated list of abbreviated weekdays\r\n (Mon-Monday, Tue-Tuesday, Wed-Wednesday, Thu-Thursday, Fri-Friday, Sat-Saturday, Sun-Sunday)\"\"\"", "if", "timetables_course_section_id_weekdays", "is", "not", "None", ":", "data", "[", "\"timetables[course_section_id][weekdays]\"", "]", "=", "timetables_course_section_id_weekdays", "# OPTIONAL - timetables[course_section_id][start_time]\r", "\"\"\"Time to start each event at (e.g. \"9:00 am\")\"\"\"", "if", "timetables_course_section_id_start_time", "is", "not", "None", ":", "data", "[", "\"timetables[course_section_id][start_time]\"", "]", "=", "timetables_course_section_id_start_time", "# OPTIONAL - timetables[course_section_id][end_time]\r", "\"\"\"Time to end each event at (e.g. \"9:00 am\")\"\"\"", "if", "timetables_course_section_id_end_time", "is", "not", "None", ":", "data", "[", "\"timetables[course_section_id][end_time]\"", "]", "=", "timetables_course_section_id_end_time", "# OPTIONAL - timetables[course_section_id][location_name]\r", "\"\"\"A location name to set for each event\"\"\"", "if", "timetables_course_section_id_location_name", "is", "not", "None", ":", "data", "[", "\"timetables[course_section_id][location_name]\"", "]", "=", "timetables_course_section_id_location_name", "self", ".", "logger", ".", "debug", "(", "\"POST /api/v1/courses/{course_id}/calendar_events/timetable with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"POST\"", ",", "\"/api/v1/courses/{course_id}/calendar_events/timetable\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
57.469388
0.005237
def fpost(self, url, form_data): """ To make a form-data POST request to Falkonry API server :param url: string :param form_data: form-data """ response = None if 'files' in form_data: response = requests.post( self.host + url, data=form_data['data'] if 'data' in form_data else {}, files=form_data['files'] if 'files' in form_data else {}, headers={ 'Authorization': 'Bearer ' + self.token, 'x-falkonry-source':self.sourceHeader }, verify=False ) else: response = requests.post( self.host + url, data=json.dumps(form_data['data'] if 'data' in form_data else {}), headers={ 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'x-falkonry-source':self.sourceHeader }, verify=False ) if response.status_code == 201 or response.status_code == 202: try: return json.loads(response._content.decode('utf-8')) except Exception as e: return json.loads(response.content) elif response.status_code == 401: raise Exception(json.dumps({'message':'Unauthorized Access'})) else: raise Exception(response.content)
[ "def", "fpost", "(", "self", ",", "url", ",", "form_data", ")", ":", "response", "=", "None", "if", "'files'", "in", "form_data", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "host", "+", "url", ",", "data", "=", "form_data", "[", "'data'", "]", "if", "'data'", "in", "form_data", "else", "{", "}", ",", "files", "=", "form_data", "[", "'files'", "]", "if", "'files'", "in", "form_data", "else", "{", "}", ",", "headers", "=", "{", "'Authorization'", ":", "'Bearer '", "+", "self", ".", "token", ",", "'x-falkonry-source'", ":", "self", ".", "sourceHeader", "}", ",", "verify", "=", "False", ")", "else", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "host", "+", "url", ",", "data", "=", "json", ".", "dumps", "(", "form_data", "[", "'data'", "]", "if", "'data'", "in", "form_data", "else", "{", "}", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Authorization'", ":", "'Bearer '", "+", "self", ".", "token", ",", "'x-falkonry-source'", ":", "self", ".", "sourceHeader", "}", ",", "verify", "=", "False", ")", "if", "response", ".", "status_code", "==", "201", "or", "response", ".", "status_code", "==", "202", ":", "try", ":", "return", "json", ".", "loads", "(", "response", ".", "_content", ".", "decode", "(", "'utf-8'", ")", ")", "except", "Exception", "as", "e", ":", "return", "json", ".", "loads", "(", "response", ".", "content", ")", "elif", "response", ".", "status_code", "==", "401", ":", "raise", "Exception", "(", "json", ".", "dumps", "(", "{", "'message'", ":", "'Unauthorized Access'", "}", ")", ")", "else", ":", "raise", "Exception", "(", "response", ".", "content", ")" ]
37.897436
0.003958
def load_deposit(data):
    """Load the raw JSON dump of the Deposition.

    Uses Record API in order to bypass all Deposit-specific initialization,
    which is to be done after the final stage of deposit migration.

    :param data: Dictionary containing deposition data.
    :type data: dict
    """
    from invenio_db import db

    deposit, dep_pid = create_record_and_pid(data)
    deposit = create_files_and_sip(deposit, dep_pid)
    db.session.commit()
[ "def", "load_deposit", "(", "data", ")", ":", "from", "invenio_db", "import", "db", "deposit", ",", "dep_pid", "=", "create_record_and_pid", "(", "data", ")", "deposit", "=", "create_files_and_sip", "(", "deposit", ",", "dep_pid", ")", "db", ".", "session", ".", "commit", "(", ")" ]
34.615385
0.002165
def get_object(model, cid, engine_name=None, connection=None):
    """
    Get a cached object from redis. If cid is empty, return None.
    """
    from uliweb import settings

    if not cid:
        return

    if not check_enable():
        return

    redis = get_redis()
    if not redis:
        return

    tablename = model._alias or model.tablename

    info = settings.get_var('OBJCACHE_TABLES/%s' % tablename, {})
    if info is None:
        return

    _id = get_id(engine_name or model.get_engine_name(), tablename, cid)
    try:
        log.debug("Try to find objcache:get:table=%s:id=[%s]" % (tablename, _id))
        if redis.exists(_id):
            v = redis.hgetall(_id)
            o = model.load(v, from_='dump')
            log.debug("Found!")
            return o
        else:
            log.debug("Not Found!")
    except Exception as e:
        log.exception(e)
[ "def", "get_object", "(", "model", ",", "cid", ",", "engine_name", "=", "None", ",", "connection", "=", "None", ")", ":", "from", "uliweb", "import", "settings", "if", "not", "id", ":", "return", "if", "not", "check_enable", "(", ")", ":", "return", "redis", "=", "get_redis", "(", ")", "if", "not", "redis", ":", "return", "tablename", "=", "model", ".", "_alias", "or", "model", ".", "tablename", "info", "=", "settings", ".", "get_var", "(", "'OBJCACHE_TABLES/%s'", "%", "tablename", ",", "{", "}", ")", "if", "info", "is", "None", ":", "return", "_id", "=", "get_id", "(", "engine_name", "or", "model", ".", "get_engine_name", "(", ")", ",", "tablename", ",", "cid", ")", "try", ":", "log", ".", "debug", "(", "\"Try to find objcache:get:table=%s:id=[%s]\"", "%", "(", "tablename", ",", "_id", ")", ")", "if", "redis", ".", "exists", "(", "_id", ")", ":", "v", "=", "redis", ".", "hgetall", "(", "_id", ")", "o", "=", "model", ".", "load", "(", "v", ",", "from_", "=", "'dump'", ")", "log", ".", "debug", "(", "\"Found!\"", ")", "return", "o", "else", ":", "log", ".", "debug", "(", "\"Not Found!\"", ")", "except", "Exception", "as", "e", ":", "log", ".", "exception", "(", "e", ")" ]
25.028571
0.010989
def log_predictive_density_sampling(self, y_test, mu_star, var_star, Y_metadata=None, num_samples=1000):
    """
    Calculation of the log predictive density via sampling

    .. math::
        \\log p(y_{*}|D) = \\log \\frac{1}{S} \\sum^{S}_{s=1} p(y_{*}|f_{*s}), \\qquad
        f_{*s} \\sim p(f_{*}|\\mu_{*}, \\sigma^{2}_{*})

    :param y_test: test observations (y_{*})
    :type y_test: (Nx1) array
    :param mu_star: predictive mean of gaussian p(f_{*}|mu_{*}, var_{*})
    :type mu_star: (Nx1) array
    :param var_star: predictive variance of gaussian p(f_{*}|mu_{*}, var_{*})
    :type var_star: (Nx1) array
    :param num_samples: num samples of p(f_{*}|mu_{*}, var_{*}) to take
    :type num_samples: int
    """
    assert y_test.shape==mu_star.shape
    assert y_test.shape==var_star.shape
    assert y_test.shape[1] == 1

    #Take samples of p(f*|y)
    #fi_samples = np.random.randn(num_samples)*np.sqrt(var_star) + mu_star
    fi_samples = np.random.normal(mu_star, np.sqrt(var_star), size=(mu_star.shape[0], num_samples))

    from scipy.misc import logsumexp
    log_p_ystar = -np.log(num_samples) + logsumexp(self.logpdf(fi_samples, y_test, Y_metadata=Y_metadata), axis=1)
    log_p_ystar = np.array(log_p_ystar).reshape(*y_test.shape)
    return log_p_ystar
[ "def", "log_predictive_density_sampling", "(", "self", ",", "y_test", ",", "mu_star", ",", "var_star", ",", "Y_metadata", "=", "None", ",", "num_samples", "=", "1000", ")", ":", "assert", "y_test", ".", "shape", "==", "mu_star", ".", "shape", "assert", "y_test", ".", "shape", "==", "var_star", ".", "shape", "assert", "y_test", ".", "shape", "[", "1", "]", "==", "1", "#Take samples of p(f*|y)", "#fi_samples = np.random.randn(num_samples)*np.sqrt(var_star) + mu_star", "fi_samples", "=", "np", ".", "random", ".", "normal", "(", "mu_star", ",", "np", ".", "sqrt", "(", "var_star", ")", ",", "size", "=", "(", "mu_star", ".", "shape", "[", "0", "]", ",", "num_samples", ")", ")", "from", "scipy", ".", "misc", "import", "logsumexp", "log_p_ystar", "=", "-", "np", ".", "log", "(", "num_samples", ")", "+", "logsumexp", "(", "self", ".", "logpdf", "(", "fi_samples", ",", "y_test", ",", "Y_metadata", "=", "Y_metadata", ")", ",", "axis", "=", "1", ")", "log_p_ystar", "=", "np", ".", "array", "(", "log_p_ystar", ")", ".", "reshape", "(", "*", "y_test", ".", "shape", ")", "return", "log_p_ystar" ]
45.793103
0.008112
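The formula above is the Monte Carlo estimate log p(y*|D) = log(1/S · Σ_s p(y*|f_s)); a small numpy-only check that evaluating it as logsumexp(log p_s) − log S (the trick the code applies via scipy's logsumexp) matches the direct mean. The sample values below are arbitrary stand-ins, and the log-sum-exp is written out by hand so the sketch needs nothing beyond numpy.

import numpy as np

p_samples = np.array([0.2, 0.05, 0.4])        # stand-ins for p(y*|f_{*s}) over S=3 samples
log_p = np.log(p_samples)

# log-sum-exp by hand, then divide by S inside the log
m = log_p.max()
log_mean_p = m + np.log(np.exp(log_p - m).sum()) - np.log(len(log_p))

assert np.isclose(log_mean_p, np.log(p_samples.mean()))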
def post(url, var): """Post data to an url.""" data = {b[0]: b[1] for b in [a.split("=") for a in var]} writeln("Sending data to url", url) response = requests.post(url, data=data) if response.status_code == 200: writeln(response.text) else: writeln(str(response.status_code), response.reason)
[ "def", "post", "(", "url", ",", "var", ")", ":", "data", "=", "{", "b", "[", "0", "]", ":", "b", "[", "1", "]", "for", "b", "in", "[", "a", ".", "split", "(", "\"=\"", ")", "for", "a", "in", "var", "]", "}", "writeln", "(", "\"Sending data to url\"", ",", "url", ")", "response", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "data", ")", "if", "response", ".", "status_code", "==", "200", ":", "writeln", "(", "response", ".", "text", ")", "else", ":", "writeln", "(", "str", "(", "response", ".", "status_code", ")", ",", "response", ".", "reason", ")" ]
36.111111
0.003003
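The interesting bit of the helper above is the one-line parse of "key=value" strings into a dict; a tiny standalone sketch of just that expression, with made-up inputs.

var = ["name=ada", "lang=py"]                  # made-up command-line style inputs
data = {b[0]: b[1] for b in [a.split("=") for a in var]}
assert data == {"name": "ada", "lang": "py"}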
def _camel_case(cls, text, first_upper=False): """ Transform text to camelCase or CamelCase :param text: :param first_upper: first symbol must be upper? :return: """ result = '' need_upper = False for pos, symbol in enumerate(text): if symbol == '_' and pos > 0: need_upper = True else: if need_upper: result += symbol.upper() else: result += symbol.lower() need_upper = False if first_upper: result = result[0].upper() + result[1:] return result
[ "def", "_camel_case", "(", "cls", ",", "text", ",", "first_upper", "=", "False", ")", ":", "result", "=", "''", "need_upper", "=", "False", "for", "pos", ",", "symbol", "in", "enumerate", "(", "text", ")", ":", "if", "symbol", "==", "'_'", "and", "pos", ">", "0", ":", "need_upper", "=", "True", "else", ":", "if", "need_upper", ":", "result", "+=", "symbol", ".", "upper", "(", ")", "else", ":", "result", "+=", "symbol", ".", "lower", "(", ")", "need_upper", "=", "False", "if", "first_upper", ":", "result", "=", "result", "[", "0", "]", ".", "upper", "(", ")", "+", "result", "[", "1", ":", "]", "return", "result" ]
29.727273
0.002963
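A self-contained sketch of the same underscore-to-camelCase walk, written as a free function because the original is a classmethod and its class is not shown in the record; the sample strings are arbitrary.

def to_camel_case(text, first_upper=False):
    # Same walk as the method above: an underscore (except at position 0)
    # uppercases the next symbol, everything else is lowercased.
    result, need_upper = "", False
    for pos, symbol in enumerate(text):
        if symbol == "_" and pos > 0:
            need_upper = True
        else:
            result += symbol.upper() if need_upper else symbol.lower()
            need_upper = False
    if first_upper:
        result = result[0].upper() + result[1:]
    return result

assert to_camel_case("user_name_field") == "userNameField"
assert to_camel_case("user_name_field", first_upper=True) == "UserNameField"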
def perform(self, cfg): """ Performs transformation according to configuration :param cfg: transformation configuration """ self.__src = self._load(cfg[Transformation.__CFG_KEY_LOAD]) self.__transform(cfg[Transformation.__CFG_KEY_TRANSFORM]) self.__cleanup(cfg[Transformation.__CFG_KEY_CLEANUP]) self.__save(cfg[Transformation.__CFG_KEY_SAVE])
[ "def", "perform", "(", "self", ",", "cfg", ")", ":", "self", ".", "__src", "=", "self", ".", "_load", "(", "cfg", "[", "Transformation", ".", "__CFG_KEY_LOAD", "]", ")", "self", ".", "__transform", "(", "cfg", "[", "Transformation", ".", "__CFG_KEY_TRANSFORM", "]", ")", "self", ".", "__cleanup", "(", "cfg", "[", "Transformation", ".", "__CFG_KEY_CLEANUP", "]", ")", "self", ".", "__save", "(", "cfg", "[", "Transformation", ".", "__CFG_KEY_SAVE", "]", ")" ]
44.333333
0.004914
def fetch_job(self, job_id, checkout=False): """ Fetch the current job reference (refs/aetros/job/<id>) from origin and (when checkout=True)read its tree to the current git index and checkout into working director. """ self.job_id = job_id self.logger.debug("Git fetch job reference %s" % (self.ref_head, )) out, code, err = self.command_exec(['ls-remote', 'origin', self.ref_head]) if code: self.logger.error('Could not find the job ' + job_id + ' on the server. Are you online and does the job exist?') sys.exit(1) try: self.command_exec(['fetch', '-f', '-n', 'origin', self.ref_head+':'+self.ref_head]) except Exception: self.logger.error("Could not load job information for " + job_id + '. You need to be online to start pre-configured jobs.') raise self.read_job(job_id, checkout)
[ "def", "fetch_job", "(", "self", ",", "job_id", ",", "checkout", "=", "False", ")", ":", "self", ".", "job_id", "=", "job_id", "self", ".", "logger", ".", "debug", "(", "\"Git fetch job reference %s\"", "%", "(", "self", ".", "ref_head", ",", ")", ")", "out", ",", "code", ",", "err", "=", "self", ".", "command_exec", "(", "[", "'ls-remote'", ",", "'origin'", ",", "self", ".", "ref_head", "]", ")", "if", "code", ":", "self", ".", "logger", ".", "error", "(", "'Could not find the job '", "+", "job_id", "+", "' on the server. Are you online and does the job exist?'", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "self", ".", "command_exec", "(", "[", "'fetch'", ",", "'-f'", ",", "'-n'", ",", "'origin'", ",", "self", ".", "ref_head", "+", "':'", "+", "self", ".", "ref_head", "]", ")", "except", "Exception", ":", "self", ".", "logger", ".", "error", "(", "\"Could not load job information for \"", "+", "job_id", "+", "'. You need to be online to start pre-configured jobs.'", ")", "raise", "self", ".", "read_job", "(", "job_id", ",", "checkout", ")" ]
43.666667
0.007471
def set_objective(self, expression): """Set objective of problem.""" if isinstance(expression, numbers.Number): # Allow expressions with no variables as objective, # represented as a number expression = Expression(offset=expression) # Clear previous objective for i in range(swiglpk.glp_get_num_cols(self._p)): swiglpk.glp_set_obj_coef(self._p, 1 + i, 0) for variable, value in expression.values(): var_index = self._variables[variable] swiglpk.glp_set_obj_coef(self._p, var_index, float(value)) swiglpk.glp_set_obj_coef(self._p, 0, float(expression.offset))
[ "def", "set_objective", "(", "self", ",", "expression", ")", ":", "if", "isinstance", "(", "expression", ",", "numbers", ".", "Number", ")", ":", "# Allow expressions with no variables as objective,", "# represented as a number", "expression", "=", "Expression", "(", "offset", "=", "expression", ")", "# Clear previous objective", "for", "i", "in", "range", "(", "swiglpk", ".", "glp_get_num_cols", "(", "self", ".", "_p", ")", ")", ":", "swiglpk", ".", "glp_set_obj_coef", "(", "self", ".", "_p", ",", "1", "+", "i", ",", "0", ")", "for", "variable", ",", "value", "in", "expression", ".", "values", "(", ")", ":", "var_index", "=", "self", ".", "_variables", "[", "variable", "]", "swiglpk", ".", "glp_set_obj_coef", "(", "self", ".", "_p", ",", "var_index", ",", "float", "(", "value", ")", ")", "swiglpk", ".", "glp_set_obj_coef", "(", "self", ".", "_p", ",", "0", ",", "float", "(", "expression", ".", "offset", ")", ")" ]
39.176471
0.002933
def merged(*dicts, **kwargs): """ Merge dictionaries. Later keys overwrite. .. code-block:: python merged(dict(a=1), dict(b=2), c=3, d=1) """ if not dicts: return Struct() result = dict() for d in dicts: result.update(d) result.update(kwargs) struct_type = type(dicts[0]) return struct_type(**result)
[ "def", "merged", "(", "*", "dicts", ",", "*", "*", "kwargs", ")", ":", "if", "not", "dicts", ":", "return", "Struct", "(", ")", "result", "=", "dict", "(", ")", "for", "d", "in", "dicts", ":", "result", ".", "update", "(", "d", ")", "result", ".", "update", "(", "kwargs", ")", "struct_type", "=", "type", "(", "dicts", "[", "0", "]", ")", "return", "struct_type", "(", "*", "*", "result", ")" ]
20.647059
0.002725
def validate_config(data: dict) -> dict:
    """Validate the MIP config input; log field errors and raise if any are found."""
    errors = ConfigSchema().validate(data)
    if errors:
        for field, messages in errors.items():
            if isinstance(messages, dict):
                for level, sample_errors in messages.items():
                    sample_id = data['samples'][level]['sample_id']
                    for sub_field, sub_messages in sample_errors.items():
                        LOG.error(f"{sample_id} -> {sub_field}: {', '.join(sub_messages)}")
            else:
                LOG.error(f"{field}: {', '.join(messages)}")
        raise ConfigError('invalid config input', errors=errors)
[ "def", "validate_config", "(", "data", ":", "dict", ")", "->", "dict", ":", "errors", "=", "ConfigSchema", "(", ")", ".", "validate", "(", "data", ")", "if", "errors", ":", "for", "field", ",", "messages", "in", "errors", ".", "items", "(", ")", ":", "if", "isinstance", "(", "messages", ",", "dict", ")", ":", "for", "level", ",", "sample_errors", "in", "messages", ".", "items", "(", ")", ":", "sample_id", "=", "data", "[", "'samples'", "]", "[", "level", "]", "[", "'sample_id'", "]", "for", "sub_field", ",", "sub_messages", "in", "sample_errors", ".", "items", "(", ")", ":", "LOG", ".", "error", "(", "f\"{sample_id} -> {sub_field}: {', '.join(sub_messages)}\"", ")", "else", ":", "LOG", ".", "error", "(", "f\"{field}: {', '.join(messages)}\"", ")", "raise", "ConfigError", "(", "'invalid config input'", ",", "errors", "=", "errors", ")" ]
54.153846
0.00419
def collect_sound_streams(self): """ Return a list of sound streams in this timeline and its children. The streams are returned in order with respect to the timeline. A stream is returned as a list: the first element is the tag which introduced that stream; other elements are the tags which made up the stream body (if any). """ rc = [] current_stream = None # looking in all containers for frames for tag in self.all_tags_of_type((TagSoundStreamHead, TagSoundStreamBlock)): if isinstance(tag, TagSoundStreamHead): # we have a new stream current_stream = [ tag ] rc.append(current_stream) if isinstance(tag, TagSoundStreamBlock): # we have a frame for the current stream current_stream.append(tag) return rc
[ "def", "collect_sound_streams", "(", "self", ")", ":", "rc", "=", "[", "]", "current_stream", "=", "None", "# looking in all containers for frames", "for", "tag", "in", "self", ".", "all_tags_of_type", "(", "(", "TagSoundStreamHead", ",", "TagSoundStreamBlock", ")", ")", ":", "if", "isinstance", "(", "tag", ",", "TagSoundStreamHead", ")", ":", "# we have a new stream", "current_stream", "=", "[", "tag", "]", "rc", ".", "append", "(", "current_stream", ")", "if", "isinstance", "(", "tag", ",", "TagSoundStreamBlock", ")", ":", "# we have a frame for the current stream", "current_stream", ".", "append", "(", "tag", ")", "return", "rc" ]
42.333333
0.005501
def get(self, name=""): """Get the address(es). """ addrs = [] with self._address_lock: for metadata in self._addresses.values(): if (name == "" or (name and name in metadata["service"])): mda = copy.copy(metadata) mda["receive_time"] = mda["receive_time"].isoformat() addrs.append(mda) LOGGER.debug('return address %s', str(addrs)) return addrs
[ "def", "get", "(", "self", ",", "name", "=", "\"\"", ")", ":", "addrs", "=", "[", "]", "with", "self", ".", "_address_lock", ":", "for", "metadata", "in", "self", ".", "_addresses", ".", "values", "(", ")", ":", "if", "(", "name", "==", "\"\"", "or", "(", "name", "and", "name", "in", "metadata", "[", "\"service\"", "]", ")", ")", ":", "mda", "=", "copy", ".", "copy", "(", "metadata", ")", "mda", "[", "\"receive_time\"", "]", "=", "mda", "[", "\"receive_time\"", "]", ".", "isoformat", "(", ")", "addrs", ".", "append", "(", "mda", ")", "LOGGER", ".", "debug", "(", "'return address %s'", ",", "str", "(", "addrs", ")", ")", "return", "addrs" ]
35.214286
0.003953
def calculate_subscription_lifecycle(subscription_id): """ Calculates the expected lifecycle position the subscription in subscription_ids, and creates a BehindSubscription entry for them. Args: subscription_id (str): ID of subscription to calculate lifecycle for """ subscription = Subscription.objects.select_related("messageset", "schedule").get( id=subscription_id ) behind = subscription.messages_behind() if behind == 0: return current_messageset = subscription.messageset current_sequence_number = subscription.next_sequence_number end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[-1] BehindSubscription.objects.create( subscription=subscription, messages_behind=behind, current_messageset=current_messageset, current_sequence_number=current_sequence_number, expected_messageset=end_subscription.messageset, expected_sequence_number=end_subscription.next_sequence_number, )
[ "def", "calculate_subscription_lifecycle", "(", "subscription_id", ")", ":", "subscription", "=", "Subscription", ".", "objects", ".", "select_related", "(", "\"messageset\"", ",", "\"schedule\"", ")", ".", "get", "(", "id", "=", "subscription_id", ")", "behind", "=", "subscription", ".", "messages_behind", "(", ")", "if", "behind", "==", "0", ":", "return", "current_messageset", "=", "subscription", ".", "messageset", "current_sequence_number", "=", "subscription", ".", "next_sequence_number", "end_subscription", "=", "Subscription", ".", "fast_forward_lifecycle", "(", "subscription", ",", "save", "=", "False", ")", "[", "-", "1", "]", "BehindSubscription", ".", "objects", ".", "create", "(", "subscription", "=", "subscription", ",", "messages_behind", "=", "behind", ",", "current_messageset", "=", "current_messageset", ",", "current_sequence_number", "=", "current_sequence_number", ",", "expected_messageset", "=", "end_subscription", ".", "messageset", ",", "expected_sequence_number", "=", "end_subscription", ".", "next_sequence_number", ",", ")" ]
39.115385
0.002879
def pred_from_structures(self, target_species, structures_list, remove_duplicates=True, remove_existing=False): """ performs a structure prediction targeting compounds containing all of the target_species, based on a list of structure (those structures can for instance come from a database like the ICSD). It will return all the structures formed by ionic substitutions with a probability higher than the threshold Notes: If the default probability model is used, input structures must be oxidation state decorated. See AutoOxiStateDecorationTransformation This method does not change the number of species in a structure. i.e if the number of target species is 3, only input structures containing 3 species will be considered. Args: target_species: a list of species with oxidation states e.g., [Specie('Li',1),Specie('Ni',2), Specie('O',-2)] structures_list: a list of dictionnary of the form {'structure':Structure object ,'id':some id where it comes from} the id can for instance refer to an ICSD id. remove_duplicates: if True, the duplicates in the predicted structures will be removed remove_existing: if True, the predicted structures that already exist in the structures_list will be removed Returns: a list of TransformedStructure objects. """ target_species = get_el_sp(target_species) result = [] transmuter = StandardTransmuter([]) if len(list(set(target_species) & set(self.get_allowed_species()))) \ != len(target_species): raise ValueError("the species in target_species are not allowed " + "for the probability model you are using") for permut in itertools.permutations(target_species): for s in structures_list: # check if: species are in the domain, # and the probability of subst. is above the threshold els = s['structure'].composition.elements if len(els) == len(permut) and \ len(list(set(els) & set( self.get_allowed_species()))) == \ len(els) and self._sp.cond_prob_list(permut, els) > \ self._threshold: clean_subst = {els[i]: permut[i] for i in range(0, len(els)) if els[i] != permut[i]} if len(clean_subst) == 0: continue transf = SubstitutionTransformation(clean_subst) if Substitutor._is_charge_balanced( transf.apply_transformation(s['structure'])): ts = TransformedStructure( s['structure'], [transf], history=[{"source": s['id']}], other_parameters={ 'type': 'structure_prediction', 'proba': self._sp.cond_prob_list(permut, els)} ) result.append(ts) transmuter.append_transformed_structures([ts]) if remove_duplicates: transmuter.apply_filter(RemoveDuplicatesFilter( symprec=self._symprec)) if remove_existing: # Make the list of structures from structures_list that corresponds to the # target species chemsys = list(set([sp.symbol for sp in target_species])) structures_list_target = [st['structure'] for st in structures_list if Substitutor._is_from_chemical_system( chemsys, st['structure'])] transmuter.apply_filter(RemoveExistingFilter(structures_list_target, symprec=self._symprec)) return transmuter.transformed_structures
[ "def", "pred_from_structures", "(", "self", ",", "target_species", ",", "structures_list", ",", "remove_duplicates", "=", "True", ",", "remove_existing", "=", "False", ")", ":", "target_species", "=", "get_el_sp", "(", "target_species", ")", "result", "=", "[", "]", "transmuter", "=", "StandardTransmuter", "(", "[", "]", ")", "if", "len", "(", "list", "(", "set", "(", "target_species", ")", "&", "set", "(", "self", ".", "get_allowed_species", "(", ")", ")", ")", ")", "!=", "len", "(", "target_species", ")", ":", "raise", "ValueError", "(", "\"the species in target_species are not allowed \"", "+", "\"for the probability model you are using\"", ")", "for", "permut", "in", "itertools", ".", "permutations", "(", "target_species", ")", ":", "for", "s", "in", "structures_list", ":", "# check if: species are in the domain,", "# and the probability of subst. is above the threshold", "els", "=", "s", "[", "'structure'", "]", ".", "composition", ".", "elements", "if", "len", "(", "els", ")", "==", "len", "(", "permut", ")", "and", "len", "(", "list", "(", "set", "(", "els", ")", "&", "set", "(", "self", ".", "get_allowed_species", "(", ")", ")", ")", ")", "==", "len", "(", "els", ")", "and", "self", ".", "_sp", ".", "cond_prob_list", "(", "permut", ",", "els", ")", ">", "self", ".", "_threshold", ":", "clean_subst", "=", "{", "els", "[", "i", "]", ":", "permut", "[", "i", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "els", ")", ")", "if", "els", "[", "i", "]", "!=", "permut", "[", "i", "]", "}", "if", "len", "(", "clean_subst", ")", "==", "0", ":", "continue", "transf", "=", "SubstitutionTransformation", "(", "clean_subst", ")", "if", "Substitutor", ".", "_is_charge_balanced", "(", "transf", ".", "apply_transformation", "(", "s", "[", "'structure'", "]", ")", ")", ":", "ts", "=", "TransformedStructure", "(", "s", "[", "'structure'", "]", ",", "[", "transf", "]", ",", "history", "=", "[", "{", "\"source\"", ":", "s", "[", "'id'", "]", "}", "]", ",", "other_parameters", "=", "{", "'type'", ":", "'structure_prediction'", ",", "'proba'", ":", "self", ".", "_sp", ".", "cond_prob_list", "(", "permut", ",", "els", ")", "}", ")", "result", ".", "append", "(", "ts", ")", "transmuter", ".", "append_transformed_structures", "(", "[", "ts", "]", ")", "if", "remove_duplicates", ":", "transmuter", ".", "apply_filter", "(", "RemoveDuplicatesFilter", "(", "symprec", "=", "self", ".", "_symprec", ")", ")", "if", "remove_existing", ":", "# Make the list of structures from structures_list that corresponds to the", "# target species", "chemsys", "=", "list", "(", "set", "(", "[", "sp", ".", "symbol", "for", "sp", "in", "target_species", "]", ")", ")", "structures_list_target", "=", "[", "st", "[", "'structure'", "]", "for", "st", "in", "structures_list", "if", "Substitutor", ".", "_is_from_chemical_system", "(", "chemsys", ",", "st", "[", "'structure'", "]", ")", "]", "transmuter", ".", "apply_filter", "(", "RemoveExistingFilter", "(", "structures_list_target", ",", "symprec", "=", "self", ".", "_symprec", ")", ")", "return", "transmuter", ".", "transformed_structures" ]
46.204301
0.001823
def leb128_decode(data): """ Decodes a LEB128-encoded unsigned integer. :param BufferedIOBase data: The buffer containing the LEB128-encoded integer to decode. :return: The decoded integer. :rtype: int """ result = 0 shift = 0 while True: character = data.read(1) if len(character) == 0: raise bitcoin.core.SerializationTruncationError('Invalid LEB128 integer') b = ord(character) result |= (b & 0x7f) << shift if b & 0x80 == 0: break shift += 7 return result
[ "def", "leb128_decode", "(", "data", ")", ":", "result", "=", "0", "shift", "=", "0", "while", "True", ":", "character", "=", "data", ".", "read", "(", "1", ")", "if", "len", "(", "character", ")", "==", "0", ":", "raise", "bitcoin", ".", "core", ".", "SerializationTruncationError", "(", "'Invalid LEB128 integer'", ")", "b", "=", "ord", "(", "character", ")", "result", "|=", "(", "b", "&", "0x7f", ")", "<<", "shift", "if", "b", "&", "0x80", "==", "0", ":", "break", "shift", "+=", "7", "return", "result" ]
28.409091
0.006192
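A worked, self-contained version of the same LEB128 loop: 7 payload bits per byte, least-significant group first, a set high bit meaning another byte follows. The bitcoin-specific truncation error from the record is replaced by a plain ValueError so the sketch runs with the standard library alone; 0xAC 0x02 decoding to 300 is the usual LEB128 worked example.

import io

def decode_leb128(stream):
    # 7 payload bits per byte, least-significant group first;
    # the high bit signals that more bytes follow.
    result, shift = 0, 0
    while True:
        chunk = stream.read(1)
        if not chunk:
            raise ValueError("truncated LEB128 integer")
        b = chunk[0]
        result |= (b & 0x7F) << shift
        if b & 0x80 == 0:
            return result
        shift += 7

assert decode_leb128(io.BytesIO(b"\xac\x02")) == 300   # 0xAC 0x02 encodes 300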
def permute(self, qubits: Qubits) -> 'Channel': """Return a copy of this channel with qubits in new order""" vec = self.vec.permute(qubits) return Channel(vec.tensor, qubits=vec.qubits)
[ "def", "permute", "(", "self", ",", "qubits", ":", "Qubits", ")", "->", "'Channel'", ":", "vec", "=", "self", ".", "vec", ".", "permute", "(", "qubits", ")", "return", "Channel", "(", "vec", ".", "tensor", ",", "qubits", "=", "vec", ".", "qubits", ")" ]
51.5
0.009569
def auth(self): """ Access the auth :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList """ if self._auth is None: self._auth = AuthTypesList( self._version, account_sid=self._solution['account_sid'], domain_sid=self._solution['sid'], ) return self._auth
[ "def", "auth", "(", "self", ")", ":", "if", "self", ".", "_auth", "is", "None", ":", "self", ".", "_auth", "=", "AuthTypesList", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "domain_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_auth" ]
33.428571
0.008316
def _to_ufo_features(self, master, ufo): """Write an UFO's OpenType feature file.""" # Recover the original feature code if it was stored in the user data original = master.userData[ORIGINAL_FEATURE_CODE_KEY] if original is not None: ufo.features.text = original return prefixes = [] for prefix in self.font.featurePrefixes: strings = [] if prefix.name != ANONYMOUS_FEATURE_PREFIX_NAME: strings.append("# Prefix: %s\n" % prefix.name) strings.append(autostr(prefix.automatic)) strings.append(prefix.code) prefixes.append("".join(strings)) prefix_str = "\n\n".join(prefixes) class_defs = [] for class_ in self.font.classes: prefix = "@" if not class_.name.startswith("@") else "" name = prefix + class_.name class_defs.append( "{}{} = [ {} ];".format(autostr(class_.automatic), name, class_.code) ) class_str = "\n\n".join(class_defs) feature_defs = [] for feature in self.font.features: code = feature.code lines = ["feature %s {" % feature.name] if feature.notes: lines.append("# notes:") lines.extend("# " + line for line in feature.notes.splitlines()) if feature.automatic: lines.append("# automatic") if feature.disabled: lines.append("# disabled") lines.extend("#" + line for line in code.splitlines()) else: lines.append(code) lines.append("} %s;" % feature.name) feature_defs.append("\n".join(lines)) fea_str = "\n\n".join(feature_defs) # Don't add a GDEF table when planning to round-trip. To get Glyphs.app-like # results, we would need anchor propagation or user intervention. Glyphs.app # only generates it on generating binaries. gdef_str = None if self.generate_GDEF: if re.search(r"^\s*table\s+GDEF\s+{", prefix_str, flags=re.MULTILINE): raise ValueError( "The features already contain a `table GDEF {...}` statement. " "Either delete it or set generate_GDEF to False." ) gdef_str = _build_gdef( ufo, self._designspace.lib.get("public.skipExportGlyphs") ) # make sure feature text is a unicode string, for defcon full_text = ( "\n\n".join(filter(None, [class_str, prefix_str, fea_str, gdef_str])) + "\n" ) ufo.features.text = full_text if full_text.strip() else ""
[ "def", "_to_ufo_features", "(", "self", ",", "master", ",", "ufo", ")", ":", "# Recover the original feature code if it was stored in the user data", "original", "=", "master", ".", "userData", "[", "ORIGINAL_FEATURE_CODE_KEY", "]", "if", "original", "is", "not", "None", ":", "ufo", ".", "features", ".", "text", "=", "original", "return", "prefixes", "=", "[", "]", "for", "prefix", "in", "self", ".", "font", ".", "featurePrefixes", ":", "strings", "=", "[", "]", "if", "prefix", ".", "name", "!=", "ANONYMOUS_FEATURE_PREFIX_NAME", ":", "strings", ".", "append", "(", "\"# Prefix: %s\\n\"", "%", "prefix", ".", "name", ")", "strings", ".", "append", "(", "autostr", "(", "prefix", ".", "automatic", ")", ")", "strings", ".", "append", "(", "prefix", ".", "code", ")", "prefixes", ".", "append", "(", "\"\"", ".", "join", "(", "strings", ")", ")", "prefix_str", "=", "\"\\n\\n\"", ".", "join", "(", "prefixes", ")", "class_defs", "=", "[", "]", "for", "class_", "in", "self", ".", "font", ".", "classes", ":", "prefix", "=", "\"@\"", "if", "not", "class_", ".", "name", ".", "startswith", "(", "\"@\"", ")", "else", "\"\"", "name", "=", "prefix", "+", "class_", ".", "name", "class_defs", ".", "append", "(", "\"{}{} = [ {} ];\"", ".", "format", "(", "autostr", "(", "class_", ".", "automatic", ")", ",", "name", ",", "class_", ".", "code", ")", ")", "class_str", "=", "\"\\n\\n\"", ".", "join", "(", "class_defs", ")", "feature_defs", "=", "[", "]", "for", "feature", "in", "self", ".", "font", ".", "features", ":", "code", "=", "feature", ".", "code", "lines", "=", "[", "\"feature %s {\"", "%", "feature", ".", "name", "]", "if", "feature", ".", "notes", ":", "lines", ".", "append", "(", "\"# notes:\"", ")", "lines", ".", "extend", "(", "\"# \"", "+", "line", "for", "line", "in", "feature", ".", "notes", ".", "splitlines", "(", ")", ")", "if", "feature", ".", "automatic", ":", "lines", ".", "append", "(", "\"# automatic\"", ")", "if", "feature", ".", "disabled", ":", "lines", ".", "append", "(", "\"# disabled\"", ")", "lines", ".", "extend", "(", "\"#\"", "+", "line", "for", "line", "in", "code", ".", "splitlines", "(", ")", ")", "else", ":", "lines", ".", "append", "(", "code", ")", "lines", ".", "append", "(", "\"} %s;\"", "%", "feature", ".", "name", ")", "feature_defs", ".", "append", "(", "\"\\n\"", ".", "join", "(", "lines", ")", ")", "fea_str", "=", "\"\\n\\n\"", ".", "join", "(", "feature_defs", ")", "# Don't add a GDEF table when planning to round-trip. To get Glyphs.app-like", "# results, we would need anchor propagation or user intervention. Glyphs.app", "# only generates it on generating binaries.", "gdef_str", "=", "None", "if", "self", ".", "generate_GDEF", ":", "if", "re", ".", "search", "(", "r\"^\\s*table\\s+GDEF\\s+{\"", ",", "prefix_str", ",", "flags", "=", "re", ".", "MULTILINE", ")", ":", "raise", "ValueError", "(", "\"The features already contain a `table GDEF {...}` statement. \"", "\"Either delete it or set generate_GDEF to False.\"", ")", "gdef_str", "=", "_build_gdef", "(", "ufo", ",", "self", ".", "_designspace", ".", "lib", ".", "get", "(", "\"public.skipExportGlyphs\"", ")", ")", "# make sure feature text is a unicode string, for defcon", "full_text", "=", "(", "\"\\n\\n\"", ".", "join", "(", "filter", "(", "None", ",", "[", "class_str", ",", "prefix_str", ",", "fea_str", ",", "gdef_str", "]", ")", ")", "+", "\"\\n\"", ")", "ufo", ".", "features", ".", "text", "=", "full_text", "if", "full_text", ".", "strip", "(", ")", "else", "\"\"" ]
37.19697
0.001984
def nacm_rule_list_cmdrule_log_if_permit(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm") rule_list = ET.SubElement(nacm, "rule-list") name_key = ET.SubElement(rule_list, "name") name_key.text = kwargs.pop('name') cmdrule = ET.SubElement(rule_list, "cmdrule", xmlns="http://tail-f.com/yang/acm") name_key = ET.SubElement(cmdrule, "name") name_key.text = kwargs.pop('name') log_if_permit = ET.SubElement(cmdrule, "log-if-permit") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "nacm_rule_list_cmdrule_log_if_permit", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "nacm", "=", "ET", ".", "SubElement", "(", "config", ",", "\"nacm\"", ",", "xmlns", "=", "\"urn:ietf:params:xml:ns:yang:ietf-netconf-acm\"", ")", "rule_list", "=", "ET", ".", "SubElement", "(", "nacm", ",", "\"rule-list\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "rule_list", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "cmdrule", "=", "ET", ".", "SubElement", "(", "rule_list", ",", "\"cmdrule\"", ",", "xmlns", "=", "\"http://tail-f.com/yang/acm\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "cmdrule", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "log_if_permit", "=", "ET", ".", "SubElement", "(", "cmdrule", ",", "\"log-if-permit\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
47.266667
0.005533
def fold(self): """Folds the region.""" start, end = self.get_range() TextBlockHelper.set_collapsed(self._trigger, True) block = self._trigger.next() while block.blockNumber() <= end and block.isValid(): block.setVisible(False) block = block.next()
[ "def", "fold", "(", "self", ")", ":", "start", ",", "end", "=", "self", ".", "get_range", "(", ")", "TextBlockHelper", ".", "set_collapsed", "(", "self", ".", "_trigger", ",", "True", ")", "block", "=", "self", ".", "_trigger", ".", "next", "(", ")", "while", "block", ".", "blockNumber", "(", ")", "<=", "end", "and", "block", ".", "isValid", "(", ")", ":", "block", ".", "setVisible", "(", "False", ")", "block", "=", "block", ".", "next", "(", ")" ]
38.125
0.00641
def plural(formatter, value, name, option, format):
    """Chooses different text for locale-specific pluralization rules.

    Spec: `{:[p[lural]][(locale)]:msgstr0|msgstr1|...}`

    Example::

       >>> smart.format(u'There {num:is an item|are {} items}.', num=1)
       There is an item.
       >>> smart.format(u'There {num:is an item|are {} items}.', num=10)
       There are 10 items.

    """
    # Extract the plural words from the format string.
    words = format.split('|')
    # This extension requires at least two plural words.
    if not name and len(words) == 1:
        return
    # This extension only formats numbers.
    try:
        number = decimal.Decimal(value)
    except (ValueError, decimal.InvalidOperation):
        return
    # Get the locale.
    locale = Locale.parse(option) if option else formatter.locale
    # Select word based on the plural tag index.
    index = get_plural_tag_index(number, locale)
    return formatter.format(words[index], value)
[ "def", "plural", "(", "formatter", ",", "value", ",", "name", ",", "option", ",", "format", ")", ":", "# Extract the plural words from the format string.", "words", "=", "format", ".", "split", "(", "'|'", ")", "# This extension requires at least two plural words.", "if", "not", "name", "and", "len", "(", "words", ")", "==", "1", ":", "return", "# This extension only formats numbers.", "try", ":", "number", "=", "decimal", ".", "Decimal", "(", "value", ")", "except", "(", "ValueError", ",", "decimal", ".", "InvalidOperation", ")", ":", "return", "# Get the locale.", "locale", "=", "Locale", ".", "parse", "(", "option", ")", "if", "option", "else", "formatter", ".", "locale", "# Select word based on the plural tag index.", "index", "=", "get_plural_tag_index", "(", "number", ",", "locale", ")", "return", "formatter", ".", "format", "(", "words", "[", "index", "]", ",", "value", ")" ]
34.535714
0.001006
def dictlist_convert_to_string(dict_list: Iterable[Dict], key: str) -> None: """ Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a blank string, convert it to ``None``. """ for d in dict_list: d[key] = str(d[key]) if d[key] == "": d[key] = None
[ "def", "dictlist_convert_to_string", "(", "dict_list", ":", "Iterable", "[", "Dict", "]", ",", "key", ":", "str", ")", "->", "None", ":", "for", "d", "in", "dict_list", ":", "d", "[", "key", "]", "=", "str", "(", "d", "[", "key", "]", ")", "if", "d", "[", "key", "]", "==", "\"\"", ":", "d", "[", "key", "]", "=", "None" ]
38.5
0.002538
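A quick in-place usage sketch of the conversion above; the loop body is restated locally because the helper's module path is not given in the record, and the sample rows are arbitrary.

rows = [{"mark": 3}, {"mark": ""}, {"mark": 4.5}]      # arbitrary sample rows
for d in rows:
    d["mark"] = str(d["mark"])
    if d["mark"] == "":
        d["mark"] = None
assert rows == [{"mark": "3"}, {"mark": None}, {"mark": "4.5"}]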
def coerce_nc3_dtype(arr): """Coerce an array to a data type that can be stored in a netCDF-3 file This function performs the following dtype conversions: int64 -> int32 bool -> int8 Data is checked for equality, or equivalence (non-NaN values) with `np.allclose` with the default keyword arguments. """ dtype = str(arr.dtype) if dtype in _nc3_dtype_coercions: new_dtype = _nc3_dtype_coercions[dtype] # TODO: raise a warning whenever casting the data-type instead? cast_arr = arr.astype(new_dtype) if not (cast_arr == arr).all(): raise ValueError('could not safely cast array from dtype %s to %s' % (dtype, new_dtype)) arr = cast_arr return arr
[ "def", "coerce_nc3_dtype", "(", "arr", ")", ":", "dtype", "=", "str", "(", "arr", ".", "dtype", ")", "if", "dtype", "in", "_nc3_dtype_coercions", ":", "new_dtype", "=", "_nc3_dtype_coercions", "[", "dtype", "]", "# TODO: raise a warning whenever casting the data-type instead?", "cast_arr", "=", "arr", ".", "astype", "(", "new_dtype", ")", "if", "not", "(", "cast_arr", "==", "arr", ")", ".", "all", "(", ")", ":", "raise", "ValueError", "(", "'could not safely cast array from dtype %s to %s'", "%", "(", "dtype", ",", "new_dtype", ")", ")", "arr", "=", "cast_arr", "return", "arr" ]
37.8
0.00129
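A short numpy sketch of the coercion the function performs: per its docstring the table maps int64 to int32 and bool to int8, and a cast is accepted only when the values survive unchanged. The local _nc3_dtype_coercions dict restates that documented mapping for the sketch; the arrays are arbitrary.

import numpy as np

# Coercion table as stated in the docstring above (restated here for the sketch).
_nc3_dtype_coercions = {'int64': 'int32', 'bool': 'int8'}

arr = np.array([1, 2, 3], dtype='int64')
cast = arr.astype(_nc3_dtype_coercions[str(arr.dtype)])
assert cast.dtype == np.dtype('int32') and (cast == arr).all()    # safe cast: accepted

too_big = np.array([2 ** 40], dtype='int64')
assert not (too_big.astype('int32') == too_big).all()             # unsafe cast: the function would raise ValueError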
def requests_url(self, ptype, **data):
    """
    Wrapper around the playlist request; sends the post data to douban.fm.

    :param ptype: n -- playlist has no songs, return a new playlist
                  e -- current song finished playing
                  b -- never play this song again, return a new playlist
                  s -- skip to the next song, return a new playlist
                  r -- mark the song as liked
                  u -- unmark the song as liked
    """
    options = {
        'type': ptype,
        'pt': '3.1',
        'channel': self._channel_id,
        'pb': '320',
        'from': 'mainsite',
        'r': '',
        'kbps': '320',
        'app_name': 'radio_website',
        'client': 's:mainsite|y:3.0',
        'version': '100'
    }
    if 'sid' in data:
        options['sid'] = data['sid']
    url = 'https://douban.fm/j/v2/playlist'
    while True:
        try:
            s = requests.get(url, params=options, cookies=self._cookies, headers=HEADERS)
            req_json = s.json()
            if req_json['r'] == 0:
                if 'song' not in req_json or not req_json['song']:
                    break
                return req_json['song'][0]
        except Exception as err:
            raise APIError(err)
        break
    return None
[ "def", "requests_url", "(", "self", ",", "ptype", ",", "*", "*", "data", ")", ":", "options", "=", "{", "'type'", ":", "ptype", ",", "'pt'", ":", "'3.1'", ",", "'channel'", ":", "self", ".", "_channel_id", ",", "'pb'", ":", "'320'", ",", "'from'", ":", "'mainsite'", ",", "'r'", ":", "''", ",", "'kbps'", ":", "'320'", ",", "'app_name'", ":", "'radio_website'", ",", "'client'", ":", "'s:mainsite|y:3.0'", ",", "'version'", ":", "'100'", "}", "if", "'sid'", "in", "data", ":", "options", "[", "'sid'", "]", "=", "data", "[", "'sid'", "]", "url", "=", "'https://douban.fm/j/v2/playlist'", "while", "True", ":", "try", ":", "s", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "options", ",", "cookies", "=", "self", ".", "_cookies", ",", "headers", "=", "HEADERS", ")", "req_json", "=", "s", ".", "json", "(", ")", "if", "req_json", "[", "'r'", "]", "==", "0", ":", "if", "'song'", "not", "in", "req_json", "or", "not", "req_json", "[", "'song'", "]", ":", "break", "return", "req_json", "[", "'song'", "]", "[", "0", "]", "except", "Exception", "as", "err", ":", "raise", "APIError", "(", "err", ")", "break", "return", "None" ]
31.815789
0.001605
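A heavily hedged call sketch for requests_url; `fm` stands for an instance of the surrounding player class with `_channel_id` and `_cookies` already populated (both names appear in the code above):

song = fm.requests_url('n')                  # ask for a fresh playlist and get its first song, or None
if song is not None:
    fm.requests_url('s', sid=song['sid'])    # report a skip for that sid and fetch the next track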
def set(self, item, value): """ Set new item in-place. Does not consolidate. Adds new Block if not contained in the current set of items """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical # TODO(EA): Remove an is_extension_ when all extension types satisfy # the interface value_is_extension_type = (is_extension_type(value) or is_extension_array_dtype(value)) # categorical/spares/datetimetz if value_is_extension_type: def value_getitem(placement): return value else: if value.ndim == self.ndim - 1: value = _safe_reshape(value, (1,) + value.shape) def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] if value.shape[1:] != self.shape[1:]: raise AssertionError('Shape of new values must be compatible ' 'with manager shape') try: loc = self.items.get_loc(item) except KeyError: # This item wasn't present, just insert at end self.insert(len(self.items), item, value) return if isinstance(loc, int): loc = [loc] blknos = self._blknos[loc] blklocs = self._blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno, val_locs in libinternals.get_blkno_placements(blknos, self.nblocks, group=True): blk = self.blocks[blkno] blk_locs = blklocs[val_locs.indexer] if blk.should_store(value): blk.set(blk_locs, value_getitem(val_locs)) else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal. if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno) else: self._blklocs[blk.mgr_locs.indexer] = -1 blk.delete(blk_locs) self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) if len(removed_blknos): # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.int64) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, allow_fill=False) self.blocks = tuple(blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)) if unfit_val_locs: unfit_mgr_locs = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_mgr_locs) new_blocks = [] if value_is_extension_type: # This code (ab-)uses the fact that sparse blocks contain only # one item. new_blocks.extend( make_block(values=value.copy(), ndim=self.ndim, placement=slice(mgr_loc, mgr_loc + 1)) for mgr_loc in unfit_mgr_locs) self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + len(self.blocks)) self._blklocs[unfit_mgr_locs] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( make_block(values=value_getitem(unfit_val_items), ndim=self.ndim, placement=unfit_mgr_locs)) self._blknos[unfit_mgr_locs] = len(self.blocks) self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) self.blocks += tuple(new_blocks) # Newly created block's dtype may already be present. self._known_consolidated = False
[ "def", "set", "(", "self", ",", "item", ",", "value", ")", ":", "# FIXME: refactor, clearly separate broadcasting & zip-like assignment", "# can prob also fix the various if tests for sparse/categorical", "# TODO(EA): Remove an is_extension_ when all extension types satisfy", "# the interface", "value_is_extension_type", "=", "(", "is_extension_type", "(", "value", ")", "or", "is_extension_array_dtype", "(", "value", ")", ")", "# categorical/spares/datetimetz", "if", "value_is_extension_type", ":", "def", "value_getitem", "(", "placement", ")", ":", "return", "value", "else", ":", "if", "value", ".", "ndim", "==", "self", ".", "ndim", "-", "1", ":", "value", "=", "_safe_reshape", "(", "value", ",", "(", "1", ",", ")", "+", "value", ".", "shape", ")", "def", "value_getitem", "(", "placement", ")", ":", "return", "value", "else", ":", "def", "value_getitem", "(", "placement", ")", ":", "return", "value", "[", "placement", ".", "indexer", "]", "if", "value", ".", "shape", "[", "1", ":", "]", "!=", "self", ".", "shape", "[", "1", ":", "]", ":", "raise", "AssertionError", "(", "'Shape of new values must be compatible '", "'with manager shape'", ")", "try", ":", "loc", "=", "self", ".", "items", ".", "get_loc", "(", "item", ")", "except", "KeyError", ":", "# This item wasn't present, just insert at end", "self", ".", "insert", "(", "len", "(", "self", ".", "items", ")", ",", "item", ",", "value", ")", "return", "if", "isinstance", "(", "loc", ",", "int", ")", ":", "loc", "=", "[", "loc", "]", "blknos", "=", "self", ".", "_blknos", "[", "loc", "]", "blklocs", "=", "self", ".", "_blklocs", "[", "loc", "]", ".", "copy", "(", ")", "unfit_mgr_locs", "=", "[", "]", "unfit_val_locs", "=", "[", "]", "removed_blknos", "=", "[", "]", "for", "blkno", ",", "val_locs", "in", "libinternals", ".", "get_blkno_placements", "(", "blknos", ",", "self", ".", "nblocks", ",", "group", "=", "True", ")", ":", "blk", "=", "self", ".", "blocks", "[", "blkno", "]", "blk_locs", "=", "blklocs", "[", "val_locs", ".", "indexer", "]", "if", "blk", ".", "should_store", "(", "value", ")", ":", "blk", ".", "set", "(", "blk_locs", ",", "value_getitem", "(", "val_locs", ")", ")", "else", ":", "unfit_mgr_locs", ".", "append", "(", "blk", ".", "mgr_locs", ".", "as_array", "[", "blk_locs", "]", ")", "unfit_val_locs", ".", "append", "(", "val_locs", ")", "# If all block items are unfit, schedule the block for removal.", "if", "len", "(", "val_locs", ")", "==", "len", "(", "blk", ".", "mgr_locs", ")", ":", "removed_blknos", ".", "append", "(", "blkno", ")", "else", ":", "self", ".", "_blklocs", "[", "blk", ".", "mgr_locs", ".", "indexer", "]", "=", "-", "1", "blk", ".", "delete", "(", "blk_locs", ")", "self", ".", "_blklocs", "[", "blk", ".", "mgr_locs", ".", "indexer", "]", "=", "np", ".", "arange", "(", "len", "(", "blk", ")", ")", "if", "len", "(", "removed_blknos", ")", ":", "# Remove blocks & update blknos accordingly", "is_deleted", "=", "np", ".", "zeros", "(", "self", ".", "nblocks", ",", "dtype", "=", "np", ".", "bool_", ")", "is_deleted", "[", "removed_blknos", "]", "=", "True", "new_blknos", "=", "np", ".", "empty", "(", "self", ".", "nblocks", ",", "dtype", "=", "np", ".", "int64", ")", "new_blknos", ".", "fill", "(", "-", "1", ")", "new_blknos", "[", "~", "is_deleted", "]", "=", "np", ".", "arange", "(", "self", ".", "nblocks", "-", "len", "(", "removed_blknos", ")", ")", "self", ".", "_blknos", "=", "algos", ".", "take_1d", "(", "new_blknos", ",", "self", ".", "_blknos", ",", "axis", "=", "0", ",", "allow_fill", "=", 
"False", ")", "self", ".", "blocks", "=", "tuple", "(", "blk", "for", "i", ",", "blk", "in", "enumerate", "(", "self", ".", "blocks", ")", "if", "i", "not", "in", "set", "(", "removed_blknos", ")", ")", "if", "unfit_val_locs", ":", "unfit_mgr_locs", "=", "np", ".", "concatenate", "(", "unfit_mgr_locs", ")", "unfit_count", "=", "len", "(", "unfit_mgr_locs", ")", "new_blocks", "=", "[", "]", "if", "value_is_extension_type", ":", "# This code (ab-)uses the fact that sparse blocks contain only", "# one item.", "new_blocks", ".", "extend", "(", "make_block", "(", "values", "=", "value", ".", "copy", "(", ")", ",", "ndim", "=", "self", ".", "ndim", ",", "placement", "=", "slice", "(", "mgr_loc", ",", "mgr_loc", "+", "1", ")", ")", "for", "mgr_loc", "in", "unfit_mgr_locs", ")", "self", ".", "_blknos", "[", "unfit_mgr_locs", "]", "=", "(", "np", ".", "arange", "(", "unfit_count", ")", "+", "len", "(", "self", ".", "blocks", ")", ")", "self", ".", "_blklocs", "[", "unfit_mgr_locs", "]", "=", "0", "else", ":", "# unfit_val_locs contains BlockPlacement objects", "unfit_val_items", "=", "unfit_val_locs", "[", "0", "]", ".", "append", "(", "unfit_val_locs", "[", "1", ":", "]", ")", "new_blocks", ".", "append", "(", "make_block", "(", "values", "=", "value_getitem", "(", "unfit_val_items", ")", ",", "ndim", "=", "self", ".", "ndim", ",", "placement", "=", "unfit_mgr_locs", ")", ")", "self", ".", "_blknos", "[", "unfit_mgr_locs", "]", "=", "len", "(", "self", ".", "blocks", ")", "self", ".", "_blklocs", "[", "unfit_mgr_locs", "]", "=", "np", ".", "arange", "(", "unfit_count", ")", "self", ".", "blocks", "+=", "tuple", "(", "new_blocks", ")", "# Newly created block's dtype may already be present.", "self", ".", "_known_consolidated", "=", "False" ]
39.868421
0.000429
def fqn(o): """Returns the fully qualified class name of an object or a class :param o: object or class :return: class name >>> import concurrency.fields >>> fqn('str') Traceback (most recent call last): ... ValueError: Invalid argument `str` >>> class A(object): ... def method(self): ... pass >>> str(fqn(A)) 'concurrency.utils.A' >>> str(fqn(A())) 'concurrency.utils.A' >>> str(fqn(concurrency.fields)) 'concurrency.fields' >>> str(fqn(A.method)) 'concurrency.utils.A.method' """ parts = [] # if inspect.ismethod(o): # try: # cls = o.im_class # except AttributeError: # # Python 3 eliminates im_class, substitutes __module__ and # # __qualname__ to provide similar information. # parts = (o.__module__, o.__qualname__) # else: # parts = (fqn(cls), get_classname(o)) if hasattr(o, '__module__'): parts.append(o.__module__) parts.append(get_classname(o)) elif inspect.ismodule(o): return o.__name__ if not parts: raise ValueError("Invalid argument `%s`" % o) return ".".join(parts)
[ "def", "fqn", "(", "o", ")", ":", "parts", "=", "[", "]", "# if inspect.ismethod(o):", "# try:", "# cls = o.im_class", "# except AttributeError:", "# # Python 3 eliminates im_class, substitutes __module__ and", "# # __qualname__ to provide similar information.", "# parts = (o.__module__, o.__qualname__)", "# else:", "# parts = (fqn(cls), get_classname(o))", "if", "hasattr", "(", "o", ",", "'__module__'", ")", ":", "parts", ".", "append", "(", "o", ".", "__module__", ")", "parts", ".", "append", "(", "get_classname", "(", "o", ")", ")", "elif", "inspect", ".", "ismodule", "(", "o", ")", ":", "return", "o", ".", "__name__", "if", "not", "parts", ":", "raise", "ValueError", "(", "\"Invalid argument `%s`\"", "%", "o", ")", "return", "\".\"", ".", "join", "(", "parts", ")" ]
25.021277
0.000818
def make_poll(self, options, expires_in, multiple=False, hide_totals=False): """ Generate a poll object that can be passed as the `poll` option when posting a status. options is an array of strings with the poll options (by default, a maximum of 4), expires_in is the time in seconds for which the poll should be open. Set multiple to True to allow people to choose more than one answer. Set hide_totals to True to hide the results of the poll until it has expired. """ poll_params = locals() del poll_params["self"] return poll_params
[ "def", "make_poll", "(", "self", ",", "options", ",", "expires_in", ",", "multiple", "=", "False", ",", "hide_totals", "=", "False", ")", ":", "poll_params", "=", "locals", "(", ")", "del", "poll_params", "[", "\"self\"", "]", "return", "poll_params" ]
51.75
0.012658
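A short sketch of how the returned dict might be used; `mastodon` is a hypothetical authenticated client instance, and passing the dict to a status-posting call is inferred from the docstring rather than shown in the code above:

poll = mastodon.make_poll(["tabs", "spaces"], expires_in=86400)
# poll == {'options': ['tabs', 'spaces'], 'expires_in': 86400, 'multiple': False, 'hide_totals': False}
mastodon.status_post("Pick one:", poll=poll)   # assumed call that accepts the `poll` option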
def solve_apply(expr, vars): """Returns the result of applying function (lhs) to its arguments (rest). We use IApplicative to apply the function, because that gives the host application an opportunity to compare the function being called against a whitelist. EFILTER will never directly call a function that wasn't provided through a protocol implementation. """ func = __solve_for_scalar(expr.func, vars) args = [] kwargs = {} for arg in expr.args: if isinstance(arg, ast.Pair): if not isinstance(arg.lhs, ast.Var): raise errors.EfilterError( root=arg.lhs, message="Invalid argument name.") kwargs[arg.key.value] = solve(arg.value, vars).value else: args.append(solve(arg, vars).value) result = applicative.apply(func, args, kwargs) return Result(result, ())
[ "def", "solve_apply", "(", "expr", ",", "vars", ")", ":", "func", "=", "__solve_for_scalar", "(", "expr", ".", "func", ",", "vars", ")", "args", "=", "[", "]", "kwargs", "=", "{", "}", "for", "arg", "in", "expr", ".", "args", ":", "if", "isinstance", "(", "arg", ",", "ast", ".", "Pair", ")", ":", "if", "not", "isinstance", "(", "arg", ".", "lhs", ",", "ast", ".", "Var", ")", ":", "raise", "errors", ".", "EfilterError", "(", "root", "=", "arg", ".", "lhs", ",", "message", "=", "\"Invalid argument name.\"", ")", "kwargs", "[", "arg", ".", "key", ".", "value", "]", "=", "solve", "(", "arg", ".", "value", ",", "vars", ")", ".", "value", "else", ":", "args", ".", "append", "(", "solve", "(", "arg", ",", "vars", ")", ".", "value", ")", "result", "=", "applicative", ".", "apply", "(", "func", ",", "args", ",", "kwargs", ")", "return", "Result", "(", "result", ",", "(", ")", ")" ]
35.8
0.001088
def setup_tables(create=True, drop=False): """ Binds the model classes to registered metadata and engine and (potentially) creates the db tables. This function expects that you have bound the L{meta.metadata} and L{meta.engine}. @param create: Whether to create the tables (if they do not exist). @type create: C{bool} @param drop: Whether to drop the tables (if they exist). @type drop: C{bool} """ global frames_table frames_table = Table('frames', meta.metadata, Column('message_id', String(255), primary_key=True), Column('sequence', BigInteger, primary_key=False, autoincrement=True), Column('destination', String(255), index=True), Column('frame', PickleType), Column('queued', DateTime, default=func.now())) if drop: meta.metadata.drop_all() if drop or create: meta.metadata.create_all()
[ "def", "setup_tables", "(", "create", "=", "True", ",", "drop", "=", "False", ")", ":", "global", "frames_table", "frames_table", "=", "Table", "(", "'frames'", ",", "meta", ".", "metadata", ",", "Column", "(", "'message_id'", ",", "String", "(", "255", ")", ",", "primary_key", "=", "True", ")", ",", "Column", "(", "'sequence'", ",", "BigInteger", ",", "primary_key", "=", "False", ",", "autoincrement", "=", "True", ")", ",", "Column", "(", "'destination'", ",", "String", "(", "255", ")", ",", "index", "=", "True", ")", ",", "Column", "(", "'frame'", ",", "PickleType", ")", ",", "Column", "(", "'queued'", ",", "DateTime", ",", "default", "=", "func", ".", "now", "(", ")", ")", ")", "if", "drop", ":", "meta", ".", "metadata", ".", "drop_all", "(", ")", "if", "drop", "or", "create", ":", "meta", ".", "metadata", ".", "create_all", "(", ")" ]
37
0.002927
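A hedged setup sketch for setup_tables; it assumes `meta` is the companion module whose `metadata` and `engine` the docstring says must be bound first, and that a pre-2.0 SQLAlchemy (where MetaData(bind=...) is valid) is in use:

from sqlalchemy import create_engine, MetaData
meta.engine = create_engine("sqlite:///frames.db")    # SQLite chosen purely for illustration
meta.metadata = MetaData(bind=meta.engine)
setup_tables(create=True, drop=False)                 # creates the 'frames' table if it does not exist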