text (stringlengths: 75 to 104k)
code_tokens (sequence)
avg_line_len (float64: 7.91 to 980)
score (float64: 0 to 0.18)
def get_requested_aosp_permissions(self):
    """
    Returns requested permissions declared within the AOSP project.

    This includes several other permissions as well, which are in
    the platform apps.

    :rtype: list of str
    """
    aosp_permissions = []
    all_permissions = self.get_permissions()
    for perm in all_permissions:
        if perm in list(self.permission_module.keys()):
            aosp_permissions.append(perm)
    return aosp_permissions
[ "def", "get_requested_aosp_permissions", "(", "self", ")", ":", "aosp_permissions", "=", "[", "]", "all_permissions", "=", "self", ".", "get_permissions", "(", ")", "for", "perm", "in", "all_permissions", ":", "if", "perm", "in", "list", "(", "self", ".", "permission_module", ".", "keys", "(", ")", ")", ":", "aosp_permissions", ".", "append", "(", "perm", ")", "return", "aosp_permissions" ]
35.214286
0.005929
def ask_and_eval(self, func, args=(), gradf=None, number=None, xmean=None,
                 sigma_fac=1, evaluations=1, aggregation=np.median, kappa=1):
    """samples `number` solutions and evaluates them on `func`, where
    each solution `s` is resampled until
    ``self.is_feasible(s, func(s)) is True``.

    Arguments
    ---------
        `func`
            objective function, ``func(x)`` returns a scalar
        `args`
            additional parameters for `func`
        `gradf`
            gradient of objective function, ``g = gradf(x, *args)``
            must satisfy ``len(g) == len(x)``
        `number`
            number of solutions to be sampled, by default
            population size ``popsize`` (AKA lambda)
        `xmean`
            mean for sampling the solutions, by default ``self.mean``.
        `sigma_fac`
            multiplier for sampling width, standard deviation, for example
            to get a small perturbation of solution `xmean`
        `evaluations`
            number of evaluations for each sampled solution
        `aggregation`
            function that aggregates `evaluations` values to a single value.
        `kappa`
            multiplier used for the evaluation of the solutions, in
            that ``func(m + kappa*(x - m))`` is the f-value for x.

    Return
    ------
    ``(X, fit)``, where

        X -- list of solutions
        fit -- list of respective function values

    Details
    -------
    While ``not self.is_feasible(x, func(x))`` new solutions are sampled.
    By default
    ``self.is_feasible == cma.feasible == lambda x, f: f not in (None, np.NaN)``.
    The argument to `func` can be freely modified within `func`.

    Depending on the ``CMA_mirrors`` option, some solutions are not
    sampled independently but as mirrors of other bad solutions. This
    is a simple derandomization that can save 10-30% of the
    evaluations in particular with small populations, for example on
    the cigar function.

    Example
    -------
    >>> import cma
    >>> x0, sigma0 = 8*[10], 1  # 8-D
    >>> es = cma.CMAEvolutionStrategy(x0, sigma0)
    >>> while not es.stop():
    ...     X, fit = es.ask_and_eval(cma.fcts.elli)  # handles NaN with resampling
    ...     es.tell(X, fit)  # pass on fitness values
    ...     es.disp(20)  # print every 20-th iteration
    >>> print('terminated on ' + str(es.stop()))
    <output omitted>

    A single iteration step can be expressed in one line, such that
    an entire optimization after initialization becomes ::

        while not es.stop():
            es.tell(*es.ask_and_eval(cma.fcts.elli))

    """
    # initialize
    popsize = self.sp.popsize
    if number is not None:
        popsize = number

    selective_mirroring = self.opts['CMA_mirrormethod'] > 0
    nmirrors = self.sp.lam_mirr
    if popsize != self.sp.popsize:
        nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize)
        # TODO: now selective mirroring might be impaired
    assert new_injections or self.opts['CMA_mirrormethod'] < 2
    if new_injections and self.opts['CMA_mirrormethod'] != 1:
        # otherwise mirrors are done elsewhere
        nmirrors = 0
    assert nmirrors <= popsize // 2
    self.mirrors_idx = np.arange(nmirrors)  # might never be used
    self.mirrors_rejected_idx = []  # might never be used
    is_feasible = self.opts['is_feasible']

    # do the work
    fit = []  # or np.NaN * np.empty(number)
    X_first = self.ask(popsize, xmean=xmean, gradf=gradf, args=args)
    if xmean is None:
        xmean = self.mean  # might have changed in self.ask
    X = []
    for k in xrange(int(popsize)):
        x, f = X_first.pop(0), None
        rejected = -1
        while rejected < 0 or not is_feasible(x, f):  # rejection sampling
            rejected += 1
            if rejected:  # resample
                x = self.ask(1, xmean, sigma_fac)[0]
            elif k >= popsize - nmirrors:  # mirrored sample
                if k == popsize - nmirrors and selective_mirroring:
                    self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1]
                x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
            if rejected == 1 and k >= popsize - nmirrors:
                self.mirrors_rejected_idx.append(k)

            # constraints handling test hardwired ccccccccccc
            length_normalizer = 1  # zzzzzzzzzzzzzzzzzzzzzzzzz
            f = func(x, *args) if kappa == 1 else \
                func(xmean + kappa * length_normalizer * (x - xmean), *args)
            if is_feasible(x, f) and evaluations > 1:
                f = aggregation([f] + [(func(x, *args) if kappa == 1 else
                                        func(xmean + kappa * length_normalizer * (x - xmean), *args))
                                       for _i in xrange(int(evaluations - 1))])
            if (rejected + 1) % 1000 == 0:
                print(' %d solutions rejected (f-value NaN or None) at iteration %d'
                      % (rejected, self.countiter))
        fit.append(f)
        X.append(x)
    self.evaluations_per_f_value = int(evaluations)
    return X, fit
[ "def", "ask_and_eval", "(", "self", ",", "func", ",", "args", "=", "(", ")", ",", "gradf", "=", "None", ",", "number", "=", "None", ",", "xmean", "=", "None", ",", "sigma_fac", "=", "1", ",", "evaluations", "=", "1", ",", "aggregation", "=", "np", ".", "median", ",", "kappa", "=", "1", ")", ":", "# initialize", "popsize", "=", "self", ".", "sp", ".", "popsize", "if", "number", "is", "not", "None", ":", "popsize", "=", "number", "selective_mirroring", "=", "self", ".", "opts", "[", "'CMA_mirrormethod'", "]", ">", "0", "nmirrors", "=", "self", ".", "sp", ".", "lam_mirr", "if", "popsize", "!=", "self", ".", "sp", ".", "popsize", ":", "nmirrors", "=", "Mh", ".", "sround", "(", "popsize", "*", "self", ".", "sp", ".", "lam_mirr", "/", "self", ".", "sp", ".", "popsize", ")", "# TODO: now selective mirroring might be impaired", "assert", "new_injections", "or", "self", ".", "opts", "[", "'CMA_mirrormethod'", "]", "<", "2", "if", "new_injections", "and", "self", ".", "opts", "[", "'CMA_mirrormethod'", "]", "!=", "1", ":", "# otherwise mirrors are done elsewhere", "nmirrors", "=", "0", "assert", "nmirrors", "<=", "popsize", "//", "2", "self", ".", "mirrors_idx", "=", "np", ".", "arange", "(", "nmirrors", ")", "# might never be used", "self", ".", "mirrors_rejected_idx", "=", "[", "]", "# might never be used", "is_feasible", "=", "self", ".", "opts", "[", "'is_feasible'", "]", "# do the work", "fit", "=", "[", "]", "# or np.NaN * np.empty(number)", "X_first", "=", "self", ".", "ask", "(", "popsize", ",", "xmean", "=", "xmean", ",", "gradf", "=", "gradf", ",", "args", "=", "args", ")", "if", "xmean", "is", "None", ":", "xmean", "=", "self", ".", "mean", "# might have changed in self.ask", "X", "=", "[", "]", "for", "k", "in", "xrange", "(", "int", "(", "popsize", ")", ")", ":", "x", ",", "f", "=", "X_first", ".", "pop", "(", "0", ")", ",", "None", "rejected", "=", "-", "1", "while", "rejected", "<", "0", "or", "not", "is_feasible", "(", "x", ",", "f", ")", ":", "# rejection sampling", "rejected", "+=", "1", "if", "rejected", ":", "# resample", "x", "=", "self", ".", "ask", "(", "1", ",", "xmean", ",", "sigma_fac", ")", "[", "0", "]", "elif", "k", ">=", "popsize", "-", "nmirrors", ":", "# mirrored sample", "if", "k", "==", "popsize", "-", "nmirrors", "and", "selective_mirroring", ":", "self", ".", "mirrors_idx", "=", "np", ".", "argsort", "(", "fit", ")", "[", "-", "1", ":", "-", "1", "-", "nmirrors", ":", "-", "1", "]", "x", "=", "self", ".", "get_mirror", "(", "X", "[", "self", ".", "mirrors_idx", "[", "popsize", "-", "1", "-", "k", "]", "]", ")", "if", "rejected", "==", "1", "and", "k", ">=", "popsize", "-", "nmirrors", ":", "self", ".", "mirrors_rejected_idx", ".", "append", "(", "k", ")", "# contraints handling test hardwired ccccccccccc", "length_normalizer", "=", "1", "# zzzzzzzzzzzzzzzzzzzzzzzzz", "f", "=", "func", "(", "x", ",", "*", "args", ")", "if", "kappa", "==", "1", "else", "func", "(", "xmean", "+", "kappa", "*", "length_normalizer", "*", "(", "x", "-", "xmean", ")", ",", "*", "args", ")", "if", "is_feasible", "(", "x", ",", "f", ")", "and", "evaluations", ">", "1", ":", "f", "=", "aggregation", "(", "[", "f", "]", "+", "[", "(", "func", "(", "x", ",", "*", "args", ")", "if", "kappa", "==", "1", "else", "func", "(", "xmean", "+", "kappa", "*", "length_normalizer", "*", "(", "x", "-", "xmean", ")", ",", "*", "args", ")", ")", "for", "_i", "in", "xrange", "(", "int", "(", "evaluations", "-", "1", ")", ")", "]", ")", "if", "rejected", "+", "1", "%", "1000", "==", "0", ":", "print", "(", 
"' %d solutions rejected (f-value NaN or None) at iteration %d'", "%", "(", "rejected", ",", "self", ".", "countiter", ")", ")", "fit", ".", "append", "(", "f", ")", "X", ".", "append", "(", "x", ")", "self", ".", "evaluations_per_f_value", "=", "int", "(", "evaluations", ")", "return", "X", ",", "fit" ]
44.634146
0.002316
def PreprocessSources(
    self, artifacts_registry_object, source_path_specs,
    resolver_context=None):
  """Preprocesses the sources.

  Args:
    artifacts_registry_object (artifacts.ArtifactDefinitionsRegistry):
        artifact definitions registry.
    source_path_specs (list[dfvfs.PathSpec]): path specifications of the
        sources to process.
    resolver_context (Optional[dfvfs.Context]): resolver context.
  """
  detected_operating_systems = []
  for source_path_spec in source_path_specs:
    try:
      file_system, mount_point = self.GetSourceFileSystem(
          source_path_spec, resolver_context=resolver_context)
    except (RuntimeError, dfvfs_errors.BackEndError) as exception:
      logger.error(exception)
      continue

    try:
      searcher = file_system_searcher.FileSystemSearcher(
          file_system, mount_point)

      operating_system = self._DetermineOperatingSystem(searcher)
      if operating_system != definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN:
        preprocess_manager.PreprocessPluginsManager.RunPlugins(
            artifacts_registry_object, file_system, mount_point,
            self.knowledge_base)

      detected_operating_systems.append(operating_system)

    finally:
      file_system.Close()

  if detected_operating_systems:
    logger.info('Preprocessing detected operating systems: {0:s}'.format(
        ', '.join(detected_operating_systems)))
    self.knowledge_base.SetValue(
        'operating_system', detected_operating_systems[0])
[ "def", "PreprocessSources", "(", "self", ",", "artifacts_registry_object", ",", "source_path_specs", ",", "resolver_context", "=", "None", ")", ":", "detected_operating_systems", "=", "[", "]", "for", "source_path_spec", "in", "source_path_specs", ":", "try", ":", "file_system", ",", "mount_point", "=", "self", ".", "GetSourceFileSystem", "(", "source_path_spec", ",", "resolver_context", "=", "resolver_context", ")", "except", "(", "RuntimeError", ",", "dfvfs_errors", ".", "BackEndError", ")", "as", "exception", ":", "logger", ".", "error", "(", "exception", ")", "continue", "try", ":", "searcher", "=", "file_system_searcher", ".", "FileSystemSearcher", "(", "file_system", ",", "mount_point", ")", "operating_system", "=", "self", ".", "_DetermineOperatingSystem", "(", "searcher", ")", "if", "operating_system", "!=", "definitions", ".", "OPERATING_SYSTEM_FAMILY_UNKNOWN", ":", "preprocess_manager", ".", "PreprocessPluginsManager", ".", "RunPlugins", "(", "artifacts_registry_object", ",", "file_system", ",", "mount_point", ",", "self", ".", "knowledge_base", ")", "detected_operating_systems", ".", "append", "(", "operating_system", ")", "finally", ":", "file_system", ".", "Close", "(", ")", "if", "detected_operating_systems", ":", "logger", ".", "info", "(", "'Preprocessing detected operating systems: {0:s}'", ".", "format", "(", "', '", ".", "join", "(", "detected_operating_systems", ")", ")", ")", "self", ".", "knowledge_base", ".", "SetValue", "(", "'operating_system'", ",", "detected_operating_systems", "[", "0", "]", ")" ]
37.463415
0.005711
async def update_api(request: web.Request) -> web.Response:
    """
    This handler accepts a POST request with Content-Type: multipart/form-data
    and file fields in the body named "whl", "serverlib", and "fw". The "whl"
    and "serverlib" files should be valid Python wheels to be installed
    ("whl" is expected generally to be the API server wheel, and "serverlib"
    is expected to be the ot2serverlib wheel). The "fw" file is expected to be
    a Smoothie firmware hex file. The Python files are installed using pip,
    and the firmware file is flashed to the Smoothie board, then the files
    are deleted and a success code is returned.
    """
    log.debug('Update request received')
    data = await request.post()
    try:
        res0 = await install_py(
            data['whl'], request.loop)
        reslist = [res0]
        if 'serverlib' in data.keys():
            res1 = await install_py(
                data['serverlib'], request.loop)
            reslist.append(res1)
        if 'fw' in data.keys():
            res2 = await install_smoothie_firmware(
                data['fw'], request.loop)
            reslist.append(res2)
        res: Dict[str, Any] = {
            'message': [r['message'] for r in reslist],
            'filename': [r['filename'] for r in reslist]
        }
        status = 200
    except Exception as e:
        res = {'message': 'Exception {} raised by update of {}: {}'.format(
            type(e), data, e.__traceback__)}
        status = 500
    return web.json_response(res, status=status)
[ "async", "def", "update_api", "(", "request", ":", "web", ".", "Request", ")", "->", "web", ".", "Response", ":", "log", ".", "debug", "(", "'Update request received'", ")", "data", "=", "await", "request", ".", "post", "(", ")", "try", ":", "res0", "=", "await", "install_py", "(", "data", "[", "'whl'", "]", ",", "request", ".", "loop", ")", "reslist", "=", "[", "res0", "]", "if", "'serverlib'", "in", "data", ".", "keys", "(", ")", ":", "res1", "=", "await", "install_py", "(", "data", "[", "'serverlib'", "]", ",", "request", ".", "loop", ")", "reslist", ".", "append", "(", "res1", ")", "if", "'fw'", "in", "data", ".", "keys", "(", ")", ":", "res2", "=", "await", "install_smoothie_firmware", "(", "data", "[", "'fw'", "]", ",", "request", ".", "loop", ")", "reslist", ".", "append", "(", "res2", ")", "res", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "'message'", ":", "[", "r", "[", "'message'", "]", "for", "r", "in", "reslist", "]", ",", "'filename'", ":", "[", "r", "[", "'filename'", "]", "for", "r", "in", "reslist", "]", "}", "status", "=", "200", "except", "Exception", "as", "e", ":", "res", "=", "{", "'message'", ":", "'Exception {} raised by update of {}: {}'", ".", "format", "(", "type", "(", "e", ")", ",", "data", ",", "e", ".", "__traceback__", ")", "}", "status", "=", "500", "return", "web", ".", "json_response", "(", "res", ",", "status", "=", "status", ")" ]
43.314286
0.000645
def _parsemeta_tmy2(columns, line):
    """Retrieves metadata from the top line of the tmy2 file.

    Parameters
    ----------
    columns : string
        String of column headings in the header
    line : string
        Header string containing DataFrame

    Returns
    -------
    meta : Dict of metadata contained in the header string
    """
    # Remove duplicated spaces, and read in each element
    rawmeta = " ".join(line.split()).split(" ")
    meta = rawmeta[:3]  # take the first string entries
    meta.append(int(rawmeta[3]))
    # Convert to decimal notation with S negative
    longitude = (
        float(rawmeta[5]) + float(rawmeta[6])/60) * (2*(rawmeta[4] == 'N') - 1)
    # Convert to decimal notation with W negative
    latitude = (
        float(rawmeta[8]) + float(rawmeta[9])/60) * (2*(rawmeta[7] == 'E') - 1)
    meta.append(longitude)
    meta.append(latitude)
    meta.append(float(rawmeta[10]))

    # Creates a dictionary of metadata
    meta_dict = dict(zip(columns.split(','), meta))
    return meta_dict
[ "def", "_parsemeta_tmy2", "(", "columns", ",", "line", ")", ":", "# Remove duplicated spaces, and read in each element", "rawmeta", "=", "\" \"", ".", "join", "(", "line", ".", "split", "(", ")", ")", ".", "split", "(", "\" \"", ")", "meta", "=", "rawmeta", "[", ":", "3", "]", "# take the first string entries", "meta", ".", "append", "(", "int", "(", "rawmeta", "[", "3", "]", ")", ")", "# Convert to decimal notation with S negative", "longitude", "=", "(", "float", "(", "rawmeta", "[", "5", "]", ")", "+", "float", "(", "rawmeta", "[", "6", "]", ")", "/", "60", ")", "*", "(", "2", "*", "(", "rawmeta", "[", "4", "]", "==", "'N'", ")", "-", "1", ")", "# Convert to decimal notation with W negative", "latitude", "=", "(", "float", "(", "rawmeta", "[", "8", "]", ")", "+", "float", "(", "rawmeta", "[", "9", "]", ")", "/", "60", ")", "*", "(", "2", "*", "(", "rawmeta", "[", "7", "]", "==", "'E'", ")", "-", "1", ")", "meta", ".", "append", "(", "longitude", ")", "meta", ".", "append", "(", "latitude", ")", "meta", ".", "append", "(", "float", "(", "rawmeta", "[", "10", "]", ")", ")", "# Creates a dictionary of metadata", "meta_dict", "=", "dict", "(", "zip", "(", "columns", ".", "split", "(", "','", ")", ",", "meta", ")", ")", "return", "meta_dict" ]
31.59375
0.00096
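The coordinate handling in the function above hinges on the (2 * (flag == 'N') - 1) idiom: the factor is +1 when the hemisphere flag matches the "positive" hemisphere and -1 otherwise. A minimal standalone sketch of that conversion; the helper name and the sample degree/minute values are made up for illustration:

def dms_to_signed_decimal(degrees, minutes, positive_flag, flag):
    # +1 if the hemisphere flag matches the positive hemisphere, else -1
    sign = 2 * (flag == positive_flag) - 1
    return (degrees + minutes / 60.0) * sign

print(dms_to_signed_decimal(39.0, 45.0, 'N', 'N'))   # 39.75 (northern latitude)
print(dms_to_signed_decimal(104.0, 52.0, 'E', 'W'))  # about -104.87 (western longitude)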
def parse_rec(filename):
    """ Parse a PASCAL VOC xml file """
    tree = ET.parse(filename)
    objects = []
    for obj in tree.findall('object'):
        obj_struct = {}
        obj_struct['name'] = obj.find('name').text
        obj_struct['pose'] = obj.find('pose').text
        obj_struct['truncated'] = int(obj.find('truncated').text)
        obj_struct['difficult'] = int(obj.find('difficult').text)
        bbox = obj.find('bndbox')
        obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
                              int(bbox.find('ymin').text) - 1,
                              int(bbox.find('xmax').text) - 1,
                              int(bbox.find('ymax').text) - 1]
        objects.append(obj_struct)

    return objects
[ "def", "parse_rec", "(", "filename", ")", ":", "tree", "=", "ET", ".", "parse", "(", "filename", ")", "objects", "=", "[", "]", "for", "obj", "in", "tree", ".", "findall", "(", "'object'", ")", ":", "obj_struct", "=", "{", "}", "obj_struct", "[", "'name'", "]", "=", "obj", ".", "find", "(", "'name'", ")", ".", "text", "obj_struct", "[", "'pose'", "]", "=", "obj", ".", "find", "(", "'pose'", ")", ".", "text", "obj_struct", "[", "'truncated'", "]", "=", "int", "(", "obj", ".", "find", "(", "'truncated'", ")", ".", "text", ")", "obj_struct", "[", "'difficult'", "]", "=", "int", "(", "obj", ".", "find", "(", "'difficult'", ")", ".", "text", ")", "bbox", "=", "obj", ".", "find", "(", "'bndbox'", ")", "obj_struct", "[", "'bbox'", "]", "=", "[", "int", "(", "bbox", ".", "find", "(", "'xmin'", ")", ".", "text", ")", "-", "1", ",", "int", "(", "bbox", ".", "find", "(", "'ymin'", ")", ".", "text", ")", "-", "1", ",", "int", "(", "bbox", ".", "find", "(", "'xmax'", ")", ".", "text", ")", "-", "1", ",", "int", "(", "bbox", ".", "find", "(", "'ymax'", ")", ".", "text", ")", "-", "1", "]", "objects", ".", "append", "(", "obj_struct", ")", "return", "objects" ]
40.666667
0.001335
def main(self):
    """The main function containing the loop for communication and process management.

    This function is the heart of the daemon.
    It is responsible for:
    - Client communication
    - Executing commands from clients
    - Updating the status of processes by polling the ProcessHandler
    - Logging
    - Cleanup on exit
    """
    try:
        while self.running:
            # Trigger the processing of finished processes by the ProcessHandler.
            # If there are finished processes we write the log to keep it up to date.
            if self.process_handler.check_finished():
                self.logger.write(self.queue)

            if self.reset and self.process_handler.all_finished():
                # Rotate log and reset queue
                self.logger.rotate(self.queue)
                self.queue.reset()
                self.reset = False

            # Check if the ProcessHandler has any free slots to spawn a new process
            if not self.paused and not self.reset and self.running:
                self.process_handler.check_for_new()

            # This is the communication section of the daemon.
            # 1. Receive message from the client
            # 2. Check payload and call respective function with payload as parameter.
            # 3. Execute logic
            # 4. Return payload with response to client

            # Create list for waitable objects
            readable, writable, failed = select.select(self.read_list, [], [], 1)
            for waiting_socket in readable:
                if waiting_socket is self.socket:
                    # Listening for clients to connect.
                    # Client sockets are added to readlist to be processed.
                    try:
                        client_socket, client_address = self.socket.accept()
                        self.read_list.append(client_socket)
                    except Exception:
                        self.logger.warning('Daemon rejected client')
                else:
                    # Trying to receive instruction from client socket
                    try:
                        instruction = waiting_socket.recv(1048576)
                    except (EOFError, OSError):
                        self.logger.warning('Client died while sending message, dropping received data.')
                        # Remove client socket
                        self.read_list.remove(waiting_socket)
                        waiting_socket.close()
                        instruction = None

                    # Check for valid instruction
                    if instruction is not None:
                        # Check if received data can be unpickled.
                        try:
                            payload = pickle.loads(instruction)
                        except EOFError:
                            # Instruction is ignored if it can't be unpickled
                            self.logger.error('Received message is incomplete, dropping received data.')
                            self.read_list.remove(waiting_socket)
                            waiting_socket.close()
                            # Set invalid payload
                            payload = {'mode': ''}

                        functions = {
                            'add': self.add,
                            'remove': self.remove,
                            'edit': self.edit_command,
                            'switch': self.switch,
                            'send': self.pipe_to_process,
                            'status': self.send_status,
                            'start': self.start,
                            'pause': self.pause,
                            'stash': self.stash,
                            'enqueue': self.enqueue,
                            'restart': self.restart,
                            'kill': self.kill_process,
                            'reset': self.reset_everything,
                            'clear': self.clear,
                            'config': self.set_config,
                            'STOPDAEMON': self.stop_daemon,
                        }

                        if payload['mode'] in functions.keys():
                            self.logger.debug('Payload received:')
                            self.logger.debug(payload)
                            response = functions[payload['mode']](payload)

                            self.logger.debug('Sending payload:')
                            self.logger.debug(response)
                            try:
                                self.respond_client(response, waiting_socket)
                            except (BrokenPipeError):
                                self.logger.warning('Client disconnected during message dispatching. Function successfully executed anyway.')
                                # Remove client socket
                                self.read_list.remove(waiting_socket)
                                waiting_socket.close()
                                instruction = None
                        else:
                            self.respond_client({'message': 'Unknown Command', 'status': 'error'}, waiting_socket)
    except Exception:
        self.logger.exception()

    # Wait for killed or stopped processes to finish (cleanup)
    self.process_handler.wait_for_finish()
    # Close socket, clean everything up and exit
    self.socket.close()
    cleanup(self.config_dir)
    sys.exit(0)
[ "def", "main", "(", "self", ")", ":", "try", ":", "while", "self", ".", "running", ":", "# Trigger the processing of finished processes by the ProcessHandler.", "# If there are finished processes we write the log to keep it up to date.", "if", "self", ".", "process_handler", ".", "check_finished", "(", ")", ":", "self", ".", "logger", ".", "write", "(", "self", ".", "queue", ")", "if", "self", ".", "reset", "and", "self", ".", "process_handler", ".", "all_finished", "(", ")", ":", "# Rotate log and reset queue", "self", ".", "logger", ".", "rotate", "(", "self", ".", "queue", ")", "self", ".", "queue", ".", "reset", "(", ")", "self", ".", "reset", "=", "False", "# Check if the ProcessHandler has any free slots to spawn a new process", "if", "not", "self", ".", "paused", "and", "not", "self", ".", "reset", "and", "self", ".", "running", ":", "self", ".", "process_handler", ".", "check_for_new", "(", ")", "# This is the communication section of the daemon.", "# 1. Receive message from the client", "# 2. Check payload and call respective function with payload as parameter.", "# 3. Execute logic", "# 4. Return payload with response to client", "# Create list for waitable objects", "readable", ",", "writable", ",", "failed", "=", "select", ".", "select", "(", "self", ".", "read_list", ",", "[", "]", ",", "[", "]", ",", "1", ")", "for", "waiting_socket", "in", "readable", ":", "if", "waiting_socket", "is", "self", ".", "socket", ":", "# Listening for clients to connect.", "# Client sockets are added to readlist to be processed.", "try", ":", "client_socket", ",", "client_address", "=", "self", ".", "socket", ".", "accept", "(", ")", "self", ".", "read_list", ".", "append", "(", "client_socket", ")", "except", "Exception", ":", "self", ".", "logger", ".", "warning", "(", "'Daemon rejected client'", ")", "else", ":", "# Trying to receive instruction from client socket", "try", ":", "instruction", "=", "waiting_socket", ".", "recv", "(", "1048576", ")", "except", "(", "EOFError", ",", "OSError", ")", ":", "self", ".", "logger", ".", "warning", "(", "'Client died while sending message, dropping received data.'", ")", "# Remove client socket", "self", ".", "read_list", ".", "remove", "(", "waiting_socket", ")", "waiting_socket", ".", "close", "(", ")", "instruction", "=", "None", "# Check for valid instruction", "if", "instruction", "is", "not", "None", ":", "# Check if received data can be unpickled.", "try", ":", "payload", "=", "pickle", ".", "loads", "(", "instruction", ")", "except", "EOFError", ":", "# Instruction is ignored if it can't be unpickled", "self", ".", "logger", ".", "error", "(", "'Received message is incomplete, dropping received data.'", ")", "self", ".", "read_list", ".", "remove", "(", "waiting_socket", ")", "waiting_socket", ".", "close", "(", ")", "# Set invalid payload", "payload", "=", "{", "'mode'", ":", "''", "}", "functions", "=", "{", "'add'", ":", "self", ".", "add", ",", "'remove'", ":", "self", ".", "remove", ",", "'edit'", ":", "self", ".", "edit_command", ",", "'switch'", ":", "self", ".", "switch", ",", "'send'", ":", "self", ".", "pipe_to_process", ",", "'status'", ":", "self", ".", "send_status", ",", "'start'", ":", "self", ".", "start", ",", "'pause'", ":", "self", ".", "pause", ",", "'stash'", ":", "self", ".", "stash", ",", "'enqueue'", ":", "self", ".", "enqueue", ",", "'restart'", ":", "self", ".", "restart", ",", "'kill'", ":", "self", ".", "kill_process", ",", "'reset'", ":", "self", ".", "reset_everything", ",", "'clear'", ":", "self", ".", "clear", 
",", "'config'", ":", "self", ".", "set_config", ",", "'STOPDAEMON'", ":", "self", ".", "stop_daemon", ",", "}", "if", "payload", "[", "'mode'", "]", "in", "functions", ".", "keys", "(", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Payload received:'", ")", "self", ".", "logger", ".", "debug", "(", "payload", ")", "response", "=", "functions", "[", "payload", "[", "'mode'", "]", "]", "(", "payload", ")", "self", ".", "logger", ".", "debug", "(", "'Sending payload:'", ")", "self", ".", "logger", ".", "debug", "(", "response", ")", "try", ":", "self", ".", "respond_client", "(", "response", ",", "waiting_socket", ")", "except", "(", "BrokenPipeError", ")", ":", "self", ".", "logger", ".", "warning", "(", "'Client disconnected during message dispatching. Function successfully executed anyway.'", ")", "# Remove client socket", "self", ".", "read_list", ".", "remove", "(", "waiting_socket", ")", "waiting_socket", ".", "close", "(", ")", "instruction", "=", "None", "else", ":", "self", ".", "respond_client", "(", "{", "'message'", ":", "'Unknown Command'", ",", "'status'", ":", "'error'", "}", ",", "waiting_socket", ")", "except", "Exception", ":", "self", ".", "logger", ".", "exception", "(", ")", "# Wait for killed or stopped processes to finish (cleanup)", "self", ".", "process_handler", ".", "wait_for_finish", "(", ")", "# Close socket, clean everything up and exit", "self", ".", "socket", ".", "close", "(", ")", "cleanup", "(", "self", ".", "config_dir", ")", "sys", ".", "exit", "(", "0", ")" ]
50.491379
0.002679
def account_info(self):
    """ Certain attributes have a user's account information associated
    with them, such as a gifted or crafted item.

    A dict with two keys: 'persona' and 'id64'.
    None if the attribute has no account information attached to it. """
    account_info = self._attribute.get("account_info")
    if account_info:
        return {"persona": account_info.get("personaname", ""),
                "id64": account_info["steamid"]}
    else:
        return None
[ "def", "account_info", "(", "self", ")", ":", "account_info", "=", "self", ".", "_attribute", ".", "get", "(", "\"account_info\"", ")", "if", "account_info", ":", "return", "{", "\"persona\"", ":", "account_info", ".", "get", "(", "\"personaname\"", ",", "\"\"", ")", ",", "\"id64\"", ":", "account_info", "[", "\"steamid\"", "]", "}", "else", ":", "return", "None" ]
42.583333
0.003831
def add_route(self, gateway, network):
    """
    Add a route to engine. Specify gateway and network.
    If this is the default gateway, use a network address of
    0.0.0.0/0.

    .. note: This will fail if the gateway provided does not have a
             corresponding interface on the network.

    :param str gateway: gateway of an existing interface
    :param str network: network address in cidr format
    :raises EngineCommandFailed: invalid route, possibly no network
    :return: None
    """
    self.make_request(
        EngineCommandFailed,
        method='create',
        resource='add_route',
        params={'gateway': gateway,
                'network': network})
[ "def", "add_route", "(", "self", ",", "gateway", ",", "network", ")", ":", "self", ".", "make_request", "(", "EngineCommandFailed", ",", "method", "=", "'create'", ",", "resource", "=", "'add_route'", ",", "params", "=", "{", "'gateway'", ":", "gateway", ",", "'network'", ":", "network", "}", ")" ]
36.8
0.002649
def connect(provider_id):
    """Starts the provider connection OAuth flow"""
    provider = get_provider_or_404(provider_id)
    callback_url = get_authorize_callback('connect', provider_id)
    allow_view = get_url(config_value('CONNECT_ALLOW_VIEW'))
    pc = request.form.get('next', allow_view)
    session[config_value('POST_OAUTH_CONNECT_SESSION_KEY')] = pc
    return provider.authorize(callback_url)
[ "def", "connect", "(", "provider_id", ")", ":", "provider", "=", "get_provider_or_404", "(", "provider_id", ")", "callback_url", "=", "get_authorize_callback", "(", "'connect'", ",", "provider_id", ")", "allow_view", "=", "get_url", "(", "config_value", "(", "'CONNECT_ALLOW_VIEW'", ")", ")", "pc", "=", "request", ".", "form", ".", "get", "(", "'next'", ",", "allow_view", ")", "session", "[", "config_value", "(", "'POST_OAUTH_CONNECT_SESSION_KEY'", ")", "]", "=", "pc", "return", "provider", ".", "authorize", "(", "callback_url", ")" ]
50
0.002457
def compute_position_log(self, td=None, method='mc', update_deviation=True):
    """
    Args:
        deviation (ndarray): A deviation survey with rows like MD, INC, AZI
        td (Number): The TD of the well, if not the end of the deviation
            survey you're passing.
        method (str):
            'aa': average angle
            'bt': balanced tangential
            'mc': minimum curvature
        update_deviation: This function makes some adjustments to the
            deviation survey, to account for the surface and TD. If you do
            not want to change the stored deviation survey, set to False.

    Returns:
        ndarray. A position log with rows like X-offset, Y-offset, Z-offset
    """
    deviation = np.copy(self.deviation)

    # Adjust to TD.
    if td is not None:
        last_row = np.copy(deviation[-1, :])
        last_row[0] = td
        deviation = np.vstack([deviation, last_row])

    # Adjust to surface if necessary.
    if deviation[0, 0] > 0:
        deviation = np.vstack([np.array([0, 0, 0]), deviation])

    last = deviation[:-1]
    this = deviation[1:]

    diff = this[:, 0] - last[:, 0]

    Ia, Aa = np.radians(last[:, 1]), np.radians(last[:, 2])
    Ib, Ab = np.radians(this[:, 1]), np.radians(this[:, 2])

    if method == 'aa':
        Iavg = (Ia + Ib) / 2
        Aavg = (Aa + Ab) / 2
        delta_N = diff * np.sin(Iavg) * np.cos(Aavg)
        delta_E = diff * np.sin(Iavg) * np.sin(Aavg)
        delta_V = diff * np.cos(Iavg)
    elif method in ('bt', 'mc'):
        delta_N = 0.5 * diff * np.sin(Ia) * np.cos(Aa)
        delta_N += 0.5 * diff * np.sin(Ib) * np.cos(Ab)
        delta_E = 0.5 * diff * np.sin(Ia) * np.sin(Aa)
        delta_E += 0.5 * diff * np.sin(Ib) * np.sin(Ab)
        delta_V = 0.5 * diff * np.cos(Ia)
        delta_V += 0.5 * diff * np.cos(Ib)
    else:
        raise Exception("Method must be one of 'aa', 'bt', 'mc'")

    if method == 'mc':
        _x = np.sin(Ib) * (1 - np.cos(Ab - Aa))
        dogleg = np.arccos(np.cos(Ib - Ia) - np.sin(Ia) * _x)
        dogleg[dogleg == 0] = 1e-9
        rf = 2 / dogleg * np.tan(dogleg / 2)  # ratio factor
        rf[np.isnan(rf)] = 1  # Adjust for NaN.
        delta_N *= rf
        delta_E *= rf
        delta_V *= rf

    # Prepare the output array.
    result = np.zeros_like(deviation, dtype=np.float)

    # Stack the results, add the surface.
    _offsets = np.squeeze(np.dstack([delta_N, delta_E, delta_V]))
    _offsets = np.vstack([np.array([0, 0, 0]), _offsets])
    result += _offsets.cumsum(axis=0)

    if update_deviation:
        self.deviation = deviation

    self.position = result

    return
[ "def", "compute_position_log", "(", "self", ",", "td", "=", "None", ",", "method", "=", "'mc'", ",", "update_deviation", "=", "True", ")", ":", "deviation", "=", "np", ".", "copy", "(", "self", ".", "deviation", ")", "# Adjust to TD.", "if", "td", "is", "not", "None", ":", "last_row", "=", "np", ".", "copy", "(", "deviation", "[", "-", "1", ",", ":", "]", ")", "last_row", "[", "0", "]", "=", "td", "deviation", "=", "np", ".", "vstack", "(", "[", "deviation", ",", "last_row", "]", ")", "# Adjust to surface if necessary.", "if", "deviation", "[", "0", ",", "0", "]", ">", "0", ":", "deviation", "=", "np", ".", "vstack", "(", "[", "np", ".", "array", "(", "[", "0", ",", "0", ",", "0", "]", ")", ",", "deviation", "]", ")", "last", "=", "deviation", "[", ":", "-", "1", "]", "this", "=", "deviation", "[", "1", ":", "]", "diff", "=", "this", "[", ":", ",", "0", "]", "-", "last", "[", ":", ",", "0", "]", "Ia", ",", "Aa", "=", "np", ".", "radians", "(", "last", "[", ":", ",", "1", "]", ")", ",", "np", ".", "radians", "(", "last", "[", ":", ",", "2", "]", ")", "Ib", ",", "Ab", "=", "np", ".", "radians", "(", "this", "[", ":", ",", "1", "]", ")", ",", "np", ".", "radians", "(", "this", "[", ":", ",", "2", "]", ")", "if", "method", "==", "'aa'", ":", "Iavg", "=", "(", "Ia", "+", "Ib", ")", "/", "2", "Aavg", "=", "(", "Aa", "+", "Ab", ")", "/", "2", "delta_N", "=", "diff", "*", "np", ".", "sin", "(", "Iavg", ")", "*", "np", ".", "cos", "(", "Aavg", ")", "delta_E", "=", "diff", "*", "np", ".", "sin", "(", "Iavg", ")", "*", "np", ".", "sin", "(", "Aavg", ")", "delta_V", "=", "diff", "*", "np", ".", "cos", "(", "Iavg", ")", "elif", "method", "in", "(", "'bt'", ",", "'mc'", ")", ":", "delta_N", "=", "0.5", "*", "diff", "*", "np", ".", "sin", "(", "Ia", ")", "*", "np", ".", "cos", "(", "Aa", ")", "delta_N", "+=", "0.5", "*", "diff", "*", "np", ".", "sin", "(", "Ib", ")", "*", "np", ".", "cos", "(", "Ab", ")", "delta_E", "=", "0.5", "*", "diff", "*", "np", ".", "sin", "(", "Ia", ")", "*", "np", ".", "sin", "(", "Aa", ")", "delta_E", "+=", "0.5", "*", "diff", "*", "np", ".", "sin", "(", "Ib", ")", "*", "np", ".", "sin", "(", "Ab", ")", "delta_V", "=", "0.5", "*", "diff", "*", "np", ".", "cos", "(", "Ia", ")", "delta_V", "+=", "0.5", "*", "diff", "*", "np", ".", "cos", "(", "Ib", ")", "else", ":", "raise", "Exception", "(", "\"Method must be one of 'aa', 'bt', 'mc'\"", ")", "if", "method", "==", "'mc'", ":", "_x", "=", "np", ".", "sin", "(", "Ib", ")", "*", "(", "1", "-", "np", ".", "cos", "(", "Ab", "-", "Aa", ")", ")", "dogleg", "=", "np", ".", "arccos", "(", "np", ".", "cos", "(", "Ib", "-", "Ia", ")", "-", "np", ".", "sin", "(", "Ia", ")", "*", "_x", ")", "dogleg", "[", "dogleg", "==", "0", "]", "=", "1e-9", "rf", "=", "2", "/", "dogleg", "*", "np", ".", "tan", "(", "dogleg", "/", "2", ")", "# ratio factor", "rf", "[", "np", ".", "isnan", "(", "rf", ")", "]", "=", "1", "# Adjust for NaN.", "delta_N", "*=", "rf", "delta_E", "*=", "rf", "delta_V", "*=", "rf", "# Prepare the output array.", "result", "=", "np", ".", "zeros_like", "(", "deviation", ",", "dtype", "=", "np", ".", "float", ")", "# Stack the results, add the surface.", "_offsets", "=", "np", ".", "squeeze", "(", "np", ".", "dstack", "(", "[", "delta_N", ",", "delta_E", ",", "delta_V", "]", ")", ")", "_offsets", "=", "np", ".", "vstack", "(", "[", "np", ".", "array", "(", "[", "0", ",", "0", ",", "0", "]", ")", ",", "_offsets", "]", ")", "result", "+=", "_offsets", ".", "cumsum", "(", "axis", "=", "0", ")", "if", "update_deviation", ":", "self", 
".", "deviation", "=", "deviation", "self", ".", "position", "=", "result", "return" ]
36.3875
0.001672
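The 'mc' branch above is the standard minimum-curvature method: the dogleg angle between two survey stations scales the balanced-tangential offsets by the ratio factor RF = (2 / dogleg) * tan(dogleg / 2). A standalone sketch of a single station-to-station step, assuming inclinations and azimuths in degrees; the function name and the survey values are made up for illustration:

import numpy as np

def min_curvature_step(md1, inc1, azi1, md2, inc2, azi2):
    """Offsets (north, east, vertical) between two stations, minimum curvature."""
    dmd = md2 - md1
    i1, a1, i2, a2 = np.radians([inc1, azi1, inc2, azi2])
    dogleg = np.arccos(np.cos(i2 - i1) - np.sin(i1) * np.sin(i2) * (1 - np.cos(a2 - a1)))
    rf = 1.0 if dogleg == 0 else 2 / dogleg * np.tan(dogleg / 2)  # ratio factor
    dn = 0.5 * dmd * (np.sin(i1) * np.cos(a1) + np.sin(i2) * np.cos(a2)) * rf
    de = 0.5 * dmd * (np.sin(i1) * np.sin(a1) + np.sin(i2) * np.sin(a2)) * rf
    dv = 0.5 * dmd * (np.cos(i1) + np.cos(i2)) * rf
    return dn, de, dv

# e.g. 100 units of measured depth, building from 10 to 12 degrees inclination
print(min_curvature_step(1000, 10, 45, 1100, 12, 50))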
def add_event(self, event):
    """
    Add an event to the heap/priority queue

    Parameters
    ----------
    event : Event
    """
    assert event.dep_time_ut <= event.arr_time_ut
    heappush(self.heap, event)
[ "def", "add_event", "(", "self", ",", "event", ")", ":", "assert", "event", ".", "dep_time_ut", "<=", "event", ".", "arr_time_ut", "heappush", "(", "self", ".", "heap", ",", "event", ")" ]
24
0.008032
def padded_accuracy_topk(predictions, labels, k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights
[ "def", "padded_accuracy_topk", "(", "predictions", ",", "labels", ",", "k", ",", "weights_fn", "=", "common_layers", ".", "weights_nonzero", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"padded_accuracy_topk\"", ",", "values", "=", "[", "predictions", ",", "labels", "]", ")", ":", "padded_predictions", ",", "padded_labels", "=", "common_layers", ".", "pad_with_zeros", "(", "predictions", ",", "labels", ")", "weights", "=", "weights_fn", "(", "padded_labels", ")", "effective_k", "=", "tf", ".", "minimum", "(", "k", ",", "common_layers", ".", "shape_list", "(", "padded_predictions", ")", "[", "-", "1", "]", ")", "_", ",", "outputs", "=", "tf", ".", "nn", ".", "top_k", "(", "padded_predictions", ",", "k", "=", "effective_k", ")", "outputs", "=", "tf", ".", "to_int32", "(", "outputs", ")", "padded_labels", "=", "tf", ".", "to_int32", "(", "padded_labels", ")", "padded_labels", "=", "tf", ".", "expand_dims", "(", "padded_labels", ",", "axis", "=", "-", "1", ")", "padded_labels", "+=", "tf", ".", "zeros_like", "(", "outputs", ")", "# Pad to same shape.", "same", "=", "tf", ".", "to_float", "(", "tf", ".", "equal", "(", "outputs", ",", "padded_labels", ")", ")", "same_topk", "=", "tf", ".", "reduce_sum", "(", "same", ",", "axis", "=", "-", "1", ")", "return", "same_topk", ",", "weights" ]
50.421053
0.003074
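The metric above counts a position as correct when the true (non-padding) label appears anywhere among the k largest logits at that position. A framework-free sketch of the same idea in plain NumPy; the helper name and the logits/labels are made up for illustration:

import numpy as np

def topk_accuracy(logits, labels, k):
    """Fraction of non-zero (non-padding) positions whose label is in the top-k logits."""
    topk = np.argsort(logits, axis=-1)[..., -k:]      # indices of the k largest logits
    hit = (topk == labels[..., None]).any(axis=-1)    # label found among the top-k?
    mask = labels != 0                                 # ignore padding positions
    return hit[mask].mean()

logits = np.array([[0.1, 0.5, 0.2, 0.9],   # top-2 classes: {3, 1}
                   [0.8, 0.1, 0.6, 0.3]])  # top-2 classes: {0, 2}
labels = np.array([1, 2])
print(topk_accuracy(logits, labels, k=2))  # 1.0 -- both labels are in their top-2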
def get_device_scale(self):
    """Returns the previous device offset set by :meth:`set_device_scale`.

    *New in cairo 1.14.*

    *New in cairocffi 0.9.*

    """
    size = ffi.new('double[2]')
    cairo.cairo_surface_get_device_scale(self._pointer, size + 0, size + 1)
    return tuple(size)
[ "def", "get_device_scale", "(", "self", ")", ":", "size", "=", "ffi", ".", "new", "(", "'double[2]'", ")", "cairo", ".", "cairo_surface_get_device_scale", "(", "self", ".", "_pointer", ",", "size", "+", "0", ",", "size", "+", "1", ")", "return", "tuple", "(", "size", ")" ]
28.636364
0.006154
def rates(ctx, opts):
    """Check current API rate limits."""
    click.echo("Retrieving rate limits ... ", nl=False)

    context_msg = "Failed to retrieve status!"
    with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
        with maybe_spinner(opts):
            resources_limits = get_rate_limits()

    click.secho("OK", fg="green")

    headers = ["Resource", "Throttled", "Remaining", "Interval (Seconds)", "Reset"]

    rows = []
    for resource, limits in six.iteritems(resources_limits):
        rows.append(
            [
                click.style(resource, fg="cyan"),
                click.style(
                    "Yes" if limits.throttled else "No",
                    fg="red" if limits.throttled else "green",
                ),
                "%(remaining)s/%(limit)s"
                % {
                    "remaining": click.style(
                        six.text_type(limits.remaining), fg="yellow"
                    ),
                    "limit": click.style(six.text_type(limits.limit), fg="yellow"),
                },
                click.style(six.text_type(limits.interval), fg="blue"),
                click.style(six.text_type(limits.reset), fg="magenta"),
            ]
        )

    if resources_limits:
        click.echo()

    utils.pretty_print_table(headers, rows)

    click.echo()

    num_results = len(resources_limits)
    list_suffix = "resource%s" % ("s" if num_results != 1 else "")
    utils.pretty_print_list_info(num_results=num_results, suffix=list_suffix)
[ "def", "rates", "(", "ctx", ",", "opts", ")", ":", "click", ".", "echo", "(", "\"Retrieving rate limits ... \"", ",", "nl", "=", "False", ")", "context_msg", "=", "\"Failed to retrieve status!\"", "with", "handle_api_exceptions", "(", "ctx", ",", "opts", "=", "opts", ",", "context_msg", "=", "context_msg", ")", ":", "with", "maybe_spinner", "(", "opts", ")", ":", "resources_limits", "=", "get_rate_limits", "(", ")", "click", ".", "secho", "(", "\"OK\"", ",", "fg", "=", "\"green\"", ")", "headers", "=", "[", "\"Resource\"", ",", "\"Throttled\"", ",", "\"Remaining\"", ",", "\"Interval (Seconds)\"", ",", "\"Reset\"", "]", "rows", "=", "[", "]", "for", "resource", ",", "limits", "in", "six", ".", "iteritems", "(", "resources_limits", ")", ":", "rows", ".", "append", "(", "[", "click", ".", "style", "(", "resource", ",", "fg", "=", "\"cyan\"", ")", ",", "click", ".", "style", "(", "\"Yes\"", "if", "limits", ".", "throttled", "else", "\"No\"", ",", "fg", "=", "\"red\"", "if", "limits", ".", "throttled", "else", "\"green\"", ",", ")", ",", "\"%(remaining)s/%(limit)s\"", "%", "{", "\"remaining\"", ":", "click", ".", "style", "(", "six", ".", "text_type", "(", "limits", ".", "remaining", ")", ",", "fg", "=", "\"yellow\"", ")", ",", "\"limit\"", ":", "click", ".", "style", "(", "six", ".", "text_type", "(", "limits", ".", "limit", ")", ",", "fg", "=", "\"yellow\"", ")", ",", "}", ",", "click", ".", "style", "(", "six", ".", "text_type", "(", "limits", ".", "interval", ")", ",", "fg", "=", "\"blue\"", ")", ",", "click", ".", "style", "(", "six", ".", "text_type", "(", "limits", ".", "reset", ")", ",", "fg", "=", "\"magenta\"", ")", ",", "]", ")", "if", "resources_limits", ":", "click", ".", "echo", "(", ")", "utils", ".", "pretty_print_table", "(", "headers", ",", "rows", ")", "click", ".", "echo", "(", ")", "num_results", "=", "len", "(", "resources_limits", ")", "list_suffix", "=", "\"resource%s\"", "%", "(", "\"s\"", "if", "num_results", "!=", "1", "else", "\"\"", ")", "utils", ".", "pretty_print_list_info", "(", "num_results", "=", "num_results", ",", "suffix", "=", "list_suffix", ")" ]
34.860465
0.001947
def _add_to_found_storage(self, storage_url):
    """
    Will first normalize the img src and then check if this bucket was
    discovered before. If it is in storage_urls_found, the function returns.
    Else, it sends a GET for the original URL (normalized image src) and will
    look for "AmazonS3" in the "Server" response header. If found, will add
    to URL with the resource stripped.

    :param storage_url: img src scraped from page
    """
    storage_url = self._normalize_url(storage_url)
    bucket = S3Bucket(storage_url)
    if bucket.url not in self.storage_urls_found:
        try:
            res = self.request_handler.send("GET", url=storage_url)
            if self._is_amazon_s3_bucket(res):
                self.storage_urls_found.add(bucket.url)
                self.s3_buckets.add(bucket)
        except RequestHandlerException:
            # Cannot connect to storage, move on
            pass
[ "def", "_add_to_found_storage", "(", "self", ",", "storage_url", ")", ":", "storage_url", "=", "self", ".", "_normalize_url", "(", "storage_url", ")", "bucket", "=", "S3Bucket", "(", "storage_url", ")", "if", "bucket", ".", "url", "not", "in", "self", ".", "storage_urls_found", ":", "try", ":", "res", "=", "self", ".", "request_handler", ".", "send", "(", "\"GET\"", ",", "url", "=", "storage_url", ")", "if", "self", ".", "_is_amazon_s3_bucket", "(", "res", ")", ":", "self", ".", "storage_urls_found", ".", "add", "(", "bucket", ".", "url", ")", "self", ".", "s3_buckets", ".", "add", "(", "bucket", ")", "except", "RequestHandlerException", ":", "# Cannot connect to storage, move on", "pass" ]
46.428571
0.00402
def get_pattern_link_topattern(self, patternnumber):
    """Get the 'linked pattern' value for a given pattern.

    Args:
        patternnumber (integer): From 0-7

    Returns:
        The 'linked pattern' value (int).
    """
    _checkPatternNumber(patternnumber)
    address = _calculateRegisterAddress('linkpattern', patternnumber)
    return self.read_register(address)
[ "def", "get_pattern_link_topattern", "(", "self", ",", "patternnumber", ")", ":", "_checkPatternNumber", "(", "patternnumber", ")", "address", "=", "_calculateRegisterAddress", "(", "'linkpattern'", ",", "patternnumber", ")", "return", "self", ".", "read_register", "(", "address", ")" ]
31.769231
0.007059
def get_backend_expiry(self, expiry=DEFAULT_EXPIRY):
    """
    Return the expiry value usable by this backend based upon the provided
    timeout.
    """
    if expiry == DEFAULT_EXPIRY:
        expiry = self.default_expiry
    elif expiry == 0:
        # avoid time.time() related precision issues
        expiry = -1
    return None if expiry is None else time.time() + expiry
[ "def", "get_backend_expiry", "(", "self", ",", "expiry", "=", "DEFAULT_EXPIRY", ")", ":", "if", "expiry", "==", "DEFAULT_EXPIRY", ":", "expiry", "=", "self", ".", "default_expiry", "elif", "expiry", "==", "0", ":", "# avoid time.time() related precision issues", "expiry", "=", "-", "1", "return", "None", "if", "expiry", "is", "None", "else", "time", ".", "time", "(", ")", "+", "expiry" ]
37.363636
0.004751
def lock(self, page):
    """Locks *page*."""
    result = self._dokuwiki.send('dokuwiki.setLocks',
                                 lock=[page], unlock=[])
    if result['lockfail']:
        raise DokuWikiError('unable to lock page')
[ "def", "lock", "(", "self", ",", "page", ")", ":", "result", "=", "self", ".", "_dokuwiki", ".", "send", "(", "'dokuwiki.setLocks'", ",", "lock", "=", "[", "page", "]", ",", "unlock", "=", "[", "]", ")", "if", "result", "[", "'lockfail'", "]", ":", "raise", "DokuWikiError", "(", "'unable to lock page'", ")" ]
41.5
0.007874
def save_any_file(data, filename):
    """
    Determines a Saver based on the file extension. Returns whether
    successfully saved.

    :param filename: the name of the file to save
    :type filename: str
    :param data: the data to save
    :type data: Instances
    :return: whether successfully saved
    :rtype: bool
    """
    saver = saver_for_file(filename)
    if saver is None:
        return False
    else:
        saver.save_file(data, filename)
        return True
[ "def", "save_any_file", "(", "data", ",", "filename", ")", ":", "saver", "=", "saver_for_file", "(", "filename", ")", "if", "saver", "is", "None", ":", "return", "False", "else", ":", "saver", ".", "save_file", "(", "data", ",", "filename", ")", "return", "True" ]
27.529412
0.004132
def open_session(self):
    """
    Open tensorflow session. Exposed for memory management.
    """
    with self._graph.as_default():
        init = tf.initialize_all_variables()
        self._sess = tf.Session()
        self._sess.run(init)
[ "def", "open_session", "(", "self", ")", ":", "with", "self", ".", "_graph", ".", "as_default", "(", ")", ":", "init", "=", "tf", ".", "initialize_all_variables", "(", ")", "self", ".", "_sess", "=", "tf", ".", "Session", "(", ")", "self", ".", "_sess", ".", "run", "(", "init", ")" ]
41.5
0.007874
def interactive_output(f, controls):
    """Connect widget controls to a function.

    This function does not generate a user interface for the widgets (unlike
    `interact`). This enables customisation of the widget user interface
    layout. The user interface layout must be defined and displayed manually.
    """

    out = Output()
    def observer(change):
        kwargs = {k: v.value for k, v in controls.items()}
        show_inline_matplotlib_plots()
        with out:
            clear_output(wait=True)
            f(**kwargs)
            show_inline_matplotlib_plots()
    for k, w in controls.items():
        w.observe(observer, 'value')
    show_inline_matplotlib_plots()
    observer(None)
    return out
[ "def", "interactive_output", "(", "f", ",", "controls", ")", ":", "out", "=", "Output", "(", ")", "def", "observer", "(", "change", ")", ":", "kwargs", "=", "{", "k", ":", "v", ".", "value", "for", "k", ",", "v", "in", "controls", ".", "items", "(", ")", "}", "show_inline_matplotlib_plots", "(", ")", "with", "out", ":", "clear_output", "(", "wait", "=", "True", ")", "f", "(", "*", "*", "kwargs", ")", "show_inline_matplotlib_plots", "(", ")", "for", "k", ",", "w", "in", "controls", ".", "items", "(", ")", ":", "w", ".", "observe", "(", "observer", ",", "'value'", ")", "show_inline_matplotlib_plots", "(", ")", "observer", "(", "None", ")", "return", "out" ]
33.380952
0.008322
def return_dat(self, chan, begsam, endsam):
    """Return the data as 2D numpy.ndarray.

    Parameters
    ----------
    chan : int or list
        index (indices) of the channels to read
    begsam : int
        index of the first sample
    endsam : int
        index of the last sample

    Returns
    -------
    numpy.ndarray
        A 2d matrix, with dimension chan X samples
    """
    dat_begsam = max(begsam, 0)
    dat_endsam = min(endsam, self.n_samples)
    dur = dat_endsam - dat_begsam

    dtype_onlychan = dtype({k: v for k, v in self.dtype.fields.items()
                            if v[0].kind != 'S'})

    # make sure we read some data at least, otherwise segfault
    if dat_begsam < self.n_samples and dat_endsam > 0:

        with self.filename.open('rb') as f:
            f.seek(self.header_len, SEEK_SET)  # skip header

            f.seek(self.dtype.itemsize * dat_begsam, SEEK_CUR)
            dat = fromfile(f, dtype=self.dtype, count=dur)

        dat = ndarray(dat.shape, dtype_onlychan, dat, 0,
                      dat.strides).view((dtype_onlychan[0],
                                         len(dtype_onlychan.names))).T

    else:
        n_chan = len(dtype_onlychan.names)
        dat = empty((n_chan, 0))

    if begsam < 0:
        pad = empty((dat.shape[0], 0 - begsam))
        pad.fill(NaN)
        dat = c_[pad, dat]

    if endsam >= self.n_samples:
        pad = empty((dat.shape[0], endsam - self.n_samples))
        pad.fill(NaN)
        dat = c_[dat, pad]

    return dat[chan, :] * self.gain[chan][:, None]
[ "def", "return_dat", "(", "self", ",", "chan", ",", "begsam", ",", "endsam", ")", ":", "dat_begsam", "=", "max", "(", "begsam", ",", "0", ")", "dat_endsam", "=", "min", "(", "endsam", ",", "self", ".", "n_samples", ")", "dur", "=", "dat_endsam", "-", "dat_begsam", "dtype_onlychan", "=", "dtype", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "dtype", ".", "fields", ".", "items", "(", ")", "if", "v", "[", "0", "]", ".", "kind", "!=", "'S'", "}", ")", "# make sure we read some data at least, otherwise segfault", "if", "dat_begsam", "<", "self", ".", "n_samples", "and", "dat_endsam", ">", "0", ":", "with", "self", ".", "filename", ".", "open", "(", "'rb'", ")", "as", "f", ":", "f", ".", "seek", "(", "self", ".", "header_len", ",", "SEEK_SET", ")", "# skip header", "f", ".", "seek", "(", "self", ".", "dtype", ".", "itemsize", "*", "dat_begsam", ",", "SEEK_CUR", ")", "dat", "=", "fromfile", "(", "f", ",", "dtype", "=", "self", ".", "dtype", ",", "count", "=", "dur", ")", "dat", "=", "ndarray", "(", "dat", ".", "shape", ",", "dtype_onlychan", ",", "dat", ",", "0", ",", "dat", ".", "strides", ")", ".", "view", "(", "(", "dtype_onlychan", "[", "0", "]", ",", "len", "(", "dtype_onlychan", ".", "names", ")", ")", ")", ".", "T", "else", ":", "n_chan", "=", "len", "(", "dtype_onlychan", ".", "names", ")", "dat", "=", "empty", "(", "(", "n_chan", ",", "0", ")", ")", "if", "begsam", "<", "0", ":", "pad", "=", "empty", "(", "(", "dat", ".", "shape", "[", "0", "]", ",", "0", "-", "begsam", ")", ")", "pad", ".", "fill", "(", "NaN", ")", "dat", "=", "c_", "[", "pad", ",", "dat", "]", "if", "endsam", ">=", "self", ".", "n_samples", ":", "pad", "=", "empty", "(", "(", "dat", ".", "shape", "[", "0", "]", ",", "endsam", "-", "self", ".", "n_samples", ")", ")", "pad", ".", "fill", "(", "NaN", ")", "dat", "=", "c_", "[", "dat", ",", "pad", "]", "return", "dat", "[", "chan", ",", ":", "]", "*", "self", ".", "gain", "[", "chan", "]", "[", ":", ",", "None", "]" ]
30.803922
0.002468
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    egg_link = egg_link_path(dist)
    if os.path.exists(egg_link):
        return egg_link
    return dist.location
[ "def", "dist_location", "(", "dist", ")", ":", "egg_link", "=", "egg_link_path", "(", "dist", ")", "if", "os", ".", "path", ".", "exists", "(", "egg_link", ")", ":", "return", "egg_link", "return", "dist", ".", "location" ]
33
0.002457
def effectiveTagSet(self):
    """Return a :class:`~pyasn1.type.tag.TagSet` object of the currently
    initialized component or self (if |ASN.1| is tagged)."""
    if self.tagSet:
        return self.tagSet
    else:
        component = self.getComponent()
        return component.effectiveTagSet
[ "def", "effectiveTagSet", "(", "self", ")", ":", "if", "self", ".", "tagSet", ":", "return", "self", ".", "tagSet", "else", ":", "component", "=", "self", ".", "getComponent", "(", ")", "return", "component", ".", "effectiveTagSet" ]
44.571429
0.009434
def request(self, cmd, *args, **kwargs):
    """
    Request data from the server.

    :param cmd: repo handler command.
    :returns: Result.
    """
    params = {'action': cmd}
    # TODO: serialize the kwargs?
    params.update(kwargs)
    return self.__request(self.url, params)
[ "def", "request", "(", "self", ",", "cmd", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'action'", ":", "cmd", "}", "#TODO: serialize the kwargs?", "params", ".", "update", "(", "kwargs", ")", "return", "self", ".", "__request", "(", "self", ".", "url", ",", "params", ")" ]
25.833333
0.009346
def datetime_to_synergy(time_qualifier, dt):
    """ method parses datetime and returns Synergy Date """
    if time_qualifier == QUALIFIER_HOURLY:
        date_format = SYNERGY_HOURLY_PATTERN
    elif time_qualifier == QUALIFIER_DAILY:
        date_format = SYNERGY_DAILY_PATTERN
    elif time_qualifier == QUALIFIER_MONTHLY:
        date_format = SYNERGY_MONTHLY_PATTERN
    elif time_qualifier == QUALIFIER_YEARLY:
        date_format = SYNERGY_YEARLY_PATTERN
    elif time_qualifier == QUALIFIER_REAL_TIME:
        date_format = SYNERGY_SESSION_PATTERN
    else:
        raise ValueError('unknown time qualifier: {0}'.format(time_qualifier))
    return dt.strftime(date_format)
[ "def", "datetime_to_synergy", "(", "time_qualifier", ",", "dt", ")", ":", "if", "time_qualifier", "==", "QUALIFIER_HOURLY", ":", "date_format", "=", "SYNERGY_HOURLY_PATTERN", "elif", "time_qualifier", "==", "QUALIFIER_DAILY", ":", "date_format", "=", "SYNERGY_DAILY_PATTERN", "elif", "time_qualifier", "==", "QUALIFIER_MONTHLY", ":", "date_format", "=", "SYNERGY_MONTHLY_PATTERN", "elif", "time_qualifier", "==", "QUALIFIER_YEARLY", ":", "date_format", "=", "SYNERGY_YEARLY_PATTERN", "elif", "time_qualifier", "==", "QUALIFIER_REAL_TIME", ":", "date_format", "=", "SYNERGY_SESSION_PATTERN", "else", ":", "raise", "ValueError", "(", "'unknown time qualifier: {0}'", ".", "format", "(", "time_qualifier", ")", ")", "return", "dt", ".", "strftime", "(", "date_format", ")" ]
44.4
0.001471
def clustering_coef_bu(G):
    '''
    The clustering coefficient is the fraction of triangles around a node
    (equiv. the fraction of a node's neighbors that are neighbors of each
    other).

    Parameters
    ----------
    A : NxN np.ndarray
        binary undirected connection matrix

    Returns
    -------
    C : Nx1 np.ndarray
        clustering coefficient vector
    '''
    n = len(G)
    C = np.zeros((n,))

    for u in range(n):
        V, = np.where(G[u, :])
        k = len(V)
        if k >= 2:  # degree must be at least 2
            S = G[np.ix_(V, V)]
            C[u] = np.sum(S) / (k * k - k)

    return C
[ "def", "clustering_coef_bu", "(", "G", ")", ":", "n", "=", "len", "(", "G", ")", "C", "=", "np", ".", "zeros", "(", "(", "n", ",", ")", ")", "for", "u", "in", "range", "(", "n", ")", ":", "V", ",", "=", "np", ".", "where", "(", "G", "[", "u", ",", ":", "]", ")", "k", "=", "len", "(", "V", ")", "if", "k", ">=", "2", ":", "# degree must be at least 2", "S", "=", "G", "[", "np", ".", "ix_", "(", "V", ",", "V", ")", "]", "C", "[", "u", "]", "=", "np", ".", "sum", "(", "S", ")", "/", "(", "k", "*", "k", "-", "k", ")", "return", "C" ]
23.192308
0.001592
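For a node of degree k, the loop above counts the edges among its k neighbors (np.sum(S) counts each undirected edge twice in the symmetric submatrix) and divides by k*(k-1), the doubled number of possible neighbor pairs. A brief usage sketch on a made-up 4-node adjacency matrix, assuming the function above is available in scope:

import numpy as np

# 4-node undirected graph: node 0 is connected to 1, 2, 3; nodes 1 and 2 are
# also connected to each other, so exactly one of node 0's three possible
# neighbor pairs is linked -> C[0] = 1/3.
G = np.array([[0, 1, 1, 1],
              [1, 0, 1, 0],
              [1, 1, 0, 0],
              [1, 0, 0, 0]], dtype=float)

print(clustering_coef_bu(G))  # approx. [0.333, 1.0, 1.0, 0.0]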
def pack_tups(*args):
    """Pack an arbitrary set of iterables and non-iterables into tuples.

    Function packs a set of inputs with arbitrary iterability into tuples.
    Iterability is tested with :func:`iterable`. Non-iterable inputs are
    repeated in each output tuple. Iterable inputs are expanded uniformly
    across the output tuples. For consistency, all iterables must be the
    same length.

    The input arguments are parsed such that bare strings are treated as
    **NON-ITERABLE**, through the use of a local subclass of |str| that
    cripples the ``__iter__()`` method. Any strings passed are returned
    in the packed tuples as standard, **ITERABLE** instances of |str|,
    however.

    The order of the input arguments is retained within each output tuple.

    No structural conversion is attempted on the arguments.

    If all inputs are non-iterable, a list containing a single |tuple| will
    be returned.

    Parameters
    ----------
    \*args
        Arbitrary number of arbitrary mix of iterable and non-iterable
        objects to be packed into tuples.

    Returns
    -------
    tups
        |list| of |tuple| -- Number of tuples returned is equal to the
        length of the iterables passed in `*args`

    Raises
    ------
    ~exceptions.ValueError
        If any iterable objects are of different lengths

    """

    # Imports
    import numpy as np

    # Debug flag
    _DEBUG = False

    # Marker value for non-iterable items
    NOT_ITER = -1

    # Uninitialized test value
    UNINIT_VAL = -1

    # Print the input if in debug mode
    if _DEBUG:  # pragma: no cover
        print("args = {0}".format(args))

    # Non-iterable subclass of str
    class StrNoIter(str):
        """ Non-iterable subclass of |str|. """
        def __iter__(self):
            raise NotImplementedError("Non-iterable string")
        ## end def __iter__
    ## end class StrNoIter

    # Re-wrap input arguments with non-iterable strings if required
    mod_args = [(StrNoIter(a) if isinstance(a, str) else a) for a in args]

    # Determine the length or non-iterable status of each item and store
    #  the maximum value (depends on NOT_ITER < 0)
    iterlens = [(len(a) if iterable(a) else NOT_ITER) for a in mod_args]
    maxiter = max(iterlens)

    # Check to ensure all iterables are the same length
    if not all(map(lambda v: v in (NOT_ITER, maxiter), iterlens)):
        raise ValueError("All iterable items must be of equal length")
    ## end if

    # If everything is non-iterable, just return the args tuple wrapped in
    #  a list (as above, depends on NOT_ITER < 0)
    if maxiter == NOT_ITER:
        return [args]
    ## end if

    # Swap any non-iterables for a suitable length repeat, and zip to
    #  tuples for return
    tups = list(zip(*[(np.repeat(a, maxiter) if l == NOT_ITER else a)
                      for (a, l) in zip(mod_args, iterlens)]))

    # Dump the resulting tuples, if in debug mode
    if _DEBUG:  # pragma: no cover
        print("tups = {0}".format(tups))
    ## end if

    # Return the tuples
    return tups
[ "def", "pack_tups", "(", "*", "args", ")", ":", "# Imports", "import", "numpy", "as", "np", "# Debug flag", "_DEBUG", "=", "False", "# Marker value for non-iterable items", "NOT_ITER", "=", "-", "1", "# Uninitialized test value", "UNINIT_VAL", "=", "-", "1", "# Print the input if in debug mode", "if", "_DEBUG", ":", "# pragma: no cover", "print", "(", "\"args = {0}\"", ".", "format", "(", "args", ")", ")", "# Non-iterable subclass of str", "class", "StrNoIter", "(", "str", ")", ":", "\"\"\" Non-iterable subclass of |str|. \"\"\"", "def", "__iter__", "(", "self", ")", ":", "raise", "NotImplementedError", "(", "\"Non-iterable string\"", ")", "## end def __iter__", "## end class StrNoIter", "# Re-wrap input arguments with non-iterable strings if required", "mod_args", "=", "[", "(", "StrNoIter", "(", "a", ")", "if", "isinstance", "(", "a", ",", "str", ")", "else", "a", ")", "for", "a", "in", "args", "]", "# Determine the length or non-iterable status of each item and store", "# the maximum value (depends on NOT_ITER < 0)", "iterlens", "=", "[", "(", "len", "(", "a", ")", "if", "iterable", "(", "a", ")", "else", "NOT_ITER", ")", "for", "a", "in", "mod_args", "]", "maxiter", "=", "max", "(", "iterlens", ")", "# Check to ensure all iterables are the same length", "if", "not", "all", "(", "map", "(", "lambda", "v", ":", "v", "in", "(", "NOT_ITER", ",", "maxiter", ")", ",", "iterlens", ")", ")", ":", "raise", "ValueError", "(", "\"All iterable items must be of equal length\"", ")", "## end if", "# If everything is non-iterable, just return the args tuple wrapped in", "# a list (as above, depends on NOT_ITER < 0)", "if", "maxiter", "==", "NOT_ITER", ":", "return", "[", "args", "]", "## end if", "# Swap any non-iterables for a suitable length repeat, and zip to", "# tuples for return", "tups", "=", "list", "(", "zip", "(", "*", "[", "(", "np", ".", "repeat", "(", "a", ",", "maxiter", ")", "if", "l", "==", "NOT_ITER", "else", "a", ")", "for", "(", "a", ",", "l", ")", "in", "zip", "(", "mod_args", ",", "iterlens", ")", "]", ")", ")", "# Dump the resulting tuples, if in debug mode", "if", "_DEBUG", ":", "# pragma: no cover", "print", "(", "\"tups = {0}\"", ".", "format", "(", "tups", ")", ")", "## end if", "# Return the tuples", "return", "tups" ]
31.270833
0.003229
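A hypothetical usage sketch for the row above. The import path is a placeholder, pack_tups relies on an iterable() helper defined elsewhere in its module, and the shown outputs are inferred from the docstring rather than executed, so treat this as illustrative only:

from your_module import pack_tups  # placeholder import, adjust to wherever this function lives

# one iterable of length 2, two non-iterables (the string counts as non-iterable)
pack_tups(1, 'ab', [10, 20])
# -> roughly [(1, 'ab', 10), (1, 'ab', 20)]; the scalars are repeated in each tuple

# all non-iterable inputs collapse to a single tuple
pack_tups(3, 'xyz')
# -> [(3, 'xyz')]

# iterables of different lengths are rejected
pack_tups([1, 2], [1, 2, 3])
# -> raises ValueError: All iterable items must be of equal length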
def ifusergroup(parser, token):
    """ Check to see if the currently logged in user belongs to a specific
    group. Requires the Django authentication contrib app and middleware.

    Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
    {% ifusergroup Admins Clients Sellers %} ... {% else %} ... {% endifusergroup %}
    """
    tokensp = token.split_contents()
    groups = tokensp[1:]
    if not groups:
        raise template.TemplateSyntaxError("Tag 'ifusergroup' requires at least 1 argument.")

    nodelist_true = parser.parse(('else', 'endifusergroup'))
    token = parser.next_token()

    if token.contents == 'else':
        nodelist_false = parser.parse(('endifusergroup',))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()

    return GroupCheckNode(groups, nodelist_true, nodelist_false)
[ "def", "ifusergroup", "(", "parser", ",", "token", ")", ":", "try", ":", "tokensp", "=", "token", ".", "split_contents", "(", ")", "groups", "=", "[", "]", "groups", "+=", "tokensp", "[", "1", ":", "]", "except", "ValueError", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"Tag 'ifusergroup' requires at least 1 argument.\"", ")", "nodelist_true", "=", "parser", ".", "parse", "(", "(", "'else'", ",", "'endifusergroup'", ")", ")", "token", "=", "parser", ".", "next_token", "(", ")", "if", "token", ".", "contents", "==", "'else'", ":", "nodelist_false", "=", "parser", ".", "parse", "(", "tuple", "(", "[", "'endifusergroup'", ",", "]", ")", ")", "parser", ".", "delete_first_token", "(", ")", "else", ":", "nodelist_false", "=", "NodeList", "(", ")", "return", "GroupCheckNode", "(", "groups", ",", "nodelist_true", ",", "nodelist_false", ")" ]
35.2
0.005531
def read_lsm_timestamps(fh): """Read LSM time stamps from file and return as list.""" size, count = struct.unpack('<ii', fh.read(8)) if size != (8 + 8 * count): log.warning('read_lsm_timestamps: invalid LSM TimeStamps block') return [] # return struct.unpack('<%dd' % count, fh.read(8*count)) return fh.read_array('<f8', count=count)
[ "def", "read_lsm_timestamps", "(", "fh", ")", ":", "size", ",", "count", "=", "struct", ".", "unpack", "(", "'<ii'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "size", "!=", "(", "8", "+", "8", "*", "count", ")", ":", "log", ".", "warning", "(", "'read_lsm_timestamps: invalid LSM TimeStamps block'", ")", "return", "[", "]", "# return struct.unpack('<%dd' % count, fh.read(8*count))", "return", "fh", ".", "read_array", "(", "'<f8'", ",", "count", "=", "count", ")" ]
45.25
0.00271
def cli(env, identifier): """Cancel global IP.""" mgr = SoftLayer.NetworkManager(env.client) global_ip_id = helpers.resolve_id(mgr.resolve_global_ip_ids, identifier, name='global ip') if not (env.skip_confirmations or formatting.no_going_back(global_ip_id)): raise exceptions.CLIAbort('Aborted') mgr.cancel_global_ip(global_ip_id)
[ "def", "cli", "(", "env", ",", "identifier", ")", ":", "mgr", "=", "SoftLayer", ".", "NetworkManager", "(", "env", ".", "client", ")", "global_ip_id", "=", "helpers", ".", "resolve_id", "(", "mgr", ".", "resolve_global_ip_ids", ",", "identifier", ",", "name", "=", "'global ip'", ")", "if", "not", "(", "env", ".", "skip_confirmations", "or", "formatting", ".", "no_going_back", "(", "global_ip_id", ")", ")", ":", "raise", "exceptions", ".", "CLIAbort", "(", "'Aborted'", ")", "mgr", ".", "cancel_global_ip", "(", "global_ip_id", ")" ]
35.363636
0.002506
def do_notebook(self, name):
    """Run a notebook file after optionally converting it to a python file."""
    CONVERT_NOTEBOOKS = int(os.getenv('CONVERT_NOTEBOOKS', True))
    s = StringIO()
    if mock:
        # Start the patches via the context-manager protocol so that
        # sys.stdout / sys.stderr are actually redirected to the MockDevice.
        out = unittest.mock.patch('sys.stdout', new=MockDevice(s))
        err = unittest.mock.patch('sys.stderr', new=MockDevice(s))
        with out, err:
            self._do_notebook(name, CONVERT_NOTEBOOKS)
    else:
        self._do_notebook(name, CONVERT_NOTEBOOKS)
    self.assertTrue(True)
[ "def", "do_notebook", "(", "self", ",", "name", ")", ":", "CONVERT_NOTEBOOKS", "=", "int", "(", "os", ".", "getenv", "(", "'CONVERT_NOTEBOOKS'", ",", "True", ")", ")", "s", "=", "StringIO", "(", ")", "if", "mock", ":", "out", "=", "unittest", ".", "mock", ".", "patch", "(", "'sys.stdout'", ",", "new", "=", "MockDevice", "(", "s", ")", ")", "err", "=", "unittest", ".", "mock", ".", "patch", "(", "'sys.stderr'", ",", "new", "=", "MockDevice", "(", "s", ")", ")", "self", ".", "_do_notebook", "(", "name", ",", "CONVERT_NOTEBOOKS", ")", "out", ".", "close", "(", ")", "err", ".", "close", "(", ")", "else", ":", "self", ".", "_do_notebook", "(", "name", ",", "CONVERT_NOTEBOOKS", ")", "self", ".", "assertTrue", "(", "True", ")" ]
40
0.00349
def call_api(self, method_type, method_name,
             valid_status_codes, resource, data, uid, **kwargs):
    """ Make HTTP calls.

    Args:
        method_type: The HTTP method
        method_name: The name of the python method making the HTTP call
        valid_status_codes: A tuple of integer status codes
            deemed acceptable as response statuses
        resource: The resource class that will be generated
        data: The post data being sent.
        uid: The unique identifier of the resource.
        kwargs: Additional custom keyword arguments that are passed through
            to the subclass methods:
                - get_url
                - prepare_http_request
                - get_http_headers

    Returns:
        The return value of ``_handle_response`` for the HTTP response,
        after it has been checked against ``valid_status_codes``.
    """
    url = resource.get_resource_url(
        resource, base_url=self.Meta.base_url
    )

    if method_type in SINGLE_RESOURCE_METHODS:
        if not uid and not kwargs:
            raise MissingUidException
        url = resource.get_url(
            url=url, uid=uid, **kwargs)

    params = {
        'headers': self.get_http_headers(
            self.Meta.name, method_name, **kwargs),
        'url': url
    }

    if method_type in ['POST', 'PUT', 'PATCH'] and isinstance(data, dict):
        params.update(json=data)

    prepared_request = self.prepare_http_request(
        method_type, params, **kwargs)
    response = self.session.send(prepared_request)

    return self._handle_response(response, valid_status_codes, resource)
[ "def", "call_api", "(", "self", ",", "method_type", ",", "method_name", ",", "valid_status_codes", ",", "resource", ",", "data", ",", "uid", ",", "*", "*", "kwargs", ")", ":", "url", "=", "resource", ".", "get_resource_url", "(", "resource", ",", "base_url", "=", "self", ".", "Meta", ".", "base_url", ")", "if", "method_type", "in", "SINGLE_RESOURCE_METHODS", ":", "if", "not", "uid", "and", "not", "kwargs", ":", "raise", "MissingUidException", "url", "=", "resource", ".", "get_url", "(", "url", "=", "url", ",", "uid", "=", "uid", ",", "*", "*", "kwargs", ")", "params", "=", "{", "'headers'", ":", "self", ".", "get_http_headers", "(", "self", ".", "Meta", ".", "name", ",", "method_name", ",", "*", "*", "kwargs", ")", ",", "'url'", ":", "url", "}", "if", "method_type", "in", "[", "'POST'", ",", "'PUT'", ",", "'PATCH'", "]", "and", "isinstance", "(", "data", ",", "dict", ")", ":", "params", ".", "update", "(", "json", "=", "data", ")", "prepared_request", "=", "self", ".", "prepare_http_request", "(", "method_type", ",", "params", ",", "*", "*", "kwargs", ")", "response", "=", "self", ".", "session", ".", "send", "(", "prepared_request", ")", "return", "self", ".", "_handle_response", "(", "response", ",", "valid_status_codes", ",", "resource", ")" ]
38.255814
0.002371
def create_readme_with_long_description(): '''Try to convert content of README.md into rst format using pypandoc, write it into README and return it. If pypandoc cannot be imported write content of README.md unchanged into README and return it. ''' this_dir = os.path.abspath(os.path.dirname(__file__)) readme_md = os.path.join(this_dir, 'README.md') readme = os.path.join(this_dir, 'README') if os.path.exists(readme_md): # this is the case when running `python setup.py sdist` if os.path.exists(readme): os.remove(readme) try: import pypandoc long_description = pypandoc.convert(readme_md, 'rst', format='md') except(ImportError): with open(readme_md, encoding='utf-8') as in_: long_description = in_.read() with open(readme, 'w') as out: out.write(long_description) else: # this is in case of `pip install fabsetup-x.y.z.tar.gz` with open(readme, encoding='utf-8') as in_: long_description = in_.read() return long_description
[ "def", "create_readme_with_long_description", "(", ")", ":", "this_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "readme_md", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "'README.md'", ")", "readme", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "'README'", ")", "if", "os", ".", "path", ".", "exists", "(", "readme_md", ")", ":", "# this is the case when running `python setup.py sdist`", "if", "os", ".", "path", ".", "exists", "(", "readme", ")", ":", "os", ".", "remove", "(", "readme", ")", "try", ":", "import", "pypandoc", "long_description", "=", "pypandoc", ".", "convert", "(", "readme_md", ",", "'rst'", ",", "format", "=", "'md'", ")", "except", "(", "ImportError", ")", ":", "with", "open", "(", "readme_md", ",", "encoding", "=", "'utf-8'", ")", "as", "in_", ":", "long_description", "=", "in_", ".", "read", "(", ")", "with", "open", "(", "readme", ",", "'w'", ")", "as", "out", ":", "out", ".", "write", "(", "long_description", ")", "else", ":", "# this is in case of `pip install fabsetup-x.y.z.tar.gz`", "with", "open", "(", "readme", ",", "encoding", "=", "'utf-8'", ")", "as", "in_", ":", "long_description", "=", "in_", ".", "read", "(", ")", "return", "long_description" ]
34.125
0.00089
def check_exports(mod, specs, renamings): ''' Does nothing but raising PythranSyntaxError if specs references an undefined global ''' functions = {renamings.get(k, k): v for k, v in specs.functions.items()} mod_functions = {node.name: node for node in mod.body if isinstance(node, ast.FunctionDef)} for fname, signatures in functions.items(): try: fnode = mod_functions[fname] except KeyError: raise PythranSyntaxError( "Invalid spec: exporting undefined function `{}`" .format(fname)) for signature in signatures: args_count = len(fnode.args.args) if len(signature) > args_count: raise PythranSyntaxError( "Too many arguments when exporting `{}`" .format(fname)) elif len(signature) < args_count - len(fnode.args.defaults): raise PythranSyntaxError( "Not enough arguments when exporting `{}`" .format(fname))
[ "def", "check_exports", "(", "mod", ",", "specs", ",", "renamings", ")", ":", "functions", "=", "{", "renamings", ".", "get", "(", "k", ",", "k", ")", ":", "v", "for", "k", ",", "v", "in", "specs", ".", "functions", ".", "items", "(", ")", "}", "mod_functions", "=", "{", "node", ".", "name", ":", "node", "for", "node", "in", "mod", ".", "body", "if", "isinstance", "(", "node", ",", "ast", ".", "FunctionDef", ")", "}", "for", "fname", ",", "signatures", "in", "functions", ".", "items", "(", ")", ":", "try", ":", "fnode", "=", "mod_functions", "[", "fname", "]", "except", "KeyError", ":", "raise", "PythranSyntaxError", "(", "\"Invalid spec: exporting undefined function `{}`\"", ".", "format", "(", "fname", ")", ")", "for", "signature", "in", "signatures", ":", "args_count", "=", "len", "(", "fnode", ".", "args", ".", "args", ")", "if", "len", "(", "signature", ")", ">", "args_count", ":", "raise", "PythranSyntaxError", "(", "\"Too many arguments when exporting `{}`\"", ".", "format", "(", "fname", ")", ")", "elif", "len", "(", "signature", ")", "<", "args_count", "-", "len", "(", "fnode", ".", "args", ".", "defaults", ")", ":", "raise", "PythranSyntaxError", "(", "\"Not enough arguments when exporting `{}`\"", ".", "format", "(", "fname", ")", ")" ]
39.333333
0.000919
def get_status_from_resource(self, response): """Process the latest status update retrieved from the same URL as the previous request. :param requests.Response response: latest REST call response. :raises: BadResponse if status not 200 or 204. """ self._raise_if_bad_http_status_and_method(response) if self._is_empty(response): raise BadResponse('The response from long running operation ' 'does not contain a body.') status = self._get_provisioning_state(response) self.status = status or 'Succeeded' self.resource = self._deserialize(response)
[ "def", "get_status_from_resource", "(", "self", ",", "response", ")", ":", "self", ".", "_raise_if_bad_http_status_and_method", "(", "response", ")", "if", "self", ".", "_is_empty", "(", "response", ")", ":", "raise", "BadResponse", "(", "'The response from long running operation '", "'does not contain a body.'", ")", "status", "=", "self", ".", "_get_provisioning_state", "(", "response", ")", "self", ".", "status", "=", "status", "or", "'Succeeded'", "self", ".", "resource", "=", "self", ".", "_deserialize", "(", "response", ")" ]
41
0.002981
def parse_field_value(field_info, value): """Parse ``value`` according to ``field_info`` """ if field_info.id == "FT": return [x for x in value.split(";") if x != "."] elif field_info.type == "Flag": return True elif field_info.number == 1: return convert_field_value(field_info.type, value) else: if value == ".": return [] else: return [convert_field_value(field_info.type, x) for x in value.split(",")]
[ "def", "parse_field_value", "(", "field_info", ",", "value", ")", ":", "if", "field_info", ".", "id", "==", "\"FT\"", ":", "return", "[", "x", "for", "x", "in", "value", ".", "split", "(", "\";\"", ")", "if", "x", "!=", "\".\"", "]", "elif", "field_info", ".", "type", "==", "\"Flag\"", ":", "return", "True", "elif", "field_info", ".", "number", "==", "1", ":", "return", "convert_field_value", "(", "field_info", ".", "type", ",", "value", ")", "else", ":", "if", "value", "==", "\".\"", ":", "return", "[", "]", "else", ":", "return", "[", "convert_field_value", "(", "field_info", ".", "type", ",", "x", ")", "for", "x", "in", "value", ".", "split", "(", "\",\"", ")", "]" ]
34.285714
0.004057
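An illustrative sketch for the row above. FieldInfo here is a stand-in namedtuple (not the real class), and the expected outputs assume that convert_field_value, defined elsewhere in the same VCF module, turns 'Integer' strings into ints:

from collections import namedtuple

FieldInfo = namedtuple('FieldInfo', 'id type number')  # hypothetical stand-in

parse_field_value(FieldInfo('FT', 'String', '.'), 'PASS;.')   # -> ['PASS'] (the '.' entry is dropped)
parse_field_value(FieldInfo('DB', 'Flag', 0), '')             # -> True
parse_field_value(FieldInfo('DP', 'Integer', 1), '14')        # -> 14, via convert_field_value
parse_field_value(FieldInfo('AD', 'Integer', 'A'), '12,3')    # -> [12, 3]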
def getAsWmsDatasetString(self, session):
    """
    Retrieve the WMS Raster as a string in the WMS Dataset format
    """
    # Magic numbers
    FIRST_VALUE_INDEX = 12

    # Write value raster
    if self.raster is not None:
        # Convert to GRASS ASCII Raster
        valueGrassRasterString = self.getAsGrassAsciiGrid(session)

        # Split into values
        values = valueGrassRasterString.split()

        # Assemble into string
        wmsDatasetString = ''

        for i in range(FIRST_VALUE_INDEX, len(values)):
            wmsDatasetString += '{0:.6f}\r\n'.format(float(values[i]))

        return wmsDatasetString

    else:
        # Fall back to the stored raster text
        return self.rasterText
[ "def", "getAsWmsDatasetString", "(", "self", ",", "session", ")", ":", "# Magic numbers", "FIRST_VALUE_INDEX", "=", "12", "# Write value raster", "if", "type", "(", "self", ".", "raster", ")", "!=", "type", "(", "None", ")", ":", "# Convert to GRASS ASCII Raster", "valueGrassRasterString", "=", "self", ".", "getAsGrassAsciiGrid", "(", "session", ")", "# Split by lines", "values", "=", "valueGrassRasterString", ".", "split", "(", ")", "# Assemble into string", "wmsDatasetString", "=", "''", "for", "i", "in", "range", "(", "FIRST_VALUE_INDEX", ",", "len", "(", "values", ")", ")", ":", "wmsDatasetString", "+=", "'{0:.6f}\\r\\n'", ".", "format", "(", "float", "(", "values", "[", "i", "]", ")", ")", "return", "wmsDatasetString", "else", ":", "wmsDatasetString", "=", "self", ".", "rasterText" ]
30.916667
0.003922
def create_site(self, params={}): """ Creates a site http://dev.wheniwork.com/#create-update-site """ url = "/2/sites/" body = params data = self._post_resource(url, body) return self.site_from_json(data["site"])
[ "def", "create_site", "(", "self", ",", "params", "=", "{", "}", ")", ":", "url", "=", "\"/2/sites/\"", "body", "=", "params", "data", "=", "self", ".", "_post_resource", "(", "url", ",", "body", ")", "return", "self", ".", "site_from_json", "(", "data", "[", "\"site\"", "]", ")" ]
24.363636
0.007194
def is_plugin_installed(name): """Check if a plugin is installed, even if it's not enabled. :param name: Name of the plugin to check. :type name: string :return: If the plugin is installed. :rtype: bool """ for directory in plugin_paths: if isdir(join(directory, name)): return True return False
[ "def", "is_plugin_installed", "(", "name", ")", ":", "for", "directory", "in", "plugin_paths", ":", "if", "isdir", "(", "join", "(", "directory", ",", "name", ")", ")", ":", "return", "True", "return", "False" ]
25.923077
0.002865
def sweep(ABF,sweep=None,rainbow=True,alpha=None,protocol=False,color='b', continuous=False,offsetX=0,offsetY=0,minutes=False, decimate=None,newFigure=False): """ Load a particular sweep then plot it. If sweep is None or False, just plot current dataX/dataY. If rainbow, it'll make it color coded prettily. """ if len(pylab.get_fignums())==0 or newFigure: new(ABF,True) if offsetY>0: pylab.grid(None) # figure which sweeps to plot if sweep is None: sweeps=[ABF.currentSweep] if not ABF.currentSweep: sweeps=[0] elif sweep=="all": sweeps=range(0,ABF.sweeps) elif type(sweep) in [int,float]: sweeps=[int(sweep)] elif type(sweep) is list: sweeps=sweep else: print("DONT KNOW WHAT TO DO WITH THIS SWEEPS!!!\n",type(sweep),sweep) #figure out offsets: if continuous: offsetX=ABF.sweepInterval # determine the colors to use colors=[color]*len(sweeps) #detault to blue if rainbow and len(sweeps)>1: for i in range(len(sweeps)): colors[i]=ABF.colormap[i] if alpha is None and len(sweeps)==1: alpha=1 if rainbow and alpha is None: alpha=.5 # correct for alpha if alpha is None: alpha=1 # conversion to minutes? if minutes == False: minutes=1 else: minutes=60 pylab.xlabel("minutes") ABF.decimateMethod=decimate # do the plotting of each sweep for i in range(len(sweeps)): ABF.setSweep(sweeps[i]) if protocol: pylab.plot((np.array(ABF.protoX)/ABF.rate+offsetX*i)/minutes, ABF.protoY+offsetY*i, alpha=alpha,color=colors[i]) else: pylab.plot((ABF.dataX+offsetX*i)/minutes, ABF.dataY+offsetY*i,alpha=alpha,color=colors[i]) ABF.decimateMethod=None pylab.margins(0,.02)
[ "def", "sweep", "(", "ABF", ",", "sweep", "=", "None", ",", "rainbow", "=", "True", ",", "alpha", "=", "None", ",", "protocol", "=", "False", ",", "color", "=", "'b'", ",", "continuous", "=", "False", ",", "offsetX", "=", "0", ",", "offsetY", "=", "0", ",", "minutes", "=", "False", ",", "decimate", "=", "None", ",", "newFigure", "=", "False", ")", ":", "if", "len", "(", "pylab", ".", "get_fignums", "(", ")", ")", "==", "0", "or", "newFigure", ":", "new", "(", "ABF", ",", "True", ")", "if", "offsetY", ">", "0", ":", "pylab", ".", "grid", "(", "None", ")", "# figure which sweeps to plot", "if", "sweep", "is", "None", ":", "sweeps", "=", "[", "ABF", ".", "currentSweep", "]", "if", "not", "ABF", ".", "currentSweep", ":", "sweeps", "=", "[", "0", "]", "elif", "sweep", "==", "\"all\"", ":", "sweeps", "=", "range", "(", "0", ",", "ABF", ".", "sweeps", ")", "elif", "type", "(", "sweep", ")", "in", "[", "int", ",", "float", "]", ":", "sweeps", "=", "[", "int", "(", "sweep", ")", "]", "elif", "type", "(", "sweep", ")", "is", "list", ":", "sweeps", "=", "sweep", "else", ":", "print", "(", "\"DONT KNOW WHAT TO DO WITH THIS SWEEPS!!!\\n\"", ",", "type", "(", "sweep", ")", ",", "sweep", ")", "#figure out offsets:", "if", "continuous", ":", "offsetX", "=", "ABF", ".", "sweepInterval", "# determine the colors to use", "colors", "=", "[", "color", "]", "*", "len", "(", "sweeps", ")", "#detault to blue", "if", "rainbow", "and", "len", "(", "sweeps", ")", ">", "1", ":", "for", "i", "in", "range", "(", "len", "(", "sweeps", ")", ")", ":", "colors", "[", "i", "]", "=", "ABF", ".", "colormap", "[", "i", "]", "if", "alpha", "is", "None", "and", "len", "(", "sweeps", ")", "==", "1", ":", "alpha", "=", "1", "if", "rainbow", "and", "alpha", "is", "None", ":", "alpha", "=", ".5", "# correct for alpha", "if", "alpha", "is", "None", ":", "alpha", "=", "1", "# conversion to minutes?", "if", "minutes", "==", "False", ":", "minutes", "=", "1", "else", ":", "minutes", "=", "60", "pylab", ".", "xlabel", "(", "\"minutes\"", ")", "ABF", ".", "decimateMethod", "=", "decimate", "# do the plotting of each sweep", "for", "i", "in", "range", "(", "len", "(", "sweeps", ")", ")", ":", "ABF", ".", "setSweep", "(", "sweeps", "[", "i", "]", ")", "if", "protocol", ":", "pylab", ".", "plot", "(", "(", "np", ".", "array", "(", "ABF", ".", "protoX", ")", "/", "ABF", ".", "rate", "+", "offsetX", "*", "i", ")", "/", "minutes", ",", "ABF", ".", "protoY", "+", "offsetY", "*", "i", ",", "alpha", "=", "alpha", ",", "color", "=", "colors", "[", "i", "]", ")", "else", ":", "pylab", ".", "plot", "(", "(", "ABF", ".", "dataX", "+", "offsetX", "*", "i", ")", "/", "minutes", ",", "ABF", ".", "dataY", "+", "offsetY", "*", "i", ",", "alpha", "=", "alpha", ",", "color", "=", "colors", "[", "i", "]", ")", "ABF", ".", "decimateMethod", "=", "None", "pylab", ".", "margins", "(", "0", ",", ".02", ")" ]
29.446154
0.02275
def __substitute_objects(self, value, context_dict): """ recursively substitute value with the context_dict """ if type(value) == dict: return dict([(k, self.__substitute_objects(v, context_dict)) for k, v in value.items()]) elif type(value) == str: try: return value % context_dict except KeyError: e = sys.exc_info()[1] logger.warn("Could not specialize %s! Error: %s" % (value, e)) return value else: return value
[ "def", "__substitute_objects", "(", "self", ",", "value", ",", "context_dict", ")", ":", "if", "type", "(", "value", ")", "==", "dict", ":", "return", "dict", "(", "[", "(", "k", ",", "self", ".", "__substitute_objects", "(", "v", ",", "context_dict", ")", ")", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", "]", ")", "elif", "type", "(", "value", ")", "==", "str", ":", "try", ":", "return", "value", "%", "context_dict", "except", "KeyError", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "logger", ".", "warn", "(", "\"Could not specialize %s! Error: %s\"", "%", "(", "value", ",", "e", ")", ")", "return", "value", "else", ":", "return", "value" ]
37.466667
0.005208
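A minimal standalone sketch of the same recursive '%'-substitution idea as the row above (a plain function rather than the private method, so it runs on its own):

def substitute_objects(value, context):
    """Recursively fill %(key)s placeholders in strings nested inside dicts."""
    if isinstance(value, dict):
        return {k: substitute_objects(v, context) for k, v in value.items()}
    if isinstance(value, str):
        try:
            return value % context
        except KeyError:
            return value  # leave the string untouched if a key is missing
    return value

cfg = {'url': 'http://%(host)s:%(port)s',
       'retries': 3,
       'paths': {'log': '%(logdir)s/app.log'}}
print(substitute_objects(cfg, {'host': 'db1', 'port': 5432, 'logdir': '/var/log'}))
# -> {'url': 'http://db1:5432', 'retries': 3, 'paths': {'log': '/var/log/app.log'}}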
def create_polynoms(): """Create and return poly1d objects. Uses the parameters from Morgan to create poly1d objects for calculations. """ fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv') res_df = pd.read_csv(fname) polys = {} for resorder, row in zip('65 54 43 21'.split(), range(4)): p = poly1d([res_df.loc[row, 'Slope (km/yr)'], res_df.loc[row, 'Intercept (km)']]) polys['janus ' + ':'.join(resorder)] = p return polys
[ "def", "create_polynoms", "(", ")", ":", "fname", "=", "pr", ".", "resource_filename", "(", "'pyciss'", ",", "'data/soliton_prediction_parameters.csv'", ")", "res_df", "=", "pd", ".", "read_csv", "(", "fname", ")", "polys", "=", "{", "}", "for", "resorder", ",", "row", "in", "zip", "(", "'65 54 43 21'", ".", "split", "(", ")", ",", "range", "(", "4", ")", ")", ":", "p", "=", "poly1d", "(", "[", "res_df", ".", "loc", "[", "row", ",", "'Slope (km/yr)'", "]", ",", "res_df", ".", "loc", "[", "row", ",", "'Intercept (km)'", "]", "]", ")", "polys", "[", "'janus '", "+", "':'", ".", "join", "(", "resorder", ")", "]", "=", "p", "return", "polys" ]
37.285714
0.005607
def get_weekly_charts(self, chart_kind, from_date=None, to_date=None): """ Returns the weekly charts for the week starting from the from_date value to the to_date value. chart_kind should be one of "album", "artist" or "track" """ method = ".getWeekly" + chart_kind.title() + "Chart" chart_type = eval(chart_kind.title()) # string to type params = self._get_params() if from_date and to_date: params["from"] = from_date params["to"] = to_date doc = self._request(self.ws_prefix + method, True, params) seq = [] for node in doc.getElementsByTagName(chart_kind.lower()): if chart_kind == "artist": item = chart_type(_extract(node, "name"), self.network) else: item = chart_type( _extract(node, "artist"), _extract(node, "name"), self.network ) weight = _number(_extract(node, "playcount")) seq.append(TopItem(item, weight)) return seq
[ "def", "get_weekly_charts", "(", "self", ",", "chart_kind", ",", "from_date", "=", "None", ",", "to_date", "=", "None", ")", ":", "method", "=", "\".getWeekly\"", "+", "chart_kind", ".", "title", "(", ")", "+", "\"Chart\"", "chart_type", "=", "eval", "(", "chart_kind", ".", "title", "(", ")", ")", "# string to type", "params", "=", "self", ".", "_get_params", "(", ")", "if", "from_date", "and", "to_date", ":", "params", "[", "\"from\"", "]", "=", "from_date", "params", "[", "\"to\"", "]", "=", "to_date", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "method", ",", "True", ",", "params", ")", "seq", "=", "[", "]", "for", "node", "in", "doc", ".", "getElementsByTagName", "(", "chart_kind", ".", "lower", "(", ")", ")", ":", "if", "chart_kind", "==", "\"artist\"", ":", "item", "=", "chart_type", "(", "_extract", "(", "node", ",", "\"name\"", ")", ",", "self", ".", "network", ")", "else", ":", "item", "=", "chart_type", "(", "_extract", "(", "node", ",", "\"artist\"", ")", ",", "_extract", "(", "node", ",", "\"name\"", ")", ",", "self", ".", "network", ")", "weight", "=", "_number", "(", "_extract", "(", "node", ",", "\"playcount\"", ")", ")", "seq", ".", "append", "(", "TopItem", "(", "item", ",", "weight", ")", ")", "return", "seq" ]
37.642857
0.002775
def get_proxy_version(self): """ Returns version of the Cloud SQL Proxy. """ self._download_sql_proxy_if_needed() command_to_run = [self.sql_proxy_path] command_to_run.extend(['--version']) command_to_run.extend(self._get_credential_parameters()) result = subprocess.check_output(command_to_run).decode('utf-8') pattern = re.compile("^.*[V|v]ersion ([^;]*);.*$") m = pattern.match(result) if m: return m.group(1) else: return None
[ "def", "get_proxy_version", "(", "self", ")", ":", "self", ".", "_download_sql_proxy_if_needed", "(", ")", "command_to_run", "=", "[", "self", ".", "sql_proxy_path", "]", "command_to_run", ".", "extend", "(", "[", "'--version'", "]", ")", "command_to_run", ".", "extend", "(", "self", ".", "_get_credential_parameters", "(", ")", ")", "result", "=", "subprocess", ".", "check_output", "(", "command_to_run", ")", ".", "decode", "(", "'utf-8'", ")", "pattern", "=", "re", ".", "compile", "(", "\"^.*[V|v]ersion ([^;]*);.*$\"", ")", "m", "=", "pattern", ".", "match", "(", "result", ")", "if", "m", ":", "return", "m", ".", "group", "(", "1", ")", "else", ":", "return", "None" ]
35.733333
0.003636
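A standalone check of the version-extracting regex used in the row above; the sample line is fabricated, not real cloud_sql_proxy output:

import re

pattern = re.compile("^.*[V|v]ersion ([^;]*);.*$")
sample = 'cloud_sql_proxy: Version 1.33.2; latest supported proxy'  # made-up sample line
m = pattern.match(sample)
print(m.group(1) if m else None)  # -> 1.33.2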
def from_prefix(cls, container, prefix): """Create from prefix object.""" if cls._is_gs_folder(prefix): name, suffix, extra = prefix.name.partition(cls._gs_folder_suffix) if (suffix, extra) == (cls._gs_folder_suffix, ''): # Patch GS specific folder to remove suffix. prefix.name = name return super(GsObject, cls).from_prefix(container, prefix)
[ "def", "from_prefix", "(", "cls", ",", "container", ",", "prefix", ")", ":", "if", "cls", ".", "_is_gs_folder", "(", "prefix", ")", ":", "name", ",", "suffix", ",", "extra", "=", "prefix", ".", "name", ".", "partition", "(", "cls", ".", "_gs_folder_suffix", ")", "if", "(", "suffix", ",", "extra", ")", "==", "(", "cls", ".", "_gs_folder_suffix", ",", "''", ")", ":", "# Patch GS specific folder to remove suffix.", "prefix", ".", "name", "=", "name", "return", "super", "(", "GsObject", ",", "cls", ")", ".", "from_prefix", "(", "container", ",", "prefix", ")" ]
46.333333
0.004706
def remove_label(self, to_remove): """ Remove a label from the document. (-> rewrite the label file) """ if to_remove not in self.labels: return labels = self.labels labels.remove(to_remove) with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'w') \ as file_desc: for label in labels: file_desc.write("%s,%s\n" % (label.name, label.get_color_str()))
[ "def", "remove_label", "(", "self", ",", "to_remove", ")", ":", "if", "to_remove", "not", "in", "self", ".", "labels", ":", "return", "labels", "=", "self", ".", "labels", "labels", ".", "remove", "(", "to_remove", ")", "with", "self", ".", "fs", ".", "open", "(", "self", ".", "fs", ".", "join", "(", "self", ".", "path", ",", "self", ".", "LABEL_FILE", ")", ",", "'w'", ")", "as", "file_desc", ":", "for", "label", "in", "labels", ":", "file_desc", ".", "write", "(", "\"%s,%s\\n\"", "%", "(", "label", ".", "name", ",", "label", ".", "get_color_str", "(", ")", ")", ")" ]
38.615385
0.003891
def compress_for_rename(paths): """Returns a set containing the paths that need to be renamed. This set may include directories when the original sequence of paths included every file on disk. """ case_map = dict((os.path.normcase(p), p) for p in paths) remaining = set(case_map) unchecked = sorted(set(os.path.split(p)[0] for p in case_map.values()), key=len) wildcards = set() def norm_join(*a): return os.path.normcase(os.path.join(*a)) for root in unchecked: if any(os.path.normcase(root).startswith(w) for w in wildcards): # This directory has already been handled. continue all_files = set() all_subdirs = set() for dirname, subdirs, files in os.walk(root): all_subdirs.update(norm_join(root, dirname, d) for d in subdirs) all_files.update(norm_join(root, dirname, f) for f in files) # If all the files we found are in our remaining set of files to # remove, then remove them from the latter set and add a wildcard # for the directory. if not (all_files - remaining): remaining.difference_update(all_files) wildcards.add(root + os.sep) return set(map(case_map.__getitem__, remaining)) | wildcards
[ "def", "compress_for_rename", "(", "paths", ")", ":", "case_map", "=", "dict", "(", "(", "os", ".", "path", ".", "normcase", "(", "p", ")", ",", "p", ")", "for", "p", "in", "paths", ")", "remaining", "=", "set", "(", "case_map", ")", "unchecked", "=", "sorted", "(", "set", "(", "os", ".", "path", ".", "split", "(", "p", ")", "[", "0", "]", "for", "p", "in", "case_map", ".", "values", "(", ")", ")", ",", "key", "=", "len", ")", "wildcards", "=", "set", "(", ")", "def", "norm_join", "(", "*", "a", ")", ":", "return", "os", ".", "path", ".", "normcase", "(", "os", ".", "path", ".", "join", "(", "*", "a", ")", ")", "for", "root", "in", "unchecked", ":", "if", "any", "(", "os", ".", "path", ".", "normcase", "(", "root", ")", ".", "startswith", "(", "w", ")", "for", "w", "in", "wildcards", ")", ":", "# This directory has already been handled.", "continue", "all_files", "=", "set", "(", ")", "all_subdirs", "=", "set", "(", ")", "for", "dirname", ",", "subdirs", ",", "files", "in", "os", ".", "walk", "(", "root", ")", ":", "all_subdirs", ".", "update", "(", "norm_join", "(", "root", ",", "dirname", ",", "d", ")", "for", "d", "in", "subdirs", ")", "all_files", ".", "update", "(", "norm_join", "(", "root", ",", "dirname", ",", "f", ")", "for", "f", "in", "files", ")", "# If all the files we found are in our remaining set of files to", "# remove, then remove them from the latter set and add a wildcard", "# for the directory.", "if", "not", "(", "all_files", "-", "remaining", ")", ":", "remaining", ".", "difference_update", "(", "all_files", ")", "wildcards", ".", "add", "(", "root", "+", "os", ".", "sep", ")", "return", "set", "(", "map", "(", "case_map", ".", "__getitem__", ",", "remaining", ")", ")", "|", "wildcards" ]
37.833333
0.000716
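A hypothetical usage sketch for the row above; it assumes compress_for_rename is in scope (it appears to come from pip's uninstall internals, but that module path is not shown in this row), and the temporary file layout is fabricated:

import os
import tempfile

root = tempfile.mkdtemp()
listed = ['pkg/__init__.py', 'pkg/mod.py', 'top.py']   # files we pretend to uninstall
extra = 'keepme.txt'                                   # exists on disk but is NOT listed

paths = []
for name in listed + [extra]:
    full = os.path.join(root, name)
    os.makedirs(os.path.dirname(full), exist_ok=True)
    open(full, 'w').close()
    if name != extra:
        paths.append(full)

print(compress_for_rename(paths))
# 'pkg' is fully covered by the listed files, so it collapses to a single
# '<root>/pkg/' wildcard entry; 'top.py' stays as an individual path; the root
# itself does not collapse because keepme.txt was never listed.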
def put_settings(self, app=None, index=None, settings=None, es=None): """Modify index settings. Index must exist already. """ if not index: index = self.index if not app: app = self.app if not es: es = self.es if not settings: return for alias, old_settings in self.es.indices.get_settings(index=index).items(): try: if test_settings_contain(old_settings['settings']['index'], settings['settings']): return except KeyError: pass es.indices.close(index=index) es.indices.put_settings(index=index, body=settings) es.indices.open(index=index)
[ "def", "put_settings", "(", "self", ",", "app", "=", "None", ",", "index", "=", "None", ",", "settings", "=", "None", ",", "es", "=", "None", ")", ":", "if", "not", "index", ":", "index", "=", "self", ".", "index", "if", "not", "app", ":", "app", "=", "self", ".", "app", "if", "not", "es", ":", "es", "=", "self", ".", "es", "if", "not", "settings", ":", "return", "for", "alias", ",", "old_settings", "in", "self", ".", "es", ".", "indices", ".", "get_settings", "(", "index", "=", "index", ")", ".", "items", "(", ")", ":", "try", ":", "if", "test_settings_contain", "(", "old_settings", "[", "'settings'", "]", "[", "'index'", "]", ",", "settings", "[", "'settings'", "]", ")", ":", "return", "except", "KeyError", ":", "pass", "es", ".", "indices", ".", "close", "(", "index", "=", "index", ")", "es", ".", "indices", ".", "put_settings", "(", "index", "=", "index", ",", "body", "=", "settings", ")", "es", ".", "indices", ".", "open", "(", "index", "=", "index", ")" ]
27.074074
0.005284
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when): """Add a file handler to the global logger.""" kwargs = {} # If the filename is not set, use the default filename if filename is None: filename = getattr(sys.modules['__main__'], '__file__', 'log.py') filename = os.path.basename(filename.replace('.py', '.log')) filename = os.path.join('/tmp', filename) if not os.path.exists(os.path.dirname(filename)): os.mkdir(os.path.dirname(filename)) kwargs['filename'] = filename # Choose the filehandler based on the passed arguments if backup_count == 0: # Use FileHandler cls = logging.FileHandler kwargs['mode'] = mode elif when is None: # Use RotatingFileHandler cls = logging.handlers.RotatingFileHandler kwargs['maxBytes'] = limit kwargs['backupCount'] = backup_count kwargs['mode'] = mode else: # Use TimedRotatingFileHandler cls = logging.handlers.TimedRotatingFileHandler kwargs['when'] = when kwargs['interval'] = limit kwargs['backupCount'] = backup_count return add_handler(cls, level, fmt, False, **kwargs)
[ "def", "add_filehandler", "(", "level", ",", "fmt", ",", "filename", ",", "mode", ",", "backup_count", ",", "limit", ",", "when", ")", ":", "kwargs", "=", "{", "}", "# If the filename is not set, use the default filename", "if", "filename", "is", "None", ":", "filename", "=", "getattr", "(", "sys", ".", "modules", "[", "'__main__'", "]", ",", "'__file__'", ",", "'log.py'", ")", "filename", "=", "os", ".", "path", ".", "basename", "(", "filename", ".", "replace", "(", "'.py'", ",", "'.log'", ")", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "'/tmp'", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ":", "os", ".", "mkdir", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "kwargs", "[", "'filename'", "]", "=", "filename", "# Choose the filehandler based on the passed arguments", "if", "backup_count", "==", "0", ":", "# Use FileHandler", "cls", "=", "logging", ".", "FileHandler", "kwargs", "[", "'mode'", "]", "=", "mode", "elif", "when", "is", "None", ":", "# Use RotatingFileHandler", "cls", "=", "logging", ".", "handlers", ".", "RotatingFileHandler", "kwargs", "[", "'maxBytes'", "]", "=", "limit", "kwargs", "[", "'backupCount'", "]", "=", "backup_count", "kwargs", "[", "'mode'", "]", "=", "mode", "else", ":", "# Use TimedRotatingFileHandler", "cls", "=", "logging", ".", "handlers", ".", "TimedRotatingFileHandler", "kwargs", "[", "'when'", "]", "=", "when", "kwargs", "[", "'interval'", "]", "=", "limit", "kwargs", "[", "'backupCount'", "]", "=", "backup_count", "return", "add_handler", "(", "cls", ",", "level", ",", "fmt", ",", "False", ",", "*", "*", "kwargs", ")" ]
37.83871
0.003325
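Hypothetical calls for the row above, assuming add_filehandler and the add_handler helper it delegates to are importable from the same logging-setup module; they show which handler class each argument combination selects:

import logging

# backup_count == 0  ->  plain logging.FileHandler, append mode
add_filehandler(logging.INFO, '%(asctime)s %(levelname)s %(message)s',
                '/tmp/app.log', 'a', backup_count=0, limit=0, when=None)

# when is None and backup_count > 0  ->  RotatingFileHandler (size based),
# 5 MiB per file, keep 3 backups
add_filehandler(logging.INFO, '%(asctime)s %(message)s',
                '/tmp/app.log', 'a', backup_count=3, limit=5 * 1024 * 1024, when=None)

# when given  ->  TimedRotatingFileHandler, roll over daily, keep 7 files
add_filehandler(logging.INFO, '%(asctime)s %(message)s',
                '/tmp/app.log', 'a', backup_count=7, limit=1, when='D')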
def run_slurm(self, steps=None, **kwargs): """Run the steps via the SLURM queue.""" # Optional extra SLURM parameters # params = self.extra_slurm_params params.update(kwargs) # Mandatory extra SLURM parameters # if 'time' not in params: params['time'] = self.default_time if 'job_name' not in params: params['job_name'] = self.job_name if 'email' not in params: params['email'] = None if 'dependency' not in params: params['dependency'] = 'singleton' # Send it # self.slurm_job = LoggedJobSLURM(self.command(steps), base_dir = self.parent.p.logs_dir, modules = self.modules, **params) # Return the Job ID # return self.slurm_job.run()
[ "def", "run_slurm", "(", "self", ",", "steps", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Optional extra SLURM parameters #", "params", "=", "self", ".", "extra_slurm_params", "params", ".", "update", "(", "kwargs", ")", "# Mandatory extra SLURM parameters #", "if", "'time'", "not", "in", "params", ":", "params", "[", "'time'", "]", "=", "self", ".", "default_time", "if", "'job_name'", "not", "in", "params", ":", "params", "[", "'job_name'", "]", "=", "self", ".", "job_name", "if", "'email'", "not", "in", "params", ":", "params", "[", "'email'", "]", "=", "None", "if", "'dependency'", "not", "in", "params", ":", "params", "[", "'dependency'", "]", "=", "'singleton'", "# Send it #", "self", ".", "slurm_job", "=", "LoggedJobSLURM", "(", "self", ".", "command", "(", "steps", ")", ",", "base_dir", "=", "self", ".", "parent", ".", "p", ".", "logs_dir", ",", "modules", "=", "self", ".", "modules", ",", "*", "*", "params", ")", "# Return the Job ID #", "return", "self", ".", "slurm_job", ".", "run", "(", ")" ]
51.117647
0.019209
def revoke(self, only_access=False): """Revoke the current Authorization. :param only_access: (Optional) When explicitly set to True, do not evict the refresh token if one is set. Revoking a refresh token will in-turn revoke all access tokens associated with that authorization. """ if only_access or self.refresh_token is None: super(Authorizer, self).revoke() else: self._authenticator.revoke_token( self.refresh_token, "refresh_token" ) self._clear_access_token() self.refresh_token = None
[ "def", "revoke", "(", "self", ",", "only_access", "=", "False", ")", ":", "if", "only_access", "or", "self", ".", "refresh_token", "is", "None", ":", "super", "(", "Authorizer", ",", "self", ")", ".", "revoke", "(", ")", "else", ":", "self", ".", "_authenticator", ".", "revoke_token", "(", "self", ".", "refresh_token", ",", "\"refresh_token\"", ")", "self", ".", "_clear_access_token", "(", ")", "self", ".", "refresh_token", "=", "None" ]
34.555556
0.00313
def _set_ldp_session(self, v, load=False): """ Setter method for ldp_session, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/ldp_session (list) If this variable is read-only (config: false) in the source YANG file, then _set_ldp_session is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ldp_session() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("ldp_session_ip",ldp_session.ldp_session, yang_name="ldp-session", rest_name="session", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ldp-session-ip', extensions={u'tailf-common': {u'info': u'Define LDP Session', u'cli-suppress-list-no': None, u'alt-name': u'session', u'callpoint': u'MplsLdpSession', u'cli-mode-name': u'config-router-mpls-ldp-session-$(ldp-session-ip)'}}), is_container='list', yang_name="ldp-session", rest_name="session", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define LDP Session', u'cli-suppress-list-no': None, u'alt-name': u'session', u'callpoint': u'MplsLdpSession', u'cli-mode-name': u'config-router-mpls-ldp-session-$(ldp-session-ip)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ldp_session must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("ldp_session_ip",ldp_session.ldp_session, yang_name="ldp-session", rest_name="session", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ldp-session-ip', extensions={u'tailf-common': {u'info': u'Define LDP Session', u'cli-suppress-list-no': None, u'alt-name': u'session', u'callpoint': u'MplsLdpSession', u'cli-mode-name': u'config-router-mpls-ldp-session-$(ldp-session-ip)'}}), is_container='list', yang_name="ldp-session", rest_name="session", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define LDP Session', u'cli-suppress-list-no': None, u'alt-name': u'session', u'callpoint': u'MplsLdpSession', u'cli-mode-name': u'config-router-mpls-ldp-session-$(ldp-session-ip)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""", }) self.__ldp_session = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ldp_session", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"ldp_session_ip\"", ",", "ldp_session", ".", "ldp_session", ",", "yang_name", "=", "\"ldp-session\"", ",", "rest_name", "=", "\"session\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'ldp-session-ip'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Define LDP Session'", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'alt-name'", ":", "u'session'", ",", "u'callpoint'", ":", "u'MplsLdpSession'", ",", "u'cli-mode-name'", ":", "u'config-router-mpls-ldp-session-$(ldp-session-ip)'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"ldp-session\"", ",", "rest_name", "=", "\"session\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Define LDP Session'", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'alt-name'", ":", "u'session'", ",", "u'callpoint'", ":", "u'MplsLdpSession'", ",", "u'cli-mode-name'", ":", "u'config-router-mpls-ldp-session-$(ldp-session-ip)'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls'", ",", "defining_module", "=", "'brocade-mpls'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"ldp_session must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"ldp_session_ip\",ldp_session.ldp_session, yang_name=\"ldp-session\", rest_name=\"session\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ldp-session-ip', extensions={u'tailf-common': {u'info': u'Define LDP Session', u'cli-suppress-list-no': None, u'alt-name': u'session', u'callpoint': u'MplsLdpSession', u'cli-mode-name': u'config-router-mpls-ldp-session-$(ldp-session-ip)'}}), is_container='list', yang_name=\"ldp-session\", rest_name=\"session\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define LDP Session', u'cli-suppress-list-no': None, u'alt-name': u'session', u'callpoint': u'MplsLdpSession', u'cli-mode-name': u'config-router-mpls-ldp-session-$(ldp-session-ip)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__ldp_session", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
123
0.004034
def _slugify(string): """ This is not as good as a proper slugification function, but the input space is limited >>> _slugify("beets") 'beets' >>> _slugify("Toaster Strudel") 'toaster-strudel' Here's why: It handles very little. It doesn't handle esoteric whitespace or symbols: >>> _slugify("Hat\\nBasket- of justice and some @#*(! symbols") 'hat-basket-of-justice-and-some-symbols' """ words = re.split(r'[\W]', string) clean_words = [w for w in words if w != ''] return '-'.join(clean_words).lower()
[ "def", "_slugify", "(", "string", ")", ":", "words", "=", "re", ".", "split", "(", "r'[\\W]'", ",", "string", ")", "clean_words", "=", "[", "w", "for", "w", "in", "words", "if", "w", "!=", "''", "]", "return", "'-'", ".", "join", "(", "clean_words", ")", ".", "lower", "(", ")" ]
28.473684
0.005367
def _is_protein(pe): """Return True if the element is a protein""" val = isinstance(pe, _bp('Protein')) or \ isinstance(pe, _bpimpl('Protein')) or \ isinstance(pe, _bp('ProteinReference')) or \ isinstance(pe, _bpimpl('ProteinReference')) return val
[ "def", "_is_protein", "(", "pe", ")", ":", "val", "=", "isinstance", "(", "pe", ",", "_bp", "(", "'Protein'", ")", ")", "or", "isinstance", "(", "pe", ",", "_bpimpl", "(", "'Protein'", ")", ")", "or", "isinstance", "(", "pe", ",", "_bp", "(", "'ProteinReference'", ")", ")", "or", "isinstance", "(", "pe", ",", "_bpimpl", "(", "'ProteinReference'", ")", ")", "return", "val" ]
41.428571
0.013514
def make_combined_periodogram(pflist, outfile, addmethods=False): '''This just puts all of the period-finders on a single periodogram. This will renormalize all of the periodograms so their values lie between 0 and 1, with values lying closer to 1 being more significant. Periodograms that give the same best periods will have their peaks line up together. Parameters ---------- pflist : list of dict This is a list of result dicts from any of the period-finders in periodbase. To use your own period-finders' results here, make sure the result dict is of the form and has at least the keys below:: {'periods': np.array of all periods searched by the period-finder, 'lspvals': np.array of periodogram power value for each period, 'bestperiod': a float value that is the period with the highest peak in the periodogram, i.e. the most-likely actual period, 'method': a three-letter code naming the period-finder used; must be one of the keys in the `astrobase.periodbase.METHODLABELS` dict, 'nbestperiods': a list of the periods corresponding to periodogram peaks (`nbestlspvals` below) to annotate on the periodogram plot so they can be called out visually, 'nbestlspvals': a list of the power values associated with periodogram peaks to annotate on the periodogram plot so they can be called out visually; should be the same length as `nbestperiods` above, 'kwargs': dict of kwargs passed to your own period-finder function} outfile : str This is the output file to write the output to. NOTE: EPS/PS won't work because we use alpha transparency to better distinguish between the various periodograms. addmethods : bool If this is True, will add all of the normalized periodograms together, then renormalize them to between 0 and 1. In this way, if all of the period-finders agree on something, it'll stand out easily. FIXME: implement this kwarg. Returns ------- str The name of the generated plot file. ''' import matplotlib.pyplot as plt for pf in pflist: if pf['method'] == 'pdm': plt.plot(pf['periods'], np.max(pf['lspvals'])/pf['lspvals'] - 1.0, label='%s P=%.5f' % (pf['method'], pf['bestperiod']), alpha=0.5) else: plt.plot(pf['periods'], pf['lspvals']/np.max(pf['lspvals']), label='%s P=%.5f' % (pf['method'], pf['bestperiod']), alpha=0.5) plt.xlabel('period [days]') plt.ylabel('normalized periodogram power') plt.xscale('log') plt.legend() plt.tight_layout() plt.savefig(outfile) plt.close('all') return outfile
[ "def", "make_combined_periodogram", "(", "pflist", ",", "outfile", ",", "addmethods", "=", "False", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "for", "pf", "in", "pflist", ":", "if", "pf", "[", "'method'", "]", "==", "'pdm'", ":", "plt", ".", "plot", "(", "pf", "[", "'periods'", "]", ",", "np", ".", "max", "(", "pf", "[", "'lspvals'", "]", ")", "/", "pf", "[", "'lspvals'", "]", "-", "1.0", ",", "label", "=", "'%s P=%.5f'", "%", "(", "pf", "[", "'method'", "]", ",", "pf", "[", "'bestperiod'", "]", ")", ",", "alpha", "=", "0.5", ")", "else", ":", "plt", ".", "plot", "(", "pf", "[", "'periods'", "]", ",", "pf", "[", "'lspvals'", "]", "/", "np", ".", "max", "(", "pf", "[", "'lspvals'", "]", ")", ",", "label", "=", "'%s P=%.5f'", "%", "(", "pf", "[", "'method'", "]", ",", "pf", "[", "'bestperiod'", "]", ")", ",", "alpha", "=", "0.5", ")", "plt", ".", "xlabel", "(", "'period [days]'", ")", "plt", ".", "ylabel", "(", "'normalized periodogram power'", ")", "plt", ".", "xscale", "(", "'log'", ")", "plt", ".", "legend", "(", ")", "plt", ".", "tight_layout", "(", ")", "plt", ".", "savefig", "(", "outfile", ")", "plt", ".", "close", "(", "'all'", ")", "return", "outfile" ]
37.901235
0.000952
def validate_flavor_data(self, expected, actual): """Validate flavor data. Validate a list of actual flavors vs a list of expected flavors. """ self.log.debug('Validating flavor data...') self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act)
[ "def", "validate_flavor_data", "(", "self", ",", "expected", ",", "actual", ")", ":", "self", ".", "log", ".", "debug", "(", "'Validating flavor data...'", ")", "self", ".", "log", ".", "debug", "(", "'actual: {}'", ".", "format", "(", "repr", "(", "actual", ")", ")", ")", "act", "=", "[", "a", ".", "name", "for", "a", "in", "actual", "]", "return", "self", ".", "_validate_list_data", "(", "expected", ",", "act", ")" ]
41.111111
0.005291
def relation_call(method, relation_name=None, flag=None, state=None, *args): """Invoke a method on the class implementing a relation via the CLI""" if relation_name: relation = relation_from_name(relation_name) if relation is None: raise ValueError('Relation not found: %s' % relation_name) elif flag or state: relation = relation_from_flag(flag or state) if relation is None: raise ValueError('Relation not found: %s' % (flag or state)) else: raise ValueError('Must specify either relation_name or flag') result = getattr(relation, method)(*args) if isinstance(relation, RelationBase) and method == 'conversations': # special case for conversations to make them work from CLI result = [c.scope for c in result] return result
[ "def", "relation_call", "(", "method", ",", "relation_name", "=", "None", ",", "flag", "=", "None", ",", "state", "=", "None", ",", "*", "args", ")", ":", "if", "relation_name", ":", "relation", "=", "relation_from_name", "(", "relation_name", ")", "if", "relation", "is", "None", ":", "raise", "ValueError", "(", "'Relation not found: %s'", "%", "relation_name", ")", "elif", "flag", "or", "state", ":", "relation", "=", "relation_from_flag", "(", "flag", "or", "state", ")", "if", "relation", "is", "None", ":", "raise", "ValueError", "(", "'Relation not found: %s'", "%", "(", "flag", "or", "state", ")", ")", "else", ":", "raise", "ValueError", "(", "'Must specify either relation_name or flag'", ")", "result", "=", "getattr", "(", "relation", ",", "method", ")", "(", "*", "args", ")", "if", "isinstance", "(", "relation", ",", "RelationBase", ")", "and", "method", "==", "'conversations'", ":", "# special case for conversations to make them work from CLI", "result", "=", "[", "c", ".", "scope", "for", "c", "in", "result", "]", "return", "result" ]
48.058824
0.0012
def weakref_proxy(obj): """returns either a weakref.proxy for the object, or if object is already a proxy, returns itself.""" if type(obj) in weakref.ProxyTypes: return obj else: return weakref.proxy(obj)
[ "def", "weakref_proxy", "(", "obj", ")", ":", "if", "type", "(", "obj", ")", "in", "weakref", ".", "ProxyTypes", ":", "return", "obj", "else", ":", "return", "weakref", ".", "proxy", "(", "obj", ")" ]
32.857143
0.008475
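A quick check of the idempotent behaviour described in the row above (weakref_proxy itself is assumed to be in scope):

import weakref

class Thing:
    pass

obj = Thing()
p = weakref_proxy(obj)   # plain object -> wrapped in a weakref.proxy
q = weakref_proxy(p)     # already a proxy -> returned unchanged
print(type(p) in weakref.ProxyTypes, p is q)   # True True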
def DeleteClusterTags(r, tags, dry_run=False): """ Deletes tags from the cluster. @type tags: list of str @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run """ query = { "dry-run": dry_run, "tag": tags, } return r.request("delete", "/2/tags", query=query)
[ "def", "DeleteClusterTags", "(", "r", ",", "tags", ",", "dry_run", "=", "False", ")", ":", "query", "=", "{", "\"dry-run\"", ":", "dry_run", ",", "\"tag\"", ":", "tags", ",", "}", "return", "r", ".", "request", "(", "\"delete\"", ",", "\"/2/tags\"", ",", "query", "=", "query", ")" ]
21.375
0.002801
def python_to_couch(options): """ Translates query options from python style options into CouchDB/Cloudant query options. For example ``{'include_docs': True}`` will translate to ``{'include_docs': 'true'}``. Primarily meant for use by code that formulates a query to retrieve results data from the remote database, such as the database API convenience method :func:`~cloudant.database.CouchDatabase.all_docs` or the View :func:`~cloudant.view.View.__call__` callable, both used to retrieve data. :param dict options: Python style parameters to be translated. :returns: Dictionary of translated CouchDB/Cloudant query parameters """ translation = dict() for key, val in iteritems_(options): py_to_couch_validate(key, val) translation.update(_py_to_couch_translate(key, val)) return translation
[ "def", "python_to_couch", "(", "options", ")", ":", "translation", "=", "dict", "(", ")", "for", "key", ",", "val", "in", "iteritems_", "(", "options", ")", ":", "py_to_couch_validate", "(", "key", ",", "val", ")", "translation", ".", "update", "(", "_py_to_couch_translate", "(", "key", ",", "val", ")", ")", "return", "translation" ]
44.789474
0.001151
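A short illustration of the translation the docstring above describes; it assumes python_to_couch is imported from cloudant's helper module (the exact module path is not shown in this row):

opts = {'include_docs': True, 'descending': False}
print(python_to_couch(opts))
# -> {'include_docs': 'true', 'descending': 'false'}
# booleans are serialised to lowercase strings, as the docstring notes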
def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in yield_lines(ss): yield s
[ "def", "yield_lines", "(", "strs", ")", ":", "if", "isinstance", "(", "strs", ",", "string_types", ")", ":", "for", "s", "in", "strs", ".", "splitlines", "(", ")", ":", "s", "=", "s", ".", "strip", "(", ")", "# skip blank lines/comments", "if", "s", "and", "not", "s", ".", "startswith", "(", "'#'", ")", ":", "yield", "s", "else", ":", "for", "ss", "in", "strs", ":", "for", "s", "in", "yield_lines", "(", "ss", ")", ":", "yield", "s" ]
32
0.002532
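A self-contained illustration for the row above, assuming yield_lines and its string_types alias (str on Python 3) are in scope:

text = """
# a comment
first
second

third
"""
print(list(yield_lines(text)))           # ['first', 'second', 'third']
print(list(yield_lines([text, ' x '])))  # ['first', 'second', 'third', 'x']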
def get_ruleset_file(ruleset=None): """ Get the ruleset file from name :param ruleset: str :return: str """ ruleset = ruleset or "default" ruleset_dirs = get_ruleset_dirs() for ruleset_directory in ruleset_dirs: possible_ruleset_files = [os.path.join(ruleset_directory, ruleset + ext) for ext in EXTS] for ruleset_file in possible_ruleset_files: if os.path.isfile(ruleset_file): logger.debug("Ruleset file '{}' found.".format(ruleset_file)) return ruleset_file logger.warning("Ruleset with the name '{}' cannot be found at '{}'." .format(ruleset, ruleset_dirs)) raise ColinRulesetException("Ruleset with the name '{}' cannot be found.".format(ruleset))
[ "def", "get_ruleset_file", "(", "ruleset", "=", "None", ")", ":", "ruleset", "=", "ruleset", "or", "\"default\"", "ruleset_dirs", "=", "get_ruleset_dirs", "(", ")", "for", "ruleset_directory", "in", "ruleset_dirs", ":", "possible_ruleset_files", "=", "[", "os", ".", "path", ".", "join", "(", "ruleset_directory", ",", "ruleset", "+", "ext", ")", "for", "ext", "in", "EXTS", "]", "for", "ruleset_file", "in", "possible_ruleset_files", ":", "if", "os", ".", "path", ".", "isfile", "(", "ruleset_file", ")", ":", "logger", ".", "debug", "(", "\"Ruleset file '{}' found.\"", ".", "format", "(", "ruleset_file", ")", ")", "return", "ruleset_file", "logger", ".", "warning", "(", "\"Ruleset with the name '{}' cannot be found at '{}'.\"", ".", "format", "(", "ruleset", ",", "ruleset_dirs", ")", ")", "raise", "ColinRulesetException", "(", "\"Ruleset with the name '{}' cannot be found.\"", ".", "format", "(", "ruleset", ")", ")" ]
35.952381
0.003871
def run_callback(self, callback, *args): """Queue a callback. The *callback* will be called with positional arguments *args* in the next iteration of the event loop. If you add multiple callbacks, they will be called in the order that you added them. The callback will run in the Hub's fiber. This method is thread-safe: it is allowed to queue a callback from a different thread than the one running the Hub. """ if self._loop is None: raise RuntimeError('hub is closed') elif not callable(callback): raise TypeError('"callback": expecting a callable') self._callbacks.append((callback, args)) # thread-safe self._interrupt_loop()
[ "def", "run_callback", "(", "self", ",", "callback", ",", "*", "args", ")", ":", "if", "self", ".", "_loop", "is", "None", ":", "raise", "RuntimeError", "(", "'hub is closed'", ")", "elif", "not", "callable", "(", "callback", ")", ":", "raise", "TypeError", "(", "'\"callback\": expecting a callable'", ")", "self", ".", "_callbacks", ".", "append", "(", "(", "callback", ",", "args", ")", ")", "# thread-safe", "self", ".", "_interrupt_loop", "(", ")" ]
43.352941
0.002656
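A minimal sketch of the same thread-safe callback-queue pattern, assuming nothing beyond the standard library; the MiniHub class and its run_pending method are invented stand-ins for the real hub's event loop and _interrupt_loop.

import threading
from collections import deque

class MiniHub:
    def __init__(self):
        self._callbacks = deque()          # deque.append is atomic, so thread-safe
        self._wakeup = threading.Event()   # stands in for _interrupt_loop()

    def run_callback(self, callback, *args):
        if not callable(callback):
            raise TypeError('"callback": expecting a callable')
        self._callbacks.append((callback, args))
        self._wakeup.set()                 # wake the loop on its next wait

    def run_pending(self):
        # Drain the queue in FIFO order, as the real hub's fiber would.
        while self._callbacks:
            callback, args = self._callbacks.popleft()
            callback(*args)
        self._wakeup.clear()

hub = MiniHub()
hub.run_callback(print, "hello from the hub")
hub.run_pending()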
def _send_get_request(self, path, params, headers): """ Sends the GET request to the Route53 endpoint. :param str path: The path to tack on to the endpoint URL for the query. :param dict params: Key/value pairs to send. :param dict headers: A dict of headers to send with the request. :rtype: str :returns: The body of the response. """ r = requests.get(self.endpoint + path, params=params, headers=headers) r.raise_for_status() return r.text
[ "def", "_send_get_request", "(", "self", ",", "path", ",", "params", ",", "headers", ")", ":", "r", "=", "requests", ".", "get", "(", "self", ".", "endpoint", "+", "path", ",", "params", "=", "params", ",", "headers", "=", "headers", ")", "r", ".", "raise_for_status", "(", ")", "return", "r", ".", "text" ]
35.333333
0.003676
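A hedged sketch of the same GET pattern with requests, assuming network access; httpbin.org is only a stand-in endpoint, not the Route53 API, and the parameter values are made up.

import requests

endpoint = "https://httpbin.org"      # stand-in endpoint for the demo
path = "/get"
params = {"name": "example"}
headers = {"Accept": "application/json"}

r = requests.get(endpoint + path, params=params, headers=headers)
r.raise_for_status()                  # same error handling as the method above
print(r.text[:200])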
def meyer_penny_program(): """ Returns the program to simulate the Meyer-Penny Game The full description is available in docs/source/examples.rst :return: pyQuil Program """ prog = pq.Program() ro = prog.declare('ro', memory_size=2) picard_register = ro[1] answer_register = ro[0] then_branch = pq.Program(X(0)) else_branch = pq.Program(I(0)) # Prepare Qubits in Heads state or superposition, respectively prog.inst(X(0), H(1)) # Q puts the coin into a superposition prog.inst(H(0)) # Picard makes a decision and acts accordingly prog.measure(1, picard_register) prog.if_then(picard_register, then_branch, else_branch) # Q undoes his superposition operation prog.inst(H(0)) # The outcome is recorded into the answer register prog.measure(0, answer_register) return prog
[ "def", "meyer_penny_program", "(", ")", ":", "prog", "=", "pq", ".", "Program", "(", ")", "ro", "=", "prog", ".", "declare", "(", "'ro'", ",", "memory_size", "=", "2", ")", "picard_register", "=", "ro", "[", "1", "]", "answer_register", "=", "ro", "[", "0", "]", "then_branch", "=", "pq", ".", "Program", "(", "X", "(", "0", ")", ")", "else_branch", "=", "pq", ".", "Program", "(", "I", "(", "0", ")", ")", "# Prepare Qubits in Heads state or superposition, respectively", "prog", ".", "inst", "(", "X", "(", "0", ")", ",", "H", "(", "1", ")", ")", "# Q puts the coin into a superposition", "prog", ".", "inst", "(", "H", "(", "0", ")", ")", "# Picard makes a decision and acts accordingly", "prog", ".", "measure", "(", "1", ",", "picard_register", ")", "prog", ".", "if_then", "(", "picard_register", ",", "then_branch", ",", "else_branch", ")", "# Q undoes his superposition operation", "prog", ".", "inst", "(", "H", "(", "0", ")", ")", "# The outcome is recorded into the answer register", "prog", ".", "measure", "(", "0", ",", "answer_register", ")", "return", "prog" ]
29.928571
0.001156
def get_index_from_alias(alias_name, index_client=None): """Retrieve the base index name from an alias Args: alias_name (str) Name of the alias index_client (Elasticsearch.IndicesClient) an Elasticsearch index client. Optional, will create one if not given Returns: (str) Name of index """ index_client = index_client or indices_client() if not index_client.exists_alias(name=alias_name): return None return list(index_client.get_alias(name=alias_name).keys())[0]
[ "def", "get_index_from_alias", "(", "alias_name", ",", "index_client", "=", "None", ")", ":", "index_client", "=", "index_client", "or", "indices_client", "(", ")", "if", "not", "index_client", ".", "exists_alias", "(", "name", "=", "alias_name", ")", ":", "return", "None", "return", "list", "(", "index_client", ".", "get_alias", "(", "name", "=", "alias_name", ")", ".", "keys", "(", ")", ")", "[", "0", "]" ]
36.857143
0.00189
def shutdown(self): """Stops all active periodic tasks and closes the socket.""" self.stop_all_periodic_tasks() for channel in self._bcm_sockets: log.debug("Closing bcm socket for channel {}".format(channel)) bcm_socket = self._bcm_sockets[channel] bcm_socket.close() log.debug("Closing raw can socket") self.socket.close()
[ "def", "shutdown", "(", "self", ")", ":", "self", ".", "stop_all_periodic_tasks", "(", ")", "for", "channel", "in", "self", ".", "_bcm_sockets", ":", "log", ".", "debug", "(", "\"Closing bcm socket for channel {}\"", ".", "format", "(", "channel", ")", ")", "bcm_socket", "=", "self", ".", "_bcm_sockets", "[", "channel", "]", "bcm_socket", ".", "close", "(", ")", "log", ".", "debug", "(", "\"Closing raw can socket\"", ")", "self", ".", "socket", ".", "close", "(", ")" ]
43.444444
0.005013
def parse_arguments(argv): """Parse command line arguments. Args: argv: list of command line arguments, including program name. Returns: An argparse Namespace object. """ parser = argparse.ArgumentParser( description='Runs Preprocessing on structured CSV data.') parser.add_argument('--input-file-pattern', type=str, required=True, help='Input CSV file names. May contain a file pattern') parser.add_argument('--output-dir', type=str, required=True, help='Google Cloud Storage which to place outputs.') parser.add_argument('--schema-file', type=str, required=True, help=('BigQuery json schema file')) args = parser.parse_args(args=argv[1:]) # Make sure the output folder exists if local folder. file_io.recursive_create_dir(args.output_dir) return args
[ "def", "parse_arguments", "(", "argv", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Runs Preprocessing on structured CSV data.'", ")", "parser", ".", "add_argument", "(", "'--input-file-pattern'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'Input CSV file names. May contain a file pattern'", ")", "parser", ".", "add_argument", "(", "'--output-dir'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'Google Cloud Storage which to place outputs.'", ")", "parser", ".", "add_argument", "(", "'--schema-file'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "(", "'BigQuery json schema file'", ")", ")", "args", "=", "parser", ".", "parse_args", "(", "args", "=", "argv", "[", "1", ":", "]", ")", "# Make sure the output folder exists if local folder.", "file_io", ".", "recursive_create_dir", "(", "args", ".", "output_dir", ")", "return", "args" ]
32.2
0.01005
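A stand-alone demo of calling such a parser with a programmatic argv, standard library only; the file_io directory creation is omitted and the argument values are made up.

import argparse

parser = argparse.ArgumentParser(
    description='Runs Preprocessing on structured CSV data.')
parser.add_argument('--input-file-pattern', type=str, required=True)
parser.add_argument('--output-dir', type=str, required=True)
parser.add_argument('--schema-file', type=str, required=True)

# argv[0] is the program name, which the function above slices off with argv[1:].
argv = ['preprocess.py',
        '--input-file-pattern', 'data/*.csv',
        '--output-dir', 'gs://bucket/output',
        '--schema-file', 'schema.json']
args = parser.parse_args(args=argv[1:])
print(args.input_file_pattern, args.output_dir, args.schema_file)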
def vars_args(parser): """Add various command line options for external vars""" parser.add_argument('--extra-vars', dest='extra_vars', help='Extra template variables', default=[], type=str, action='append') parser.add_argument('--extra-vars-file', dest='extra_vars_file', help='YAML files full of variables', default=[], type=str, action='append')
[ "def", "vars_args", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'--extra-vars'", ",", "dest", "=", "'extra_vars'", ",", "help", "=", "'Extra template variables'", ",", "default", "=", "[", "]", ",", "type", "=", "str", ",", "action", "=", "'append'", ")", "parser", ".", "add_argument", "(", "'--extra-vars-file'", ",", "dest", "=", "'extra_vars_file'", ",", "help", "=", "'YAML files full of variables'", ",", "default", "=", "[", "]", ",", "type", "=", "str", ",", "action", "=", "'append'", ")" ]
41.857143
0.001669
def set_armed_state(self, state): """Set the armed state, also update local state.""" self.set_service_value( self.security_sensor_service, 'Armed', 'newArmedValue', state) self.set_cache_value('Armed', state)
[ "def", "set_armed_state", "(", "self", ",", "state", ")", ":", "self", ".", "set_service_value", "(", "self", ".", "security_sensor_service", ",", "'Armed'", ",", "'newArmedValue'", ",", "state", ")", "self", ".", "set_cache_value", "(", "'Armed'", ",", "state", ")" ]
34.25
0.007117
def acls(self): """The instance bound ACLs operations layer.""" if self._acls is None: self._acls = InstanceAcls(instance=self) return self._acls
[ "def", "acls", "(", "self", ")", ":", "if", "self", ".", "_acls", "is", "None", ":", "self", ".", "_acls", "=", "InstanceAcls", "(", "instance", "=", "self", ")", "return", "self", ".", "_acls" ]
35.4
0.01105
def reply_to(self, value): """The reply to email address :param value: The reply to email address :type value: ReplyTo, str, tuple """ if isinstance(value, str): value = ReplyTo(value, None) if isinstance(value, tuple): value = ReplyTo(value[0], value[1]) self._reply_to = value
[ "def", "reply_to", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "ReplyTo", "(", "value", ",", "None", ")", "if", "isinstance", "(", "value", ",", "tuple", ")", ":", "value", "=", "ReplyTo", "(", "value", "[", "0", "]", ",", "value", "[", "1", "]", ")", "self", ".", "_reply_to", "=", "value" ]
31.727273
0.005571
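A sketch of the str/tuple normalization used by the setter above, with a namedtuple standing in for the library's ReplyTo class; the function name normalize_reply_to is invented.

from collections import namedtuple

ReplyTo = namedtuple('ReplyTo', ['email', 'name'])   # stand-in for the real class

def normalize_reply_to(value):
    # Accept a bare address, an (address, name) tuple, or an existing ReplyTo.
    if isinstance(value, str):
        value = ReplyTo(value, None)
    elif isinstance(value, tuple):
        value = ReplyTo(value[0], value[1])
    return value

print(normalize_reply_to("no-reply@example.com"))
print(normalize_reply_to(("no-reply@example.com", "Support")))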
def dump_error_msg(msg, ofd=_LOGGER.debug): """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L908. Positional arguments: msg -- message to print (nl_msg class instance). Keyword arguments: ofd -- function to call with arguments similar to `logging.debug`. """ hdr = nlmsg_hdr(msg) err = libnl.linux_private.netlink.nlmsgerr(nlmsg_data(hdr)) ofd(' [ERRORMSG] %d octets', err.SIZEOF) if nlmsg_len(hdr) >= err.SIZEOF: ofd(' .error = %d "%s"', err.error, os.strerror(-err.error)) ofd(' [ORIGINAL MESSAGE] %d octets', hdr.SIZEOF) errmsg = nlmsg_inherit(err.msg) print_hdr(ofd, errmsg)
[ "def", "dump_error_msg", "(", "msg", ",", "ofd", "=", "_LOGGER", ".", "debug", ")", ":", "hdr", "=", "nlmsg_hdr", "(", "msg", ")", "err", "=", "libnl", ".", "linux_private", ".", "netlink", ".", "nlmsgerr", "(", "nlmsg_data", "(", "hdr", ")", ")", "ofd", "(", "' [ERRORMSG] %d octets'", ",", "err", ".", "SIZEOF", ")", "if", "nlmsg_len", "(", "hdr", ")", ">=", "err", ".", "SIZEOF", ":", "ofd", "(", "' .error = %d \"%s\"'", ",", "err", ".", "error", ",", "os", ".", "strerror", "(", "-", "err", ".", "error", ")", ")", "ofd", "(", "' [ORIGINAL MESSAGE] %d octets'", ",", "hdr", ".", "SIZEOF", ")", "errmsg", "=", "nlmsg_inherit", "(", "err", ".", "msg", ")", "print_hdr", "(", "ofd", ",", "errmsg", ")" ]
34.526316
0.001484
def key_changes(self, from_token, to_token): """Gets a list of users who have updated their device identity keys. Args: from_token (str): The desired start point of the list. Should be the next_batch field from a response to an earlier call to /sync. to_token (str): The desired end point of the list. Should be the next_batch field from a recent call to /sync - typically the most recent such call. """ params = {"from": from_token, "to": to_token} return self._send("GET", "/keys/changes", query_params=params)
[ "def", "key_changes", "(", "self", ",", "from_token", ",", "to_token", ")", ":", "params", "=", "{", "\"from\"", ":", "from_token", ",", "\"to\"", ":", "to_token", "}", "return", "self", ".", "_send", "(", "\"GET\"", ",", "\"/keys/changes\"", ",", "query_params", "=", "params", ")" ]
54.454545
0.00821
def export_module_spec_with_checkpoint(module_spec, checkpoint_path, export_path, scope_prefix=""): """Exports given checkpoint as tfhub module with given spec.""" # The main requirement is that it is possible to know how to map from # module variable name to checkpoint variable name. # This is trivial if the original code used variable scopes, # but can be messy if the variables to export are interwined # with variables not export. with tf.Graph().as_default(): m = hub.Module(module_spec) assign_map = { scope_prefix + name: value for name, value in m.variable_map.items() } tf.train.init_from_checkpoint(checkpoint_path, assign_map) init_op = tf.initializers.global_variables() with tf.Session() as session: session.run(init_op) m.export(export_path, session)
[ "def", "export_module_spec_with_checkpoint", "(", "module_spec", ",", "checkpoint_path", ",", "export_path", ",", "scope_prefix", "=", "\"\"", ")", ":", "# The main requirement is that it is possible to know how to map from", "# module variable name to checkpoint variable name.", "# This is trivial if the original code used variable scopes,", "# but can be messy if the variables to export are interwined", "# with variables not export.", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "m", "=", "hub", ".", "Module", "(", "module_spec", ")", "assign_map", "=", "{", "scope_prefix", "+", "name", ":", "value", "for", "name", ",", "value", "in", "m", ".", "variable_map", ".", "items", "(", ")", "}", "tf", ".", "train", ".", "init_from_checkpoint", "(", "checkpoint_path", ",", "assign_map", ")", "init_op", "=", "tf", ".", "initializers", ".", "global_variables", "(", ")", "with", "tf", ".", "Session", "(", ")", "as", "session", ":", "session", ".", "run", "(", "init_op", ")", "m", ".", "export", "(", "export_path", ",", "session", ")" ]
43.904762
0.010616
def observe_multi(self, keys, master_only=False): """Multi-variant of :meth:`observe`""" return _Base.observe_multi(self, keys, master_only=master_only)
[ "def", "observe_multi", "(", "self", ",", "keys", ",", "master_only", "=", "False", ")", ":", "return", "_Base", ".", "observe_multi", "(", "self", ",", "keys", ",", "master_only", "=", "master_only", ")" ]
55.333333
0.011905
def finite_difference(self, *args, **kwargs): """ Calculates a numerical approximation of the Jacobian of the model using the sixth order central finite difference method. Accepts a `dx` keyword to tune the relative stepsize used. Makes 6*n_params calls to the model. :return: A numerical approximation of the Jacobian of the model as a list with length n_components containing numpy arrays of shape (n_params, n_datapoints) """ # See also: scipy.misc.derivative. It might be convinced to work, but # it will make way too many function evaluations dx = kwargs.pop('dx') bound_arguments = self.__signature__.bind(*args, **kwargs) var_vals = [bound_arguments.arguments[var.name] for var in self.independent_vars] param_vals = [bound_arguments.arguments[param.name] for param in self.params] param_vals = np.array(param_vals, dtype=float) f = partial(self, *var_vals) # See also: scipy.misc.central_diff_weights factors = np.array((3/2., -3/5., 1/10.)) orders = np.arange(1, len(factors) + 1) out = [] # TODO: Dark numpy magic. Needs an extra dimension in out, and a sum # over the right axis at the end. # We can't make the output arrays yet, since we don't know the size of # the components. So put a sentinel value. out = None for param_idx, param_val in enumerate(param_vals): for order, factor in zip(orders, factors): h = np.zeros(len(self.params)) # Note: stepsize (h) depends on the parameter values... h[param_idx] = dx * order if abs(param_val) >= 1e-7: # ...but it'd better not be (too close to) 0. h[param_idx] *= param_val up = f(*(param_vals + h)) down = f(*(param_vals - h)) if out is None: # Initialize output arrays. Now that we evaluated f, we # know the size of our data. out = [] # out is a list of length Ncomponents with numpy arrays of # shape (Nparams, Ndata). Part of our misery comes from the # fact that the length of the data may be different for all # the components. Numpy doesn't like ragged arrays, so make # a list of arrays. for comp_idx in range(len(self)): try: len(up[comp_idx]) except TypeError: # output[comp_idx] is a number data_shape = (1,) else: data_shape = up[comp_idx].shape # Initialize at 0 so we can += all the contributions param_grad = np.zeros([len(self.params)] + list(data_shape), dtype=float) out.append(param_grad) for comp_idx in range(len(self)): diff = up[comp_idx] - down[comp_idx] out[comp_idx][param_idx, :] += factor * diff / (2 * h[param_idx]) return out
[ "def", "finite_difference", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# See also: scipy.misc.derivative. It might be convinced to work, but", "# it will make way too many function evaluations", "dx", "=", "kwargs", ".", "pop", "(", "'dx'", ")", "bound_arguments", "=", "self", ".", "__signature__", ".", "bind", "(", "*", "args", ",", "*", "*", "kwargs", ")", "var_vals", "=", "[", "bound_arguments", ".", "arguments", "[", "var", ".", "name", "]", "for", "var", "in", "self", ".", "independent_vars", "]", "param_vals", "=", "[", "bound_arguments", ".", "arguments", "[", "param", ".", "name", "]", "for", "param", "in", "self", ".", "params", "]", "param_vals", "=", "np", ".", "array", "(", "param_vals", ",", "dtype", "=", "float", ")", "f", "=", "partial", "(", "self", ",", "*", "var_vals", ")", "# See also: scipy.misc.central_diff_weights", "factors", "=", "np", ".", "array", "(", "(", "3", "/", "2.", ",", "-", "3", "/", "5.", ",", "1", "/", "10.", ")", ")", "orders", "=", "np", ".", "arange", "(", "1", ",", "len", "(", "factors", ")", "+", "1", ")", "out", "=", "[", "]", "# TODO: Dark numpy magic. Needs an extra dimension in out, and a sum", "# over the right axis at the end.", "# We can't make the output arrays yet, since we don't know the size of", "# the components. So put a sentinel value.", "out", "=", "None", "for", "param_idx", ",", "param_val", "in", "enumerate", "(", "param_vals", ")", ":", "for", "order", ",", "factor", "in", "zip", "(", "orders", ",", "factors", ")", ":", "h", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "params", ")", ")", "# Note: stepsize (h) depends on the parameter values...", "h", "[", "param_idx", "]", "=", "dx", "*", "order", "if", "abs", "(", "param_val", ")", ">=", "1e-7", ":", "# ...but it'd better not be (too close to) 0.", "h", "[", "param_idx", "]", "*=", "param_val", "up", "=", "f", "(", "*", "(", "param_vals", "+", "h", ")", ")", "down", "=", "f", "(", "*", "(", "param_vals", "-", "h", ")", ")", "if", "out", "is", "None", ":", "# Initialize output arrays. Now that we evaluated f, we", "# know the size of our data.", "out", "=", "[", "]", "# out is a list of length Ncomponents with numpy arrays of", "# shape (Nparams, Ndata). Part of our misery comes from the", "# fact that the length of the data may be different for all", "# the components. Numpy doesn't like ragged arrays, so make", "# a list of arrays.", "for", "comp_idx", "in", "range", "(", "len", "(", "self", ")", ")", ":", "try", ":", "len", "(", "up", "[", "comp_idx", "]", ")", "except", "TypeError", ":", "# output[comp_idx] is a number", "data_shape", "=", "(", "1", ",", ")", "else", ":", "data_shape", "=", "up", "[", "comp_idx", "]", ".", "shape", "# Initialize at 0 so we can += all the contributions", "param_grad", "=", "np", ".", "zeros", "(", "[", "len", "(", "self", ".", "params", ")", "]", "+", "list", "(", "data_shape", ")", ",", "dtype", "=", "float", ")", "out", ".", "append", "(", "param_grad", ")", "for", "comp_idx", "in", "range", "(", "len", "(", "self", ")", ")", ":", "diff", "=", "up", "[", "comp_idx", "]", "-", "down", "[", "comp_idx", "]", "out", "[", "comp_idx", "]", "[", "param_idx", ",", ":", "]", "+=", "factor", "*", "diff", "/", "(", "2", "*", "h", "[", "param_idx", "]", ")", "return", "out" ]
51.333333
0.00182
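A scalar sketch of the sixth-order central difference implied by the weights (3/2, -3/5, 1/10) and step sizes h, 2h, 3h used above, checked against a known derivative; the function name and test point are illustrative only.

import numpy as np

def central_diff_6th(f, x, h=1e-3):
    # Sixth-order scheme: sum_k factor_k * (f(x + k*h) - f(x - k*h)) / (2*k*h)
    factors = np.array([3/2., -3/5., 1/10.])
    orders = np.arange(1, len(factors) + 1)
    steps = orders * h
    return sum(factor * (f(x + s) - f(x - s)) / (2 * s)
               for factor, s in zip(factors, steps))

# d/dx sin(x) at x = 1.0 should be cos(1.0)
approx = central_diff_6th(np.sin, 1.0)
print(approx, np.cos(1.0), abs(approx - np.cos(1.0)))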
def delete_value(self, key): """ Delete the key if the token is expired. Arg: key : cache key """ response = {} response['status'] = False response['msg'] = "key does not exist" file_cache = self.read_file() if key in file_cache: del file_cache[key] self.update_file(file_cache) response['status'] = True response['msg'] = "success" return response
[ "def", "delete_value", "(", "self", ",", "key", ")", ":", "response", "=", "{", "}", "response", "[", "'status'", "]", "=", "False", "response", "[", "'msg'", "]", "=", "\"key does not exist\"", "file_cache", "=", "self", ".", "read_file", "(", ")", "if", "key", "in", "file_cache", ":", "del", "file_cache", "[", "key", "]", "self", ".", "update_file", "(", "file_cache", ")", "response", "[", "'status'", "]", "=", "True", "response", "[", "'msg'", "]", "=", "\"success\"", "return", "response" ]
26.055556
0.004115
def dimensions(self) -> Tuple[str, ...]: """The dimension names of the NetCDF variable. Usually, the string defined by property |IOSequence.descr_sequence| prefixes all dimension names except the second one related to time, which allows storing different sequences in one NetCDF file: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> from hydpy.core.netcdftools import NetCDFVariableDeep >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=1) >>> ncvar.log(elements.element1.model.sequences.fluxes.nkor, None) >>> ncvar.dimensions ('flux_nkor_stations', 'time', 'flux_nkor_axis3') However, when isolating variables into separate NetCDF files, the sequence-specific suffix is omitted: >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=True, timeaxis=1) >>> ncvar.log(elements.element1.model.sequences.fluxes.nkor, None) >>> ncvar.dimensions ('stations', 'time', 'axis3') When using the first axis as the "timeaxis", the order of the first two dimension names turns: >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=True, timeaxis=0) >>> ncvar.log(elements.element1.model.sequences.fluxes.nkor, None) >>> ncvar.dimensions ('time', 'stations', 'axis3') """ nmb_timepoints = dimmapping['nmb_timepoints'] nmb_subdevices = '%s%s' % (self.prefix, dimmapping['nmb_subdevices']) dimensions = list(self.sort_timeplaceentries( nmb_timepoints, nmb_subdevices)) for idx in range(list(self.sequences.values())[0].NDIM): dimensions.append('%saxis%d' % (self.prefix, idx + 3)) return tuple(dimensions)
[ "def", "dimensions", "(", "self", ")", "->", "Tuple", "[", "str", ",", "...", "]", ":", "nmb_timepoints", "=", "dimmapping", "[", "'nmb_timepoints'", "]", "nmb_subdevices", "=", "'%s%s'", "%", "(", "self", ".", "prefix", ",", "dimmapping", "[", "'nmb_subdevices'", "]", ")", "dimensions", "=", "list", "(", "self", ".", "sort_timeplaceentries", "(", "nmb_timepoints", ",", "nmb_subdevices", ")", ")", "for", "idx", "in", "range", "(", "list", "(", "self", ".", "sequences", ".", "values", "(", ")", ")", "[", "0", "]", ".", "NDIM", ")", ":", "dimensions", ".", "append", "(", "'%saxis%d'", "%", "(", "self", ".", "prefix", ",", "idx", "+", "3", ")", ")", "return", "tuple", "(", "dimensions", ")" ]
47.052632
0.001096
def horner_log(coeffs, log_coeff, x): '''Technically possible to save one addition if the last term of coeffs is removed, but benchmarks said nothing was saved''' tot = 0.0 for c in coeffs: tot = tot*x + c return tot + log_coeff*log(x)
[ "def", "horner_log", "(", "coeffs", ",", "log_coeff", ",", "x", ")", ":", "tot", "=", "0.0", "for", "c", "in", "coeffs", ":", "tot", "=", "tot", "*", "x", "+", "c", "return", "tot", "+", "log_coeff", "*", "log", "(", "x", ")" ]
36.714286
0.007605
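A worked example of the Horner evaluation plus the logarithmic term, with math.log standing in for the module-level log import assumed by the function; the coefficients are made up.

from math import log

def horner_log(coeffs, log_coeff, x):
    tot = 0.0
    for c in coeffs:
        tot = tot*x + c
    return tot + log_coeff*log(x)

# 2*x**2 + 3*x + 5 + 7*log(x) evaluated at x = 1.5
x = 1.5
print(horner_log([2.0, 3.0, 5.0], 7.0, x))
print(2.0*x**2 + 3.0*x + 5.0 + 7.0*log(x))   # same value, evaluated directly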
def index_service(self, service_id): """ Index a service in search engine. """ from hypermap.aggregator.models import Service service = Service.objects.get(id=service_id) if not service.is_valid: LOGGER.debug('Not indexing service with id %s in search engine as it is not valid' % service.id) return LOGGER.debug('Indexing service %s' % service.id) layer_to_process = service.layer_set.all() for layer in layer_to_process: if not settings.REGISTRY_SKIP_CELERY: index_layer(layer.id, use_cache=True) else: index_layer(layer.id)
[ "def", "index_service", "(", "self", ",", "service_id", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Service", "service", "=", "Service", ".", "objects", ".", "get", "(", "id", "=", "service_id", ")", "if", "not", "service", ".", "is_valid", ":", "LOGGER", ".", "debug", "(", "'Not indexing service with id %s in search engine as it is not valid'", "%", "service", ".", "id", ")", "return", "LOGGER", ".", "debug", "(", "'Indexing service %s'", "%", "service", ".", "id", ")", "layer_to_process", "=", "service", ".", "layer_set", ".", "all", "(", ")", "for", "layer", "in", "layer_to_process", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "index_layer", "(", "layer", ".", "id", ",", "use_cache", "=", "True", ")", "else", ":", "index_layer", "(", "layer", ".", "id", ")" ]
30.15
0.003215
def author_mail_from_git(self): """ Get the author mail from git information. """ try: # launch git command and get answer cmd = Popen(["git", "config", "--get", "user.email"], stdout=PIPE) stdoutdata = cmd.communicate() if (stdoutdata[0]): self.author_mail = stdoutdata[0].rstrip(os.linesep) except ImportError: pass except CalledProcessError: pass except OSError: pass return self.author_mail
[ "def", "author_mail_from_git", "(", "self", ")", ":", "try", ":", "# launch git command and get answer", "cmd", "=", "Popen", "(", "[", "\"git\"", ",", "\"config\"", ",", "\"--get\"", ",", "\"user.email\"", "]", ",", "stdout", "=", "PIPE", ")", "stdoutdata", "=", "cmd", ".", "communicate", "(", ")", "if", "(", "stdoutdata", "[", "0", "]", ")", ":", "self", ".", "author_mail", "=", "stdoutdata", "[", "0", "]", ".", "rstrip", "(", "os", ".", "linesep", ")", "except", "ImportError", ":", "pass", "except", "CalledProcessError", ":", "pass", "except", "OSError", ":", "pass", "return", "self", ".", "author_mail" ]
33
0.003683
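A stand-alone sketch of the same idea using subprocess.run; it assumes git is installed and user.email is configured, and returns None otherwise, mirroring the silent error handling above.

import subprocess

def author_mail_from_git():
    # Ask git for the configured email; swallow errors like the method above.
    try:
        out = subprocess.run(["git", "config", "--get", "user.email"],
                             capture_output=True, text=True, check=False)
        if out.stdout:
            return out.stdout.strip()
    except OSError:        # git not installed
        pass
    return None

print(author_mail_from_git())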
def inverable_group_multi_list(item_lists): """ aid_list1 = np.array([1, 1, 2, 2, 3, 3]) aid2_list = np.array([4, 2, 1, 9, 8, 7]) item_lists = (np.array(aid1_list), np.array(aid2_list)) """ #unique_list1, inverse1 = np.unique(item1_list, return_index=True, return_inverse=True) import vtool as vt import utool as ut # Find uniques and groups in each individual list unique_lists = [] groupx_lists = [] for item_list in item_lists: unique_items, groupxs = vt.group_indices(item_list) unique_lists.append(unique_items) groupx_lists.append(groupxs) # Merge all indexes into a signle long list groups_stacked = ut.flatten(groupx_lists) flat_stacked, cumsum = ut.invertible_flatten2(unique_lists) # Find uniques in those lists flat_unique, stack_groups = vt.group_indices(np.array(flat_stacked)) # Get a list of corresonding group indicies from each input list flat_groupx_multilist = [ut.take(groups_stacked, groupx) for groupx in stack_groups] # flat_unique corresponds with the aids (hence chips) the flag_groupxs # multilist is a list where each item is a tuple who's nth item indexes # into the nth input list. Ie (1, 0) is a list of indexes into the 1st chip # the 0th keypoint list return flat_unique, flat_groupx_multilist
[ "def", "inverable_group_multi_list", "(", "item_lists", ")", ":", "#unique_list1, inverse1 = np.unique(item1_list, return_index=True, return_inverse=True)", "import", "vtool", "as", "vt", "import", "utool", "as", "ut", "# Find uniques and groups in each individual list", "unique_lists", "=", "[", "]", "groupx_lists", "=", "[", "]", "for", "item_list", "in", "item_lists", ":", "unique_items", ",", "groupxs", "=", "vt", ".", "group_indices", "(", "item_list", ")", "unique_lists", ".", "append", "(", "unique_items", ")", "groupx_lists", ".", "append", "(", "groupxs", ")", "# Merge all indexes into a signle long list", "groups_stacked", "=", "ut", ".", "flatten", "(", "groupx_lists", ")", "flat_stacked", ",", "cumsum", "=", "ut", ".", "invertible_flatten2", "(", "unique_lists", ")", "# Find uniques in those lists", "flat_unique", ",", "stack_groups", "=", "vt", ".", "group_indices", "(", "np", ".", "array", "(", "flat_stacked", ")", ")", "# Get a list of corresonding group indicies from each input list", "flat_groupx_multilist", "=", "[", "ut", ".", "take", "(", "groups_stacked", ",", "groupx", ")", "for", "groupx", "in", "stack_groups", "]", "# flat_unique corresponds with the aids (hence chips) the flag_groupxs", "# multilist is a list where each item is a tuple who's nth item indexes", "# into the nth input list. Ie (1, 0) is a list of indexes into the 1st chip", "# the 0th keypoint list", "return", "flat_unique", ",", "flat_groupx_multilist" ]
47.071429
0.002974
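Since vt.group_indices is not shown here, a hedged numpy stand-in for that grouping step: unique values plus, for each value, the positions where it occurs.

import numpy as np

def group_indices(arr):
    # Hedged stand-in for vtool.group_indices, not the library implementation.
    order = np.argsort(arr, kind="stable")
    sorted_vals = arr[order]
    boundaries = np.flatnonzero(np.diff(sorted_vals)) + 1
    groupxs = np.split(order, boundaries)
    unique_vals = np.array([arr[g[0]] for g in groupxs])
    return unique_vals, groupxs

aid_list = np.array([1, 1, 2, 2, 3, 3])
unique_items, groupxs = group_indices(aid_list)
print(unique_items)                    # [1 2 3]
print([g.tolist() for g in groupxs])   # [[0, 1], [2, 3], [4, 5]]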
def spec_dice(spec): """ Return the dice specification as a string in a common format """ if spec[0] == 'c': return str(spec[1]) elif spec[0] == 'r': r = spec[1:] s = "{}d{}".format(r[0], r[1]) if len(r) == 4 and ((r[2] == 'd' and r[3] < r[0]) or (r[2] == 'k' and r[3] > 0)): s += "{}{}".format(r[2], r[3]) return s elif spec[0] in ops: return "{} {} {}".format(spec_dice(spec[1]), spec[0], spec_dice(spec[2])) else: raise ValueError("Invalid dice specification")
[ "def", "spec_dice", "(", "spec", ")", ":", "if", "spec", "[", "0", "]", "==", "'c'", ":", "return", "str", "(", "spec", "[", "1", "]", ")", "elif", "spec", "[", "0", "]", "==", "'r'", ":", "r", "=", "spec", "[", "1", ":", "]", "s", "=", "\"{}d{}\"", ".", "format", "(", "r", "[", "0", "]", ",", "r", "[", "1", "]", ")", "if", "len", "(", "r", ")", "==", "4", "and", "(", "(", "r", "[", "2", "]", "==", "'d'", "and", "r", "[", "3", "]", "<", "r", "[", "0", "]", ")", "or", "(", "r", "[", "2", "]", "==", "'k'", "and", "r", "[", "3", "]", ">", "0", ")", ")", ":", "s", "+=", "\"{}{}\"", ".", "format", "(", "r", "[", "2", "]", ",", "r", "[", "3", "]", ")", "return", "s", "elif", "spec", "[", "0", "]", "in", "ops", ":", "return", "\"{} {} {}\"", ".", "format", "(", "spec_dice", "(", "spec", "[", "1", "]", ")", ",", "spec", "[", "0", "]", ",", "spec_dice", "(", "spec", "[", "2", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid dice specification\"", ")" ]
40.923077
0.009191
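A demo of how nested spec tuples render; the ops set is redefined locally as an assumption, since the real module defines its own operator table that is not shown here.

ops = {'+', '-'}    # assumed operator set for the demo

def spec_dice(spec):
    if spec[0] == 'c':
        return str(spec[1])
    elif spec[0] == 'r':
        r = spec[1:]
        s = "{}d{}".format(r[0], r[1])
        if len(r) == 4 and ((r[2] == 'd' and r[3] < r[0]) or (r[2] == 'k' and r[3] > 0)):
            s += "{}{}".format(r[2], r[3])
        return s
    elif spec[0] in ops:
        return "{} {} {}".format(spec_dice(spec[1]), spec[0], spec_dice(spec[2]))
    else:
        raise ValueError("Invalid dice specification")

print(spec_dice(('r', 3, 6, 'k', 2)))                   # 3d6k2
print(spec_dice(('+', ('r', 4, 6, 'd', 1), ('c', 2))))  # 4d6d1 + 2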
def plot_diagrams( diagrams, plot_only=None, title=None, xy_range=None, labels=None, colormap="default", size=20, ax_color=np.array([0.0, 0.0, 0.0]), diagonal=True, lifetime=False, legend=True, show=False, ax=None ): """A helper function to plot persistence diagrams. Parameters ---------- diagrams: ndarray (n_pairs, 2) or list of diagrams A diagram or list of diagrams. If diagram is a list of diagrams, then plot all on the same plot using different colors. plot_only: list of numeric If specified, an array of only the diagrams that should be plotted. title: string, default is None If title is defined, add it as title of the plot. xy_range: list of numeric [xmin, xmax, ymin, ymax] User provided range of axes. This is useful for comparing multiple persistence diagrams. labels: string or list of strings Legend labels for each diagram. If none are specified, we use H_0, H_1, H_2,... by default. colormap: string, default is 'default' Any of matplotlib color palettes. Some options are 'default', 'seaborn', 'sequential'. See all available styles with .. code:: python import matplotlib as mpl print(mpl.styles.available) size: numeric, default is 20 Pixel size of each point plotted. ax_color: any valid matplotlib color type. See [https://matplotlib.org/api/colors_api.html](https://matplotlib.org/api/colors_api.html) for complete API. diagonal: bool, default is True Plot the diagonal x=y line. lifetime: bool, default is False. If True, diagonal is turned to False. Plot life time of each point instead of birth and death. Essentially, visualize (x, y-x). legend: bool, default is True If true, show the legend. show: bool, default is False Call plt.show() after plotting. If you are using self.plot() as part of a subplot, set show=False and call plt.show() only once at the end. """ ax = ax or plt.gca() plt.style.use(colormap) xlabel, ylabel = "Birth", "Death" if labels is None: # Provide default labels for diagrams if using self.dgm_ labels = [ "$H_0$", "$H_1$", "$H_2$", "$H_3$", "$H_4$", "$H_5$", "$H_6$", "$H_7$", "$H_8$", ] if not isinstance(diagrams, list): # Must have diagrams as a list for processing downstream diagrams = [diagrams] if plot_only: diagrams = [diagrams[i] for i in plot_only] labels = [labels[i] for i in plot_only] if not isinstance(labels, list): labels = [labels] * len(diagrams) # Construct copy with proper type of each diagram # so we can freely edit them. diagrams = [dgm.astype(np.float32, copy=True) for dgm in diagrams] # find min and max of all visible diagrams concat_dgms = np.concatenate(diagrams).flatten() has_inf = np.any(np.isinf(concat_dgms)) finite_dgms = concat_dgms[np.isfinite(concat_dgms)] # clever bounding boxes of the diagram if not xy_range: # define bounds of diagram ax_min, ax_max = np.min(finite_dgms), np.max(finite_dgms) x_r = ax_max - ax_min # Give plot a nice buffer on all sides. # ax_range=0 when only one point, buffer = 1 if xy_range == 0 else x_r / 5 x_down = ax_min - buffer / 2 x_up = ax_max + buffer y_down, y_up = x_down, x_up else: x_down, x_up, y_down, y_up = xy_range yr = y_up - y_down if lifetime: # Don't plot landscape and diagonal at the same time. 
diagonal = False # reset y axis so it doesn't go much below zero y_down = -yr * 0.05 y_up = y_down + yr # set custom ylabel ylabel = "Lifetime" # set diagrams to be (x, y-x) for dgm in diagrams: dgm[:, 1] -= dgm[:, 0] # plot horizon line ax.plot([x_down, x_up], [0, 0], c=ax_color) # Plot diagonal if diagonal: ax.plot([x_down, x_up], [x_down, x_up], "--", c=ax_color) # Plot inf line if has_inf: # put inf line slightly below top b_inf = y_down + yr * 0.95 ax.plot([x_down, x_up], [b_inf, b_inf], "--", c="k", label=r"$\infty$") # convert each inf in each diagram with b_inf for dgm in diagrams: dgm[np.isinf(dgm)] = b_inf # Plot each diagram for dgm, label in zip(diagrams, labels): # plot persistence pairs ax.scatter(dgm[:, 0], dgm[:, 1], size, label=label, edgecolor="none") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xlim([x_down, x_up]) ax.set_ylim([y_down, y_up]) ax.set_aspect('equal', 'box') if title is not None: ax.set_title(title) if legend is True: ax.legend(loc="lower right") if show is True: plt.show()
[ "def", "plot_diagrams", "(", "diagrams", ",", "plot_only", "=", "None", ",", "title", "=", "None", ",", "xy_range", "=", "None", ",", "labels", "=", "None", ",", "colormap", "=", "\"default\"", ",", "size", "=", "20", ",", "ax_color", "=", "np", ".", "array", "(", "[", "0.0", ",", "0.0", ",", "0.0", "]", ")", ",", "diagonal", "=", "True", ",", "lifetime", "=", "False", ",", "legend", "=", "True", ",", "show", "=", "False", ",", "ax", "=", "None", ")", ":", "ax", "=", "ax", "or", "plt", ".", "gca", "(", ")", "plt", ".", "style", ".", "use", "(", "colormap", ")", "xlabel", ",", "ylabel", "=", "\"Birth\"", ",", "\"Death\"", "if", "labels", "is", "None", ":", "# Provide default labels for diagrams if using self.dgm_", "labels", "=", "[", "\"$H_0$\"", ",", "\"$H_1$\"", ",", "\"$H_2$\"", ",", "\"$H_3$\"", ",", "\"$H_4$\"", ",", "\"$H_5$\"", ",", "\"$H_6$\"", ",", "\"$H_7$\"", ",", "\"$H_8$\"", ",", "]", "if", "not", "isinstance", "(", "diagrams", ",", "list", ")", ":", "# Must have diagrams as a list for processing downstream", "diagrams", "=", "[", "diagrams", "]", "if", "plot_only", ":", "diagrams", "=", "[", "diagrams", "[", "i", "]", "for", "i", "in", "plot_only", "]", "labels", "=", "[", "labels", "[", "i", "]", "for", "i", "in", "plot_only", "]", "if", "not", "isinstance", "(", "labels", ",", "list", ")", ":", "labels", "=", "[", "labels", "]", "*", "len", "(", "diagrams", ")", "# Construct copy with proper type of each diagram", "# so we can freely edit them.", "diagrams", "=", "[", "dgm", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "True", ")", "for", "dgm", "in", "diagrams", "]", "# find min and max of all visible diagrams", "concat_dgms", "=", "np", ".", "concatenate", "(", "diagrams", ")", ".", "flatten", "(", ")", "has_inf", "=", "np", ".", "any", "(", "np", ".", "isinf", "(", "concat_dgms", ")", ")", "finite_dgms", "=", "concat_dgms", "[", "np", ".", "isfinite", "(", "concat_dgms", ")", "]", "# clever bounding boxes of the diagram", "if", "not", "xy_range", ":", "# define bounds of diagram", "ax_min", ",", "ax_max", "=", "np", ".", "min", "(", "finite_dgms", ")", ",", "np", ".", "max", "(", "finite_dgms", ")", "x_r", "=", "ax_max", "-", "ax_min", "# Give plot a nice buffer on all sides.", "# ax_range=0 when only one point,", "buffer", "=", "1", "if", "xy_range", "==", "0", "else", "x_r", "/", "5", "x_down", "=", "ax_min", "-", "buffer", "/", "2", "x_up", "=", "ax_max", "+", "buffer", "y_down", ",", "y_up", "=", "x_down", ",", "x_up", "else", ":", "x_down", ",", "x_up", ",", "y_down", ",", "y_up", "=", "xy_range", "yr", "=", "y_up", "-", "y_down", "if", "lifetime", ":", "# Don't plot landscape and diagonal at the same time.", "diagonal", "=", "False", "# reset y axis so it doesn't go much below zero", "y_down", "=", "-", "yr", "*", "0.05", "y_up", "=", "y_down", "+", "yr", "# set custom ylabel", "ylabel", "=", "\"Lifetime\"", "# set diagrams to be (x, y-x)", "for", "dgm", "in", "diagrams", ":", "dgm", "[", ":", ",", "1", "]", "-=", "dgm", "[", ":", ",", "0", "]", "# plot horizon line", "ax", ".", "plot", "(", "[", "x_down", ",", "x_up", "]", ",", "[", "0", ",", "0", "]", ",", "c", "=", "ax_color", ")", "# Plot diagonal", "if", "diagonal", ":", "ax", ".", "plot", "(", "[", "x_down", ",", "x_up", "]", ",", "[", "x_down", ",", "x_up", "]", ",", "\"--\"", ",", "c", "=", "ax_color", ")", "# Plot inf line", "if", "has_inf", ":", "# put inf line slightly below top", "b_inf", "=", "y_down", "+", "yr", "*", "0.95", "ax", ".", "plot", "(", "[", "x_down", ",", "x_up", "]", 
",", "[", "b_inf", ",", "b_inf", "]", ",", "\"--\"", ",", "c", "=", "\"k\"", ",", "label", "=", "r\"$\\infty$\"", ")", "# convert each inf in each diagram with b_inf", "for", "dgm", "in", "diagrams", ":", "dgm", "[", "np", ".", "isinf", "(", "dgm", ")", "]", "=", "b_inf", "# Plot each diagram", "for", "dgm", ",", "label", "in", "zip", "(", "diagrams", ",", "labels", ")", ":", "# plot persistence pairs", "ax", ".", "scatter", "(", "dgm", "[", ":", ",", "0", "]", ",", "dgm", "[", ":", ",", "1", "]", ",", "size", ",", "label", "=", "label", ",", "edgecolor", "=", "\"none\"", ")", "ax", ".", "set_xlabel", "(", "xlabel", ")", "ax", ".", "set_ylabel", "(", "ylabel", ")", "ax", ".", "set_xlim", "(", "[", "x_down", ",", "x_up", "]", ")", "ax", ".", "set_ylim", "(", "[", "y_down", ",", "y_up", "]", ")", "ax", ".", "set_aspect", "(", "'equal'", ",", "'box'", ")", "if", "title", "is", "not", "None", ":", "ax", ".", "set_title", "(", "title", ")", "if", "legend", "is", "True", ":", "ax", ".", "legend", "(", "loc", "=", "\"lower right\"", ")", "if", "show", "is", "True", ":", "plt", ".", "show", "(", ")" ]
29.129412
0.002539
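A minimal matplotlib sketch of the core of the plot above (a scatter of birth/death pairs plus the diagonal), not the full helper; the H0/H1 values are toy numbers.

import numpy as np
import matplotlib.pyplot as plt

# Toy H0/H1 persistence pairs (birth, death); values are made up.
h0 = np.array([[0.0, 0.8], [0.0, 0.3], [0.0, 0.1]])
h1 = np.array([[0.4, 0.9], [0.5, 0.6]])

fig, ax = plt.subplots()
lo, hi = 0.0, 1.0
ax.plot([lo, hi], [lo, hi], "--", c="k")            # diagonal x = y
ax.scatter(h0[:, 0], h0[:, 1], 20, label="$H_0$", edgecolor="none")
ax.scatter(h1[:, 0], h1[:, 1], 20, label="$H_1$", edgecolor="none")
ax.set_xlabel("Birth")
ax.set_ylabel("Death")
ax.set_aspect("equal", "box")
ax.legend(loc="lower right")
plt.show()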
def set_user_agent(self, name, http): '''Sets the application name. LibVLC passes this as the user agent string when a protocol requires it. @param name: human-readable application name, e.g. "FooBar player 1.2.3". @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0". @version: LibVLC 1.1.1 or later. ''' return libvlc_set_user_agent(self, str_to_bytes(name), str_to_bytes(http))
[ "def", "set_user_agent", "(", "self", ",", "name", ",", "http", ")", ":", "return", "libvlc_set_user_agent", "(", "self", ",", "str_to_bytes", "(", "name", ")", ",", "str_to_bytes", "(", "http", ")", ")" ]
54.875
0.011211
def _construct(self, context): """Constructs this by calling the deferred method. This assumes that all unbound_vars have been specified in context and if this layer has already been computed in this context, then the previously constructed value will be returned. Args: context: A dict of UnboundVariables/_DeferredLayers to their values. Returns: The result of calling the given method on this layer. """ with self.g.as_default(): if self._pass_through: # pylint: disable=protected-access return self._pass_through._construct(context) current_value = context.get(self, None) assert current_value is not _unspecified, 'Circular dependency' if current_value is not None: return current_value context[self] = _unspecified method_args = self._replace_deferred(self._method_args, context) method_kwargs = self._replace_deferred(self._method_kwargs, context) result = self._method(*method_args, **method_kwargs) _strip_unnecessary_contents_from_stack(result, set()) context[self] = result return result
[ "def", "_construct", "(", "self", ",", "context", ")", ":", "with", "self", ".", "g", ".", "as_default", "(", ")", ":", "if", "self", ".", "_pass_through", ":", "# pylint: disable=protected-access", "return", "self", ".", "_pass_through", ".", "_construct", "(", "context", ")", "current_value", "=", "context", ".", "get", "(", "self", ",", "None", ")", "assert", "current_value", "is", "not", "_unspecified", ",", "'Circular dependency'", "if", "current_value", "is", "not", "None", ":", "return", "current_value", "context", "[", "self", "]", "=", "_unspecified", "method_args", "=", "self", ".", "_replace_deferred", "(", "self", ".", "_method_args", ",", "context", ")", "method_kwargs", "=", "self", ".", "_replace_deferred", "(", "self", ".", "_method_kwargs", ",", "context", ")", "result", "=", "self", ".", "_method", "(", "*", "method_args", ",", "*", "*", "method_kwargs", ")", "_strip_unnecessary_contents_from_stack", "(", "result", ",", "set", "(", ")", ")", "context", "[", "self", "]", "=", "result", "return", "result" ]
39.571429
0.010573
def evolved_transformer_decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, cache=None, decode_loop_step=None, name="decoder", nonpadding=None, save_weights_to=None, make_image_summary=True, losses=None): """Evolved Transformer decoder. See arxiv.org/abs/1901.11117 for more details. Args: decoder_input: a Tensor. encoder_output: a Tensor. decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()). encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()). hparams: hyperparameters for model. cache: dict, containing tensors which are the results of previous layers, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. name: a string. nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. losses: Not supported. Returns: Decoder output tensor. """ del losses attention_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "attention_dropout_broadcast_dims", ""))) with tf.variable_scope(name): hidden_state = decoder_input for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): layer_name = "layer_%d" % layer layer_cache = cache[layer_name] if cache is not None else None with tf.variable_scope(layer_name): with tf.variable_scope(_SIXTEEN_HEAD_ATTENTION_NAME): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) attention_cache = layer_cache[ _SIXTEEN_HEAD_ATTENTION_NAME] if layer_cache is not None else None left_state = common_attention.multihead_attention( hidden_state, None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, _capped_double_heads(hparams.num_heads), hparams.attention_dropout, attention_type=hparams.self_attention_type, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=attention_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), decode_loop_step=decode_loop_step, vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32")) if encoder_output is not None: with tf.variable_scope(_FIRST_ATTEND_TO_ENCODER_NAME): attention_cache = ( layer_cache[_FIRST_ATTEND_TO_ENCODER_NAME] if layer_cache is not None else None) right_state = common_attention.multihead_attention( hidden_state, encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, 
max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=attention_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32")) left_state = tf.nn.dropout(left_state, 1 - hparams.layer_prepostprocess_dropout) right_state = tf.nn.dropout( right_state, 1 - hparams.layer_prepostprocess_dropout) hidden_state = residual_state + left_state + right_state else: hidden_state = common_layers.layer_postprocess( residual_state, left_state, hparams) with tf.variable_scope(_CONV_BRANCHES_NAME): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) if nonpadding is not None: # Mask padding from conv layers. mask = tf.tile( tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size]) hidden_state *= mask if layer_cache: if decode_loop_step is None: hidden_state = layer_cache[ _CONV_BRANCHES_FIRST_LAYER_NAME] = tf.concat( [ layer_cache[_CONV_BRANCHES_FIRST_LAYER_NAME], hidden_state ], axis=1)[:, -1 * _DECODER_LEFT_CONV_PADDING - 1:, :] left_state = hidden_state right_state = hidden_state[:, _DECODER_LEFT_CONV_PADDING - _DECODER_RIGHT_CONV_PADDING:, :] else: # Inplace update is required for inference on TPU. # Inplace_ops only supports inplace_update on the first dimension. tmp = tf.transpose( layer_cache[_CONV_BRANCHES_FIRST_LAYER_NAME], perm=[1, 0, 2]) tmp = tf.expand_dims(tmp, axis=1) tmp = inplace_ops.alias_inplace_update( tmp, decode_loop_step * tf.shape(hidden_state)[1] + _DECODER_LEFT_CONV_PADDING, tf.transpose(hidden_state, perm=[1, 0, 2])) tmp = tf.squeeze(tmp, axis=1) hidden_state = layer_cache[ _CONV_BRANCHES_FIRST_LAYER_NAME] = tf.transpose( tmp, perm=[1, 0, 2]) left_state_indexes = [ decode_loop_step + i for i in range(_DECODER_LEFT_CONV_PADDING + 1) ] left_state = tf.gather(hidden_state, left_state_indexes, axis=1) right_state_indexes = [ decode_loop_step + i + (_DECODER_LEFT_CONV_PADDING - _DECODER_RIGHT_CONV_PADDING) for i in range(_DECODER_RIGHT_CONV_PADDING + 1) ] right_state = tf.gather(hidden_state, right_state_indexes, axis=1) else: # No caching. left_state = tf.pad( hidden_state, paddings=[[0, 0], [_DECODER_LEFT_CONV_PADDING, 0], [0, 0]]) right_state = tf.pad( hidden_state, paddings=[[0, 0], [_DECODER_RIGHT_CONV_PADDING, 0], [0, 0]]) left_output_dim = int(hparams.hidden_size * 2) separable_conv_11x1 = tf.layers.SeparableConv1D( left_output_dim, 11, padding="VALID", name="separable_conv11x1", activation=tf.nn.relu) left_state = separable_conv_11x1.apply(left_state) left_state = tf.nn.dropout(left_state, 1 - hparams.layer_prepostprocess_dropout) right_output_dim = int(hparams.hidden_size / 2) separable_conv_7x1_1 = tf.layers.SeparableConv1D( right_output_dim, 7, padding="VALID", name="separable_conv_7x1_1") right_state = separable_conv_7x1_1.apply(right_state) right_state = tf.nn.dropout(right_state, 1 - hparams.layer_prepostprocess_dropout) right_state = tf.pad( right_state, [[0, 0], [0, 0], [0, left_output_dim - right_output_dim]], constant_values=0) hidden_state = left_state + right_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) if nonpadding is not None: # Mask padding from conv layers. 
mask = tf.tile( tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size * 2]) hidden_state *= mask if layer_cache: if decode_loop_step is None: hidden_state = layer_cache[ _CONV_BRANCHES_SECOND_LAYER_NAME] = tf.concat( [ layer_cache[_CONV_BRANCHES_SECOND_LAYER_NAME], hidden_state ], axis=1)[:, -1 * _DECODER_FINAL_CONV_PADDING - 1:, :] else: # Inplace update is required for inference on TPU. # Inplace_ops only supports inplace_update on the first dimension. tmp = tf.transpose( layer_cache[_CONV_BRANCHES_SECOND_LAYER_NAME], perm=[1, 0, 2]) tmp = tf.expand_dims(tmp, axis=1) tmp = inplace_ops.alias_inplace_update( tmp, (decode_loop_step + _DECODER_FINAL_CONV_PADDING) * tf.shape(hidden_state)[1], tf.transpose(hidden_state, perm=[1, 0, 2])) tmp = tf.squeeze(tmp, axis=1) hidden_state = layer_cache[ _CONV_BRANCHES_SECOND_LAYER_NAME] = tf.transpose( tmp, perm=[1, 0, 2]) hidden_state_indexes = [ decode_loop_step + i for i in range(_DECODER_FINAL_CONV_PADDING + 1) ] hidden_state = tf.gather( hidden_state, hidden_state_indexes, axis=1) else: hidden_state = tf.pad( hidden_state, paddings=[[0, 0], [_DECODER_FINAL_CONV_PADDING, 0], [0, 0]]) separable_conv_7x1_2 = tf.layers.SeparableConv1D( hparams.hidden_size, 7, padding="VALID", name="separable_conv_7x1_2") hidden_state = separable_conv_7x1_2.apply(hidden_state) hidden_state = common_layers.layer_postprocess( residual_state, hidden_state, hparams) with tf.variable_scope(_VANILLA_ATTENTION_NAME): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) attention_cache = layer_cache[ _VANILLA_ATTENTION_NAME] if layer_cache is not None else None hidden_state = common_attention.multihead_attention( hidden_state, None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=attention_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), decode_loop_step=decode_loop_step, vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32")) hidden_state = common_layers.layer_postprocess( residual_state, hidden_state, hparams) if encoder_output is not None: with tf.variable_scope(_SECOND_ATTEND_TO_ENCODER_NAME): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) attention_cache = ( layer_cache[_SECOND_ATTEND_TO_ENCODER_NAME] if layer_cache is not None else None) hidden_state = common_attention.multihead_attention( hidden_state, encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=attention_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, 
max_length=hparams.get("max_length"), vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32")) hidden_state = common_layers.layer_postprocess( residual_state, hidden_state, hparams) with tf.variable_scope("dense_layers"): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) hidden_state = tf.layers.dense( hidden_state, int(hparams.hidden_size * 4), activation=tf.nn.swish) hidden_state = tf.nn.dropout(hidden_state, 1 - hparams.layer_prepostprocess_dropout) hidden_state = common_layers.layer_preprocess(hidden_state, hparams) hidden_state = tf.layers.dense(hidden_state, hparams.hidden_size) hidden_state = common_layers.layer_postprocess( residual_state, hidden_state, hparams) return common_layers.layer_preprocess(hidden_state, hparams)
[ "def", "evolved_transformer_decoder", "(", "decoder_input", ",", "encoder_output", ",", "decoder_self_attention_bias", ",", "encoder_decoder_attention_bias", ",", "hparams", ",", "cache", "=", "None", ",", "decode_loop_step", "=", "None", ",", "name", "=", "\"decoder\"", ",", "nonpadding", "=", "None", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=", "True", ",", "losses", "=", "None", ")", ":", "del", "losses", "attention_dropout_broadcast_dims", "=", "(", "common_layers", ".", "comma_separated_string_to_integer_list", "(", "getattr", "(", "hparams", ",", "\"attention_dropout_broadcast_dims\"", ",", "\"\"", ")", ")", ")", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "hidden_state", "=", "decoder_input", "for", "layer", "in", "range", "(", "hparams", ".", "num_decoder_layers", "or", "hparams", ".", "num_hidden_layers", ")", ":", "layer_name", "=", "\"layer_%d\"", "%", "layer", "layer_cache", "=", "cache", "[", "layer_name", "]", "if", "cache", "is", "not", "None", "else", "None", "with", "tf", ".", "variable_scope", "(", "layer_name", ")", ":", "with", "tf", ".", "variable_scope", "(", "_SIXTEEN_HEAD_ATTENTION_NAME", ")", ":", "residual_state", "=", "hidden_state", "hidden_state", "=", "common_layers", ".", "layer_preprocess", "(", "hidden_state", ",", "hparams", ")", "attention_cache", "=", "layer_cache", "[", "_SIXTEEN_HEAD_ATTENTION_NAME", "]", "if", "layer_cache", "is", "not", "None", "else", "None", "left_state", "=", "common_attention", ".", "multihead_attention", "(", "hidden_state", ",", "None", ",", "decoder_self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "_capped_double_heads", "(", "hparams", ".", "num_heads", ")", ",", "hparams", ".", "attention_dropout", ",", "attention_type", "=", "hparams", ".", "self_attention_type", ",", "max_relative_position", "=", "hparams", ".", "max_relative_position", ",", "heads_share_relative_embedding", "=", "(", "hparams", ".", "heads_share_relative_embedding", ")", ",", "add_relative_to_values", "=", "hparams", ".", "add_relative_to_values", ",", "save_weights_to", "=", "save_weights_to", ",", "cache", "=", "attention_cache", ",", "make_image_summary", "=", "make_image_summary", ",", "dropout_broadcast_dims", "=", "attention_dropout_broadcast_dims", ",", "max_length", "=", "hparams", ".", "get", "(", "\"max_length\"", ")", ",", "decode_loop_step", "=", "decode_loop_step", ",", "vars_3d", "=", "hparams", ".", "get", "(", "\"attention_variables_3d\"", ")", ",", "activation_dtype", "=", "hparams", ".", "get", "(", "\"activation_dtype\"", ",", "\"float32\"", ")", ",", "weight_dtype", "=", "hparams", ".", "get", "(", "\"weight_dtype\"", ",", "\"float32\"", ")", ")", "if", "encoder_output", "is", "not", "None", ":", "with", "tf", ".", "variable_scope", "(", "_FIRST_ATTEND_TO_ENCODER_NAME", ")", ":", "attention_cache", "=", "(", "layer_cache", "[", "_FIRST_ATTEND_TO_ENCODER_NAME", "]", "if", "layer_cache", "is", "not", "None", "else", "None", ")", "right_state", "=", "common_attention", ".", "multihead_attention", "(", "hidden_state", ",", "encoder_output", ",", "encoder_decoder_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", 
",", "hparams", ".", "attention_dropout", ",", "max_relative_position", "=", "hparams", ".", "max_relative_position", ",", "heads_share_relative_embedding", "=", "(", "hparams", ".", "heads_share_relative_embedding", ")", ",", "add_relative_to_values", "=", "hparams", ".", "add_relative_to_values", ",", "save_weights_to", "=", "save_weights_to", ",", "cache", "=", "attention_cache", ",", "make_image_summary", "=", "make_image_summary", ",", "dropout_broadcast_dims", "=", "attention_dropout_broadcast_dims", ",", "max_length", "=", "hparams", ".", "get", "(", "\"max_length\"", ")", ",", "vars_3d", "=", "hparams", ".", "get", "(", "\"attention_variables_3d\"", ")", ",", "activation_dtype", "=", "hparams", ".", "get", "(", "\"activation_dtype\"", ",", "\"float32\"", ")", ",", "weight_dtype", "=", "hparams", ".", "get", "(", "\"weight_dtype\"", ",", "\"float32\"", ")", ")", "left_state", "=", "tf", ".", "nn", ".", "dropout", "(", "left_state", ",", "1", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "right_state", "=", "tf", ".", "nn", ".", "dropout", "(", "right_state", ",", "1", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "hidden_state", "=", "residual_state", "+", "left_state", "+", "right_state", "else", ":", "hidden_state", "=", "common_layers", ".", "layer_postprocess", "(", "residual_state", ",", "left_state", ",", "hparams", ")", "with", "tf", ".", "variable_scope", "(", "_CONV_BRANCHES_NAME", ")", ":", "residual_state", "=", "hidden_state", "hidden_state", "=", "common_layers", ".", "layer_preprocess", "(", "hidden_state", ",", "hparams", ")", "if", "nonpadding", "is", "not", "None", ":", "# Mask padding from conv layers.", "mask", "=", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "nonpadding", ",", "2", ")", ",", "[", "1", ",", "1", ",", "hparams", ".", "hidden_size", "]", ")", "hidden_state", "*=", "mask", "if", "layer_cache", ":", "if", "decode_loop_step", "is", "None", ":", "hidden_state", "=", "layer_cache", "[", "_CONV_BRANCHES_FIRST_LAYER_NAME", "]", "=", "tf", ".", "concat", "(", "[", "layer_cache", "[", "_CONV_BRANCHES_FIRST_LAYER_NAME", "]", ",", "hidden_state", "]", ",", "axis", "=", "1", ")", "[", ":", ",", "-", "1", "*", "_DECODER_LEFT_CONV_PADDING", "-", "1", ":", ",", ":", "]", "left_state", "=", "hidden_state", "right_state", "=", "hidden_state", "[", ":", ",", "_DECODER_LEFT_CONV_PADDING", "-", "_DECODER_RIGHT_CONV_PADDING", ":", ",", ":", "]", "else", ":", "# Inplace update is required for inference on TPU.", "# Inplace_ops only supports inplace_update on the first dimension.", "tmp", "=", "tf", ".", "transpose", "(", "layer_cache", "[", "_CONV_BRANCHES_FIRST_LAYER_NAME", "]", ",", "perm", "=", "[", "1", ",", "0", ",", "2", "]", ")", "tmp", "=", "tf", ".", "expand_dims", "(", "tmp", ",", "axis", "=", "1", ")", "tmp", "=", "inplace_ops", ".", "alias_inplace_update", "(", "tmp", ",", "decode_loop_step", "*", "tf", ".", "shape", "(", "hidden_state", ")", "[", "1", "]", "+", "_DECODER_LEFT_CONV_PADDING", ",", "tf", ".", "transpose", "(", "hidden_state", ",", "perm", "=", "[", "1", ",", "0", ",", "2", "]", ")", ")", "tmp", "=", "tf", ".", "squeeze", "(", "tmp", ",", "axis", "=", "1", ")", "hidden_state", "=", "layer_cache", "[", "_CONV_BRANCHES_FIRST_LAYER_NAME", "]", "=", "tf", ".", "transpose", "(", "tmp", ",", "perm", "=", "[", "1", ",", "0", ",", "2", "]", ")", "left_state_indexes", "=", "[", "decode_loop_step", "+", "i", "for", "i", "in", "range", "(", "_DECODER_LEFT_CONV_PADDING", "+", "1", ")", "]", "left_state", "=", "tf", ".", 
"gather", "(", "hidden_state", ",", "left_state_indexes", ",", "axis", "=", "1", ")", "right_state_indexes", "=", "[", "decode_loop_step", "+", "i", "+", "(", "_DECODER_LEFT_CONV_PADDING", "-", "_DECODER_RIGHT_CONV_PADDING", ")", "for", "i", "in", "range", "(", "_DECODER_RIGHT_CONV_PADDING", "+", "1", ")", "]", "right_state", "=", "tf", ".", "gather", "(", "hidden_state", ",", "right_state_indexes", ",", "axis", "=", "1", ")", "else", ":", "# No caching.", "left_state", "=", "tf", ".", "pad", "(", "hidden_state", ",", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "_DECODER_LEFT_CONV_PADDING", ",", "0", "]", ",", "[", "0", ",", "0", "]", "]", ")", "right_state", "=", "tf", ".", "pad", "(", "hidden_state", ",", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "_DECODER_RIGHT_CONV_PADDING", ",", "0", "]", ",", "[", "0", ",", "0", "]", "]", ")", "left_output_dim", "=", "int", "(", "hparams", ".", "hidden_size", "*", "2", ")", "separable_conv_11x1", "=", "tf", ".", "layers", ".", "SeparableConv1D", "(", "left_output_dim", ",", "11", ",", "padding", "=", "\"VALID\"", ",", "name", "=", "\"separable_conv11x1\"", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ")", "left_state", "=", "separable_conv_11x1", ".", "apply", "(", "left_state", ")", "left_state", "=", "tf", ".", "nn", ".", "dropout", "(", "left_state", ",", "1", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "right_output_dim", "=", "int", "(", "hparams", ".", "hidden_size", "/", "2", ")", "separable_conv_7x1_1", "=", "tf", ".", "layers", ".", "SeparableConv1D", "(", "right_output_dim", ",", "7", ",", "padding", "=", "\"VALID\"", ",", "name", "=", "\"separable_conv_7x1_1\"", ")", "right_state", "=", "separable_conv_7x1_1", ".", "apply", "(", "right_state", ")", "right_state", "=", "tf", ".", "nn", ".", "dropout", "(", "right_state", ",", "1", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "right_state", "=", "tf", ".", "pad", "(", "right_state", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "left_output_dim", "-", "right_output_dim", "]", "]", ",", "constant_values", "=", "0", ")", "hidden_state", "=", "left_state", "+", "right_state", "hidden_state", "=", "common_layers", ".", "layer_preprocess", "(", "hidden_state", ",", "hparams", ")", "if", "nonpadding", "is", "not", "None", ":", "# Mask padding from conv layers.", "mask", "=", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "nonpadding", ",", "2", ")", ",", "[", "1", ",", "1", ",", "hparams", ".", "hidden_size", "*", "2", "]", ")", "hidden_state", "*=", "mask", "if", "layer_cache", ":", "if", "decode_loop_step", "is", "None", ":", "hidden_state", "=", "layer_cache", "[", "_CONV_BRANCHES_SECOND_LAYER_NAME", "]", "=", "tf", ".", "concat", "(", "[", "layer_cache", "[", "_CONV_BRANCHES_SECOND_LAYER_NAME", "]", ",", "hidden_state", "]", ",", "axis", "=", "1", ")", "[", ":", ",", "-", "1", "*", "_DECODER_FINAL_CONV_PADDING", "-", "1", ":", ",", ":", "]", "else", ":", "# Inplace update is required for inference on TPU.", "# Inplace_ops only supports inplace_update on the first dimension.", "tmp", "=", "tf", ".", "transpose", "(", "layer_cache", "[", "_CONV_BRANCHES_SECOND_LAYER_NAME", "]", ",", "perm", "=", "[", "1", ",", "0", ",", "2", "]", ")", "tmp", "=", "tf", ".", "expand_dims", "(", "tmp", ",", "axis", "=", "1", ")", "tmp", "=", "inplace_ops", ".", "alias_inplace_update", "(", "tmp", ",", "(", "decode_loop_step", "+", "_DECODER_FINAL_CONV_PADDING", ")", "*", "tf", ".", "shape", "(", 
"hidden_state", ")", "[", "1", "]", ",", "tf", ".", "transpose", "(", "hidden_state", ",", "perm", "=", "[", "1", ",", "0", ",", "2", "]", ")", ")", "tmp", "=", "tf", ".", "squeeze", "(", "tmp", ",", "axis", "=", "1", ")", "hidden_state", "=", "layer_cache", "[", "_CONV_BRANCHES_SECOND_LAYER_NAME", "]", "=", "tf", ".", "transpose", "(", "tmp", ",", "perm", "=", "[", "1", ",", "0", ",", "2", "]", ")", "hidden_state_indexes", "=", "[", "decode_loop_step", "+", "i", "for", "i", "in", "range", "(", "_DECODER_FINAL_CONV_PADDING", "+", "1", ")", "]", "hidden_state", "=", "tf", ".", "gather", "(", "hidden_state", ",", "hidden_state_indexes", ",", "axis", "=", "1", ")", "else", ":", "hidden_state", "=", "tf", ".", "pad", "(", "hidden_state", ",", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "_DECODER_FINAL_CONV_PADDING", ",", "0", "]", ",", "[", "0", ",", "0", "]", "]", ")", "separable_conv_7x1_2", "=", "tf", ".", "layers", ".", "SeparableConv1D", "(", "hparams", ".", "hidden_size", ",", "7", ",", "padding", "=", "\"VALID\"", ",", "name", "=", "\"separable_conv_7x1_2\"", ")", "hidden_state", "=", "separable_conv_7x1_2", ".", "apply", "(", "hidden_state", ")", "hidden_state", "=", "common_layers", ".", "layer_postprocess", "(", "residual_state", ",", "hidden_state", ",", "hparams", ")", "with", "tf", ".", "variable_scope", "(", "_VANILLA_ATTENTION_NAME", ")", ":", "residual_state", "=", "hidden_state", "hidden_state", "=", "common_layers", ".", "layer_preprocess", "(", "hidden_state", ",", "hparams", ")", "attention_cache", "=", "layer_cache", "[", "_VANILLA_ATTENTION_NAME", "]", "if", "layer_cache", "is", "not", "None", "else", "None", "hidden_state", "=", "common_attention", ".", "multihead_attention", "(", "hidden_state", ",", "None", ",", "decoder_self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "attention_type", "=", "hparams", ".", "self_attention_type", ",", "max_relative_position", "=", "hparams", ".", "max_relative_position", ",", "heads_share_relative_embedding", "=", "(", "hparams", ".", "heads_share_relative_embedding", ")", ",", "add_relative_to_values", "=", "hparams", ".", "add_relative_to_values", ",", "save_weights_to", "=", "save_weights_to", ",", "cache", "=", "attention_cache", ",", "make_image_summary", "=", "make_image_summary", ",", "dropout_broadcast_dims", "=", "attention_dropout_broadcast_dims", ",", "max_length", "=", "hparams", ".", "get", "(", "\"max_length\"", ")", ",", "decode_loop_step", "=", "decode_loop_step", ",", "vars_3d", "=", "hparams", ".", "get", "(", "\"attention_variables_3d\"", ")", ",", "activation_dtype", "=", "hparams", ".", "get", "(", "\"activation_dtype\"", ",", "\"float32\"", ")", ",", "weight_dtype", "=", "hparams", ".", "get", "(", "\"weight_dtype\"", ",", "\"float32\"", ")", ")", "hidden_state", "=", "common_layers", ".", "layer_postprocess", "(", "residual_state", ",", "hidden_state", ",", "hparams", ")", "if", "encoder_output", "is", "not", "None", ":", "with", "tf", ".", "variable_scope", "(", "_SECOND_ATTEND_TO_ENCODER_NAME", ")", ":", "residual_state", "=", "hidden_state", "hidden_state", "=", "common_layers", ".", "layer_preprocess", "(", "hidden_state", ",", "hparams", ")", "attention_cache", "=", "(", "layer_cache", "[", "_SECOND_ATTEND_TO_ENCODER_NAME", "]", "if", 
"layer_cache", "is", "not", "None", "else", "None", ")", "hidden_state", "=", "common_attention", ".", "multihead_attention", "(", "hidden_state", ",", "encoder_output", ",", "encoder_decoder_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "max_relative_position", "=", "hparams", ".", "max_relative_position", ",", "heads_share_relative_embedding", "=", "(", "hparams", ".", "heads_share_relative_embedding", ")", ",", "add_relative_to_values", "=", "hparams", ".", "add_relative_to_values", ",", "save_weights_to", "=", "save_weights_to", ",", "cache", "=", "attention_cache", ",", "make_image_summary", "=", "make_image_summary", ",", "dropout_broadcast_dims", "=", "attention_dropout_broadcast_dims", ",", "max_length", "=", "hparams", ".", "get", "(", "\"max_length\"", ")", ",", "vars_3d", "=", "hparams", ".", "get", "(", "\"attention_variables_3d\"", ")", ",", "activation_dtype", "=", "hparams", ".", "get", "(", "\"activation_dtype\"", ",", "\"float32\"", ")", ",", "weight_dtype", "=", "hparams", ".", "get", "(", "\"weight_dtype\"", ",", "\"float32\"", ")", ")", "hidden_state", "=", "common_layers", ".", "layer_postprocess", "(", "residual_state", ",", "hidden_state", ",", "hparams", ")", "with", "tf", ".", "variable_scope", "(", "\"dense_layers\"", ")", ":", "residual_state", "=", "hidden_state", "hidden_state", "=", "common_layers", ".", "layer_preprocess", "(", "hidden_state", ",", "hparams", ")", "hidden_state", "=", "tf", ".", "layers", ".", "dense", "(", "hidden_state", ",", "int", "(", "hparams", ".", "hidden_size", "*", "4", ")", ",", "activation", "=", "tf", ".", "nn", ".", "swish", ")", "hidden_state", "=", "tf", ".", "nn", ".", "dropout", "(", "hidden_state", ",", "1", "-", "hparams", ".", "layer_prepostprocess_dropout", ")", "hidden_state", "=", "common_layers", ".", "layer_preprocess", "(", "hidden_state", ",", "hparams", ")", "hidden_state", "=", "tf", ".", "layers", ".", "dense", "(", "hidden_state", ",", "hparams", ".", "hidden_size", ")", "hidden_state", "=", "common_layers", ".", "layer_postprocess", "(", "residual_state", ",", "hidden_state", ",", "hparams", ")", "return", "common_layers", ".", "layer_preprocess", "(", "hidden_state", ",", "hparams", ")" ]
45.495601
0.005298
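The conv-branch block in the decoder above sums two branches of different channel widths: the 11x1 separable convolution emits 2*hidden_size channels while the 7x1 branch emits hidden_size/2, so the narrow branch is zero-padded on the channel axis before the addition. A toy NumPy illustration of just that padding-and-sum step (not the model code; the shapes are made up):

import numpy as np

hidden_size = 8
left_state = np.random.randn(1, 5, 2 * hidden_size)    # 11x1 branch output: 2*hidden channels
right_state = np.random.randn(1, 5, hidden_size // 2)  # 7x1 branch output: hidden/2 channels

# Zero-pad the channel axis of the narrow branch, mirroring
# tf.pad(right_state, [[0, 0], [0, 0], [0, left_output_dim - right_output_dim]]).
pad_width = left_state.shape[-1] - right_state.shape[-1]
right_state = np.pad(right_state, [(0, 0), (0, 0), (0, pad_width)])

hidden_state = left_state + right_state                 # shape (1, 5, 2 * hidden_size)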
def writeB1logfile(filename, data): """Write a header structure into a B1 logfile. Inputs: filename: name of the file. data: header dictionary Notes: exceptions pass through to the caller. """ allkeys = list(data.keys()) f = open(filename, 'wt', encoding='utf-8') for ld in _logfile_data: # process each line linebegin = ld[0] fieldnames = ld[1] # set the default formatter if it is not given if len(ld) < 3: formatter = str elif ld[2] is None: formatter = str else: formatter = ld[2] # this will contain the formatted values. formatted = '' if isinstance(fieldnames, str): # scalar field name, just one field. Formatter should be a # callable. if fieldnames not in allkeys: # this field has already been processed continue try: formatted = formatter(data[fieldnames]) except KeyError: # field not found in param structure continue elif isinstance(fieldnames, tuple): # more than one field names in a tuple. In this case, formatter can # be a tuple of callables... if all([(fn not in allkeys) for fn in fieldnames]): # if all the fields have been processed: continue if isinstance(formatter, tuple) and len(formatter) == len(fieldnames): formatted = ' '.join([ft(data[fn]) for ft, fn in zip(formatter, fieldnames)]) # ...or a single callable... elif not isinstance(formatter, tuple): formatted = formatter([data[fn] for fn in fieldnames]) # ...otherwise raise an exception. else: raise SyntaxError('Programming error: formatter should be a scalar or a tuple\ of the same length as the field names in logfile_data.') else: # fieldnames is neither a string, nor a tuple. raise SyntaxError( 'Invalid syntax (programming error) in logfile_data in writeparamfile().') # try to get the values linetowrite = linebegin + ':\t' + formatted + '\n' f.write(linetowrite) if isinstance(fieldnames, tuple): for fn in fieldnames: # remove the params treated. if fn in allkeys: allkeys.remove(fn) else: if fieldnames in allkeys: allkeys.remove(fieldnames) # write untreated params for k in allkeys: linetowrite = k + ':\t' + str(data[k]) + '\n' f.write(linetowrite) f.close()
[ "def", "writeB1logfile", "(", "filename", ",", "data", ")", ":", "allkeys", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "f", "=", "open", "(", "filename", ",", "'wt'", ",", "encoding", "=", "'utf-8'", ")", "for", "ld", "in", "_logfile_data", ":", "# process each line", "linebegin", "=", "ld", "[", "0", "]", "fieldnames", "=", "ld", "[", "1", "]", "# set the default formatter if it is not given", "if", "len", "(", "ld", ")", "<", "3", ":", "formatter", "=", "str", "elif", "ld", "[", "2", "]", "is", "None", ":", "formatter", "=", "str", "else", ":", "formatter", "=", "ld", "[", "2", "]", "# this will contain the formatted values.", "formatted", "=", "''", "if", "isinstance", "(", "fieldnames", ",", "str", ")", ":", "# scalar field name, just one field. Formatter should be a", "# callable.", "if", "fieldnames", "not", "in", "allkeys", ":", "# this field has already been processed", "continue", "try", ":", "formatted", "=", "formatter", "(", "data", "[", "fieldnames", "]", ")", "except", "KeyError", ":", "# field not found in param structure", "continue", "elif", "isinstance", "(", "fieldnames", ",", "tuple", ")", ":", "# more than one field names in a tuple. In this case, formatter can", "# be a tuple of callables...", "if", "all", "(", "[", "(", "fn", "not", "in", "allkeys", ")", "for", "fn", "in", "fieldnames", "]", ")", ":", "# if all the fields have been processed:", "continue", "if", "isinstance", "(", "formatter", ",", "tuple", ")", "and", "len", "(", "formatter", ")", "==", "len", "(", "fieldnames", ")", ":", "formatted", "=", "' '", ".", "join", "(", "[", "ft", "(", "data", "[", "fn", "]", ")", "for", "ft", ",", "fn", "in", "zip", "(", "formatter", ",", "fieldnames", ")", "]", ")", "# ...or a single callable...", "elif", "not", "isinstance", "(", "formatter", ",", "tuple", ")", ":", "formatted", "=", "formatter", "(", "[", "data", "[", "fn", "]", "for", "fn", "in", "fieldnames", "]", ")", "# ...otherwise raise an exception.", "else", ":", "raise", "SyntaxError", "(", "'Programming error: formatter should be a scalar or a tuple\\\nof the same length as the field names in logfile_data.'", ")", "else", ":", "# fieldnames is neither a string, nor a tuple.", "raise", "SyntaxError", "(", "'Invalid syntax (programming error) in logfile_data in writeparamfile().'", ")", "# try to get the values", "linetowrite", "=", "linebegin", "+", "':\\t'", "+", "formatted", "+", "'\\n'", "f", ".", "write", "(", "linetowrite", ")", "if", "isinstance", "(", "fieldnames", ",", "tuple", ")", ":", "for", "fn", "in", "fieldnames", ":", "# remove the params treated.", "if", "fn", "in", "allkeys", ":", "allkeys", ".", "remove", "(", "fn", ")", "else", ":", "if", "fieldnames", "in", "allkeys", ":", "allkeys", ".", "remove", "(", "fieldnames", ")", "# write untreated params", "for", "k", "in", "allkeys", ":", "linetowrite", "=", "k", "+", "':\\t'", "+", "str", "(", "data", "[", "k", "]", ")", "+", "'\\n'", "f", ".", "write", "(", "linetowrite", ")", "f", ".", "close", "(", ")" ]
38.542857
0.001807
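writeB1logfile drives its output from a module-level _logfile_data table that is not shown here: each entry is (line label, field name or tuple of field names, optional formatter). The entries below are hypothetical, made up purely to illustrate the three formatter cases the function handles; the real table lives next to the function in its module.

# Hypothetical entries, one per formatter case handled above:
_logfile_data_example = [
    ('Sample name', 'Title'),                              # no formatter -> str()
    ('Beam center', ('BeamPosX', 'BeamPosY'),
     ('{:.3f}'.format, '{:.3f}'.format)),                  # one formatter per field
    ('Energy (eV)', 'Energy', lambda v: '%.2f' % v),       # single callable
]

header = {'Title': 'AgBeh', 'BeamPosX': 120.5, 'BeamPosY': 122.25,
          'Energy': 8040.0, 'Owner': 'beamline'}
# With this table, writeB1logfile would emit lines of the form
#   Sample name:<TAB>AgBeh
#   Beam center:<TAB>120.500 122.250
#   Energy (eV):<TAB>8040.00
# followed by 'Owner:<TAB>beamline' from the untreated-keys loop at the end.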
def rename_directory(db, user_id, old_api_path, new_api_path): """ Rename a directory. """ old_db_path = from_api_dirname(old_api_path) new_db_path = from_api_dirname(new_api_path) if old_db_path == '/': raise RenameRoot('Renaming the root directory is not permitted.') # Overwriting existing directories is disallowed. if _dir_exists(db, user_id, new_db_path): raise DirectoryExists(new_api_path) # Set this foreign key constraint to deferred so it's not violated # when we run the first statement to update the name of the directory. db.execute('SET CONSTRAINTS ' 'pgcontents.directories_parent_user_id_fkey DEFERRED') # Update name column for the directory that's being renamed db.execute( directories.update().where( and_( directories.c.user_id == user_id, directories.c.name == old_db_path, ) ).values( name=new_db_path, ) ) # Update the name and parent_name of any descendant directories. Do # this in a single statement so the non-deferrable check constraint # is satisfied. db.execute( directories.update().where( and_( directories.c.user_id == user_id, directories.c.name.startswith(old_db_path), directories.c.parent_name.startswith(old_db_path), ) ).values( name=func.concat( new_db_path, func.right(directories.c.name, -func.length(old_db_path)) ), parent_name=func.concat( new_db_path, func.right( directories.c.parent_name, -func.length(old_db_path) ) ), ) )
[ "def", "rename_directory", "(", "db", ",", "user_id", ",", "old_api_path", ",", "new_api_path", ")", ":", "old_db_path", "=", "from_api_dirname", "(", "old_api_path", ")", "new_db_path", "=", "from_api_dirname", "(", "new_api_path", ")", "if", "old_db_path", "==", "'/'", ":", "raise", "RenameRoot", "(", "'Renaming the root directory is not permitted.'", ")", "# Overwriting existing directories is disallowed.", "if", "_dir_exists", "(", "db", ",", "user_id", ",", "new_db_path", ")", ":", "raise", "DirectoryExists", "(", "new_api_path", ")", "# Set this foreign key constraint to deferred so it's not violated", "# when we run the first statement to update the name of the directory.", "db", ".", "execute", "(", "'SET CONSTRAINTS '", "'pgcontents.directories_parent_user_id_fkey DEFERRED'", ")", "# Update name column for the directory that's being renamed", "db", ".", "execute", "(", "directories", ".", "update", "(", ")", ".", "where", "(", "and_", "(", "directories", ".", "c", ".", "user_id", "==", "user_id", ",", "directories", ".", "c", ".", "name", "==", "old_db_path", ",", ")", ")", ".", "values", "(", "name", "=", "new_db_path", ",", ")", ")", "# Update the name and parent_name of any descendant directories. Do", "# this in a single statement so the non-deferrable check constraint", "# is satisfied.", "db", ".", "execute", "(", "directories", ".", "update", "(", ")", ".", "where", "(", "and_", "(", "directories", ".", "c", ".", "user_id", "==", "user_id", ",", "directories", ".", "c", ".", "name", ".", "startswith", "(", "old_db_path", ")", ",", "directories", ".", "c", ".", "parent_name", ".", "startswith", "(", "old_db_path", ")", ",", ")", ")", ".", "values", "(", "name", "=", "func", ".", "concat", "(", "new_db_path", ",", "func", ".", "right", "(", "directories", ".", "c", ".", "name", ",", "-", "func", ".", "length", "(", "old_db_path", ")", ")", ")", ",", "parent_name", "=", "func", ".", "concat", "(", "new_db_path", ",", "func", ".", "right", "(", "directories", ".", "c", ".", "parent_name", ",", "-", "func", ".", "length", "(", "old_db_path", ")", ")", ")", ",", ")", ")" ]
32.490909
0.000543
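The descendant update relies on a single SQL expression, func.concat(new_db_path, func.right(name, -func.length(old_db_path))): drop the old prefix and graft on the new one (Postgres RIGHT with a negative length keeps everything after the first N characters). A plain-Python sketch of the same rewrite:

def rewrite_prefix(name, old_db_path, new_db_path):
    """Equivalent of concat(new, right(name, -length(old))) for a path that
    starts with old_db_path."""
    assert name.startswith(old_db_path)
    return new_db_path + name[len(old_db_path):]

# The UPDATE applies this to both `name` and `parent_name` of every descendant:
assert rewrite_prefix('/old/dir/sub/', '/old/dir/', '/new/dir/') == '/new/dir/sub/'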
def as_curve(self, start=None, stop=None):
    """
    Get the synthetic as a Curve, in depth. Facilitates plotting alongside
    other curve data.
    """
    params = {'start': start or getattr(self, 'z start', None),
              'mnemonic': 'SYN',
              'step': 0.1524
              }

    # NOTE: `data` is not defined in this snippet (and `stop` is unused);
    # it presumably refers to the synthetic's own samples.
    return Curve(data, params=params)
[ "def", "as_curve", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "params", "=", "{", "'start'", ":", "start", "or", "getattr", "(", "self", ",", "'z start'", ",", "None", ")", ",", "'mnemonic'", ":", "'SYN'", ",", "'step'", ":", "0.1524", "}", "return", "Curve", "(", "data", ",", "params", "=", "params", ")" ]
33.090909
0.005348
def _set_bfd_session_setup_delay(self, v, load=False): """ Setter method for bfd_session_setup_delay, mapped from YANG variable /rbridge_id/bfd_session_setup_delay (container) If this variable is read-only (config: false) in the source YANG file, then _set_bfd_session_setup_delay is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_bfd_session_setup_delay() directly. YANG Description: Configure BFD desired session setup delay in seconds. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=bfd_session_setup_delay.bfd_session_setup_delay, is_container='container', presence=False, yang_name="bfd-session-setup-delay", rest_name="bfd-session-setup-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD desired session setup delay in seconds.', u'callpoint': u'bfd-session-delay-cpworker', u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bfd', defining_module='brocade-bfd', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """bfd_session_setup_delay must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=bfd_session_setup_delay.bfd_session_setup_delay, is_container='container', presence=False, yang_name="bfd-session-setup-delay", rest_name="bfd-session-setup-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD desired session setup delay in seconds.', u'callpoint': u'bfd-session-delay-cpworker', u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bfd', defining_module='brocade-bfd', yang_type='container', is_config=True)""", }) self.__bfd_session_setup_delay = t if hasattr(self, '_set'): self._set()
[ "def", "_set_bfd_session_setup_delay", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "bfd_session_setup_delay", ".", "bfd_session_setup_delay", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"bfd-session-setup-delay\"", ",", "rest_name", "=", "\"bfd-session-setup-delay\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure BFD desired session setup delay in seconds.'", ",", "u'callpoint'", ":", "u'bfd-session-delay-cpworker'", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-bfd'", ",", "defining_module", "=", "'brocade-bfd'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"bfd_session_setup_delay must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=bfd_session_setup_delay.bfd_session_setup_delay, is_container='container', presence=False, yang_name=\"bfd-session-setup-delay\", rest_name=\"bfd-session-setup-delay\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD desired session setup delay in seconds.', u'callpoint': u'bfd-session-delay-cpworker', u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bfd', defining_module='brocade-bfd', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__bfd_session_setup_delay", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
87.875
0.005159
def _execute_hooks(self, element): """ Executes finalize hooks """ if self.hooks and self.finalize_hooks: self.param.warning( "Supply either hooks or finalize_hooks not both, " "using hooks and ignoring finalize_hooks.") hooks = self.hooks or self.finalize_hooks for hook in hooks: try: hook(self, element) except Exception as e: self.param.warning("Plotting hook %r could not be " "applied:\n\n %s" % (hook, e))
[ "def", "_execute_hooks", "(", "self", ",", "element", ")", ":", "if", "self", ".", "hooks", "and", "self", ".", "finalize_hooks", ":", "self", ".", "param", ".", "warning", "(", "\"Supply either hooks or finalize_hooks not both, \"", "\"using hooks and ignoring finalize_hooks.\"", ")", "hooks", "=", "self", ".", "hooks", "or", "self", ".", "finalize_hooks", "for", "hook", "in", "hooks", ":", "try", ":", "hook", "(", "self", ",", "element", ")", "except", "Exception", "as", "e", ":", "self", ".", "param", ".", "warning", "(", "\"Plotting hook %r could not be \"", "\"applied:\\n\\n %s\"", "%", "(", "hook", ",", "e", ")", ")" ]
38.733333
0.003361
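Each hook is invoked as hook(self, element), i.e. it receives the plot object and the element being rendered. A minimal sketch of a conforming hook (the body is illustrative only; real hooks usually adjust backend state, e.g. via plot.state or plot.handles):

def log_render(plot, element):
    # Called once per plot during finalization; a real hook would typically
    # customize the underlying figure here instead of printing.
    print('finalizing %s for %s' % (type(plot).__name__, type(element).__name__))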
def delete(self, ids):
    """
    Delete IPv4 addresses by their ids.

    :param ids: Identifiers of the IPv4 addresses
    :return: None
    """
    url = build_uri_with_ids('api/v3/ipv4/%s/', ids)

    return super(ApiIPv4, self).delete(url)
[ "def", "delete", "(", "self", ",", "ids", ")", ":", "url", "=", "build_uri_with_ids", "(", "'api/v3/ipv4/%s/'", ",", "ids", ")", "return", "super", "(", "ApiIPv4", ",", "self", ")", ".", "delete", "(", "url", ")" ]
25.3
0.007634
def vectorize(self, token_list):
    '''
    Vectorize the token list with TF-IDF weights.

    Args:
        token_list:   The list of tokens.

    Returns:
        [vector of token, vector of token, vector of token, ...]
    '''
    vector_list = [self.__collection.tf_idf(token, self.__collection) for token in token_list]
    return vector_list
[ "def", "vectorize", "(", "self", ",", "token_list", ")", ":", "vector_list", "=", "[", "self", ".", "__collection", ".", "tf_idf", "(", "token", ",", "self", ".", "__collection", ")", "for", "token", "in", "token_list", "]", "return", "vector_list" ]
30.416667
0.013298
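The snippet only shows that self.__collection exposes a tf_idf(token, text) method; assuming it is an nltk-style TextCollection (an assumption, not confirmed by the code), the weighting it applies looks like this:

from nltk.text import TextCollection

docs = [['cats', 'like', 'fish'], ['dogs', 'like', 'bones']]
collection = TextCollection(docs)

# tf_idf(term, text) = term frequency in `text` times inverse document
# frequency over the whole collection.
vector = [collection.tf_idf(token, docs[0]) for token in docs[0]]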
def get_top_pathologies(graph, n: Optional[int] = 15) -> List[Tuple[BaseEntity, int]]:
    """Get the pathology nodes with the most relationships in the graph.

    :param pybel.BELGraph graph: A BEL graph
    :param n: The number of top connected pathologies to return. If None, returns all nodes.
    :return: A list of (pathology node, relationship count) pairs, most connected first.
    """
    return count_pathologies(graph).most_common(n)
[ "def", "get_top_pathologies", "(", "graph", ",", "n", ":", "Optional", "[", "int", "]", "=", "15", ")", "->", "List", "[", "Tuple", "[", "BaseEntity", ",", "int", "]", "]", ":", "return", "count_pathologies", "(", "graph", ")", ".", "most_common", "(", "n", ")" ]
50.142857
0.008403
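The "If None, returns all nodes" behaviour falls straight out of collections.Counter: count_pathologies evidently returns a Counter, and Counter.most_common(None) yields every entry in descending order of count.

from collections import Counter

counts = Counter({'neoplasm': 7, 'atherosclerosis': 3})
assert counts.most_common(1) == [('neoplasm', 7)]
assert counts.most_common(None) == [('neoplasm', 7), ('atherosclerosis', 3)]  # n=None -> all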
def invertible_flatten1(unflat_list): r""" Flattens `unflat_list` but remember how to reconstruct the `unflat_list` Returns `flat_list` and the `reverse_list` with indexes into the `flat_list` Args: unflat_list (list): list of nested lists that we will flatten. Returns: tuple : (flat_list, reverse_list) CommandLine: python -m utool.util_list --exec-invertible_flatten1 --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> import utool as ut >>> unflat_list = [[1, 2, 3], [4, 5], [6, 6]] >>> flat_list, reverse_list = invertible_flatten1(unflat_list) >>> result = ('flat_list = %s\n' % (ut.repr2(flat_list),)) >>> result += ('reverse_list = %s' % (ut.repr2(reverse_list),)) >>> print(result) flat_list = [1, 2, 3, 4, 5, 6, 6] reverse_list = [[0, 1, 2], [3, 4], [5, 6]] """ nextnum = functools.partial(six.next, itertools.count(0)) # Build an unflat list of flat indexes reverse_list = [[nextnum() for _ in tup] for tup in unflat_list] flat_list = flatten(unflat_list) return flat_list, reverse_list
[ "def", "invertible_flatten1", "(", "unflat_list", ")", ":", "nextnum", "=", "functools", ".", "partial", "(", "six", ".", "next", ",", "itertools", ".", "count", "(", "0", ")", ")", "# Build an unflat list of flat indexes", "reverse_list", "=", "[", "[", "nextnum", "(", ")", "for", "_", "in", "tup", "]", "for", "tup", "in", "unflat_list", "]", "flat_list", "=", "flatten", "(", "unflat_list", ")", "return", "flat_list", ",", "reverse_list" ]
36.3125
0.000838
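reverse_list stores, for each original sublist, the positions of its items inside flat_list, so inverting the flattening is a double indexing pass. utool ships a matching inverse (unflatten1); the sketch below only spells out what reverse_list encodes:

def unflatten1_sketch(flat_list, reverse_list):
    # Rebuild each original sublist by gathering its items from flat_list.
    return [[flat_list[index] for index in indexes] for indexes in reverse_list]

flat_list = [1, 2, 3, 4, 5, 6, 6]
reverse_list = [[0, 1, 2], [3, 4], [5, 6]]
assert unflatten1_sketch(flat_list, reverse_list) == [[1, 2, 3], [4, 5], [6, 6]]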
def f_get(self, *args):
    """Returns annotations.

    If ``len(args) > 1``, returns a tuple of the requested annotations.

    `f_get(X)` with *X* an integer returns the annotation named `annotation_X`.

    If the annotation contains only a single entry you can call `f_get()` without
    arguments. If you call `f_get()` and the annotation contains more than one
    element, a ValueError is thrown.

    """
    if len(args) == 0:
        if len(self._dict) == 1:
            return self._dict[list(self._dict.keys())[0]]
        elif len(self._dict) > 1:
            raise ValueError('Your annotation contains more than one entry: '
                             '`%s` Please use >>f_get<< with one of these.' %
                             (str(list(self._dict.keys()))))
        else:
            raise AttributeError('Your annotation is empty, cannot access data.')

    result_list = []
    for name in args:
        name = self._translate_key(name)
        try:
            result_list.append(self._dict[name])
        except KeyError:
            raise AttributeError('Your annotation does not contain %s.' % name)

    if len(args) == 1:
        return result_list[0]
    else:
        return tuple(result_list)
[ "def", "f_get", "(", "self", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "0", ":", "if", "len", "(", "self", ".", "_dict", ")", "==", "1", ":", "return", "self", ".", "_dict", "[", "list", "(", "self", ".", "_dict", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "elif", "len", "(", "self", ".", "_dict", ")", ">", "1", ":", "raise", "ValueError", "(", "'Your annotation contains more than one entry: '", "'`%s` Please use >>f_get<< with one of these.'", "%", "(", "str", "(", "list", "(", "self", ".", "_dict", ".", "keys", "(", ")", ")", ")", ")", ")", "else", ":", "raise", "AttributeError", "(", "'Your annotation is empty, cannot access data.'", ")", "result_list", "=", "[", "]", "for", "name", "in", "args", ":", "name", "=", "self", ".", "_translate_key", "(", "name", ")", "try", ":", "result_list", ".", "append", "(", "self", ".", "_dict", "[", "name", "]", ")", "except", "KeyError", ":", "raise", "AttributeError", "(", "'Your annotation does not contain %s.'", "%", "name", ")", "if", "len", "(", "args", ")", "==", "1", ":", "return", "result_list", "[", "0", "]", "else", ":", "return", "tuple", "(", "result_list", ")" ]
36.742857
0.006818