Dataset columns: text (string, lengths 75 to 104k), code_tokens (sequence), avg_line_len (float64, 7.91 to 980), score (sequence).
def stop(self): """Halts the acquisition; this must be called before resetting the acquisition.""" try: self.aitask.stop() self.aotask.stop() except Exception: print("No task running") self.aitask = None self.aotask = None
[ "def", "stop", "(", "self", ")", ":", "try", ":", "self", ".", "aitask", ".", "stop", "(", ")", "self", ".", "aotask", ".", "stop", "(", ")", "pass", "except", ":", "print", "u\"No task running\"", "self", ".", "aitask", "=", "None", "self", ".", "aotask", "=", "None" ]
29.6
[ 0.06666666666666667, 0.03529411764705882, 0.16666666666666666, 0.06666666666666667, 0.06666666666666667, 0.125, 0.2, 0.05555555555555555, 0.07692307692307693, 0.07692307692307693 ]
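Since stop() clears self.aitask and self.aotask even when no task is running, a caller can invoke it unconditionally before a reset. A minimal sketch with stub task objects; the Task and Acquisition names are illustrative, not from the original source:

class Task:
    def stop(self):
        print("task stopped")

class Acquisition:
    stop = stop  # reuse the method above; assumes it is defined at module level
    def __init__(self):
        self.aitask, self.aotask = Task(), Task()

acq = Acquisition()
acq.stop()  # stops both tasks, then clears the references
assert acq.aitask is None and acq.aotask is None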
def absent(name=None, ipv4addr=None, **api_opts): ''' Ensure infoblox A record is removed. State example: .. code-block:: yaml infoblox_a.absent: - name: example-ha-0.domain.com infoblox_a.absent: - name: - ipv4addr: 127.0.23.23 ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} obj = __salt__['infoblox.get_a'](name=name, ipv4addr=ipv4addr, allow_array=False, **api_opts) if not obj: ret['result'] = True ret['comment'] = 'infoblox already removed' return ret if __opts__['test']: ret['result'] = None ret['changes'] = {'old': obj, 'new': 'absent'} return ret if __salt__['infoblox.delete_a'](name=name, ipv4addr=ipv4addr, **api_opts): ret['result'] = True ret['changes'] = {'old': obj, 'new': 'absent'} return ret return ret
[ "def", "absent", "(", "name", "=", "None", ",", "ipv4addr", "=", "None", ",", "*", "*", "api_opts", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "obj", "=", "__salt__", "[", "'infoblox.get_a'", "]", "(", "name", "=", "name", ",", "ipv4addr", "=", "ipv4addr", ",", "allow_array", "=", "False", ",", "*", "*", "api_opts", ")", "if", "not", "obj", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'infoblox already removed'", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "obj", ",", "'new'", ":", "'absent'", "}", "return", "ret", "if", "__salt__", "[", "'infoblox.delete_a'", "]", "(", "name", "=", "name", ",", "ipv4addr", "=", "ipv4addr", ",", "*", "*", "api_opts", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "obj", ",", "'new'", ":", "'absent'", "}", "return", "ret" ]
27.28125
[ 0.02040816326530612, 0.2857142857142857, 0.05, 0, 0.1111111111111111, 0, 0.125, 0, 0.07692307692307693, 0.046511627906976744, 0, 0.07692307692307693, 0.10526315789473684, 0.05714285714285714, 0.2857142857142857, 0.028169014084507043, 0.030927835051546393, 0, 0.13333333333333333, 0.07142857142857142, 0.0392156862745098, 0.1111111111111111, 0, 0.08333333333333333, 0.07142857142857142, 0.037037037037037035, 0.1111111111111111, 0, 0.02531645569620253, 0.07142857142857142, 0.037037037037037035, 0.14285714285714285 ]
def to_geodataframe(products): """Return the products from a query response as a GeoPandas GeoDataFrame with the values in their appropriate Python types. """ try: import geopandas as gpd import shapely.wkt except ImportError: raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.") crs = {'init': 'epsg:4326'} # WGS84 if len(products) == 0: return gpd.GeoDataFrame(crs=crs) df = SentinelAPI.to_dataframe(products) geometry = [shapely.wkt.loads(fp) for fp in df['footprint']] # remove useless columns df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True) return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
[ "def", "to_geodataframe", "(", "products", ")", ":", "try", ":", "import", "geopandas", "as", "gpd", "import", "shapely", ".", "wkt", "except", "ImportError", ":", "raise", "ImportError", "(", "\"to_geodataframe requires the optional dependencies GeoPandas and Shapely.\"", ")", "crs", "=", "{", "'init'", ":", "'epsg:4326'", "}", "# WGS84", "if", "len", "(", "products", ")", "==", "0", ":", "return", "gpd", ".", "GeoDataFrame", "(", "crs", "=", "crs", ")", "df", "=", "SentinelAPI", ".", "to_dataframe", "(", "products", ")", "geometry", "=", "[", "shapely", ".", "wkt", ".", "loads", "(", "fp", ")", "for", "fp", "in", "df", "[", "'footprint'", "]", "]", "# remove useless columns", "df", ".", "drop", "(", "[", "'footprint'", ",", "'gmlfootprint'", "]", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "return", "gpd", ".", "GeoDataFrame", "(", "df", ",", "crs", "=", "crs", ",", "geometry", "=", "geometry", ")" ]
41.315789
[ 0.03333333333333333, 0.0375, 0.034482758620689655, 0.18181818181818182, 0.16666666666666666, 0.05714285714285714, 0.06666666666666667, 0.07407407407407407, 0.02830188679245283, 0, 0.045454545454545456, 0.06666666666666667, 0.045454545454545456, 0, 0.0425531914893617, 0.029411764705882353, 0.0625, 0.029411764705882353, 0.031746031746031744 ]
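A hedged usage sketch, assuming this is sentinelsat's SentinelAPI.to_geodataframe; the credentials, endpoint, and query are placeholders, and running it needs network access:

from sentinelsat import SentinelAPI

api = SentinelAPI('user', 'password', 'https://apihub.copernicus.eu/apihub')
products = api.query(area='POINT(10.0 50.0)', date=('20200101', '20200131'))
gdf = SentinelAPI.to_geodataframe(products)  # GeoDataFrame in WGS84 (EPSG:4326)
print(gdf[['geometry']].head())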
def _ResolutionOrder(self, variables_to_solve): """Return a list of lists of tuples (block, output, ndof) to be solved.""" # Gp=nx.DiGraph() # # for i in range(nvar): # Gp.add_node('v'+str(i),bipartite=0) # # for i in range(neq): # Gp.add_node('e'+str(i),bipartite=1) # for j in range(nvar): # if Mo[i,j]==1: # Gp.add_edge('e'+str(i),'v'+str(j)) Gp = nx.DiGraph() for variable in self.variables: Gp.add_node(variable, bipartite=0) for block in self.blocks: for iov, output_variable in enumerate(block.outputs): Gp.add_node((block, iov), bipartite=1) Gp.add_edge((block, iov), output_variable) Gp.add_edge(output_variable, (block, iov)) for input_variable in block.inputs: if not isinstance(input_variable, Signal): Gp.add_edge(input_variable, (block, iov)) # for n1,n2 in M.items(): # Gp.add_edge(n1,n2) sinks = [] sources = [] for node in Gp.nodes(): if Gp.out_degree(node) == 0: sinks.append(node) elif Gp.in_degree(node) == 0: sources.append(node) G2 = sources[:] for node in sources: for node2 in nx.descendants(Gp, node): if node2 not in G2: G2.append(node2) if G2 != []: print(G2) raise ModelError('Overconstrained variables') G3 = sinks[:] for node in sinks: for node2 in nx.ancestors(Gp, node): if node2 not in G3: G3.append(node2) if G3 != []: raise ModelError('Underconstrained variables') # vars_resolvables=[] # for var in vars_resoudre: # if not 'v'+str(var) in G2+G3: # vars_resolvables.append(var) # G1=Gp.copy() # G1.remove_nodes_from(G2+G3) # # M1=nx.bipartite.maximum_matching(G1) # G1p=nx.DiGraph() # # G1p.add_nodes_from(G1.nodes()) # for e in G1.edges(): # # equation to variable # if e[0][0]=='v': # G1p.add_edge(e[0],e[1]) # else: # G1p.add_edge(e[1],e[0]) # # print(len(M)) # for n1,n2 in M1.items(): # # print(n1,n2) # if n1[0]=='e': # G1p.add_edge(n1,n2) # else: # G1p.add_edge(n2,n1) scc = list(nx.strongly_connected_components(Gp)) # pos=nx.spring_layout(G1p) # plt.figure() # nx.draw(G1p,pos) # nx.draw_networkx_labels(G1p,pos) # print(scc) if scc != []: C = nx.condensation(Gp, scc) isc_vars = [] for isc, sc in enumerate(scc): for var in variables_to_solve: if var in sc: isc_vars.append(isc) break ancestors_vars = isc_vars[:] for isc_var in isc_vars: for ancetre in nx.ancestors(C, isc_var): if ancetre not in ancestors_vars: ancestors_vars.append(ancetre) order_sc = [sc for sc in nx.topological_sort( C) if sc in ancestors_vars] order_ev = [] for isc in order_sc: # list of equations and variables, sorted so they can be separated evs = list(scc[isc]) # print(evs) # levs=int(len(evs)/2) eqs = [] var = [] for element in evs: if type(element) == tuple: eqs.append(element) else: var.append(element) order_ev.append((len(eqs), eqs, var)) return order_ev raise ModelError
[ "def", "_ResolutionOrder", "(", "self", ",", "variables_to_solve", ")", ":", "# Gp=nx.DiGraph()", "#", "# for i in range(nvar):", "# Gp.add_node('v'+str(i),bipartite=0)", "#", "# for i in range(neq):", "# Gp.add_node('e'+str(i),bipartite=1)", "# for j in range(nvar):", "# if Mo[i,j]==1:", "# Gp.add_edge('e'+str(i),'v'+str(j))", "Gp", "=", "nx", ".", "DiGraph", "(", ")", "for", "variable", "in", "self", ".", "variables", ":", "Gp", ".", "add_node", "(", "variable", ",", "bipartite", "=", "0", ")", "for", "block", "in", "self", ".", "blocks", ":", "for", "iov", ",", "output_variable", "in", "enumerate", "(", "block", ".", "outputs", ")", ":", "Gp", ".", "add_node", "(", "(", "block", ",", "iov", ")", ",", "bipartite", "=", "1", ")", "Gp", ".", "add_edge", "(", "(", "block", ",", "iov", ")", ",", "output_variable", ")", "Gp", ".", "add_edge", "(", "output_variable", ",", "(", "block", ",", "iov", ")", ")", "for", "input_variable", "in", "block", ".", "inputs", ":", "if", "not", "isinstance", "(", "input_variable", ",", "Signal", ")", ":", "Gp", ".", "add_edge", "(", "input_variable", ",", "(", "block", ",", "iov", ")", ")", "# for n1,n2 in M.items():", "# Gp.add_edge(n1,n2)", "sinks", "=", "[", "]", "sources", "=", "[", "]", "for", "node", "in", "Gp", ".", "nodes", "(", ")", ":", "if", "Gp", ".", "out_degree", "(", "node", ")", "==", "0", ":", "sinks", ".", "append", "(", "node", ")", "elif", "Gp", ".", "in_degree", "(", "node", ")", "==", "0", ":", "sources", ".", "append", "(", "node", ")", "G2", "=", "sources", "[", ":", "]", "for", "node", "in", "sources", ":", "for", "node2", "in", "nx", ".", "descendants", "(", "Gp", ",", "node", ")", ":", "if", "node2", "not", "in", "G2", ":", "G2", ".", "append", "(", "node2", ")", "if", "G2", "!=", "[", "]", ":", "print", "(", "G2", ")", "raise", "ModelError", "(", "'Overconstrained variables'", ")", "G3", "=", "sinks", "[", ":", "]", "for", "node", "in", "sinks", ":", "for", "node2", "in", "nx", ".", "ancestors", "(", "Gp", ",", "node", ")", ":", "if", "node2", "not", "in", "G3", ":", "G3", ".", "append", "(", "node2", ")", "if", "G3", "!=", "[", "]", ":", "raise", "ModelError", "(", "'Underconstrained variables'", ")", "# vars_resolvables=[]", "# for var in vars_resoudre:", "# if not 'v'+str(var) in G2+G3:", "# vars_resolvables.append(var)", "# G1=Gp.copy()", "# G1.remove_nodes_from(G2+G3)", "#", "# M1=nx.bipartite.maximum_matching(G1)", "# G1p=nx.DiGraph()", "#", "# G1p.add_nodes_from(G1.nodes())", "# for e in G1.edges():", "# # equation vers variable", "# if e[0][0]=='v':", "# G1p.add_edge(e[0],e[1])", "# else:", "# G1p.add_edge(e[1],e[0])", "# # print(len(M))", "# for n1,n2 in M1.items():", "# # print(n1,n2)", "# if n1[0]=='e':", "# G1p.add_edge(n1,n2)", "# else:", "# G1p.add_edge(n2,n1)", "scc", "=", "list", "(", "nx", ".", "strongly_connected_components", "(", "Gp", ")", ")", "# pos=nx.spring_layout(G1p)", "# plt.figure()", "# nx.draw(G1p,pos)", "# nx.draw_networkx_labels(G1p,pos)", "# print(scc)", "if", "scc", "!=", "[", "]", ":", "C", "=", "nx", ".", "condensation", "(", "Gp", ",", "scc", ")", "isc_vars", "=", "[", "]", "for", "isc", ",", "sc", "in", "enumerate", "(", "scc", ")", ":", "for", "var", "in", "variables_to_solve", ":", "if", "var", "in", "sc", ":", "isc_vars", ".", "append", "(", "isc", ")", "break", "ancestors_vars", "=", "isc_vars", "[", ":", "]", "for", "isc_var", "in", "isc_vars", ":", "for", "ancetre", "in", "nx", ".", "ancestors", "(", "C", ",", "isc_var", ")", ":", "if", "ancetre", "not", "in", "ancestors_vars", ":", 
"ancestors_vars", ".", "append", "(", "ancetre", ")", "order_sc", "=", "[", "sc", "for", "sc", "in", "nx", ".", "topological_sort", "(", "C", ")", "if", "sc", "in", "ancestors_vars", "]", "order_ev", "=", "[", "]", "for", "isc", "in", "order_sc", ":", "# liste d'équations et de variables triées pour être séparées", "evs", "=", "list", "(", "scc", "[", "isc", "]", ")", "# print(evs)", "# levs=int(len(evs)/2)", "eqs", "=", "[", "]", "var", "=", "[", "]", "for", "element", "in", "evs", ":", "if", "type", "(", "element", ")", "==", "tuple", ":", "eqs", ".", "append", "(", "element", ")", "else", ":", "var", ".", "append", "(", "element", ")", "order_ev", ".", "append", "(", "(", "len", "(", "eqs", ")", ",", "eqs", ",", "var", ")", ")", "return", "order_ev", "raise", "ModelError" ]
30.68254
[ 0.02127659574468085, 0.18181818181818182, 0.0684931506849315, 0, 0.18181818181818182, 0.05, 1, 0.038461538461538464, 0.022727272727272728, 1, 0.04, 0.022727272727272728, 0.03333333333333333, 0.037037037037037035, 0.0196078431372549, 0, 0.08, 0.05128205128205128, 0.043478260869565216, 0.06060606060606061, 0.03076923076923077, 0.037037037037037035, 0.034482758620689655, 0.034482758620689655, 0.0392156862745098, 0.03225806451612903, 0.03076923076923077, 0, 0.0625, 0.06451612903225806, 0, 0.1111111111111111, 0.1, 0.06451612903225806, 0.05, 0.058823529411764705, 0.04878048780487805, 0.05555555555555555, 0, 0.08695652173913043, 0.07142857142857142, 0.04, 0.05714285714285714, 0.05555555555555555, 0, 0.1, 0.09523809523809523, 0.03508771929824561, 0, 0.09523809523809523, 0.07692307692307693, 0.041666666666666664, 0.05714285714285714, 0.05555555555555555, 0, 0.1, 0.034482758620689655, 0, 0.03571428571428571, 0.029411764705882353, 0.023809523809523808, 0.022222222222222223, 0, 0, 0.047619047619047616, 0.027777777777777776, 1, 0.022222222222222223, 0.04, 1, 0.02564102564102564, 0.034482758620689655, 0.02702702702702703, 0.034482758620689655, 0.025, 0.05555555555555555, 0.025, 0.043478260869565216, 0.030303030303030304, 0.038461538461538464, 0.037037037037037035, 0.027777777777777776, 0.05555555555555555, 0.027777777777777776, 0, 0.03571428571428571, 0.058823529411764705, 0.09523809523809523, 0.08, 0.04878048780487805, 0.05263157894736842, 0.09523809523809523, 0.05, 0.08, 0.047619047619047616, 0.043478260869565216, 0.06060606060606061, 0.045454545454545456, 0.06896551724137931, 0.05, 0, 0.05555555555555555, 0.03571428571428571, 0.03773584905660377, 0.037037037037037035, 0, 0.05263157894736842, 0.06976744186046512, 0.08, 0.0625, 0.025974025974025976, 0.05555555555555555, 0.037037037037037035, 0.02702702702702703, 0.08333333333333333, 0.08333333333333333, 0.05714285714285714, 0.043478260869565216, 0.046511627906976744, 0.08, 0.046511627906976744, 0.03773584905660377, 0, 0.07407407407407407, 0, 0.08333333333333333 ]
def addMenuLabel(menu, text): """Adds a QLabel containing text to the given menu""" qaw = QWidgetAction(menu) lab = QLabel(text, menu) qaw.setDefaultWidget(lab) lab.setAlignment(Qt.AlignCenter) lab.setFrameShape(QFrame.StyledPanel) lab.setFrameShadow(QFrame.Sunken) menu.addAction(qaw) return lab
[ "def", "addMenuLabel", "(", "menu", ",", "text", ")", ":", "qaw", "=", "QWidgetAction", "(", "menu", ")", "lab", "=", "QLabel", "(", "text", ",", "menu", ")", "qaw", ".", "setDefaultWidget", "(", "lab", ")", "lab", ".", "setAlignment", "(", "Qt", ".", "AlignCenter", ")", "lab", ".", "setFrameShape", "(", "QFrame", ".", "StyledPanel", ")", "lab", ".", "setFrameShadow", "(", "QFrame", ".", "Sunken", ")", "menu", ".", "addAction", "(", "qaw", ")", "return", "lab" ]
32.2
[ 0.034482758620689655, 0.03571428571428571, 0.06896551724137931, 0.07142857142857142, 0.06896551724137931, 0.05555555555555555, 0.04878048780487805, 0.05405405405405406, 0.08695652173913043, 0.14285714285714285 ]
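A small usage sketch; it assumes a PyQt5-style import layout, while the original may target PyQt4 or PySide, so adjust the imports accordingly:

from PyQt5.QtWidgets import QApplication, QMenu, QWidgetAction, QLabel, QFrame
from PyQt5.QtCore import Qt

app = QApplication([])
menu = QMenu()
label = addMenuLabel(menu, "Display options")  # returns the embedded QLabel
menu.addAction("Toggle grid")
menu.show()
app.exec_()  # blocks until the menu is dismissed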
def map_providers_parallel(self, query='list_nodes', cached=False): ''' Return a mapping of what named VMs are running on what VM providers, based on the providers defined in the configuration. Same as map_providers but queries in parallel. ''' if cached is True and query in self.__cached_provider_queries: return self.__cached_provider_queries[query] opts = self.opts.copy() multiprocessing_data = [] # Optimize Providers opts['providers'] = self._optimize_providers(opts['providers']) for alias, drivers in six.iteritems(opts['providers']): # Make a temp query for this driver to avoid overwriting it on the next iteration this_query = query for driver, details in six.iteritems(drivers): # If the driver has a list_nodes_min function, replace the query # param with it to check existing VMs on this driver with minimal # information. Otherwise, keep using the original query param. if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds: this_query = 'list_nodes_min' fun = '{0}.{1}'.format(driver, this_query) if fun not in self.clouds: log.error('Public cloud provider %s is not available', driver) continue multiprocessing_data.append({ 'fun': fun, 'opts': opts, 'query': this_query, 'alias': alias, 'driver': driver }) output = {} if not multiprocessing_data: return output data_count = len(multiprocessing_data) pool = multiprocessing.Pool(data_count < 10 and data_count or 10, init_pool_worker) parallel_pmap = enter_mainloop(_run_parallel_map_providers_query, multiprocessing_data, pool=pool) for alias, driver, details in parallel_pmap: if not details: # There are no provider details; skip it. continue if alias not in output: output[alias] = {} output[alias][driver] = details self.__cached_provider_queries[query] = output return output
[ "def", "map_providers_parallel", "(", "self", ",", "query", "=", "'list_nodes'", ",", "cached", "=", "False", ")", ":", "if", "cached", "is", "True", "and", "query", "in", "self", ".", "__cached_provider_queries", ":", "return", "self", ".", "__cached_provider_queries", "[", "query", "]", "opts", "=", "self", ".", "opts", ".", "copy", "(", ")", "multiprocessing_data", "=", "[", "]", "# Optimize Providers", "opts", "[", "'providers'", "]", "=", "self", ".", "_optimize_providers", "(", "opts", "[", "'providers'", "]", ")", "for", "alias", ",", "drivers", "in", "six", ".", "iteritems", "(", "opts", "[", "'providers'", "]", ")", ":", "# Make temp query for this driver to avoid overwrite next", "this_query", "=", "query", "for", "driver", ",", "details", "in", "six", ".", "iteritems", "(", "drivers", ")", ":", "# If driver has function list_nodes_min, just replace it", "# with query param to check existing vms on this driver", "# for minimum information, Otherwise still use query param.", "if", "opts", ".", "get", "(", "'selected_query_option'", ")", "is", "None", "and", "'{0}.list_nodes_min'", ".", "format", "(", "driver", ")", "in", "self", ".", "clouds", ":", "this_query", "=", "'list_nodes_min'", "fun", "=", "'{0}.{1}'", ".", "format", "(", "driver", ",", "this_query", ")", "if", "fun", "not", "in", "self", ".", "clouds", ":", "log", ".", "error", "(", "'Public cloud provider %s is not available'", ",", "driver", ")", "continue", "multiprocessing_data", ".", "append", "(", "{", "'fun'", ":", "fun", ",", "'opts'", ":", "opts", ",", "'query'", ":", "this_query", ",", "'alias'", ":", "alias", ",", "'driver'", ":", "driver", "}", ")", "output", "=", "{", "}", "if", "not", "multiprocessing_data", ":", "return", "output", "data_count", "=", "len", "(", "multiprocessing_data", ")", "pool", "=", "multiprocessing", ".", "Pool", "(", "data_count", "<", "10", "and", "data_count", "or", "10", ",", "init_pool_worker", ")", "parallel_pmap", "=", "enter_mainloop", "(", "_run_parallel_map_providers_query", ",", "multiprocessing_data", ",", "pool", "=", "pool", ")", "for", "alias", ",", "driver", ",", "details", "in", "parallel_pmap", ":", "if", "not", "details", ":", "# There's no providers details?! Skip it!", "continue", "if", "alias", "not", "in", "output", ":", "output", "[", "alias", "]", "=", "{", "}", "output", "[", "alias", "]", "[", "driver", "]", "=", "details", "self", ".", "__cached_provider_queries", "[", "query", "]", "=", "output", "return", "output" ]
42.263158
[ 0.014925373134328358, 0.18181818181818182, 0.02666666666666667, 0.027777777777777776, 0, 0.038461538461538464, 0.18181818181818182, 0.02857142857142857, 0.03571428571428571, 0, 0.06451612903225806, 0.06060606060606061, 0, 0.07142857142857142, 0.028169014084507043, 0.031746031746031744, 0.028985507246376812, 0.06666666666666667, 0.034482758620689655, 0.027777777777777776, 0.028169014084507043, 0.02666666666666667, 0.02586206896551724, 0.04081632653061224, 0, 0.034482758620689655, 0.047619047619047616, 0.036585365853658534, 0.07142857142857142, 0, 0.06666666666666667, 0.06451612903225806, 0.06060606060606061, 0.05, 0.05714285714285714, 0.05555555555555555, 0.16666666666666666, 0.10526315789473684, 0.05555555555555555, 0.08, 0, 0.043478260869565216, 0.0410958904109589, 0.05660377358490566, 0.0410958904109589, 0.05, 0.10204081632653061, 0.038461538461538464, 0.07407407407407407, 0.03508771929824561, 0.08333333333333333, 0.05714285714285714, 0.058823529411764705, 0.046511627906976744, 0, 0.037037037037037035, 0.09523809523809523 ]
def get_cache_base(suffix=None): """ Return the default base location for distlib caches. If the directory does not exist, it is created. Use the suffix provided for the base directory, and default to '.distlib' if it isn't provided. On Windows, if LOCALAPPDATA is defined in the environment, then it is assumed to be a directory, and will be the parent directory of the result. On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home directory - using os.expanduser('~') - will be the parent directory of the result. The result is just the directory '.distlib' in the parent directory as determined above, or with the name specified with ``suffix``. """ if suffix is None: suffix = '.distlib' if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: result = os.path.expandvars('$localappdata') else: # Assume posix, or old Windows result = os.path.expanduser('~') # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if os.path.isdir(result): usable = os.access(result, os.W_OK) if not usable: logger.warning('Directory exists but is not writable: %s', result) else: try: os.makedirs(result) usable = True except OSError: logger.warning('Unable to create %s', result, exc_info=True) usable = False if not usable: result = tempfile.mkdtemp() logger.warning('Default location unusable, using %s', result) return os.path.join(result, suffix)
[ "def", "get_cache_base", "(", "suffix", "=", "None", ")", ":", "if", "suffix", "is", "None", ":", "suffix", "=", "'.distlib'", "if", "os", ".", "name", "==", "'nt'", "and", "'LOCALAPPDATA'", "in", "os", ".", "environ", ":", "result", "=", "os", ".", "path", ".", "expandvars", "(", "'$localappdata'", ")", "else", ":", "# Assume posix, or old Windows", "result", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "# we use 'isdir' instead of 'exists', because we want to", "# fail if there's a file with that name", "if", "os", ".", "path", ".", "isdir", "(", "result", ")", ":", "usable", "=", "os", ".", "access", "(", "result", ",", "os", ".", "W_OK", ")", "if", "not", "usable", ":", "logger", ".", "warning", "(", "'Directory exists but is not writable: %s'", ",", "result", ")", "else", ":", "try", ":", "os", ".", "makedirs", "(", "result", ")", "usable", "=", "True", "except", "OSError", ":", "logger", ".", "warning", "(", "'Unable to create %s'", ",", "result", ",", "exc_info", "=", "True", ")", "usable", "=", "False", "if", "not", "usable", ":", "result", "=", "tempfile", ".", "mkdtemp", "(", ")", "logger", ".", "warning", "(", "'Default location unusable, using %s'", ",", "result", ")", "return", "os", ".", "path", ".", "join", "(", "result", ",", "suffix", ")" ]
40.641026
[ 0.03125, 0.2857142857142857, 0.02564102564102564, 0.025974025974025976, 0.0392156862745098, 0, 0.0273972602739726, 0.02564102564102564, 0.02631578947368421, 0.02702702702702703, 0.13333333333333333, 0, 0.02702702702702703, 0.046153846153846156, 0.2857142857142857, 0.09090909090909091, 0.07407407407407407, 0.03571428571428571, 0.038461538461538464, 0.2222222222222222, 0.05263157894736842, 0.05, 0.03333333333333333, 0.046511627906976744, 0.06896551724137931, 0.046511627906976744, 0.09090909090909091, 0.02564102564102564, 0.2222222222222222, 0.16666666666666666, 0.06451612903225806, 0.08, 0.08695652173913043, 0.027777777777777776, 0.07692307692307693, 0.1111111111111111, 0.05714285714285714, 0.028985507246376812, 0.05128205128205128 ]
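Usage is straightforward; for example (the suffix is a hypothetical name, and the result depends on platform and environment):

cache_dir = get_cache_base('.mytool')  # hypothetical suffix
print(cache_dir)  # e.g. '/home/user/.mytool' on POSIX; under %LOCALAPPDATA% on Windows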
def ReadFileObject(self, artifacts_reader, file_object): """Reads artifact definitions into the registry from a file-like object. Args: artifacts_reader (ArtifactsReader): an artifacts reader. file_object (file): file-like object to read from. """ for artifact_definition in artifacts_reader.ReadFileObject(file_object): self.RegisterDefinition(artifact_definition)
[ "def", "ReadFileObject", "(", "self", ",", "artifacts_reader", ",", "file_object", ")", ":", "for", "artifact_definition", "in", "artifacts_reader", ".", "ReadFileObject", "(", "file_object", ")", ":", "self", ".", "RegisterDefinition", "(", "artifact_definition", ")" ]
43.555556
[ 0.017857142857142856, 0.02631578947368421, 0, 0.2222222222222222, 0.06451612903225806, 0.07142857142857142, 0.2857142857142857, 0.02631578947368421, 0.06 ]
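This appears to match the ForensicArtifacts 'artifacts' package; a hedged sketch of loading definitions from a YAML file, where the module paths and the definitions file name are assumptions:

from artifacts import reader, registry  # assumed module layout

artifact_registry = registry.ArtifactDefinitionsRegistry()
with open('definitions.yaml', 'rb') as file_object:  # hypothetical file
    artifact_registry.ReadFileObject(reader.YamlArtifactsReader(), file_object)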
def unpack_request(environ, content_length=0): """ Unpacks a GET or POST request query string. :param environ: WSGI application environment. :return: A dictionary with parameters. """ data = None if environ["REQUEST_METHOD"] == "GET": data = unpack_get(environ) elif environ["REQUEST_METHOD"] == "POST": data = unpack_post(environ, content_length) logger.debug("read request data: %s", data) return data
[ "def", "unpack_request", "(", "environ", ",", "content_length", "=", "0", ")", ":", "data", "=", "None", "if", "environ", "[", "\"REQUEST_METHOD\"", "]", "==", "\"GET\"", ":", "data", "=", "unpack_get", "(", "environ", ")", "elif", "environ", "[", "\"REQUEST_METHOD\"", "]", "==", "\"POST\"", ":", "data", "=", "unpack_post", "(", "environ", ",", "content_length", ")", "logger", ".", "debug", "(", "\"read request data: %s\"", ",", "data", ")", "return", "data" ]
32.142857
[ 0.021739130434782608, 0.2857142857142857, 0.0425531914893617, 0.057692307692307696, 0.07142857142857142, 0.2857142857142857, 0.13333333333333333, 0.047619047619047616, 0.058823529411764705, 0.044444444444444446, 0.0392156862745098, 0, 0.0425531914893617, 0.13333333333333333 ]
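A sketch with a hand-built WSGI environ; unpack_get and unpack_post are assumed helpers from the same module, so this only runs in that context:

environ = {"REQUEST_METHOD": "GET", "QUERY_STRING": "user=alice&id=7"}
data = unpack_request(environ)
# with a urlparse-style unpack_get, data would be {'user': ['alice'], 'id': ['7']}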
def moving_average_convergence(data, nslow=26, nfast=12): """ Compute the MACD (Moving Average Convergence/Divergence) using a fast and a slow exponential moving average. Returns emaslow, emafast, macd, which are len(data) arrays. """ emaslow = moving_average(data, nslow, type='exponential') emafast = moving_average(data, nfast, type='exponential') return emaslow, emafast, emafast - emaslow
[ "def", "moving_average_convergence", "(", "data", ",", "nslow", "=", "26", ",", "nfast", "=", "12", ")", ":", "emaslow", "=", "moving_average", "(", "data", ",", "nslow", ",", "type", "=", "'exponential'", ")", "emafast", "=", "moving_average", "(", "data", ",", "nfast", ",", "type", "=", "'exponential'", ")", "return", "emaslow", ",", "emafast", ",", "emafast", "-", "emaslow" ]
46.222222
[ 0.017543859649122806, 0.2857142857142857, 0.05, 0.041666666666666664, 0.028985507246376812, 0.2857142857142857, 0.03278688524590164, 0.03278688524590164, 0.043478260869565216 ]
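A quick sketch on synthetic data; moving_average(data, n, type='exponential') is assumed to come from the same matplotlib-finance-style module, and per the docstring all outputs are len(data) arrays:

import numpy as np

prices = 100.0 + np.cumsum(np.random.randn(250))  # random-walk "closing prices"
emaslow, emafast, macd = moving_average_convergence(prices, nslow=26, nfast=12)
assert emaslow.shape == emafast.shape == macd.shape == prices.shape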
def change_radius(self, selections, value): '''Change the radius of each selected atom to *value*; if value is None, reset each radius to the van der Waals default for the atom's type (scaled by 0.3). ''' if 'atoms' in selections: atms = selections['atoms'].mask if value is None: self.radii_state.array[atms] = [vdw_radii.get(t) * 0.3 for t in self.system.type_array[atms]] else: self.radii_state.array[atms] = value self.update_scale_factors(self.scale_factors)
[ "def", "change_radius", "(", "self", ",", "selections", ",", "value", ")", ":", "if", "'atoms'", "in", "selections", ":", "atms", "=", "selections", "[", "'atoms'", "]", ".", "mask", "if", "value", "is", "None", ":", "self", ".", "radii_state", ".", "array", "[", "atms", "]", "=", "[", "vdw_radii", ".", "get", "(", "t", ")", "*", "0.3", "for", "t", "in", "self", ".", "system", ".", "type_array", "[", "atms", "]", "]", "else", ":", "self", ".", "radii_state", ".", "array", "[", "atms", "]", "=", "value", "self", ".", "update_scale_factors", "(", "self", ".", "scale_factors", ")" ]
38.583333
[ 0.023255813953488372, 0.03333333333333333, 0.25, 0.18181818181818182, 0.06060606060606061, 0.046511627906976744, 0.06896551724137931, 0.03636363636363636, 0.11764705882352941, 0.038461538461538464, 0.5, 0.03773584905660377 ]
def get_doc_comments(text): r""" Return a list of all documentation comments in the file text. Each comment is a pair, with the first element being the comment text and the second element being the line after it, which may be needed to guess function & arguments. >>> get_doc_comments(read_file('examples/module.js'))[0][0][:40] '/**\n * This is the module documentation.' >>> get_doc_comments(read_file('examples/module.js'))[1][0][7:50] 'This is documentation for the first method.' >>> get_doc_comments(read_file('examples/module.js'))[1][1] 'function the_first_function(arg1, arg2) ' >>> get_doc_comments(read_file('examples/module.js'))[2][0] '/** This is the documentation for the second function. */' """ def make_pair(match): comment = match.group() try: end = text.find('\n', match.end(0)) + 1 if '@class' not in comment: next_line = next(split_delimited('()', '\n', text[end:])) else: next_line = text[end:text.find('\n', end)] except StopIteration: next_line = '' return (comment, next_line) return [make_pair(match) for match in re.finditer(r'/\*\*(.*?)\*/', text, re.DOTALL)]
[ "def", "get_doc_comments", "(", "text", ")", ":", "def", "make_pair", "(", "match", ")", ":", "comment", "=", "match", ".", "group", "(", ")", "try", ":", "end", "=", "text", ".", "find", "(", "'\\n'", ",", "match", ".", "end", "(", "0", ")", ")", "+", "1", "if", "'@class'", "not", "in", "comment", ":", "next_line", "=", "next", "(", "split_delimited", "(", "'()'", ",", "'\\n'", ",", "text", "[", "end", ":", "]", ")", ")", "else", ":", "next_line", "=", "text", "[", "end", ":", "text", ".", "find", "(", "'\\n'", ",", "end", ")", "]", "except", "StopIteration", ":", "next_line", "=", "''", "return", "(", "comment", ",", "next_line", ")", "return", "[", "make_pair", "(", "match", ")", "for", "match", "in", "re", ".", "finditer", "(", "'/\\*\\*(.*?)\\*/'", ",", "text", ",", "re", ".", "DOTALL", ")", "]" ]
41.666667
[ 0.037037037037037035, 0.25, 0.028169014084507043, 0.027777777777777776, 0.02857142857142857, 0.06451612903225806, 0, 0.04411764705882353, 0.0425531914893617, 0.043478260869565216, 0.04081632653061224, 0.047619047619047616, 0.043478260869565216, 0.047619047619047616, 0.031746031746031744, 0, 0.2857142857142857, 0.08, 0.06451612903225806, 0.16666666666666666, 0.0392156862745098, 0.05128205128205128, 0.0273972602739726, 0.11764705882352941, 0.034482758620689655, 0.06896551724137931, 0.07692307692307693, 0.05714285714285714, 0.09859154929577464, 0.10344827586206896 ]
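A small illustration; split_delimited is an assumed helper from the same module, so this only runs in that context:

js = '/** Adds two numbers. */\nfunction add(a, b) { return a + b; }\n'
comment, next_line = get_doc_comments(js)[0]
print(comment)    # '/** Adds two numbers. */'
print(next_line)  # roughly 'function add(a, b) { return a + b; }'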
def release_tcp_port(self, port, project): """ Release a specific TCP port number :param port: TCP port number :param project: Project instance """ if port in self._used_tcp_ports: self._used_tcp_ports.remove(port) project.remove_tcp_port(port) log.debug("TCP port {} has been released".format(port))
[ "def", "release_tcp_port", "(", "self", ",", "port", ",", "project", ")", ":", "if", "port", "in", "self", ".", "_used_tcp_ports", ":", "self", ".", "_used_tcp_ports", ".", "remove", "(", "port", ")", "project", ".", "remove_tcp_port", "(", "port", ")", "log", ".", "debug", "(", "\"TCP port {} has been released\"", ".", "format", "(", "port", ")", ")" ]
31.25
[ 0.023809523809523808, 0.18181818181818182, 0.047619047619047616, 0, 0.08333333333333333, 0.075, 0.18181818181818182, 0, 0.05, 0.044444444444444446, 0.04878048780487805, 0.029850746268656716 ]
def humanize_delay(delay): ''' Accept a floating point number representing link propagation delay in seconds (e.g., 0.1 for 100 milliseconds delay), and return a human(-like) string like '100 milliseconds'. Handles values as small as 1 microsecond, but no smaller. Because of imprecision in floating point numbers, a relatively easy way to handle this is to convert to string, then slice out sections. ''' delaystr = '{:1.06f}'.format(delay) decimal = delaystr.find('.') seconds = int(delaystr[:decimal]) millis = int(delaystr[-6:-3]) micros = int(delaystr[-3:]) # print (delay,delaystr,seconds,millis,micros) units = '' microsecs = micros + 1e3 * millis + 1e6 * seconds if micros > 0: units = ' \u00B5sec' value = int(microsecs) elif millis > 0: units = ' msec' value = int(microsecs / 1000) elif seconds > 0: units = ' sec' value = int(microsecs / 1000000) else: units = ' sec' value = delay if value > 1: units += 's' return '{}{}'.format(value, units)
[ "def", "humanize_delay", "(", "delay", ")", ":", "delaystr", "=", "'{:1.06f}'", ".", "format", "(", "delay", ")", "decimal", "=", "delaystr", ".", "find", "(", "'.'", ")", "seconds", "=", "int", "(", "delaystr", "[", ":", "decimal", "]", ")", "millis", "=", "int", "(", "delaystr", "[", "-", "6", ":", "-", "3", "]", ")", "micros", "=", "int", "(", "delaystr", "[", "-", "3", ":", "]", ")", "# print (delay,delaystr,seconds,millis,micros)", "units", "=", "''", "microsecs", "=", "micros", "+", "1e3", "*", "millis", "+", "1e6", "*", "seconds", "if", "micros", ">", "0", ":", "units", "=", "' \\u00B5sec'", "value", "=", "int", "(", "microsecs", ")", "elif", "millis", ">", "0", ":", "units", "=", "' msec'", "value", "=", "int", "(", "microsecs", "/", "1000", ")", "elif", "seconds", ">", "0", ":", "units", "=", "' sec'", "value", "=", "int", "(", "microsecs", "/", "1000000", ")", "else", ":", "units", "=", "' sec'", "value", "=", "delay", "if", "value", ">", "1", ":", "units", "+=", "'s'", "return", "'{}{}'", ".", "format", "(", "value", ",", "units", ")" ]
32.909091
[ 0.038461538461538464, 0.2857142857142857, 0.029411764705882353, 0.046153846153846156, 0.04285714285714286, 0.046511627906976744, 0, 0.028169014084507043, 0.027777777777777776, 0.2857142857142857, 0.075, 0.09090909090909091, 0.05405405405405406, 0.06060606060606061, 0.06451612903225806, 0.04, 0.14285714285714285, 0.03773584905660377, 0.1111111111111111, 0.07142857142857142, 0.06666666666666667, 0.1, 0.08695652173913043, 0.05405405405405406, 0.09523809523809523, 0.09090909090909091, 0.05, 0.2222222222222222, 0.09090909090909091, 0.09523809523809523, 0.11764705882352941, 0.1, 0.05263157894736842 ]
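Spot checks that follow directly from the string-slicing logic above:

print(humanize_delay(0.1))       # '100 msecs'
print(humanize_delay(0.000001))  # '1 µsec'
print(humanize_delay(2))         # '2 secs'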
def acquire_context(self): """ Inspect the stack to acquire the current context used to render the placeholder. I'm really sorry for this, but if you have a better way, you are welcome! """ frame = None request = None try: for f in inspect.stack()[1:]: frame = f[0] args, varargs, keywords, alocals = inspect.getargvalues(frame) if not request and 'request' in args: request = alocals['request'] if 'context' in args: return alocals['context'] finally: del frame return RequestContext(request)
[ "def", "acquire_context", "(", "self", ")", ":", "frame", "=", "None", "request", "=", "None", "try", ":", "for", "f", "in", "inspect", ".", "stack", "(", ")", "[", "1", ":", "]", ":", "frame", "=", "f", "[", "0", "]", "args", ",", "varargs", ",", "keywords", ",", "alocals", "=", "inspect", ".", "getargvalues", "(", "frame", ")", "if", "not", "request", "and", "'request'", "in", "args", ":", "request", "=", "alocals", "[", "'request'", "]", "if", "'context'", "in", "args", ":", "return", "alocals", "[", "'context'", "]", "finally", ":", "del", "frame", "return", "RequestContext", "(", "request", ")" ]
32.619048
[ 0.038461538461538464, 0.18181818181818182, 0.03225806451612903, 0.03278688524590164, 0.03636363636363636, 0.18181818181818182, 0.1, 0.09090909090909091, 0, 0.16666666666666666, 0.04878048780487805, 0.07142857142857142, 0.02564102564102564, 0.03773584905660377, 0.041666666666666664, 0.05405405405405406, 0.044444444444444446, 0.125, 0.09523809523809523, 0, 0.05263157894736842 ]
def run_command(cmd, *args): """ Runs command on the system with given ``args``. """ command = ' '.join((cmd,) + args) p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() return p.returncode, stdout, stderr
[ "def", "run_command", "(", "cmd", ",", "*", "args", ")", ":", "command", "=", "' '", ".", "join", "(", "(", "cmd", ",", "args", ")", ")", "p", "=", "Popen", "(", "command", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "return", "p", ".", "retcode", ",", "stdout", ",", "stderr" ]
32.5
[ 0.03571428571428571, 0.2857142857142857, 0.058823529411764705, 0.2857142857142857, 0.05714285714285714, 0.03333333333333333, 0.05555555555555555, 0.05555555555555555 ]
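With the join and returncode fixes above, a usage sketch; it assumes a POSIX shell and that Popen/PIPE are imported from subprocess at module level:

code, out, err = run_command('ls', '-l', '/tmp')
if code != 0:
    print('command failed:', err.decode())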
def transform_title(self, content_metadata_item): """ Return the title of the content item. """ title_with_locales = [] for locale in self.enterprise_configuration.get_locales(): title_with_locales.append({ 'locale': locale, 'value': content_metadata_item.get('title', '') }) return title_with_locales
[ "def", "transform_title", "(", "self", ",", "content_metadata_item", ")", ":", "title_with_locales", "=", "[", "]", "for", "locale", "in", "self", ".", "enterprise_configuration", ".", "get_locales", "(", ")", ":", "title_with_locales", ".", "append", "(", "{", "'locale'", ":", "locale", ",", "'value'", ":", "content_metadata_item", ".", "get", "(", "'title'", ",", "''", ")", "}", ")", "return", "title_with_locales" ]
30.384615
[ 0.02040816326530612, 0.18181818181818182, 0.044444444444444446, 0.18181818181818182, 0.06451612903225806, 0, 0.030303030303030304, 0.07692307692307693, 0.06060606060606061, 0.031746031746031744, 0.21428571428571427, 0, 0.06060606060606061 ]
def _rm_extract_sequence_and_name(alig_str_parts, s1_name, s2_name): """ parse an alignment line from a repeatmasker alignment and return the name of the sequence it is from and the sequence portion contained in the line. :param alig_str_parts: the alignment string, split around whitespace as list :param s1_name: the name of the first sequence in the alignment this line is from :param s2_name: the name of the second sequence in the alignment this line is from :return: a tuple of name and sequence string; name will always be either s1_name or s2_name :raise AlignmentIteratorError: if the line doesn't have the expected number of elements, or the name does not match either of s1_name or s2_name """ # first, based on the number of parts we have we'll guess whether it's a # reverse complement or not if len(alig_str_parts) == 4: # expect the first element to match something... nm = alig_str_parts[0] seq = alig_str_parts[2] elif len(alig_str_parts) == 5: # expect the second element to match something... nm = alig_str_parts[1] seq = alig_str_parts[3] else: raise AlignmentIteratorError("failed parsing alignment line '" + " ".join(alig_str_parts) + "'; reason: " + "expected this line to have 4 or 5 " + "elements, but it has " + str(len(alig_str_parts))) if _rm_name_match(nm, s1_name): return s1_name, seq elif _rm_name_match(nm, s2_name): return s2_name, seq else: raise AlignmentIteratorError("failed parsing alignment line '" + " ".join(alig_str_parts) + "'; reason: " + "extracted alignment name (" + nm + ") " + "did not match either sequence name from " + "header line (" + s1_name + " or " + s2_name + ")")
[ "def", "_rm_extract_sequence_and_name", "(", "alig_str_parts", ",", "s1_name", ",", "s2_name", ")", ":", "# first, based on the number of parts we have we'll guess whether its a", "# reverse complement or not", "if", "len", "(", "alig_str_parts", ")", "==", "4", ":", "# expect the first element to amtch something..", "nm", "=", "alig_str_parts", "[", "0", "]", "seq", "=", "alig_str_parts", "[", "2", "]", "elif", "len", "(", "alig_str_parts", ")", "==", "5", ":", "# expect the second element to match something...", "nm", "=", "alig_str_parts", "[", "1", "]", "seq", "=", "alig_str_parts", "[", "3", "]", "else", ":", "raise", "AlignmentIteratorError", "(", "\"failed parsing alignment line '\"", "+", "\" \"", ".", "join", "(", "alig_str_parts", ")", "+", "\"'; reason: \"", "+", "\"expected this line to have 4 or 5 \"", "+", "\"elements, but it has \"", "+", "str", "(", "len", "(", "alig_str_parts", ")", ")", ")", "if", "_rm_name_match", "(", "nm", ",", "s1_name", ")", ":", "return", "s1_name", ",", "seq", "elif", "_rm_name_match", "(", "nm", ",", "s2_name", ")", ":", "return", "s2_name", ",", "seq", "else", ":", "raise", "AlignmentIteratorError", "(", "\"failed parsing alignment line '\"", "+", "\" \"", ".", "join", "(", "alig_str_parts", ")", "+", "\"'; reason: \"", "+", "\"extracted alignment name (\"", "+", "nm", "+", "\") \"", "+", "\"did not match either sequence name from \"", "+", "\"header line (\"", "+", "s1_name", "+", "\" or \"", "+", "s2_name", "+", "\")\"", ")" ]
48
[ 0.014705882352941176, 0.4, 0.04, 0.039473684210526314, 0, 0.05128205128205128, 0.05128205128205128, 0.13636363636363635, 0.05063291139240506, 0.13636363636363635, 0.06756756756756757, 0.10344827586206896, 0.05194805194805195, 0.041666666666666664, 0.04918032786885246, 0.4, 0.0410958904109589, 0.10344827586206896, 0.1, 0.0392156862745098, 0.07692307692307693, 0.07407407407407407, 0.09375, 0.03773584905660377, 0.07692307692307693, 0.07407407407407407, 0.42857142857142855, 0.04411764705882353, 0.04, 0.04225352112676056, 0.05172413793103448, 0.06896551724137931, 0.09090909090909091, 0.08695652173913043, 0.08571428571428572, 0.08695652173913043, 0.42857142857142855, 0.04411764705882353, 0.04, 0.04, 0.03896103896103896, 0.043478260869565216, 0.0851063829787234 ]
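An illustrative call with a pre-split four-element RepeatMasker alignment line; the names are made up, and _rm_name_match is a helper from the same module:

parts = ['chr1', '100', 'ACGT-ACGT', '108']
name, seq = _rm_extract_sequence_and_name(parts, 'chr1', 'AluY#SINE/Alu')
# expected: name == 'chr1', seq == 'ACGT-ACGT'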
def aggregate_variable(estimate, id): """ Aggregate census table variables by a custom label. """ estimates = [ variable.estimates.get(division__id=id).estimate for variable in estimate.variable.label.variables.all() ] method = estimate.variable.label.aggregation if method == "s": aggregate = sum(estimates) elif method == "a": aggregate = statistics.mean(estimates) elif method == "m": aggregate = statistics.median(estimates) else: aggregate = None return aggregate
[ "def", "aggregate_variable", "(", "estimate", ",", "id", ")", ":", "estimates", "=", "[", "variable", ".", "estimates", ".", "get", "(", "division__id", "=", "id", ")", ".", "estimate", "for", "variable", "in", "estimate", ".", "variable", ".", "label", ".", "variables", ".", "all", "(", ")", "]", "method", "=", "estimate", ".", "variable", ".", "label", ".", "aggregation", "if", "method", "==", "\"s\"", ":", "aggregate", "=", "sum", "(", "estimates", ")", "elif", "method", "==", "\"a\"", ":", "aggregate", "=", "statistics", ".", "mean", "(", "estimates", ")", "elif", "method", "==", "\"m\"", ":", "aggregate", "=", "statistics", ".", "median", "(", "estimates", ")", "else", ":", "aggregate", "=", "None", "return", "aggregate" ]
33.944444
[ 0.02702702702702703, 0.18181818181818182, 0.03389830508474576, 0.18181818181818182, 0.14285714285714285, 0.03333333333333333, 0.029850746268656716, 0.3333333333333333, 0.038461538461538464, 0.08, 0.05263157894736842, 0.07407407407407407, 0.04, 0.07407407407407407, 0.038461538461538464, 0.15384615384615385, 0.07142857142857142, 0.08333333333333333 ]
def open_raw_data_file(filename, mode="w", title="", scan_parameters=None, socket_address=None): '''Mimics pytables.open_file() and stores the configuration and run configuration. Returns: RawDataFile object. Examples: with open_raw_data_file(filename = self.scan_data_filename, title=self.scan_id, scan_parameters=[scan_parameter]) as raw_data_file: # do something here raw_data_file.append(self.readout.data, scan_parameters={scan_parameter:scan_parameter_value}) ''' return RawDataFile(filename=filename, mode=mode, title=title, scan_parameters=scan_parameters, socket_address=socket_address)
[ "def", "open_raw_data_file", "(", "filename", ",", "mode", "=", "\"w\"", ",", "title", "=", "\"\"", ",", "scan_parameters", "=", "None", ",", "socket_address", "=", "None", ")", ":", "return", "RawDataFile", "(", "filename", "=", "filename", ",", "mode", "=", "mode", ",", "title", "=", "title", ",", "scan_parameters", "=", "scan_parameters", ",", "socket_address", "=", "socket_address", ")" ]
52.333333
[ 0.020833333333333332, 0.03529411764705882, 0, 0.16666666666666666, 0.09090909090909091, 0, 0.15384615384615385, 0.037037037037037035, 0.07407407407407407, 0.0392156862745098, 0.2857142857142857, 0.023255813953488372 ]
def build_pmid_exclusion_filter(pmids: Strings) -> EdgePredicate: """Fail for edges with citations whose references are one of the given PubMed identifiers. :param pmids: A PubMed identifier or list of PubMed identifiers to filter against """ if isinstance(pmids, str): @edge_predicate def pmid_exclusion_filter(data: EdgeData) -> bool: """Fail for edges with PubMed citations matching the contained PubMed identifier. :return: If the edge has a PubMed citation with the contained PubMed identifier """ return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] != pmids elif isinstance(pmids, Iterable): pmids = set(pmids) @edge_predicate def pmid_exclusion_filter(data: EdgeData) -> bool: """Pass for edges with PubMed citations matching one of the contained PubMed identifiers. :return: If the edge has a PubMed citation with one of the contained PubMed identifiers """ return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] not in pmids else: raise TypeError return pmid_exclusion_filter
[ "def", "build_pmid_exclusion_filter", "(", "pmids", ":", "Strings", ")", "->", "EdgePredicate", ":", "if", "isinstance", "(", "pmids", ",", "str", ")", ":", "@", "edge_predicate", "def", "pmid_exclusion_filter", "(", "data", ":", "EdgeData", ")", "->", "bool", ":", "\"\"\"Fail for edges with PubMed citations matching the contained PubMed identifier.\n\n :return: If the edge has a PubMed citation with the contained PubMed identifier\n \"\"\"", "return", "has_pubmed", "(", "data", ")", "and", "data", "[", "CITATION", "]", "[", "CITATION_REFERENCE", "]", "!=", "pmids", "elif", "isinstance", "(", "pmids", ",", "Iterable", ")", ":", "pmids", "=", "set", "(", "pmids", ")", "@", "edge_predicate", "def", "pmid_exclusion_filter", "(", "data", ":", "EdgeData", ")", "->", "bool", ":", "\"\"\"Pass for edges with PubMed citations matching one of the contained PubMed identifiers.\n\n :return: If the edge has a PubMed citation with one of the contained PubMed identifiers\n \"\"\"", "return", "has_pubmed", "(", "data", ")", "and", "data", "[", "CITATION", "]", "[", "CITATION_REFERENCE", "]", "not", "in", "pmids", "else", ":", "raise", "TypeError", "return", "pmid_exclusion_filter" ]
39.793103
[ 0.015384615384615385, 0.031914893617021274, 0, 0.047058823529411764, 0.2857142857142857, 0.06666666666666667, 0.08695652173913043, 0.034482758620689655, 0.03225806451612903, 0, 0.04395604395604396, 0.13333333333333333, 0.03614457831325301, 0, 0.05405405405405406, 0.07692307692307693, 0, 0.08695652173913043, 0.034482758620689655, 0.0297029702970297, 0, 0.04040404040404041, 0.13333333333333333, 0.034482758620689655, 0, 0.2222222222222222, 0.08695652173913043, 0, 0.0625 ]
def diop_natural_solution_linear(c, a, b): """ It finds the first natural solution of the diophantine equation a*x + b*y = c. Some lines of this code are taken from the project sympy. :param c: constant :param a: quotient of x :param b: quotient of y :return: the first natural solution of the diophantine equation """ def get_intersection(a, b, a_dir, b_dir): # Do the intersection between two # ranges. if (a_dir, b_dir) == (">=", ">="): lb = a if a > b else b ub = float('inf') elif (a_dir, b_dir) == ("<=", ">="): if a > b: lb = b ub = a else: lb = None ub = None elif (a_dir, b_dir) == (">=", "<="): if b > a: lb = a ub = b else: lb = None ub = None elif (a_dir, b_dir) == ("<=", "<="): ub = a if a < b else b lb = float('-inf') return lb, ub d = StridedInterval.igcd(a, StridedInterval.igcd(b, c)) a = a // d b = b // d c = c // d if c == 0: return (0, 0) else: x0, y0, d = StridedInterval.extended_euclid(int(abs(a)), int(abs(b))) x0 = x0 * StridedInterval.sign(a) y0 = y0 * StridedInterval.sign(b) if c % d == 0: """ Integer solutions are: (c*x0 + b*t, c*y0 - a*t) we have to get the first positive solution, which means that we have to solve the following disequations for t: c*x0 + b*t >= 0 and c*y0 - a*t >= 0. """ assert b != 0 assert a != 0 t0 = (-c * x0) / float(b) t1 = (c * y0) / float(a) # direction of the disequation depends on the signs of b and a if b < 0: t0_dir = '<=' else: t0_dir = '>=' if a < 0: t1_dir = '>=' else: t1_dir = '<=' # calculate the intersection between the found # solution intervals to get the common solutions # for t. lb, ub = get_intersection(t0, t1, t0_dir, t1_dir) # Given that we are looking for the first value # which solves the diophantine equation, we have to # select the value of t closer to 0. if lb <= 0 and ub >= 0: t = ub if abs(ub) < abs(lb) else lb elif lb == float('inf') or lb == float("-inf"): t = ub elif ub == float('inf') or ub == float("-inf"): t = lb else: t = ub if abs(ub) < abs(lb) else lb # round the value of t if t == ub: t = int(math.floor(t)) else: t = int(math.ceil(t)) return (c*x0 + b*t, c*y0 - a*t) else: return (None, None)
[ "def", "diop_natural_solution_linear", "(", "c", ",", "a", ",", "b", ")", ":", "def", "get_intersection", "(", "a", ",", "b", ",", "a_dir", ",", "b_dir", ")", ":", "# Do the intersection between two", "# ranges.", "if", "(", "a_dir", ",", "b_dir", ")", "==", "(", "\">=\"", ",", "\">=\"", ")", ":", "lb", "=", "a", "if", "a", ">", "b", "else", "b", "ub", "=", "float", "(", "'inf'", ")", "elif", "(", "a_dir", ",", "b_dir", ")", "==", "(", "\"<=\"", ",", "\">=\"", ")", ":", "if", "a", ">", "b", ":", "lb", "=", "b", "ub", "=", "a", "else", ":", "lb", "=", "None", "ub", "=", "None", "elif", "(", "a_dir", ",", "b_dir", ")", "==", "(", "\">=\"", ",", "\"<=\"", ")", ":", "if", "b", ">", "a", ":", "lb", "=", "a", "ub", "=", "b", "else", ":", "lb", "=", "None", "ub", "=", "None", "elif", "(", "a_dir", ",", "b_dir", ")", "==", "(", "\"<=\"", ",", "\"<=\"", ")", ":", "ub", "=", "a", "if", "a", "<", "b", "else", "b", "lb", "=", "float", "(", "'-inf'", ")", "return", "lb", ",", "ub", "d", "=", "StridedInterval", ".", "igcd", "(", "a", ",", "StridedInterval", ".", "igcd", "(", "b", ",", "c", ")", ")", "a", "=", "a", "//", "d", "b", "=", "b", "//", "d", "c", "=", "c", "//", "d", "if", "c", "==", "0", ":", "return", "(", "0", ",", "0", ")", "else", ":", "x0", ",", "y0", ",", "d", "=", "StridedInterval", ".", "extended_euclid", "(", "int", "(", "abs", "(", "a", ")", ")", ",", "int", "(", "abs", "(", "b", ")", ")", ")", "x0", "=", "x0", "*", "StridedInterval", ".", "sign", "(", "a", ")", "y0", "=", "y0", "*", "StridedInterval", ".", "sign", "(", "b", ")", "if", "c", "%", "d", "==", "0", ":", "\"\"\"\n Integer solutions are: (c*x0 + b*t, c*y0 - a*t)\n we have to get the first positive solution, which means\n that we have to solve the following disequations for t:\n c*x0 + b*t >= 0 and c*y0 - a*t >= 0.\n \"\"\"", "assert", "b", "!=", "0", "assert", "a", "!=", "0", "t0", "=", "(", "-", "c", "*", "x0", ")", "/", "float", "(", "b", ")", "t1", "=", "(", "c", "*", "y0", ")", "/", "float", "(", "a", ")", "# direction of the disequation depends on b and a sign", "if", "b", "<", "0", ":", "t0_dir", "=", "'<='", "else", ":", "t0_dir", "=", "'>='", "if", "a", "<", "0", ":", "t1_dir", "=", "'>='", "else", ":", "t1_dir", "=", "'<='", "# calculate the intersection between the found", "# solution intervals to get the common solutions", "# for t.", "lb", ",", "ub", "=", "get_intersection", "(", "t0", ",", "t1", ",", "t0_dir", ",", "t1_dir", ")", "# Given that we are looking for the first value", "# which solve the diophantine equation, we have to", "# select the value of t closer to 0.", "if", "lb", "<=", "0", "and", "ub", ">=", "0", ":", "t", "=", "ub", "if", "abs", "(", "ub", ")", "<", "abs", "(", "lb", ")", "else", "lb", "elif", "lb", "==", "float", "(", "'inf'", ")", "or", "lb", "==", "float", "(", "\"-inf\"", ")", ":", "t", "=", "ub", "elif", "ub", "==", "float", "(", "'inf'", ")", "or", "ub", "==", "float", "(", "\"-inf\"", ")", ":", "t", "=", "lb", "else", ":", "t", "=", "ub", "if", "abs", "(", "ub", ")", "<", "abs", "(", "lb", ")", "else", "lb", "# round the value of t", "if", "t", "==", "ub", ":", "t", "=", "int", "(", "math", ".", "floor", "(", "t", ")", ")", "else", ":", "t", "=", "int", "(", "math", ".", "ceil", "(", "t", ")", ")", "return", "(", "c", "*", "x0", "+", "b", "*", "t", ",", "c", "*", "y0", "-", "a", "*", "t", ")", "else", ":", "return", "(", "None", ",", "None", ")" ]
34.104167
[ 0.023809523809523808, 0.18181818181818182, 0.02857142857142857, 0.0273972602739726, 0.14285714285714285, 0, 0.11538461538461539, 0.0967741935483871, 0.0967741935483871, 0.04285714285714286, 0.18181818181818182, 0.04081632653061224, 0.044444444444444446, 0.09523809523809523, 0.043478260869565216, 0.05263157894736842, 0.06060606060606061, 0.041666666666666664, 0.08, 0.07692307692307693, 0.07692307692307693, 0.09523809523809523, 0.06896551724137931, 0.06896551724137931, 0.041666666666666664, 0.08, 0.07692307692307693, 0.07692307692307693, 0.09523809523809523, 0.06896551724137931, 0.06896551724137931, 0.041666666666666664, 0.05263157894736842, 0.058823529411764705, 0, 0.08, 0, 0.031746031746031744, 0.1111111111111111, 0.1111111111111111, 0.1111111111111111, 0, 0.1111111111111111, 0.08, 0.15384615384615385, 0.037037037037037035, 0.044444444444444446, 0.044444444444444446, 0, 0.07692307692307693, 0.10526315789473684, 0.031746031746031744, 0.028169014084507043, 0.028169014084507043, 0.038461538461538464, 0.10526315789473684, 0.06896551724137931, 0.06896551724137931, 0, 0.04878048780487805, 0.05, 0.02857142857142857, 0.08, 0.06060606060606061, 0.09523809523809523, 0.06060606060606061, 0.08, 0.06060606060606061, 0.09523809523809523, 0.06060606060606061, 0, 0.03225806451612903, 0.03125, 0.08333333333333333, 0.03076923076923077, 0, 0.031746031746031744, 0.030303030303030304, 0.038461538461538464, 0.05128205128205128, 0.03636363636363636, 0.031746031746031744, 0.07692307692307693, 0.031746031746031744, 0.07692307692307693, 0.09523809523809523, 0.03636363636363636, 0.05263157894736842, 0.07407407407407407, 0.047619047619047616, 0.09523809523809523, 0.04878048780487805, 0, 0.0425531914893617, 0.11764705882352941, 0.05714285714285714 ]
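As a concrete check of what the function is meant to return, consider 2*x + 3*y = 11; a standalone brute-force enumeration of the natural solutions (not using the class above):

sols = [(x, y) for x in range(12) for y in range(12) if 2 * x + 3 * y == 11]
print(sols)  # [(1, 3), (4, 1)]; the function picks the candidate whose t is closest to 0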
def delete(index_name, force, verbose): """Delete index by its name.""" result = current_search_client.indices.delete( index=index_name, ignore=[400, 404] if force else None, ) if verbose: click.echo(json.dumps(result))
[ "def", "delete", "(", "index_name", ",", "force", ",", "verbose", ")", ":", "result", "=", "current_search_client", ".", "indices", ".", "delete", "(", "index", "=", "index_name", ",", "ignore", "=", "[", "400", ",", "404", "]", "if", "force", "else", "None", ",", ")", "if", "verbose", ":", "click", ".", "echo", "(", "json", ".", "dumps", "(", "result", ")", ")" ]
31.5
[ 0.02564102564102564, 0.05714285714285714, 0.06, 0.12, 0.06666666666666667, 0.6, 0.13333333333333333, 0.05263157894736842 ]
def complexes(network, state): """Return all irreducible complexes of the network. Args: network (Network): The |Network| of interest. state (tuple[int]): The state of the network (a binary tuple). Yields: SystemIrreducibilityAnalysis: A |SIA| for each |Subsystem| of the |Network|, excluding those with |big_phi = 0|. """ engine = FindIrreducibleComplexes(possible_complexes(network, state)) return engine.run(config.PARALLEL_COMPLEX_EVALUATION)
[ "def", "complexes", "(", "network", ",", "state", ")", ":", "engine", "=", "FindIrreducibleComplexes", "(", "possible_complexes", "(", "network", ",", "state", ")", ")", "return", "engine", ".", "run", "(", "config", ".", "PARALLEL_COMPLEX_EVALUATION", ")" ]
37.846154
[ 0.03333333333333333, 0.03636363636363636, 0, 0.2222222222222222, 0.09433962264150944, 0.05714285714285714, 0, 0.18181818181818182, 0.0821917808219178, 0.09259259259259259, 0.2857142857142857, 0.0273972602739726, 0.03508771929824561 ]
async def controller(self): """Return a Connection to the controller at self.endpoint """ return await Connection.connect( self.endpoint, username=self.username, password=self.password, cacert=self.cacert, bakery_client=self.bakery_client, loop=self.loop, max_frame_size=self.max_frame_size, )
[ "async", "def", "controller", "(", "self", ")", ":", "return", "await", "Connection", ".", "connect", "(", "self", ".", "endpoint", ",", "username", "=", "self", ".", "username", ",", "password", "=", "self", ".", "password", ",", "cacert", "=", "self", ".", "cacert", ",", "bakery_client", "=", "self", ".", "bakery_client", ",", "loop", "=", "self", ".", "loop", ",", "max_frame_size", "=", "self", ".", "max_frame_size", ",", ")" ]
33.166667
[ 0.037037037037037035, 0.03076923076923077, 0.18181818181818182, 0.075, 0.07692307692307693, 0.08571428571428572, 0.08571428571428572, 0.0967741935483871, 0.06666666666666667, 0.1111111111111111, 0.06382978723404255, 0.3333333333333333 ]
def load_cufflinks_dataframe( filename, id_column=ID_COLUMN, fpkm_column=FPKM_COLUMN, status_column=STATUS_COLUMN, locus_column=LOCUS_COLUMN, gene_names_column=GENE_NAMES_COLUMN, drop_failed=True, drop_lowdata=False, drop_hidata=True, replace_hidata_fpkm_value=None, drop_nonchromosomal_loci=False, drop_novel=False, sep=None): """ Loads a Cufflinks tracking file, which contains expression levels (in FPKM: Fragments Per Kilobase of transcript per Million fragments) for transcript isoforms or whole genes. These transcripts/genes may be previously known (in which case they have an Ensembl ID) or a novel assembly from the RNA-Seq data (in which case their IDs look like "CUFF.1") Parameters ---------- filename : str Filename of tracking file e.g. "genes.tracking_fpkm" id_column : str, optional fpkm_column : str, optional status_column : str, optional Name of column which indicates the FPKM estimate status. The column name is typically "FPKM_status". Possible values contained within this column are OK, FAIL, LOWDATA, HIDATA. locus_column : str, optional gene_names_column : str, optional drop_failed : bool, optional Drop rows whose FPKM status is "FAIL" (default=True) drop_lowdata : bool, optional Drop rows whose FPKM status is "LOWDATA", meaning that Cufflinks thought there were too few reads to accurately estimate the FPKM (default=False) drop_hidata : bool, optional Drop rows whose FPKM status is "HIDATA", meaning that too many fragments aligned to a feature for Cufflinks to process. Dropping the most expressed genes seems like a bad idea, but note that the default here is True, matching the signature. replace_hidata_fpkm_value : float, optional If drop_hidata=False, the HIDATA entries will still have an FPKM=0.0, this argument lets you replace the FPKM with some known constant. drop_nonchromosomal_loci : bool, optional Drop rows whose location isn't on a canonical chromosome i.e.
doesn't start with "chr" (default=False) drop_novel : bool, optional Drop genes or isoforms that aren't found in Ensembl (default = False) sep : str, optional Separator between data fields in the FPKM tracking file (default is to infer whether the file uses comma or whitespace) Returns DataFrame with columns: id : str novel : bool fpkm : float chr : str start : int end : int gene_names : str list """ if sep is None: sep = infer_delimiter(filename) df = pd.read_csv(filename, sep=sep, engine="c") required_columns = { status_column, locus_column, id_column, gene_names_column, fpkm_column } check_required_columns(df, filename, required_columns) for flag, status_value in [ (drop_failed, "FAIL"), (drop_lowdata, "LOWDATA"), (drop_hidata, "HIDATA")]: mask = df[status_column] == status_value mask_count = mask.sum() total_count = len(df) if flag and mask_count > 0: verb_str = "Dropping" df = df[~mask] else: verb_str = "Keeping" logging.info( "%s %d/%d entries from %s with status=%s", verb_str, mask_count, total_count, filename, status_value) if drop_nonchromosomal_loci: loci = df[locus_column] chromosomal_loci = loci.str.startswith("chr") n_dropped = (~chromosomal_loci).sum() if n_dropped > 0: logging.info("Dropping %d/%d non-chromosomal loci from %s" % ( n_dropped, len(df), filename)) df = df[chromosomal_loci] if replace_hidata_fpkm_value: hidata_mask = df[status_column] == "HIDATA" n_hidata = hidata_mask.sum() logging.info( "Setting FPKM=%s for %d/%d entries with status=HIDATA", replace_hidata_fpkm_value, n_hidata, len(df)) df[fpkm_column][hidata_mask] = replace_hidata_fpkm_value if len(df) == 0: raise ValueError("Empty FPKM tracking file: %s" % filename) ids = df[id_column] known = ids.str.startswith("ENS") if known.sum() == 0: raise ValueError("No Ensembl IDs found in %s" % filename) if drop_novel: n_dropped = (~known).sum() if n_dropped > 0: logging.info( "Dropping %d/%d novel entries from %s", n_dropped, len(df), filename) df = df[known] known = np.ones(len(df), dtype='bool') loci = df[locus_column] chromosomes, starts, ends = parse_locus_column(df[locus_column]) # gene names are given either as "-" or a comma separated list # e.g. "BRAF1,PFAM2" gene_names_strings = df[gene_names_column].copy() gene_names_strings[gene_names_strings == "-"] = "" # split each entry into a list of zero or more strings gene_names_lists = gene_names_strings.str.split(",") return pd.DataFrame({ "id": df[id_column], "novel": ~known, "fpkm": df[fpkm_column], "chr": chromosomes, "start": starts, "end": ends, "gene_names": gene_names_lists })
[ "def", "load_cufflinks_dataframe", "(", "filename", ",", "id_column", "=", "ID_COLUMN", ",", "fpkm_column", "=", "FPKM_COLUMN", ",", "status_column", "=", "STATUS_COLUMN", ",", "locus_column", "=", "LOCUS_COLUMN", ",", "gene_names_column", "=", "GENE_NAMES_COLUMN", ",", "drop_failed", "=", "True", ",", "drop_lowdata", "=", "False", ",", "drop_hidata", "=", "True", ",", "replace_hidata_fpkm_value", "=", "None", ",", "drop_nonchromosomal_loci", "=", "False", ",", "drop_novel", "=", "False", ",", "sep", "=", "None", ")", ":", "if", "sep", "is", "None", ":", "sep", "=", "infer_delimiter", "(", "filename", ")", "df", "=", "pd", ".", "read_csv", "(", "filename", ",", "sep", "=", "sep", ",", "engine", "=", "\"c\"", ")", "required_columns", "=", "{", "status_column", ",", "locus_column", ",", "id_column", ",", "gene_names_column", ",", "fpkm_column", "}", "check_required_columns", "(", "df", ",", "filename", ",", "required_columns", ")", "for", "flag", ",", "status_value", "in", "[", "(", "drop_failed", ",", "\"FAIL\"", ")", ",", "(", "drop_lowdata", ",", "\"LOWDATA\"", ")", ",", "(", "drop_hidata", ",", "\"HIDATA\"", ")", "]", ":", "mask", "=", "df", "[", "status_column", "]", "==", "status_value", "mask_count", "=", "mask", ".", "sum", "(", ")", "total_count", "=", "len", "(", "df", ")", "if", "flag", "and", "mask_count", ">", "0", ":", "verb_str", "=", "\"Dropping\"", "df", "=", "df", "[", "~", "mask", "]", "else", ":", "verb_str", "=", "\"Keeping\"", "logging", ".", "info", "(", "\"%s %d/%d entries from %s with status=%s\"", ",", "verb_str", ",", "mask_count", ",", "total_count", ",", "filename", ",", "status_value", ")", "if", "drop_nonchromosomal_loci", ":", "loci", "=", "df", "[", "locus_column", "]", "chromosomal_loci", "=", "loci", ".", "str", ".", "startswith", "(", "\"chr\"", ")", "n_dropped", "=", "(", "~", "chromosomal_loci", ")", ".", "sum", "(", ")", "if", "n_dropped", ">", "0", ":", "logging", ".", "info", "(", "\"Dropping %d/%d non-chromosomal loci from %s\"", "%", "(", "n_dropped", ",", "len", "(", "df", ")", ",", "filename", ")", ")", "df", "=", "df", "[", "chromosomal_loci", "]", "if", "replace_hidata_fpkm_value", ":", "hidata_mask", "=", "df", "[", "status_column", "]", "==", "\"HIDATA\"", "n_hidata", "=", "hidata_mask", ".", "sum", "(", ")", "logging", ".", "info", "(", "\"Setting FPKM=%s for %d/%d entries with status=HIDATA\"", ",", "replace_hidata_fpkm_value", ",", "n_hidata", ",", "len", "(", "df", ")", ")", "df", "[", "fpkm_column", "]", "[", "hidata_mask", "]", "=", "replace_hidata_fpkm_value", "if", "len", "(", "df", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Empty FPKM tracking file: %s\"", "%", "filename", ")", "ids", "=", "df", "[", "id_column", "]", "known", "=", "ids", ".", "str", ".", "startswith", "(", "\"ENS\"", ")", "if", "known", ".", "sum", "(", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No Ensembl IDs found in %s\"", "%", "filename", ")", "if", "drop_novel", ":", "n_dropped", "=", "(", "~", "known", ")", ".", "sum", "(", ")", "if", "n_dropped", ">", "0", ":", "logging", ".", "info", "(", "\"Dropping %d/%d novel entries from %s\"", ",", "n_dropped", ",", "len", "(", "df", ")", ",", "filename", ")", "df", "=", "df", "[", "known", "]", "known", "=", "np", ".", "ones", "(", "len", "(", "df", ")", ",", "dtype", "=", "'bool'", ")", "loci", "=", "df", "[", "locus_column", "]", "chromosomes", ",", "starts", ",", "ends", "=", "parse_locus_column", "(", "df", "[", "locus_column", "]", ")", "# gene names are given either as \"-\" or a comma 
separated list", "# e.g. \"BRAF1,PFAM2\"", "gene_names_strings", "=", "df", "[", "gene_names_column", "]", ".", "copy", "(", ")", "gene_names_strings", "[", "gene_names_strings", "==", "\"-\"", "]", "=", "\"\"", "# split each entry into a list of zero or more strings", "gene_names_lists", "=", "gene_names_strings", ".", "str", ".", "split", "(", "\",\"", ")", "return", "pd", ".", "DataFrame", "(", "{", "\"id\"", ":", "df", "[", "id_column", "]", ",", "\"novel\"", ":", "~", "known", ",", "\"fpkm\"", ":", "df", "[", "fpkm_column", "]", ",", "\"chr\"", ":", "chromosomes", ",", "\"start\"", ":", "starts", ",", "\"end\"", ":", "ends", ",", "\"gene_names\"", ":", "gene_names_lists", "}", ")" ]
31.779762
[ 0.06896551724137931, 0.11764705882352941, 0.10714285714285714, 0.09375, 0.08333333333333333, 0.08823529411764706, 0.06818181818181818, 0.12, 0.1111111111111111, 0.12, 0.07692307692307693, 0.07692307692307693, 0.12, 0.2222222222222222, 0.2857142857142857, 0.028985507246376812, 0.0410958904109589, 0.02702702702702703, 0.056338028169014086, 0.05063291139240506, 0, 0.14285714285714285, 0.14285714285714285, 0, 0.16666666666666666, 0.03333333333333333, 0, 0.10344827586206896, 0, 0.0967741935483871, 0, 0.09090909090909091, 0.02666666666666667, 0.02564102564102564, 0.047619047619047616, 0, 0.09375, 0, 0.08108108108108109, 0, 0.09375, 0.03333333333333333, 0, 0.09090909090909091, 0.0375, 0.05, 0, 0.09375, 0.02857142857142857, 0.0273972602739726, 0.04, 0, 0.06382978723404255, 0.05194805194805195, 0.0273972602739726, 0, 0.06666666666666667, 0.03125, 0.03773584905660377, 0, 0.0967741935483871, 0.06493506493506493, 0, 0.13043478260869565, 0.031746031746031744, 0.028169014084507043, 0, 0.05714285714285714, 0.1875, 0.15, 0.15, 0.17647058823529413, 0.15789473684210525, 0.17647058823529413, 0.10344827586206896, 0.2857142857142857, 0.10526315789473684, 0.05128205128205128, 0, 0.0392156862745098, 0, 0.125, 0.09090909090909091, 0.09523809523809523, 0.1111111111111111, 0.07692307692307693, 0.10526315789473684, 0.6, 0.034482758620689655, 0, 0.0967741935483871, 0.058823529411764705, 0.05263157894736842, 0.08108108108108109, 0.041666666666666664, 0.06451612903225806, 0.06896551724137931, 0.05714285714285714, 0.06060606060606061, 0.07692307692307693, 0.15384615384615385, 0.0625, 0.14285714285714285, 0.037037037037037035, 0.09523809523809523, 0.08695652173913043, 0.08333333333333333, 0.09523809523809523, 0.12, 0, 0.0625, 0.06451612903225806, 0.03773584905660377, 0.044444444444444446, 0.08, 0.04054054054054054, 0.06521739130434782, 0.05405405405405406, 0, 0.06060606060606061, 0.0392156862745098, 0.05555555555555555, 0.14285714285714285, 0.029850746268656716, 0.05263157894736842, 0.09523809523809523, 0.15, 0.03125, 0, 0.1, 0.029850746268656716, 0, 0.08695652173913043, 0.05405405405405406, 0, 0.08333333333333333, 0.03076923076923077, 0, 0.1111111111111111, 0.058823529411764705, 0.08, 0.12, 0.03636363636363636, 0.07692307692307693, 0.08333333333333333, 0.12, 0.07692307692307693, 0.04, 0, 0.07407407407407407, 0.029411764705882353, 0, 0.030303030303030304, 0.08333333333333333, 0.03773584905660377, 0.037037037037037035, 0.034482758620689655, 0.03571428571428571, 0, 0.12, 0.07142857142857142, 0.08333333333333333, 0.0625, 0.07407407407407407, 0.08333333333333333, 0.1, 0.05263157894736842, 0.5 ]
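A plausible invocation of the loader above; the file name and keyword choices are illustrative assumptions:

df = load_cufflinks_dataframe(
    "genes.fpkm_tracking",          # hypothetical Cufflinks tracking file
    drop_novel=True,                # keep only Ensembl-annotated entries
    drop_nonchromosomal_loci=True)
fpkms = df[["id", "fpkm", "chr", "start", "end"]]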
def get(self, element, selected=False): """ Returns the curses attribute code for the given element. """ if self._attribute_map is None: raise RuntimeError('Attempted to access theme attribute before ' 'calling initialize_curses_theme()') if selected or self._selected: element = '@{0}'.format(element) return self._attribute_map[element]
[ "def", "get", "(", "self", ",", "element", ",", "selected", "=", "False", ")", ":", "if", "self", ".", "_attribute_map", "is", "None", ":", "raise", "RuntimeError", "(", "'Attempted to access theme attribute before '", "'calling initialize_curses_theme()'", ")", "if", "selected", "or", "self", ".", "_selected", ":", "element", "=", "'@{0}'", ".", "format", "(", "element", ")", "return", "self", ".", "_attribute_map", "[", "element", "]" ]
36
[ 0.02564102564102564, 0.18181818181818182, 0.03125, 0.18181818181818182, 0.05128205128205128, 0.039473684210526314, 0.05970149253731343, 0, 0.05263157894736842, 0.045454545454545456, 0, 0.046511627906976744 ]
def delete_agent(self, agent_id): """Delete an agent. :param str agent_id: The id of the agent to delete. It must be a str containing only characters in "a-zA-Z0-9_-" and must be between 1 and 36 characters. :return: agent deleted. :rtype: dict. """ # Raises an error when agent_id is invalid self._check_agent_id(agent_id) req_url = "{}/agents/{}".format(self._base_url, agent_id) resp = self._requests_session.delete(req_url) decoded_resp = self._decode_response(resp) return decoded_resp
[ "def", "delete_agent", "(", "self", ",", "agent_id", ")", ":", "# Raises an error when agent_id is invalid", "self", ".", "_check_agent_id", "(", "agent_id", ")", "req_url", "=", "\"{}/agents/{}\"", ".", "format", "(", "self", ".", "_base_url", ",", "agent_id", ")", "resp", "=", "self", ".", "_requests_session", ".", "delete", "(", "req_url", ")", "decoded_resp", "=", "self", ".", "_decode_response", "(", "resp", ")", "return", "decoded_resp" ]
27.894737
[ 0.030303030303030304, 0.08695652173913043, 0, 0.047619047619047616, 0.03278688524590164, 0.05, 0, 0.1111111111111111, 0.17647058823529413, 0.2857142857142857, 0.043478260869565216, 0.058823529411764705, 0, 0.03278688524590164, 0.04081632653061224, 0, 0.043478260869565216, 0, 0.08695652173913043 ]
def consistency_check(text, word_pairs, err, msg, offset=0): """Build a consistency checker for the given word_pairs.""" errors = [] msg = " ".join(msg.split()) for w in word_pairs: matches = [ [m for m in re.finditer(w[0], text)], [m for m in re.finditer(w[1], text)] ] if len(matches[0]) > 0 and len(matches[1]) > 0: idx_minority = len(matches[0]) > len(matches[1]) for m in matches[idx_minority]: errors.append(( m.start() + offset, m.end() + offset, err, msg.format(w[~idx_minority], m.group(0)), w[~idx_minority])) return errors
[ "def", "consistency_check", "(", "text", ",", "word_pairs", ",", "err", ",", "msg", ",", "offset", "=", "0", ")", ":", "errors", "=", "[", "]", "msg", "=", "\" \"", ".", "join", "(", "msg", ".", "split", "(", ")", ")", "for", "w", "in", "word_pairs", ":", "matches", "=", "[", "[", "m", "for", "m", "in", "re", ".", "finditer", "(", "w", "[", "0", "]", ",", "text", ")", "]", ",", "[", "m", "for", "m", "in", "re", ".", "finditer", "(", "w", "[", "1", "]", ",", "text", ")", "]", "]", "if", "len", "(", "matches", "[", "0", "]", ")", ">", "0", "and", "len", "(", "matches", "[", "1", "]", ")", ">", "0", ":", "idx_minority", "=", "len", "(", "matches", "[", "0", "]", ")", ">", "len", "(", "matches", "[", "1", "]", ")", "for", "m", "in", "matches", "[", "idx_minority", "]", ":", "errors", ".", "append", "(", "(", "m", ".", "start", "(", ")", "+", "offset", ",", "m", ".", "end", "(", ")", "+", "offset", ",", "err", ",", "msg", ".", "format", "(", "w", "[", "~", "idx_minority", "]", ",", "m", ".", "group", "(", "0", ")", ")", ",", "w", "[", "~", "idx_minority", "]", ")", ")", "return", "errors" ]
28.92
[ 0.016666666666666666, 0.031746031746031744, 0.13333333333333333, 0, 0.06451612903225806, 0, 0.08333333333333333, 0.15789473684210525, 0.04081632653061224, 0.041666666666666664, 0.3333333333333333, 0, 0.03636363636363636, 0, 0.03333333333333333, 0, 0.046511627906976744, 0.0967741935483871, 0.05128205128205128, 0.05405405405405406, 0.08333333333333333, 0.03278688524590164, 0.07894736842105263, 0, 0.11764705882352941 ]
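A worked call showing the minority/majority behavior of the checker above; the error code and message template are invented placeholders:

text = "Her advisor met the adviser before the other advisor arrived."
errors = consistency_check(
    text, [["adviser", "advisor"]], "example.consistency",
    "Inconsistent spelling: prefer '{}', found '{}'.")
# One (start, end, err, message, replacement) tuple is produced for the
# single "adviser" match, because "advisor" is the more frequent form here.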
def get_dataset(self, key, info): """Load a dataset.""" if key.name in ['owiLat', 'owiLon']: if self.lons is None or self.lats is None: self.lons = self.nc['owiLon'] self.lats = self.nc['owiLat'] if key.name == 'owiLat': res = self.lats else: res = self.lons res.attrs = info else: res = self.nc[key.name] if key.name in ['owiHs', 'owiWl', 'owiDirmet']: res = xr.DataArray(res, dims=['y', 'x', 'oswPartitions']) elif key.name in ['owiNrcs', 'owiNesz', 'owiNrcsNeszCorr']: res = xr.DataArray(res, dims=['y', 'x', 'oswPolarisation']) elif key.name in ['owiPolarisationName']: res = xr.DataArray(res, dims=['owiPolarisation']) elif key.name in ['owiCalConstObsi', 'owiCalConstInci']: res = xr.DataArray(res, dims=['owiIncSize']) elif key.name.startswith('owi'): res = xr.DataArray(res, dims=['y', 'x']) else: res = xr.DataArray(res, dims=['y', 'x']) res.attrs.update(info) if '_FillValue' in res.attrs: res = res.where(res != res.attrs['_FillValue']) res.attrs['_FillValue'] = np.nan if 'missionName' in self.nc.attrs: res.attrs.update({'platform_name': self.nc.attrs['missionName']}) res.attrs.update({'fstart_time': self._fstart_time}) res.attrs.update({'fend_time': self._fend_time}) if not self._shape: self._shape = res.shape return res
[ "def", "get_dataset", "(", "self", ",", "key", ",", "info", ")", ":", "if", "key", ".", "name", "in", "[", "'owiLat'", ",", "'owiLon'", "]", ":", "if", "self", ".", "lons", "is", "None", "or", "self", ".", "lats", "is", "None", ":", "self", ".", "lons", "=", "self", ".", "nc", "[", "'owiLon'", "]", "self", ".", "lats", "=", "self", ".", "nc", "[", "'owiLat'", "]", "if", "key", ".", "name", "==", "'owiLat'", ":", "res", "=", "self", ".", "lats", "else", ":", "res", "=", "self", ".", "lons", "res", ".", "attrs", "=", "info", "else", ":", "res", "=", "self", ".", "nc", "[", "key", ".", "name", "]", "if", "key", ".", "name", "in", "[", "'owiHs'", ",", "'owiWl'", ",", "'owiDirmet'", "]", ":", "res", "=", "xr", ".", "DataArray", "(", "res", ",", "dims", "=", "[", "'y'", ",", "'x'", ",", "'oswPartitions'", "]", ")", "elif", "key", ".", "name", "in", "[", "'owiNrcs'", ",", "'owiNesz'", ",", "'owiNrcsNeszCorr'", "]", ":", "res", "=", "xr", ".", "DataArray", "(", "res", ",", "dims", "=", "[", "'y'", ",", "'x'", ",", "'oswPolarisation'", "]", ")", "elif", "key", ".", "name", "in", "[", "'owiPolarisationName'", "]", ":", "res", "=", "xr", ".", "DataArray", "(", "res", ",", "dims", "=", "[", "'owiPolarisation'", "]", ")", "elif", "key", ".", "name", "in", "[", "'owiCalConstObsi'", ",", "'owiCalConstInci'", "]", ":", "res", "=", "xr", ".", "DataArray", "(", "res", ",", "dims", "=", "[", "'owiIncSize'", "]", ")", "elif", "key", ".", "name", ".", "startswith", "(", "'owi'", ")", ":", "res", "=", "xr", ".", "DataArray", "(", "res", ",", "dims", "=", "[", "'y'", ",", "'x'", "]", ")", "else", ":", "res", "=", "xr", ".", "DataArray", "(", "res", ",", "dims", "=", "[", "'y'", ",", "'x'", "]", ")", "res", ".", "attrs", ".", "update", "(", "info", ")", "if", "'_FillValue'", "in", "res", ".", "attrs", ":", "res", "=", "res", ".", "where", "(", "res", "!=", "res", ".", "attrs", "[", "'_FillValue'", "]", ")", "res", ".", "attrs", "[", "'_FillValue'", "]", "=", "np", ".", "nan", "if", "'missionName'", "in", "self", ".", "nc", ".", "attrs", ":", "res", ".", "attrs", ".", "update", "(", "{", "'platform_name'", ":", "self", ".", "nc", ".", "attrs", "[", "'missionName'", "]", "}", ")", "res", ".", "attrs", ".", "update", "(", "{", "'fstart_time'", ":", "self", ".", "_fstart_time", "}", ")", "res", ".", "attrs", ".", "update", "(", "{", "'fend_time'", ":", "self", ".", "_fend_time", "}", ")", "if", "not", "self", ".", "_shape", ":", "self", ".", "_shape", "=", "res", ".", "shape", "return", "res" ]
40.975
[ 0.030303030303030304, 0.06896551724137931, 0.045454545454545456, 0.037037037037037035, 0.044444444444444446, 0.044444444444444446, 0.05555555555555555, 0.06451612903225806, 0.11764705882352941, 0.06451612903225806, 0.07142857142857142, 0.15384615384615385, 0.05714285714285714, 0.03389830508474576, 0.0273972602739726, 0.028169014084507043, 0.02666666666666667, 0.03773584905660377, 0.03076923076923077, 0.029411764705882353, 0.03333333333333333, 0.045454545454545456, 0.03571428571428571, 0.11764705882352941, 0.03571428571428571, 0.058823529411764705, 0.04878048780487805, 0.031746031746031744, 0.041666666666666664, 0, 0.047619047619047616, 0.025974025974025976, 0, 0.03333333333333333, 0.03571428571428571, 0, 0.07407407407407407, 0.05714285714285714, 0, 0.1111111111111111 ]
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None, PathsToInputs=None, PathToOutput=None, PathToStderr='/dev/null', PathToStdout='/dev/null', UniqueOutputs=False, InputParam=None, OutputParam=None): """Generates command lines that can be used in a cluster environment param_iter : ParameterIterBase subclass instance PathToBin : Absolute location primary command (i.e. Python) PathToCmd : Absolute location of the command PathsToInputs : Absolute location(s) of input file(s) PathToOutput : Absolute location of output file PathToStderr : Path to stderr PathToStdout : Path to stdout UniqueOutputs : Generate unique tags for output files InputParam : Application input parameter (if not specified, assumes stdin is to be used) OutputParam : Application output parameter (if not specified, assumes stdout is to be used) """ # Make sure we have input(s) and output if not PathsToInputs: raise ValueError("No input file(s) specified.") if not PathToOutput: raise ValueError("No output file specified.") if not isinstance(PathsToInputs, list): PathsToInputs = [PathsToInputs] # PathToBin and PathToCmd can be blank if PathToBin is None: PathToBin = '' if PathToCmd is None: PathToCmd = '' # stdout_ and stderr_ do not have to be redirected if PathToStdout is None: stdout_ = '' else: stdout_ = '> "%s"' % PathToStdout if PathToStderr is None: stderr_ = '' else: stderr_ = '2> "%s"' % PathToStderr # Output can be redirected to stdout or specified output argument if OutputParam is None: output = '> "%s"' % PathToOutput stdout_ = '' else: output_param = param_iter.AppParams[OutputParam] output_param.on('"%s"' % PathToOutput) output = str(output_param) output_param.off() output_count = 0 base_command = ' '.join([PathToBin, PathToCmd]) for params in param_iter: # Support for multiple input files for inputfile in PathsToInputs: cmdline = [base_command] cmdline.extend(sorted(filter(None, map(str, params.values())))) # Input can come from stdin or specified input argument if InputParam is None: input = '< "%s"' % inputfile else: input_param = params[InputParam] input_param.on('"%s"' % inputfile) input = str(input_param) input_param.off() cmdline.append(input) if UniqueOutputs: cmdline.append(''.join([output, str(output_count)])) output_count += 1 else: cmdline.append(output) cmdline.append(stdout_) cmdline.append(stderr_) yield ' '.join(cmdline)
[ "def", "cmdline_generator", "(", "param_iter", ",", "PathToBin", "=", "None", ",", "PathToCmd", "=", "None", ",", "PathsToInputs", "=", "None", ",", "PathToOutput", "=", "None", ",", "PathToStderr", "=", "'/dev/null'", ",", "PathToStdout", "=", "'/dev/null'", ",", "UniqueOutputs", "=", "False", ",", "InputParam", "=", "None", ",", "OutputParam", "=", "None", ")", ":", "# Make sure we have input(s) and output", "if", "not", "PathsToInputs", ":", "raise", "ValueError", "(", "\"No input file(s) specified.\"", ")", "if", "not", "PathToOutput", ":", "raise", "ValueError", "(", "\"No output file specified.\"", ")", "if", "not", "isinstance", "(", "PathsToInputs", ",", "list", ")", ":", "PathsToInputs", "=", "[", "PathsToInputs", "]", "# PathToBin and PathToCmd can be blank", "if", "PathToBin", "is", "None", ":", "PathToBin", "=", "''", "if", "PathToCmd", "is", "None", ":", "PathToCmd", "=", "''", "# stdout_ and stderr_ do not have to be redirected", "if", "PathToStdout", "is", "None", ":", "stdout_", "=", "''", "else", ":", "stdout_", "=", "'> \"%s\"'", "%", "PathToStdout", "if", "PathToStderr", "is", "None", ":", "stderr_", "=", "''", "else", ":", "stderr_", "=", "'2> \"%s\"'", "%", "PathToStderr", "# Output can be redirected to stdout or specified output argument", "if", "OutputParam", "is", "None", ":", "output", "=", "'> \"%s\"'", "%", "PathToOutput", "stdout_", "=", "''", "else", ":", "output_param", "=", "param_iter", ".", "AppParams", "[", "OutputParam", "]", "output_param", ".", "on", "(", "'\"%s\"'", "%", "PathToOutput", ")", "output", "=", "str", "(", "output_param", ")", "output_param", ".", "off", "(", ")", "output_count", "=", "0", "base_command", "=", "' '", ".", "join", "(", "[", "PathToBin", ",", "PathToCmd", "]", ")", "for", "params", "in", "param_iter", ":", "# Support for multiple input files", "for", "inputfile", "in", "PathsToInputs", ":", "cmdline", "=", "[", "base_command", "]", "cmdline", ".", "extend", "(", "sorted", "(", "filter", "(", "None", ",", "map", "(", "str", ",", "params", ".", "values", "(", ")", ")", ")", ")", ")", "# Input can come from stdin or specified input argument", "if", "InputParam", "is", "None", ":", "input", "=", "'< \"%s\"'", "%", "inputfile", "else", ":", "input_param", "=", "params", "[", "InputParam", "]", "input_param", ".", "on", "(", "'\"%s\"'", "%", "inputfile", ")", "input", "=", "str", "(", "input_param", ")", "input_param", ".", "off", "(", ")", "cmdline", ".", "append", "(", "input", ")", "if", "UniqueOutputs", ":", "cmdline", ".", "append", "(", "''", ".", "join", "(", "[", "output", ",", "str", "(", "output_count", ")", "]", ")", ")", "output_count", "+=", "1", "else", ":", "cmdline", ".", "append", "(", "output", ")", "cmdline", ".", "append", "(", "stdout_", ")", "cmdline", ".", "append", "(", "stderr_", ")", "yield", "' '", ".", "join", "(", "cmdline", ")" ]
34.702381
[ 0.03076923076923077, 0.08333333333333333, 0.0684931506849315, 0.0847457627118644, 0.125, 0.027777777777777776, 0, 0.057692307692307696, 0.06349206349206349, 0.0625, 0.05263157894736842, 0.058823529411764705, 0.09090909090909091, 0.09090909090909091, 0.05263157894736842, 0.07042253521126761, 0.10714285714285714, 0.0684931506849315, 0.10344827586206896, 0.2857142857142857, 0.046511627906976744, 0.08, 0.03636363636363636, 0.08333333333333333, 0.03773584905660377, 0, 0.046511627906976744, 0.05128205128205128, 0, 0.047619047619047616, 0.08, 0.09090909090909091, 0.08, 0.09090909090909091, 0, 0.037037037037037035, 0.07142857142857142, 0.1, 0.2222222222222222, 0.04878048780487805, 0.07142857142857142, 0.1, 0.2222222222222222, 0.047619047619047616, 0, 0.028985507246376812, 0.07407407407407407, 0.05, 0.1, 0.2222222222222222, 0.03571428571428571, 0.043478260869565216, 0.058823529411764705, 0.07692307692307693, 0, 0.1, 0.0392156862745098, 0.06896551724137931, 0.047619047619047616, 0.05128205128205128, 0.05555555555555555, 0.02666666666666667, 0, 0.029850746268656716, 0.058823529411764705, 0.045454545454545456, 0.11764705882352941, 0.041666666666666664, 0.04, 0.05, 0.06060606060606061, 0, 0.06060606060606061, 0, 0.06896551724137931, 0.029411764705882353, 0.06060606060606061, 0.11764705882352941, 0.05263157894736842, 0, 0.05714285714285714, 0.05714285714285714, 0, 0.05714285714285714 ]
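A sketch of driving the generator above from a job-submission script; the paths and the `param_iter` object are assumptions about the caller's setup:

for cmd in cmdline_generator(param_iter,
                             PathToBin="/usr/bin/python",
                             PathToCmd="/opt/app/run.py",
                             PathsToInputs=["/data/a.txt", "/data/b.txt"],
                             PathToOutput="/results/out",
                             UniqueOutputs=True):
    print(cmd)  # one shell-ready command line per input file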
def generate_table(self): """ Generates the GSO lookup table for the DataFrame Returns ------- gso_table : OrderedDict Ordered dictionary using the strings found as keys and their lookup position (v,o) as values gso_df : DataFrame DataFrame where strl columns have been converted to (v,o) values Notes ----- Modifies the DataFrame in-place. The DataFrame returned encodes the (v,o) values as uint64s. The encoding depends on the dta version, and can be expressed as enc = v + o * 2 ** (o_size * 8) so that v is stored in the lower bits and o is in the upper bits. o_size is * 117: 4 * 118: 6 * 119: 5 """ gso_table = self._gso_table gso_df = self.df columns = list(gso_df.columns) selected = gso_df[self.columns] col_index = [(col, columns.index(col)) for col in self.columns] keys = np.empty(selected.shape, dtype=np.uint64) for o, (idx, row) in enumerate(selected.iterrows()): for j, (col, v) in enumerate(col_index): val = row[col] # Allow columns with mixed str and None (GH 23633) val = '' if val is None else val key = gso_table.get(val, None) if key is None: # Stata prefers human numbers key = (v + 1, o + 1) gso_table[val] = key keys[o, j] = self._convert_key(key) for i, col in enumerate(self.columns): gso_df[col] = keys[:, i] return gso_table, gso_df
[ "def", "generate_table", "(", "self", ")", ":", "gso_table", "=", "self", ".", "_gso_table", "gso_df", "=", "self", ".", "df", "columns", "=", "list", "(", "gso_df", ".", "columns", ")", "selected", "=", "gso_df", "[", "self", ".", "columns", "]", "col_index", "=", "[", "(", "col", ",", "columns", ".", "index", "(", "col", ")", ")", "for", "col", "in", "self", ".", "columns", "]", "keys", "=", "np", ".", "empty", "(", "selected", ".", "shape", ",", "dtype", "=", "np", ".", "uint64", ")", "for", "o", ",", "(", "idx", ",", "row", ")", "in", "enumerate", "(", "selected", ".", "iterrows", "(", ")", ")", ":", "for", "j", ",", "(", "col", ",", "v", ")", "in", "enumerate", "(", "col_index", ")", ":", "val", "=", "row", "[", "col", "]", "# Allow columns with mixed str and None (GH 23633)", "val", "=", "''", "if", "val", "is", "None", "else", "val", "key", "=", "gso_table", ".", "get", "(", "val", ",", "None", ")", "if", "key", "is", "None", ":", "# Stata prefers human numbers", "key", "=", "(", "v", "+", "1", ",", "o", "+", "1", ")", "gso_table", "[", "val", "]", "=", "key", "keys", "[", "o", ",", "j", "]", "=", "self", ".", "_convert_key", "(", "key", ")", "for", "i", ",", "col", "in", "enumerate", "(", "self", ".", "columns", ")", ":", "gso_df", "[", "col", "]", "=", "keys", "[", ":", ",", "i", "]", "return", "gso_table", ",", "gso_df" ]
32.72549
[ 0.04, 0.18181818181818182, 0.03571428571428571, 0, 0.13333333333333333, 0.13333333333333333, 0.0967741935483871, 0.03278688524590164, 0.07547169811320754, 0.11538461538461539, 0.031746031746031744, 0.125, 0, 0.15384615384615385, 0.15384615384615385, 0.075, 0, 0.056338028169014086, 0.029411764705882353, 0, 0.05128205128205128, 0, 0.029850746268656716, 0.08695652173913043, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.18181818181818182, 0, 0.05714285714285714, 0.08333333333333333, 0.05263157894736842, 0.05128205128205128, 0.028169014084507043, 0.03571428571428571, 0.03333333333333333, 0.038461538461538464, 0.06666666666666667, 0.030303030303030304, 0.041666666666666664, 0.043478260869565216, 0.06451612903225806, 0.04081632653061224, 0.05, 0.05, 0.0392156862745098, 0.043478260869565216, 0.05555555555555555, 0, 0.0625 ]
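A worked instance of the (v, o) encoding described in the record's docstring, using the o_size = 4 layout listed for format 117 (the concrete v and o values are invented):

o_size = 4
v, o = 3, 12                     # 1-based column and observation keys
enc = v + o * 2 ** (o_size * 8)  # v in the low bits, o shifted above them
assert enc == 3 + 12 * 2 ** 32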
def present(name, value=None, contains=None, excludes=None): ''' Verify that the variable is in the ``make.conf`` and has the provided settings. If value is set, contains and excludes will be ignored. name The variable name. This will automatically be converted to upper case since variables in ``make.conf`` are in upper case value Enforce that the value of the variable is set to the provided value contains Enforce that the value of the variable contains the provided value excludes Enforce that the value of the variable does not contain the provided value. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} # Make name all Uppers since make.conf uses all Upper vars upper_name = name.upper() old_value = __salt__['makeconf.get_var'](upper_name) # If only checking if variable is present allows for setting the # variable outside of salt states, but the state can still ensure # that is exists if value is None and contains is None and excludes is None: # variable is present if old_value is not None: msg = 'Variable {0} is already present in make.conf' ret['comment'] = msg.format(name) else: if __opts__['test']: msg = 'Variable {0} is to be set in make.conf' ret['comment'] = msg.format(name) ret['result'] = None else: changes = __salt__['makeconf.set_var'](upper_name, '') # If failed to be set if changes[upper_name]['new'] is None: msg = 'Variable {0} failed to be set in make.conf' ret['comment'] = msg.format(name) ret['result'] = False else: msg = 'Variable {0} set in make.conf' ret['comment'] = msg.format(name) elif value is not None: # variable is present and is set to value if old_value is not None and old_value == value: msg = 'Variable {0} is already "{1}" in make.conf' ret['comment'] = msg.format(name, value) else: if __opts__['test']: msg = 'Variable {0} is to be set to "{1}" in make.conf' ret['comment'] = msg.format(name, value) ret['result'] = None else: changes = __salt__['makeconf.set_var'](upper_name, value) # If failed to be set new_value = __salt__['makeconf.get_var'](upper_name) if new_value is None or new_value != value: msg = 'Variable {0} failed to be set in make.conf' ret['comment'] = msg.format(name) ret['result'] = False else: msg = 'Variable {0} is set in make.conf' ret['changes'] = changes ret['comment'] = msg.format(name) elif contains is not None or excludes is not None: # Make these into sets to easily compare things contains_set = _make_set(contains) excludes_set = _make_set(excludes) old_value_set = _make_set(old_value) if contains_set.intersection(excludes_set): msg = 'Variable {0} cannot contain and exclude the same value' ret['comment'] = msg.format(name) ret['result'] = False else: to_append = set() to_trim = set() if contains is not None: to_append = contains_set.difference(old_value_set) if excludes is not None: to_trim = excludes_set.intersection(old_value_set) if not to_append and not to_trim: msg = 'Variable {0} is correct in make.conf' ret['comment'] = msg.format(name) else: if __opts__['test']: msg = 'Variable {0} is set to'.format(name) if to_append: msg += ' append "{0}"'.format(list(to_append)) if to_trim: msg += ' trim "{0}"'.format(list(to_trim)) msg += ' in make.conf' ret['comment'] = msg ret['result'] = None else: for value in to_append: __salt__['makeconf.append_var'](upper_name, value) for value in to_trim: __salt__['makeconf.trim_var'](upper_name, value) new_value = __salt__['makeconf.get_var'](upper_name) # TODO verify appends and trims worked ret['changes'] = {upper_name: {'old': old_value, 'new': new_value}} msg = 'Variable 
{0} is correct in make.conf' ret['comment'] = msg.format(name) # Now finally return return ret
[ "def", "present", "(", "name", ",", "value", "=", "None", ",", "contains", "=", "None", ",", "excludes", "=", "None", ")", ":", "ret", "=", "{", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'name'", ":", "name", ",", "'result'", ":", "True", "}", "# Make name all Uppers since make.conf uses all Upper vars", "upper_name", "=", "name", ".", "upper", "(", ")", "old_value", "=", "__salt__", "[", "'makeconf.get_var'", "]", "(", "upper_name", ")", "# If only checking if variable is present allows for setting the", "# variable outside of salt states, but the state can still ensure", "# that is exists", "if", "value", "is", "None", "and", "contains", "is", "None", "and", "excludes", "is", "None", ":", "# variable is present", "if", "old_value", "is", "not", "None", ":", "msg", "=", "'Variable {0} is already present in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "else", ":", "if", "__opts__", "[", "'test'", "]", ":", "msg", "=", "'Variable {0} is to be set in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "else", ":", "changes", "=", "__salt__", "[", "'makeconf.set_var'", "]", "(", "upper_name", ",", "''", ")", "# If failed to be set", "if", "changes", "[", "upper_name", "]", "[", "'new'", "]", "is", "None", ":", "msg", "=", "'Variable {0} failed to be set in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "False", "else", ":", "msg", "=", "'Variable {0} set in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "elif", "value", "is", "not", "None", ":", "# variable is present and is set to value", "if", "old_value", "is", "not", "None", "and", "old_value", "==", "value", ":", "msg", "=", "'Variable {0} is already \"{1}\" in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ",", "value", ")", "else", ":", "if", "__opts__", "[", "'test'", "]", ":", "msg", "=", "'Variable {0} is to be set to \"{1}\" in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ",", "value", ")", "ret", "[", "'result'", "]", "=", "None", "else", ":", "changes", "=", "__salt__", "[", "'makeconf.set_var'", "]", "(", "upper_name", ",", "value", ")", "# If failed to be set", "new_value", "=", "__salt__", "[", "'makeconf.get_var'", "]", "(", "upper_name", ")", "if", "new_value", "is", "None", "or", "new_value", "!=", "value", ":", "msg", "=", "'Variable {0} failed to be set in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "False", "else", ":", "msg", "=", "'Variable {0} is set in make.conf'", "ret", "[", "'changes'", "]", "=", "changes", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "elif", "contains", "is", "not", "None", "or", "excludes", "is", "not", "None", ":", "# Make these into sets to easily compare things", "contains_set", "=", "_make_set", "(", "contains", ")", "excludes_set", "=", "_make_set", "(", "excludes", ")", "old_value_set", "=", "_make_set", "(", "old_value", ")", "if", "contains_set", ".", "intersection", "(", "excludes_set", ")", ":", "msg", "=", "'Variable {0} cannot contain and exclude the same value'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "False", "else", ":", "to_append", "=", "set", "(", ")", "to_trim", "=", "set", 
"(", ")", "if", "contains", "is", "not", "None", ":", "to_append", "=", "contains_set", ".", "difference", "(", "old_value_set", ")", "if", "excludes", "is", "not", "None", ":", "to_trim", "=", "excludes_set", ".", "intersection", "(", "old_value_set", ")", "if", "not", "to_append", "and", "not", "to_trim", ":", "msg", "=", "'Variable {0} is correct in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "else", ":", "if", "__opts__", "[", "'test'", "]", ":", "msg", "=", "'Variable {0} is set to'", ".", "format", "(", "name", ")", "if", "to_append", ":", "msg", "+=", "' append \"{0}\"'", ".", "format", "(", "list", "(", "to_append", ")", ")", "if", "to_trim", ":", "msg", "+=", "' trim \"{0}\"'", ".", "format", "(", "list", "(", "to_trim", ")", ")", "msg", "+=", "' in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", "ret", "[", "'result'", "]", "=", "None", "else", ":", "for", "value", "in", "to_append", ":", "__salt__", "[", "'makeconf.append_var'", "]", "(", "upper_name", ",", "value", ")", "for", "value", "in", "to_trim", ":", "__salt__", "[", "'makeconf.trim_var'", "]", "(", "upper_name", ",", "value", ")", "new_value", "=", "__salt__", "[", "'makeconf.get_var'", "]", "(", "upper_name", ")", "# TODO verify appends and trims worked", "ret", "[", "'changes'", "]", "=", "{", "upper_name", ":", "{", "'old'", ":", "old_value", ",", "'new'", ":", "new_value", "}", "}", "msg", "=", "'Variable {0} is correct in make.conf'", "ret", "[", "'comment'", "]", "=", "msg", ".", "format", "(", "name", ")", "# Now finally return", "return", "ret" ]
40.663934
[ 0.016666666666666666, 0.2857142857142857, 0.0410958904109589, 0.028985507246376812, 0, 0.25, 0.027777777777777776, 0.047619047619047616, 0, 0.2222222222222222, 0.02666666666666667, 0, 0.16666666666666666, 0.02702702702702703, 0, 0.16666666666666666, 0.02631578947368421, 0.14285714285714285, 0.2857142857142857, 0.12, 0.12, 0.125, 0.15384615384615385, 0, 0.03225806451612903, 0.06896551724137931, 0, 0.03571428571428571, 0, 0.029411764705882353, 0.028985507246376812, 0.1, 0.031746031746031744, 0.06896551724137931, 0.06060606060606061, 0.03125, 0.044444444444444446, 0.15384615384615385, 0.0625, 0.03225806451612903, 0.04081632653061224, 0.05555555555555555, 0.11764705882352941, 0.02857142857142857, 0, 0.05405405405405406, 0.037037037037037035, 0.02857142857142857, 0.03773584905660377, 0.04878048780487805, 0.09523809523809523, 0.03508771929824561, 0.03773584905660377, 0, 0.07407407407407407, 0.04081632653061224, 0.03571428571428571, 0.03225806451612903, 0.038461538461538464, 0.15384615384615385, 0.0625, 0.028169014084507043, 0.03571428571428571, 0.05555555555555555, 0.11764705882352941, 0.0273972602739726, 0, 0.05405405405405406, 0.029411764705882353, 0.03389830508474576, 0.02857142857142857, 0.03773584905660377, 0.04878048780487805, 0.09523809523809523, 0.03333333333333333, 0.045454545454545456, 0.03773584905660377, 0, 0.037037037037037035, 0.03636363636363636, 0.047619047619047616, 0.047619047619047616, 0.045454545454545456, 0.0392156862745098, 0.02702702702702703, 0.044444444444444446, 0.06060606060606061, 0.15384615384615385, 0.06896551724137931, 0.07407407407407407, 0.05555555555555555, 0.030303030303030304, 0.05555555555555555, 0.030303030303030304, 0.044444444444444446, 0.03333333333333333, 0.04081632653061224, 0.11764705882352941, 0.05555555555555555, 0.031746031746031744, 0.06060606060606061, 0.02857142857142857, 0.06451612903225806, 0.030303030303030304, 0.047619047619047616, 0.05, 0.05, 0.09523809523809523, 0.046511627906976744, 0.02702702702702703, 0.04878048780487805, 0.027777777777777776, 0.027777777777777776, 0, 0.034482758620689655, 0.04411764705882353, 0.058823529411764705, 0.03125, 0.03773584905660377, 0, 0.08333333333333333, 0.14285714285714285 ]
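A hypothetical direct call to the state function above; note it only works inside a Salt minion process, where the `__salt__` and `__opts__` dunders are injected by the loader:

ret = present("makeopts", contains="-j4")  # invented variable and value
assert set(ret) == {"name", "result", "comment", "changes"}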
def fast_deduplication_backup(self, old_backup_entry, process_bar): """ We can just link an old backup entry :param old_backup_entry: old BackupEntry model instance :param process_bar: tqdm process bar """ # TODO: merge code with parts from deduplication_backup() src_path = self.dir_path.resolved_path log.debug("*** fast deduplication backup: '%s'", src_path) old_file_path = old_backup_entry.get_backup_path() if not self.path_helper.abs_dst_path.is_dir(): try: self.path_helper.abs_dst_path.makedirs(mode=phlb_config.default_new_path_mode) except OSError as err: raise BackupFileError("Error creating out path: %s" % err) else: assert not self.path_helper.abs_dst_filepath.is_file(), ( "Out file already exists: %r" % self.path_helper.abs_src_filepath ) with self.path_helper.abs_dst_hash_filepath.open("w") as hash_file: try: old_file_path.link(self.path_helper.abs_dst_filepath) # call os.link() except OSError as err: log.error("Can't link '%s' to '%s': %s" % (old_file_path, self.path_helper.abs_dst_filepath, err)) log.info("Mark %r with 'no link source'.", old_backup_entry) old_backup_entry.no_link_source = True old_backup_entry.save() # do a normal copy backup self.deduplication_backup(process_bar) return hash_hexdigest = old_backup_entry.content_info.hash_hexdigest hash_file.write(hash_hexdigest) file_size = self.dir_path.stat.st_size if file_size > 0: # tqdm will not accept 0 bytes files ;) process_bar.update(file_size) BackupEntry.objects.create( backup_run=self.backup_run, backup_entry_path=self.path_helper.abs_dst_filepath, hash_hexdigest=hash_hexdigest, ) if self._SIMULATE_SLOW_SPEED: log.error("Slow down speed for tests!") time.sleep(self._SIMULATE_SLOW_SPEED) self.fast_backup = True # Was a fast backup used? self.file_linked = True
[ "def", "fast_deduplication_backup", "(", "self", ",", "old_backup_entry", ",", "process_bar", ")", ":", "# TODO: merge code with parts from deduplication_backup()", "src_path", "=", "self", ".", "dir_path", ".", "resolved_path", "log", ".", "debug", "(", "\"*** fast deduplication backup: '%s'\"", ",", "src_path", ")", "old_file_path", "=", "old_backup_entry", ".", "get_backup_path", "(", ")", "if", "not", "self", ".", "path_helper", ".", "abs_dst_path", ".", "is_dir", "(", ")", ":", "try", ":", "self", ".", "path_helper", ".", "abs_dst_path", ".", "makedirs", "(", "mode", "=", "phlb_config", ".", "default_new_path_mode", ")", "except", "OSError", "as", "err", ":", "raise", "BackupFileError", "(", "\"Error creating out path: %s\"", "%", "err", ")", "else", ":", "assert", "not", "self", ".", "path_helper", ".", "abs_dst_filepath", ".", "is_file", "(", ")", ",", "(", "\"Out file already exists: %r\"", "%", "self", ".", "path_helper", ".", "abs_src_filepath", ")", "with", "self", ".", "path_helper", ".", "abs_dst_hash_filepath", ".", "open", "(", "\"w\"", ")", "as", "hash_file", ":", "try", ":", "old_file_path", ".", "link", "(", "self", ".", "path_helper", ".", "abs_dst_filepath", ")", "# call os.link()", "except", "OSError", "as", "err", ":", "log", ".", "error", "(", "\"Can't link '%s' to '%s': %s\"", "%", "(", "old_file_path", ",", "self", ".", "path_helper", ".", "abs_dst_filepath", ",", "err", ")", ")", "log", ".", "info", "(", "\"Mark %r with 'no link source'.\"", ",", "old_backup_entry", ")", "old_backup_entry", ".", "no_link_source", "=", "True", "old_backup_entry", ".", "save", "(", ")", "# do a normal copy backup", "self", ".", "deduplication_backup", "(", "process_bar", ")", "return", "hash_hexdigest", "=", "old_backup_entry", ".", "content_info", ".", "hash_hexdigest", "hash_file", ".", "write", "(", "hash_hexdigest", ")", "file_size", "=", "self", ".", "dir_path", ".", "stat", ".", "st_size", "if", "file_size", ">", "0", ":", "# tqdm will not accept 0 bytes files ;)", "process_bar", ".", "update", "(", "file_size", ")", "BackupEntry", ".", "objects", ".", "create", "(", "backup_run", "=", "self", ".", "backup_run", ",", "backup_entry_path", "=", "self", ".", "path_helper", ".", "abs_dst_filepath", ",", "hash_hexdigest", "=", "hash_hexdigest", ",", ")", "if", "self", ".", "_SIMULATE_SLOW_SPEED", ":", "log", ".", "error", "(", "\"Slow down speed for tests!\"", ")", "time", ".", "sleep", "(", "self", ".", "_SIMULATE_SLOW_SPEED", ")", "self", ".", "fast_backup", "=", "True", "# Was a fast backup used?", "self", ".", "file_linked", "=", "True" ]
40.454545
[ 0.014925373134328358, 0.18181818181818182, 0.046511627906976744, 0, 0.05, 0.06818181818181818, 0.18181818181818182, 0.03076923076923077, 0.043478260869565216, 0.030303030303030304, 0.034482758620689655, 0, 0.037037037037037035, 0.125, 0.031914893617021274, 0.058823529411764705, 0.02702702702702703, 0.15384615384615385, 0.043478260869565216, 0.037037037037037035, 0.23076923076923078, 0, 0.02666666666666667, 0.125, 0.034482758620689655, 0.058823529411764705, 0.02631578947368421, 0.02631578947368421, 0.037037037037037035, 0.05128205128205128, 0, 0.04878048780487805, 0.037037037037037035, 0.09090909090909091, 0, 0.0273972602739726, 0.046511627906976744, 0, 0.043478260869565216, 0.08, 0.0392156862745098, 0.04878048780487805, 0, 0.08571428571428572, 0.07692307692307693, 0.046875, 0.07142857142857142, 0.3333333333333333, 0, 0.05405405405405406, 0.0392156862745098, 0.04081632653061224, 0, 0.034482758620689655, 0.06451612903225806 ]
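A framework-free sketch of the hardlink deduplication idea the record above implements; every name here is invented, and the fallback mirrors its "normal copy backup" branch:

import os
import shutil

def link_or_copy(old_path, new_path):
    try:
        os.link(old_path, new_path)  # reuse the stored blob: no new bytes written
        return True                  # fast path: file was hardlinked
    except OSError:
        shutil.copy2(old_path, new_path)  # fall back to a full copy
        return False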
def modify(self, **params): """https://developers.coinbase.com/api#modify-an-account""" data = self.api_client.update_account(self.id, **params) self.update(data) return data
[ "def", "modify", "(", "self", ",", "*", "*", "params", ")", ":", "data", "=", "self", ".", "api_client", ".", "update_account", "(", "self", ".", "id", ",", "*", "*", "params", ")", "self", ".", "update", "(", "data", ")", "return", "data" ]
40.4
[ 0.037037037037037035, 0.029850746268656716, 0.03125, 0.08, 0.10526315789473684 ]
def result_to_dict(raw_result): """ Parse raw result from fetcher into readable dictionary Args: raw_result (list) - raw data from `fetcher` Returns: dict - readable dictionary """ result = {} for channel_index, channel in enumerate(raw_result): channel_id, channel_name = channel[0], channel[1] channel_result = { 'id': channel_id, 'name': channel_name, 'movies': [] } for movie in channel[2]: channel_result['movies'].append({ 'title': movie[1], 'start_time': datetime.fromtimestamp(movie[2]), 'end_time': datetime.fromtimestamp(movie[2] + movie[3]), 'inf': True if movie[3] else False, }) result[channel_id] = channel_result return result
[ "def", "result_to_dict", "(", "raw_result", ")", ":", "result", "=", "{", "}", "for", "channel_index", ",", "channel", "in", "enumerate", "(", "raw_result", ")", ":", "channel_id", ",", "channel_name", "=", "channel", "[", "0", "]", ",", "channel", "[", "1", "]", "channel_result", "=", "{", "'id'", ":", "channel_id", ",", "'name'", ":", "channel_name", ",", "'movies'", ":", "[", "]", "}", "for", "movie", "in", "channel", "[", "2", "]", ":", "channel_result", "[", "'movies'", "]", ".", "append", "(", "{", "'title'", ":", "movie", "[", "1", "]", ",", "'start_time'", ":", "datetime", ".", "fromtimestamp", "(", "movie", "[", "2", "]", ")", ",", "'end_time'", ":", "datetime", ".", "fromtimestamp", "(", "movie", "[", "2", "]", "+", "movie", "[", "3", "]", ")", ",", "'inf'", ":", "True", "if", "movie", "[", "3", "]", "else", "False", ",", "}", ")", "result", "[", "channel_id", "]", "=", "channel_result", "return", "result" ]
27.633333
[ 0.03225806451612903, 0.2857142857142857, 0.034482758620689655, 0, 0.2222222222222222, 0.0784313725490196, 0, 0.16666666666666666, 0.058823529411764705, 0.2857142857142857, 0, 0.13333333333333333, 0, 0.03571428571428571, 0.03508771929824561, 0.11538461538461539, 0.06896551724137931, 0.06060606060606061, 0.08333333333333333, 0.3333333333333333, 0.0625, 0.06666666666666667, 0.058823529411764705, 0.031746031746031744, 0.027777777777777776, 0.0392156862745098, 0.21428571428571427, 0.046511627906976744, 0, 0.11764705882352941 ]
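The input shape the parser above expects can be read off its indexing (movie[0] is unused, movie[1] is the title, movie[2] and movie[3] are the start timestamp and duration in seconds); the concrete values below are invented:

raw = [
    (7, "News 24", [
        (0, "Headlines", 1500000000, 1800),  # 30-minute programme
    ]),
]
guide = result_to_dict(raw)
guide[7]["movies"][0]["end_time"]  # datetime.fromtimestamp(1500001800)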
def get_bgp_neighbors(self, **kwargs): """Get BGP neighbors configured on a device. Args: rbridge_id (str): The rbridge ID of the device on which BGP will be configured in a VCS fabric. vrf (str): The VRF for this BGP process. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: List of 0 or more BGP Neighbors on the specified rbridge. Examples: >>> import pynos.device >>> conn = ('10.24.39.211', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.local_asn(local_as='65535', ... rbridge_id='225') ... output = dev.bgp.neighbor(ip_addr='10.10.10.10', ... remote_as='65535', rbridge_id='225') ... output = dev.bgp.neighbor(remote_as='65535', ... rbridge_id='225', ... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1') ... result = dev.bgp.get_bgp_neighbors(rbridge_id='225') ... assert len(result) >= 1 ... output = dev.bgp.neighbor(ip_addr='10.10.10.10', ... delete=True, rbridge_id='225') ... output = dev.bgp.neighbor(delete=True, rbridge_id='225', ... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.bgp.neighbor() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): NotImplementedError KeyError """ callback = kwargs.pop('callback', self._callback) neighbor_args = dict(router_bgp_neighbor_address='', remote_as='', vrf_name=kwargs.pop('vrf', 'default'), rbridge_id=kwargs.pop('rbridge_id', '1')) neighbor = getattr(self._rbridge, 'rbridge_id_router_bgp_router_bgp_cmds_holder_' 'router_bgp_attributes_neighbor_ips_' 'neighbor_addr_remote_as') config = neighbor(**neighbor_args) output = callback(config, handler='get_config') result = [] urn = "{urn:brocade.com:mgmt:brocade-bgp}" # IPv4 BGP Neighbor Handling for item in output.data.findall( './/{*}neighbor-addr'): neighbor_address = item.find( '%srouter-bgp-neighbor-address' % urn).text remote_as = item.find('%sremote-as' % urn).text item_results = {'neighbor-address': neighbor_address, 'remote-as': remote_as} result.append(item_results) # IPv6 BGP Neighbor handling neighbor_args['router_bgp_neighbor_ipv6_address'] = '' neighbor = getattr(self._rbridge, 'rbridge_id_router_bgp_router_bgp_cmds_holder_' 'router_bgp_attributes_neighbor_ipv6s_neighbor_' 'ipv6_addr_remote_as') config = neighbor(**neighbor_args) output = callback(config, handler='get_config') for item in output.data.findall( './/{*}neighbor-ipv6-addr'): neighbor_address = item.find( '%srouter-bgp-neighbor-ipv6-address' % urn).text remote_as = item.find('%sremote-as' % urn).text item_results = {'neighbor-address': neighbor_address, 'remote-as': remote_as} result.append(item_results) return result
[ "def", "get_bgp_neighbors", "(", "self", ",", "*", "*", "kwargs", ")", ":", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "neighbor_args", "=", "dict", "(", "router_bgp_neighbor_address", "=", "''", ",", "remote_as", "=", "''", ",", "vrf_name", "=", "kwargs", ".", "pop", "(", "'vrf'", ",", "'default'", ")", ",", "rbridge_id", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ",", "'1'", ")", ")", "neighbor", "=", "getattr", "(", "self", ".", "_rbridge", ",", "'rbridge_id_router_bgp_router_bgp_cmds_holder_'", "'router_bgp_attributes_neighbor_ips_'", "'neighbor_addr_remote_as'", ")", "config", "=", "neighbor", "(", "*", "*", "neighbor_args", ")", "output", "=", "callback", "(", "config", ",", "handler", "=", "'get_config'", ")", "result", "=", "[", "]", "urn", "=", "\"{urn:brocade.com:mgmt:brocade-bgp}\"", "# IPv4 BGP Neighbor Handling", "for", "item", "in", "output", ".", "data", ".", "findall", "(", "'.//{*}neighbor-addr'", ")", ":", "neighbor_address", "=", "item", ".", "find", "(", "'%srouter-bgp-neighbor-address'", "%", "urn", ")", ".", "text", "remote_as", "=", "item", ".", "find", "(", "'%sremote-as'", "%", "urn", ")", ".", "text", "item_results", "=", "{", "'neighbor-address'", ":", "neighbor_address", ",", "'remote-as'", ":", "remote_as", "}", "result", ".", "append", "(", "item_results", ")", "# IPv6 BGP Neighbor handling", "neighbor_args", "[", "'router_bgp_neighbor_ipv6_address'", "]", "=", "''", "neighbor", "=", "getattr", "(", "self", ".", "_rbridge", ",", "'rbridge_id_router_bgp_router_bgp_cmds_holder_'", "'router_bgp_attributes_neighbor_ipv6s_neighbor_'", "'ipv6_addr_remote_as'", ")", "config", "=", "neighbor", "(", "*", "*", "neighbor_args", ")", "output", "=", "callback", "(", "config", ",", "handler", "=", "'get_config'", ")", "for", "item", "in", "output", ".", "data", ".", "findall", "(", "'.//{*}neighbor-ipv6-addr'", ")", ":", "neighbor_address", "=", "item", ".", "find", "(", "'%srouter-bgp-neighbor-ipv6-address'", "%", "urn", ")", ".", "text", "remote_as", "=", "item", ".", "find", "(", "'%sremote-as'", "%", "urn", ")", ".", "text", "item_results", "=", "{", "'neighbor-address'", ":", "neighbor_address", ",", "'remote-as'", ":", "remote_as", "}", "result", ".", "append", "(", "item_results", ")", "return", "result" ]
44.650602
[ 0.02631578947368421, 0.038461538461538464, 0, 0.15384615384615385, 0.0379746835443038, 0.046511627906976744, 0.057692307692307696, 0.04, 0.039473684210526314, 0.07317073170731707, 0, 0.125, 0.03333333333333333, 0.1, 0, 0.11764705882352941, 0.08571428571428572, 0.06666666666666667, 0.06818181818181818, 0.04285714285714286, 0.046875, 0.10810810810810811, 0.04411764705882353, 0.08928571428571429, 0.046875, 0.08108108108108109, 0.06060606060606061, 0.027777777777777776, 0.06976744186046512, 0.04411764705882353, 0.1, 0.039473684210526314, 0.06060606060606061, 0.04054054054054054, 0.06521739130434782, 0.06451612903225806, 0.1, 0.18181818181818182, 0.03508771929824561, 0, 0.05, 0.09523809523809523, 0.05970149253731343, 0.07142857142857142, 0, 0.07317073170731707, 0.04054054054054054, 0.046875, 0.07547169811320754, 0.047619047619047616, 0.03636363636363636, 0.10526315789473684, 0.04, 0.05555555555555555, 0.075, 0.07692307692307693, 0.07317073170731707, 0.05084745762711865, 0.03389830508474576, 0, 0.046153846153846156, 0.058823529411764705, 0.05128205128205128, 0, 0.05555555555555555, 0.03225806451612903, 0.07317073170731707, 0.04054054054054054, 0.04, 0.08163265306122448, 0.047619047619047616, 0.03636363636363636, 0.075, 0.06818181818181818, 0.07317073170731707, 0.046875, 0.03389830508474576, 0, 0.046153846153846156, 0.058823529411764705, 0.05128205128205128, 0, 0.09523809523809523 ]
def validate_func_factory(validator_class: Any) -> ValidateFunc: """Provide default function for Schema validation. :param validator_class: JSON Schema-compatible validator class. """ def validate_func(schema: AnyMapping, pure_data: AnyMapping) -> AnyMapping: """Validate schema with given data. :param schema: Schema representation to use. :param pure_data: Pure data to validate. """ return validator_class(schema).validate(pure_data) return validate_func
[ "def", "validate_func_factory", "(", "validator_class", ":", "Any", ")", "->", "ValidateFunc", ":", "def", "validate_func", "(", "schema", ":", "AnyMapping", ",", "pure_data", ":", "AnyMapping", ")", "->", "AnyMapping", ":", "\"\"\"Validate schema with given data.\n\n :param schema: Schema representation to use.\n :param pure_data: Pure data to validate.\n \"\"\"", "return", "validator_class", "(", "schema", ")", ".", "validate", "(", "pure_data", ")", "return", "validate_func" ]
38.769231
[ 0.015625, 0.037037037037037035, 0, 0.046875, 0.2857142857142857, 0.02531645569620253, 0.046511627906976744, 0, 0.057692307692307696, 0.0625, 0.18181818181818182, 0.034482758620689655, 0.08333333333333333 ]
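One plausible wiring of the factory above with the jsonschema package, whose Draft7Validator matches the expected construct-then-validate interface (the schema and data are illustrative):

from jsonschema import Draft7Validator

validate = validate_func_factory(Draft7Validator)
validate({"type": "object", "required": ["id"]}, {"id": 1})  # passes silently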
def calc_custom(custom, genome, scaffold, sequence, scaffold_coverage, total_bases): """ custom = {(bases mapped to scaffold)/(total bases for sample)}/(length of scaffold) """ index = 0 if scaffold in scaffold_coverage: # what if the scaffold does not have bases mapped back to it? (this *should* not happen) if genome not in custom: custom[genome] = [[] for i in scaffold_coverage[scaffold]] for cov in scaffold_coverage[scaffold]: length = float(len(sequence[1])) bases = cov * length custom_value = ((bases) / (total_bases[index])) / length custom[genome][index].append(custom_value) index += 1 return custom
[ "def", "calc_custom", "(", "custom", ",", "genome", ",", "scaffold", ",", "sequence", ",", "scaffold_coverage", ",", "total_bases", ")", ":", "index", "=", "0", "if", "scaffold", "in", "scaffold_coverage", ":", "# what if the scaffold does not have bases mapped back to it? (this *should* not happen)", "if", "genome", "not", "in", "custom", ":", "custom", "[", "genome", "]", "=", "[", "[", "]", "for", "i", "in", "scaffold_coverage", "[", "scaffold", "]", "]", "for", "cov", "in", "scaffold_coverage", "[", "scaffold", "]", ":", "length", "=", "float", "(", "len", "(", "sequence", "[", "1", "]", ")", ")", "bases", "=", "cov", "*", "length", "custom_value", "=", "(", "(", "bases", ")", "/", "(", "total_bases", "[", "index", "]", ")", ")", "/", "length", "custom", "[", "genome", "]", "[", "index", "]", ".", "append", "(", "custom_value", ")", "index", "+=", "1", "return", "custom" ]
41.733333
[ 0.023809523809523808, 0.75, 0.047619047619047616, 0.75, 0.3, 0.04065040650406504, 0.11538461538461539, 0.04918032786885246, 0.07317073170731707, 0.08571428571428572, 0.13043478260869565, 0.05084745762711865, 0.06666666666666667, 0.23076923076923078, 0.21428571428571427 ]
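A numeric check of the formula above: since bases = coverage * length, the length terms cancel and the metric reduces to coverage / total_bases (the values below are invented):

import math

coverage, length, total_bases = 10.0, 2000.0, 1000000.0
custom_value = ((coverage * length) / total_bases) / length
assert math.isclose(custom_value, coverage / total_bases)  # == 1e-05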
def _linear_inverse_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for linear smoothing.""" n = float(cos_dist.size) radius = _kamb_radius(n, sigma) f = 2 / (1 - radius) cos_dist = cos_dist[cos_dist >= radius] count = (f * (cos_dist - radius)) return count, _kamb_units(n, radius)
[ "def", "_linear_inverse_kamb", "(", "cos_dist", ",", "sigma", "=", "3", ")", ":", "n", "=", "float", "(", "cos_dist", ".", "size", ")", "radius", "=", "_kamb_radius", "(", "n", ",", "sigma", ")", "f", "=", "2", "/", "(", "1", "-", "radius", ")", "cos_dist", "=", "cos_dist", "[", "cos_dist", ">=", "radius", "]", "count", "=", "(", "f", "*", "(", "cos_dist", "-", "radius", ")", ")", "return", "count", ",", "_kamb_units", "(", "n", ",", "radius", ")" ]
38.875
[ 0.022727272727272728, 0.03333333333333333, 0.07142857142857142, 0.05714285714285714, 0.08333333333333333, 0.046511627906976744, 0.05405405405405406, 0.05 ]
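The weighting in the record above is linear in cos_dist: zero at the cutoff radius and exactly 2 at perfect alignment (cos_dist = 1), since f*(1 - radius) = 2. A small numeric check with a hypothetical radius (the real one comes from _kamb_radius(n, sigma)):

import numpy as np

radius = 0.9  # hypothetical cutoff for illustration only
f = 2 / (1 - radius)
cos_dist = np.array([0.90, 0.95, 1.0])
print(f * (cos_dist - radius))  # [0. 1. 2.]: weight rises linearly from the cutoff to 2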
def callback(self, request, **kwargs):
    """
    Called from the Service when the user accepts to activate it
    """
    return super(ServiceTwitter, self).callback(request, **kwargs)
[ "def", "callback", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "ServiceTwitter", ",", "self", ")", ".", "callback", "(", "request", ",", "*", "*", "kwargs", ")" ]
40.2
[ 0.02631578947368421, 0.18181818181818182, 0.028169014084507043, 0.18181818181818182, 0.02857142857142857 ]
def run_field_processor(self, field_data):
    """
    Convert all '*_speed' fields using 'process_field_speed'
    All other units will use the default method.
    """
    if field_data.name.endswith("_speed"):
        self.process_field_speed(field_data)
    else:
        super(StandardUnitsDataProcessor, self).run_field_processor(field_data)
[ "def", "run_field_processor", "(", "self", ",", "field_data", ")", ":", "if", "field_data", ".", "name", ".", "endswith", "(", "\"_speed\"", ")", ":", "self", ".", "process_field_speed", "(", "field_data", ")", "else", ":", "super", "(", "StandardUnitsDataProcessor", ",", "self", ")", ".", "run_field_processor", "(", "field_data", ")" ]
41.111111
[ 0.023809523809523808, 0.18181818181818182, 0.03125, 0.038461538461538464, 0.18181818181818182, 0.043478260869565216, 0.041666666666666664, 0.15384615384615385, 0.03614457831325301 ]
def _close(self):
    """
    Release the USB interface again.
    """
    self._usb_handle.releaseInterface()
    try:
        # If we're using PyUSB >= 1.0 we can re-attach the kernel driver here.
        self._usb_handle.dev.attach_kernel_driver(0)
    except:
        pass
    self._usb_int = None
    self._usb_handle = None
    return True
[ "def", "_close", "(", "self", ")", ":", "self", ".", "_usb_handle", ".", "releaseInterface", "(", ")", "try", ":", "# If we're using PyUSB >= 1.0 we can re-attach the kernel driver here.", "self", ".", "_usb_handle", ".", "dev", ".", "attach_kernel_driver", "(", "0", ")", "except", ":", "pass", "self", ".", "_usb_int", "=", "None", "self", ".", "_usb_handle", "=", "None", "return", "True" ]
29.307692
[ 0.058823529411764705, 0.18181818181818182, 0.05, 0.18181818181818182, 0.046511627906976744, 0.16666666666666666, 0.036585365853658534, 0.03571428571428571, 0.2, 0.125, 0.07142857142857142, 0.06451612903225806, 0.10526315789473684 ]
def read_data(self, dstart=None, dend=None):
    """Read data from `file` and return it as Numpy array.

    Parameters
    ----------
    dstart : int, optional
        Offset in bytes of the data field. By default, it is taken to
        be the header size as determined from reading the header.
        Backwards indexing with negative values is also supported.
        Use a value larger than the header size to extract a data
        subset.
    dend : int, optional
        End position in bytes until which data is read (exclusive).
        Backwards indexing with negative values is also supported.
        Use a value different from the file size to extract a data
        subset.

    Returns
    -------
    data : `numpy.ndarray`
        The data read from `file`.

    See Also
    --------
    read_header
    """
    self.file.seek(0, 2)  # 2 means "from the end"
    filesize_bytes = self.file.tell()
    if dstart is None:
        dstart_abs = int(self.header_size)
    elif dstart < 0:
        dstart_abs = filesize_bytes + int(dstart)
    else:
        dstart_abs = int(dstart)

    if dend is None:
        dend_abs = int(filesize_bytes)
    elif dend < 0:
        dend_abs = int(dend) + filesize_bytes
    else:
        dend_abs = int(dend)

    if dstart_abs >= dend_abs:
        raise ValueError('invalid `dstart` and `dend`, resulting in '
                         'absolute `dstart` >= `dend` ({} >= {})'
                         ''.format(dstart_abs, dend_abs))
    if dstart_abs < self.header_size:
        raise ValueError('invalid `dstart`, resulting in absolute '
                         '`dstart` < `header_size` ({} < {})'
                         ''.format(dstart_abs, self.header_size))
    if dend_abs > filesize_bytes:
        raise ValueError('invalid `dend`, resulting in absolute '
                         '`dend` > `filesize_bytes` ({} < {})'
                         ''.format(dend_abs, filesize_bytes))

    num_elems = (dend_abs - dstart_abs) / self.data_dtype.itemsize
    if num_elems != int(num_elems):
        raise ValueError(
            'trying to read {} bytes, which is not a multiple of '
            'the itemsize {} of the data type {}'
            ''.format(dend_abs - dstart_abs, self.data_dtype.itemsize,
                      self.data_dtype))
    self.file.seek(dstart_abs)
    array = np.empty(int(num_elems), dtype=self.data_dtype)
    self.file.readinto(array.data)
    return array
[ "def", "read_data", "(", "self", ",", "dstart", "=", "None", ",", "dend", "=", "None", ")", ":", "self", ".", "file", ".", "seek", "(", "0", ",", "2", ")", "# 2 means \"from the end\"", "filesize_bytes", "=", "self", ".", "file", ".", "tell", "(", ")", "if", "dstart", "is", "None", ":", "dstart_abs", "=", "int", "(", "self", ".", "header_size", ")", "elif", "dstart", "<", "0", ":", "dstart_abs", "=", "filesize_bytes", "+", "int", "(", "dstart", ")", "else", ":", "dstart_abs", "=", "int", "(", "dstart", ")", "if", "dend", "is", "None", ":", "dend_abs", "=", "int", "(", "filesize_bytes", ")", "elif", "dend", "<", "0", ":", "dend_abs", "=", "int", "(", "dend", ")", "+", "filesize_bytes", "else", ":", "dend_abs", "=", "int", "(", "dend", ")", "if", "dstart_abs", ">=", "dend_abs", ":", "raise", "ValueError", "(", "'invalid `dstart` and `dend`, resulting in '", "'absolute `dstart` >= `dend` ({} >= {})'", "''", ".", "format", "(", "dstart_abs", ",", "dend_abs", ")", ")", "if", "dstart_abs", "<", "self", ".", "header_size", ":", "raise", "ValueError", "(", "'invalid `dstart`, resulting in absolute '", "'`dstart` < `header_size` ({} < {})'", "''", ".", "format", "(", "dstart_abs", ",", "self", ".", "header_size", ")", ")", "if", "dend_abs", ">", "filesize_bytes", ":", "raise", "ValueError", "(", "'invalid `dend`, resulting in absolute '", "'`dend` > `filesize_bytes` ({} < {})'", "''", ".", "format", "(", "dend_abs", ",", "filesize_bytes", ")", ")", "num_elems", "=", "(", "dend_abs", "-", "dstart_abs", ")", "/", "self", ".", "data_dtype", ".", "itemsize", "if", "num_elems", "!=", "int", "(", "num_elems", ")", ":", "raise", "ValueError", "(", "'trying to read {} bytes, which is not a multiple of '", "'the itemsize {} of the data type {}'", "''", ".", "format", "(", "dend_abs", "-", "dstart_abs", ",", "self", ".", "data_dtype", ".", "itemsize", ",", "self", ".", "data_dtype", ")", ")", "self", ".", "file", ".", "seek", "(", "dstart_abs", ")", "array", "=", "np", ".", "empty", "(", "int", "(", "num_elems", ")", ",", "dtype", "=", "self", ".", "data_dtype", ")", "self", ".", "file", ".", "readinto", "(", "array", ".", "data", ")", "return", "array" ]
40.5625
[ 0.022727272727272728, 0.03225806451612903, 0, 0.1111111111111111, 0.1111111111111111, 0.1, 0.0273972602739726, 0.028985507246376812, 0.02857142857142857, 0.025974025974025976, 0.10714285714285714, 0.04225352112676056, 0.02857142857142857, 0.02564102564102564, 0, 0.13333333333333333, 0.13333333333333333, 0.13333333333333333, 0.07894736842105263, 0, 0.125, 0.125, 0.10526315789473684, 0.18181818181818182, 0.037037037037037035, 0.04878048780487805, 0.07692307692307693, 0.043478260869565216, 0.08333333333333333, 0.03773584905660377, 0.15384615384615385, 0.05555555555555555, 0, 0.08333333333333333, 0.047619047619047616, 0.09090909090909091, 0.04081632653061224, 0.15384615384615385, 0.0625, 0, 0.058823529411764705, 0.0410958904109589, 0.043478260869565216, 0.06557377049180328, 0.04878048780487805, 0.04225352112676056, 0.046153846153846156, 0.057971014492753624, 0.05405405405405406, 0.043478260869565216, 0.045454545454545456, 0.06153846153846154, 0, 0.02857142857142857, 0.05128205128205128, 0.10344827586206896, 0.02857142857142857, 0.03773584905660377, 0.04054054054054054, 0.09302325581395349, 0.058823529411764705, 0.031746031746031744, 0.05263157894736842, 0.1 ]
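The offset handling in the record above mirrors Python's negative sequence indexing. A minimal sketch of just the range normalization (function name and values hypothetical):

def normalize_byte_range(filesize, header_size, dstart=None, dend=None):
    # None -> defaults (header size / file size); negatives count from the
    # end of the file, like Python sequence indexing.
    start = header_size if dstart is None else (filesize + dstart if dstart < 0 else dstart)
    end = filesize if dend is None else (filesize + dend if dend < 0 else dend)
    if start >= end:
        raise ValueError('empty or reversed range')
    return start, end

print(normalize_byte_range(1024, 128))        # (128, 1024) -> whole data field
print(normalize_byte_range(1024, 128, -100))  # (924, 1024) -> last 100 bytes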
def save_token(self):
    """
    Saves the token dict in the store
    :return bool: Success / Failure
    """
    if self.token is None:
        raise ValueError('You have to set the "token" first.')

    try:
        # set token will overwrite previous data
        self.doc_ref.set({
            self.field_name: self.serializer.dumps(self.token)
        })
    except Exception as e:
        log.error('Token could not be saved: {}'.format(str(e)))
        return False

    return True
[ "def", "save_token", "(", "self", ")", ":", "if", "self", ".", "token", "is", "None", ":", "raise", "ValueError", "(", "'You have to set the \"token\" first.'", ")", "try", ":", "# set token will overwrite previous data", "self", ".", "doc_ref", ".", "set", "(", "{", "self", ".", "field_name", ":", "self", ".", "serializer", ".", "dumps", "(", "self", ".", "token", ")", "}", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "'Token could not be saved: {}'", ".", "format", "(", "str", "(", "e", ")", ")", ")", "return", "False", "return", "True" ]
29.666667
[ 0.047619047619047616, 0.18181818181818182, 0.04878048780487805, 0.07692307692307693, 0.18181818181818182, 0.06666666666666667, 0.030303030303030304, 0, 0.16666666666666666, 0.038461538461538464, 0.1, 0.030303030303030304, 0.21428571428571427, 0.06666666666666667, 0.029411764705882353, 0.08333333333333333, 0, 0.10526315789473684 ]
def logspace_bins(self,bins=None,units=None,conversion_function=convert_time,resolution=None):
    """Generates bin edges for a logspace tiling: there is one edge more than bins and each bin is between two edges"""
    bins = self.logspace(bins=bins,units=units,conversion_function=conversion_function,resolution=resolution,end_at_end=False)
    resolution = np.mean((bins[:-1]) / (bins[1:]))
    bins = np.concatenate([bins*np.sqrt(resolution),bins[-1:]/np.sqrt(resolution)])
    return bins
[ "def", "logspace_bins", "(", "self", ",", "bins", "=", "None", ",", "units", "=", "None", ",", "conversion_function", "=", "convert_time", ",", "resolution", "=", "None", ")", ":", "bins", "=", "self", ".", "logspace", "(", "bins", "=", "bins", ",", "units", "=", "units", ",", "conversion_function", "=", "conversion_function", ",", "resolution", "=", "resolution", ",", "end_at_end", "=", "False", ")", "resolution", "=", "np", ".", "mean", "(", "(", "bins", "[", ":", "-", "1", "]", ")", "/", "(", "bins", "[", "1", ":", "]", ")", ")", "bins", "=", "np", ".", "concatenate", "(", "[", "bins", "*", "np", ".", "sqrt", "(", "resolution", ")", ",", "bins", "[", "-", "1", ":", "]", "/", "np", ".", "sqrt", "(", "resolution", ")", "]", ")", "return", "bins" ]
84.5
[ 0.06382978723404255, 0.024390243902439025, 0.05384615384615385, 0.037037037037037035, 0.04597701149425287, 0.10526315789473684 ]
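The edge construction in the record above shifts each log-spaced center by the geometric half-step (the square root of the ratio between neighbouring centers), then appends one extra upper edge. A minimal self-contained sketch with hypothetical centers:

import numpy as np

centers = np.logspace(0, 3, num=4)              # [1, 10, 100, 1000]
ratio = np.mean(centers[:-1] / centers[1:])     # 0.1: constant ratio of neighbours
edges = np.concatenate([centers * np.sqrt(ratio), centers[-1:] / np.sqrt(ratio)])
print(edges)  # 5 edges; each center is the geometric mean of its two edges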
def posterior_to_xarray(self):
    """Extract posterior samples from fit."""
    posterior = self.posterior
    posterior_model = self.posterior_model
    # filter posterior_predictive and log_likelihood
    posterior_predictive = self.posterior_predictive
    if posterior_predictive is None:
        posterior_predictive = []
    elif isinstance(posterior_predictive, str):
        posterior_predictive = [posterior_predictive]
    log_likelihood = self.log_likelihood
    if not isinstance(log_likelihood, str):
        log_likelihood = []
    else:
        log_likelihood = [log_likelihood]

    ignore = posterior_predictive + log_likelihood

    data = get_draws_stan3(posterior, model=posterior_model, ignore=ignore)

    return dict_to_dataset(data, library=self.stan, coords=self.coords, dims=self.dims)
[ "def", "posterior_to_xarray", "(", "self", ")", ":", "posterior", "=", "self", ".", "posterior", "posterior_model", "=", "self", ".", "posterior_model", "# filter posterior_predictive and log_likelihood\r", "posterior_predictive", "=", "self", ".", "posterior_predictive", "if", "posterior_predictive", "is", "None", ":", "posterior_predictive", "=", "[", "]", "elif", "isinstance", "(", "posterior_predictive", ",", "str", ")", ":", "posterior_predictive", "=", "[", "posterior_predictive", "]", "log_likelihood", "=", "self", ".", "log_likelihood", "if", "not", "isinstance", "(", "log_likelihood", ",", "str", ")", ":", "log_likelihood", "=", "[", "]", "else", ":", "log_likelihood", "=", "[", "log_likelihood", "]", "ignore", "=", "posterior_predictive", "+", "log_likelihood", "data", "=", "get_draws_stan3", "(", "posterior", ",", "model", "=", "posterior_model", ",", "ignore", "=", "ignore", ")", "return", "dict_to_dataset", "(", "data", ",", "library", "=", "self", ".", "stan", ",", "coords", "=", "self", ".", "coords", ",", "dims", "=", "self", ".", "dims", ")" ]
41.904762
[ 0.03225806451612903, 0.02, 0.02857142857142857, 0.02127659574468085, 0.017543859649122806, 0.017543859649122806, 0.07317073170731707, 0.02631578947368421, 0.057692307692307696, 0.017241379310344827, 0.022222222222222223, 0.0625, 0.03125, 0.21428571428571427, 0.021739130434782608, 1, 0.01818181818181818, 1, 0.0125, 1, 0.03296703296703297 ]
def update_account(self, account):
    """
    Update the passed account. Returns the updated account.

    https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
    """
    url = ACCOUNTS_API.format(account.account_id)
    body = {"account": {"name": account.name}}

    return CanvasAccount(data=self._put_resource(url, body))
[ "def", "update_account", "(", "self", ",", "account", ")", ":", "url", "=", "ACCOUNTS_API", ".", "format", "(", "account", ".", "account_id", ")", "body", "=", "{", "\"account\"", ":", "{", "\"name\"", ":", "account", ".", "name", "}", "}", "return", "CanvasAccount", "(", "data", "=", "self", ".", "_put_resource", "(", "url", ",", "body", ")", ")" ]
36.9
[ 0.029411764705882353, 0.18181818181818182, 0.031746031746031744, 0, 0.07228915662650602, 0.18181818181818182, 0.03773584905660377, 0.04, 0, 0.03125 ]
def visit_classdef(self, node):
    """Class visitor."""
    if not node_is_subclass(node, 'django.db.models.base.Model', '.Model'):
        # we only care about models
        return

    for child in node.get_children():
        if _is_meta_with_abstract(child):
            return

        if isinstance(child, Assign):
            grandchildren = list(child.get_children())

            if not isinstance(grandchildren[0], AssignName):
                continue

            name = grandchildren[0].name
            if name != '__unicode__':
                continue

            grandchild = grandchildren[1]
            assigned = grandchild.inferred()[0]

            if assigned.callable():
                return

            self.add_message('E%s01' % BASE_ID, args=node.name, node=node)
            return

        if isinstance(child, FunctionDef) and child.name == '__unicode__':
            if PY3:
                self.add_message('W%s02' % BASE_ID, args=node.name, node=node)
            return

    # if we get here, then we have no __unicode__ method directly on the class itself

    # a different warning is emitted if a parent declares __unicode__
    for method in node.methods():
        if method.parent != node and _is_unicode_or_str_in_python_2_compatibility(method):
            # this happens if a parent declares the unicode method but
            # this node does not
            self.add_message('W%s03' % BASE_ID, args=node.name, node=node)
            return

    # if the Django compatibility decorator is used then we don't emit a warning
    # see https://github.com/PyCQA/pylint-django/issues/10
    if _has_python_2_unicode_compatible_decorator(node):
        return

    if PY3:
        return

    self.add_message('W%s01' % BASE_ID, args=node.name, node=node)
[ "def", "visit_classdef", "(", "self", ",", "node", ")", ":", "if", "not", "node_is_subclass", "(", "node", ",", "'django.db.models.base.Model'", ",", "'.Model'", ")", ":", "# we only care about models", "return", "for", "child", "in", "node", ".", "get_children", "(", ")", ":", "if", "_is_meta_with_abstract", "(", "child", ")", ":", "return", "if", "isinstance", "(", "child", ",", "Assign", ")", ":", "grandchildren", "=", "list", "(", "child", ".", "get_children", "(", ")", ")", "if", "not", "isinstance", "(", "grandchildren", "[", "0", "]", ",", "AssignName", ")", ":", "continue", "name", "=", "grandchildren", "[", "0", "]", ".", "name", "if", "name", "!=", "'__unicode__'", ":", "continue", "grandchild", "=", "grandchildren", "[", "1", "]", "assigned", "=", "grandchild", ".", "inferred", "(", ")", "[", "0", "]", "if", "assigned", ".", "callable", "(", ")", ":", "return", "self", ".", "add_message", "(", "'E%s01'", "%", "BASE_ID", ",", "args", "=", "node", ".", "name", ",", "node", "=", "node", ")", "return", "if", "isinstance", "(", "child", ",", "FunctionDef", ")", "and", "child", ".", "name", "==", "'__unicode__'", ":", "if", "PY3", ":", "self", ".", "add_message", "(", "'W%s02'", "%", "BASE_ID", ",", "args", "=", "node", ".", "name", ",", "node", "=", "node", ")", "return", "# if we get here, then we have no __unicode__ method directly on the class itself", "# a different warning is emitted if a parent declares __unicode__", "for", "method", "in", "node", ".", "methods", "(", ")", ":", "if", "method", ".", "parent", "!=", "node", "and", "_is_unicode_or_str_in_python_2_compatibility", "(", "method", ")", ":", "# this happens if a parent declares the unicode method but", "# this node does not", "self", ".", "add_message", "(", "'W%s03'", "%", "BASE_ID", ",", "args", "=", "node", ".", "name", ",", "node", "=", "node", ")", "return", "# if the Django compatibility decorator is used then we don't emit a warning", "# see https://github.com/PyCQA/pylint-django/issues/10", "if", "_has_python_2_unicode_compatible_decorator", "(", "node", ")", ":", "return", "if", "PY3", ":", "return", "self", ".", "add_message", "(", "'W%s01'", "%", "BASE_ID", ",", "args", "=", "node", ".", "name", ",", "node", "=", "node", ")" ]
35.90566
[ 0.03225806451612903, 0.07142857142857142, 0.02531645569620253, 0.05128205128205128, 0.1111111111111111, 0, 0.04878048780487805, 0.044444444444444446, 0.09090909090909091, 0, 0.04878048780487805, 0.034482758620689655, 0, 0.03125, 0.07142857142857142, 0, 0.045454545454545456, 0.04878048780487805, 0.07142857142857142, 0, 0.044444444444444446, 0.0392156862745098, 0, 0.05128205128205128, 0.07692307692307693, 0, 0.02564102564102564, 0.09090909090909091, 0, 0.02564102564102564, 0.08695652173913043, 0.036585365853658534, 0.09090909090909091, 0, 0.033707865168539325, 0, 0.0273972602739726, 0.05405405405405406, 0.031914893617021274, 0.02702702702702703, 0.05555555555555555, 0.02564102564102564, 0.09090909090909091, 0, 0.03571428571428571, 0.03225806451612903, 0.03333333333333333, 0.1111111111111111, 0, 0.13333333333333333, 0.1111111111111111, 0, 0.02857142857142857 ]
def qps_pips(H, c, A, l, u, xmin=None, xmax=None, x0=None, opt=None):
    """Uses the Python Interior Point Solver (PIPS) to solve the following
    QP (quadratic programming) problem::

            min 1/2 x'*H*x + C'*x
             x

    subject to::

            l <= A*x <= u       (linear constraints)
            xmin <= x <= xmax   (variable bounds)

    Note the calling syntax is almost identical to that of QUADPROG from
    MathWorks' Optimization Toolbox. The main difference is that the linear
    constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},
    C{Aeq}, C{Beq}.

    See also L{pips}.

    Example from U{http://www.uc.edu/sashtml/iml/chap8/sect12.htm}:

        >>> from numpy import array, zeros, Inf
        >>> from scipy.sparse import csr_matrix
        >>> H = csr_matrix(array([[1003.1, 4.3, 6.3, 5.9],
        ...                       [4.3, 2.2, 2.1, 3.9],
        ...                       [6.3, 2.1, 3.5, 4.8],
        ...                       [5.9, 3.9, 4.8, 10 ]]))
        >>> c = zeros(4)
        >>> A = csr_matrix(array([[1, 1, 1, 1 ],
        ...                       [0.17, 0.11, 0.10, 0.18]]))
        >>> l = array([1, 0.10])
        >>> u = array([1, Inf])
        >>> xmin = zeros(4)
        >>> xmax = None
        >>> x0 = array([1, 0, 0, 1])
        >>> solution = qps_pips(H, c, A, l, u, xmin, xmax, x0)
        >>> round(solution["f"], 11) == 1.09666678128
        True
        >>> solution["converged"]
        True
        >>> solution["output"]["iterations"]
        10

    All parameters are optional except C{H}, C{C}, C{A} and C{L}.
    @param H: Quadratic cost coefficients.
    @type H: csr_matrix
    @param c: vector of linear cost coefficients
    @type c: array
    @param A: Optional linear constraints.
    @type A: csr_matrix
    @param l: Optional linear constraints. Default values are M{-Inf}.
    @type l: array
    @param u: Optional linear constraints. Default values are M{Inf}.
    @type u: array
    @param xmin: Optional lower bounds on the M{x} variables, defaults are
                 M{-Inf}.
    @type xmin: array
    @param xmax: Optional upper bounds on the M{x} variables, defaults are
                 M{Inf}.
    @type xmax: array
    @param x0: Starting value of optimization vector M{x}.
    @type x0: array
    @param opt: optional options dictionary with the following keys, all of
                which are also optional (default values shown in parentheses)
                  - C{verbose} (False) - Controls level of progress output
                    displayed
                  - C{feastol} (1e-6) - termination tolerance for feasibility
                    condition
                  - C{gradtol} (1e-6) - termination tolerance for gradient
                    condition
                  - C{comptol} (1e-6) - termination tolerance for
                    complementarity condition
                  - C{costtol} (1e-6) - termination tolerance for cost
                    condition
                  - C{max_it} (150) - maximum number of iterations
                  - C{step_control} (False) - set to True to enable step-size
                    control
                  - C{max_red} (20) - maximum number of step-size reductions if
                    step-control is on
                  - C{cost_mult} (1.0) - cost multiplier used to scale the
                    objective function for improved conditioning. Note: The
                    same value must also be passed to the Hessian evaluation
                    function so that it can appropriately scale the objective
                    function term in the Hessian of the Lagrangian.
    @type opt: dict

    @rtype: dict
    @return: The solution dictionary has the following keys:
               - C{x} - solution vector
               - C{f} - final objective function value
               - C{converged} - exit status
                   - True = first order optimality conditions satisfied
                   - False = maximum number of iterations reached
                   - None = numerically failed
               - C{output} - output dictionary with keys:
                   - C{iterations} - number of iterations performed
                   - C{hist} - dictionary of arrays with trajectories of the
                     following: feascond, gradcond, compcond, costcond, gamma,
                     stepsize, obj, alphap, alphad
                   - C{message} - exit message
               - C{lmbda} - dictionary containing the Langrange and Kuhn-Tucker
                 multipliers on the constraints, with keys:
                   - C{eqnonlin} - non-linear equality constraints
                   - C{ineqnonlin} - non-linear inequality constraints
                   - C{mu_l} - lower (left-hand) limit on linear constraints
                   - C{mu_u} - upper (right-hand) limit on linear constraints
                   - C{lower} - lower bound on optimization variables
                   - C{upper} - upper bound on optimization variables

    @license: Apache License version 2.0
    """
    if H is None or H.nnz == 0:
        if A is None or A.nnz == 0 and \
                xmin is None or len(xmin) == 0 and \
                xmax is None or len(xmax) == 0:
            print 'qps_pips: LP problem must include constraints or variable bounds'
            return
        else:
            if A is not None and A.nnz >= 0:
                nx = A.shape[1]
            elif xmin is not None and len(xmin) > 0:
                nx = xmin.shape[0]
            elif xmax is not None and len(xmax) > 0:
                nx = xmax.shape[0]
        H = csr_matrix((nx, nx))
    else:
        nx = H.shape[0]

    xmin = -Inf * ones(nx) if xmin is None else xmin
    xmax = Inf * ones(nx) if xmax is None else xmax

    c = zeros(nx) if c is None else c

#    if x0 is None:
#        x0 = zeros(nx)
#        k = flatnonzero( (VUB < 1e10) & (VLB > -1e10) )
#        x0[k] = ((VUB[k] + VLB[k]) / 2)
#        k = flatnonzero( (VUB < 1e10) & (VLB <= -1e10) )
#        x0[k] = VUB[k] - 1
#        k = flatnonzero( (VUB >= 1e10) & (VLB > -1e10) )
#        x0[k] = VLB[k] + 1

    x0 = zeros(nx) if x0 is None else x0

    opt = {} if opt is None else opt
    if not opt.has_key("cost_mult"):
        opt["cost_mult"] = 1

    def qp_f(x):
        f = 0.5 * dot(x.T * H, x) + dot(c.T, x)
        df = H * x + c
        d2f = H
        return f, df, d2f

#    def qp_gh(x):
#        g = array([])
#        h = array([])
#        dg = None
#        dh = None
#        return g, h, dg, dh
#
#    def qp_hessian(x, lmbda):
#        Lxx = H * opt["cost_mult"]
#        return Lxx

#    l = -Inf * ones(b.shape[0])
#    l[:N] = b[:N]

    return pips(qp_f, x0, A, l, u, xmin, xmax, opt=opt)
[ "def", "qps_pips", "(", "H", ",", "c", ",", "A", ",", "l", ",", "u", ",", "xmin", "=", "None", ",", "xmax", "=", "None", ",", "x0", "=", "None", ",", "opt", "=", "None", ")", ":", "if", "H", "is", "None", "or", "H", ".", "nnz", "==", "0", ":", "if", "A", "is", "None", "or", "A", ".", "nnz", "==", "0", "and", "xmin", "is", "None", "or", "len", "(", "xmin", ")", "==", "0", "and", "xmax", "is", "None", "or", "len", "(", "xmax", ")", "==", "0", ":", "print", "'qps_pips: LP problem must include constraints or variable bounds'", "return", "else", ":", "if", "A", "is", "not", "None", "and", "A", ".", "nnz", ">=", "0", ":", "nx", "=", "A", ".", "shape", "[", "1", "]", "elif", "xmin", "is", "not", "None", "and", "len", "(", "xmin", ")", ">", "0", ":", "nx", "=", "xmin", ".", "shape", "[", "0", "]", "elif", "xmax", "is", "not", "None", "and", "len", "(", "xmax", ")", ">", "0", ":", "nx", "=", "xmax", ".", "shape", "[", "0", "]", "H", "=", "csr_matrix", "(", "(", "nx", ",", "nx", ")", ")", "else", ":", "nx", "=", "H", ".", "shape", "[", "0", "]", "xmin", "=", "-", "Inf", "*", "ones", "(", "nx", ")", "if", "xmin", "is", "None", "else", "xmin", "xmax", "=", "Inf", "*", "ones", "(", "nx", ")", "if", "xmax", "is", "None", "else", "xmax", "c", "=", "zeros", "(", "nx", ")", "if", "c", "is", "None", "else", "c", "# if x0 is None:", "# x0 = zeros(nx)", "# k = flatnonzero( (VUB < 1e10) & (VLB > -1e10) )", "# x0[k] = ((VUB[k] + VLB[k]) / 2)", "# k = flatnonzero( (VUB < 1e10) & (VLB <= -1e10) )", "# x0[k] = VUB[k] - 1", "# k = flatnonzero( (VUB >= 1e10) & (VLB > -1e10) )", "# x0[k] = VLB[k] + 1", "x0", "=", "zeros", "(", "nx", ")", "if", "x0", "is", "None", "else", "x0", "opt", "=", "{", "}", "if", "opt", "is", "None", "else", "opt", "if", "not", "opt", ".", "has_key", "(", "\"cost_mult\"", ")", ":", "opt", "[", "\"cost_mult\"", "]", "=", "1", "def", "qp_f", "(", "x", ")", ":", "f", "=", "0.5", "*", "dot", "(", "x", ".", "T", "*", "H", ",", "x", ")", "+", "dot", "(", "c", ".", "T", ",", "x", ")", "df", "=", "H", "*", "x", "+", "c", "d2f", "=", "H", "return", "f", ",", "df", ",", "d2f", "# def qp_gh(x):", "# g = array([])", "# h = array([])", "# dg = None", "# dh = None", "# return g, h, dg, dh", "#", "# def qp_hessian(x, lmbda):", "# Lxx = H * opt[\"cost_mult\"]", "# return Lxx", "# l = -Inf * ones(b.shape[0])", "# l[:N] = b[:N]", "return", "pips", "(", "qp_f", ",", "x0", ",", "A", ",", "l", ",", "u", ",", "xmin", ",", "xmax", ",", "opt", "=", "opt", ")" ]
39.467456
[ 0.028985507246376812, 0.02702702702702703, 0.1, 0, 0.06060606060606061, 0.21428571428571427, 0, 0.1875, 0, 0.07692307692307693, 0.061224489795918366, 0, 0.027777777777777776, 0.02666666666666667, 0.02702702702702703, 0.10526315789473684, 0, 0.09523809523809523, 0, 0.04477611940298507, 0, 0.06382978723404255, 0.06382978723404255, 0.05970149253731343, 0.029850746268656716, 0.029850746268656716, 0.057971014492753624, 0.125, 0.07352941176470588, 0.04285714285714286, 0.125, 0.0967741935483871, 0.1111111111111111, 0.13043478260869565, 0.08333333333333333, 0.04838709677419355, 0.05660377358490566, 0.16666666666666666, 0.09090909090909091, 0.16666666666666666, 0.06818181818181818, 0.2, 0, 0.03076923076923077, 0.047619047619047616, 0.08695652173913043, 0.041666666666666664, 0.1111111111111111, 0.047619047619047616, 0.08695652173913043, 0.02857142857142857, 0.1111111111111111, 0.028985507246376812, 0.1111111111111111, 0.02702702702702703, 0.12, 0.09523809523809523, 0.02702702702702703, 0.125, 0.09523809523809523, 0.034482758620689655, 0.10526315789473684, 0.02666666666666667, 0.03896103896103896, 0.05405405405405406, 0.06896551724137931, 0.05194805194805195, 0.06896551724137931, 0.05405405405405406, 0.06896551724137931, 0.06153846153846154, 0.044444444444444446, 0.05714285714285714, 0.06896551724137931, 0.06060606060606061, 0.05194805194805195, 0.07407407407407407, 0.05063291139240506, 0.05263157894736842, 0.05405405405405406, 0.02666666666666667, 0.02631578947368421, 0.025974025974025976, 0.029850746268656716, 0.10526315789473684, 0, 0.125, 0.03333333333333333, 0.07692307692307693, 0.05555555555555555, 0.06976744186046512, 0.04225352112676056, 0.046153846153846156, 0.06521739130434782, 0.05263157894736842, 0.04477611940298507, 0.039473684210526314, 0.038461538461538464, 0.06, 0.06521739130434782, 0.0379746835443038, 0.05084745762711865, 0.045454545454545456, 0.04285714285714286, 0.05263157894736842, 0.05194805194805195, 0.043478260869565216, 0.043478260869565216, 0, 0.05, 0.2857142857142857, 0.06451612903225806, 0.05, 0.06382978723404255, 0.07142857142857142, 0.03571428571428571, 0.1111111111111111, 0.15384615384615385, 0.045454545454545456, 0.06451612903225806, 0.038461538461538464, 0.058823529411764705, 0.038461538461538464, 0.058823529411764705, 0.0625, 0.2222222222222222, 0.08695652173913043, 0, 0.038461538461538464, 0.057692307692307696, 0, 0.05405405405405406, 0, 0.05263157894736842, 0.043478260869565216, 0.017857142857142856, 0.025, 0.017543859649122806, 0.037037037037037035, 0.017543859649122806, 0.037037037037037035, 0, 0.05, 0, 0.05555555555555555, 0.08333333333333333, 0.07142857142857142, 0, 0.125, 0.0425531914893617, 0.09090909090909091, 0.13333333333333333, 0.08, 0, 0.05555555555555555, 0.045454545454545456, 0.045454545454545456, 0.05555555555555555, 0.05555555555555555, 0.03571428571428571, 1, 0.03333333333333333, 0.02857142857142857, 0.05263157894736842, 0, 0.03125, 0.05555555555555555, 0, 0.03636363636363636 ]
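The qp_f callback in the record above returns the objective value, gradient and Hessian as a triple. A minimal self-contained sketch of that pattern (dense H for clarity; values hypothetical), checking the analytic gradient against central finite differences:

import numpy as np

H = np.array([[2.0, 0.5], [0.5, 1.0]])
c = np.array([1.0, -1.0])

def qp_f(x):
    f = 0.5 * x @ H @ x + c @ x
    df = H @ x + c          # gradient of the quadratic
    d2f = H                 # constant Hessian
    return f, df, d2f

x = np.array([0.3, -0.7])
_, df, _ = qp_f(x)
eps = 1e-6
fd = np.array([(qp_f(x + eps * e)[0] - qp_f(x - eps * e)[0]) / (2 * eps)
               for e in np.eye(2)])
assert np.allclose(df, fd)  # analytic gradient matches finite differences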
def parse(self, rrstr):
    # type: (bytes) -> None
    '''
    Parse a Rock Ridge Platform Dependent record out of a string.

    Parameters:
     rrstr - The string to parse the record out of.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('PD record already initialized!')

    (su_len_unused, su_entry_version_unused) = struct.unpack_from('=BB', rrstr[:4], 2)

    self.padding = rrstr[4:]

    # We assume that the caller has already checked the su_entry_version,
    # so we don't bother.

    self._initialized = True
[ "def", "parse", "(", "self", ",", "rrstr", ")", ":", "# type: (bytes) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'PD record already initialized!'", ")", "(", "su_len_unused", ",", "su_entry_version_unused", ")", "=", "struct", ".", "unpack_from", "(", "'=BB'", ",", "rrstr", "[", ":", "4", "]", ",", "2", ")", "self", ".", "padding", "=", "rrstr", "[", "4", ":", "]", "# We assume that the caller has already checked the su_entry_version,", "# so we don't bother.", "self", ".", "_initialized", "=", "True" ]
30
[ 0.043478260869565216, 0.06451612903225806, 0.18181818181818182, 0.028985507246376812, 0, 0.10526315789473684, 0.05454545454545454, 0.125, 0.17647058823529413, 0.18181818181818182, 0.06896551724137931, 0.033707865168539325, 0, 0.03333333333333333, 0, 0.0625, 0, 0.025974025974025976, 0.06896551724137931, 0, 0.0625 ]
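In the record above, struct.unpack_from('=BB', rrstr[:4], 2) reads the two unsigned bytes at offsets 2 and 3 (length and entry version). A small sketch with a hypothetical 5-byte entry:

import struct

# Hypothetical System Use entry: b'PD' signature, length 5, version 1, padding.
rec = b'PD\x05\x01\x00'
su_len, su_entry_version = struct.unpack_from('=BB', rec, 2)
print(su_len, su_entry_version)  # 5 1
padding = rec[4:]                # everything after the 4-byte header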
def _get_galaxy_loc_file(name, galaxy_dt, ref_dir, galaxy_base):
    """Retrieve Galaxy *.loc file for the given reference/aligner name.

    First tries to find an aligner specific *.loc file. If not defined
    or does not exist, then we need to try and remap it from the
    default reference file
    """
    if "file" in galaxy_dt and os.path.exists(os.path.join(galaxy_base, galaxy_dt["file"])):
        loc_file = os.path.join(galaxy_base, galaxy_dt["file"])
        need_remap = False
    elif alignment.TOOLS[name].galaxy_loc_file is None:
        loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE)
        need_remap = True
    else:
        loc_file = os.path.join(ref_dir, alignment.TOOLS[name].galaxy_loc_file)
        need_remap = False
    if not os.path.exists(loc_file):
        loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE)
        need_remap = True
    return loc_file, need_remap
[ "def", "_get_galaxy_loc_file", "(", "name", ",", "galaxy_dt", ",", "ref_dir", ",", "galaxy_base", ")", ":", "if", "\"file\"", "in", "galaxy_dt", "and", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "galaxy_base", ",", "galaxy_dt", "[", "\"file\"", "]", ")", ")", ":", "loc_file", "=", "os", ".", "path", ".", "join", "(", "galaxy_base", ",", "galaxy_dt", "[", "\"file\"", "]", ")", "need_remap", "=", "False", "elif", "alignment", ".", "TOOLS", "[", "name", "]", ".", "galaxy_loc_file", "is", "None", ":", "loc_file", "=", "os", ".", "path", ".", "join", "(", "ref_dir", ",", "alignment", ".", "BASE_LOCATION_FILE", ")", "need_remap", "=", "True", "else", ":", "loc_file", "=", "os", ".", "path", ".", "join", "(", "ref_dir", ",", "alignment", ".", "TOOLS", "[", "name", "]", ".", "galaxy_loc_file", ")", "need_remap", "=", "False", "if", "not", "os", ".", "path", ".", "exists", "(", "loc_file", ")", ":", "loc_file", "=", "os", ".", "path", ".", "join", "(", "ref_dir", ",", "alignment", ".", "BASE_LOCATION_FILE", ")", "need_remap", "=", "True", "return", "loc_file", ",", "need_remap" ]
45.45
[ 0.015625, 0.028169014084507043, 0, 0.04285714285714286, 0.03125, 0.07692307692307693, 0.2857142857142857, 0.03260869565217391, 0.031746031746031744, 0.07692307692307693, 0.03636363636363636, 0.02857142857142857, 0.08, 0.2222222222222222, 0.02531645569620253, 0.07692307692307693, 0.05555555555555555, 0.02857142857142857, 0.08, 0.06451612903225806 ]
def count(cls, path=None, objtype=None, query=None, **kwargs):
    """
    Like __init__, but simply returns the number of objects that
    match the query rather than returning the objects

    NOTE: The path and objtype parameters to this function are to
    allow use of the DatabaseCollection class directly. However,
    this class is intended for subclassing and children of it
    should override either the OBJTYPE or PATH attribute rather
    than passing them as parameters here.

    @param path: the path of the database to query, in the form
        "database.collection"; pass None to use the value of the
        PATH property of the object or, if that is none, the PATH
        property of OBJTYPE
    @param objtype: the object type to use for these
        DatabaseObjects; pass None to use the OBJTYPE property of
        the class
    @param query: a dictionary specifying key-value pairs that the
        result must match. If query is None, use kwargs in its place

    @param **kwargs: used as query parameters if query is None
    @raise Exception: if path, PATH, and OBJTYPE.PATH are all None;
        the database path must be defined in at least one of these
    """
    if not objtype:
        objtype = cls.OBJTYPE
    if not path:
        path = cls.PATH
    if not query:
        query = kwargs
    return objtype.db(path).find(query).count()
[ "def", "count", "(", "cls", ",", "path", "=", "None", ",", "objtype", "=", "None", ",", "query", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "objtype", ":", "objtype", "=", "cls", ".", "OBJTYPE", "if", "not", "path", ":", "path", "=", "cls", ".", "PATH", "if", "not", "query", ":", "query", "=", "kwargs", "return", "objtype", ".", "db", "(", "path", ")", ".", "find", "(", "query", ")", ".", "count", "(", ")" ]
48.733333
[ 0.016129032258064516, 0.18181818181818182, 0.02564102564102564, 0.0425531914893617, 0.25, 0.02666666666666667, 0.025974025974025976, 0.02564102564102564, 0.02564102564102564, 0.25, 0.029850746268656716, 0.04477611940298507, 0.046875, 0.05555555555555555, 0.0410958904109589, 0.03225806451612903, 0.025974025974025976, 0.029850746268656716, 0.045454545454545456, 0.25, 0.04225352112676056, 0.02857142857142857, 0.18181818181818182, 0.08695652173913043, 0.06060606060606061, 0.1, 0.07407407407407407, 0.09523809523809523, 0.07692307692307693, 0.0392156862745098 ]
def _load_reports(self, report_files):
    """
    Args:
        report_files: list[file] reports to read in
    """
    contents = []
    for file_handle in report_files:
        # Convert to unicode, replacing unreadable chars
        contents.append(
            file_handle.read().decode(
                'utf-8',
                'replace'
            )
        )
    return contents
[ "def", "_load_reports", "(", "self", ",", "report_files", ")", ":", "contents", "=", "[", "]", "for", "file_handle", "in", "report_files", ":", "# Convert to unicode, replacing unreadable chars", "contents", ".", "append", "(", "file_handle", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ")", "return", "contents" ]
28.6
[ 0.02631578947368421, 0.18181818181818182, 0.15384615384615385, 0.03636363636363636, 0.18181818181818182, 0.09523809523809523, 0.05, 0.03333333333333333, 0.10714285714285714, 0.07142857142857142, 0.07142857142857142, 0.06896551724137931, 0.17647058823529413, 0.23076923076923078, 0.08695652173913043 ]
def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False):
    """Load a yaml file with a path that is relative to one of the given directories.

    Args:
        directories: list of directories to search
        rel_path: relative path of the yaml file to load
        log_debug: log all messages as debug
    Returns:
        tuple (fullpath, loaded yaml structure) or None if not found
    """
    for d in directories:
        if d.startswith(os.path.expanduser('~')) and not os.path.exists(d):
            os.makedirs(d)
        possible_path = os.path.join(d, rel_path)
        if os.path.exists(possible_path):
            loaded = cls.load_yaml_by_path(possible_path, log_debug=log_debug)
            if loaded is not None:
                return (possible_path, cls.load_yaml_by_path(possible_path))

    return None
[ "def", "load_yaml_by_relpath", "(", "cls", ",", "directories", ",", "rel_path", ",", "log_debug", "=", "False", ")", ":", "for", "d", "in", "directories", ":", "if", "d", ".", "startswith", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ")", "and", "not", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "os", ".", "makedirs", "(", "d", ")", "possible_path", "=", "os", ".", "path", ".", "join", "(", "d", ",", "rel_path", ")", "if", "os", ".", "path", ".", "exists", "(", "possible_path", ")", ":", "loaded", "=", "cls", ".", "load_yaml_by_path", "(", "possible_path", ",", "log_debug", "=", "log_debug", ")", "if", "loaded", "is", "not", "None", ":", "return", "(", "possible_path", ",", "cls", ".", "load_yaml_by_path", "(", "possible_path", ")", ")", "return", "None" ]
43.9
[ 0.014285714285714285, 0.03614457831325301, 0, 0.15384615384615385, 0.037037037037037035, 0.03571428571428571, 0.041666666666666664, 0.125, 0.041666666666666664, 0.18181818181818182, 0.06896551724137931, 0.02531645569620253, 0.06666666666666667, 0.03773584905660377, 0.044444444444444446, 0.036585365853658534, 0.05263157894736842, 0.0375, 0, 0.10526315789473684 ]
def errorFunction(self, t, a):
    """
    Using a hyperbolic arctan on the error slightly exaggerates
    the actual error non-linearly. Return t - a to just use the difference.
    t - target vector
    a - activation vector
    """
    def difference(v):
        if not self.hyperbolicError:
            #if -0.1 < v < 0.1: return 0.0
            #else:
            return v
        else:
            if v < -0.9999999: return -17.0
            elif v > 0.9999999: return 17.0
            else: return math.log( (1.0 + v) / (1.0 - v) )
            #else: return Numeric.arctanh(v) # half that above
    return list(map(difference, t - a))
[ "def", "errorFunction", "(", "self", ",", "t", ",", "a", ")", ":", "def", "difference", "(", "v", ")", ":", "if", "not", "self", ".", "hyperbolicError", ":", "#if -0.1 < v < 0.1: return 0.0", "#else:", "return", "v", "else", ":", "if", "v", "<", "-", "0.9999999", ":", "return", "-", "17.0", "elif", "v", ">", "0.9999999", ":", "return", "17.0", "else", ":", "return", "math", ".", "log", "(", "(", "1.0", "+", "v", ")", "/", "(", "1.0", "-", "v", ")", ")", "#else: return Numeric.arctanh(v) # half that above", "return", "list", "(", "map", "(", "difference", ",", "t", "-", "a", ")", ")" ]
38.666667
[ 0.03333333333333333, 0.18181818181818182, 0.029850746268656716, 0.02531645569620253, 0.08, 0.06896551724137931, 0.18181818181818182, 0.07692307692307693, 0.05, 0.06521739130434782, 0.13636363636363635, 0.08333333333333333, 0.11764705882352941, 0.08163265306122448, 0.10204081632653061, 0.08064516129032258, 0.045454545454545456, 0.046511627906976744 ]
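The expression log((1+v)/(1-v)) in the record above equals 2*artanh(v), which is why the trailing comment calls arctanh "half that above"; the clamp to +/-17 guards the singularity as |v| approaches 1. A quick numeric check:

import math

v = 0.5
assert math.isclose(math.log((1.0 + v) / (1.0 - v)), 2 * math.atanh(v))
# Near |v| = 1 the expression blows up, hence the clamp to +/-17 above:
print(math.log((1.0 + 0.9999999) / (1.0 - 0.9999999)))  # ~16.8, just under 17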
def _calculate(self, startingPercentage, endPercentage, startDate, endDate):
    """This is the error calculation function that gets called by
    :py:meth:`BaseErrorMeasure.get_error`.

    Both parameters will be correct at this time.

    :param float startingPercentage: Defines the start of the interval.
        This has to be a value in [0.0, 100.0]. It represents the value,
        where the error calculation should be started. 25.0 for example
        means that the first 25% of all calculated errors will be ignored.
    :param float endPercentage: Defines the end of the interval. This has
        to be a value in [0.0, 100.0]. It represents the value, after
        which all error values will be ignored. 90.0 for example means
        that the last 10% of all local errors will be ignored.
    :param float startDate: Epoch representing the start date used for
        error calculation.
    :param float endDate: Epoch representing the end date used in the
        error calculation.

    :return: Returns a float representing the error.
    :rtype: float
    """
    # get the defined subset of error values
    errorValues = self._get_error_values(startingPercentage, endPercentage, startDate, endDate)
    errorValues = filter(lambda item: item is not None, errorValues)

    return sorted(errorValues)[len(errorValues)//2]
[ "def", "_calculate", "(", "self", ",", "startingPercentage", ",", "endPercentage", ",", "startDate", ",", "endDate", ")", ":", "# get the defined subset of error values", "errorValues", "=", "self", ".", "_get_error_values", "(", "startingPercentage", ",", "endPercentage", ",", "startDate", ",", "endDate", ")", "errorValues", "=", "filter", "(", "lambda", "item", ":", "item", "is", "not", "None", ",", "errorValues", ")", "return", "sorted", "(", "errorValues", ")", "[", "len", "(", "errorValues", ")", "//", "2", "]" ]
61.909091
[ 0.013157894736842105, 0.027777777777777776, 0, 0.03773584905660377, 0, 0.034782608695652174, 0.03614457831325301, 0.042105263157894736, 0.036036036036036036, 0.02727272727272727, 0.04918032786885246, 0.043010752688172046, 0.043478260869565216, 0, 0.05084745762711865, 0.14285714285714285, 0.18181818181818182, 0.041666666666666664, 0.030303030303030304, 0.027777777777777776, 0, 0.03636363636363636 ]
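Note that sorted(vals)[len(vals)//2], as in the record above, is the upper median: for even-length input it picks the larger of the two middle values rather than averaging them. A quick comparison against statistics.median:

from statistics import median

vals = [4.0, 1.0, 3.0, 2.0]
print(sorted(vals)[len(vals) // 2])  # 3.0 - the upper of the two middle values
print(median(vals))                  # 2.5 - statistics.median averages them instead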
def start_adc_difference_comparator(self, differential, high_threshold, low_threshold,
                                    gain=1, data_rate=None, active_low=True,
                                    traditional=True, latching=False, num_readings=1):
    """Start continuous ADC conversions between two channels with
    the comparator enabled.  See start_adc_difference for valid
    differential parameter values and their meaning.  When enabled,
    the comparator will check if the ADC value is within the
    high_threshold & low_threshold value (both should be signed 16-bit
    integers) and trigger the ALERT pin.  The behavior can be controlled
    by the following parameters:
      - active_low: Boolean that indicates if ALERT is pulled low or
                    high when active/triggered.  Default is true,
                    active low.
      - traditional: Boolean that indicates if the comparator is in
                     traditional mode where it fires when the value
                     is within the threshold, or in window mode where
                     it fires when the value is _outside_ the threshold
                     range.  Default is true, traditional mode.
      - latching: Boolean that indicates if the alert should be held
                  until get_last_result() is called to read the value
                  and clear the alert.  Default is false, non-latching.
      - num_readings: The number of readings that match the comparator
                      before triggering the alert.  Can be 1, 2, or 4.
                      Default is 1.
    Will return an initial conversion result, then call the
    get_last_result() function continuously to read the most recent
    conversion result.  Call stop_adc() to stop conversions.
    """
    assert 0 <= differential <= 3, 'Differential must be a value within 0-3!'
    # Start continuous reads with comparator and set the mux value to the
    # channel plus the highest bit (bit 3) set.
    return self._read_comparator(differential, gain, data_rate,
                                 ADS1x15_CONFIG_MODE_CONTINUOUS,
                                 high_threshold, low_threshold,
                                 active_low, traditional, latching,
                                 num_readings)
[ "def", "start_adc_difference_comparator", "(", "self", ",", "differential", ",", "high_threshold", ",", "low_threshold", ",", "gain", "=", "1", ",", "data_rate", "=", "None", ",", "active_low", "=", "True", ",", "traditional", "=", "True", ",", "latching", "=", "False", ",", "num_readings", "=", "1", ")", ":", "assert", "0", "<=", "differential", "<=", "3", ",", "'Differential must be a value within 0-3!'", "# Start continuous reads with comparator and set the mux value to the", "# channel plus the highest bit (bit 3) set.", "return", "self", ".", "_read_comparator", "(", "differential", ",", "gain", ",", "data_rate", ",", "ADS1x15_CONFIG_MODE_CONTINUOUS", ",", "high_threshold", ",", "low_threshold", ",", "active_low", ",", "traditional", ",", "latching", ",", "num_readings", ")" ]
73.612903
[ 0.03488372093023256, 0.075, 0.07777777777777778, 0.028985507246376812, 0.0375, 0.0375, 0.037037037037037035, 0.02531645569620253, 0.031746031746031744, 0.03896103896103896, 0.02631578947368421, 0.04819277108433735, 0.047619047619047616, 0.047058823529411764, 0.04938271604938271, 0.038461538461538464, 0.03896103896103896, 0.046153846153846156, 0.04938271604938271, 0.04878048780487805, 0.037037037037037035, 0.02564102564102564, 0.05128205128205128, 0.18181818181818182, 0.037037037037037035, 0.025974025974025976, 0.0392156862745098, 0.04477611940298507, 0.04411764705882353, 0.0379746835443038, 0.0547945205479452 ]
def init_registered(self, request):
    """ Create default price list items for each registered resource. """
    created_items = models.DefaultPriceListItem.init_from_registered_resources()

    if created_items:
        message = ungettext(
            _('Price item was created: %s.') % created_items[0].name,
            _('Price items were created: %s.') % ', '.join(item.name for item in created_items),
            len(created_items)
        )
        self.message_user(request, message)
    else:
        self.message_user(request, _('Price items for all registered resources have been updated.'))

    return redirect(reverse('admin:cost_tracking_defaultpricelistitem_changelist'))
[ "def", "init_registered", "(", "self", ",", "request", ")", ":", "created_items", "=", "models", ".", "DefaultPriceListItem", ".", "init_from_registered_resources", "(", ")", "if", "created_items", ":", "message", "=", "ungettext", "(", "_", "(", "'Price item was created: %s.'", ")", "%", "created_items", "[", "0", "]", ".", "name", ",", "_", "(", "'Price items were created: %s.'", ")", "%", "', '", ".", "join", "(", "item", ".", "name", "for", "item", "in", "created_items", ")", ",", "len", "(", "created_items", ")", ")", "self", ".", "message_user", "(", "request", ",", "message", ")", "else", ":", "self", ".", "message_user", "(", "request", ",", "_", "(", "'Price items for all registered resources have been updated.'", ")", ")", "return", "redirect", "(", "reverse", "(", "'admin:cost_tracking_defaultpricelistitem_changelist'", ")", ")" ]
48.266667
[ 0.02857142857142857, 0.025974025974025976, 0.03571428571428571, 0, 0.08, 0.09375, 0.0273972602739726, 0.03, 0.058823529411764705, 0.23076923076923078, 0.0425531914893617, 0.15384615384615385, 0.028846153846153848, 0, 0.034482758620689655 ]
def build(self, construct):
    """Build a single construct in CLIPS.

    The Python equivalent of the CLIPS build command.

    """
    if lib.EnvBuild(self._env, construct.encode()) != 1:
        raise CLIPSError(self._env)
[ "def", "build", "(", "self", ",", "construct", ")", ":", "if", "lib", ".", "EnvBuild", "(", "self", ".", "_env", ",", "construct", ".", "encode", "(", ")", ")", "!=", "1", ":", "raise", "CLIPSError", "(", "self", ".", "_env", ")" ]
29.875
[ 0.037037037037037035, 0.044444444444444446, 0, 0.03508771929824561, 0, 0.18181818181818182, 0.03333333333333333, 0.05128205128205128 ]
def make_action(app_factory, hostname='localhost', port=5000,
                threaded=False, processes=1, stream=None,
                sort_by=('time', 'calls'), restrictions=()):
    """Return a new callback for :mod:`werkzeug.script` that starts a local
    server with the profiler enabled.

    ::

        from werkzeug.contrib import profiler
        action_profile = profiler.make_action(make_app)
    """
    def action(hostname=('h', hostname), port=('p', port),
               threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
        run_simple(hostname, port, app, False, None, threaded, processes)
    return action
[ "def", "make_action", "(", "app_factory", ",", "hostname", "=", "'localhost'", ",", "port", "=", "5000", ",", "threaded", "=", "False", ",", "processes", "=", "1", ",", "stream", "=", "None", ",", "sort_by", "=", "(", "'time'", ",", "'calls'", ")", ",", "restrictions", "=", "(", ")", ")", ":", "def", "action", "(", "hostname", "=", "(", "'h'", ",", "hostname", ")", ",", "port", "=", "(", "'p'", ",", "port", ")", ",", "threaded", "=", "threaded", ",", "processes", "=", "processes", ")", ":", "\"\"\"Start a new development server.\"\"\"", "from", "werkzeug", ".", "serving", "import", "run_simple", "app", "=", "ProfilerMiddleware", "(", "app_factory", "(", ")", ",", "stream", ",", "sort_by", ",", "restrictions", ")", "run_simple", "(", "hostname", ",", "port", ",", "app", ",", "False", ",", "None", ",", "threaded", ",", "processes", ")", "return", "action" ]
43.111111
[ 0.03278688524590164, 0.08771929824561403, 0.08333333333333333, 0.02666666666666667, 0.05405405405405406, 0, 0.5, 0, 0.044444444444444446, 0.03636363636363636, 0.2857142857142857, 0.05172413793103448, 0.10909090909090909, 0.044444444444444446, 0.0425531914893617, 0.02564102564102564, 0.0273972602739726, 0.11764705882352941 ]
def metadata(self, default=True):
    """
    Metadata of scenarios in the connected data source

    Parameter
    ---------
    default : bool, optional, default True
        Return *only* the default version of each Scenario.
        Any (`model`, `scenario`) without a default version is omitted.
        If :obj:`False`, return all versions.
    """
    # at present this reads in all data for all scenarios, it could be sped
    # up in the future to try to query a subset
    default = 'true' if default else 'false'
    add_url = 'runs?getOnlyDefaultRuns={}&includeMetadata=true'
    url = self.base_url + add_url.format(default)
    headers = {'Authorization': 'Bearer {}'.format(self.auth())}
    r = requests.get(url, headers=headers)
    df = pd.read_json(r.content, orient='records')

    def extract(row):
        return (
            pd.concat([row[['model', 'scenario']],
                       pd.Series(row.metadata)])
            .to_frame()
            .T
            .set_index(['model', 'scenario'])
        )

    return pd.concat([extract(row) for idx, row in df.iterrows()],
                     sort=False).reset_index()
[ "def", "metadata", "(", "self", ",", "default", "=", "True", ")", ":", "# at present this reads in all data for all scenarios, it could be sped", "# up in the future to try to query a subset", "default", "=", "'true'", "if", "default", "else", "'false'", "add_url", "=", "'runs?getOnlyDefaultRuns={}&includeMetadata=true'", "url", "=", "self", ".", "base_url", "+", "add_url", ".", "format", "(", "default", ")", "headers", "=", "{", "'Authorization'", ":", "'Bearer {}'", ".", "format", "(", "self", ".", "auth", "(", ")", ")", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "df", "=", "pd", ".", "read_json", "(", "r", ".", "content", ",", "orient", "=", "'records'", ")", "def", "extract", "(", "row", ")", ":", "return", "(", "pd", ".", "concat", "(", "[", "row", "[", "[", "'model'", ",", "'scenario'", "]", "]", ",", "pd", ".", "Series", "(", "row", ".", "metadata", ")", "]", ")", ".", "to_frame", "(", ")", ".", "T", ".", "set_index", "(", "[", "'model'", ",", "'scenario'", "]", ")", ")", "return", "pd", ".", "concat", "(", "[", "extract", "(", "row", ")", "for", "idx", ",", "row", "in", "df", ".", "iterrows", "(", ")", "]", ",", "sort", "=", "False", ")", ".", "reset_index", "(", ")" ]
39.483871
[ 0.030303030303030304, 0.18181818181818182, 0.034482758620689655, 0, 0.11764705882352941, 0.11764705882352941, 0.06521739130434782, 0.06349206349206349, 0.05333333333333334, 0.12244897959183673, 0.18181818181818182, 0.02531645569620253, 0.0392156862745098, 0.041666666666666664, 0.029850746268656716, 0.03773584905660377, 0.029411764705882353, 0.043478260869565216, 0.037037037037037035, 0, 0.08, 0.15, 0.05555555555555555, 0.07692307692307693, 0.07407407407407407, 0.1111111111111111, 0.04081632653061224, 0.23076923076923078, 0, 0.04285714285714286, 0.1 ]
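The row-wise extract in the record above can also be expressed with pandas' json_normalize (pandas >= 1.0; earlier versions expose it as pandas.io.json.json_normalize). A minimal sketch with hypothetical response records:

import pandas as pd

records = [
    {"model": "m1", "scenario": "s1", "metadata": {"climate": "2C"}},
    {"model": "m1", "scenario": "s2", "metadata": {"climate": "1.5C"}},
]
df = pd.json_normalize(records)  # flattens nested metadata into metadata.* columns
df.columns = [c.replace("metadata.", "") for c in df.columns]
print(df.set_index(["model", "scenario"]))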
def decode(obj):
    """
    Decoder for deserializing numpy data types.
    """

    typ = obj.get('typ')
    if typ is None:
        return obj
    elif typ == 'timestamp':
        freq = obj['freq'] if 'freq' in obj else obj['offset']
        return Timestamp(obj['value'], tz=obj['tz'], freq=freq)
    elif typ == 'nat':
        return NaT
    elif typ == 'period':
        return Period(ordinal=obj['ordinal'], freq=obj['freq'])
    elif typ == 'index':
        dtype = dtype_for(obj['dtype'])
        data = unconvert(obj['data'], dtype, obj.get('compress'))
        return Index(data, dtype=dtype, name=obj['name'])
    elif typ == 'range_index':
        return RangeIndex(obj['start'], obj['stop'], obj['step'], name=obj['name'])
    elif typ == 'multi_index':
        dtype = dtype_for(obj['dtype'])
        data = unconvert(obj['data'], dtype, obj.get('compress'))
        data = [tuple(x) for x in data]
        return MultiIndex.from_tuples(data, names=obj['names'])
    elif typ == 'period_index':
        data = unconvert(obj['data'], np.int64, obj.get('compress'))
        d = dict(name=obj['name'], freq=obj['freq'])
        freq = d.pop('freq', None)
        return PeriodIndex(PeriodArray(data, freq), **d)
    elif typ == 'datetime_index':
        data = unconvert(obj['data'], np.int64, obj.get('compress'))
        d = dict(name=obj['name'], freq=obj['freq'])
        result = DatetimeIndex(data, **d)
        tz = obj['tz']

        # reverse tz conversion
        if tz is not None:
            result = result.tz_localize('UTC').tz_convert(tz)
        return result

    elif typ in ('interval_index', 'interval_array'):
        return globals()[obj['klass']].from_arrays(obj['left'], obj['right'],
                                                   obj['closed'],
                                                   name=obj['name'])
    elif typ == 'category':
        from_codes = globals()[obj['klass']].from_codes
        return from_codes(codes=obj['codes'], categories=obj['categories'],
                          ordered=obj['ordered'])

    elif typ == 'interval':
        return Interval(obj['left'], obj['right'], obj['closed'])
    elif typ == 'series':
        dtype = dtype_for(obj['dtype'])
        pd_dtype = pandas_dtype(dtype)

        index = obj['index']
        result = Series(unconvert(obj['data'], dtype, obj['compress']),
                        index=index,
                        dtype=pd_dtype,
                        name=obj['name'])
        return result

    elif typ == 'block_manager':
        axes = obj['axes']

        def create_block(b):
            values = _safe_reshape(unconvert(
                b['values'], dtype_for(b['dtype']),
                b['compress']), b['shape'])

            # locs handles duplicate column names, and should be used instead
            # of items; see GH 9618
            if 'locs' in b:
                placement = b['locs']
            else:
                placement = axes[0].get_indexer(b['items'])

            if is_datetime64tz_dtype(b['dtype']):
                assert isinstance(values, np.ndarray), type(values)
                assert values.dtype == 'M8[ns]', values.dtype
                values = DatetimeArray(values, dtype=b['dtype'])

            return make_block(values=values,
                              klass=getattr(internals, b['klass']),
                              placement=placement,
                              dtype=b['dtype'])

        blocks = [create_block(b) for b in obj['blocks']]
        return globals()[obj['klass']](BlockManager(blocks, axes))
    elif typ == 'datetime':
        return parse(obj['data'])
    elif typ == 'datetime64':
        return np.datetime64(parse(obj['data']))
    elif typ == 'date':
        return parse(obj['data']).date()
    elif typ == 'timedelta':
        return timedelta(*obj['data'])
    elif typ == 'timedelta64':
        return np.timedelta64(int(obj['data']))
    # elif typ == 'sparse_series':
    #    dtype = dtype_for(obj['dtype'])
    #    return SparseSeries(
    #        unconvert(obj['sp_values'], dtype, obj['compress']),
    #        sparse_index=obj['sp_index'], index=obj['index'],
    #        fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
    # elif typ == 'sparse_dataframe':
    #    return SparseDataFrame(
    #        obj['data'], columns=obj['columns'],
    #        default_fill_value=obj['default_fill_value'],
    #        default_kind=obj['default_kind']
    #    )
    # elif typ == 'sparse_panel':
    #    return SparsePanel(
    #        obj['data'], items=obj['items'],
    #        default_fill_value=obj['default_fill_value'],
    #        default_kind=obj['default_kind'])
    elif typ == 'block_index':
        return globals()[obj['klass']](obj['length'], obj['blocs'],
                                       obj['blengths'])
    elif typ == 'int_index':
        return globals()[obj['klass']](obj['length'], obj['indices'])
    elif typ == 'ndarray':
        return unconvert(obj['data'], np.typeDict[obj['dtype']],
                         obj.get('compress')).reshape(obj['shape'])
    elif typ == 'np_scalar':
        if obj.get('sub_typ') == 'np_complex':
            return c2f(obj['real'], obj['imag'], obj['dtype'])
        else:
            dtype = dtype_for(obj['dtype'])
            try:
                return dtype(obj['data'])
            except (ValueError, TypeError):
                return dtype.type(obj['data'])
    elif typ == 'np_complex':
        return complex(obj['real'] + '+' + obj['imag'] + 'j')
    elif isinstance(obj, (dict, list, set)):
        return obj
    else:
        return obj
[ "def", "decode", "(", "obj", ")", ":", "typ", "=", "obj", ".", "get", "(", "'typ'", ")", "if", "typ", "is", "None", ":", "return", "obj", "elif", "typ", "==", "'timestamp'", ":", "freq", "=", "obj", "[", "'freq'", "]", "if", "'freq'", "in", "obj", "else", "obj", "[", "'offset'", "]", "return", "Timestamp", "(", "obj", "[", "'value'", "]", ",", "tz", "=", "obj", "[", "'tz'", "]", ",", "freq", "=", "freq", ")", "elif", "typ", "==", "'nat'", ":", "return", "NaT", "elif", "typ", "==", "'period'", ":", "return", "Period", "(", "ordinal", "=", "obj", "[", "'ordinal'", "]", ",", "freq", "=", "obj", "[", "'freq'", "]", ")", "elif", "typ", "==", "'index'", ":", "dtype", "=", "dtype_for", "(", "obj", "[", "'dtype'", "]", ")", "data", "=", "unconvert", "(", "obj", "[", "'data'", "]", ",", "dtype", ",", "obj", ".", "get", "(", "'compress'", ")", ")", "return", "Index", "(", "data", ",", "dtype", "=", "dtype", ",", "name", "=", "obj", "[", "'name'", "]", ")", "elif", "typ", "==", "'range_index'", ":", "return", "RangeIndex", "(", "obj", "[", "'start'", "]", ",", "obj", "[", "'stop'", "]", ",", "obj", "[", "'step'", "]", ",", "name", "=", "obj", "[", "'name'", "]", ")", "elif", "typ", "==", "'multi_index'", ":", "dtype", "=", "dtype_for", "(", "obj", "[", "'dtype'", "]", ")", "data", "=", "unconvert", "(", "obj", "[", "'data'", "]", ",", "dtype", ",", "obj", ".", "get", "(", "'compress'", ")", ")", "data", "=", "[", "tuple", "(", "x", ")", "for", "x", "in", "data", "]", "return", "MultiIndex", ".", "from_tuples", "(", "data", ",", "names", "=", "obj", "[", "'names'", "]", ")", "elif", "typ", "==", "'period_index'", ":", "data", "=", "unconvert", "(", "obj", "[", "'data'", "]", ",", "np", ".", "int64", ",", "obj", ".", "get", "(", "'compress'", ")", ")", "d", "=", "dict", "(", "name", "=", "obj", "[", "'name'", "]", ",", "freq", "=", "obj", "[", "'freq'", "]", ")", "freq", "=", "d", ".", "pop", "(", "'freq'", ",", "None", ")", "return", "PeriodIndex", "(", "PeriodArray", "(", "data", ",", "freq", ")", ",", "*", "*", "d", ")", "elif", "typ", "==", "'datetime_index'", ":", "data", "=", "unconvert", "(", "obj", "[", "'data'", "]", ",", "np", ".", "int64", ",", "obj", ".", "get", "(", "'compress'", ")", ")", "d", "=", "dict", "(", "name", "=", "obj", "[", "'name'", "]", ",", "freq", "=", "obj", "[", "'freq'", "]", ")", "result", "=", "DatetimeIndex", "(", "data", ",", "*", "*", "d", ")", "tz", "=", "obj", "[", "'tz'", "]", "# reverse tz conversion", "if", "tz", "is", "not", "None", ":", "result", "=", "result", ".", "tz_localize", "(", "'UTC'", ")", ".", "tz_convert", "(", "tz", ")", "return", "result", "elif", "typ", "in", "(", "'interval_index'", ",", "'interval_array'", ")", ":", "return", "globals", "(", ")", "[", "obj", "[", "'klass'", "]", "]", ".", "from_arrays", "(", "obj", "[", "'left'", "]", ",", "obj", "[", "'right'", "]", ",", "obj", "[", "'closed'", "]", ",", "name", "=", "obj", "[", "'name'", "]", ")", "elif", "typ", "==", "'category'", ":", "from_codes", "=", "globals", "(", ")", "[", "obj", "[", "'klass'", "]", "]", ".", "from_codes", "return", "from_codes", "(", "codes", "=", "obj", "[", "'codes'", "]", ",", "categories", "=", "obj", "[", "'categories'", "]", ",", "ordered", "=", "obj", "[", "'ordered'", "]", ")", "elif", "typ", "==", "'interval'", ":", "return", "Interval", "(", "obj", "[", "'left'", "]", ",", "obj", "[", "'right'", "]", ",", "obj", "[", "'closed'", "]", ")", "elif", "typ", "==", "'series'", ":", "dtype", "=", "dtype_for", "(", "obj", "[", "'dtype'", "]", ")", "pd_dtype", 
"=", "pandas_dtype", "(", "dtype", ")", "index", "=", "obj", "[", "'index'", "]", "result", "=", "Series", "(", "unconvert", "(", "obj", "[", "'data'", "]", ",", "dtype", ",", "obj", "[", "'compress'", "]", ")", ",", "index", "=", "index", ",", "dtype", "=", "pd_dtype", ",", "name", "=", "obj", "[", "'name'", "]", ")", "return", "result", "elif", "typ", "==", "'block_manager'", ":", "axes", "=", "obj", "[", "'axes'", "]", "def", "create_block", "(", "b", ")", ":", "values", "=", "_safe_reshape", "(", "unconvert", "(", "b", "[", "'values'", "]", ",", "dtype_for", "(", "b", "[", "'dtype'", "]", ")", ",", "b", "[", "'compress'", "]", ")", ",", "b", "[", "'shape'", "]", ")", "# locs handles duplicate column names, and should be used instead", "# of items; see GH 9618", "if", "'locs'", "in", "b", ":", "placement", "=", "b", "[", "'locs'", "]", "else", ":", "placement", "=", "axes", "[", "0", "]", ".", "get_indexer", "(", "b", "[", "'items'", "]", ")", "if", "is_datetime64tz_dtype", "(", "b", "[", "'dtype'", "]", ")", ":", "assert", "isinstance", "(", "values", ",", "np", ".", "ndarray", ")", ",", "type", "(", "values", ")", "assert", "values", ".", "dtype", "==", "'M8[ns]'", ",", "values", ".", "dtype", "values", "=", "DatetimeArray", "(", "values", ",", "dtype", "=", "b", "[", "'dtype'", "]", ")", "return", "make_block", "(", "values", "=", "values", ",", "klass", "=", "getattr", "(", "internals", ",", "b", "[", "'klass'", "]", ")", ",", "placement", "=", "placement", ",", "dtype", "=", "b", "[", "'dtype'", "]", ")", "blocks", "=", "[", "create_block", "(", "b", ")", "for", "b", "in", "obj", "[", "'blocks'", "]", "]", "return", "globals", "(", ")", "[", "obj", "[", "'klass'", "]", "]", "(", "BlockManager", "(", "blocks", ",", "axes", ")", ")", "elif", "typ", "==", "'datetime'", ":", "return", "parse", "(", "obj", "[", "'data'", "]", ")", "elif", "typ", "==", "'datetime64'", ":", "return", "np", ".", "datetime64", "(", "parse", "(", "obj", "[", "'data'", "]", ")", ")", "elif", "typ", "==", "'date'", ":", "return", "parse", "(", "obj", "[", "'data'", "]", ")", ".", "date", "(", ")", "elif", "typ", "==", "'timedelta'", ":", "return", "timedelta", "(", "*", "obj", "[", "'data'", "]", ")", "elif", "typ", "==", "'timedelta64'", ":", "return", "np", ".", "timedelta64", "(", "int", "(", "obj", "[", "'data'", "]", ")", ")", "# elif typ == 'sparse_series':", "# dtype = dtype_for(obj['dtype'])", "# return SparseSeries(", "# unconvert(obj['sp_values'], dtype, obj['compress']),", "# sparse_index=obj['sp_index'], index=obj['index'],", "# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])", "# elif typ == 'sparse_dataframe':", "# return SparseDataFrame(", "# obj['data'], columns=obj['columns'],", "# default_fill_value=obj['default_fill_value'],", "# default_kind=obj['default_kind']", "# )", "# elif typ == 'sparse_panel':", "# return SparsePanel(", "# obj['data'], items=obj['items'],", "# default_fill_value=obj['default_fill_value'],", "# default_kind=obj['default_kind'])", "elif", "typ", "==", "'block_index'", ":", "return", "globals", "(", ")", "[", "obj", "[", "'klass'", "]", "]", "(", "obj", "[", "'length'", "]", ",", "obj", "[", "'blocs'", "]", ",", "obj", "[", "'blengths'", "]", ")", "elif", "typ", "==", "'int_index'", ":", "return", "globals", "(", ")", "[", "obj", "[", "'klass'", "]", "]", "(", "obj", "[", "'length'", "]", ",", "obj", "[", "'indices'", "]", ")", "elif", "typ", "==", "'ndarray'", ":", "return", "unconvert", "(", "obj", "[", "'data'", "]", ",", "np", ".", "typeDict", "[", 
"obj", "[", "'dtype'", "]", "]", ",", "obj", ".", "get", "(", "'compress'", ")", ")", ".", "reshape", "(", "obj", "[", "'shape'", "]", ")", "elif", "typ", "==", "'np_scalar'", ":", "if", "obj", ".", "get", "(", "'sub_typ'", ")", "==", "'np_complex'", ":", "return", "c2f", "(", "obj", "[", "'real'", "]", ",", "obj", "[", "'imag'", "]", ",", "obj", "[", "'dtype'", "]", ")", "else", ":", "dtype", "=", "dtype_for", "(", "obj", "[", "'dtype'", "]", ")", "try", ":", "return", "dtype", "(", "obj", "[", "'data'", "]", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "dtype", ".", "type", "(", "obj", "[", "'data'", "]", ")", "elif", "typ", "==", "'np_complex'", ":", "return", "complex", "(", "obj", "[", "'real'", "]", "+", "'+'", "+", "obj", "[", "'imag'", "]", "+", "'j'", ")", "elif", "isinstance", "(", "obj", ",", "(", "dict", ",", "list", ",", "set", ")", ")", ":", "return", "obj", "else", ":", "return", "obj" ]
38.322148
[ 0.0625, 0.2857142857142857, 0.0425531914893617, 0.2857142857142857, 0, 0.08333333333333333, 0.10526315789473684, 0.1111111111111111, 0.07142857142857142, 0.03225806451612903, 0.031746031746031744, 0.09090909090909091, 0.1111111111111111, 0.08, 0.031746031746031744, 0.08333333333333333, 0.05128205128205128, 0.06818181818181818, 0.08888888888888889, 0.03508771929824561, 0.06666666666666667, 0.07692307692307693, 0.07894736842105263, 0.07894736842105263, 0.11627906976744186, 0.06666666666666667, 0.05128205128205128, 0.06818181818181818, 0.08888888888888889, 0.05128205128205128, 0.031746031746031744, 0.06451612903225806, 0.029411764705882353, 0.038461538461538464, 0.058823529411764705, 0.03571428571428571, 0, 0.06060606060606061, 0.029411764705882353, 0.038461538461538464, 0.04878048780487805, 0.09090909090909091, 0, 0.06451612903225806, 0.07692307692307693, 0.03278688524590164, 0.09523809523809523, 0, 0.03773584905660377, 0.047619047619047616, 0.046875, 0.046153846153846156, 0.07352941176470588, 0.07407407407407407, 0.03636363636363636, 0.06666666666666667, 0.07272727272727272, 0.10204081632653061, 0, 0.07407407407407407, 0.03076923076923077, 0.08, 0.05128205128205128, 0.05263157894736842, 0, 0.07142857142857142, 0.04225352112676056, 0.08333333333333333, 0.07692307692307693, 0.0975609756097561, 0.09523809523809523, 0, 0.0625, 0.07692307692307693, 0, 0.07142857142857142, 0.06666666666666667, 0.0392156862745098, 0.06976744186046512, 0, 0.025974025974025976, 0.05714285714285714, 0.07407407407407407, 0.05405405405405406, 0.11764705882352941, 0.03389830508474576, 0, 0.04081632653061224, 0.029850746268656716, 0.03278688524590164, 0.03125, 0, 0.06818181818181818, 0.05970149253731343, 0.08, 0.10638297872340426, 0, 0.03508771929824561, 0.030303030303030304, 0.07407407407407407, 0.06060606060606061, 0.06896551724137931, 0.041666666666666664, 0.08695652173913043, 0.05, 0.07142857142857142, 0.05263157894736842, 0.06666666666666667, 0.0425531914893617, 0.058823529411764705, 0.05, 0.06896551724137931, 0.03076923076923077, 0.03225806451612903, 0.02564102564102564, 0.05405405405405406, 0.0625, 0.04081632653061224, 0.034482758620689655, 0.044444444444444446, 0.2, 0.06060606060606061, 0.07142857142857142, 0.044444444444444446, 0.034482758620689655, 0.043478260869565216, 0.06666666666666667, 0.04477611940298507, 0.07272727272727272, 0.07142857142857142, 0.028985507246376812, 0.07692307692307693, 0.046875, 0.05970149253731343, 0.07142857142857142, 0.043478260869565216, 0.03225806451612903, 0.15384615384615385, 0.046511627906976744, 0.125, 0.04878048780487805, 0.046511627906976744, 0.043478260869565216, 0.06896551724137931, 0.03278688524590164, 0.045454545454545456, 0.1111111111111111, 0.2222222222222222, 0.1111111111111111 ]
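The decode tokens above implement a 'typ'-dispatch: every encoded object carries a typ tag, and the decoder rebuilds the matching pandas object from the tagged fields. The dispatch shape, reduced to a standalone sketch with two toy types (the tag names and fields here are illustrative, not the record's actual encoding):

from datetime import date, timedelta

def decode(obj):
    """Rebuild a Python object from a tagged dict, mirroring the record's dispatcher."""
    typ = obj.get('typ')
    if typ is None:
        return obj                                  # plain containers pass through
    elif typ == 'date':
        return date.fromordinal(obj['ordinal'])
    elif typ == 'timedelta':
        return timedelta(seconds=obj['seconds'])
    return obj

assert decode({'typ': 'timedelta', 'seconds': 90}) == timedelta(seconds=90)
assert decode({'a': 1}) == {'a': 1}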
def rectify_partial_form_field(base_field, partial_form_fields):
    """
    In base_field reset the attributes label and help_text, since they are
    overridden by the partial field. Additionally, from the list, or list of
    lists, of partial_form_fields append the bound validator methods to the
    given base field.
    """
    base_field.label = ''
    base_field.help_text = ''
    for fieldset in partial_form_fields:
        if not isinstance(fieldset, (list, tuple)):
            fieldset = [fieldset]
        for field in fieldset:
            base_field.validators.append(field.run_validators)
[ "def", "rectify_partial_form_field", "(", "base_field", ",", "partial_form_fields", ")", ":", "base_field", ".", "label", "=", "''", "base_field", ".", "help_text", "=", "''", "for", "fieldset", "in", "partial_form_fields", ":", "if", "not", "isinstance", "(", "fieldset", ",", "(", "list", ",", "tuple", ")", ")", ":", "fieldset", "=", "[", "fieldset", "]", "for", "field", "in", "fieldset", ":", "base_field", ".", "validators", ".", "append", "(", "field", ".", "run_validators", ")" ]
45.307692
[ 0.015625, 0.2857142857142857, 0.03296703296703297, 0.034482758620689655, 0.031746031746031744, 0.2857142857142857, 0.08, 0.06896551724137931, 0.05, 0.0392156862745098, 0.06060606060606061, 0.06666666666666667, 0.03225806451612903 ]
def accept_publication_role(cursor, publication_id, user_id, document_ids, is_accepted=False): """Accept or deny the document role attribution for the publication (``publication_id``) and user (at ``user_id``) for the documents (listed by id as ``document_ids``). """ cursor.execute("""\ UPDATE role_acceptances AS ra SET accepted = %s FROM pending_documents AS pd WHERE pd.publication_id = %s AND ra.user_id = %s AND pd.uuid = ANY(%s::UUID[]) AND pd.uuid = ra.uuid""", (is_accepted, publication_id, user_id, document_ids,))
[ "def", "accept_publication_role", "(", "cursor", ",", "publication_id", ",", "user_id", ",", "document_ids", ",", "is_accepted", "=", "False", ")", ":", "cursor", ".", "execute", "(", "\"\"\"\\\nUPDATE role_acceptances AS ra\nSET accepted = %s\nFROM pending_documents AS pd\nWHERE\n pd.publication_id = %s\n AND\n ra.user_id = %s\n AND\n pd.uuid = ANY(%s::UUID[])\n AND\n pd.uuid = ra.uuid\"\"\"", ",", "(", "is_accepted", ",", "publication_id", ",", "user_id", ",", "document_ids", ",", ")", ")" ]
30.947368
[ 0.03333333333333333, 0.06557377049180328, 0.027777777777777776, 0.08, 0.07017543859649122, 0.2857142857142857, 0.13043478260869565, 0.034482758620689655, 0.058823529411764705, 0.03571428571428571, 0.2, 0.125, 0.6, 0.17647058823529413, 0.6, 0.2222222222222222, 0.6, 0.17391304347826086, 0.0547945205479452 ]
def build_path(levels):
    """ make a linear directory structure from a list of path level names
    levels = ["chefdir", "trees", "test"]
    builds ./chefdir/trees/test/
    """
    path = os.path.join(*levels)
    if not dir_exists(path):
        os.makedirs(path)
    return path
[ "def", "build_path", "(", "levels", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "*", "levels", ")", "if", "not", "dir_exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "return", "path" ]
28
[ 0.043478260869565216, 0.2857142857142857, 0.02857142857142857, 0.04878048780487805, 0.0625, 0.2857142857142857, 0.0625, 0.07142857142857142, 0.08, 0.13333333333333333 ]
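The build_path record above just joins the level names and creates the tree when it is missing. A self-contained sketch of the same behaviour; the record's dir_exists helper is not shown, so os.path.isdir stands in for it (an assumption):

import os

def build_path(levels):
    """Join a list of path level names and create the directory tree if absent."""
    path = os.path.join(*levels)
    if not os.path.isdir(path):  # stands in for the record's dir_exists helper
        os.makedirs(path)
    return path

print(build_path(["chefdir", "trees", "test"]))  # chefdir/trees/test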
def __coord_time(n, sr=22050, hop_length=512, **_kwargs): '''Get time coordinates from frames''' return core.frames_to_time(np.arange(n+1), sr=sr, hop_length=hop_length)
[ "def", "__coord_time", "(", "n", ",", "sr", "=", "22050", ",", "hop_length", "=", "512", ",", "*", "*", "_kwargs", ")", ":", "return", "core", ".", "frames_to_time", "(", "np", ".", "arange", "(", "n", "+", "1", ")", ",", "sr", "=", "sr", ",", "hop_length", "=", "hop_length", ")" ]
58.333333
[ 0.017543859649122806, 0.047619047619047616, 0.02631578947368421 ]
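For context, the frame-to-time mapping used by the __coord_time record is frames * hop_length / sr seconds; a small check against librosa's public frames_to_time (assuming the record's core module is librosa's, and that librosa is installed):

import numpy as np
import librosa

n, sr, hop_length = 4, 22050, 512
# Each frame index maps to frame * hop_length / sr seconds.
times = librosa.frames_to_time(np.arange(n + 1), sr=sr, hop_length=hop_length)
assert np.allclose(times, np.arange(n + 1) * hop_length / sr)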
def desktop_lockdown(name, user=None, disable_application_handlers=None, disable_command_line=None, disable_lock_screen=None, disable_log_out=None, disable_print_setup=None, disable_printing=None, disable_save_to_disk=None, disable_user_switching=None, user_administration_disabled=None, **kwargs): ''' desktop_lockdown: sets values in the org.gnome.desktop.lockdown schema ''' gnome_kwargs = { 'user': user, 'schema': 'org.gnome.desktop.lockdown' } preferences = ['disable_application_handlers', 'disable_command_line', 'disable_lock_screen', 'disable_log_out', 'disable_print_setup', 'disable_printing', 'disable_save_to_disk', 'disable_user_switching', 'user_administration_disabled'] preferences_hash = {} for pref in preferences: if pref in locals() and locals()[pref] is not None: key = re.sub('_', '-', pref) preferences_hash[key] = locals()[pref] return _do(name, gnome_kwargs, preferences_hash)
[ "def", "desktop_lockdown", "(", "name", ",", "user", "=", "None", ",", "disable_application_handlers", "=", "None", ",", "disable_command_line", "=", "None", ",", "disable_lock_screen", "=", "None", ",", "disable_log_out", "=", "None", ",", "disable_print_setup", "=", "None", ",", "disable_printing", "=", "None", ",", "disable_save_to_disk", "=", "None", ",", "disable_user_switching", "=", "None", ",", "user_administration_disabled", "=", "None", ",", "*", "*", "kwargs", ")", ":", "gnome_kwargs", "=", "{", "'user'", ":", "user", ",", "'schema'", ":", "'org.gnome.desktop.lockdown'", "}", "preferences", "=", "[", "'disable_application_handlers'", ",", "'disable_command_line'", ",", "'disable_lock_screen'", ",", "'disable_log_out'", ",", "'disable_print_setup'", ",", "'disable_printing'", ",", "'disable_save_to_disk'", ",", "'disable_user_switching'", ",", "'user_administration_disabled'", "]", "preferences_hash", "=", "{", "}", "for", "pref", "in", "preferences", ":", "if", "pref", "in", "locals", "(", ")", "and", "locals", "(", ")", "[", "pref", "]", "is", "not", "None", ":", "key", "=", "re", ".", "sub", "(", "'_'", ",", "'-'", ",", "pref", ")", "preferences_hash", "[", "key", "]", "=", "locals", "(", ")", "[", "pref", "]", "return", "_do", "(", "name", ",", "gnome_kwargs", ",", "preferences_hash", ")" ]
38.3125
[ 0.07692307692307693, 0.12903225806451613, 0.07272727272727272, 0.0851063829787234, 0.08695652173913043, 0.09523809523809523, 0.08695652173913043, 0.09302325581395349, 0.0851063829787234, 0.08163265306122448, 0.07272727272727272, 0.12903225806451613, 0.2857142857142857, 0.02702702702702703, 0.2857142857142857, 0.15, 0.09523809523809523, 0.043478260869565216, 0.6, 0, 0.04054054054054054, 0.02631578947368421, 0.03636363636363636, 0.043478260869565216, 0, 0.08, 0.07142857142857142, 0.03389830508474576, 0.05, 0.04, 0, 0.038461538461538464 ]
def PXOR(cpu, dest, src): """ Logical exclusive OR. Performs a bitwise logical exclusive-OR (XOR) operation on the quadword source (second) and destination (first) operands and stores the result in the destination operand location. The source operand can be an MMX(TM) technology register or a quadword memory location; the destination operand must be an MMX register. Each bit of the result is 1 if the corresponding bits of the two operands are different; each bit is 0 if the corresponding bits of the operands are the same:: DEST = DEST XOR SRC; :param cpu: current CPU. :param dest: destination operand. :param src: quadword source operand. """ res = dest.write(dest.read() ^ src.read())
[ "def", "PXOR", "(", "cpu", ",", "dest", ",", "src", ")", ":", "res", "=", "dest", ".", "write", "(", "dest", ".", "read", "(", ")", "^", "src", ".", "read", "(", ")", ")" ]
42.263158
[ 0.04, 0.18181818181818182, 0.06896551724137931, 0, 0.0379746835443038, 0.05128205128205128, 0.037037037037037035, 0.04878048780487805, 0.037037037037037035, 0.04878048780487805, 0.06976744186046512, 0, 0.14705882352941177, 0, 0.09375, 0.07317073170731707, 0.06818181818181818, 0.18181818181818182, 0.04 ]
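The XOR rule quoted in the PXOR record (a result bit is 1 exactly where the operand bits differ, 0 where they match) can be verified directly with Python's bitwise operator, independent of the emulator:

dest, src = 0b1100, 0b1010
# XOR sets a result bit exactly where the operand bits differ: 1100 ^ 1010 = 0110.
assert dest ^ src == 0b0110
# XOR-ing a value with itself clears it, the classic register-zeroing idiom.
assert dest ^ dest == 0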
def load(**options): """ Load this driver :param \*\*options: A variadic list of options """ log.msg("Hello!, I am a dummy, so I won't do a thing!") if len(options): log.msg_debug("I have received a bunch of options, here they come ...") for o, v in options.items(): log.msg_debug("Option: name={name}, value={value}" .format(name=o, value=v))
[ "def", "load", "(", "*", "*", "options", ")", ":", "log", ".", "msg", "(", "\"Hello!, I am a dummy, so I won't do a thing!\"", ")", "if", "len", "(", "options", ")", ":", "log", ".", "msg_debug", "(", "\"I have received a bunch of options, here they come ...\"", ")", "for", "o", ",", "v", "in", "options", ".", "items", "(", ")", ":", "log", ".", "msg_debug", "(", "\"Option: name={name}, value={value}\"", ".", "format", "(", "name", "=", "o", ",", "value", "=", "v", ")", ")" ]
34.25
[ 0.05, 0.2857142857142857, 0.1, 0, 0.06, 0.2857142857142857, 0.03389830508474576, 0.1, 0.02531645569620253, 0.05555555555555555, 0.04838709677419355, 0.0784313725490196 ]
def pypy_version_monkeypatch():
    """Patch Tox to work with non-default PyPy 3 versions."""
    # Travis virtualenvs do not provide `pypy3`, which tox tries to execute.
    # This doesn't affect Travis python version `pypy3`, as the pyenv pypy3
    # is in the PATH.
    # https://github.com/travis-ci/travis-ci/issues/6304
    # Force use of the virtualenv `python`.
    version = os.environ.get('TRAVIS_PYTHON_VERSION')
    if version and default_factors and version.startswith('pypy3.3-'):
        default_factors['pypy3'] = 'python'
[ "def", "pypy_version_monkeypatch", "(", ")", ":", "# Travis virtualenv do not provide `pypy3`, which tox tries to execute.", "# This doesnt affect Travis python version `pypy3`, as the pyenv pypy3", "# is in the PATH.", "# https://github.com/travis-ci/travis-ci/issues/6304", "# Force use of the virtualenv `python`.", "version", "=", "os", ".", "environ", ".", "get", "(", "'TRAVIS_PYTHON_VERSION'", ")", "if", "version", "and", "default_factors", "and", "version", ".", "startswith", "(", "'pypy3.3-'", ")", ":", "default_factors", "[", "'pypy3'", "]", "=", "'python'" ]
52.7
[ 0.03225806451612903, 0.03278688524590164, 0.02666666666666667, 0.02702702702702703, 0.09523809523809523, 0.03571428571428571, 0.046511627906976744, 0.03773584905660377, 0.02857142857142857, 0.046511627906976744 ]
def loadCoeffs(filename): """ load igrf12 coeffs from file :param filename: file which save coeffs (str) :return: g and h list one by one (list(float)) """ gh = [] gh2arr = [] with open(filename) as f: text = f.readlines() for a in text: if a[:2] == 'g ' or a[:2] == 'h ': b = a.split()[3:] b = [float(x) for x in b] gh2arr.append(b) gh2arr = np.array(gh2arr).transpose() N = len(gh2arr) for i in range(N): if i < 19: for j in range(120): gh.append(gh2arr[i][j]) else: for p in gh2arr[i]: gh.append(p) gh.append(0) return gh
[ "def", "loadCoeffs", "(", "filename", ")", ":", "gh", "=", "[", "]", "gh2arr", "=", "[", "]", "with", "open", "(", "filename", ")", "as", "f", ":", "text", "=", "f", ".", "readlines", "(", ")", "for", "a", "in", "text", ":", "if", "a", "[", ":", "2", "]", "==", "'g '", "or", "a", "[", ":", "2", "]", "==", "'h '", ":", "b", "=", "a", ".", "split", "(", ")", "[", "3", ":", "]", "b", "=", "[", "float", "(", "x", ")", "for", "x", "in", "b", "]", "gh2arr", ".", "append", "(", "b", ")", "gh2arr", "=", "np", ".", "array", "(", "gh2arr", ")", ".", "transpose", "(", ")", "N", "=", "len", "(", "gh2arr", ")", "for", "i", "in", "range", "(", "N", ")", ":", "if", "i", "<", "19", ":", "for", "j", "in", "range", "(", "120", ")", ":", "gh", ".", "append", "(", "gh2arr", "[", "i", "]", "[", "j", "]", ")", "else", ":", "for", "p", "in", "gh2arr", "[", "i", "]", ":", "gh", ".", "append", "(", "p", ")", "gh", ".", "append", "(", "0", ")", "return", "gh" ]
28.576923
[ 0.04, 0.2857142857142857, 0.0625, 0.08163265306122448, 0.08, 0.2857142857142857, 0.18181818181818182, 0.13333333333333333, 0.06896551724137931, 0.07142857142857142, 0.09090909090909091, 0.043478260869565216, 0.06060606060606061, 0.04878048780487805, 0.0625, 0.044444444444444446, 0.08695652173913043, 0.07692307692307693, 0.09090909090909091, 0.05555555555555555, 0.046511627906976744, 0.11764705882352941, 0.05714285714285714, 0.0625, 0.1, 0.11764705882352941 ]
def get_route_name_and_type(self, route_I):
        """
        Get route short name and type

        Parameters
        ----------
        route_I: int
            route index (database specific)

        Returns
        -------
        name: str
            short name of the route, e.g. 195N
        type: int
            route_type according to the GTFS standard
        """
        cur = self.conn.cursor()
        results = cur.execute("SELECT name, type FROM routes WHERE route_I=(?)", (route_I,))
        name, rtype = results.fetchone()
        return name, int(rtype)
[ "def", "get_route_name_and_type", "(", "self", ",", "route_I", ")", ":", "cur", "=", "self", ".", "conn", ".", "cursor", "(", ")", "results", "=", "cur", ".", "execute", "(", "\"SELECT name, type FROM routes WHERE route_I=(?)\"", ",", "(", "route_I", ",", ")", ")", "name", ",", "rtype", "=", "results", ".", "fetchone", "(", ")", "return", "name", ",", "int", "(", "rtype", ")" ]
27.9
[ 0.023255813953488372, 0.18181818181818182, 0.05405405405405406, 0, 0.1111111111111111, 0.1111111111111111, 0.1, 0.06976744186046512, 0, 0.13333333333333333, 0.13333333333333333, 0.11764705882352941, 0.044444444444444446, 0.11764705882352941, 0.03773584905660377, 0.18181818181818182, 0.0625, 0.03260869565217391, 0.05, 0.06451612903225806 ]
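The query in get_route_name_and_type is a plain parameterized SELECT; the sketch below replays it against an in-memory SQLite table (the toy table layout is an assumption for illustration, not the project's full GTFS schema):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE routes (route_I INTEGER, name TEXT, type INTEGER)")
conn.execute("INSERT INTO routes VALUES (1, '195N', 3)")

cur = conn.cursor()
results = cur.execute("SELECT name, type FROM routes WHERE route_I=(?)", (1,))
name, rtype = results.fetchone()
print(name, int(rtype))  # 195N 3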
def download_tabular_rows_as_dicts(self, url, headers=1, keycolumn=1, **kwargs): # type: (str, Union[int, List[int], List[str]], int, Any) -> Dict[Dict] """Download multicolumn csv from url and return dictionary where keys are first column and values are dictionaries with keys from column headers and values from columns beneath Args: url (str): URL to download headers (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers. Defaults to 1. keycolumn (int): Number of column to be used for key. Defaults to 1. **kwargs: file_type (Optional[str]): Type of file. Defaults to inferring. delimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring. Returns: Dict[Dict]: Dictionary where keys are first column and values are dictionaries with keys from column headers and values from columns beneath """ kwargs['headers'] = headers stream = self.get_tabular_stream(url, **kwargs) output_dict = dict() headers = stream.headers key_header = headers[keycolumn - 1] for row in stream.iter(keyed=True): first_val = row[key_header] output_dict[first_val] = dict() for header in row: if header == key_header: continue else: output_dict[first_val][header] = row[header] return output_dict
[ "def", "download_tabular_rows_as_dicts", "(", "self", ",", "url", ",", "headers", "=", "1", ",", "keycolumn", "=", "1", ",", "*", "*", "kwargs", ")", ":", "# type: (str, Union[int, List[int], List[str]], int, Any) -> Dict[Dict]", "kwargs", "[", "'headers'", "]", "=", "headers", "stream", "=", "self", ".", "get_tabular_stream", "(", "url", ",", "*", "*", "kwargs", ")", "output_dict", "=", "dict", "(", ")", "headers", "=", "stream", ".", "headers", "key_header", "=", "headers", "[", "keycolumn", "-", "1", "]", "for", "row", "in", "stream", ".", "iter", "(", "keyed", "=", "True", ")", ":", "first_val", "=", "row", "[", "key_header", "]", "output_dict", "[", "first_val", "]", "=", "dict", "(", ")", "for", "header", "in", "row", ":", "if", "header", "==", "key_header", ":", "continue", "else", ":", "output_dict", "[", "first_val", "]", "[", "header", "]", "=", "row", "[", "header", "]", "return", "output_dict" ]
47.5
[ 0.025, 0.02531645569620253, 0.027522935779816515, 0.036585365853658534, 0, 0.15384615384615385, 0.07894736842105263, 0.031746031746031744, 0.05, 0.09523809523809523, 0.04, 0.04, 0, 0.125, 0.026785714285714284, 0.0392156862745098, 0, 0.18181818181818182, 0.05714285714285714, 0.03636363636363636, 0.07142857142857142, 0.0625, 0.046511627906976744, 0.046511627906976744, 0.05128205128205128, 0.046511627906976744, 0.06666666666666667, 0.05, 0.07142857142857142, 0.09523809523809523, 0.03125, 0.07692307692307693 ]
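The dict-of-dicts shape built by download_tabular_rows_as_dicts (key-column value mapping to {header: cell}, with the key column itself excluded) can be reproduced without the download stack; a minimal standard-library sketch over an in-memory CSV:

import csv
import io

text = "id,name,count\na,apples,3\nb,pears,5\n"
reader = csv.DictReader(io.StringIO(text))
key_header = reader.fieldnames[0]  # keycolumn=1 in the record's terms

output_dict = {row[key_header]: {h: v for h, v in row.items() if h != key_header}
               for row in reader}
print(output_dict)  # {'a': {'name': 'apples', 'count': '3'}, 'b': {'name': 'pears', 'count': '5'}}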
def evaluatephiforces(Pot,R,z,phi=None,t=0.,v=None): """ NAME: evaluatephiforces PURPOSE: convenience function to evaluate a possible sum of potentials INPUT: Pot - a potential or list of potentials R - cylindrical Galactocentric distance (can be Quantity) z - distance above the plane (can be Quantity) phi - azimuth (optional; can be Quantity) t - time (optional; can be Quantity) v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity) OUTPUT: F_phi(R,z,phi,t) HISTORY: 2010-04-16 - Written - Bovy (NYU) 2018-03-16 - Added velocity input for dissipative forces - Bovy (UofT) """ return _evaluatephiforces(Pot,R,z,phi=phi,t=t,v=v)
[ "def", "evaluatephiforces", "(", "Pot", ",", "R", ",", "z", ",", "phi", "=", "None", ",", "t", "=", "0.", ",", "v", "=", "None", ")", ":", "return", "_evaluatephiforces", "(", "Pot", ",", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "v", "=", "v", ")" ]
22.628571
[ 0.11538461538461539, 0.2857142857142857, 0.2222222222222222, 0, 0.125, 0, 0.16666666666666666, 0, 0.04411764705882353, 0, 0.2, 0.06521739130434782, 0, 0.0625, 0, 0.07547169811320754, 0, 0.10416666666666667, 0, 0.11627906976744186, 0, 0.045454545454545456, 0, 0.18181818181818182, 0, 0.2608695652173913, 0, 0.16666666666666666, 0, 0.1, 0, 0.05194805194805195, 0, 0.2857142857142857, 0.12962962962962962 ]
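Since evaluatephiforces is galpy's own public helper, a one-line usage sketch may help (assuming galpy is installed and still exports this name; for an axisymmetric potential the azimuthal force is zero):

from galpy.potential import MWPotential2014, evaluatephiforces

# Azimuthal force at R=1., z=0. in natural units; MWPotential2014 is
# axisymmetric, so this evaluates to 0.
print(evaluatephiforces(MWPotential2014, 1.0, 0.0))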
def Boolean(): """ Creates a validator that attempts to convert the given value to a boolean or raises an error. The following rules are used: ``None`` is converted to ``False``. ``int`` values are ``True`` except for ``0``. ``str`` values converted in lower- and uppercase: * ``y, yes, t, true`` * ``n, no, f, false`` """ @wraps(Boolean) def built(value): # Already a boolean? if isinstance(value, bool): return value # None if value == None: return False # Integers if isinstance(value, int): return not value == 0 # Strings if isinstance(value, str): if value.lower() in { 'y', 'yes', 't', 'true' }: return True elif value.lower() in { 'n', 'no', 'f', 'false' }: return False # Nope raise Error("Not a boolean value.") return built
[ "def", "Boolean", "(", ")", ":", "@", "wraps", "(", "Boolean", ")", "def", "built", "(", "value", ")", ":", "# Already a boolean?", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "value", "# None", "if", "value", "==", "None", ":", "return", "False", "# Integers", "if", "isinstance", "(", "value", ",", "int", ")", ":", "return", "not", "value", "==", "0", "# Strings", "if", "isinstance", "(", "value", ",", "str", ")", ":", "if", "value", ".", "lower", "(", ")", "in", "{", "'y'", ",", "'yes'", ",", "'t'", ",", "'true'", "}", ":", "return", "True", "elif", "value", ".", "lower", "(", ")", "in", "{", "'n'", ",", "'no'", ",", "'f'", ",", "'false'", "}", ":", "return", "False", "# Nope", "raise", "Error", "(", "\"Not a boolean value.\"", ")", "return", "built" ]
24.342105
[ 0.07142857142857142, 0.2857142857142857, 0.025974025974025976, 0.03773584905660377, 0, 0.07692307692307693, 0, 0.061224489795918366, 0, 0.07547169811320754, 0, 0.12, 0.12, 0.2857142857142857, 0.10526315789473684, 0.09523809523809523, 0.07142857142857142, 0.05714285714285714, 0.08333333333333333, 0, 0.14285714285714285, 0.12, 0.08333333333333333, 0, 0.1111111111111111, 0.058823529411764705, 0.06060606060606061, 0, 0.11764705882352941, 0.058823529411764705, 0.06666666666666667, 0.07407407407407407, 0.06451612903225806, 0.07142857142857142, 0, 0.14285714285714285, 0.046511627906976744, 0.125 ]
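A usage sketch for the Boolean validator factory above; Error is assumed to be the module's own exception type raised on non-boolean input:

validate = Boolean()

assert validate(None) is False     # None -> False
assert validate(0) is False        # 0 is the only falsy int
assert validate(7) is True
assert validate("Yes") is True     # string forms are case-insensitive
assert validate("f") is False
validate("maybe")                  # raises Error("Not a boolean value.")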
def __get_edge_by_two_vertices(self, vertex1, vertex2, key=None):
        """ Returns an instance of :class:`bg.edge.BGEdge` edge between two supplied vertices (if ``key`` is supplied, returns a :class:`bg.edge.BGEdge` instance about specified edge).

        Checks that both specified vertices are in current :class:`BreakpointGraph` and then depending on ``key`` argument, creates a new :class:`bg.edge.BGEdge` instance and incorporates respective multi-color information into it.

        :param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
        :type vertex1: any hashable object
        :param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
        :type vertex2: any hashable object
        :param key: unique identifier of the edge of interest to be retrieved from current :class:`BreakpointGraph`
        :type key: any python object. ``None`` or ``int`` is expected
        :return: edge between two specified vertices respecting a ``key`` argument.
        :rtype: :class:`bg.edge.BGEdge`
        """
        if vertex1 in self.bg and vertex2 in self.bg[vertex1]:
            if key is None:
                key = min(self.bg[vertex1][vertex2])
            return BGEdge(vertex1=vertex1, vertex2=vertex2,
                          multicolor=self.bg[vertex1][vertex2][key]["attr_dict"]["multicolor"],
                          data=self.bg[vertex1][vertex2][key]["attr_dict"]["data"])
        return None
[ "def", "__get_edge_by_two_vertices", "(", "self", ",", "vertex1", ",", "vertex2", ",", "key", "=", "None", ")", ":", "if", "vertex1", "in", "self", ".", "bg", "and", "vertex2", "in", "self", ".", "bg", "[", "vertex1", "]", ":", "if", "key", "is", "None", ":", "key", "=", "min", "(", "self", ".", "bg", "[", "vertex1", "]", "[", "vertex2", "]", ")", "return", "BGEdge", "(", "vertex1", "=", "vertex1", ",", "vertex2", "=", "vertex2", ",", "multicolor", "=", "self", ".", "bg", "[", "vertex1", "]", "[", "vertex2", "]", "[", "key", "]", "[", "\"attr_dict\"", "]", "[", "\"multicolor\"", "]", ",", "data", "=", "self", ".", "bg", "[", "vertex1", "]", "[", "vertex2", "]", "[", "key", "]", "[", "\"attr_dict\"", "]", "[", "\"data\"", "]", ")", "return", "None" ]
69.52381
[ 0.015384615384615385, 0.016216216216216217, 0, 0.04310344827586207, 0, 0.08695652173913043, 0.07142857142857142, 0.08602150537634409, 0.07142857142857142, 0.07079646017699115, 0.057971014492753624, 0.0625, 0.1794871794871795, 0.18181818181818182, 0.03225806451612903, 0.07407407407407407, 0.038461538461538464, 0.05084745762711865, 0.05263157894736842, 0.07228915662650602, 0.10526315789473684 ]
def get_version(): """ Return package version as listed in `__version__` in `init.py`. """ with open(os.path.join(os.path.dirname(__file__), 'argparsetree', '__init__.py')) as init_py: return re.search('__version__ = [\'"]([^\'"]+)[\'"]', init_py.read()).group(1)
[ "def", "get_version", "(", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'argparsetree'", ",", "'__init__.py'", ")", ")", "as", "init_py", ":", "return", "re", ".", "search", "(", "'__version__ = [\\'\"]([^\\'\"]+)[\\'\"]'", ",", "init_py", ".", "read", "(", ")", ")", ".", "group", "(", "1", ")" ]
47
[ 0.05555555555555555, 0.2857142857142857, 0.04477611940298507, 0.2857142857142857, 0.030927835051546393, 0.03488372093023256 ]
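The version-extraction regex in get_version can be exercised on its own; a quick standalone check:

import re

init_py = "__version__ = '0.3.1'\n"
version = re.search('__version__ = [\'"]([^\'"]+)[\'"]', init_py).group(1)
assert version == "0.3.1"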
def get(self, obj_id, byte_range=None):
		'''Download and return a file object or a specified byte_range from it.
			See HTTP Range header (rfc2616) for possible byte_range formats.
			Examples: "0-499" - byte offsets 0-499 (inclusive), "-500" - final 500 bytes.'''
		kwz = dict()
		if byte_range:
			kwz['headers'] = dict(Range='bytes={}'.format(byte_range))
		return self(self._api_url_join(obj_id, 'content'), dict(download='true'), raw=True, **kwz)
[ "def", "get", "(", "self", ",", "obj_id", ",", "byte_range", "=", "None", ")", ":", "kwz", "=", "dict", "(", ")", "if", "byte_range", ":", "kwz", "[", "'headers'", "]", "=", "dict", "(", "Range", "=", "'bytes={}'", ".", "format", "(", "byte_range", ")", ")", "return", "self", "(", "self", ".", "_api_url_join", "(", "obj_id", ",", "'content'", ")", ",", "dict", "(", "download", "=", "'true'", ")", ",", "raw", "=", "True", ",", "*", "*", "kwz", ")" ]
63.285714
[ 0.02564102564102564, 0.0410958904109589, 0.05970149253731343, 0.060240963855421686, 0.21428571428571427, 0.05333333333333334, 0.043478260869565216 ]
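The byte_range values the get record accepts go straight into an HTTP Range header; a hedged sketch of the same partial request with the requests library (the URL is a placeholder, and requests is an assumption, not the client the record uses):

import requests

# "0-499" asks for byte offsets 0-499 inclusive; "-500" asks for the final 500 bytes.
resp = requests.get("https://example.com/file.bin",  # placeholder URL
                    headers={"Range": "bytes=0-499"})
print(resp.status_code)   # 206 Partial Content when the server honours the range
print(len(resp.content))  # at most 500 bytes in that case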
def prepare_for_submission(self, folder): """Create the input files from the input nodes passed to this instance of the `CalcJob`. :param folder: an `aiida.common.folders.Folder` to temporarily write files on disk :return: `aiida.common.datastructures.CalcInfo` instance """ # create input structure if 'structure' in self.inputs: self.inputs.structure.export(folder.get_abs_path(self._DEFAULT_COORDS_FILE_NAME), fileformat="xyz") # create cp2k input file inp = Cp2kInput(self.inputs.parameters.get_dict()) inp.add_keyword("GLOBAL/PROJECT", self._DEFAULT_PROJECT_NAME) if 'structure' in self.inputs: for i, letter in enumerate('ABC'): inp.add_keyword('FORCE_EVAL/SUBSYS/CELL/' + letter, '{:<15} {:<15} {:<15}'.format(*self.inputs.structure.cell[i])) topo = "FORCE_EVAL/SUBSYS/TOPOLOGY" inp.add_keyword(topo + "/COORD_FILE_NAME", self._DEFAULT_COORDS_FILE_NAME) inp.add_keyword(topo + "/COORD_FILE_FORMAT", "XYZ") with io.open(folder.get_abs_path(self._DEFAULT_INPUT_FILE), mode="w", encoding="utf-8") as fobj: fobj.write(inp.render()) if 'settings' in self.inputs: settings = self.inputs.settings.get_dict() else: settings = {} # create code info codeinfo = CodeInfo() codeinfo.cmdline_params = settings.pop('cmdline', []) + ["-i", self._DEFAULT_INPUT_FILE] codeinfo.stdout_name = self._DEFAULT_OUTPUT_FILE codeinfo.join_files = True codeinfo.code_uuid = self.inputs.code.uuid # create calc info calcinfo = CalcInfo() calcinfo.stdin_name = self._DEFAULT_INPUT_FILE calcinfo.uuid = self.uuid calcinfo.cmdline_params = codeinfo.cmdline_params calcinfo.stdin_name = self._DEFAULT_INPUT_FILE calcinfo.stdout_name = self._DEFAULT_OUTPUT_FILE calcinfo.codes_info = [codeinfo] # file lists calcinfo.remote_symlink_list = [] if 'file' in self.inputs: calcinfo.local_copy_list = [] for fobj in self.inputs.file.values(): calcinfo.local_copy_list.append((fobj.uuid, fobj.filename, fobj.filename)) calcinfo.remote_copy_list = [] calcinfo.retrieve_list = [self._DEFAULT_OUTPUT_FILE, self._DEFAULT_RESTART_FILE_NAME] calcinfo.retrieve_list += settings.pop('additional_retrieve_list', []) # symlinks if 'parent_calc_folder' in self.inputs: comp_uuid = self.inputs.parent_calc_folder.computer.uuid remote_path = self.inputs.parent_calc_folder.get_remote_path() symlink = (comp_uuid, remote_path, self._DEFAULT_PARENT_CALC_FLDR_NAME) calcinfo.remote_symlink_list.append(symlink) # check for left over settings if settings: raise InputValidationError("The following keys have been found " + "in the settings input node {}, ".format(self.pk) + "but were not understood: " + ",".join(settings.keys())) return calcinfo
[ "def", "prepare_for_submission", "(", "self", ",", "folder", ")", ":", "# create input structure", "if", "'structure'", "in", "self", ".", "inputs", ":", "self", ".", "inputs", ".", "structure", ".", "export", "(", "folder", ".", "get_abs_path", "(", "self", ".", "_DEFAULT_COORDS_FILE_NAME", ")", ",", "fileformat", "=", "\"xyz\"", ")", "# create cp2k input file", "inp", "=", "Cp2kInput", "(", "self", ".", "inputs", ".", "parameters", ".", "get_dict", "(", ")", ")", "inp", ".", "add_keyword", "(", "\"GLOBAL/PROJECT\"", ",", "self", ".", "_DEFAULT_PROJECT_NAME", ")", "if", "'structure'", "in", "self", ".", "inputs", ":", "for", "i", ",", "letter", "in", "enumerate", "(", "'ABC'", ")", ":", "inp", ".", "add_keyword", "(", "'FORCE_EVAL/SUBSYS/CELL/'", "+", "letter", ",", "'{:<15} {:<15} {:<15}'", ".", "format", "(", "*", "self", ".", "inputs", ".", "structure", ".", "cell", "[", "i", "]", ")", ")", "topo", "=", "\"FORCE_EVAL/SUBSYS/TOPOLOGY\"", "inp", ".", "add_keyword", "(", "topo", "+", "\"/COORD_FILE_NAME\"", ",", "self", ".", "_DEFAULT_COORDS_FILE_NAME", ")", "inp", ".", "add_keyword", "(", "topo", "+", "\"/COORD_FILE_FORMAT\"", ",", "\"XYZ\"", ")", "with", "io", ".", "open", "(", "folder", ".", "get_abs_path", "(", "self", ".", "_DEFAULT_INPUT_FILE", ")", ",", "mode", "=", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "fobj", ":", "fobj", ".", "write", "(", "inp", ".", "render", "(", ")", ")", "if", "'settings'", "in", "self", ".", "inputs", ":", "settings", "=", "self", ".", "inputs", ".", "settings", ".", "get_dict", "(", ")", "else", ":", "settings", "=", "{", "}", "# create code info", "codeinfo", "=", "CodeInfo", "(", ")", "codeinfo", ".", "cmdline_params", "=", "settings", ".", "pop", "(", "'cmdline'", ",", "[", "]", ")", "+", "[", "\"-i\"", ",", "self", ".", "_DEFAULT_INPUT_FILE", "]", "codeinfo", ".", "stdout_name", "=", "self", ".", "_DEFAULT_OUTPUT_FILE", "codeinfo", ".", "join_files", "=", "True", "codeinfo", ".", "code_uuid", "=", "self", ".", "inputs", ".", "code", ".", "uuid", "# create calc info", "calcinfo", "=", "CalcInfo", "(", ")", "calcinfo", ".", "stdin_name", "=", "self", ".", "_DEFAULT_INPUT_FILE", "calcinfo", ".", "uuid", "=", "self", ".", "uuid", "calcinfo", ".", "cmdline_params", "=", "codeinfo", ".", "cmdline_params", "calcinfo", ".", "stdin_name", "=", "self", ".", "_DEFAULT_INPUT_FILE", "calcinfo", ".", "stdout_name", "=", "self", ".", "_DEFAULT_OUTPUT_FILE", "calcinfo", ".", "codes_info", "=", "[", "codeinfo", "]", "# file lists", "calcinfo", ".", "remote_symlink_list", "=", "[", "]", "if", "'file'", "in", "self", ".", "inputs", ":", "calcinfo", ".", "local_copy_list", "=", "[", "]", "for", "fobj", "in", "self", ".", "inputs", ".", "file", ".", "values", "(", ")", ":", "calcinfo", ".", "local_copy_list", ".", "append", "(", "(", "fobj", ".", "uuid", ",", "fobj", ".", "filename", ",", "fobj", ".", "filename", ")", ")", "calcinfo", ".", "remote_copy_list", "=", "[", "]", "calcinfo", ".", "retrieve_list", "=", "[", "self", ".", "_DEFAULT_OUTPUT_FILE", ",", "self", ".", "_DEFAULT_RESTART_FILE_NAME", "]", "calcinfo", ".", "retrieve_list", "+=", "settings", ".", "pop", "(", "'additional_retrieve_list'", ",", "[", "]", ")", "# symlinks", "if", "'parent_calc_folder'", "in", "self", ".", "inputs", ":", "comp_uuid", "=", "self", ".", "inputs", ".", "parent_calc_folder", ".", "computer", ".", "uuid", "remote_path", "=", "self", ".", "inputs", ".", "parent_calc_folder", ".", "get_remote_path", "(", ")", "symlink", "=", "(", "comp_uuid", ",", "remote_path", 
",", "self", ".", "_DEFAULT_PARENT_CALC_FLDR_NAME", ")", "calcinfo", ".", "remote_symlink_list", ".", "append", "(", "symlink", ")", "# check for left over settings", "if", "settings", ":", "raise", "InputValidationError", "(", "\"The following keys have been found \"", "+", "\"in the settings input node {}, \"", ".", "format", "(", "self", ".", "pk", ")", "+", "\"but were not understood: \"", "+", "\",\"", ".", "join", "(", "settings", ".", "keys", "(", ")", ")", ")", "return", "calcinfo" ]
45.228571
[ 0.024390243902439025, 0.03125, 0, 0.05555555555555555, 0.0625, 0.18181818181818182, 0.0625, 0.05263157894736842, 0.02702702702702703, 0, 0.0625, 0.034482758620689655, 0.028985507246376812, 0.05263157894736842, 0.043478260869565216, 0.04477611940298507, 0.0425531914893617, 0.0425531914893617, 0.03488372093023256, 0.031746031746031744, 0, 0.028846153846153848, 0.05555555555555555, 0, 0.05405405405405406, 0.037037037037037035, 0.15384615384615385, 0.08, 0, 0.07692307692307693, 0.06896551724137931, 0.03125, 0.03571428571428571, 0.058823529411764705, 0.04, 0, 0.07692307692307693, 0.06896551724137931, 0.037037037037037035, 0.06060606060606061, 0.03508771929824561, 0.037037037037037035, 0.03571428571428571, 0.05, 0, 0.1, 0.04878048780487805, 0.06060606060606061, 0.04878048780487805, 0.04, 0.03333333333333333, 0, 0.05263157894736842, 0.03225806451612903, 0.02564102564102564, 0, 0.1111111111111111, 0.0425531914893617, 0.029411764705882353, 0.02702702702702703, 0.03614457831325301, 0.03571428571428571, 0, 0.05263157894736842, 0.1, 0.038461538461538464, 0.03333333333333333, 0.06153846153846154, 0, 0.08695652173913043 ]
def send(self, sender: PytgbotApiBot): """ Send the message via pytgbot. :param sender: The bot instance to send with. :type sender: pytgbot.bot.Bot :rtype: PytgbotApiMessage """ return sender.send_location( # receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id latitude=self.latitude, longitude=self.longitude, chat_id=self.receiver, reply_to_message_id=self.reply_id, live_period=self.live_period, disable_notification=self.disable_notification, reply_markup=self.reply_markup )
[ "def", "send", "(", "self", ",", "sender", ":", "PytgbotApiBot", ")", ":", "return", "sender", ".", "send_location", "(", "# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id", "latitude", "=", "self", ".", "latitude", ",", "longitude", "=", "self", ".", "longitude", ",", "chat_id", "=", "self", ".", "receiver", ",", "reply_to_message_id", "=", "self", ".", "reply_id", ",", "live_period", "=", "self", ".", "live_period", ",", "disable_notification", "=", "self", ".", "disable_notification", ",", "reply_markup", "=", "self", ".", "reply_markup", ")" ]
46.615385
[ 0.02631578947368421, 0.18181818181818182, 0.05405405405405406, 0, 0.05660377358490566, 0.07894736842105263, 0, 0.09090909090909091, 0.18181818181818182, 0.08333333333333333, 0.026785714285714284, 0.043859649122807015, 0.3333333333333333 ]
def hide_alert(self, id, **kwargs):  # noqa: E501
        """Hide a specific integration alert  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.hide_alert(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: (required)
        :return: ResponseContainerAlert
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.hide_alert_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.hide_alert_with_http_info(id, **kwargs)  # noqa: E501
            return data
[ "def", "hide_alert", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "hide_alert_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "hide_alert_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
40
[ 0.02040816326530612, 0.03389830508474576, 0, 0.13636363636363635, 0.02666666666666667, 0.04918032786885246, 0.05454545454545454, 0.09090909090909091, 0, 0.10344827586206896, 0.09090909090909091, 0.07692307692307693, 0.05357142857142857, 0.06818181818181818, 0.18181818181818182, 0.0425531914893617, 0.05714285714285714, 0.025974025974025976, 0.15384615384615385, 0.02531645569620253, 0.08695652173913043 ]
def plot_qq_exp(fignum, I, title, subplot=False):
    """
    plots data against an exponential distribution in 0=>90.

    Parameters
    _________
    fignum : matplotlib figure number
    I : data
    title : plot title
    subplot : boolean, if True plot as a subplot with 1 row, two columns, with fignum as the plot number
    """
    if subplot == True:
        plt.subplot(1, 2, fignum)
    else:
        plt.figure(num=fignum)
    X, Y, dpos, dneg = [], [], 0., 0.
    rad = old_div(np.pi, 180.)
    xsum = 0
    for i in I:
        theta = (90. - i) * rad
        X.append(1. - np.cos(theta))
        xsum += X[-1]
    X.sort()
    n = float(len(X))
    kappa = old_div((n - 1.), xsum)
    for i in range(len(X)):
        p = old_div((float(i) - 0.5), n)
        Y.append(-np.log(1. - p))
        f = 1. - np.exp(-kappa * X[i])
        ds = old_div(float(i), n) - f
        if dpos < ds:
            dpos = ds
        ds = f - old_div((float(i) - 1.), n)
        if dneg < ds:
            dneg = ds
    if dneg > dpos:
        ds = dneg
    else:
        ds = dpos
    Me = (ds - (old_div(0.2, n))) * (np.sqrt(n) + 0.26 +
                                     (old_div(0.5, (np.sqrt(n)))))  # Eq. 5.15 from Fisher et al. (1987)
    plt.plot(Y, X, 'ro')
    bounds = plt.axis()
    plt.axis([0, bounds[1], 0., bounds[3]])
    notestr = 'N: ' + '%i' % (n)
    plt.text(.1 * bounds[1], .9 * bounds[3], notestr)
    notestr = 'Me: ' + '%7.3f' % (Me)
    plt.text(.1 * bounds[1], .8 * bounds[3], notestr)
    if Me > 1.094:
        notestr = "Not Exponential"
    else:
        notestr = "Exponential (95%)"
    plt.text(.1 * bounds[1], .7 * bounds[3], notestr)
    plt.title(title)
    plt.xlabel('Exponential Quantile')
    plt.ylabel('Data Quantile')
    return Me, 1.094
[ "def", "plot_qq_exp", "(", "fignum", ",", "I", ",", "title", ",", "subplot", "=", "False", ")", ":", "if", "subplot", "==", "True", ":", "plt", ".", "subplot", "(", "1", ",", "2", ",", "fignum", ")", "else", ":", "plt", ".", "figure", "(", "num", "=", "fignum", ")", "X", ",", "Y", ",", "dpos", ",", "dneg", "=", "[", "]", ",", "[", "]", ",", "0.", ",", "0.", "rad", "=", "old_div", "(", "np", ".", "pi", ",", "180.", ")", "xsum", "=", "0", "for", "i", "in", "I", ":", "theta", "=", "(", "90.", "-", "i", ")", "*", "rad", "X", ".", "append", "(", "1.", "-", "np", ".", "cos", "(", "theta", ")", ")", "xsum", "+=", "X", "[", "-", "1", "]", "X", ".", "sort", "(", ")", "n", "=", "float", "(", "len", "(", "X", ")", ")", "kappa", "=", "old_div", "(", "(", "n", "-", "1.", ")", ",", "xsum", ")", "for", "i", "in", "range", "(", "len", "(", "X", ")", ")", ":", "p", "=", "old_div", "(", "(", "float", "(", "i", ")", "-", "0.5", ")", ",", "n", ")", "Y", ".", "append", "(", "-", "np", ".", "log", "(", "1.", "-", "p", ")", ")", "f", "=", "1.", "-", "np", ".", "exp", "(", "-", "kappa", "*", "X", "[", "i", "]", ")", "ds", "=", "old_div", "(", "float", "(", "i", ")", ",", "n", ")", "-", "f", "if", "dpos", "<", "ds", ":", "dpos", "=", "ds", "ds", "=", "f", "-", "old_div", "(", "(", "float", "(", "i", ")", "-", "1.", ")", ",", "n", ")", "if", "dneg", "<", "ds", ":", "dneg", "=", "ds", "if", "dneg", ">", "dpos", ":", "ds", "=", "dneg", "else", ":", "ds", "=", "dpos", "Me", "=", "(", "ds", "-", "(", "old_div", "(", "0.2", ",", "n", ")", ")", ")", "*", "(", "np", ".", "sqrt", "(", "n", ")", "+", "0.26", "+", "(", "old_div", "(", "0.5", ",", "(", "np", ".", "sqrt", "(", "n", ")", ")", ")", ")", ")", "# Eq. 5.15 from Fisher et al. (1987)", "plt", ".", "plot", "(", "Y", ",", "X", ",", "'ro'", ")", "bounds", "=", "plt", ".", "axis", "(", ")", "plt", ".", "axis", "(", "[", "0", ",", "bounds", "[", "1", "]", ",", "0.", ",", "bounds", "[", "3", "]", "]", ")", "notestr", "=", "'N: '", "+", "'%i'", "%", "(", "n", ")", "plt", ".", "text", "(", ".1", "*", "bounds", "[", "1", "]", ",", ".9", "*", "bounds", "[", "3", "]", ",", "notestr", ")", "notestr", "=", "'Me: '", "+", "'%7.3f'", "%", "(", "Me", ")", "plt", ".", "text", "(", ".1", "*", "bounds", "[", "1", "]", ",", ".8", "*", "bounds", "[", "3", "]", ",", "notestr", ")", "if", "Me", ">", "1.094", ":", "notestr", "=", "\"Not Exponential\"", "else", ":", "notestr", "=", "\"Exponential (95%)\"", "plt", ".", "text", "(", ".1", "*", "bounds", "[", "1", "]", ",", ".7", "*", "bounds", "[", "3", "]", ",", "notestr", ")", "plt", ".", "title", "(", "title", ")", "plt", ".", "xlabel", "(", "'Exponential Quantile'", ")", "plt", ".", "ylabel", "(", "'Data Quantile'", ")", "return", "Me", ",", "1.094" ]
29.568966
[ 0.04081632653061224, 0.2857142857142857, 0.06666666666666667, 0, 0.14285714285714285, 0.15384615384615385, 0.08108108108108109, 0.25, 0.13636363636363635, 0.04081632653061224, 0.2857142857142857, 0.13043478260869565, 0.06060606060606061, 0.2222222222222222, 0.06666666666666667, 0.05405405405405406, 0.06666666666666667, 0.16666666666666666, 0.13333333333333333, 0.06451612903225806, 0.05555555555555555, 0.09523809523809523, 0.16666666666666666, 0.09523809523809523, 0.05714285714285714, 0.07407407407407407, 0.05, 0.06060606060606061, 0.05263157894736842, 0.05405405405405406, 0.09523809523809523, 0.09523809523809523, 0.045454545454545456, 0.09523809523809523, 0.09523809523809523, 0.10526315789473684, 0.11764705882352941, 0.2222222222222222, 0.11764705882352941, 0.05357142857142857, 0.04807692307692308, 0, 0.08333333333333333, 0.08695652173913043, 0.046511627906976744, 0.0625, 0.03773584905660377, 0.05405405405405406, 0.03773584905660377, 0.1111111111111111, 0.05714285714285714, 0.2222222222222222, 0.05405405405405406, 0.03773584905660377, 0.1, 0.05263157894736842, 0.06451612903225806, 0.1 ]
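Stripped of the plotting, plot_qq_exp computes a Kolmogorov-Smirnov-style distance between the data and a fitted exponential, then scales it into the Me statistic of Eq. 5.15 (Fisher et al., 1987). A numpy-only sketch of that computation, mirroring the record's loop:

import numpy as np

def exp_qq_stat(incl):
    """Me statistic (Eq. 5.15, Fisher et al. 1987) for inclinations in degrees."""
    x = np.sort(1.0 - np.cos(np.radians(90.0 - incl)))
    n = len(x)
    kappa = (n - 1.0) / x.sum()
    f = 1.0 - np.exp(-kappa * x)  # fitted exponential CDF at each data point
    i = np.arange(n)
    ds = max((i / n - f).max(), (f - (i - 1.0) / n).max())
    return (ds - 0.2 / n) * (np.sqrt(n) + 0.26 + 0.5 / np.sqrt(n))

incl = np.linspace(2.0, 88.0, 60)  # toy inclination data in degrees
print(exp_qq_stat(incl))           # values above 1.094 reject the exponential fit at 95%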
def remove_listener(self, listener): """Remove the given listener from the wrapped client. :param listener: A listener previously passed to :meth:`add_listener`. """ internal_listener = self._internal_listeners.pop(listener) return self._client.remove_listener(internal_listener)
[ "def", "remove_listener", "(", "self", ",", "listener", ")", ":", "internal_listener", "=", "self", ".", "_internal_listeners", ".", "pop", "(", "listener", ")", "return", "self", ".", "_client", ".", "remove_listener", "(", "internal_listener", ")" ]
44.857143
[ 0.027777777777777776, 0.03278688524590164, 0, 0.08974358974358974, 0.18181818181818182, 0.030303030303030304, 0.03225806451612903 ]
def set_object(self, name: str, pobj: ParameterObject) -> None: """Add or update an existing object.""" self.objects[zlib.crc32(name.encode())] = pobj
[ "def", "set_object", "(", "self", ",", "name", ":", "str", ",", "pobj", ":", "ParameterObject", ")", "->", "None", ":", "self", ".", "objects", "[", "zlib", ".", "crc32", "(", "name", ".", "encode", "(", ")", ")", "]", "=", "pobj" ]
54.666667
[ 0.015873015873015872, 0.0425531914893617, 0.037037037037037035 ]
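The record keys its object table by zlib.crc32 of the name, so later lookups need only recompute the hash; the key is deterministic and easy to inspect:

import zlib

key = zlib.crc32("my_object".encode())
assert key == zlib.crc32(b"my_object")  # same name, same 32-bit key
print(hex(key))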
def renew_lock(self):
        '''
        Renew the lock on itself if the queue name, or the topic name and
        subscription name, are found.
        '''
        if self._queue_name:
            self.service_bus_service.renew_lock_queue_message(
                self._queue_name,
                self.broker_properties['SequenceNumber'],
                self.broker_properties['LockToken'])
        elif self._topic_name and self._subscription_name:
            self.service_bus_service.renew_lock_subscription_message(
                self._topic_name,
                self._subscription_name,
                self.broker_properties['SequenceNumber'],
                self.broker_properties['LockToken'])
        else:
            raise AzureServiceBusPeekLockError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_RENEW_LOCK)
[ "def", "renew_lock", "(", "self", ")", ":", "if", "self", ".", "_queue_name", ":", "self", ".", "service_bus_service", ".", "renew_lock_queue_message", "(", "self", ".", "_queue_name", ",", "self", ".", "broker_properties", "[", "'SequenceNumber'", "]", ",", "self", ".", "broker_properties", "[", "'LockToken'", "]", ")", "elif", "self", ".", "_topic_name", "and", "self", ".", "_subscription_name", ":", "self", ".", "service_bus_service", ".", "renew_lock_subscription_message", "(", "self", ".", "_topic_name", ",", "self", ".", "_subscription_name", ",", "self", ".", "broker_properties", "[", "'SequenceNumber'", "]", ",", "self", ".", "broker_properties", "[", "'LockToken'", "]", ")", "else", ":", "raise", "AzureServiceBusPeekLockError", "(", "_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_RENEW_LOCK", ")" ]
47.875
[ 0.047619047619047616, 0.036585365853658534, 0.17647058823529413, 0.07142857142857142, 0.04838709677419355, 0.06060606060606061, 0.03508771929824561, 0.057692307692307696, 0.034482758620689655, 0.043478260869565216, 0.06060606060606061, 0.05, 0.03508771929824561, 0.057692307692307696, 0.15384615384615385, 0.03260869565217391 ]
def get_distributed_seismicity_source_nodes(source): """ Returns list of nodes of attributes common to all distributed seismicity source classes :param source: Seismic source as instance of :class: `openquake.hazardlib.source.area.AreaSource` or :class: `openquake.hazardlib.source.point.PointSource` :returns: List of instances of :class:`openquake.baselib.node.Node` """ source_nodes = [] # parse msr source_nodes.append( Node("magScaleRel", text=source.magnitude_scaling_relationship.__class__.__name__)) # Parse aspect ratio source_nodes.append( Node("ruptAspectRatio", text=source.rupture_aspect_ratio)) # Parse MFD source_nodes.append(obj_to_node(source.mfd)) # Parse nodal plane distribution source_nodes.append( build_nodal_plane_dist(source.nodal_plane_distribution)) # Parse hypocentral depth distribution source_nodes.append( build_hypo_depth_dist(source.hypocenter_distribution)) return source_nodes
[ "def", "get_distributed_seismicity_source_nodes", "(", "source", ")", ":", "source_nodes", "=", "[", "]", "# parse msr", "source_nodes", ".", "append", "(", "Node", "(", "\"magScaleRel\"", ",", "text", "=", "source", ".", "magnitude_scaling_relationship", ".", "__class__", ".", "__name__", ")", ")", "# Parse aspect ratio", "source_nodes", ".", "append", "(", "Node", "(", "\"ruptAspectRatio\"", ",", "text", "=", "source", ".", "rupture_aspect_ratio", ")", ")", "# Parse MFD", "source_nodes", ".", "append", "(", "obj_to_node", "(", "source", ".", "mfd", ")", ")", "# Parse nodal plane distribution", "source_nodes", ".", "append", "(", "build_nodal_plane_dist", "(", "source", ".", "nodal_plane_distribution", ")", ")", "# Parse hypocentral depth distribution", "source_nodes", ".", "append", "(", "build_hypo_depth_dist", "(", "source", ".", "hypocenter_distribution", ")", ")", "return", "source_nodes" ]
35.655172
[ 0.019230769230769232, 0.2857142857142857, 0.02631578947368421, 0.1111111111111111, 0, 0.16666666666666666, 0.08888888888888889, 0.07936507936507936, 0.05555555555555555, 0.23076923076923078, 0.09230769230769231, 0.2857142857142857, 0.09523809523809523, 0.125, 0.125, 0.1111111111111111, 0.06578947368421052, 0.08333333333333333, 0.125, 0.045454545454545456, 0.13333333333333333, 0.041666666666666664, 0.05555555555555555, 0.125, 0.046875, 0.047619047619047616, 0.125, 0.04838709677419355, 0.08695652173913043 ]
def cdx_filter(cdx_iter, filter_strings):
    """
    Filter CDX by regex. If a filter is of :samp:`{field}:{regex}` form,
    apply the filter to :samp:`cdx[{field}]`.
    """
    # Support single strings as well
    if isinstance(filter_strings, str):
        filter_strings = [filter_strings]

    filters = [CDXFilter(filter_str) for filter_str in filter_strings]

    for cdx in cdx_iter:
        if all(x(cdx) for x in filters):
            yield cdx
[ "def", "cdx_filter", "(", "cdx_iter", ",", "filter_strings", ")", ":", "# Support single strings as well", "if", "isinstance", "(", "filter_strings", ",", "str", ")", ":", "filter_strings", "=", "[", "filter_strings", "]", "filters", "=", "[", "CDXFilter", "(", "filter_str", ")", "for", "filter_str", "in", "filter_strings", "]", "for", "cdx", "in", "cdx_iter", ":", "if", "all", "(", "x", "(", "cdx", ")", "for", "x", "in", "filters", ")", ":", "yield", "cdx" ]
31.285714
[ 0.024390243902439025, 0.2857142857142857, 0.09859154929577464, 0.14634146341463414, 0.2857142857142857, 0.05555555555555555, 0.05128205128205128, 0.04878048780487805, 0, 0.02857142857142857, 0, 0.08333333333333333, 0.05, 0.09523809523809523 ]
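cdx_filter delegates the matching itself to CDXFilter, whose body is not part of the record; the docstring's field:regex contract suggests roughly the following shape (a simplified sketch, not pywb's actual implementation):

import re

class SimpleCDXFilter:
    """Minimal 'field:regex' matcher applied to cdx[field] (sketch only)."""
    def __init__(self, filter_str):
        field, _, regex = filter_str.partition(':')
        self.field = field
        self.regex = re.compile(regex)

    def __call__(self, cdx):
        return bool(self.regex.search(cdx.get(self.field, '')))

flt = SimpleCDXFilter('status:^2')
assert flt({'status': '200', 'url': 'http://example.com/'})
assert not flt({'status': '404'})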
def _edges_from_permutation(self, feature_pathway_dict): """Given a dictionary mapping each feature to the pathways overrepresented in the feature, build a CoNetwork by creating edges for every pairwise combination of pathways in a feature. """ network_edges = {} for feature, pathway_list in feature_pathway_dict.items(): for i in range(len(pathway_list)): for j in range(i + 1, len(pathway_list)): vertex_i = pathway_list[i] vertex_j = pathway_list[j] new_edge = self.edge_tuple(vertex_i, vertex_j) if new_edge not in network_edges: network_edges[new_edge] = [] network_edges[new_edge].append(feature) self._augment_network(network_edges)
[ "def", "_edges_from_permutation", "(", "self", ",", "feature_pathway_dict", ")", ":", "network_edges", "=", "{", "}", "for", "feature", ",", "pathway_list", "in", "feature_pathway_dict", ".", "items", "(", ")", ":", "for", "i", "in", "range", "(", "len", "(", "pathway_list", ")", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "len", "(", "pathway_list", ")", ")", ":", "vertex_i", "=", "pathway_list", "[", "i", "]", "vertex_j", "=", "pathway_list", "[", "j", "]", "new_edge", "=", "self", ".", "edge_tuple", "(", "vertex_i", ",", "vertex_j", ")", "if", "new_edge", "not", "in", "network_edges", ":", "network_edges", "[", "new_edge", "]", "=", "[", "]", "network_edges", "[", "new_edge", "]", ".", "append", "(", "feature", ")", "self", ".", "_augment_network", "(", "network_edges", ")" ]
52.0625
[ 0.017857142857142856, 0.030303030303030304, 0.03333333333333333, 0.02531645569620253, 0.18181818181818182, 0.07692307692307693, 0.030303030303030304, 0.043478260869565216, 0.03508771929824561, 0.043478260869565216, 0.043478260869565216, 0.030303030303030304, 0.03773584905660377, 0.038461538461538464, 0.03389830508474576, 0.045454545454545456 ]
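The nested index loops in _edges_from_permutation enumerate every unordered pathway pair per feature; itertools.combinations expresses the same edge building compactly (a sketch with toy data, using plain tuples in place of the record's edge_tuple):

from collections import defaultdict
from itertools import combinations

feature_pathway_dict = {
    "gene_a": ["p1", "p2", "p3"],
    "gene_b": ["p2", "p3"],
}

network_edges = defaultdict(list)
for feature, pathways in feature_pathway_dict.items():
    for edge in combinations(pathways, 2):  # every unordered pair, as in the record
        network_edges[edge].append(feature)

print(dict(network_edges))
# {('p1', 'p2'): ['gene_a'], ('p1', 'p3'): ['gene_a'], ('p2', 'p3'): ['gene_a', 'gene_b']}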
def check_lazy_load_terreinobject(f): ''' Decorator function to lazy load a :class:`Terreinobject`. ''' def wrapper(*args): terreinobject = args[0] if ( terreinobject._centroid is None or terreinobject._bounding_box is None or terreinobject._metadata is None ): log.debug('Lazy loading Terreinobject %s', terreinobject.id) terreinobject.check_gateway() t = terreinobject.gateway.get_terreinobject_by_id(terreinobject.id) terreinobject._centroid = t._centroid terreinobject._bounding_box = t._bounding_box terreinobject._metadata = t._metadata return f(*args) return wrapper
[ "def", "check_lazy_load_terreinobject", "(", "f", ")", ":", "def", "wrapper", "(", "*", "args", ")", ":", "terreinobject", "=", "args", "[", "0", "]", "if", "(", "terreinobject", ".", "_centroid", "is", "None", "or", "terreinobject", ".", "_bounding_box", "is", "None", "or", "terreinobject", ".", "_metadata", "is", "None", ")", ":", "log", ".", "debug", "(", "'Lazy loading Terreinobject %s'", ",", "terreinobject", ".", "id", ")", "terreinobject", ".", "check_gateway", "(", ")", "t", "=", "terreinobject", ".", "gateway", ".", "get_terreinobject_by_id", "(", "terreinobject", ".", "id", ")", "terreinobject", ".", "_centroid", "=", "t", ".", "_centroid", "terreinobject", ".", "_bounding_box", "=", "t", ".", "_bounding_box", "terreinobject", ".", "_metadata", "=", "t", ".", "_metadata", "return", "f", "(", "*", "args", ")", "return", "wrapper" ]
37.631579
[ 0.02702702702702703, 0.2857142857142857, 0.09836065573770492, 0.2857142857142857, 0.08695652173913043, 0.06451612903225806, 0.25, 0.043478260869565216, 0.04, 0.046511627906976744, 0.3, 0.027777777777777776, 0.04878048780487805, 0.02531645569620253, 0.04081632653061224, 0.03508771929824561, 0.04081632653061224, 0.08695652173913043, 0.1111111111111111 ]
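check_lazy_load_terreinobject is one instance of a recurring pattern: check cached attributes, fetch once from a gateway, then call through. A generic, self-contained sketch of the same idea with hypothetical names (Record, _fetch, and lazy_load are all illustrative, not part of the original library):

import functools

def lazy_load(fetch_attr):
    """Run obj.<fetch_attr>() once, only while obj._data is still unloaded."""
    def decorator(f):
        @functools.wraps(f)
        def wrapper(obj, *args, **kwargs):
            if obj._data is None:
                obj._data = getattr(obj, fetch_attr)()
            return f(obj, *args, **kwargs)
        return wrapper
    return decorator

class Record:
    def __init__(self):
        self._data = None

    def _fetch(self):  # stands in for the gateway round-trip
        return {"loaded": True}

    @lazy_load("_fetch")
    def data(self):
        return self._data

print(Record().data())  # {'loaded': True}; later calls skip the fetch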
def invoke(awsclient, function_name, payload, invocation_type=None,
           alias_name=ALIAS_NAME, version=None, outfile=None):
    """Invoke a lambda function and return its response payload.

    :param awsclient: client wrapper that provides get_client('lambda')
    :param function_name: name of the lambda function to invoke
    :param payload: JSON payload string, or a 'file://' path to read it from
    :param invocation_type: lambda invocation type, defaults to 'RequestResponse'
    :param alias_name: alias used as qualifier when no version is given
    :param version: function version used as qualifier
    :param outfile: write response to file
    :return: response payload
    """
    log.debug('invoking lambda function: %s', function_name)
    client_lambda = awsclient.get_client('lambda')
    if invocation_type is None:
        invocation_type = 'RequestResponse'
    if payload.startswith('file://'):
        log.debug('reading payload from file: %s' % payload)
        with open(payload[7:], 'r') as pfile:
            payload = pfile.read()

    if version:
        response = client_lambda.invoke(
            FunctionName=function_name,
            InvocationType=invocation_type,
            Payload=payload,
            Qualifier=version
        )
    else:
        response = client_lambda.invoke(
            FunctionName=function_name,
            InvocationType=invocation_type,
            Payload=payload,
            Qualifier=alias_name
        )

    results = response['Payload'].read()  # payload is a 'StreamingBody'
    log.debug('invoke completed')
    # write to file
    if outfile:
        with open(outfile, 'w') as ofile:
            ofile.write(str(results))
            ofile.flush()
        return
    else:
        return results
[ "def", "invoke", "(", "awsclient", ",", "function_name", ",", "payload", ",", "invocation_type", "=", "None", ",", "alias_name", "=", "ALIAS_NAME", ",", "version", "=", "None", ",", "outfile", "=", "None", ")", ":", "log", ".", "debug", "(", "'invoking lambda function: %s'", ",", "function_name", ")", "client_lambda", "=", "awsclient", ".", "get_client", "(", "'lambda'", ")", "if", "invocation_type", "is", "None", ":", "invocation_type", "=", "'RequestResponse'", "if", "payload", ".", "startswith", "(", "'file://'", ")", ":", "log", ".", "debug", "(", "'reading payload from file: %s'", "%", "payload", ")", "with", "open", "(", "payload", "[", "7", ":", "]", ",", "'r'", ")", "as", "pfile", ":", "payload", "=", "pfile", ".", "read", "(", ")", "if", "version", ":", "response", "=", "client_lambda", ".", "invoke", "(", "FunctionName", "=", "function_name", ",", "InvocationType", "=", "invocation_type", ",", "Payload", "=", "payload", ",", "Qualifier", "=", "version", ")", "else", ":", "response", "=", "client_lambda", ".", "invoke", "(", "FunctionName", "=", "function_name", ",", "InvocationType", "=", "invocation_type", ",", "Payload", "=", "payload", ",", "Qualifier", "=", "alias_name", ")", "results", "=", "response", "[", "'Payload'", "]", ".", "read", "(", ")", "# payload is a 'StreamingBody'", "log", ".", "debug", "(", "'invoke completed'", ")", "# write to file", "if", "outfile", ":", "with", "open", "(", "outfile", ",", "'w'", ")", "as", "ofile", ":", "ofile", ".", "write", "(", "str", "(", "results", ")", ")", "ofile", ".", "flush", "(", ")", "return", "else", ":", "return", "results" ]
30.702128
[ 0.029850746268656716, 0.11290322580645161, 0.041666666666666664, 0, 0.14285714285714285, 0.12, 0.15789473684210525, 0.1111111111111111, 0.13636363636363635, 0.15789473684210525, 0.07142857142857142, 0.08823529411764706, 0.2857142857142857, 0.03333333333333333, 0.04, 0.06451612903225806, 0.046511627906976744, 0.05405405405405406, 0.03333333333333333, 0.044444444444444446, 0.058823529411764705, 0, 0.13333333333333333, 0.075, 0.07692307692307693, 0.06976744186046512, 0.10714285714285714, 0.10344827586206896, 0.3333333333333333, 0.2222222222222222, 0.075, 0.07692307692307693, 0.06976744186046512, 0.10714285714285714, 0.09375, 0.3333333333333333, 0, 0.027777777777777776, 0.06060606060606061, 0.10526315789473684, 0.13333333333333333, 0.04878048780487805, 0.05405405405405406, 0.08, 0.14285714285714285, 0.2222222222222222, 0.09090909090909091 ]
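A usage sketch only; awsclient is a project-specific wrapper, so any stand-in exposing get_client('lambda') backed by boto3 should work. Real credentials and a deployed function are required, hence the call itself is left commented:

import json

class AwsClientStub:  # hypothetical stand-in for the project's awsclient
    def get_client(self, service):
        import boto3
        return boto3.client(service)

payload = json.dumps({'ping': True})  # example payload; the shape is up to the target function
# out = invoke(AwsClientStub(), 'my-function', payload)  # assumes AWS credentials are configured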
def add_user_to_group(group, role, email):
    """Add a user to a group the caller owns

    Args:
        group (str): Group name
        role (str): Role of user for group; either 'member' or 'admin'
        email (str): Email of user or group to add

    Swagger:
        https://api.firecloud.org/#!/Groups/addUserToGroup
    """
    uri = "groups/{0}/{1}/{2}".format(group, role, email)
    return __put(uri)
[ "def", "add_user_to_group", "(", "group", ",", "role", ",", "email", ")", ":", "uri", "=", "\"groups/{0}/{1}/{2}\"", ".", "format", "(", "group", ",", "role", ",", "email", ")", "return", "__put", "(", "uri", ")" ]
31.230769
[ 0.023809523809523808, 0.045454545454545456, 0, 0.2222222222222222, 0.0967741935483871, 0.07042253521126761, 0.06, 0.5, 0.16666666666666666, 0.08620689655172414, 0.2857142857142857, 0.03508771929824561, 0.09523809523809523 ]
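Usage is a single call against the live FireCloud API, so only an illustrative invocation is shown:

# Requires FireCloud credentials; role must be 'member' or 'admin'.
# resp = add_user_to_group('my-group', 'member', 'user@example.com')
# print(resp.status_code)  # assuming __put returns a requests.Response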
def getvalues(self):
        """Yields all the values from 'generator_func' and type-checks.

        Yields:
            Whatever 'generator_func' yields.

        Raises:
            TypeError: if subsequent values are of a different type than first
                value.

            ValueError: if subsequent iteration returns a different number of
                values than the first iteration over the generator. (This would
                mean 'generator_func' is not stable.)
        """
        idx = 0
        generator = self._generator_func()
        first_value = next(generator)
        self._value_type = type(first_value)
        yield first_value

        for idx, value in enumerate(generator):
            if not isinstance(value, self._value_type):
                raise TypeError(
                    "All values of a repeated var must be of the same type."
                    " First argument was of type %r, but argument %r is of"
                    " type %r." %
                    (self._value_type, value, repeated.value_type(value)))

            self._watermark = max(self._watermark, idx + 1)
            yield value

        # Iteration stopped - check if we're at the previous watermark and raise
        # if not.
        if idx + 1 < self._watermark:
            raise ValueError(
                "LazyRepetition %r was previously able to iterate its"
                " generator up to idx %d, but this time iteration stopped after"
                " idx %d! Generator function %r is not stable." %
                (self, self._watermark, idx + 1, self._generator_func))

        # Watermark is higher than previous count! Generator function returned
        # more values this time than last time.
        if self._count is not None and self._watermark >= self._count:
            raise ValueError(
                "LazyRepetition %r previously iterated only up to idx %d but"
                " was now able to reach idx %d! Generator function %r is not"
                " stable." %
                (self, self._count - 1, idx + 1, self._generator_func))

        # We've finished iteration - cache count. After this the count will be
        # watermark + 1 forever.
        self._count = self._watermark + 1
[ "def", "getvalues", "(", "self", ")", ":", "idx", "=", "0", "generator", "=", "self", ".", "_generator_func", "(", ")", "first_value", "=", "next", "(", "generator", ")", "self", ".", "_value_type", "=", "type", "(", "first_value", ")", "yield", "first_value", "for", "idx", ",", "value", "in", "enumerate", "(", "generator", ")", ":", "if", "not", "isinstance", "(", "value", ",", "self", ".", "_value_type", ")", ":", "raise", "TypeError", "(", "\"All values of a repeated var must be of the same type.\"", "\" First argument was of type %r, but argument %r is of\"", "\" type %r.\"", "%", "(", "self", ".", "_value_type", ",", "value", ",", "repeated", ".", "value_type", "(", "value", ")", ")", ")", "self", ".", "_watermark", "=", "max", "(", "self", ".", "_watermark", ",", "idx", "+", "1", ")", "yield", "value", "# Iteration stopped - check if we're at the previous watermark and raise", "# if not.", "if", "idx", "+", "1", "<", "self", ".", "_watermark", ":", "raise", "ValueError", "(", "\"LazyRepetition %r was previously able to iterate its\"", "\" generator up to idx %d, but this time iteration stopped after\"", "\" idx %d! Generator function %r is not stable.\"", "%", "(", "self", ",", "self", ".", "_watermark", ",", "idx", "+", "1", ",", "self", ".", "_generator_func", ")", ")", "# Watermark is higher than previous count! Generator function returned", "# more values this time than last time.", "if", "self", ".", "_count", "is", "not", "None", "and", "self", ".", "_watermark", ">=", "self", ".", "_count", ":", "raise", "ValueError", "(", "\"LazyRepetition %r previously iterated only up to idx %d but\"", "\" was now able to reach idx %d! Generator function %r is not\"", "\" stable.\"", "%", "(", "self", ",", "self", ".", "_count", "-", "1", ",", "idx", "+", "1", ",", "self", ".", "_generator_func", ")", ")", "# We've finished iteration - cache count. After this the count will be", "# watermark + 1 forever.", "self", ".", "_count", "=", "self", ".", "_watermark", "+", "1" ]
42.307692
[ 0.05, 0.028169014084507043, 0, 0.13333333333333333, 0.044444444444444446, 0, 0.13333333333333333, 0.02564102564102564, 0.09090909090909091, 0, 0.025974025974025976, 0.0379746835443038, 0.05660377358490566, 0.18181818181818182, 0.13333333333333333, 0.047619047619047616, 0.05405405405405406, 0.045454545454545456, 0.08, 0, 0.0425531914893617, 0.03636363636363636, 0.09375, 0.02631578947368421, 0.02666666666666667, 0.06060606060606061, 0.04054054054054054, 0, 0.03389830508474576, 0.08695652173913043, 0, 0.0375, 0.11764705882352941, 0.05405405405405406, 0.10344827586206896, 0.02857142857142857, 0.0375, 0.03076923076923077, 0.04225352112676056, 0, 0.02564102564102564, 0.0425531914893617, 0.02857142857142857, 0.10344827586206896, 0.025974025974025976, 0.025974025974025976, 0.07142857142857142, 0.04225352112676056, 0, 0.02564102564102564, 0.0625, 0.04878048780487805 ]
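To see the stability check fire, wrap getvalues in a minimal host class and shrink the generator between passes. A sketch: it assumes the getvalues definition above is available at module scope, and it never hits the repeated.value_type error path since all values are ints:

class LazyRepetition:
    # Only the attributes getvalues touches.
    def __init__(self, generator_func):
        self._generator_func = generator_func
        self._watermark = 0
        self._count = None
        self._value_type = None

    getvalues = getvalues  # reuse the module-level method defined above

state = {'n': 3}

def gen():
    for i in range(state['n']):
        yield i

rep = LazyRepetition(gen)
print(list(rep.getvalues()))  # [0, 1, 2]; watermark is now 2, count is 3
state['n'] = 2
list(rep.getvalues())  # raises ValueError: the generator is not stable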
def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):
    """
    Run `command` in a subprocess (a cleaned-up variant of cmd).

    Args:
        command (str): string command
        shell (bool): if True, process is run in shell
        detatch (bool): if True, process is run in background
        verbose (int): verbosity mode
        verbout (bool): if True, `command` writes to stdout in realtime.
            Defaults to True iff verbose > 0.

    Returns:
        dict: info - information about command status
    """
    import shlex
    if isinstance(command, (list, tuple)):
        raise ValueError('command tuple not supported yet')
    args = shlex.split(command, posix=not WIN32)
    if verbose is True:
        verbose = 2
    if verbout is None:
        verbout = verbose >= 1
    if verbose >= 2:
        print('+=== START CMD2 ===')
        print('Command:')
        print(command)
        if verbout:
            print('----')
            print('Stdout:')
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=shell,
                            universal_newlines=True)
    if detatch:
        info = {'proc': proc}
    else:
        write_fn = sys.stdout.write
        flush_fn = sys.stdout.flush
        logged_out = []
        for line in _run_process(proc):
            #line_ = line if six.PY2 else line.decode('utf-8')
            line_ = line if six.PY2 else line
            if len(line_) > 0:
                if verbout:
                    write_fn(line_)
                    flush_fn()
                logged_out.append(line)
        try:
            from utool import util_str  # NOQA
            # out = '\n'.join(logged_out)
            out = ''.join(logged_out)
        except UnicodeDecodeError:
            from utool import util_str  # NOQA
            logged_out = util_str.ensure_unicode_strlist(logged_out)
            # out = '\n'.join(logged_out)
            out = ''.join(logged_out)
            # print('logged_out = %r' % (logged_out,))
            # raise
        (out_, err) = proc.communicate()
        ret = proc.wait()
        info = {
            'out': out,
            'err': err,
            'ret': ret,
        }
    if verbose >= 2:
        print('L___ END CMD2 ___')
    return info
[ "def", "cmd2", "(", "command", ",", "shell", "=", "False", ",", "detatch", "=", "False", ",", "verbose", "=", "False", ",", "verbout", "=", "None", ")", ":", "import", "shlex", "if", "isinstance", "(", "command", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ValueError", "(", "'command tuple not supported yet'", ")", "args", "=", "shlex", ".", "split", "(", "command", ",", "posix", "=", "not", "WIN32", ")", "if", "verbose", "is", "True", ":", "verbose", "=", "2", "if", "verbout", "is", "None", ":", "verbout", "=", "verbose", ">=", "1", "if", "verbose", ">=", "2", ":", "print", "(", "'+=== START CMD2 ==='", ")", "print", "(", "'Command:'", ")", "print", "(", "command", ")", "if", "verbout", ":", "print", "(", "'----'", ")", "print", "(", "'Stdout:'", ")", "proc", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "shell", "=", "shell", ",", "universal_newlines", "=", "True", ")", "if", "detatch", ":", "info", "=", "{", "'proc'", ":", "proc", "}", "else", ":", "write_fn", "=", "sys", ".", "stdout", ".", "write", "flush_fn", "=", "sys", ".", "stdout", ".", "flush", "logged_out", "=", "[", "]", "for", "line", "in", "_run_process", "(", "proc", ")", ":", "#line_ = line if six.PY2 else line.decode('utf-8')", "line_", "=", "line", "if", "six", ".", "PY2", "else", "line", "if", "len", "(", "line_", ")", ">", "0", ":", "if", "verbout", ":", "write_fn", "(", "line_", ")", "flush_fn", "(", ")", "logged_out", ".", "append", "(", "line", ")", "try", ":", "from", "utool", "import", "util_str", "# NOQA", "# out = '\\n'.join(logged_out)", "out", "=", "''", ".", "join", "(", "logged_out", ")", "except", "UnicodeDecodeError", ":", "from", "utool", "import", "util_str", "# NOQA", "logged_out", "=", "util_str", ".", "ensure_unicode_strlist", "(", "logged_out", ")", "# out = '\\n'.join(logged_out)", "out", "=", "''", ".", "join", "(", "logged_out", ")", "# print('logged_out = %r' % (logged_out,))", "# raise", "(", "out_", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "ret", "=", "proc", ".", "wait", "(", ")", "info", "=", "{", "'out'", ":", "out", ",", "'err'", ":", "err", ",", "'ret'", ":", "ret", ",", "}", "if", "verbose", ">=", "2", ":", "print", "(", "'L___ END CMD2 ___'", ")", "return", "info" ]
32.647059
[ 0.013333333333333334, 0.2857142857142857, 0.07692307692307693, 0, 0.2222222222222222, 0.08108108108108109, 0.05555555555555555, 0.04918032786885246, 0.08108108108108109, 0.05555555555555555, 0.045454545454545456, 0, 0.16666666666666666, 0.03773584905660377, 0.2857142857142857, 0.125, 0.047619047619047616, 0.03389830508474576, 0.041666666666666664, 0.08695652173913043, 0.10526315789473684, 0.08695652173913043, 0.06666666666666667, 0.1, 0.05555555555555555, 0.08, 0.09090909090909091, 0.10526315789473684, 0.08, 0.07142857142857142, 0.05263157894736842, 0.06060606060606061, 0.07692307692307693, 0.13333333333333333, 0.06896551724137931, 0.2222222222222222, 0.05714285714285714, 0.05714285714285714, 0.08695652173913043, 0.05128205128205128, 0.04838709677419355, 0.044444444444444446, 0.06666666666666667, 0.07407407407407407, 0.05714285714285714, 0.06666666666666667, 0.05128205128205128, 0.16666666666666666, 0.043478260869565216, 0.04878048780487805, 0.05405405405405406, 0.058823529411764705, 0.043478260869565216, 0.029411764705882353, 0.04878048780487805, 0.05405405405405406, 0.037037037037037035, 0.10526315789473684, 0.05, 0.08, 0.1875, 0.08695652173913043, 0.08695652173913043, 0.08695652173913043, 0.3333333333333333, 0.1, 0.058823529411764705, 0.13333333333333333 ]
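A quick sanity check, assuming the module context cmd2 depends on (subprocess, sys, six, WIN32, _run_process) is importable and an echo binary exists on PATH:

info = cmd2('echo hello', verbose=0, verbout=False)
print(info['ret'])             # 0 on success
print('hello' in info['out'])  # True; the trailing newline is kept in 'out'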
def file_sort(my_list):
    """
    Sort a list of files in a nice way.
    e.g. item-10 will be after item-9
    """

    def alphanum_key(key):
        """
        Split the key into str/int parts
        """
        return [int(s) if s.isdigit() else s for s in re.split("([0-9]+)", key)]

    my_list.sort(key=alphanum_key)
    return my_list
[ "def", "file_sort", "(", "my_list", ")", ":", "def", "alphanum_key", "(", "key", ")", ":", "\"\"\"\n Split the key into str/int parts\n \"\"\"", "return", "[", "int", "(", "s", ")", "if", "s", ".", "isdigit", "(", ")", "else", "s", "for", "s", "in", "re", ".", "split", "(", "\"([0-9]+)\"", ",", "key", ")", "]", "my_list", ".", "sort", "(", "key", "=", "alphanum_key", ")", "return", "my_list" ]
23.642857
[ 0.043478260869565216, 0.2857142857142857, 0.05128205128205128, 0.05714285714285714, 0.2857142857142857, 0, 0.07692307692307693, 0.18181818181818182, 0.05, 0.18181818181818182, 0.0375, 0, 0.058823529411764705, 0.1111111111111111 ]
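A worked example of the natural ordering (re must be imported in the defining module):

names = ['item-10', 'item-9', 'item-2', 'item-1']
print(file_sort(names))
# ['item-1', 'item-2', 'item-9', 'item-10']
# a plain lexicographic sort would give ['item-1', 'item-10', 'item-2', 'item-9']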
def validate(self, value):
        """Make sure that value is of the right type """
        if not isinstance(value, self.nested_klass):
            self.raise_error('NestedClass is of the wrong type: {0} vs expected {1}'
                             .format(value.__class__.__name__, self.nested_klass.__name__))
        super(NestedDocumentField, self).validate(value)
[ "def", "validate", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "self", ".", "nested_klass", ")", ":", "self", ".", "raise_error", "(", "'NestedClass is of the wrong type: {0} vs expected {1}'", ".", "format", "(", "value", ".", "__class__", ".", "__name__", ",", "self", ".", "nested_klass", ".", "__name__", ")", ")", "super", "(", "NestedDocumentField", ",", "self", ")", ".", "validate", "(", "value", ")" ]
60.833333
[ 0.038461538461538464, 0.03571428571428571, 0.038461538461538464, 0.047619047619047616, 0.054945054945054944, 0.03571428571428571 ]
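The check in isolation; the field and error plumbing below are hypothetical stand-ins for the document-mapper classes the method belongs to:

class NestedField:
    def __init__(self, nested_klass):
        self.nested_klass = nested_klass

    def raise_error(self, msg):  # stand-in for the base field's error hook
        raise TypeError(msg)

    def validate(self, value):
        if not isinstance(value, self.nested_klass):
            self.raise_error('NestedClass is of the wrong type: {0} vs expected {1}'
                             .format(value.__class__.__name__,
                                     self.nested_klass.__name__))

class Address:
    pass

NestedField(Address).validate(Address())  # passes silently
# NestedField(Address).validate(42)       # TypeError: int vs expected Address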
def display(self):
        '''Print the results of this profiling'''
        self.pretty(self._timings, 'Raw Redis Commands')
        print()
        for key, value in self._commands.items():
            self.pretty(value, 'Qless "%s" Command' % key)
            print()
[ "def", "display", "(", "self", ")", ":", "self", ".", "pretty", "(", "self", ".", "_timings", ",", "'Raw Redis Commands'", ")", "print", "(", ")", "for", "key", ",", "value", "in", "self", ".", "_commands", ".", "items", "(", ")", ":", "self", ".", "pretty", "(", "value", ",", "'Qless \"%s\" Command'", "%", "key", ")", "print", "(", ")" ]
37.714286
[ 0.05555555555555555, 0.04081632653061224, 0.03571428571428571, 0.13333333333333333, 0.04081632653061224, 0.034482758620689655, 0.10526315789473684 ]
def interpolate(self, year):
        """Interpolate missing values in timeseries (linear interpolation)

        Parameters
        ----------
        year: int
            year to be interpolated
        """
        df = self.pivot_table(index=IAMC_IDX, columns=['year'],
                              values='value', aggfunc=np.sum)
        # drop year-rows where values are already defined
        if year in df.columns:
            df = df[np.isnan(df[year])]
        fill_values = df.apply(fill_series,
                               raw=False, axis=1, year=year)
        fill_values = fill_values.dropna().reset_index()
        fill_values = fill_values.rename(columns={0: "value"})
        fill_values['year'] = year
        self.data = self.data.append(fill_values, ignore_index=True)
[ "def", "interpolate", "(", "self", ",", "year", ")", ":", "df", "=", "self", ".", "pivot_table", "(", "index", "=", "IAMC_IDX", ",", "columns", "=", "[", "'year'", "]", ",", "values", "=", "'value'", ",", "aggfunc", "=", "np", ".", "sum", ")", "# drop year-rows where values are already defined", "if", "year", "in", "df", ".", "columns", ":", "df", "=", "df", "[", "np", ".", "isnan", "(", "df", "[", "year", "]", ")", "]", "fill_values", "=", "df", ".", "apply", "(", "fill_series", ",", "raw", "=", "False", ",", "axis", "=", "1", ",", "year", "=", "year", ")", "fill_values", "=", "fill_values", ".", "dropna", "(", ")", ".", "reset_index", "(", ")", "fill_values", "=", "fill_values", ".", "rename", "(", "columns", "=", "{", "0", ":", "\"value\"", "}", ")", "fill_values", "[", "'year'", "]", "=", "year", "self", ".", "data", "=", "self", ".", "data", ".", "append", "(", "fill_values", ",", "ignore_index", "=", "True", ")" ]
40.789474
[ 0.03571428571428571, 0.02702702702702703, 0, 0.1111111111111111, 0.1111111111111111, 0.11764705882352941, 0.08333333333333333, 0.18181818181818182, 0.047619047619047616, 0.09836065573770492, 0.03508771929824561, 0.06666666666666667, 0.05128205128205128, 0.06976744186046512, 0.11666666666666667, 0.03571428571428571, 0.03225806451612903, 0.058823529411764705, 0.029411764705882353 ]
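The same fill-then-append shape on a toy frame; fill_series is project code, so np.interp stands in for the linear step, and pd.concat replaces the DataFrame.append call (removed in pandas 2.0):

import numpy as np
import pandas as pd

data = pd.DataFrame({'year': [2010, 2030], 'value': [1.0, 3.0]})
known = data.set_index('year')['value']
fill = pd.DataFrame({'year': [2020],
                     'value': np.interp([2020], known.index, known.values)})
data = pd.concat([data, fill], ignore_index=True)
print(data)  # 2020 filled with 2.0, halfway between the endpoints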
def parse_xml(self, xml):
        '''
        :param xml: lxml.etree.Element representing a single VocabularyCodeSet
        '''
        xmlutils = XmlUtils(xml)
        self.name = xmlutils.get_string_by_xpath('name')
        self.family = xmlutils.get_string_by_xpath('family')
        self.version = xmlutils.get_string_by_xpath('version')
        for item in xml.xpath('code-item'):
            self.code_item.append(VocabularyCodeItem(item))
        self.is_vocab_truncated = xmlutils.get_bool_by_xpath('is-vocab-truncated')
        self.language = xmlutils.get_lang()
[ "def", "parse_xml", "(", "self", ",", "xml", ")", ":", "xmlutils", "=", "XmlUtils", "(", "xml", ")", "self", ".", "name", "=", "xmlutils", ".", "get_string_by_xpath", "(", "'name'", ")", "self", ".", "family", "=", "xmlutils", ".", "get_string_by_xpath", "(", "'family'", ")", "self", ".", "version", "=", "xmlutils", ".", "get_string_by_xpath", "(", "'version'", ")", "for", "item", "in", "xml", ".", "xpath", "(", "'code-item'", ")", ":", "self", ".", "code_item", ".", "append", "(", "VocabularyCodeItem", "(", "item", ")", ")", "self", ".", "is_vocab_truncated", "=", "xmlutils", ".", "get_bool_by_xpath", "(", "'is-vocab-truncated'", ")", "self", ".", "language", "=", "xmlutils", ".", "get_lang", "(", ")" ]
47.5
[ 0.04, 0.18181818181818182, 0.046511627906976744, 0.18181818181818182, 0.0625, 0.03571428571428571, 0.03333333333333333, 0.03225806451612903, 0.046511627906976744, 0.03389830508474576, 0.036585365853658534, 0.046511627906976744 ]
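XmlUtils and VocabularyCodeItem are project classes; stripped to plain lxml, the extraction pattern looks like this (toy document, illustrative values only):

from lxml import etree

doc = etree.fromstring(
    '<vocab><name>demo</name><family>demo-family</family>'
    '<version>1.0</version><code-item code="A1"/></vocab>')
name = doc.findtext('name')                        # cf. get_string_by_xpath
codes = [el.get('code') for el in doc.xpath('code-item')]
print(name, codes)  # demo ['A1']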
def fingerprint(blob):
    """
    Compute SSH fingerprint for specified blob.

    See https://en.wikipedia.org/wiki/Public_key_fingerprint for details.
    """
    digest = hashlib.md5(blob).digest()
    return ':'.join('{:02x}'.format(c) for c in bytearray(digest))
[ "def", "fingerprint", "(", "blob", ")", ":", "digest", "=", "hashlib", ".", "md5", "(", "blob", ")", ".", "digest", "(", ")", "return", "':'", ".", "join", "(", "'{:02x}'", ".", "format", "(", "c", ")", "for", "c", "in", "bytearray", "(", "digest", ")", ")" ]
32.625
[ 0.045454545454545456, 0.2857142857142857, 0.0425531914893617, 0, 0.0410958904109589, 0.2857142857142857, 0.05128205128205128, 0.030303030303030304 ]
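A worked example (hashlib must be imported in the defining module); the MD5 digest of an empty blob is well known, so the output is checkable by hand:

print(fingerprint(b''))
# d4:1d:8c:d9:8f:00:b2:04:e9:80:09:98:ec:f8:42:7e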