code: string, lengths 75 to 104k | docstring: string, lengths 1 to 46.9k
def format_exp_floats(decimals):
    """
    sometimes the exp. column can be too large
    """
    threshold = 10 ** 5
    return (
        lambda n: "{:.{prec}e}".format(n, prec=decimals)
        if n > threshold
        else "{:4.{prec}f}".format(n, prec=decimals)
    )
sometimes the exp. column can be too large
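A minimal usage sketch of the formatter above; the printed values follow from Python's standard float formatting.

fmt = format_exp_floats(decimals=2)
print(fmt(123.456))    # '123.46'   -- below the 1e5 threshold, fixed-point
print(fmt(2500000.0))  # '2.50e+06' -- above the threshold, scientific notation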
async def execute(self, coro, name, user, info=None):
    '''
    Create a synapse task from the given coroutine.
    '''
    task = self.schedCoro(coro)
    return await s_task.Task.anit(self, task, name, user, info=info)
Create a synapse task from the given coroutine.
def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1): """ Squeezing or expanding 2 fingers on this UI with given motion range and duration. Args: direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding percent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI duration (:py:obj:`float`): time interval in which the action is performed dead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent`` Raises: PocoNoSuchNodeException: raised when the UI element does not exist """ if direction not in ('in', 'out'): raise ValueError('Argument `direction` should be one of "in" or "out". Got {}'.format(repr(direction))) if dead_zone >= percent: raise ValueError('Argument `dead_zone` should not be greater than `percent`. dead_zoon={}, percent={}' .format(repr(dead_zone), repr(percent))) w, h = self.get_size() x, y = self.get_position() # focus = self._focus or [0.5, 0.5] tracks = make_pinching(direction, [x, y], [w, h], percent, dead_zone, duration) speed = math.sqrt(w * h) * (percent - dead_zone) / 2 / duration # 速度慢的时候,精度适当要提高,这样有助于控制准确 ret = self.poco.apply_motion_tracks(tracks, accuracy=speed * 0.03) return ret
Squeezing or expanding 2 fingers on this UI with given motion range and duration. Args: direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding percent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI duration (:py:obj:`float`): time interval in which the action is performed dead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent`` Raises: PocoNoSuchNodeException: raised when the UI element does not exist
def on_log(self): # type: () -> Callable """Decorate a callback function to handle MQTT logging. **Example Usage:** :: @mqtt.on_log() def handle_logging(client, userdata, level, buf): print(client, userdata, level, buf) """ def decorator(handler): # type: (Callable) -> Callable self.client.on_log = handler return handler return decorator
Decorate a callback function to handle MQTT logging. **Example Usage:** :: @mqtt.on_log() def handle_logging(client, userdata, level, buf): print(client, userdata, level, buf)
def fixminimized(self, alphabet): """ After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. Args: alphabet (list): The input alphabet Returns: None """ insymbols = fst.SymbolTable() outsymbols = fst.SymbolTable() num = 1 for char in self.alphabet: self.isyms.__setitem__(char, num) self.osyms.__setitem__(char, num) insymbols.add_symbol(char, num) outsymbols.add_symbol(char, num) num = num + 1 self.automaton.set_input_symbols(insymbols) self.automaton.set_output_symbols(outsymbols) endstate = self.add_state() for state in self.states: for char in alphabet: found = 0 for arc in state.arcs: if self.isyms.find(arc.ilabel) == char: found = 1 break if found == 0: self.add_arc(state.stateid, endstate, char) self[endstate].final = False for char in alphabet: self.add_arc(endstate, endstate, char)
After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. Args: alphabet (list): The input alphabet Returns: None
def predict_proba(self, time): """Return probability of an event after given time point. :math:`\\hat{S}(t) = P(T > t)` Parameters ---------- time : array, shape = (n_samples,) Time to estimate probability at. Returns ------- prob : array, shape = (n_samples,) Probability of an event. """ check_is_fitted(self, "unique_time_") time = check_array(time, ensure_2d=False) # K-M is undefined if estimate at last time point is non-zero extends = time > self.unique_time_[-1] if self.prob_[-1] > 0 and extends.any(): raise ValueError("time must be smaller than largest " "observed time point: {}".format(self.unique_time_[-1])) # beyond last time point is zero probability Shat = numpy.empty(time.shape, dtype=float) Shat[extends] = 0.0 valid = ~extends time = time[valid] idx = numpy.searchsorted(self.unique_time_, time) # for non-exact matches, we need to shift the index to left eps = numpy.finfo(self.unique_time_.dtype).eps exact = numpy.absolute(self.unique_time_[idx] - time) < eps idx[~exact] -= 1 Shat[valid] = self.prob_[idx] return Shat
Return probability of an event after given time point. :math:`\\hat{S}(t) = P(T > t)` Parameters ---------- time : array, shape = (n_samples,) Time to estimate probability at. Returns ------- prob : array, shape = (n_samples,) Probability of an event.
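A self-contained sketch of the searchsorted step-function lookup used in predict_proba; the unique_time and prob arrays here are made up for illustration, not taken from a fitted estimator.

import numpy as np

unique_time = np.array([1.0, 3.0, 5.0])
prob = np.array([0.9, 0.6, 0.0])   # survival probability at each unique time point

query = np.array([2.0, 3.0, 4.5])
idx = np.searchsorted(unique_time, query)
exact = np.abs(unique_time[idx] - query) < np.finfo(float).eps
idx[~exact] -= 1                   # non-exact matches step back to the previous time point
print(prob[idx])                   # [0.9 0.6 0.6]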
def classify(self, token_type, value, lineno, column, line): """Find the label for a token.""" if token_type == self.grammar.KEYWORD_TOKEN: label_index = self.grammar.keyword_ids.get(value, -1) if label_index != -1: return label_index label_index = self.grammar.token_ids.get(token_type, -1) if label_index == -1: raise ParseError("invalid token", token_type, value, lineno, column, line) return label_index
Find the label for a token.
def phi_vector(self): """property decorated method to get a vector of L2 norm (phi) for the realizations. The ObservationEnsemble.pst.weights can be updated prior to calling this method to evaluate new weighting strategies Return ------ pandas.DataFrame : pandas.DataFrame """ weights = self.pst.observation_data.loc[self.names,"weight"] obsval = self.pst.observation_data.loc[self.names,"obsval"] phi_vec = [] for idx in self.index.values: simval = self.loc[idx,self.names] phi = (((simval - obsval) * weights)**2).sum() phi_vec.append(phi) #return pd.DataFrame({"phi":phi_vec},index=self.index) return pd.Series(data=phi_vec,index=self.index)
property decorated method to get a vector of L2 norm (phi) for the realizations. The ObservationEnsemble.pst.weights can be updated prior to calling this method to evaluate new weighting strategies Returns ------- pandas.Series : vector of phi values, one per realization
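A toy reproduction of the weighted sum-of-squares (phi) per realization using plain pandas objects; the observation names, weights and values are invented for illustration rather than taken from a real ObservationEnsemble.

import pandas as pd

obsval = pd.Series({"o1": 1.0, "o2": 2.0})
weights = pd.Series({"o1": 1.0, "o2": 0.5})
ensemble = pd.DataFrame({"o1": [1.1, 0.9], "o2": [2.4, 1.6]}, index=["real_0", "real_1"])

# phi = sum(((sim - obs) * weight) ** 2) over observations, one value per realization
phi = ((ensemble.sub(obsval, axis=1).mul(weights, axis=1)) ** 2).sum(axis=1)
print(phi)  # real_0 0.05, real_1 0.05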
def nrmse_iqr(simulated_array, observed_array, replace_nan=None, replace_inf=None, remove_neg=False, remove_zero=False): """Compute the IQR normalized root mean square error between the simulated and observed data. .. image:: /pictures/NRMSE_IQR.png **Range:** 0 ≤ NRMSE < inf. **Notes:** This metric is the RMSE normalized by the interquartile range of the observed time series (x). Normalizing allows comparison between data sets with different scales. The NRMSEquartile is the least sensitive to outliers of the three normalized rmse metrics. Parameters ---------- simulated_array: one dimensional ndarray An array of simulated data from the time series. observed_array: one dimensional ndarray An array of observed data from the time series. replace_nan: float, optional If given, indicates which value to replace NaN values with in the two arrays. If None, when a NaN value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. replace_inf: float, optional If given, indicates which value to replace Inf values with in the two arrays. If None, when an inf value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. remove_neg: boolean, optional If True, when a negative value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. remove_zero: boolean, optional If true, when a zero value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. Returns ------- float The IQR normalized root mean square error. Examples -------- >>> import HydroErr as he >>> import numpy as np >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7]) >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7]) >>> he.nrmse_iqr(sim, obs) 0.2595461185212093 References ---------- - Pontius, R.G., Thontteh, O., Chen, H., 2008. Components of information for multiple resolution comparison between maps that share a real variable. Environmental and Ecological Statistics 15(2) 111-142. """ # Checking and cleaning the data simulated_array, observed_array = treat_values( simulated_array, observed_array, replace_nan=replace_nan, replace_inf=replace_inf, remove_neg=remove_neg, remove_zero=remove_zero ) rmse_value = np.sqrt(np.mean((simulated_array - observed_array) ** 2)) q1 = np.percentile(observed_array, 25) q3 = np.percentile(observed_array, 75) iqr = q3 - q1 return rmse_value / iqr
Compute the IQR normalized root mean square error between the simulated and observed data. .. image:: /pictures/NRMSE_IQR.png **Range:** 0 ≤ NRMSE < inf. **Notes:** This metric is the RMSE normalized by the interquartile range of the observed time series (x). Normalizing allows comparison between data sets with different scales. The NRMSEquartile is the least sensitive to outliers of the three normalized rmse metrics. Parameters ---------- simulated_array: one dimensional ndarray An array of simulated data from the time series. observed_array: one dimensional ndarray An array of observed data from the time series. replace_nan: float, optional If given, indicates which value to replace NaN values with in the two arrays. If None, when a NaN value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. replace_inf: float, optional If given, indicates which value to replace Inf values with in the two arrays. If None, when an inf value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. remove_neg: boolean, optional If True, when a negative value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. remove_zero: boolean, optional If true, when a zero value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. Returns ------- float The IQR normalized root mean square error. Examples -------- >>> import HydroErr as he >>> import numpy as np >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7]) >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7]) >>> he.nrmse_iqr(sim, obs) 0.2595461185212093 References ---------- - Pontius, R.G., Thontteh, O., Chen, H., 2008. Components of information for multiple resolution comparison between maps that share a real variable. Environmental and Ecological Statistics 15(2) 111-142.
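A quick plain-NumPy check of the formula, independent of HydroErr; it reproduces the docstring example.

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])
rmse = np.sqrt(np.mean((sim - obs) ** 2))
iqr = np.percentile(obs, 75) - np.percentile(obs, 25)
print(rmse / iqr)  # ~0.2595, matching the example above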
def _updatePoolingState(self, activeColWithPredictedInput, fractionUnpredicted): """ This function updates the pooling state of TP cells. A cell will stop pooling if: (1) It hasn't received any predicted input in the last self._poolingLife steps or (2) the overall fraction of unpredicted input to the TP is above _poolingThreshUnpredicted """ if fractionUnpredicted > self._poolingThreshUnpredicted: # Reset pooling activation if the fraction of unpredicted input # is above the threshold if self._spVerbosity > 3: print " reset pooling state for all cells" self._poolingActivation = numpy.zeros(self._numColumns, dtype="int32") else: # decrement activation of all pooling cells self._poolingActivation[self._poolingColumns] -= 1 # reset activation of cells that are receiving predicted input self._poolingActivation[activeColWithPredictedInput] = self._poolingLife self._poolingColumns = self._poolingActivation.nonzero()[0]
This function updates the pooling state of TP cells. A cell will stop pooling if: (1) It hasn't received any predicted input in the last self._poolingLife steps or (2) the overall fraction of unpredicted input to the TP is above _poolingThreshUnpredicted
def _init_vocab(self, token_generator, add_reserved_tokens=True): """Initialize vocabulary with tokens from token_generator.""" self._id_to_token = {} non_reserved_start_index = 0 if add_reserved_tokens: self._id_to_token.update(enumerate(RESERVED_TOKENS)) non_reserved_start_index = len(RESERVED_TOKENS) self._id_to_token.update( enumerate(token_generator, start=non_reserved_start_index)) # _token_to_id is the reverse of _id_to_token self._token_to_id = dict((v, k) for k, v in six.iteritems(self._id_to_token))
Initialize vocabulary with tokens from token_generator.
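A standalone sketch of the two-way mapping built by _init_vocab; the reserved-token list here is an assumption for illustration, not necessarily the library's RESERVED_TOKENS.

RESERVED_TOKENS = ['<pad>', '<EOS>']  # assumed placeholder values

id_to_token = dict(enumerate(RESERVED_TOKENS))
id_to_token.update(enumerate(['the', 'cat'], start=len(RESERVED_TOKENS)))
token_to_id = {v: k for k, v in id_to_token.items()}

print(id_to_token)         # {0: '<pad>', 1: '<EOS>', 2: 'the', 3: 'cat'}
print(token_to_id['cat'])  # 3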
def startTicker(self, reqId, contract, tickType): """ Start a tick request that has the reqId associated with the contract. Return the ticker. """ ticker = self.tickers.get(id(contract)) if not ticker: ticker = Ticker( contract=contract, ticks=[], tickByTicks=[], domBids=[], domAsks=[], domTicks=[]) self.tickers[id(contract)] = ticker self.reqId2Ticker[reqId] = ticker self._reqId2Contract[reqId] = contract self.ticker2ReqId[tickType][ticker] = reqId return ticker
Start a tick request that has the reqId associated with the contract. Return the ticker.
def MakePmfFromList(t, name=''):
    """Makes a PMF from an unsorted sequence of values.

    Args:
        t: sequence of numbers
        name: string name for this PMF

    Returns:
        Pmf object
    """
    hist = MakeHistFromList(t)
    d = hist.GetDict()
    pmf = Pmf(d, name)
    pmf.Normalize()
    return pmf
Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this PMF Returns: Pmf object
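A standalone illustration of what the PMF construction yields, using collections.Counter instead of the thinkstats Hist/Pmf classes.

from collections import Counter

t = [1, 2, 2, 3, 5]
counts = Counter(t)
total = sum(counts.values())
pmf = {value: count / total for value, count in counts.items()}
print(pmf)  # {1: 0.2, 2: 0.4, 3: 0.2, 5: 0.2}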
def to_geopandas(raster, **kwargs): """ Convert GeoRaster to GeoPandas DataFrame, which can be easily exported to other types of files and used to do other types of operations. The DataFrame has the geometry (Polygon), row, col, value, x, and y values for each cell Usage: df = gr.to_geopandas(raster) """ df = to_pandas(raster, **kwargs) df['geometry'] = df.apply(squares, georaster=raster, axis=1) df = gp.GeoDataFrame(df, crs=from_string(raster.projection.ExportToProj4())) return df
Convert GeoRaster to GeoPandas DataFrame, which can be easily exported to other types of files and used to do other types of operations. The DataFrame has the geometry (Polygon), row, col, value, x, and y values for each cell Usage: df = gr.to_geopandas(raster)
async def ensure_closed(self): """Send quit command and then close socket connection""" if self._writer is None: # connection has been closed return send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT) self._writer.write(send_data) await self._writer.drain() self.close()
Send quit command and then close socket connection
def add_missing_components(network): # Munich """Add missing transformer at Heizkraftwerk Nord in Munich and missing transformer in Stuttgart Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns ------- network : :class:`pypsa.Network Overall container of PyPSA """ """https://www.swm.de/privatkunden/unternehmen/energieerzeugung/heizkraftwerke.html?utm_medium=301 to bus 25096: 25369 (86) 28232 (24) 25353 to 25356 (79) to bus 23822: (110kV bus of 380/110-kV-transformer) 25355 (90) 28212 (98) 25357 to 665 (85) 25354 to 27414 (30) 27414 to 28212 (33) 25354 to 28294 (32/63) 28335 to 28294 (64) 28335 to 28139 (28) Overhead lines: 16573 to 24182 (part of 4) """ """ Installierte Leistung der Umspannungsebene Höchst- zu Hochspannung (380 kV / 110 kV): 2.750.000 kVA https://www.swm-infrastruktur.de/strom/netzstrukturdaten/strukturmerkmale.html """ new_trafo = str(network.transformers.index.astype(int).max() + 1) network.add("Transformer", new_trafo, bus0="16573", bus1="23648", x=0.135 / (2750 / 2), r=0.0, tap_ratio=1, s_nom=2750 / 2) def add_110kv_line(bus0, bus1, overhead=False): new_line = str(network.lines.index.astype(int).max() + 1) if not overhead: network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=280) else: network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=260) network.lines.loc[new_line, "scn_name"] = "Status Quo" network.lines.loc[new_line, "v_nom"] = 110 network.lines.loc[new_line, "version"] = "added_manually" network.lines.loc[new_line, "frequency"] = 50 network.lines.loc[new_line, "cables"] = 3.0 network.lines.loc[new_line, "country"] = 'DE' network.lines.loc[new_line, "length"] = ( pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]], network.buses.loc[bus1, ["x", "y"]]) [0][0] * 1.2) if not overhead: network.lines.loc[new_line, "r"] = (network.lines. loc[new_line, "length"] * 0.0177) network.lines.loc[new_line, "g"] = 0 # or: (network.lines.loc[new_line, "length"]*78e-9) network.lines.loc[new_line, "x"] = (network.lines. loc[new_line, "length"] * 0.3e-3) network.lines.loc[new_line, "b"] = (network.lines. loc[new_line, "length"] * 250e-9) elif overhead: network.lines.loc[new_line, "r"] = (network.lines. loc[new_line, "length"] * 0.05475) network.lines.loc[new_line, "g"] = 0 # or: (network.lines.loc[new_line, "length"]*40e-9) network.lines.loc[new_line, "x"] = (network.lines. loc[new_line, "length"] * 1.2e-3) network.lines.loc[new_line, "b"] = (network.lines. 
loc[new_line, "length"] * 9.5e-9) add_110kv_line("16573", "28353") add_110kv_line("16573", "28092") add_110kv_line("25096", "25369") add_110kv_line("25096", "28232") add_110kv_line("25353", "25356") add_110kv_line("23822", "25355") add_110kv_line("23822", "28212") add_110kv_line("25357", "665") add_110kv_line("25354", "27414") add_110kv_line("27414", "28212") add_110kv_line("25354", "28294") add_110kv_line("28335", "28294") add_110kv_line("28335", "28139") add_110kv_line("16573", "24182", overhead=True) # Stuttgart """ Stuttgart: Missing transformer, because 110-kV-bus is situated outside Heizkraftwerk Heilbronn: """ # new_trafo = str(network.transformers.index.astype(int).max()1) network.add("Transformer", '99999', bus0="18967", bus1="25766", x=0.135 / 300, r=0.0, tap_ratio=1, s_nom=300) """ According to: https://assets.ctfassets.net/xytfb1vrn7of/NZO8x4rKesAcYGGcG4SQg/b780d6a3ca4c2600ab51a30b70950bb1/netzschemaplan-110-kv.pdf the following lines are missing: """ add_110kv_line("18967", "22449", overhead=True) # visible in OSM & DSO map add_110kv_line("21165", "24068", overhead=True) # visible in OSM & DSO map add_110kv_line("23782", "24089", overhead=True) # visible in DSO map & OSM till 1 km from bus1 """ Umspannwerk Möhringen (bus 23697) https://de.wikipedia.org/wiki/Umspannwerk_M%C3%B6hringen there should be two connections: to Sindelfingen (2*110kV) to Wendingen (former 220kV, now 2*110kV) the line to Sindelfingen is connected, but the connection of Sindelfingen itself to 380kV is missing: """ add_110kv_line("19962", "27671", overhead=True) # visible in OSM & DSO map add_110kv_line("19962", "27671", overhead=True) """ line to Wendingen is missing, probably because it ends shortly before the way of the substation and is connected via cables: """ add_110kv_line("23697", "24090", overhead=True) # visible in OSM & DSO map add_110kv_line("23697", "24090", overhead=True) # Lehrte """ Lehrte: 220kV Bus located outsinde way of Betriebszentrtum Lehrte and therefore not connected: """ def add_220kv_line(bus0, bus1, overhead=False): new_line = str(network.lines.index.astype(int).max() + 1) if not overhead: network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=550) else: network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=520) network.lines.loc[new_line, "scn_name"] = "Status Quo" network.lines.loc[new_line, "v_nom"] = 220 network.lines.loc[new_line, "version"] = "added_manually" network.lines.loc[new_line, "frequency"] = 50 network.lines.loc[new_line, "cables"] = 3.0 network.lines.loc[new_line, "country"] = 'DE' network.lines.loc[new_line, "length"] = ( pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]], network.buses.loc[bus1, ["x", "y"]])[0][0] * 1.2) if not overhead: network.lines.loc[new_line, "r"] = (network.lines. loc[new_line, "length"] * 0.0176) network.lines.loc[new_line, "g"] = 0 # or: (network.lines.loc[new_line, "length"]*67e-9) network.lines.loc[new_line, "x"] = (network.lines. loc[new_line, "length"] * 0.3e-3) network.lines.loc[new_line, "b"] = (network.lines. loc[new_line, "length"] * 210e-9) elif overhead: network.lines.loc[new_line, "r"] = (network.lines. loc[new_line, "length"] * 0.05475) network.lines.loc[new_line, "g"] = 0 # or: (network.lines.loc[new_line, "length"]*30e-9) network.lines.loc[new_line, "x"] = (network.lines. loc[new_line, "length"] * 1e-3) network.lines.loc[new_line, "b"] = (network.lines. 
loc[new_line, "length"] * 11e-9 ) add_220kv_line("266", "24633", overhead=True) # temporary turn buses of transformers network.transformers["v_nom0"] = network.transformers.bus0.map( network.buses.v_nom) network.transformers["v_nom1"] = network.transformers.bus1.map( network.buses.v_nom) new_bus0 = network.transformers.bus1[network.transformers.v_nom0>network.transformers.v_nom1] new_bus1 = network.transformers.bus0[network.transformers.v_nom0>network.transformers.v_nom1] network.transformers.bus0[network.transformers.v_nom0>network.transformers.v_nom1] = new_bus0.values network.transformers.bus1[network.transformers.v_nom0>network.transformers.v_nom1] = new_bus1.values return network
Add missing transformer at Heizkraftwerk Nord in Munich and missing transformer in Stuttgart Parameters ---------- network : :class:`pypsa.Network` Overall container of PyPSA Returns ------- network : :class:`pypsa.Network` Overall container of PyPSA
def get_attr_text(self):
    """Get html attr text to render in template"""
    return ' '.join([
        '{}="{}"'.format(key, value)
        for key, value in self.attr.items()
    ])
Get html attr text to render in template
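A minimal standalone version of the attribute rendering above, with a made-up attr dict.

attr = {"class": "btn btn-primary", "id": "submit"}
attr_text = ' '.join('{}="{}"'.format(key, value) for key, value in attr.items())
print(attr_text)  # class="btn btn-primary" id="submit"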
def get_lldp_neighbor_detail_output_has_more(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") has_more = ET.SubElement(output, "has-more") has_more.text = kwargs.pop('has_more') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def _get_directory_stash(self, path): """Stashes a directory. Directories are stashed adjacent to their original location if possible, or else moved/copied into the user's temp dir.""" try: save_dir = AdjacentTempDirectory(path) save_dir.create() except OSError: save_dir = TempDirectory(kind="uninstall") save_dir.create() self._save_dirs[os.path.normcase(path)] = save_dir return save_dir.path
Stashes a directory. Directories are stashed adjacent to their original location if possible, or else moved/copied into the user's temp dir.
def get_dict_for_class(self, class_name, state=None, base_name='View'): """The style dict for a given class and state. This collects the style attributes from parent classes and the class of the given object and gives precedence to values thereof to the children. The state attribute of the view instance is taken as the current state if state is None. If the state is not 'normal' then the style definitions for the 'normal' state are mixed-in from the given state style definitions, giving precedence to the non-'normal' style definitions. """ classes = [] klass = class_name while True: classes.append(klass) if klass.__name__ == base_name: break klass = klass.__bases__[0] if state is None: state = 'normal' style = {} for klass in classes: class_name = klass.__name__ try: state_styles = self._styles[class_name][state] except KeyError: state_styles = {} if state != 'normal': try: normal_styles = self._styles[class_name]['normal'] except KeyError: normal_styles = {} state_styles = dict(chain(normal_styles.iteritems(), state_styles.iteritems())) style = dict(chain(state_styles.iteritems(), style.iteritems())) return style
The style dict for a given class and state. This collects the style attributes from parent classes and from the class of the given object, giving precedence to the values defined on the child classes. The state attribute of the view instance is taken as the current state if state is None. If the state is not 'normal', the style definitions for the 'normal' state are mixed in with the given state's style definitions, giving precedence to the non-'normal' style definitions.
def hashes(self):
    """Return set of hashes used in this resource_list."""
    hashes = set()
    if (self.resources is not None):
        for resource in self:
            if (resource.md5 is not None):
                hashes.add('md5')
            if (resource.sha1 is not None):
                hashes.add('sha-1')
            if (resource.sha256 is not None):
                hashes.add('sha-256')
    return(hashes)
Return set of hashes used in this resource_list.
def _get_local_ip(self): """Try to determine the local IP address of the machine.""" try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Use Google Public DNS server to determine own IP sock.connect(('8.8.8.8', 80)) return sock.getsockname()[0] except socket.error: try: return socket.gethostbyname(socket.gethostname()) except socket.gaierror: return '127.0.0.1' finally: sock.close()
Try to determine the local IP address of the machine.
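The same trick in isolation: a UDP socket is "connected" (no packets are actually sent) so the OS selects the outbound interface, whose address is then read back.

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    sock.connect(('8.8.8.8', 80))  # any routable address works; nothing is transmitted
    print(sock.getsockname()[0])   # e.g. '192.168.1.23'
finally:
    sock.close()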
def ss(inlist):
    """
    Squares each value in the passed list, adds up these squares and
    returns the result.

    Usage:   lss(inlist)
    """
    ss = 0
    for item in inlist:
        ss = ss + item * item
    return ss
Squares each value in the passed list, adds up these squares and returns the result. Usage: lss(inlist)
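An equivalent one-liner for the sum of squares, shown only as a usage reference.

print(sum(x * x for x in [1, 2, 3]))  # 14, same result as ss([1, 2, 3])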
def voronoi(script, region_num=10, overlap=False): """Voronoi Atlas parameterization """ filter_xml = ''.join([ ' <filter name="Parametrization: Voronoi Atlas">\n', ' <Param name="regionNum"', 'value="%d"' % region_num, 'description="Approx. Region Num"', 'type="RichInt"', 'tooltip="An estimation of the number of regions that must be generated. Smaller regions could lead to parametrizations with smaller distortion."', '/>\n', ' <Param name="overlapFlag"', 'value="%s"' % str(overlap).lower(), 'description="Overlap"', 'type="RichBool"', 'tooltip="If checked the resulting parametrization will be composed by overlapping regions, e.g. the resulting mesh will have duplicated faces: each region will have a ring of ovelapping duplicate faces that will ensure that border regions will be parametrized in the atlas twice. This is quite useful for building mipmap robust atlases"', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
Voronoi Atlas parameterization
def write_member(self, data): """Writes the given data as one gzip member. The data can be a string, an iterator that gives strings or a file-like object. """ if isinstance(data, basestring): self.write(data) else: for text in data: self.write(text) self.close_member()
Writes the given data as one gzip member. The data can be a string, an iterator that gives strings or a file-like object.
def make_gp_funs(cov_func, num_cov_params): """Functions that perform Gaussian process regression. cov_func has signature (cov_params, x, x')""" def unpack_kernel_params(params): mean = params[0] cov_params = params[2:] noise_scale = np.exp(params[1]) + 0.0001 return mean, cov_params, noise_scale def predict(params, x, y, xstar): """Returns the predictive mean and covariance at locations xstar, of the latent function value f (without observation noise).""" mean, cov_params, noise_scale = unpack_kernel_params(params) cov_f_f = cov_func(cov_params, xstar, xstar) cov_y_f = cov_func(cov_params, x, xstar) cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y)) pred_mean = mean + np.dot(solve(cov_y_y, cov_y_f).T, y - mean) pred_cov = cov_f_f - np.dot(solve(cov_y_y, cov_y_f).T, cov_y_f) return pred_mean, pred_cov def log_marginal_likelihood(params, x, y): mean, cov_params, noise_scale = unpack_kernel_params(params) cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y)) prior_mean = mean * np.ones(len(y)) return mvn.logpdf(y, prior_mean, cov_y_y) return num_cov_params + 2, predict, log_marginal_likelihood
Functions that perform Gaussian process regression. cov_func has signature (cov_params, x, x')
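A tiny NumPy sketch of the predictive equations used in predict(); the RBF kernel and data here are assumptions for illustration, not the cov_func actually passed to make_gp_funs.

import numpy as np

def rbf(a, b, lengthscale=1.0):
    return np.exp(-0.5 * (a[:, None] - b[None, :]) ** 2 / lengthscale ** 2)

x = np.array([0.0, 1.0, 2.0])
y = np.sin(x)
xstar = np.array([0.5, 1.5])
noise = 1e-4

cov_y_y = rbf(x, x) + noise * np.eye(len(x))
cov_y_f = rbf(x, xstar)
cov_f_f = rbf(xstar, xstar)

pred_mean = cov_y_f.T @ np.linalg.solve(cov_y_y, y)                 # zero prior mean assumed
pred_cov = cov_f_f - cov_y_f.T @ np.linalg.solve(cov_y_y, cov_y_f)
print(pred_mean, np.diag(pred_cov))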
async def fetch_neighbourhood(lat: float, long: float) -> Optional[dict]: """ Gets the neighbourhood from the fetch that is associated with the given postcode. :return: A neighbourhood object parsed from the fetch. :raise ApiError: When there was an error connecting to the API. """ lookup_url = f"https://data.police.uk/api/locate-neighbourhood?q={lat},{long}" async with ClientSession() as session: try: async with session.get(lookup_url) as request: if request.status == 404: return None neighbourhood = await request.json() except ClientConnectionError as con_err: logger.debug(f"Could not connect to {con_err.host}") raise ApiError(f"Could not connect to {con_err.host}") except JSONDecodeError as dec_err: logger.error(f"Could not decode data: {dec_err}") raise ApiError(f"Could not decode data: {dec_err}") neighbourhood_url = f"https://data.police.uk/api/{neighbourhood['force']}/{neighbourhood['neighbourhood']}" try: async with session.get(neighbourhood_url) as request: neighbourhood_data = await request.json() except ConnectionError as con_err: logger.debug(f"Could not connect to {con_err.args[0].pool.host}") raise ApiError(f"Could not connect to {con_err.args[0].pool.host}") except JSONDecodeError as dec_err: logger.error(f"Could not decode data: {dec_err}") raise ApiError(f"Could not decode data: {dec_err}") return neighbourhood_data
Gets the neighbourhood associated with the given latitude and longitude from the data.police.uk API. :return: A neighbourhood object parsed from the fetch. :raise ApiError: When there was an error connecting to the API.
def newDocNodeEatName(self, ns, name, content): """Creation of a new node element within a document. @ns and @content are optional (None). NOTE: @content is supposed to be a piece of XML CDATA, so it allow entities references, but XML special chars need to be escaped first by using xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you don't need entities support. """ if ns is None: ns__o = None else: ns__o = ns._o ret = libxml2mod.xmlNewDocNodeEatName(self._o, ns__o, name, content) if ret is None:raise treeError('xmlNewDocNodeEatName() failed') __tmp = xmlNode(_obj=ret) return __tmp
Creation of a new node element within a document. @ns and @content are optional (None). NOTE: @content is supposed to be a piece of XML CDATA, so it allows entity references, but XML special chars need to be escaped first by using xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you don't need entities support.
def position(self, chromosome, position, exact=False): """ Shortcut to do a single position filter on genomic datasets. """ return self._clone( filters=[GenomicFilter(chromosome, position, exact=exact)])
Shortcut to do a single position filter on genomic datasets.
def oftype(self, typ): '''Return a generator of formatters codes of type typ''' for key, val in self.items(): if val.type == typ: yield key
Return a generator of formatters codes of type typ
def url_join(url, path): """ url version of os.path.join """ p = six.moves.urllib.parse.urlparse(url) t = None if p.path and p.path[-1] == '/': if path and path[0] == '/': path = path[1:] t = ''.join([p.path, path]) else: t = ('' if path and path[0] == '/' else '/').join([p.path, path]) return six.moves.urllib.parse.urlunparse( p[:2]+ (t,)+ # os.sep is different on windows, don't use it here. p[3:] )
url version of os.path.join
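A usage sketch assuming url_join above is importable; the URLs are illustrative.

print(url_join('https://example.com/api/', '/v1/items'))  # https://example.com/api/v1/items
print(url_join('https://example.com/api', 'v1/items'))    # https://example.com/api/v1/items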
def add_arguments(self, parser): """Adds the unlock command arguments to the parser. Args: self (UnlockCommand): the ``UnlockCommand`` instance parser (argparse.ArgumentParser): the parser to add the arguments to Returns: ``None`` """ parser.add_argument('name', nargs=1, choices=['kinetis'], help='name of MCU to unlock') return self.add_common_arguments(parser, True)
Adds the unlock command arguments to the parser. Args: self (UnlockCommand): the ``UnlockCommand`` instance parser (argparse.ArgumentParser): the parser to add the arguments to Returns: ``None``
def plan_to_assignment(plan):
    """Convert the plan to the format used by cluster-topology."""
    assignment = {}
    for elem in plan['partitions']:
        assignment[
            (elem['topic'], elem['partition'])
        ] = elem['replicas']
    return assignment
Convert the plan to the format used by cluster-topology.
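A worked example with a made-up plan, assuming plan_to_assignment above is in scope.

plan = {'partitions': [
    {'topic': 'events', 'partition': 0, 'replicas': [1, 2]},
    {'topic': 'events', 'partition': 1, 'replicas': [2, 3]},
]}
print(plan_to_assignment(plan))
# {('events', 0): [1, 2], ('events', 1): [2, 3]}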
def get_imagery(cls, lat, lon, date=None, dim=None, cloud_score=False): """ Returns satellite image Args: lat: latitude float lon: longitude float date: date instance of available date from `get_assets` dim: width and height of image in degrees as float cloud_score: boolean to calculate the percentage of the image covered by clouds Returns: json """ instance = cls('planetary/earth/imagery') filters = { 'lat': lat, 'lon': lon, 'date': date, 'dim': dim, 'cloud_score': cloud_score } return instance.get_resource(**filters)
Returns satellite image Args: lat: latitude float lon: longitude float date: date instance of available date from `get_assets` dim: width and height of image in degrees as float cloud_score: boolean to calculate the percentage of the image covered by clouds Returns: json
def write_artifacts_metadata(self): """Write out a JSON file with all built targets artifact metadata, if such output file is specified.""" if self.conf.artifacts_metadata_file: logger.info('Writing artifacts metadata to file "%s"', self.conf.artifacts_metadata_file) with open(self.conf.artifacts_metadata_file, 'w') as fp: json.dump(self.artifacts_metadata, fp)
Write out a JSON file with all built targets artifact metadata, if such output file is specified.
def get_rva_from_offset(self, offset): """Get the RVA corresponding to this file offset. """ s = self.get_section_by_offset(offset) if not s: if self.sections: lowest_rva = min( [ adjust_SectionAlignment( s.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections] ) if offset < lowest_rva: # We will assume that the offset lies within the headers, or # at least points before where the earliest section starts # and we will simply return the offset as the RVA # # The case illustrating this behavior can be found at: # http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html # where the import table is not contained by any section # hence the RVA needs to be resolved to a raw offset return offset else: return offset #raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset) return s.get_rva_from_offset(offset)
Get the RVA corresponding to this file offset.
def mixin_class(target, cls):
    """Mix cls content in target."""
    for name, field in getmembers(cls):
        Mixin.mixin(target, field, name)
Mix cls content in target.
def to_json_data(self): """ Returns ------- A dictionary of serialized data. """ # create data d = collections.OrderedDict((t.get_ref(), t.to_json_data()) for t in self._tables.values()) d["_comment"] = self._comment d.move_to_end("_comment", last=False) d["_external_files"] = self._dev_external_files_manager return d
Returns ------- A dictionary of serialized data.
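A small illustration of the OrderedDict move_to_end trick used above to pin the "_comment" entry first; the table name is a placeholder.

import collections

d = collections.OrderedDict([("MyTable", {"ref": 1}), ("_comment", "generated")])
d.move_to_end("_comment", last=False)
print(list(d))  # ['_comment', 'MyTable']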
def images_create(self, filename): """Create and image file or image group object from the given file. The type of the created database object is determined by the suffix of the given file. An ValueError exception is thrown if the file has an unknown suffix. Raises ValueError if invalid file is given. Parameters ---------- filename : File-type object File on local disk. Expected to be either an image file or an archive containing image. Returns ------- DataObjectHandle Handle for create dtabase object. Either an ImageHandle or an ImageGroupHandle """ # Check if file is a single image suffix = get_filename_suffix(filename, image.VALID_IMGFILE_SUFFIXES) if not suffix is None: # Create image object from given file return self.images.create_object(filename) # The file has not been recognized as a valid image. Check if the file # is a valid tar archive (based on suffix). suffix = get_filename_suffix(filename, ARCHIVE_SUFFIXES) if not suffix is None: # Unpack the file to a temporary folder . temp_dir = tempfile.mkdtemp() try: tf = tarfile.open(name=filename, mode='r') tf.extractall(path=temp_dir) except (tarfile.ReadError, IOError) as err: # Clean up in case there is an error during extraction shutil.rmtree(temp_dir) raise ValueError(str(err)) # Get names of all files with valid image suffixes and create an # object for each image object group = [] for img_file in image.get_image_files(temp_dir, []): img_obj = self.images.create_object(img_file) folder = img_file[len(temp_dir):-len(img_obj.name)] group.append(image.GroupImage( img_obj.identifier, folder, img_obj.name, img_obj.image_file )) # Create image group name = os.path.basename(os.path.normpath(filename))[:-len(suffix)] img_grp = self.image_groups.create_object(name, group, filename) # Delete the temporary folder shutil.rmtree(temp_dir) return img_grp else: # Not a valid file suffix raise ValueError('invalid file suffix: ' + os.path.basename(os.path.normpath(filename)))
Create an image file or image group object from the given file. The type of the created database object is determined by the suffix of the given file. A ValueError exception is thrown if the file has an unknown suffix. Raises ValueError if an invalid file is given. Parameters ---------- filename : File-type object File on local disk. Expected to be either an image file or an archive containing images. Returns ------- DataObjectHandle Handle for the created database object. Either an ImageHandle or an ImageGroupHandle
def default(restart_cb=None, restart_func=None, close_fds=True): '''Sets up lazarus in default mode. See the :py:func:`custom` function for a more powerful mode of use. The default mode of lazarus is to watch all modules rooted at ``PYTHONPATH`` for changes and restart when they take place. Keyword arguments: restart_cb -- Callback invoked prior to restarting the process; allows for any cleanup to occur prior to restarting. Returning anything other than *None* in the callback will cancel the restart. restart_func -- Function invoked to restart the process. This supplants the default behavior of using *sys.executable* and *sys.argv*. close_fds -- Whether all file descriptors other than *stdin*, *stdout*, and *stderr* should be closed A simple example: >>> import lazarus >>> lazarus.default() >>> lazarus.stop() ''' if _active: msg = 'lazarus is already active' raise RuntimeWarning(msg) _python_path = os.getenv('PYTHONPATH') if not _python_path: msg = 'PYTHONPATH is not set' raise RuntimeError(msg) if restart_cb and not callable(restart_cb): msg = 'restart_cb keyword argument is not callable' raise TypeError(msg) if restart_func and not callable(restart_func): msg = 'restart_func keyword argument is not callable' raise TypeError(msg) global _close_fds _close_fds = close_fds try: from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler except ImportError as ie: msg = 'no watchdog support (%s)' % str(ie) raise RuntimeError(msg) class _Handler(FileSystemEventHandler): def __init__(self): self.active = True def dispatch(self, event): if not self.active: return super(_Handler, self).dispatch(event) def all_events(self, event): if is_restart_event(event): cancelled = _restart() if not cancelled: self.active = False def on_created(self, event): self.all_events(event) def on_deleted(self, event): self.all_events(event) def on_modified(self, event): self.all_events(event) def on_moved(self, event): self.all_events(event) global _observer _observer = Observer() handler = _Handler() _observer.schedule(handler, _python_path, recursive=True) global _restart_cb _restart_cb = restart_cb global _restart_func _restart_func = restart_func _activate() _observer.start()
Sets up lazarus in default mode. See the :py:func:`custom` function for a more powerful mode of use. The default mode of lazarus is to watch all modules rooted at ``PYTHONPATH`` for changes and restart when they take place. Keyword arguments: restart_cb -- Callback invoked prior to restarting the process; allows for any cleanup to occur prior to restarting. Returning anything other than *None* in the callback will cancel the restart. restart_func -- Function invoked to restart the process. This supplants the default behavior of using *sys.executable* and *sys.argv*. close_fds -- Whether all file descriptors other than *stdin*, *stdout*, and *stderr* should be closed A simple example: >>> import lazarus >>> lazarus.default() >>> lazarus.stop()
def next_id(self): """Next available positive integer id value in this story XML document. The value is determined by incrementing the maximum existing id value. Gaps in the existing id sequence are not filled. The id attribute value is unique in the document, without regard to the element type it appears on. """ id_str_lst = self._element.xpath('//@id') used_ids = [int(id_str) for id_str in id_str_lst if id_str.isdigit()] if not used_ids: return 1 return max(used_ids) + 1
Next available positive integer id value in this story XML document. The value is determined by incrementing the maximum existing id value. Gaps in the existing id sequence are not filled. The id attribute value is unique in the document, without regard to the element type it appears on.
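A standalone sketch of the same "next free id" logic, using lxml for illustration (the original operates on self._element).

from lxml import etree

root = etree.fromstring('<story><p id="1"/><p id="3"/><p id="oops"/></story>')
used_ids = [int(s) for s in root.xpath('//@id') if s.isdigit()]
print(max(used_ids) + 1 if used_ids else 1)  # 4; non-numeric ids are ignored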
def register_arrays(self, arrays): """ Register arrays using a list of dictionaries defining the arrays. The list should itself contain dictionaries. i.e. .. code-block:: python D = [{ 'name':'uvw', 'shape':(3,'ntime','nbl'),'dtype':np.float32 }, { 'name':'lm', 'shape':(2,'nsrc'),'dtype':np.float32 }] Parameters ---------- arrays : A list or dict. A list or dictionary of dictionaries describing arrays. """ if isinstance(arrays, collections.Mapping): arrays = arrays.itervalues() for ary in arrays: self.register_array(**ary)
Register arrays using a list of dictionaries defining the arrays. The list should itself contain dictionaries. i.e. .. code-block:: python D = [{ 'name':'uvw', 'shape':(3,'ntime','nbl'),'dtype':np.float32 }, { 'name':'lm', 'shape':(2,'nsrc'),'dtype':np.float32 }] Parameters ---------- arrays : A list or dict. A list or dictionary of dictionaries describing arrays.
def indirect_font(font, fonts, text): """ Check input font for indirect modes. :param font: input font :type font : str :param fonts: fonts list :type fonts : list :param text: input text :type text:str :return: font as str """ if font == "rnd-small" or font == "random-small" or font == "rand-small": font = random.choice(RND_SIZE_DICT["small_list"]) return font if font == "rnd-medium" or font == "random-medium" or font == "rand-medium": font = random.choice(RND_SIZE_DICT["medium_list"]) return font if font == "rnd-large" or font == "random-large" or font == "rand-large": font = random.choice(RND_SIZE_DICT["large_list"]) return font if font == "rnd-xlarge" or font == "random-xlarge" or font == "rand-xlarge": font = random.choice(RND_SIZE_DICT["xlarge_list"]) return font if font == "random" or font == "rand" or font == "rnd": filtered_fonts = list(set(fonts) - set(RANDOM_FILTERED_FONTS)) font = random.choice(filtered_fonts) return font if font == "wizard" or font == "wiz" or font == "magic": font = wizard_font(text) return font if font == "rnd-na" or font == "random-na" or font == "rand-na": font = random.choice(TEST_FILTERED_FONTS) return font if font not in FONT_MAP.keys(): distance_list = list(map(lambda x: distance_calc(font, x), fonts)) font = fonts[distance_list.index(min(distance_list))] return font
Check input font for indirect modes. :param font: input font :type font : str :param fonts: fonts list :type fonts : list :param text: input text :type text:str :return: font as str
def _init_metadata(self): """stub""" self._choice_ids_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'choice_ids'), 'element_label': 'response set', 'instructions': 'submit correct choice for answer', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_object_values': [[]], 'syntax': 'OBJECT', } self._choice_id_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'choice_id'), 'element_label': 'response set', 'instructions': 'submit correct choice for answer', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [] }
stub
def get_covariance(datargs, outargs, vargs, datvar, outvar): """ Get covariance matrix. :param datargs: data arguments :param outargs: output arguments :param vargs: variable arguments :param datvar: variance of data arguments :param outvar: variance of output arguments :return: covariance """ # number of formula arguments that are not constant argn = len(vargs) # number of observations must be the same for all vargs nobs = 1 for m in xrange(argn): a = vargs[m] try: a = datargs[a] except (KeyError, TypeError): a = outargs[a] avar = outvar[a] else: avar = datvar[a] for n in xrange(argn): b = vargs[n] try: b = datargs[b] except (KeyError, TypeError): b = outargs[b] c = avar.get(b, 0.0) try: nobs = max(nobs, len(c)) except (TypeError, ValueError): LOGGER.debug('c of %s vs %s = %g', a, b, c) # covariance matrix is initially zeros cov = np.zeros((nobs, argn, argn)) # loop over arguments in both directions, fill in covariance for m in xrange(argn): a = vargs[m] try: a = datargs[a] except (KeyError, TypeError): a = outargs[a] avar = outvar[a] else: avar = datvar[a] for n in xrange(argn): b = vargs[n] try: b = datargs[b] except (KeyError, TypeError): b = outargs[b] cov[:, m, n] = avar.get(b, 0.0) if nobs == 1: cov = cov.squeeze() # squeeze out any extra dimensions LOGGER.debug('covariance:\n%r', cov) return cov
Get covariance matrix. :param datargs: data arguments :param outargs: output arguments :param vargs: variable arguments :param datvar: variance of data arguments :param outvar: variance of output arguments :return: covariance
def _has_not_qual(ntd):
    """Return True if the qualifiers contain a 'NOT'"""
    for qual in ntd.Qualifier:
        if 'not' in qual:
            return True
        if 'NOT' in qual:
            return True
    return False
Return True if the qualifiers contain a 'NOT'
def vertical_horizontal_filter(data, period): """ Vertical Horizontal Filter. Formula: ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1)) """ catch_errors.check_for_period_error(data, period) vhf = [abs(np.max(data[idx+1-period:idx+1]) - np.min(data[idx+1-period:idx+1])) / sum([abs(data[idx+1-period:idx+1][i] - data[idx+1-period:idx+1][i-1]) for i in range(0, len(data[idx+1-period:idx+1]))]) for idx in range(period - 1, len(data))] vhf = fill_for_noncomputable_vals(data, vhf) return vhf
Vertical Horizontal Filter. Formula: ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1))
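A clean NumPy sketch of the formula in the docstring (a re-implementation for illustration, not the library's code); the denominator sums absolute price changes within each window.

import numpy as np

def vhf(data, period):
    data = np.asarray(data, dtype=float)
    out = []
    for idx in range(period - 1, len(data)):
        window = data[idx + 1 - period: idx + 1]
        out.append(abs(window.max() - window.min()) / np.abs(np.diff(window)).sum())
    return np.array(out)

print(vhf([1, 2, 4, 3, 5, 7], period=4))  # [0.75 0.6  0.8 ]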
def query(self, where="1=1", out_fields="*", timeFilter=None, geometryFilter=None, returnGeometry=True, returnIDsOnly=False, returnCountOnly=False, returnFeatureClass=False, returnDistinctValues=False, returnExtentOnly=False, maxAllowableOffset=None, geometryPrecision=None, outSR=None, groupByFieldsForStatistics=None, statisticFilter=None, out_fc=None, **kwargs): """ queries a feature service based on a sql statement Inputs: where - the selection sql statement out_fields - the attribute fields to return timeFilter - a TimeFilter object where either the start time or start and end time are defined to limit the search results for a given time. The values in the timeFilter should be as UTC timestampes in milliseconds. No checking occurs to see if they are in the right format. geometryFilter - a GeometryFilter object to parse down a given query by another spatial dataset. returnGeometry - true means a geometry will be returned, else just the attributes returnIDsOnly - false is default. True means only OBJECTIDs will be returned returnCountOnly - if True, then an integer is returned only based on the sql statement returnFeatureClass - Default False. If true, query will be returned as feature class out_fc - only valid if returnFeatureClass is set to True. Output location of query. groupByFieldsForStatistics - One or more field names on which the values need to be grouped for calculating the statistics. statisticFilter - object that performs statistic queries kwargs - optional parameters that can be passed to the Query function. This will allow users to pass additional parameters not explicitly implemented on the function. A complete list of functions available is documented on the Query REST API. Output: A list of Feature Objects (default) or a path to the output featureclass if returnFeatureClass is set to True. 
""" params = {"f": "json", "where": where, "outFields": out_fields, "returnGeometry" : returnGeometry, "returnIdsOnly" : returnIDsOnly, "returnCountOnly" : returnCountOnly, "returnDistinctValues" : returnDistinctValues, "returnExtentOnly" : returnExtentOnly } if outSR is not None: params['outSR'] = outSR if not maxAllowableOffset is None: params['maxAllowableOffset'] = maxAllowableOffset if not geometryPrecision is None: params['geometryPrecision'] = geometryPrecision for k,v in kwargs.items(): params[k] = v if returnDistinctValues: params["returnGeometry"] = False if not timeFilter is None and \ isinstance(timeFilter, filters.TimeFilter): params['time'] = timeFilter.filter if not geometryFilter is None and \ isinstance(geometryFilter, filters.GeometryFilter): gf = geometryFilter.filter params['geometry'] = gf['geometry'] params['geometryType'] = gf['geometryType'] params['spatialRelationship'] = gf['spatialRel'] params['inSR'] = gf['inSR'] if "buffer" in gf: params['buffer'] = gf['buffer'] if "units" in gf: params['units'] = gf['units'] if not groupByFieldsForStatistics is None: params['groupByFieldsForStatistics'] = groupByFieldsForStatistics if not statisticFilter is None and \ isinstance(statisticFilter, filters.StatisticFilter): params['outStatistics'] = statisticFilter.filter fURL = self._url + "/query" results = self._post(fURL, params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url) if 'error' in results: raise ValueError (results) if not returnCountOnly and not returnIDsOnly and \ not returnDistinctValues and not returnExtentOnly: if returnFeatureClass: json_text = json.dumps(results) temp = scratchFolder() + os.sep + uuid.uuid4().get_hex() + ".json" with open(temp, 'wb') as writer: writer.write(json_text) writer.flush() del writer fc = json_to_featureclass(json_file=temp, out_fc=out_fc) os.remove(temp) return fc else: return FeatureSet.fromJSON(json.dumps(results)) else: return results return
queries a feature service based on a sql statement Inputs: where - the selection sql statement out_fields - the attribute fields to return timeFilter - a TimeFilter object where either the start time or start and end time are defined to limit the search results for a given time. The values in the timeFilter should be as UTC timestamps in milliseconds. No checking occurs to see if they are in the right format. geometryFilter - a GeometryFilter object to parse down a given query by another spatial dataset. returnGeometry - true means a geometry will be returned, else just the attributes returnIDsOnly - false is default. True means only OBJECTIDs will be returned returnCountOnly - if True, then an integer is returned only based on the sql statement returnFeatureClass - Default False. If true, query will be returned as feature class out_fc - only valid if returnFeatureClass is set to True. Output location of query. groupByFieldsForStatistics - One or more field names on which the values need to be grouped for calculating the statistics. statisticFilter - object that performs statistic queries kwargs - optional parameters that can be passed to the Query function. This will allow users to pass additional parameters not explicitly implemented on the function. A complete list of functions available is documented on the Query REST API. Output: A list of Feature Objects (default) or a path to the output featureclass if returnFeatureClass is set to True.
def pic_inflow_v2(self): """Update the inlet link sequences. Required inlet sequences: |dam_inlets.Q| |dam_inlets.S| |dam_inlets.R| Calculated flux sequence: |Inflow| Basic equation: :math:`Inflow = Q + S + R` """ flu = self.sequences.fluxes.fastaccess inl = self.sequences.inlets.fastaccess flu.inflow = inl.q[0]+inl.s[0]+inl.r[0]
Update the inlet link sequences. Required inlet sequences: |dam_inlets.Q| |dam_inlets.S| |dam_inlets.R| Calculated flux sequence: |Inflow| Basic equation: :math:`Inflow = Q + S + R`
def __import_vars(self, env_file): """Actual importing function.""" with open(env_file, "r") as f: # pylint: disable=invalid-name for line in f: try: line = line.lstrip() if line.startswith('export'): line = line.replace('export', '', 1) key, val = line.strip().split('=', 1) except ValueError: # Take care of blank or comment lines pass else: if not callable(val): if self.verbose_mode: if key in self.app.config: print( " * Overwriting an existing config var:" " {0}".format(key)) else: print( " * Setting an entirely new config var:" " {0}".format(key)) self.app.config[key] = re.sub( r"\A[\"']|[\"']\Z", "", val)
Actual importing function.
def attributive(adjective, gender=MALE, role=SUBJECT, article=None): """ For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative, accusative, dative, genitive). """ w, g, c, a = \ adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None if w in adjective_attributive: return adjective_attributive[w] if a is None \ or a in ("mir", "dir", "ihm") \ or a in ("ein", "etwas", "mehr") \ or a.startswith(("all", "mehrer", "wenig", "viel")): return w + adjectives_strong.get((g, c), "") if a.startswith(("ein", "kein")) \ or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")): return w + adjectives_mixed.get((g, c), "") if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \ or a.startswith(( "derselb", "derjenig", "jed", "jeglich", "jen", "manch", "dies", "solch", "welch")): return w + adjectives_weak.get((g, c), "") # Default to strong inflection. return w + adjectives_strong.get((g, c), "")
For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative, accusative, dative, genitive).
def append_summary_to_module_docstring(module): """ Change the ``module.__doc__`` docstring to include a summary table based on its contents as declared on ``module.__all__``. """ pairs = [(name, getattr(module, name)) for name in module.__all__] kws = dict(key_header="Name", summary_type="module contents") module.__doc__ = docstring_with_summary(module.__doc__, pairs, **kws)
Change the ``module.__doc__`` docstring to include a summary table based on its contents as declared on ``module.__all__``.
def filter_parts(self, predicate='', exclude=True): """ Filter the data by partition string. A partition string looks like `pt1=1,pt2=2/pt1=2,pt2=1`, where comma (,) denotes 'and', while (/) denotes 'or'. :param str|Partition predicate: predicate string of partition filter :param bool exclude: True if you want to exclude partition fields, otherwise False. True for default. :return: new collection :rtype: :class:`odps.df.expr.expressions.CollectionExpr` """ source = self._source_data if source is None: raise ExpressionError('Can only filter on data sources.') def _parse_partition_predicate(p): if '=' not in p: raise ExpressionError('Illegal partition predicate.') field_name, field_value = [s.strip() for s in p.split('=', 1)] if not hasattr(source, 'schema'): raise ExpressionError('filter_partition can only be applied on ODPS DataFrames') if field_name not in source.schema: raise ExpressionError('Column `%s` not exists in input collection' % field_name) if field_name not in source.schema._partition_schema: raise ExpressionError('`%s` is not a partition column' % field_name) part_col = self[field_name] if field_value.startswith('\'') or field_value.startswith('\"'): encoding = 'string-escape' if six.PY2 else 'unicode-escape' field_value = to_binary(field_value.strip('"\'')).decode(encoding) if isinstance(part_col.data_type, types.Integer): field_value = int(field_value) elif isinstance(part_col.data_type, types.Float): field_value = float(field_value) return part_col == field_value from ...models.partition import Partition from ...types import PartitionSpec if isinstance(predicate, Partition): predicate = predicate.partition_spec if isinstance(predicate, PartitionSpec): predicate = ','.join("%s='%s'" % (k, v) for k, v in six.iteritems(predicate.kv)) if isinstance(predicate, list): predicate = '/'.join(str(s) for s in predicate) elif not isinstance(predicate, six.string_types): raise ExpressionError('Only accept string predicates.') if not predicate: predicate_obj = None else: part_formatter = lambda p: reduce(operator.and_, map(_parse_partition_predicate, p.split(','))) predicate_obj = reduce(operator.or_, map(part_formatter, predicate.split('/'))) if not source.schema.partitions: raise ExpressionError('No partition columns in the collection.') if exclude: columns = [c for c in self.schema if c.name not in source.schema._partition_schema] new_schema = types.Schema.from_lists([c.name for c in columns], [c.type for c in columns]) return FilterPartitionCollectionExpr(self, predicate_obj, _schema=new_schema, _predicate_string=predicate) else: return self.filter(predicate_obj)
Filter the data by partition string. A partition string looks like `pt1=1,pt2=2/pt1=2,pt2=1`, where comma (,) denotes 'and', while (/) denotes 'or'. :param str|Partition predicate: predicate string of partition filter :param bool exclude: True if you want to exclude partition fields, otherwise False. True for default. :return: new collection :rtype: :class:`odps.df.expr.expressions.CollectionExpr`
def transform(self, y): """ Transform features per specified math function. :param y: :return: """ if self.transform_type == 'log': return np.log(y) elif self.transform_type == 'exp': return np.exp(y) elif self.transform_type == 'sqrt': return np.sqrt(y) elif self.transform_type == 'sin': return np.sin(y) elif self.transform_type == 'cos': return np.cos(y) elif self.transform_type == 'tan': return np.tan(y) elif self.transform_type == 'abs': return np.abs(y)
Transform features per specified math function. :param y: :return:
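A dictionary dispatch is a compact equivalent of the elif chain above and avoids silently returning None for an unsupported name; this is an illustrative sketch, not the project's actual code:

import numpy as np

_TRANSFORMS = {
    'log': np.log, 'exp': np.exp, 'sqrt': np.sqrt,
    'sin': np.sin, 'cos': np.cos, 'tan': np.tan, 'abs': np.abs,
}

def transform(y, transform_type='log'):
    # Unknown names raise KeyError instead of falling through to None.
    return _TRANSFORMS[transform_type](np.asarray(y))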
def long_fname_format(fmt_str, fmt_dict, hashable_keys=[], max_len=64, hashlen=16, ABS_MAX_LEN=255, hack27=False): r""" DEPRICATE Formats a string and hashes certain parts if the resulting string becomes too long. Used for making filenames fit onto disk. Args: fmt_str (str): format of fname fmt_dict (str): dict to format fname with hashable_keys (list): list of dict keys you are willing to have hashed max_len (int): tries to fit fname into this length ABS_MAX_LEN (int): throws AssertionError if fname over this length CommandLine: python -m utool.util_str --exec-long_fname_format Example: >>> # ENABLE_DOCTET >>> import utool as ut >>> fmt_str = 'qaid={qaid}_res_{cfgstr}_quuid={quuid}' >>> quuid_str = 'blahblahblahblahblahblah' >>> cfgstr = 'big_long_string__________________________________' >>> qaid = 5 >>> fmt_dict = dict(cfgstr=cfgstr, qaid=qaid, quuid=quuid_str) >>> hashable_keys = ['cfgstr', 'quuid'] >>> max_len = 64 >>> hashlen = 8 >>> fname0 = ut.long_fname_format(fmt_str, fmt_dict, max_len=None) >>> fname1 = ut.long_fname_format(fmt_str, fmt_dict, hashable_keys, >>> max_len=64, hashlen=8) >>> fname2 = ut.long_fname_format(fmt_str, fmt_dict, hashable_keys, max_len=42, >>> hashlen=8) >>> result = fname0 + '\n' + fname1 + '\n' + fname2 >>> print(result) qaid=5_res_big_long_string___________________________________quuid=blahblahblahblahblahblah qaid=5_res_racfntgq_quuid=blahblahblahblahblahblah qaid=5_res_racfntgq_quuid=yvuaffrp """ from utool import util_hash fname = fmt_str.format(**fmt_dict) if max_len is None: return fname if len(fname) > max_len: # Copy because we will overwrite fmt_dict values with hashed values fmt_dict_ = fmt_dict.copy() for key in hashable_keys: if hack27: fmt_dict_[key] = util_hash.hashstr27(fmt_dict_[key], hashlen=hashlen) else: fmt_dict_[key] = util_hash.hashstr(fmt_dict_[key], hashlen=hashlen) fname = fmt_str.format(**fmt_dict_) if len(fname) <= max_len: break if len(fname) > max_len: diff = len(fname) - max_len msg = ('[util_str] Warning: Too big by %d chars. Exausted all options' 'to make fname fit into size. ') % (diff,) print(msg) print('* len(fname) = %r' % len(fname)) print('* fname = %r' % fname) if ABS_MAX_LEN is not None and len(fname) > ABS_MAX_LEN: raise AssertionError(msg) return fname
r""" DEPRICATE Formats a string and hashes certain parts if the resulting string becomes too long. Used for making filenames fit onto disk. Args: fmt_str (str): format of fname fmt_dict (str): dict to format fname with hashable_keys (list): list of dict keys you are willing to have hashed max_len (int): tries to fit fname into this length ABS_MAX_LEN (int): throws AssertionError if fname over this length CommandLine: python -m utool.util_str --exec-long_fname_format Example: >>> # ENABLE_DOCTET >>> import utool as ut >>> fmt_str = 'qaid={qaid}_res_{cfgstr}_quuid={quuid}' >>> quuid_str = 'blahblahblahblahblahblah' >>> cfgstr = 'big_long_string__________________________________' >>> qaid = 5 >>> fmt_dict = dict(cfgstr=cfgstr, qaid=qaid, quuid=quuid_str) >>> hashable_keys = ['cfgstr', 'quuid'] >>> max_len = 64 >>> hashlen = 8 >>> fname0 = ut.long_fname_format(fmt_str, fmt_dict, max_len=None) >>> fname1 = ut.long_fname_format(fmt_str, fmt_dict, hashable_keys, >>> max_len=64, hashlen=8) >>> fname2 = ut.long_fname_format(fmt_str, fmt_dict, hashable_keys, max_len=42, >>> hashlen=8) >>> result = fname0 + '\n' + fname1 + '\n' + fname2 >>> print(result) qaid=5_res_big_long_string___________________________________quuid=blahblahblahblahblahblah qaid=5_res_racfntgq_quuid=blahblahblahblahblahblah qaid=5_res_racfntgq_quuid=yvuaffrp
def commercial_domains(): # type: () -> set """ Return list of commercial email domains, which means: - domain is not public - domain is not university - it is not personal (more than 1 person using this domain) >>> "google.com" in commercial_domains() True >>> "microsoft.com" in commercial_domains() True >>> "isri.cs.cmu.edu" in commercial_domains() # university department False >>> "jaraco.com" in commercial_domains() # personal False """ dus = domain_user_stats() es = "test@" + pd.Series(dus.index, index=dus.index) return set( dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index)
Return list of commercial email domains, which means: - domain is not public - domain is not university - it is not personal (more than 1 person using this domain) >>> "google.com" in commercial_domains() True >>> "microsoft.com" in commercial_domains() True >>> "isri.cs.cmu.edu" in commercial_domains() # university department False >>> "jaraco.com" in commercial_domains() # personal False
def _get_function_wrapper( self, func: typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]] ) -> typing.Callable[..., typing.Any]: """Here should be constructed and returned real decorator. :param func: Wrapped function :type func: typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]] :rtype: typing.Callable """ raise NotImplementedError()
Here should be constructed and returned real decorator. :param func: Wrapped function :type func: typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]] :rtype: typing.Callable
def render_item(self, all_posts): """ Renders the Post as HTML using the template specified in :attr:`html_template_path`. :param all_posts: An optional :class:`PostCollection` containing all of the posts in the site. :return: The rendered HTML as a string. """ index = all_posts.index(self) if index > 0: # has newer posts newer_post = all_posts[index - 1] else: newer_post = None if index < len(all_posts) - 1: # has older posts older_post = all_posts[index + 1] else: older_post = None return settings.JINJA_ENV.get_template(self.template).render( post=self, newer_post=newer_post, older_post=older_post, all_posts=all_posts, nav_context='post' )
Renders the Post as HTML using the template specified in :attr:`html_template_path`. :param all_posts: An optional :class:`PostCollection` containing all of the posts in the site. :return: The rendered HTML as a string.
def removeTab(self, index):
    """
    Removes tab at index ``index``.

    This method will emit tab_closed for the removed tab.

    :param index: index of the tab to remove.
    """
    widget = self.widget(index)
    try:
        self._widgets.remove(widget)
    except ValueError:
        pass
    self.tab_closed.emit(widget)
    self._del_code_edit(widget)
    QTabWidget.removeTab(self, index)
    if widget == self._current:
        self._current = None
Removes tab at index ``index``.

This method will emit tab_closed for the removed tab.

:param index: index of the tab to remove.
def on_exception(wait_gen, exception, max_tries=None, max_time=None, jitter=full_jitter, giveup=lambda e: False, on_success=None, on_backoff=None, on_giveup=None, logger='backoff', **wait_gen_kwargs): """Returns decorator for backoff and retry triggered by exception. Args: wait_gen: A generator yielding successive wait times in seconds. exception: An exception type (or tuple of types) which triggers backoff. max_tries: The maximum number of attempts to make before giving up. Once exhausted, the exception will be allowed to escape. The default value of None means their is no limit to the number of tries. If a callable is passed, it will be evaluated at runtime and its return value used. max_time: The maximum total amount of time to try for before giving up. Once expired, the exception will be allowed to escape. If a callable is passed, it will be evaluated at runtime and its return value used. jitter: A function of the value yielded by wait_gen returning the actual time to wait. This distributes wait times stochastically in order to avoid timing collisions across concurrent clients. Wait times are jittered by default using the full_jitter function. Jittering may be disabled altogether by passing jitter=None. giveup: Function accepting an exception instance and returning whether or not to give up. Optional. The default is to always continue. on_success: Callable (or iterable of callables) with a unary signature to be called in the event of success. The parameter is a dict containing details about the invocation. on_backoff: Callable (or iterable of callables) with a unary signature to be called in the event of a backoff. The parameter is a dict containing details about the invocation. on_giveup: Callable (or iterable of callables) with a unary signature to be called in the event that max_tries is exceeded. The parameter is a dict containing details about the invocation. logger: Name or Logger object to log to. Defaults to 'backoff'. **wait_gen_kwargs: Any additional keyword args specified will be passed to wait_gen when it is initialized. Any callable args will first be evaluated and their return values passed. This is useful for runtime configuration. """ def decorate(target): # change names because python 2.x doesn't have nonlocal logger_ = logger if isinstance(logger_, basestring): logger_ = logging.getLogger(logger_) on_success_ = _config_handlers(on_success) on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_) on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_) retry = None if sys.version_info[:2] >= (3, 5): # pragma: python=3.5 import asyncio if asyncio.iscoroutinefunction(target): import backoff._async retry = backoff._async.retry_exception elif _is_event_loop() and _is_current_task(): # Verify that sync version is not being run from coroutine # (that would lead to event loop hiccups). raise TypeError( "backoff.on_exception applied to a regular function " "inside coroutine, this will lead to event loop " "hiccups. Use backoff.on_exception on coroutines in " "asynchronous code.") if retry is None: retry = _sync.retry_exception return retry(target, wait_gen, exception, max_tries, max_time, jitter, giveup, on_success_, on_backoff_, on_giveup_, wait_gen_kwargs) # Return a function which decorates a target with a retry loop. return decorate
Returns decorator for backoff and retry triggered by exception. Args: wait_gen: A generator yielding successive wait times in seconds. exception: An exception type (or tuple of types) which triggers backoff. max_tries: The maximum number of attempts to make before giving up. Once exhausted, the exception will be allowed to escape. The default value of None means their is no limit to the number of tries. If a callable is passed, it will be evaluated at runtime and its return value used. max_time: The maximum total amount of time to try for before giving up. Once expired, the exception will be allowed to escape. If a callable is passed, it will be evaluated at runtime and its return value used. jitter: A function of the value yielded by wait_gen returning the actual time to wait. This distributes wait times stochastically in order to avoid timing collisions across concurrent clients. Wait times are jittered by default using the full_jitter function. Jittering may be disabled altogether by passing jitter=None. giveup: Function accepting an exception instance and returning whether or not to give up. Optional. The default is to always continue. on_success: Callable (or iterable of callables) with a unary signature to be called in the event of success. The parameter is a dict containing details about the invocation. on_backoff: Callable (or iterable of callables) with a unary signature to be called in the event of a backoff. The parameter is a dict containing details about the invocation. on_giveup: Callable (or iterable of callables) with a unary signature to be called in the event that max_tries is exceeded. The parameter is a dict containing details about the invocation. logger: Name or Logger object to log to. Defaults to 'backoff'. **wait_gen_kwargs: Any additional keyword args specified will be passed to wait_gen when it is initialized. Any callable args will first be evaluated and their return values passed. This is useful for runtime configuration.
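A typical application of this decorator (a minimal sketch; assumes the requests library is installed):

import backoff
import requests

@backoff.on_exception(backoff.expo, requests.exceptions.RequestException,
                      max_tries=5, max_time=60)
def get_url(url):
    # Retried with exponential backoff and jittered waits on any requests error.
    return requests.get(url, timeout=10)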
def GetParserFromFilename(self, path): """Returns the appropriate parser class from the filename.""" # Find the configuration parser. handler_name = path.split("://")[0] for parser_cls in itervalues(GRRConfigParser.classes): if parser_cls.name == handler_name: return parser_cls # Handle the filename. extension = os.path.splitext(path)[1] if extension in [".yaml", ".yml"]: return YamlParser return ConfigFileParser
Returns the appropriate parser class from the filename.
def iat(x, maxlag=None): """Calculate the integrated autocorrelation time (IAT), given the trace from a Stochastic.""" if not maxlag: # Calculate maximum lag to which autocorrelation is calculated maxlag = _find_max_lag(x) acr = [autocorr(x, lag) for lag in range(1, maxlag + 1)] # Calculate gamma values gammas = [(acr[2 * i] + acr[2 * i + 1]) for i in range(maxlag // 2)] cut = _cut_time(gammas) if cut + 1 == len(gammas): print_("Not enough lag to calculate IAT") return np.sum(2 * gammas[:cut + 1]) - 1.0
Calculate the integrated autocorrelation time (IAT), given the trace from a Stochastic.
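The quantity being estimated is the integrated autocorrelation time, τ = 1 + 2 Σ_{k≥1} ρ_k, where ρ_k is the lag-k autocorrelation of the trace; the pairing of consecutive lags into `gammas` and the `_cut_time` truncation appear to follow the spirit of Geyer's initial-sequence estimator.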
def put(self, destination):
    """ Copy the referenced directory to this path

    Note:
        This ignores anything not in the desired directory, given by
        ``self.dirname``.

    Args:
        destination (str): path to put this directory (which must NOT already exist)

    References:
        https://stackoverflow.com/a/8261083/1958900
    """
    target = get_target_path(destination, self.dirname)
    valid_paths = (self.dirname, './%s' % self.dirname)

    with tarfile.open(self.archive_path, 'r:*') as tf:
        members = []
        for tarinfo in tf:
            # Get only files under the directory `self.dirname`
            pathsplit = os.path.normpath(tarinfo.path).split(os.sep)

            if pathsplit[0] not in valid_paths:
                print('WARNING: skipped file "%s" in archive; not in directory "%s"'
                      % (tarinfo.path, self.dirname))
                continue
            if len(pathsplit) == 1:
                continue
            tarinfo.name = os.path.join(*pathsplit[1:])
            members.append(tarinfo)

        if not members:
            raise ValueError("No files under path directory '%s' in this tarfile"
                             % self.dirname)

        tf.extractall(target, members)
Copy the referenced directory to this path Note: This ignores anything not in the desired directory, given by ``self.dirname``. Args: destination (str): path to put this directory (which must NOT already exist) References: https://stackoverflow.com/a/8261083/1958900
def read(self, stream): """Reads the topology from a stream or file.""" def read_it(stream): bytes = stream.read() transportIn = TMemoryBuffer(bytes) protocolIn = TBinaryProtocol.TBinaryProtocol(transportIn) topology = StormTopology() topology.read(protocolIn) return topology if isinstance(stream, six.string_types): with open(stream, 'rb') as f: return read_it(f) else: return read_it(stream)
Reads the topology from a stream or file.
def get_version(self, diff_to_increase_ratio):
    """Gets version

    :param diff_to_increase_ratio: Ratio used to convert the number of changes into a version increase
    :return: Version of this code, based on commit diffs
    """
    diffs = self.get_diff_amounts()
    version = Version()

    for diff in diffs:
        version.increase_by_changes(diff, diff_to_increase_ratio)

    return version
Gets version

:param diff_to_increase_ratio: Ratio used to convert the number of changes into a version increase
:return: Version of this code, based on commit diffs
def reparentUnions(self): ''' Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Namespaces and classes should have the unions defined in them to be in the child list of itself rather than floating around. Union nodes that are reparented (e.g. a union defined in a class) will be removed from the list ``self.unions`` since the Breathe directive for its parent (e.g. the class) will include the documentation for the union. The consequence of this is that a union defined in a class will **not** appear in the full api listing of Unions. ''' # unions declared in a class will not link to the individual union page, so # we will instead elect to remove these from the list of unions removals = [] for u in self.unions: parts = u.name.split("::") if len(parts) >= 2: # TODO: nested unions are not supported right now... parent_name = "::".join(p for p in parts[:-1]) reparented = False # see if the name matches any potential parents for node in itertools.chain(self.class_like, self.namespaces): if node.name == parent_name: node.children.append(u) u.parent = node reparented = True break # if not reparented, try the namespaces if reparented: removals.append(u) else: # << verboseBuild utils.verbose_log( "The union {0} has '::' in its name, but no parent was found!".format(u.name), utils.AnsiColors.BOLD_RED ) # remove the unions from self.unions that were declared in class_like objects for rm in removals: self.unions.remove(rm)
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Namespaces and classes should have the unions defined in them to be in the child list of itself rather than floating around. Union nodes that are reparented (e.g. a union defined in a class) will be removed from the list ``self.unions`` since the Breathe directive for its parent (e.g. the class) will include the documentation for the union. The consequence of this is that a union defined in a class will **not** appear in the full api listing of Unions.
def get(self, address):
    """
    Get a loopback address by its address.

    Find all loopback addresses by iterating at either the node level or
    the engine::

        loopback = engine.loopback_interface.get('127.0.0.10')

    :param str address: ip address of loopback
    :raises InterfaceNotFound: invalid interface specified
    :rtype: LoopbackInterface
    """
    loopback = super(LoopbackCollection, self).get(address=address)
    if loopback:
        return loopback
    raise InterfaceNotFound('Loopback address specified was not found')
Get a loopback address by its address.

Find all loopback addresses by iterating at either the node level or
the engine::

    loopback = engine.loopback_interface.get('127.0.0.10')

:param str address: ip address of loopback
:raises InterfaceNotFound: invalid interface specified
:rtype: LoopbackInterface
def _ImportHookBySuffix(
    name, globals=None, locals=None, fromlist=None, level=None):
  """Callback when an import statement is executed by the Python interpreter.

  Argument names have to exactly match those of __import__. Otherwise calls
  to __import__ that use keyword syntax will fail: __import__('a', fromlist=[]).
  """
  _IncrementNestLevel()

  if level is None:
    # A level of 0 means absolute import, positive values mean relative
    # imports, and -1 means to try both an absolute and relative import.
    # Since imports were disambiguated in Python 3, -1 is not a valid value.
    # The default values are 0 and -1 for Python 3 and 2 respectively.
    # https://docs.python.org/2/library/functions.html#__import__
    # https://docs.python.org/3/library/functions.html#__import__
    level = 0 if six.PY3 else -1

  try:
    # Really import modules.
    module = _real_import(name, globals, locals, fromlist, level)
  finally:
    # This _real_import call may raise an exception (e.g., ImportError).
    # However, there might be several modules already loaded before the
    # exception was raised. For instance:
    #   a.py
    #     import b  # success
    #     import c  # ImportError exception.
    # In this case, an 'import a' statement would have the side effect of
    # importing module 'b'. This should trigger the import hooks for module
    # 'b'. To achieve this, we always search/invoke import callbacks (i.e.,
    # even when an exception is raised).
    #
    # Important Note: Do not use 'return' inside the finally block. It will
    # cause any pending exception to be discarded.
    _ProcessImportBySuffix(name, fromlist, globals)

  return module
Callback when an import statement is executed by the Python interpreter.

Argument names have to exactly match those of __import__. Otherwise calls
to __import__ that use keyword syntax will fail: __import__('a', fromlist=[]).
def modify_column_if_table_exists(self, tablename: str, fieldname: str, newdef: str) -> Optional[int]: """Alters a column's definition without renaming it.""" if not self.table_exists(tablename): return None sql = "ALTER TABLE {t} MODIFY COLUMN {field} {newdef}".format( t=tablename, field=fieldname, newdef=newdef ) log.info(sql) return self.db_exec_literal(sql)
Alters a column's definition without renaming it.
def center(self): """The cartesian center of the Compound based on its Particles. Returns ------- np.ndarray, shape=(3,), dtype=float The cartesian center of the Compound based on its Particles """ if np.all(np.isfinite(self.xyz)): return np.mean(self.xyz, axis=0)
The cartesian center of the Compound based on its Particles. Returns ------- np.ndarray, shape=(3,), dtype=float The cartesian center of the Compound based on its Particles
def setup_config(self, cfg=None): ''' Open suitable config file. :return: ''' _opts, _args = optparse.OptionParser.parse_args(self) configs = self.find_existing_configs(_opts.support_unit) if configs and cfg not in configs: cfg = configs[0] return config.master_config(self.get_config_file_path(cfg))
Open suitable config file. :return:
def find_sanitiser_nodes( sanitiser, sanitisers_in_file ): """Find nodes containing a particular sanitiser. Args: sanitiser(string): sanitiser to look for. sanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser. Returns: Iterable of sanitiser nodes. """ for sanitiser_tuple in sanitisers_in_file: if sanitiser == sanitiser_tuple.trigger_word: yield sanitiser_tuple.cfg_node
Find nodes containing a particular sanitiser. Args: sanitiser(string): sanitiser to look for. sanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser. Returns: Iterable of sanitiser nodes.
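Illustrative call, using a namedtuple as a stand-in for the project's sanitiser tuples (the field names below mirror the attributes accessed above but are otherwise assumptions):

from collections import namedtuple

Sanitiser = namedtuple('Sanitiser', ('trigger_word', 'cfg_node'))
entries = [Sanitiser('escape', 'node_1'), Sanitiser('quote', 'node_2')]
list(find_sanitiser_nodes('escape', entries))   # -> ['node_1']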
def hypercube_edges(dims, use_map=False): '''Create edge lists for an arbitrary hypercube. TODO: this is probably not the fastest way.''' edges = [] nodes = np.arange(np.product(dims)).reshape(dims) for i,d in enumerate(dims): for j in range(d-1): for n1, n2 in zip(np.take(nodes, [j], axis=i).flatten(), np.take(nodes,[j+1], axis=i).flatten()): edges.append((n1,n2)) if use_map: return edge_map_from_edge_list(edges) return edges
Create edge lists for an arbitrary hypercube. TODO: this is probably not the fastest way.
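For example, on a 2x2 grid (node ids follow numpy's row-major ordering) the function returns the four edges of a square; this assumes numpy is imported as np, as in the body above:

edges = hypercube_edges([2, 2])
# -> [(0, 2), (1, 3), (0, 1), (2, 3)]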
def get_context_data(self, **kwargs): """ Add filter form to the context. TODO: Currently we construct the filter form object twice - in get_queryset and here, in get_context_data. Will need to figure out a good way to eliminate extra initialization. """ context = super(FilterFormMixin, self).get_context_data(**kwargs) context[self.context_filterform_name] = self.get_filter() return context
Add filter form to the context. TODO: Currently we construct the filter form object twice - in get_queryset and here, in get_context_data. Will need to figure out a good way to eliminate extra initialization.
async def volume(self, ctx, volume: int): """Changes the player's volume""" if ctx.voice_client is None: return await ctx.send("Not connected to a voice channel.") ctx.voice_client.source.volume = volume / 100 await ctx.send("Changed volume to {}%".format(volume))
Changes the player's volume
def process_doc(text): """ The :ref: role is supported by Sphinx but not by plain docutils """ # remove :ref: directives document = docutils.core.publish_doctree(text) # http://epydoc.sourceforge.net/docutils/private/docutils.nodes.document-class.html visitor = RefVisitor(document) document.walk(visitor) return visitor.kwd, visitor.values
The :ref: role is supported by Sphinx but not by plain docutils
def discharge(self): """Discharge of the element in each layer """ rv = np.zeros(self.aq[0].naq) Qls = self.parameters[:, 0] * self.dischargeinf() Qls.shape = (self.nls, self.nlayers, self.order + 1) Qls = np.sum(Qls, 2) for i, q in enumerate(Qls): rv[self.layers[i]] += q #rv[self.layers] = np.sum(Qls.reshape(self.nls * (self.order + 1), self.nlayers), 0) return rv
Discharge of the element in each layer
async def message_throttled(self, message: types.Message, throttled: Throttled): """ Notify user only on first exceed and notify about unlocking only on last exceed :param message: :param throttled: """ handler = current_handler.get() dispatcher = Dispatcher.get_current() if handler: key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}") else: key = f"{self.prefix}_message" # Calculate how many time is left till the block ends delta = throttled.rate - throttled.delta # Prevent flooding if throttled.exceeded_count <= 2: await message.reply('Too many requests! ') # Sleep. await asyncio.sleep(delta) # Check lock status thr = await dispatcher.check_key(key) # If current message is not last with current key - do not send message if thr.exceeded_count == throttled.exceeded_count: await message.reply('Unlocked.')
Notify user only on first exceed and notify about unlocking only on last exceed :param message: :param throttled:
def generate_private_investment(asset_manager_id=None, asset_id=None, client_id=None): attributes = generate_common(asset_manager_id=asset_manager_id, asset_id=asset_id) """currency, display_name""" private_investment = PrivateInvestment(client_id=client_id or random_string(5), asset_issuer_id=random_string(8), category='Private Equity', sub_category='Leverage Buyout Funds', num_shares=1000, price_share=1000, share_type='Ordinary Shares', maturity_date=random_date(), lock_up_period=52, investment_term=52, **attributes) return private_investment
currency, display_name
def append(self, parent, content): """ Select an appender and append the content to parent. @param parent: A parent node. @type parent: L{Element} @param content: The content to append. @type content: L{Content} """ appender = self.default for matcher, candidate_appender in self.appenders: if matcher == content.value: appender = candidate_appender break appender.append(parent, content)
Select an appender and append the content to parent. @param parent: A parent node. @type parent: L{Element} @param content: The content to append. @type content: L{Content}
def in_simo_and_inner(self):
    """
    Test if a node is simo: single input and multiple output
    """
    return len(self.successor) > 1 and self.successor[0] is not None and not self.successor[0].in_or_out and \
        len(self.precedence) == 1 and self.precedence[0] is not None and not self.precedence[0].in_or_out
Test if a node is simo: single input and multiple output
def _stream_blob(self, key, fileobj, progress_callback): """Streams contents of given key to given fileobj. Data is read sequentially in chunks without any seeks. This requires duplicating some functionality of the Azure SDK, which only allows reading entire blob into memory at once or returning data from random offsets""" file_size = None start_range = 0 chunk_size = self.conn.MAX_CHUNK_GET_SIZE end_range = chunk_size - 1 while True: try: # pylint: disable=protected-access blob = self.conn._get_blob(self.container_name, key, start_range=start_range, end_range=end_range) if file_size is None: file_size = self._parse_length_from_content_range(blob.properties.content_range) fileobj.write(blob.content) start_range += blob.properties.content_length if start_range == file_size: break if blob.properties.content_length == 0: raise StorageError( "Empty response received for {}, range {}-{}".format(key, start_range, end_range) ) end_range += blob.properties.content_length if end_range >= file_size: end_range = file_size - 1 if progress_callback: progress_callback(start_range, file_size) except azure.common.AzureHttpError as ex: # pylint: disable=no-member if ex.status_code == 416: # Empty file return raise
Streams contents of given key to given fileobj. Data is read sequentially in chunks without any seeks. This requires duplicating some functionality of the Azure SDK, which only allows reading entire blob into memory at once or returning data from random offsets
def dump_pk(obj, abspath, pk_protocol=pk_protocol, replace=False, compress=False, enable_verbose=True): """Dump Picklable Python Object to file. Provides multiple choice to customize the behavior. :param obj: Picklable Python Object. :param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle). :type abspath: string :param pk_protocol: (default your python version) use 2, to make a py2.x/3.x compatible pickle file. But 3 is faster. :type pk_protocol: int :param replace: (default False) If ``True``, when you dump Pickle to a existing path, it silently overwrite it. If False, an exception will be raised. Default False setting is to prevent overwrite file by mistake. :type replace: boolean :param compress: (default False) If ``True``, use GNU program gzip to compress the Pickle file. Disk usage can be greatly reduced. But you have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading. :type compress: boolean :param enable_verbose: (default True) Trigger for message. :type enable_verbose: boolean Usage:: >>> from weatherlab.lib.dataIO.pk import dump_pk >>> pk = {"a": 1, "b": 2} >>> dump_pk(pk, "test.pickle", replace=True) Dumping to test.pickle... Complete! Elapse 0.001763 sec **中文文档** 将Python对象以Pickle的方式序列化, 保存至本地文件。(有些自定义类无法被序列化) 参数列表 :param obj: 可Pickle化的Python对象 :param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz``, 其中gz用于被压 缩的Pickle :type abspath: ``字符串`` :param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被 py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。 :type pk_protocol: ``整数`` :param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖 原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。 :type replace: ``布尔值`` :param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Pickle文件。 通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数 :func:`load_pk(abspath, compress=True)<load_pk>`. :type compress: ``布尔值`` :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭. :type enable_verbose: ``布尔值`` """ abspath = str(abspath) # try stringlize msg = Messenger(enable_verbose=enable_verbose) if compress: # check extension name root, ext = os.path.splitext(abspath) if ext != ".gz": if ext != ".tmp": raise Exception( "compressed pickle has to use extension '.gz'!") else: _, ext = os.path.splitext(root) if ext != ".gz": raise Exception( "compressed pickle has to use extension '.gz'!") else: root, ext = os.path.splitext(abspath) if ext != ".pickle": if ext != ".tmp": raise Exception("file extension are not '.pickle'!") else: _, ext = os.path.splitext(root) if ext != ".pickle": raise Exception("file extension are not '.pickle'!") msg.show("\nDumping to %s..." % abspath) st = time.clock() if os.path.exists(abspath): # if exists, check replace option if replace: # replace existing file if compress: with gzip.open(abspath, "wb") as f: f.write(pickle.dumps(obj, protocol=pk_protocol)) else: with open(abspath, "wb") as f: pickle.dump(obj, f, protocol=pk_protocol) else: # stop, print error message raise Exception("\tCANNOT WRITE to %s, " "it's already exists" % abspath) else: # if not exists, just write to it if compress: with gzip.open(abspath, "wb") as f: f.write(pickle.dumps(obj, protocol=pk_protocol)) else: with open(abspath, "wb") as f: pickle.dump(obj, f, protocol=pk_protocol) msg.show(" Complete! Elapse %.6f sec" % (time.clock() - st))
Dump Picklable Python Object to file. Provides multiple choice to customize the behavior. :param obj: Picklable Python Object. :param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle). :type abspath: string :param pk_protocol: (default your python version) use 2, to make a py2.x/3.x compatible pickle file. But 3 is faster. :type pk_protocol: int :param replace: (default False) If ``True``, when you dump Pickle to a existing path, it silently overwrite it. If False, an exception will be raised. Default False setting is to prevent overwrite file by mistake. :type replace: boolean :param compress: (default False) If ``True``, use GNU program gzip to compress the Pickle file. Disk usage can be greatly reduced. But you have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading. :type compress: boolean :param enable_verbose: (default True) Trigger for message. :type enable_verbose: boolean Usage:: >>> from weatherlab.lib.dataIO.pk import dump_pk >>> pk = {"a": 1, "b": 2} >>> dump_pk(pk, "test.pickle", replace=True) Dumping to test.pickle... Complete! Elapse 0.001763 sec **中文文档** 将Python对象以Pickle的方式序列化, 保存至本地文件。(有些自定义类无法被序列化) 参数列表 :param obj: 可Pickle化的Python对象 :param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz``, 其中gz用于被压 缩的Pickle :type abspath: ``字符串`` :param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被 py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。 :type pk_protocol: ``整数`` :param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖 原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。 :type replace: ``布尔值`` :param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Pickle文件。 通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数 :func:`load_pk(abspath, compress=True)<load_pk>`. :type compress: ``布尔值`` :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭. :type enable_verbose: ``布尔值``
def publish_predictions_to_core(self): """publish_predictions_to_core""" status = FAILED msg = "not started" try: msg = "generating request" log.info(msg) # noqa https://stackoverflow.com/questions/29815129/pandas-dataframe-to-list-of-dictionaries publish_req = generate_ai_request( predict_rows=self.df.fillna( ANTINEX_MISSING_VALUE).to_dict("records"), req_dict=self.request_dict) if publish_req["status"] != SUCCESS: log.error(("failed generate_ai_request with err={}") .format( publish_req["error"])) status = ERROR else: msg = "publishing as user={} url={} model={}".format( ANTINEX_USER, ANTINEX_URL, ANTINEX_USE_MODEL_NAME) log.info(msg) response = self.client.run_job( body=publish_req["data"]) if response["status"] == SUCCESS: log.info("predictions sent") status = SUCCESS elif response["status"] == FAILED: log.error(("job failed with error='{}' with response={}") .format( response["error"], response["data"])) status = ERROR elif response["status"] == ERROR: log.error(("job had an error='{}' with response={}") .format( response["error"], response["data"])) status = ERROR elif response["status"] == LOGIN_FAILED: log.error(("job reported user was not able to log in " "with an error='{}' with response={}") .format( response["error"], response["data"])) status = ERROR # logging for good/bad cases during publish # if generated a good request except Exception as e: log.error(("failed generating request last_step='{}' ex={}") .format( msg, e)) # end of try/ex return status
publish_predictions_to_core
def get_organizations(self, page=None): """Get organizations""" opts = {} if page: opts['page'] = page return self.api_call(ENDPOINTS['organizations']['list'], **opts)
Get organizations
def create_salt(length: int=128) -> bytes: """ Create a new salt :param int length: How many bytes should the salt be long? :return: The salt :rtype: bytes """ return b''.join(bytes([SystemRandom().randint(0, 255)]) for _ in range(length))
Create a new salt :param int length: How many bytes should the salt be long? :return: The salt :rtype: bytes
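For new code, the standard library's secrets module gives the same result in one call; this is shown as an alternative, not as what the project itself uses:

import secrets

salt = secrets.token_bytes(128)   # 128 random bytes from the OS CSPRNG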
def delete_biggest(self): """ Delete all the biggest duplicates. Keeps all mail of the duplicate set but those sharing the biggest size. """ logger.info( "Deleting all mails sharing the biggest size of {} bytes..." "".format(self.biggest_size)) # Select candidates for deletion. candidates = [ mail for mail in self.pool if mail.size == self.biggest_size] if len(candidates) == self.size: logger.warning( "Skip deletion: all {} mails share the same size." "".format(self.size)) return logger.info( "{} candidates found for deletion.".format(len(candidates))) for mail in candidates: self.delete(mail)
Delete all the biggest duplicates. Keeps all mail of the duplicate set but those sharing the biggest size.
def check_async(paths, options, rootdir=None): """Check given paths asynchronously. :return list: list of errors """ LOGGER.info('Async code checking is enabled.') path_queue = Queue.Queue() result_queue = Queue.Queue() for num in range(CPU_COUNT): worker = Worker(path_queue, result_queue) worker.setDaemon(True) LOGGER.info('Start worker #%s', (num + 1)) worker.start() for path in paths: path_queue.put((path, dict(options=options, rootdir=rootdir))) path_queue.join() errors = [] while True: try: errors += result_queue.get(False) except Queue.Empty: break return errors
Check given paths asynchronously. :return list: list of errors
def begin_batch(self):
    '''
    Starts the batch operation. Initializes the batch variables.

    is_batch: batch operation flag.
    batch_table: the table name of the batch operation.
    batch_partition_key: the PartitionKey of the batch requests.
    batch_row_keys: the RowKey list of the added requests.
    batch_requests: the list of the requests.
    '''
    self.is_batch = True
    self.batch_table = ''
    self.batch_partition_key = ''
    self.batch_row_keys = []
    self.batch_requests = []
Starts the batch operation. Initializes the batch variables.

is_batch: batch operation flag.
batch_table: the table name of the batch operation.
batch_partition_key: the PartitionKey of the batch requests.
batch_row_keys: the RowKey list of the added requests.
batch_requests: the list of the requests.
def subdivide_to_size(vertices, faces, max_edge, max_iter=10): """ Subdivide a mesh until every edge is shorter than a specified length. Will return a triangle soup, not a nicely structured mesh. Parameters ------------ vertices : (n, 3) float Vertices in space faces : (m, 3) int Indices of vertices which make up triangles max_edge : float Maximum length of any edge in the result max_iter : int The maximum number of times to run subdivision Returns ------------ vertices : (j, 3) float Vertices in space faces : (q, 3) int Indices of vertices """ # store completed done_face = [] done_vert = [] # copy inputs and make sure dtype is correct current_faces = np.array(faces, dtype=np.int64, copy=True) current_vertices = np.array(vertices, dtype=np.float64, copy=True) # loop through iteration cap for i in range(max_iter + 1): # (n, 3, 3) float triangle soup triangles = current_vertices[current_faces] # compute the length of every triangle edge edge_lengths = (np.diff(triangles[:, [0, 1, 2, 0]], axis=1) ** 2).sum(axis=2) ** .5 too_long = (edge_lengths > max_edge).any(axis=1) # clean up the faces a little bit so we don't # store a ton of unused vertices unique, inverse = np.unique( current_faces[np.logical_not(too_long)], return_inverse=True) # store vertices and faces meeting criteria done_vert.append(current_vertices[unique]) done_face.append(inverse.reshape((-1, 3))) # met our goals so abort if not too_long.any(): break # run subdivision again (current_vertices, current_faces) = subdivide(current_vertices, current_faces[too_long]) # stack sequence into nice (n, 3) arrays vertices, faces = util.append_faces(done_vert, done_face) return vertices, faces
Subdivide a mesh until every edge is shorter than a specified length. Will return a triangle soup, not a nicely structured mesh. Parameters ------------ vertices : (n, 3) float Vertices in space faces : (m, 3) int Indices of vertices which make up triangles max_edge : float Maximum length of any edge in the result max_iter : int The maximum number of times to run subdivision Returns ------------ vertices : (j, 3) float Vertices in space faces : (q, 3) int Indices of vertices
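Illustrative call on a single oversized triangle (assumes numpy as np plus the subdivide and util helpers referenced above are importable):

vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])
new_v, new_f = subdivide_to_size(vertices, faces, max_edge=0.25)
# every edge of the returned triangles should now be at most 0.25 units long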
def get_context_arguments(self): """Return a dictionary containing the current context arguments.""" cargs = {} for context in self.__context_stack: cargs.update(context.context_arguments) return cargs
Return a dictionary containing the current context arguments.
def system(cmd, data=None): ''' pipes the output of a program ''' import subprocess s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE) out, err = s.communicate(data) return out.decode('utf8')
pipes the output of a program
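Example usage on a POSIX shell (illustrative; note that data, when supplied, must be bytes under Python 3):

print(system("echo hello"))          # -> 'hello\n'
print(system("tr a-z A-Z", b"hi"))   # pipes b'hi' to stdin -> 'HI'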
def Write2000256List(self, arr): """ Write an array of 64 byte items to the stream. Args: arr (list): a list of 2000 items of 64 bytes in size. """ for item in arr: ba = bytearray(binascii.unhexlify(item)) ba.reverse() self.WriteBytes(ba)
Write an array of 64 byte items to the stream. Args: arr (list): a list of 2000 items of 64 bytes in size.
def midi2f(params, midi=69): ''' Convert a midi value to a frequency. Midi value 69 corresponds to A4 (440Hz). Changing the midi value by 1 corresponds to one semitone :param params: buffer parameters, controls length of signal created :param midi: midi value :return: array of resulting frequency ''' midi = create_buffer(params, midi) output = 2**((midi - 69)/12)*440 return output
Convert a midi value to a frequency. Midi value 69 corresponds to A4 (440Hz). Changing the midi value by 1 corresponds to one semitone :param params: buffer parameters, controls length of signal created :param midi: midi value :return: array of resulting frequency
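A quick standalone check of the conversion formula, leaving the buffer machinery aside:

import numpy as np

freq = 2 ** ((np.array([60, 69, 81]) - 69) / 12) * 440
# -> approximately [261.63, 440.0, 880.0] Hz, i.e. C4, A4 and A5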
def _env_runner(base_env, extra_batch_callback, policies, policy_mapping_fn, unroll_length, horizon, preprocessors, obs_filters, clip_rewards, clip_actions, pack, callbacks, tf_sess, perf_stats, soft_horizon): """This implements the common experience collection logic. Args: base_env (BaseEnv): env implementing BaseEnv. extra_batch_callback (fn): function to send extra batch data to. policies (dict): Map of policy ids to PolicyGraph instances. policy_mapping_fn (func): Function that maps agent ids to policy ids. This is called when an agent first enters the environment. The agent is then "bound" to the returned policy for the episode. unroll_length (int): Number of episode steps before `SampleBatch` is yielded. Set to infinity to yield complete episodes. horizon (int): Horizon of the episode. preprocessors (dict): Map of policy id to preprocessor for the observations prior to filtering. obs_filters (dict): Map of policy id to filter used to process observations for the policy. clip_rewards (bool): Whether to clip rewards before postprocessing. pack (bool): Whether to pack multiple episodes into each batch. This guarantees batches will be exactly `unroll_length` in size. clip_actions (bool): Whether to clip actions to the space range. callbacks (dict): User callbacks to run on episode events. tf_sess (Session|None): Optional tensorflow session to use for batching TF policy evaluations. perf_stats (PerfStats): Record perf stats into this object. soft_horizon (bool): Calculate rewards but don't reset the environment when the horizon is hit. Yields: rollout (SampleBatch): Object containing state, action, reward, terminal condition, and other fields as dictated by `policy`. """ try: if not horizon: horizon = (base_env.get_unwrapped()[0].spec.max_episode_steps) except Exception: logger.debug("no episode horizon specified, assuming inf") if not horizon: horizon = float("inf") # Pool of batch builders, which can be shared across episodes to pack # trajectory data. 
batch_builder_pool = [] def get_batch_builder(): if batch_builder_pool: return batch_builder_pool.pop() else: return MultiAgentSampleBatchBuilder( policies, clip_rewards, callbacks.get("on_postprocess_traj")) def new_episode(): episode = MultiAgentEpisode(policies, policy_mapping_fn, get_batch_builder, extra_batch_callback) if callbacks.get("on_episode_start"): callbacks["on_episode_start"]({ "env": base_env, "policy": policies, "episode": episode, }) return episode active_episodes = defaultdict(new_episode) while True: perf_stats.iters += 1 t0 = time.time() # Get observations from all ready agents unfiltered_obs, rewards, dones, infos, off_policy_actions = \ base_env.poll() perf_stats.env_wait_time += time.time() - t0 if log_once("env_returns"): logger.info("Raw obs from env: {}".format( summarize(unfiltered_obs))) logger.info("Info return from env: {}".format(summarize(infos))) # Process observations and prepare for policy evaluation t1 = time.time() active_envs, to_eval, outputs = _process_observations( base_env, policies, batch_builder_pool, active_episodes, unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon, preprocessors, obs_filters, unroll_length, pack, callbacks, soft_horizon) perf_stats.processing_time += time.time() - t1 for o in outputs: yield o # Do batched policy eval t2 = time.time() eval_results = _do_policy_eval(tf_sess, to_eval, policies, active_episodes) perf_stats.inference_time += time.time() - t2 # Process results and update episode state t3 = time.time() actions_to_send = _process_policy_eval_results( to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions) perf_stats.processing_time += time.time() - t3 # Return computed actions to ready envs. We also send to envs that have # taken off-policy actions; those envs are free to ignore the action. t4 = time.time() base_env.send_actions(actions_to_send) perf_stats.env_wait_time += time.time() - t4
This implements the common experience collection logic. Args: base_env (BaseEnv): env implementing BaseEnv. extra_batch_callback (fn): function to send extra batch data to. policies (dict): Map of policy ids to PolicyGraph instances. policy_mapping_fn (func): Function that maps agent ids to policy ids. This is called when an agent first enters the environment. The agent is then "bound" to the returned policy for the episode. unroll_length (int): Number of episode steps before `SampleBatch` is yielded. Set to infinity to yield complete episodes. horizon (int): Horizon of the episode. preprocessors (dict): Map of policy id to preprocessor for the observations prior to filtering. obs_filters (dict): Map of policy id to filter used to process observations for the policy. clip_rewards (bool): Whether to clip rewards before postprocessing. pack (bool): Whether to pack multiple episodes into each batch. This guarantees batches will be exactly `unroll_length` in size. clip_actions (bool): Whether to clip actions to the space range. callbacks (dict): User callbacks to run on episode events. tf_sess (Session|None): Optional tensorflow session to use for batching TF policy evaluations. perf_stats (PerfStats): Record perf stats into this object. soft_horizon (bool): Calculate rewards but don't reset the environment when the horizon is hit. Yields: rollout (SampleBatch): Object containing state, action, reward, terminal condition, and other fields as dictated by `policy`.
def _fetch(self, url, params): """Fetch a resource. Method to fetch and to iterate over the contents of a type of resource. The method returns a generator of pages for that resource and parameters. :param url: the endpoint of the API :param params: parameters to filter :returns: the text of the response """ if not self.from_archive: self.sleep_for_rate_limit() headers = {'Authorization': 'Bearer ' + self.api_key} r = self.fetch(url, payload=params, headers=headers) if not self.from_archive: self.update_rate_limit(r) return r.text
Fetch a resource. Method to fetch and to iterate over the contents of a type of resource. The method returns a generator of pages for that resource and parameters. :param url: the endpoint of the API :param params: parameters to filter :returns: the text of the response
def run_flow(flow, storage, flags=None, http=None): """Core code for a command-line application. The ``run()`` function is called from your application and runs through all the steps to obtain credentials. It takes a ``Flow`` argument and attempts to open an authorization server page in the user's default web browser. The server asks the user to grant your application access to the user's data. If the user grants access, the ``run()`` function returns new credentials. The new credentials are also stored in the ``storage`` argument, which updates the file associated with the ``Storage`` object. It presumes it is run from a command-line application and supports the following flags: ``--auth_host_name`` (string, default: ``localhost``) Host name to use when running a local web server to handle redirects during OAuth authorization. ``--auth_host_port`` (integer, default: ``[8080, 8090]``) Port to use when running a local web server to handle redirects during OAuth authorization. Repeat this option to specify a list of values. ``--[no]auth_local_webserver`` (boolean, default: ``True``) Run a local web server to handle redirects during OAuth authorization. The tools module defines an ``ArgumentParser`` the already contains the flag definitions that ``run()`` requires. You can pass that ``ArgumentParser`` to your ``ArgumentParser`` constructor:: parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args(argv) Args: flow: Flow, an OAuth 2.0 Flow to step through. storage: Storage, a ``Storage`` to store the credential in. flags: ``argparse.Namespace``, (Optional) The command-line flags. This is the object returned from calling ``parse_args()`` on ``argparse.ArgumentParser`` as described above. Defaults to ``argparser.parse_args()``. http: An instance of ``httplib2.Http.request`` or something that acts like it. Returns: Credentials, the obtained credential. 
""" if flags is None: flags = argparser.parse_args() logging.getLogger().setLevel(getattr(logging, flags.logging_level)) if not flags.noauth_local_webserver: success = False port_number = 0 for port in flags.auth_host_port: port_number = port try: httpd = ClientRedirectServer((flags.auth_host_name, port), ClientRedirectHandler) except socket.error: pass else: success = True break flags.noauth_local_webserver = not success if not success: print(_FAILED_START_MESSAGE) if not flags.noauth_local_webserver: oauth_callback = 'http://{host}:{port}/'.format( host=flags.auth_host_name, port=port_number) else: oauth_callback = client.OOB_CALLBACK_URN flow.redirect_uri = oauth_callback authorize_url = flow.step1_get_authorize_url() if not flags.noauth_local_webserver: import webbrowser webbrowser.open(authorize_url, new=1, autoraise=True) print(_BROWSER_OPENED_MESSAGE.format(address=authorize_url)) else: print(_GO_TO_LINK_MESSAGE.format(address=authorize_url)) code = None if not flags.noauth_local_webserver: httpd.handle_request() if 'error' in httpd.query_params: sys.exit('Authentication request was rejected.') if 'code' in httpd.query_params: code = httpd.query_params['code'] else: print('Failed to find "code" in the query parameters ' 'of the redirect.') sys.exit('Try running with --noauth_local_webserver.') else: code = input('Enter verification code: ').strip() try: credential = flow.step2_exchange(code, http=http) except client.FlowExchangeError as e: sys.exit('Authentication has failed: {0}'.format(e)) storage.put(credential) credential.set_store(storage) print('Authentication successful.') return credential
Core code for a command-line application. The ``run()`` function is called from your application and runs through all the steps to obtain credentials. It takes a ``Flow`` argument and attempts to open an authorization server page in the user's default web browser. The server asks the user to grant your application access to the user's data. If the user grants access, the ``run()`` function returns new credentials. The new credentials are also stored in the ``storage`` argument, which updates the file associated with the ``Storage`` object. It presumes it is run from a command-line application and supports the following flags: ``--auth_host_name`` (string, default: ``localhost``) Host name to use when running a local web server to handle redirects during OAuth authorization. ``--auth_host_port`` (integer, default: ``[8080, 8090]``) Port to use when running a local web server to handle redirects during OAuth authorization. Repeat this option to specify a list of values. ``--[no]auth_local_webserver`` (boolean, default: ``True``) Run a local web server to handle redirects during OAuth authorization. The tools module defines an ``ArgumentParser`` the already contains the flag definitions that ``run()`` requires. You can pass that ``ArgumentParser`` to your ``ArgumentParser`` constructor:: parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args(argv) Args: flow: Flow, an OAuth 2.0 Flow to step through. storage: Storage, a ``Storage`` to store the credential in. flags: ``argparse.Namespace``, (Optional) The command-line flags. This is the object returned from calling ``parse_args()`` on ``argparse.ArgumentParser`` as described above. Defaults to ``argparser.parse_args()``. http: An instance of ``httplib2.Http.request`` or something that acts like it. Returns: Credentials, the obtained credential.
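The classic end-to-end pattern looks roughly like this (a sketch only: oauth2client is deprecated, and the secrets file name, storage path and scope below are placeholders):

from oauth2client import client, tools
from oauth2client.file import Storage

flow = client.flow_from_clientsecrets('client_secrets.json',
                                      scope='https://www.googleapis.com/auth/drive.readonly')
storage = Storage('credentials.dat')
flags = tools.argparser.parse_args([])      # or parse real CLI arguments
credentials = tools.run_flow(flow, storage, flags)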
def upload_and_confirm(self, incoming, **kwargs): """Upload the file to okcupid and confirm, among other things, its thumbnail position. :param incoming: A filepath string, :class:`.Info` object or a file like object to upload to okcupid.com. If an info object is provided, its thumbnail positioning will be used by default. :param caption: The caption to add to the photo. :param thumb_nail_left: For thumb nail positioning. :param thumb_nail_top: For thumb nail positioning. :param thumb_nail_right: For thumb nail positioning. :param thumb_nail_bottom: For thumb nail positioning. """ response_dict = self.upload(incoming) if 'error' in response_dict: log.warning('Failed to upload photo') return response_dict if isinstance(incoming, Info): kwargs.setdefault('thumb_nail_left', incoming.thumb_nail_left) kwargs.setdefault('thumb_nail_top', incoming.thumb_nail_top) kwargs.setdefault('thumb_nail_right', incoming.thumb_nail_right) kwargs.setdefault('thumb_nail_bottom', incoming.thumb_nail_bottom) kwargs['height'] = response_dict.get('height') kwargs['width'] = response_dict.get('width') self.confirm(response_dict['id'], **kwargs) return response_dict
Upload the file to okcupid and confirm, among other things, its thumbnail position. :param incoming: A filepath string, :class:`.Info` object or a file like object to upload to okcupid.com. If an info object is provided, its thumbnail positioning will be used by default. :param caption: The caption to add to the photo. :param thumb_nail_left: For thumb nail positioning. :param thumb_nail_top: For thumb nail positioning. :param thumb_nail_right: For thumb nail positioning. :param thumb_nail_bottom: For thumb nail positioning.
def get_dyndns_records(login, password): """Gets the set of dynamic DNS records associated with this account""" params = dict(action='getdyndns', sha=get_auth_key(login, password)) response = requests.get('http://freedns.afraid.org/api/', params=params, timeout=timeout) raw_records = (line.split('|') for line in response.content.split()) try: records = frozenset(DnsRecord(*record) for record in raw_records) except TypeError: raise ApiError("Couldn't parse the server's response", response.content) return records
Gets the set of dynamic DNS records associated with this account
def _repr_html_(self): """Give a nice representation of columns in notebooks.""" out="<table class='taqltable'>\n" # Print column name (not if it is auto-generated) if not(self.name()[:4]=="Col_"): out+="<tr>" out+="<th><b>"+self.name()+"</b></th>" out+="</tr>" cropped=False rowcount=0 colkeywords=self.getkeywords() for row in self: out +="\n<tr>" out += "<td>" + _format_cell(row, colkeywords) + "</td>\n" out += "</tr>\n" rowcount+=1 out+="\n" if rowcount>=20: cropped=True break if out[-2:]=="\n\n": out=out[:-1] out+="</table>" if cropped: out+="<p style='text-align:center'>("+str(self.nrows()-20)+" more rows)</p>\n" return out
Give a nice representation of columns in notebooks.