code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
# Delete an agile board by id via a REST DELETE call.
# NOTE(review): the URL template literal was stripped in this dump; the
# .format(str(board_id)) call implies a board-path template — restore before use.
def delete_agile_board(self, board_id): url = .format(str(board_id)) return self.delete(url)
Delete agile board by id :param board_id: :return:
# Main Jupyter-kernel entry point: submit cell code to a SAS session and
# return either the rendered listing/log or (on crash) restart the session.
# NOTE(review): many string literals (dict keys such as 'status', the magic
# startswith() prefixes, and res[...] keys) were stripped in this dump —
# the control flow is readable but the exact keys must be restored from the
# original sas_kernel source before this can run.
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]: if not code.strip(): return {: , : self.execution_count, : [], : {}} if self.mva is None: self._allow_stdin = True self._start_sas() if self.lst_len < 0: self._get_lst_len() if code.startswith(): logger.debug("decoding string") tmp1 = code.split() decode = base64.b64decode(tmp1[-1]) code = decode.decode() if code.startswith() == False and code.startswith("CompleteshowSASLog_11092015") == False: logger.debug("code type: " + str(type(code))) logger.debug("code length: " + str(len(code))) logger.debug("code string: " + code) if code.startswith("/*SASKernelTest*/"): res = self.mva.submit(code, "text") else: res = self.mva.submit(code, prompt=self.promptDict) self.promptDict = {} if res[].find("SAS process has terminated unexpectedly") > -1: print(res[], "Restarting SAS session on your behalf") self.do_shutdown(True) return res[] output = res[] log = res[] return self._which_display(log, output) elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith() == False: full_log = highlight(self.mva.saslog(), SASLogLexer(), HtmlFormatter(full=True, style=SASLogStyle, lineseparator="<br>", title="Full SAS Log")) return full_log.replace(, ) else: return self.cachedlog.replace(, )
This is the main method that takes code from the Jupyter cell and submits it to the SAS server :param code: code from the cell :param silent: :return: str with either the log or list
# Remove the service row with the given id from the RethinkDB registry table,
# warning when the delete result is not the expected single-deletion summary.
# NOTE(review): the result-dict keys ('deleted', 'errors', ...) and the warn
# format string were stripped in this dump — restore before use.
def unregister(self, id): result = self.rr.table(self.table).get(id).delete().run() if result != { :1, :0,:0, :0,:0,:0}: self.logger.warn( , id, result)
Remove the service with id `id` from the service registry.
def _left_click(self, event): self.update_active() iid = self.current_iid if iid is None: return args = (iid, event.x_root, event.y_root) self.call_callbacks(iid, "left_callback", args)
Function bound to left click event for marker canvas
def should_submit(stack):
    """Decide whether a stack should be submitted to CF for update/create.

    Args:
        stack (:class:`stacker.stack.Stack`): the stack object to check.

    Returns:
        bool: True when the stack is enabled, False otherwise.
    """
    if not stack.enabled:
        logger.debug("Stack %s is not enabled. Skipping.", stack.name)
        return False
    return True
Tests whether a stack should be submitted to CF for update/create Args: stack (:class:`stacker.stack.Stack`): The stack object to check. Returns: bool: If the stack should be submitted, return True.
def index_data(self, data, index_name, doc_type):
    """Index an arbitrary dictionary of data with Elasticsearch.

    Args:
        data: data to be indexed; must be a dictionary.
        index_name: name of the index.
        doc_type: the type of the document.

    Raises:
        RuntimeError: when `data` is not a dict or when indexing fails.
    """
    if not isinstance(data, dict):
        raise RuntimeError('Index failed: data must be a dict, got %s'
                           % type(data).__name__)
    try:
        self.els_search.index(index=index_name, doc_type=doc_type, body=data)
    except Exception as error:  # fixed: was Python 2 `except Exception, error`
        # Surface the failure on stdout and re-raise as a RuntimeError so
        # callers see one consistent exception type.
        print('Index failed: %s' % str(error))
        raise RuntimeError('Index failed: %s' % str(error))
Take an arbitrary dictionary of data and index it with ELS. Args: data: data to be Indexed. Should be a dictionary. index_name: Name of the index. doc_type: The type of the document. Raises: RuntimeError: When the Indexing fails.
# Lexer rule: emit a direction token for the matched text and advance the
# scan position. NOTE(review): the raw-string pattern docstring and the
# add_token() token-name literal were stripped in this dump.
def t_direction(self, s): r self.add_token(, s) self.pos += len(s)
r'^[+-]$'
def skipline(self):
    """Skip over the next record and return its position and size.

    Returns:
        tuple: ``(position, prefix)`` — the file offset where the record
        started and the record length read from its prefix.

    Raises:
        IOError: if the length prefix and suffix of the record disagree.
    """
    start = self.tell()
    length = self._fix()
    # Jump over the record payload, then read the trailing length marker.
    self.seek(length, 1)
    trailer = self._fix()
    if length != trailer:
        raise IOError(_FIX_ERROR)
    return start, length
Skip the next line and returns position and size of line. Raises IOError if pre- and suffix of line do not match.
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:
    """Compute accuracy as the portion of exactly coinciding elements.

    Args:
        y_true: array of true values.
        y_predicted: array of predicted values.

    Returns:
        Fraction of positions where prediction equals truth; 0 for empty input.
    """
    total = len(y_true)
    if not total:
        return 0
    hits = sum(1 for truth, pred in zip(y_true, y_predicted) if truth == pred)
    return hits / total
Calculate accuracy in terms of absolute coincidence Args: y_true: array of true values y_predicted: array of predicted values Returns: portion of absolutely coincidental samples
# Setter: overwrite the backing JSON file (or the in-memory cache when
# caching is enabled) with `data`; on a serialization failure the previous
# file contents are restored before re-raising.
# NOTE(review): this dump is flattened to one line, so the indentation level
# of the trailing self._updateType() call (inside vs. after the else branch)
# cannot be confirmed here — verify against the original source.
def data(self, data): if self.is_caching: self.cache = data else: fcontents = self.file_contents with open(self.path, "w") as f: try: indent = self.indent if self.pretty else None json.dump(data, f, sort_keys=self.sort_keys, indent=indent) except Exception as e: f.seek(0) f.truncate() f.write(fcontents) raise e self._updateType()
Overwrite the file with new data. You probably shouldn't do this yourself, it's easy to screw up your whole file with this.
def get_decision(self, child, is_missing = False):
    """Describe the decision taken from this node to reach `child`.

    Parameters
    ----------
    child: Node
        A child node of this node.
    is_missing: bool
        Whether the branch taken is the missing-value branch.

    Returns
    -------
    dict
        Describes how to get from this node to the child node: node ids,
        feature, index, comparison sign, threshold value, missing flag.
    """
    value = self.value
    feature = self.split_feature_column
    index = self.split_feature_index
    numeric = self.node_type in ["float", "integer"]
    if is_missing:
        # The missing-value branch carries no comparison threshold.
        sign = "missing"
        value = None
    elif self.left_id == child.node_id:
        sign = "<" if numeric else "="
    else:
        sign = ">=" if numeric else "!="
    return {
        "node_id": self.node_id,
        "node_type": self.node_type,
        "feature": feature,
        "index": index,
        "sign": sign,
        "value": value,
        "child_id": child.node_id,
        "is_missing": is_missing,
    }
Get the decision from this node to a child node. Parameters ---------- child: Node A child node of this node. Returns ------- dict: A dictionary that describes how to get from this node to the child node.
def get_port(self):
    """Return a port to use to talk to this cluster.

    Prefers the first client node's port when any client nodes exist,
    otherwise falls back to the first regular node.
    """
    pool = self.client_nodes or self.nodes
    return pool[0].get_port()
Return a port to use to talk to this cluster.
def CountClientPlatformReleasesByLabel(self, day_buckets):
    """Computes client-activity stats for OS-release strings in the DB."""
    def _release_string(client_info):
        # Uname() folds system/release/version into a single string key.
        return client_info.last_snapshot.Uname()
    return self._CountClientStatisticByLabel(day_buckets, _release_string)
Computes client-activity stats for OS-release strings in the DB.
# Build a GitHub URL for a document (study, collection, ...) from its repo
# name, branch, and path fragment. NOTE(review): the URL-segment literals
# (base URL, separators) and the branch default were stripped in this dump.
def get_public_url(self, doc_id, branch=): name, path_frag = self.get_repo_and_path_fragment(doc_id) return + name + + branch + + path_frag
Returns a GitHub URL for the doc in question (study, collection, ...)
# Verify the XOR checksum of a buffer: XOR all bytes, hex-encode uppercase,
# and compare with the expected checksum; writes a warning to stderr on
# mismatch. NOTE(review): the stderr format-string literal was stripped in
# this dump. Also note ord(buf[0]) assumes `buf` is a str of single chars,
# not a bytes object — confirm the expected input type against callers.
def check_sum(buf, csum): csum = csum.encode() _csum = ord(buf[0]) for x in buf[1:]: _csum ^= ord(x) _csum = binascii.b2a_hex(chr(_csum).encode()).upper() if _csum != csum: sys.stderr.write(.format(_csum, csum)) return _csum == csum
Verify the checksum of the data. :param buf: data buffer to check :type buf: :param csum: expected checksum :type csum: :return: whether the computed checksum matches :rtype:
def raw_input(prompt=""):
    """raw_input([prompt]) -> string

    Read one line from standard input, with the trailing newline stripped.
    Raises EOFError when the user hits end-of-file. GNU readline is used
    only when both stdin and stdout are attached to a TTY; the prompt, if
    given, is printed without a trailing newline before reading.
    """
    sys.stderr.flush()
    interactive = STDIN.is_a_TTY() and STDOUT.is_a_TTY()
    if RETURN_UNICODE:
        # Caller wants unicode: decode the raw readline bytes when needed.
        if interactive:
            line = stdin_decode(readline(prompt))
        else:
            line = stdio_readline(prompt)
    else:
        # Caller wants bytes: encode the stdio unicode line when needed.
        if interactive:
            line = readline(prompt)
        else:
            line = stdin_encode(stdio_readline(prompt))
    if not line:
        raise EOFError
    return line[:-1]
raw_input([prompt]) -> string Read a string from standard input. The trailing newline is stripped. If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError. On Unix, GNU readline is used if enabled. The prompt string, if given, is printed without a trailing newline before reading.
# Execute the given task with parameters, converting any raised exception
# into a HandledException (optionally invoking a configured error hook,
# dropping into pdb, or formatting a traceback, per Settings flags).
# NOTE(review): the Settings[...] key literals and the join separator were
# stripped in this dump — restore before use.
def execCommand(Argv, collect_missing): r try: return _execCommand(Argv, collect_missing) except Exception as e: if Settings[]: Settings[](e) if Settings[]: import pdb pdb.post_mortem(sys.exc_info()[2]) if not Settings[]: import traceback etype, value, tb = sys.exc_info() tb = tb.tb_next.tb_next message = .join(traceback.format_exception(etype, value, tb))[:-1] else: if isinstance(e, HandledException): raise e message = str(e) raise HandledException(message)
r"""Executes the given task with parameters.
def addDataset(self, dataset):
    """Create a new dataset item instance for this scene.

    :param dataset | <XChartDataset>
    :return <XChartDatasetItem>
    """
    dataset_item = XChartDatasetItem()
    # Add the item to the scene before binding data so it has a scene context.
    self.addItem(dataset_item)
    dataset_item.setDataset(dataset)
    return dataset_item
Creates a new dataset instance for this scene. :param dataset | <XChartDataset> :return <XChartDatasetItem>
# Build an api_pb2.HParamInfo from a hparam name and its protobuf Values:
# pick the common value type, degrading to DATA_TYPE_STRING on mixed types,
# and attach the discrete domain when it is small enough. Returns None when
# no value had a recognizable type. The loop is order-sensitive: once the
# type degrades to STRING it can never change again, hence the early break.
def _compute_hparam_info_from_values(self, name, values): result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET) distinct_values = set( _protobuf_value_to_string(v) for v in values if _protobuf_value_type(v)) for v in values: v_type = _protobuf_value_type(v) if not v_type: continue if result.type == api_pb2.DATA_TYPE_UNSET: result.type = v_type elif result.type != v_type: result.type = api_pb2.DATA_TYPE_STRING if result.type == api_pb2.DATA_TYPE_STRING: break if result.type == api_pb2.DATA_TYPE_UNSET: return None if (result.type == api_pb2.DATA_TYPE_STRING and len(distinct_values) <= self._max_domain_discrete_len): result.domain_discrete.extend(distinct_values) return result
Builds an HParamInfo message from the hparam name and list of values. Args: name: string. The hparam name. values: list of google.protobuf.Value messages. The list of values for the hparam. Returns: An api_pb2.HParamInfo message.
# Thin ctypes wrapper: enable/disable per-process accounting on a device.
# Looks up the native NVML symbol, converts `mode` to the C enable-state
# enum, calls it, and raises via _nvmlCheckReturn on a non-success status.
# Requires root/admin permissions per the NVML documentation below.
def nvmlDeviceSetAccountingMode(handle, mode): r fn = _nvmlGetFunctionPointer("nvmlDeviceSetAccountingMode") ret = fn(handle, _nvmlEnableState_t(mode)) _nvmlCheckReturn(ret) return None
r""" /** * Enables or disables per process accounting. * * For Kepler &tm; or newer fully supported devices. * Requires root/admin permissions. * * @note This setting is not persistent and will default to disabled after driver unloads. * Enable persistence mode to be sure the setting doesn't switch off to disabled. * * @note Enabling accounting mode has no negative impact on the GPU performance. * * @note Disabling accounting clears all accounting pids information. * * See \ref nvmlDeviceGetAccountingMode * See \ref nvmlDeviceGetAccountingStats * See \ref nvmlDeviceClearAccountingPids * * @param device The identifier of the target device * @param mode The target accounting mode * * @return * - \ref NVML_SUCCESS if the new mode has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode
# Save one variable to the database via an INSERT OR REPLACE through a temp
# table. NOTE(review): this entry is severely garbled by extraction — the
# surrounding execute() calls and most SQL literals are missing; only a
# fragment of the SQL and the final __commit_if_necessary call survive.
# Recover the original from the upstream project before use.
def save_var(self, key, value, **kwargs): INSERT OR REPLACE INTO %s (`key`, `type`, `value`) SELECT ? AS key, ? AS type, value FROM %s DROP TABLE %s' % tmp, commit = False) self.__commit_if_necessary(kwargs)
Save one variable to the database.
def truncate(value: Decimal, n_digits: int) -> Decimal:
    """Truncate `value` to `n_digits` decimal places (toward zero)."""
    scale = 10 ** n_digits
    # math.trunc drops the fractional part without rounding.
    return Decimal(math.trunc(value * scale)) / scale
Truncates a value to a number of decimals places
# Create a Jira Service Desk organization via the experimental REST API
# (requires Jira administrator or agent permission depending on settings).
# NOTE(review): the warning message, URL literal, and payload key were
# stripped in this dump — restore before use.
def create_organization(self, name): log.warning() url = data = {: name} return self.post(url, headers=self.experimental_headers, data=data)
To create an organization Jira administrator global permission or agent permission is required depending on the settings :param name: str :return: Organization data
# Number of bifurcation points in a collection of neurites: delegates to
# n_sections with the bifurcation-point iterator, filtered by neurite_type.
def n_bifurcation_points(neurites, neurite_type=NeuriteType.all): return n_sections(neurites, neurite_type=neurite_type, iterator_type=Tree.ibifurcation_point)
number of bifurcation points in a collection of neurites
# Whether all bins are in growing (consecutive) order, using numpy tolerance
# parameters rtol/atol; the result is cached in self._consecutive.
# NOTE(review): as flattened here, `self._consecutive = True` is immediately
# overwritten by the is_consecutive(...) call — the original nesting likely
# differs (e.g. the second assignment in an else branch). Confirm against
# the original source before relying on this structure.
def is_consecutive(self, rtol: float = 1.e-5, atol: float = 1.e-8) -> bool: if self.inconsecutive_allowed: if self._consecutive is None: if self._numpy_bins is not None: self._consecutive = True self._consecutive = is_consecutive(self.bins, rtol, atol) return self._consecutive else: return True
Whether all bins are in a growing order. Parameters ---------- rtol, atol : numpy tolerance parameters
# Re-sync all cached attributes to the underlying Softimage object's custom
# parameters: refresh the holder handle, then write every namespaced
# attribute value, creating missing string parameters on the fly.
# NOTE(review): uses Python 2 dict.iteritems(); project appears Py2-only.
def update(self): self.holder = siget(self.holder.FullName) for key, value in self.__dict__.iteritems(): key = self.namespace + key if self._validate_key(key): if not self.holder.Parameters(key): self.holder.AddParameter3(key, C.siString) self.holder.Parameters(key).Value = encode(value)
This method should be called when you want to ensure all cached attributes are in sync with the actual object attributes at runtime. This happens because attributes could store mutable objects and be modified outside the scope of this class. The most common idiom that isn't automagically caught is mutating a list or dictionary. Lets say 'user' object have an attribute named 'friends' containing a list, calling 'user.friends.append(new_friend)' only get the attribute, SIWrapper isn't aware that the object returned was modified and the cached data is not updated.
def property_observer(self, name):
    """Function decorator registering an observer of mpv property `name`.

    The decorated function gains an ``unobserve_mpv_properties`` attribute
    that detaches it again. See ``MPV.observe_property`` for details.
    """
    def register(fun):
        self.observe_property(name, fun)
        fun.unobserve_mpv_properties = lambda: self.unobserve_property(name, fun)
        return fun
    return register
Function decorator to register a property observer. See ``MPV.observe_property`` for details.
# CLI entry point for apwp.py: compute predicted paleolatitude/direction/pole
# from the Besse & Courtillot (2002) APWP, reading input interactively (-i),
# from a file (-f), from explicit -P/-lat/-lon/-age flags, or from stdin,
# and writing to stdout or the -F output file.
# NOTE(review): every command-line flag literal ('-h', '-i', '-f', '-F',
# '-P', '-lat', '-lon', '-age') and the open() mode literals were stripped
# in this dump — restore from the docstring/usage before running.
def main(): infile,outfile,data,indata="","",[],[] if in sys.argv: print(main.__doc__) sys.exit() if in sys.argv: ind=sys.argv.index() outfile=sys.argv[ind+1] out=open(outfile,) if in sys.argv: print("Welcome to paleolatitude calculator\n") while 1: data=[] print("pick a plate: NA, SA, AF, IN, EU, AU, ANT, GL \n cntl-D to quit") try: plate=input("Plate\n").upper() except: print("Goodbye \n") sys.exit() lat=float(input( "Site latitude\n")) lon=float(input(" Site longitude\n")) age=float(input(" Age\n")) data=[plate,lat,lon,age] print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.") print(spitout(data)) elif in sys.argv: ind=sys.argv.index() infile=sys.argv[ind+1] f=open(infile,) inp=f.readlines() elif in sys.argv: ind=sys.argv.index() plate=sys.argv[ind+1].upper() if in sys.argv: ind=sys.argv.index() lat=float(sys.argv[ind+1]) else: print(main.__doc__) sys.exit() if in sys.argv: ind=sys.argv.index() lon=float(sys.argv[ind+1]) else: print(main.__doc__) sys.exit() if in sys.argv: ind=sys.argv.index() age=float(sys.argv[ind+1]) else: print(main.__doc__) sys.exit() data=[plate,lat,lon,age] outstring=spitout(data) if outfile=="": print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.") print(outstring) else: out.write(outstring) sys.exit() else: inp=sys.stdin.readlines() if len(inp)>0: for line in inp: data=[] rec=line.split() data.append(rec[0]) for k in range(1,4): data.append(float(rec[k])) indata.append(data) if len(indata)>0: for line in indata: outstring=spitout(line) if outfile=="": print(outstring) else: out.write(outstring) else: print() sys.exit()
NAME apwp.py DESCRIPTION returns predicted paleolatitudes, directions and pole latitude/longitude from apparent polar wander paths of Besse and Courtillot (2002). SYNTAX apwp.py [command line options][< filename] OPTIONS -h prints help message and quits -i allows interactive data entry f file: read plate, lat, lon, age data from file -F output_file: write output to output_file -P [NA, SA, AF, IN, EU, AU, ANT, GL] plate -lat LAT specify present latitude (positive = North; negative=South) -lon LON specify present longitude (positive = East, negative=West) -age AGE specify Age in Ma Note: must have all -P, -lat, -lon, -age or none. OUTPUT Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.
# Write the generated module's import statements to the self.imports stream:
# the generated-types module (prefixed by its package path when present) and
# the base module/class. NOTE(review): Python 2 `print >>` syntax, and the
# format-string literals were stripped in this dump.
def setUpImports(self): i = self.imports print >>i, print >>i, module = self.getTypesModuleName() package = self.getTypesModulePath() if package: module = %(package, module) print >>i, %(module) print >>i, %(self.base_module_name, self.base_class_name)
set import statements
# Required Sphinx extension setup: register config values, the
# BokehPlotDirective, and builder-inited / build-finished event handlers.
# NOTE(review): the config-value names, rebuild flags, directive name, and
# event-name literals were stripped in this dump — restore before use.
def setup(app): app.add_config_value(, [], ) app.add_config_value(, False, ) app.add_directive(, BokehPlotDirective) app.add_config_value(, True, ) app.connect(, builder_inited) app.connect(, build_finished)
Required Sphinx extension setup function.
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False):
    """Callback to checkpoint a Module to `prefix` every `period` epochs.

    Parameters
    ----------
    mod : subclass of BaseModule
        The module to checkpoint.
    prefix : str
        The file prefix for this checkpoint.
    period : int
        How many epochs to wait before checkpointing. Defaults to 1.
    save_optimizer_states : bool
        Whether to also save optimizer states for continued training.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    period = int(max(1, period))
    def _callback(iter_no, sym=None, arg=None, aux=None):
        # Epochs are 1-based at the boundary check.
        epoch = iter_no + 1
        if epoch % period == 0:
            mod.save_checkpoint(prefix, epoch, save_optimizer_states)
    return _callback
Callback to checkpoint Module to prefix every epoch. Parameters ---------- mod : subclass of BaseModule The module to checkpoint. prefix : str The file prefix for this checkpoint. period : int How many epochs to wait before checkpointing. Defaults to 1. save_optimizer_states : bool Indicates whether or not to save optimizer states for continued training. Returns ------- callback : function The callback function that can be passed as iter_end_callback to fit.
def _format_dict(self, info_dict): for key, value in info_dict.items(): if not value: info_dict[key] = "NA" return info_dict
Replaces empty content with 'NA's
def iteritems(self):
    """Iterate over all header lines, yielding duplicate values separately."""
    for field in self:
        stored = _dict_getitem(self, field)
        # stored[0] is the original-cased key; the remaining entries are
        # its values, one per header line.
        original_key = stored[0]
        for value in stored[1:]:
            yield original_key, value
Iterate over all header lines, including duplicate ones.
# Hook invoked when routing finds no matching view; subclasses must override
# to produce a WSGI response body.
def on_view_not_found( self, environ: Dict[str, Any], start_response: Callable) -> Iterable[bytes]: raise NotImplementedError()
called when view is not found
def _getProcessedImage(self): if self.imageDisp is None: self.imageDisp = self.image self.levelMin, self.levelMax = self._quickLevels( self.imageDisp) return self.imageDisp
Returns the image data after it has been processed by any normalization options in use. This method also sets the attributes self.levelMin and self.levelMax to indicate the range of data in the image.
def tvdb_login(api_key):
    """Log into TVDb using the provided api key and return the JWT token.

    Note: you can register for a free TVDb key at thetvdb.com/?tab=apiregister.

    Raises:
        MapiProviderException: if the api key is rejected (HTTP 401).
        MapiNetworkException: if TVDb is down or returned no token.
    """
    status, content = _request_json(
        "https://api.thetvdb.com/login",
        body={"apikey": api_key},
        cache=False,
    )
    if status == 401:
        raise MapiProviderException("invalid api key")
    if status != 200 or not content.get("token"):
        raise MapiNetworkException("TVDb down or unavailable?")
    return content["token"]
Logs into TVDb using the provided api key Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister Online docs: api.thetvdb.com/swagger#!/Authentication/post_login
def apply_step(self, variables, deltas):
    """Apply already-calculated step deltas to the variable values.

    Args:
        variables: List of variables.
        deltas: List of deltas of same length.

    Returns:
        The step-applied operation: a tf.group of tf.assign_add ops.

    Raises:
        TensorForceError: if the two lists differ in length.
    """
    if len(variables) != len(deltas):
        raise TensorForceError("Invalid variables and deltas lists.")
    assign_ops = (
        tf.assign_add(ref=variable, value=delta)
        for variable, delta in zip(variables, deltas)
    )
    return tf.group(*assign_ops)
Applies the given (and already calculated) step deltas to the variable values. Args: variables: List of variables. deltas: List of deltas of same length. Returns: The step-applied operation. A tf.group of tf.assign_add ops.
def data_changed(self, change):
    """Notify the model that data has changed in this cell."""
    index = self.index
    if not index:
        # No index means the cell is not attached to a model view yet.
        return
    self.view.model.dataChanged.emit(index, index)
Notify the model that data has changed in this cell!
# Fetch current alarms for a device from HPE IMC via its REST interface,
# resolving devip to a device id when needed; returns the alarm list, a
# "no alarms" message, or an error string on request failure.
# NOTE(review): the device-details key, the alarm-dict key, and the error
# suffix literal were stripped in this dump — restore before use.
def get_dev_alarms(auth, url, devid=None, devip=None): if devip is not None: devid = get_dev_details(devip, auth, url)[] f_url = url + "/imcrs/fault/alarm?operatorName=admin&deviceId=" + \ str(devid) + "&desc=false" response = requests.get(f_url, auth=auth, headers=HEADERS) try: if response.status_code == 200: dev_alarm = (json.loads(response.text)) if in dev_alarm: return dev_alarm[] else: return "Device has no alarms" except requests.exceptions.RequestException as error: return "Error:\n" + str(error) +
function takes the devId of a specific device and issues a RESTFUL call to get the current alarms for the target device. :param devid: int or str value of the target device :param devip: str of ipv4 address of the target device :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return:list of dictionaries containing the alarms for this device :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.alarms import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> dev_alarms = get_dev_alarms(auth.creds, auth.url, devip='10.101.0.221') >>> assert 'ackStatus' in dev_alarms[0]
# Flask-Security-style view: verify a user's reset-password token, handle
# the expired/invalid cases (flash + redirect, resending instructions when
# expired), optionally redirect GETs to an SPA, and process the new-password
# form (supports both HTML and JSON requests).
# NOTE(review): all flash-message literals, flash categories, redirect
# endpoint names, JSON payload keys, template name, and the HTTP-method
# literal were stripped in this dump — restore before use.
def reset_password(self, token): expired, invalid, user = \ self.security_utils_service.reset_password_token_status(token) if invalid: self.flash( _(), category=) return self.redirect() elif expired: self.security_service.send_reset_password_instructions(user) self.flash(_(, email=user.email, within=app.config.SECURITY_RESET_PASSWORD_WITHIN), category=) return self.redirect() spa_redirect = app.config.SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT if request.method == and spa_redirect: return self.redirect(spa_redirect, token=token, _external=True) form = self._get_form() if form.validate_on_submit(): self.security_service.reset_password(user, form.password.data) self.security_service.login_user(user) self.after_this_request(self._commit) self.flash(_(), category=) if request.is_json: return self.jsonify({: user.get_auth_token(), : user}) return self.redirect(, ) elif form.errors and request.is_json: return self.errors(form.errors) return self.render(, reset_password_form=form, reset_password_token=token, **self.security.run_ctx_processor())
View function verify a users reset password token from the email we sent to them. It also handles the form for them to set a new password. Supports html and json requests.
# Django view returning a JSON payload with the unread-notification list for
# the authenticated user (empty payload for anonymous users); handles both
# the callable and property forms of request.user.is_authenticated.
# NOTE(review): this entry is truncated mid-function by extraction — the
# body of the final try block and the payload keys are missing. Recover the
# original from django-notifications before use.
def live_unread_notification_list(request): try: user_is_authenticated = request.user.is_authenticated() except TypeError: user_is_authenticated = request.user.is_authenticated if not user_is_authenticated: data = { : 0, : [] } return JsonResponse(data) default_num_to_fetch = get_config()[] try: } return JsonResponse(data)
Return a json with a unread notification list
# Solve one period of a consumption-saving problem with idiosyncratic and
# aggregate shocks where the macro state follows a Markov process (HARK's
# ConsAggShockModel). Conditional end-of-period marginal-value functions are
# built per Markov state, then combined with MrkvArray weights to construct
# each state's consumption function and marginal value function.
# NOTE(review): this entry is badly garbled by extraction — the docstring is
# fused into the first line (with fragments lost), and the code between the
# docstring and the `EndOfPrdvPfunc_cond = []` section (shock unpacking,
# tiling setup, Mnext/Reff/wEff arrays) is missing entirely. Recover the
# original from HARK's ConsAggShockModel.py before use; the text below is
# preserved verbatim for reference only.
def solveConsAggMarkov(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,MrkvArray, PermGroFac,PermGroFacAgg,aXtraGrid,BoroCnstArt,Mgrid, AFunc,Rfunc,wFunc,DeprFac): t handle cubic splines, nor can it calculate a value function. Parameters ---------- solution_next : ConsumerSolution The solution to the succeeding one period problem. IncomeDstn : [[np.array]] A list of lists, each containing five arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, idisyncratic permanent shocks, idiosyncratic transitory shocks, aggregate permanent shocks, aggregate transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. MrkvArray : np.array Markov transition matrix between discrete macroeconomic states. MrkvArray[i,j] is probability of being in state j next period conditional on being in state i this period. PermGroFac : float Expected permanent income growth factor at the end of this period, for the *individual*t* be None. Mgrid : np.array A grid of aggregate market resourses to permanent income in the economy. AFunc : [function] Aggregate savings as a function of aggregate market resources, for each Markov macro state. Rfunc : function The net interest factor on assets as a function of capital ratio k. wFunc : function The wage rate for labor as a function of capital-to-labor ratio k. DeprFac : float Capital Depreciation Rate Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (linear interpolation over linear interpola- tions) and marginal value function vPfunc. s states, assuming we reach each one at a time. 
EndOfPrdvPfunc_cond = [] BoroCnstNat_cond = [] for j in range(StateCount): AaggGrid = AFunc[j](Mgrid) AaggNow_tiled = np.tile(np.reshape(AaggGrid,(Mcount,1,1)),(1,aCount,ShkCount)) kNext_array = AaggNow_tiled/(PermGroFacAgg[j]*PermShkAggValsNext_tiled) aNrmMin_candidates = PermGroFac*PermGroFacAgg[j]*PermShkValsNext_tiled[:,0,:]*PermShkAggValsNext_tiled[:,0,:]/Reff_array[:,0,:]*\ (mNrmMinNext(Mnext_array[:,0,:]) - wEff_array[:,0,:]*TranShkValsNext_tiled[:,0,:]) aNrmMin_vec = np.max(aNrmMin_candidates,axis=1) BoroCnstNat_vec = aNrmMin_vec aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1,1)),(1,aCount,ShkCount)) aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled mNrmNext_array = Reff_array*aNrmNow_tiled/PermShkTotal_array + TranShkValsNext_tiled*wEff_array vPnext_array = Reff_array*PermShkTotal_array**(-CRRA)*vPfuncNext(mNrmNext_array,Mnext_array) EndOfPrdvP = DiscFac*LivPrb*np.sum(vPnext_array*ShkPrbsNext_tiled,axis=2) BoroCnstNat = LinearInterp(np.insert(AaggGrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0)) EndOfPrdvPnvrs = np.concatenate((np.zeros((Mcount,1)),EndOfPrdvP**(-1./CRRA)),axis=1) EndOfPrdvPnvrsFunc_base = BilinearInterp(np.transpose(EndOfPrdvPnvrs),np.insert(aXtraGrid,0,0.0),AaggGrid) EndOfPrdvPnvrsFunc = VariableLowerBoundFunc2D(EndOfPrdvPnvrsFunc_base,BoroCnstNat) EndOfPrdvPfunc_cond.append(MargValueFunc2D(EndOfPrdvPnvrsFunc,CRRA)) BoroCnstNat_cond.append(BoroCnstNat) aXtra_tiled = np.tile(np.reshape(aXtraGrid,(1,aCount)),(Mcount,1)) cFuncCnst = BilinearInterp(np.array([[0.0,0.0],[1.0,1.0]]),np.array([BoroCnstArt,BoroCnstArt+1.0]),np.array([0.0,1.0])) cFuncNow = [] vPfuncNow = [] mNrmMinNow = [] for i in range(StateCount): AaggNow = AFunc[i](Mgrid) aNrmMin_candidates = np.zeros((StateCount,Mcount)) + np.nan for j in range(StateCount): if MrkvArray[i,j] > 0.: aNrmMin_candidates[j,:] = BoroCnstNat_cond[j](AaggNow) aNrmMin_vec = np.nanmax(aNrmMin_candidates,axis=0) BoroCnstNat_vec = aNrmMin_vec aNrmMin_tiled = 
np.tile(np.reshape(aNrmMin_vec,(Mcount,1)),(1,aCount)) aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled AaggNow_tiled = np.tile(np.reshape(AaggNow,(Mcount,1)),(1,aCount)) EndOfPrdvP = np.zeros((Mcount,aCount)) for j in range(StateCount): if MrkvArray[i,j] > 0.: temp = EndOfPrdvPfunc_cond[j](aNrmNow_tiled,AaggNow_tiled) EndOfPrdvP += MrkvArray[i,j]*temp cNrmNow = EndOfPrdvP**(-1./CRRA) mNrmNow = aNrmNow_tiled + cNrmNow cFuncBaseByM_list = [] for n in range(Mcount): c_temp = np.insert(cNrmNow[n,:],0,0.0) m_temp = np.insert(mNrmNow[n,:] - BoroCnstNat_vec[n],0,0.0) cFuncBaseByM_list.append(LinearInterp(m_temp,c_temp)) BoroCnstNat = LinearInterp(np.insert(Mgrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0)) cFuncBase = LinearInterpOnInterp1D(cFuncBaseByM_list,Mgrid) cFuncUnc = VariableLowerBoundFunc2D(cFuncBase,BoroCnstNat) cFuncNow.append(LowerEnvelope2D(cFuncUnc,cFuncCnst)) mNrmMinNow.append(UpperEnvelope(BoroCnstNat,ConstantFunction(BoroCnstArt))) vPfuncNow.append(MargValueFunc2D(cFuncNow[-1],CRRA)) solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow,mNrmMin=mNrmMinNow) return solution_now
Solve one period of a consumption-saving problem with idiosyncratic and aggregate shocks (transitory and permanent). Moreover, the macroeconomic state follows a Markov process that determines the income distribution and aggregate permanent growth factor. This is a basic solver that can't handle cubic splines, nor can it calculate a value function. Parameters ---------- solution_next : ConsumerSolution The solution to the succeeding one period problem. IncomeDstn : [[np.array]] A list of lists, each containing five arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, idisyncratic permanent shocks, idiosyncratic transitory shocks, aggregate permanent shocks, aggregate transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. MrkvArray : np.array Markov transition matrix between discrete macroeconomic states. MrkvArray[i,j] is probability of being in state j next period conditional on being in state i this period. PermGroFac : float Expected permanent income growth factor at the end of this period, for the *individual*'s productivity. PermGroFacAgg : [float] Expected aggregate productivity growth in each Markov macro state. aXtraGrid : np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. BoroCnstArt : float Artificial borrowing constraint; minimum allowable end-of-period asset-to- permanent-income ratio. Unlike other models, this *can't* be None. Mgrid : np.array A grid of aggregate market resourses to permanent income in the economy. AFunc : [function] Aggregate savings as a function of aggregate market resources, for each Markov macro state. 
Rfunc : function The net interest factor on assets as a function of capital ratio k. wFunc : function The wage rate for labor as a function of capital-to-labor ratio k. DeprFac : float Capital Depreciation Rate Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (linear interpolation over linear interpola- tions) and marginal value function vPfunc.
def lookup_zone_exception(self, callsign, timestamp=None):
    """Return the CQ Zone exception for `callsign` at `timestamp`.

    Args:
        callsign (string): Amateur radio callsign (whitespace/case normalized).
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC).
            Defaults to the current UTC time *at call time*.

    Returns:
        int: Value of the CQ Zone exception for this callsign at that time.

    Raises:
        KeyError: No matching callsign found, or unsupported lookup type.
    """
    if timestamp is None:
        # Fixed: the original evaluated datetime.utcnow() in the default
        # argument, freezing "now" at import time for every later call.
        timestamp = datetime.utcnow().replace(tzinfo=UTC)
    callsign = callsign.strip().upper()
    if self._lookuptype == "clublogxml":
        return self._check_zone_exception_for_date(
            callsign, timestamp,
            self._zone_exceptions, self._zone_exceptions_index)
    elif self._lookuptype == "redis":
        data_dict, index = self._get_dicts_from_redis(
            "_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign)
        return self._check_zone_exception_for_date(
            callsign, timestamp, data_dict, index)
    # No backend matched: behave like a failed lookup.
    raise KeyError
Returns a CQ Zone if an exception exists for the given callsign Args: callsign (string): Amateur radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: Value of the the CQ Zone exception which exists for this callsign (at the given time) Raises: KeyError: No matching callsign found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN. >>> from pyhamtools import LookupLib >>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey") >>> print my_lookuplib.lookup_zone_exception("DP0GVN") 38 The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore in CQ Zone 38 Note: This method is available for - clublogxml - redis
def __parse_json_data(self, data):
    """Store json-compatible data, keeping a deep copy as the working set.

    :@param data: dict or list parsed from json
    :throws TypeError: when data is neither a dict nor a list
    """
    if not (isinstance(data, dict) or isinstance(data, list)):
        raise TypeError("Provided Data is not json")
    self._raw_data = data
    # Deep copy so later queries can mutate _json_data without touching
    # the caller's original structure.
    self._json_data = copy.deepcopy(self._raw_data)
Process Json data :@param data :@type data: json/dict :throws TypeError
# Annotate `text` via an HTTP extraction service (POST to self._search_url)
# with the given confidence and field filter, then combine the JSON results.
# NOTE(review): the default filter field names, the join separator, the form
# field names, and the header dict literals were stripped in this dump —
# restore before use.
def extract(self, text: str, confidence=0.5, filter=[, , ]) -> List[Extraction]: filter = .join(filter) search_data = [(, confidence), (, text), (, filter)] search_headers = {: } r = requests.post(self._search_url, data=search_data, headers=search_headers) results = r.json() last_results = self._combiner(results) return last_results
Extract with the input text, confidence and fields filter to be used. Args: text (str): text input to be annotated confidence (float): the confidence of the annotation filter (List[str]): the fields that to be extracted Returns: List[Extraction]
# Synchronously run a named debug command: wraps debug_async with a
# threading.Event and a closure that records success/reason/return value,
# then blocks until the async callback fires and returns the result dict.
# NOTE(review): the result-dict key literals were stripped in this dump —
# restore ('success', 'reason', 'return_value' or similar) before use.
def debug_sync(self, conn_id, cmd_name, cmd_args, progress_callback): done = threading.Event() result = {} def _debug_done(conn_id, adapter_id, success, retval, reason): result[] = success result[] = reason result[] = retval done.set() self.debug_async(conn_id, cmd_name, cmd_args, progress_callback, _debug_done) done.wait() return result
Asynchronously complete a named debug command. The command name and arguments are passed to the underlying device adapter and interpreted there. If the command is long running, progress_callback may be used to provide status updates. Callback is called when the command has finished. Args: conn_id (int): A unique identifier that will refer to this connection cmd_name (string): the name of the debug command we want to invoke cmd_args (dict): any arguments that we want to send with this command. progress_callback (callable): A function to be called with status on our progress, called as: progress_callback(done_count, total_count)
def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None):
    """Submit work to be executed in the background on the process pool.

    :param work: the work to execute (exposes func, args_tuples, workunit_name).
    :param workunit_parent: if specified, work is accounted for under this workunit.
    :param on_success: callable taking the ordered list of per-invocation
        return values; called only if all work succeeded.
    :param on_failure: callable taking an exception thrown in the work.
    :return: a `multiprocessing.pool.MapResult`, or None when there is no work.

    Don't do work in on_success: it runs on the result-handling thread,
    which is not a worker and lacks a logging context; use it only to
    submit further work to the pool.
    """
    if work is None or len(work.args_tuples) == 0:
        # Nothing to run; report an (empty) success immediately.
        if on_success:
            on_success([])
        return
    def run_one(*args):
        self._do_work(work.func, *args,
                      workunit_name=work.workunit_name,
                      workunit_parent=workunit_parent,
                      on_failure=on_failure)
    return self._pool.map_async(run_one, work.args_tuples,
                                chunksize=1, callback=on_success)
Submit work to be executed in the background. :param work: The work to execute. :param workunit_parent: If specified, work is accounted for under this workunit. :param on_success: If specified, a callable taking a single argument, which will be a list of return values of each invocation, in order. Called only if all work succeeded. :param on_failure: If specified, a callable taking a single argument, which is an exception thrown in the work. :return: `multiprocessing.pool.MapResult` Don't do work in on_success: not only will it block the result handling thread, but that thread is not a worker and doesn't have a logging context etc. Use it just to submit further work to the pool.
def _get_summary_struct(self):
    """Return ``(sections, section_titles)`` describing this model.

    The first section reports the vocabulary size; the second lists
    training hyperparameters/statistics as ``(label, field-name)``
    pairs consumed by the generic summary printer.

    Returns
    -------
    sections : list (of list of tuples)
    section_titles : list
    """
    # NOTE(review): the label/field-name string literals in this copy
    # appear stripped (empty tuple elements, empty titles) -- restore
    # before use.
    section_titles=[,]
    vocab_length = len(self.vocabulary)
    # verbose is stored as 0/1; convert to bool for display.
    verbose = self.verbose == 1
    sections=[
        [
            (,_precomputed_field(vocab_length))
        ],
        [
            (, ),
            (,),
            (,),
            (, ),
            (, ),
            (, _precomputed_field(verbose))
        ]
    ]
    return (sections, section_titles)
Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<label>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object.
def run(self, b, compute, times=[], **kwargs):
    """Run the compute job, fanning out over MPI when enabled.

    Within mpirun, workers should call ``_run_worker`` instead of
    ``run``; this is the root-process entry point.

    :param b: the bundle to compute.
    :param compute: label of the compute options.
    :param times: optional list of times to compute.
        NOTE(review): mutable default argument -- shared across calls;
        safe only if never mutated.
    :return: the synthetic parameter sets filled with gathered results.
    """
    self.run_checks(b, compute, times, **kwargs)

    logger.debug("rank:{}/{} calling get_packet_and_syns".format(mpi.myrank, mpi.nprocs))
    packet, new_syns = self.get_packet_and_syns(b, compute, times, **kwargs)

    # NOTE(review): a dict key below appears stripped in this copy
    # ("packet[] = b") -- restore before running.
    if mpi.enabled:
        # Broadcast the packet to all workers, run our own chunk, then
        # gather the per-worker packet lists back on the root.
        mpi.comm.bcast(packet, root=0)
        packet[] = b
        rpacketlists = self._run_chunk(**packet)
        rpacketlists_per_worker = mpi.comm.gather(rpacketlists, root=0)
    else:
        rpacketlists_per_worker = [self._run_chunk(**packet)]

    return self._fill_syns(new_syns, rpacketlists_per_worker)
if within mpirun, workers should call _run_worker instead of run
def match(self, text):
    """Match *text* against the compiled pattern.

    Returns a dict mapping the pattern's named groups to their
    (optionally type-converted) values, or None when *text* does not
    match.
    """
    # NOTE(review): the type-name literals compared against
    # self.type_mapper[key] appear stripped in this copy ("== :") --
    # they presumably selected the int vs float conversions below;
    # restore before use.
    match_obj = None
    if self.fullmatch:
        # fullmatch requires the entire string to match.
        match_obj = self.regex_obj.fullmatch(text)
    else:
        match_obj = self.regex_obj.search(text)
    if match_obj == None:
        return None
    matches = match_obj.groupdict()
    for key,match in matches.items():
        try:
            if self.type_mapper[key] == :
                matches[key] = int(match)
            if self.type_mapper[key] == :
                matches[key] = float(match)
        except (TypeError, KeyError) as e:
            # TypeError: group did not participate (value is None);
            # KeyError: no declared type -- keep the raw value.
            pass
    return matches
If text is matched with pattern, return variable names specified(%{pattern:variable name}) in pattern and their corresponding values.If not matched, return None. custom patterns can be passed in by custom_patterns(pattern name, pattern regular expression pair) or custom_patterns_dir.
def get_artifact_info(self):
    """Describe the jar artifact for this target.

    Returns:
        tuple: a :class:`pants.java.jar.JarDependency` (``rev=None``)
        and a bool telling whether this target is exportable (i.e. it
        has a ``provides`` clause).
    """
    # NOTE(review): the fallback literal after the first "else" appears
    # stripped in this copy -- restore the default org string before use.
    exported = bool(self.provides)

    org = self.provides.org if exported else
    name = self.provides.name if exported else self.identifier

    return JarDependency(org=org, name=name, rev=None), exported
Returns a tuple composed of a :class:`pants.java.jar.JarDependency` describing the jar for this target and a bool indicating if this target is exportable.
def _load_github_repo():
    """Load the configured GitHub repository from the user's config file.

    Returns:
        str: the config file's contents (the repository identifier).

    Raises:
        RuntimeError: when blocked by an environment variable, or when
        the config file cannot be read.
    """
    # NOTE(review): the environment-variable name, config file name,
    # open mode and error-message literals appear stripped in this
    # copy -- restore before running.
    if in os.environ:
        raise RuntimeError(
        )
    try:
        with open(os.path.join(config_dir, ), ) as f:
            return f.read()
    except (OSError, IOError):
        raise RuntimeError(
        )
Loads the GitHub repository from the users config.
def allocate_elastic_ip(self):
    """Allocate an elastic IP and wait until it becomes describable.

    After allocation, polls ``describe_addresses`` on a backoff
    schedule (60 checks at 2s, 60 at 5s, 18 at 10s) until the address
    is visible.

    :return: dict with the allocation ID and public IP.
    :raises: AWSAPIError, EC2UtilError
    """
    # NOTE(review): many string literals (log messages, response keys
    # such as AllocationId/PublicIp, the Domain value) appear stripped
    # in this copy.  Also note the Python-2-only
    # "raise AWSAPIError, msg, trace" statement below -- this function
    # cannot run on Python 3 as written.
    log = logging.getLogger(self.cls_logger + )
    log.info()
    try:
        response = self.client.allocate_address(
            DryRun=False,
            Domain=
        )
    except ClientError:
        _, ex, trace = sys.exc_info()
        msg = .format(e=str(ex))
        log.error(msg)
        raise AWSAPIError, msg, trace
    allocation_id = response[]
    public_ip = response[]
    log.info(. format(a=allocation_id, p=public_ip))
    log.info(
    )
    ready = False
    # Backoff schedule: 2s x 60, then 5s x 60, then 10s x 18.
    verification_timer = [2]*60 + [5]*60 + [10]*18
    num_checks = len(verification_timer)
    for i in range(0, num_checks):
        wait_time = verification_timer[i]
        try:
            self.client.describe_addresses(
                DryRun=False,
                AllocationIds=[allocation_id]
            )
        except ClientError:
            # Not visible yet: log and wait before the next poll.
            _, ex, trace = sys.exc_info()
            log.info(
                .format(p=public_ip, a=allocation_id, w=wait_time, e=str(ex)))
            time.sleep(wait_time)
        else:
            log.info(.format(
                p=public_ip, a=allocation_id))
            ready = True
            break
    if ready:
        return {: allocation_id, : public_ip}
    else:
        msg = . \
            format(p=public_ip, a=allocation_id)
        log.error(msg)
        raise EC2UtilError(msg)
Allocates an elastic IP address :return: Dict with allocation ID and Public IP that were created :raises: AWSAPIError, EC2UtilError
def _get_fields(mcs, bases, namespace):
    """Collect declared fields into an OrderedDict for the new class.

    Field descriptors are popped out of *namespace* so they do not
    remain as plain class attributes; fields inherited from base
    classes are prepended (iterating bases in reverse) so that
    subclass declarations override base-class ones.

    Args:
        bases: all base classes of the created serializer class.
        namespace (dict): class namespace as a dict of attributes.
    """
    own_fields = []
    for attr_name, attr_value in list(namespace.items()):
        if isinstance(attr_value, BaseField):
            own_fields.append((attr_name, namespace.pop(attr_name)))

    collected = own_fields
    for base in reversed(bases):
        if hasattr(base, mcs._fields_storage_key):
            inherited = getattr(base, mcs._fields_storage_key)
            collected = list(inherited.items()) + collected
    return OrderedDict(collected)
Create fields dictionary to be used in resource class namespace. Pop all field objects from attributes dict (namespace) and store them under _field_storage_key attribute. Also collect all fields from base classes in order that ensures fields can be overridden. Args: bases: all base classes of created serializer class namespace (dict): namespace as dictionary of attributes
def gets(self, conn, key, default=None):
    """Fetch ``key`` together with its CAS token.

    :param conn: connection to issue the command on.
    :param key: ``bytes`` key of the item being fetched.
    :param default: value returned when the key is missing.
    :return: ``(value, cas_token)``; the token is ``None`` when the
        key was not found.
    """
    value_map, token_map = yield from self._multi_get(
        conn, key, with_cas=True)
    value = value_map.get(key, default)
    token = token_map.get(key)
    return value, token
Gets a single value from the server together with the cas token. :param key: ``bytes``, is the key for the item being fetched :param default: default value if there is no value. :return: ``(bytes, bytes)`` tuple with the value and the cas token
def flick(self, xspeed, yspeed):
    """Queue a flick gesture starting anywhere on the screen.

    :Args:
     - xspeed: The X speed in pixels per second.
     - yspeed: The Y speed in pixels per second.

    :returns: self, so actions can be chained.
    """
    # NOTE(review): the payload key literals for the two speeds appear
    # stripped in this copy -- restore before running.  The action is
    # deferred (lambda) until the action chain is performed.
    self._actions.append(lambda: self._driver.execute(
        Command.FLICK, {
            : int(xspeed),
            : int(yspeed)}))
    return self
Flicks, starting anywhere on the screen. :Args: - xspeed: The X speed in pixels per second. - yspeed: The Y speed in pixels per second.
def _result(self) -> ResultLazyType:
    """Lazily yield replaced segments with interval bookkeeping.

    Walks the continuous intervals of the input sequence; intervals
    carrying an aggregated mark are rewritten through the replacer
    registered for that labeler type in ``self.config.labeler2repl``
    (which must exist).  ``diff_acc`` accumulates the length delta so
    each yielded item also reports the interval's position in the
    processed (post-replacement) coordinate system.

    Yields:
        (segment, (original_interval, processed_interval, was_replaced))
    """
    config = cast(IntervalsCollectionBasedReplacerConfig, self.config)

    # Cumulative difference between processed and original offsets.
    diff_acc = 0
    for interval, aggregated_mark in self.continuous_intervals():
        start, end = interval
        processed_start = start + diff_acc
        processed_end = end + diff_acc

        segment = self.input_sequence[start:end]
        if aggregated_mark is not None:
            # Look up the replacer registered for this labeler type.
            processed_segment = config.labeler2repl[cast(Type[workflow.IntervalLabeler],
                                                         aggregated_mark)](segment)
            if not processed_segment:
                # Replacement removed the segment entirely.
                processed_end = processed_start
            else:
                processed_end = processed_start + len(processed_segment)
            diff_acc += len(processed_segment) - len(segment)
            segment = processed_segment

        yield segment, (interval, (processed_start, processed_end), aggregated_mark is not None)
``self.config.replacer_function``(``Callable[[str], str]``) must exists.
def convert(self, request, response, data):
    """Perform the conversion for one log-format modifier.

    Looks up ``self.modifier.param`` in a dict stored in the WSGI
    environ and escapes the resulting value.

    :param request: webob Request describing the request.
    :param response: webob Response describing the response.
    :param data: dict returned by the prepare() method.
    :returns: the escaped string result.
    """
    # NOTE(review): the environ key and both fallback-default literals
    # appear stripped in this copy -- restore before running.
    return self.escape(request.environ.get(, {}).get(
        self.modifier.param, ))
Performs the desired Conversion. :param request: The webob Request object describing the request. :param response: The webob Response object describing the response. :param data: The data dictionary returned by the prepare() method. :returns: A string, the results of which are the desired conversion.
def match_rows(rows1, rows2, key, sort_keys=True):
    """Group rows of two tables by a shared column value.

    Yields ``(value, left_rows, right_rows)`` triples where the two
    lists hold the rows of *rows1* and *rows2* whose *key* column
    equals ``value``.  Keys are yielded in first-seen order unless
    *sort_keys* is true, in which case they are sorted via ``safe_int``.

    .. warning:: both inputs are held in memory for this operation.
    """
    grouped = OrderedDict()
    for side, table in enumerate((rows1, rows2)):
        for row in table:
            value = row[key]
            pair = grouped.get(value)
            if pair is None:
                pair = ([], [])
                grouped[value] = pair
            pair[side].append(row)

    keys = grouped.keys()
    if sort_keys:
        keys = sorted(keys, key=safe_int)
    for value in keys:
        left_rows, right_rows = grouped[value]
        yield (value, left_rows, right_rows)
Yield triples of `(value, left_rows, right_rows)` where `left_rows` and `right_rows` are lists of rows that share the same column value for *key*. This means that both *rows1* and *rows2* must have a column with the same name *key*. .. warning:: Both *rows1* and *rows2* will exist in memory for this operation, so it is not recommended for very large tables on low-memory systems. Args: rows1: a :class:`Table` or list of :class:`Record` objects rows2: a :class:`Table` or list of :class:`Record` objects key (str): the column name on which to match sort_keys (bool): if `True`, yield matching rows sorted by the matched key instead of the original order
def _spanning_tree_algorithm(self):
    """Recompute port roles for the spanning tree.

    Root bridge: every enabled port becomes DESIGNATED_PORT.
    Non-root bridge: one ROOT_PORT and some DESIGNATED_PORTs are
    selected; every other enabled port becomes NON_DESIGNATED_PORT.

    Returns:
        tuple: (port_roles dict keyed by port number, root priority,
        root times).
    """
    # NOTE(review): the two log-message literals appear stripped in
    # this copy -- restore before running.
    port_roles = {}
    root_port = self._select_root_port()

    if root_port is None:
        # No superior bridge seen: we are the root and advertise our
        # own priority/times.
        self.logger.info(, extra=self.dpid_str)
        root_priority = self.root_priority
        root_times = self.root_times
        for port_no in self.ports:
            if self.ports[port_no].state is not PORT_STATE_DISABLE:
                port_roles[port_no] = DESIGNATED_PORT
    else:
        # Another bridge is root: adopt its priority/times.
        self.logger.info(, extra=self.dpid_str)
        root_priority = root_port.designated_priority
        root_times = root_port.designated_times
        port_roles[root_port.ofport.port_no] = ROOT_PORT
        d_ports = self._select_designated_port(root_port)
        for port_no in d_ports:
            port_roles[port_no] = DESIGNATED_PORT
        # Any enabled port not yet assigned becomes non-designated.
        for port in self.ports.values():
            if port.state is not PORT_STATE_DISABLE:
                port_roles.setdefault(port.ofport.port_no, NON_DESIGNATED_PORT)
    return port_roles, root_priority, root_times
Update tree roles. - Root bridge: all port is DESIGNATED_PORT. - Non root bridge: select one ROOT_PORT and some DESIGNATED_PORT, and the other port is set to NON_DESIGNATED_PORT.
def _remove_non_serializable_store_entries(store: Store) -> dict:
    """Copy all serializable ``(key, value)`` pairs of *store* into a new dict.

    Non-serializable entries are skipped with an info log, so they
    survive in memory during the session but are lost when autokey
    quits.

    :param store: the local script store to filter.
    :return: dict holding only the serializable entries.
    """
    # NOTE(review): the "{}" placeholders inside the log message appear
    # stripped in this copy ("Key: , Value: .") although .format(key,
    # value) is still called with two arguments -- restore them.
    cleaned_store_data = {}
    for key, value in store.items():
        if Script._is_serializable(key) and Script._is_serializable(value):
            cleaned_store_data[key] = value
        else:
            _logger.info("Skip non-serializable item in the local script store. Key: , Value: . "
                         "This item cannot be saved and therefore will be lost when autokey quits.".format(
                key, value
            ))
    return cleaned_store_data
Copy all serializable data into a new dict, and skip the rest. This makes sure to keep the items during runtime, even if the user edits and saves the script.
def setup_cluster(self, cluster, extra_args=tuple()):
    """Configure *cluster* by running the configured Ansible playbook.

    :param cluster: cluster to configure
        (:py:class:`elasticluster.cluster.Cluster`).
    :param extra_args: additional command-line arguments appended to
        each invocation of the setup program.
    :return: whatever ``_run_playbook`` reports (truthy on success).
    """
    playbook = self._playbook_path
    return self._run_playbook(cluster, playbook, extra_args)
Configure the cluster by running an Ansible playbook. The ElastiCluster configuration attribute `<kind>_groups` determines, for each node kind, what Ansible groups nodes of that kind are assigned to. :param cluster: cluster to configure :type cluster: :py:class:`elasticluster.cluster.Cluster` :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: ``True`` on success, ``False`` otherwise. Please note, if nothing has to be configured, then ``True`` is returned. :raises: `ConfigurationError` if the playbook can not be found or is corrupt.
def create_with_claims(self, claims):
    """Return a copy of these credentials carrying extra JWT claims.

    Args:
        claims: dict of claim name -> value, merged over the current
            keyword arguments (claims win on conflict).

    Returns:
        A new credentials object of the same class with the merged
        claims and the same key material / endpoints as *self*.
    """
    merged_kwargs = dict(self._kwargs)
    merged_kwargs.update(claims)
    duplicate = self.__class__(self._service_account_email,
                               self._signer,
                               scopes=self._scopes,
                               private_key_id=self._private_key_id,
                               client_id=self.client_id,
                               user_agent=self._user_agent,
                               **merged_kwargs)
    # Carry over state that the constructor does not accept directly.
    duplicate.token_uri = self.token_uri
    duplicate.revoke_uri = self.revoke_uri
    duplicate._private_key_pkcs8_pem = self._private_key_pkcs8_pem
    duplicate._private_key_pkcs12 = self._private_key_pkcs12
    duplicate._private_key_password = self._private_key_password
    return duplicate
Create credentials that specify additional claims. Args: claims: dict, key-value pairs for claims. Returns: ServiceAccountCredentials, a copy of the current service account credentials with updated claims to use when obtaining access tokens.
def get_dataset_date(self, date_format=None):
    """Return the dataset date (start date for ranges) as a string.

    Args:
        date_format (Optional[str]): strftime format; None means
            ISO 8601.  Defaults to None.

    Returns:
        Optional[str]: formatted date string, or None if no date is set.
    """
    raw_date = self.get_dataset_date_as_datetime()
    return self._get_formatted_date(raw_date, date_format)
Get dataset date as string in specified format. For range returns start date. If no format is supplied, an ISO 8601 string is returned. Args: date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None. Returns: Optional[str]: Dataset date string or None if no date is set
def set_tts(self, level):
    """Point ``TTS``/``TTS_PATH`` at the engine configured for *level*.

    Looks up *level* in ``TTS_GRANULARITY_MAP`` and, when present,
    copies the level-specific engine and engine-path values into the
    generic ``TTS`` / ``TTS_PATH`` slots.  Unknown levels are ignored,
    matching the original behavior.

    Currently supported levels: ``1`` (paragraph), ``2`` (sentence),
    ``3`` (word).

    :param int level: the desired granularity level
    """
    # Single dict lookup replaces the original `level in d.keys()`
    # membership test followed by a second `d[level]` lookup.
    mapping = self.TTS_GRANULARITY_MAP.get(level)
    if mapping is None:
        # Unknown granularity level: leave current settings untouched.
        return
    tts_key, tts_path_key = mapping
    self[self.TTS] = self[tts_key]
    self[self.TTS_PATH] = self[tts_path_key]
Set the values for :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS` and :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS_PATH` matching the given granularity level. Currently supported levels: * ``1`` (paragraph) * ``2`` (sentence) * ``3`` (word) :param int level: the desired granularity level
def _to_bel_lines_header(graph) -> Iterable[str]:
    """Iterate the header lines of the BEL script for *graph*.

    Emits a generated-by banner (versions + timestamp) followed by the
    knowledge header built from the graph's namespace/annotation
    definitions and document metadata.

    :param pybel.BELGraph graph: A BEL graph
    """
    # NOTE(review): the banner format-string literal appears stripped
    # in this copy ("yield .format(...)") -- restore before running.
    yield .format(
        VERSION,
        bel_resources.constants.VERSION,
        time.asctime()
    )

    yield from make_knowledge_header(
        namespace_url=graph.namespace_url,
        namespace_patterns=graph.namespace_pattern,
        annotation_url=graph.annotation_url,
        annotation_patterns=graph.annotation_pattern,
        annotation_list=graph.annotation_list,
        **graph.document
    )
Iterate the lines of a BEL graph's corresponding BEL script's header. :param pybel.BELGraph graph: A BEL graph
def set_default_host(cls, value):
    """Set the base URL automatically prepended to every request path.

    ``None`` restores the default of ``"http://127.0.0.1:80"``; any
    other value is normalized into ``scheme://host:port`` form.
    """
    if value is None:
        cls.DEFAULT_HOST = "http://127.0.0.1:80"
        return
    scheme, host, port = get_hostname_parameters_from_url(value)
    cls.DEFAULT_HOST = "%s://%s:%s" % (scheme, host, port)
Default: "http://127.0.0.1:80" A string that will be automatically included at the beginning of the url generated for doing each http request.
def _fill_function(*args):
    """Fill the rest of the function data into a skeleton function object.

    Supports several historical pickle layouts: ``(func, state_dict)``
    and 5/6-element tuples whose trailing elements are zipped with a
    fixed key list into the state dict.

    Raises:
        ValueError: for an unrecognized argument layout.
    """
    # NOTE(review): this copy is incomplete -- the state-key string
    # literals are stripped ("keys = [, , , ]"), the ValueError message
    # literal is missing, and the part that applies `state` and
    # iterates closure cells is gone: `value`, `cell`,
    # `_empty_cell_value` and `cell_set` are unbound as written here.
    if len(args) == 2:
        func = args[0]
        state = args[1]
    elif len(args) == 5:
        func = args[0]
        keys = [, , , ]
        state = dict(zip(keys, args[1:]))
    elif len(args) == 6:
        func = args[0]
        keys = [, , , , ]
        state = dict(zip(keys, args[1:]))
    else:
        raise ValueError( % (args,))

    if value is not _empty_cell_value:
        cell_set(cell, value)

    return func
Fills in the rest of function data into the skeleton function object The skeleton itself is create by _make_skel_func().
def touch(self, mode=0o666, exist_ok=True):
    """Create this file with the given access mode, if it doesn't exist.

    When *exist_ok* is true an existing file merely has its timestamps
    refreshed; otherwise creation is exclusive and the OS error for an
    existing path propagates.
    """
    if exist_ok:
        # Fast path: bump atime/mtime of an already-existing file.
        try:
            os.utime(self, None)
        except OSError:
            # Missing file (or utime failure): fall through to create.
            pass
        else:
            return
    create_flags = os.O_CREAT | os.O_WRONLY
    if not exist_ok:
        create_flags |= os.O_EXCL
    handle = os.open(self, create_flags, mode)
    os.close(handle)
Create this file with the given access mode, if it doesn't exist. Based on: https://github.com/python/cpython/blob/master/Lib/pathlib.py)
def config_args(self, section="main"):
    """Feed input arguments from a config file into the parser.

    Removes every config-selection option (and its value) from
    ``sys.argv`` so normal parsing does not see them, then applies the
    config-file values for all actions.  Idempotent: a flag guards
    against running twice.

    :param section: current config section name
    """
    if self._config_parsed:
        return
    for a in self._filtered_actions("config"):
        for o in a.option_strings:
            try:
                i = sys.argv.index(o)
                # Pop the option and its value; the same index is used
                # twice because the list shifts left after the first pop.
                sys.argv.pop(i)
                sys.argv.pop(i)
            except ValueError:
                # Option not present on the command line; ignore.
                pass
    for a in self._sorted_actions():
        self._set_arg(a, section, True)
    self._config_parsed = True
Additional method for feeding input arguments from a config file. :param section: current config section name
def check_verifier(self, verifier):
    """Return True when *verifier* contains only safe characters and
    its length lies within the configured bounds (inclusive)."""
    min_len, max_len = self.verifier_length
    if not set(verifier) <= self.safe_characters:
        return False
    return min_len <= len(verifier) <= max_len
Checks that the verifier contains only safe characters and is no shorter than lower and no longer than upper.
def normalize_lat_lng(arg):
    """Coerce the supported lat/lng representations into a tuple.

    Accepts a dict with "lat"/"lng" or "latitude"/"longitude" keys, or
    a two-element list/tuple such as ``(-33, 151)``.

    :param arg: the lat/lng pair.
    :rtype: tuple (lat, lng)
    :raises TypeError: for any other representation.
    """
    if isinstance(arg, dict):
        # Short keys take precedence over the long form.
        for lat_key, lng_key in (("lat", "lng"), ("latitude", "longitude")):
            if lat_key in arg and lng_key in arg:
                return arg[lat_key], arg[lng_key]
    if _is_list(arg):
        return arg[0], arg[1]
    raise TypeError(
        "Expected a lat/lng dict or tuple, "
        "but got %s" % type(arg).__name__)
Take the various lat/lng representations and return a tuple. Accepts various representations: 1) dict with two entries - "lat" and "lng" 2) list or tuple - e.g. (-33, 151) or [-33, 151] :param arg: The lat/lng pair. :type arg: dict or list or tuple :rtype: tuple (lat, lng)
def get_specific(self, id, **kwargs):
    """Get a specific License by id.

    Synchronous by default; pass a `callback` kwarg to make the HTTP
    request asynchronous, in which case the request thread is returned.

    :param int id: License id (required)
    :return: LicenseSingleton, or the request thread when asynchronous.
    """
    # NOTE(review): the kwarg key literals gating the sync/async paths
    # appear stripped in this copy ("kwargs[] = True", "kwargs.get()").
    kwargs[] = True
    if kwargs.get():
        return self.get_specific_with_http_info(id, **kwargs)
    else:
        (data) = self.get_specific_with_http_info(id, **kwargs)
        return data
Get specific License This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_specific(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: License id (required) :return: LicenseSingleton If the method is called asynchronously, returns the request thread.
def transform_file_output(result):
    """Reshape an SDK file/dir listing so files and directories are
    clearly distinguished in CLI output.

    Directories get a trailing marker appended to their name and no
    content length; entries are returned sorted by name.

    :param result: SDK result -- either a list or a dict wrapping one.
    :return: sorted list of OrderedDicts.
    """
    from collections import OrderedDict
    # NOTE(review): the item/entry key literals and the directory
    # type/marker literals appear stripped in this copy -- restore
    # before running.
    new_result = []
    iterable = result if isinstance(result, list) else result.get(, result)
    for item in iterable:
        new_entry = OrderedDict()
        entity_type = item[]
        is_dir = entity_type ==
        new_entry[] = item[] + if is_dir else item[]
        new_entry[] = if is_dir else item[][]
        new_entry[] = item[]
        new_entry[] = item[][] or
        new_result.append(new_entry)
    return sorted(new_result, key=lambda k: k[])
Transform to convert SDK file/dir list output to something that more clearly distinguishes between files and directories.
def parse(region_string):
    """Parse a DS9 region string into a :class:`ShapeList`.

    Parameters
    ----------
    region_string : str
        Region string.

    Returns
    -------
    shapes : `ShapeList`
        List of `~pyregion.Shape`.
    """
    parser = RegionParser()
    raw_shapes = parser.parse(region_string)
    with_attrs = parser.convert_attr(raw_shapes)
    wcs_checked = _check_wcs(with_attrs)
    shapes, comments = parser.filter_shape2(wcs_checked)
    return ShapeList(shapes, comment_list=comments)
Parse DS9 region string into a ShapeList. Parameters ---------- region_string : str Region string Returns ------- shapes : `ShapeList` List of `~pyregion.Shape`
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None,
         numPartitions=None, predicates=None, properties=None):
    """Construct a :class:`DataFrame` for the JDBC table ``table`` at ``url``.

    Reads are parallelized when either a partitioning ``column``
    (together with ``lowerBound``, ``upperBound`` and ``numPartitions``)
    or explicit ``predicates`` are supplied; ``column`` wins when both
    are given.  ``properties`` is a dict of JDBC connection arguments
    (normally at least "user" and "password").

    .. note:: Don't create too many partitions in parallel on a large
        cluster; otherwise Spark might crash your external database
        systems.
    """
    if properties is None:
        properties = dict()
    # Convert the Python dict into a java.util.Properties instance.
    jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
    for prop_name in properties:
        jprop.setProperty(prop_name, properties[prop_name])

    if column is not None:
        assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
        assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
        assert numPartitions is not None, \
            "numPartitions can not be None when ``column`` is specified"
        jreader = self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
                                     int(numPartitions), jprop)
        return self._df(jreader)

    if predicates is not None:
        # Each predicate defines one partition's WHERE clause.
        gateway = self._spark._sc._gateway
        jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
        return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))

    return self._df(self._jreader.jdbc(url, table, jprop))
Construct a :class:`DataFrame` representing the database table named ``table`` accessible via JDBC URL ``url`` and connection ``properties``. Partitions of the table will be retrieved in parallel if either ``column`` or ``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartitions`` is needed when ``column`` is specified. If both ``column`` and ``predicates`` are specified, ``column`` will be used. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: the name of the table :param column: the name of an integer column that will be used for partitioning; if this parameter is specified, then ``numPartitions``, ``lowerBound`` (inclusive), and ``upperBound`` (exclusive) will form partition strides for generated WHERE clause expressions used to split the column ``column`` evenly :param lowerBound: the minimum value of ``column`` used to decide partition stride :param upperBound: the maximum value of ``column`` used to decide partition stride :param numPartitions: the number of partitions :param predicates: a list of expressions suitable for inclusion in WHERE clauses; each one defines one partition of the :class:`DataFrame` :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } :return: a DataFrame
def _diff_emit_update(self, new_bookmarks):
    """Diff the bookmark cache against *new_bookmarks*, emit the
    appropriate added/removed/changed signals, and replace the cache
    with the new data.

    Bookmarks are first grouped by ``(type, primary)``; ambiguous
    groups are recursively subdivided by successive ``secondary`` keys
    until each entry can be classified.
    """
    self.logger.debug("diffing %s, %s", self._bookmark_cache, new_bookmarks)

    def subdivide(level, old, new):
        # Recursively narrow a group by the `level`-th secondary key.
        # Returns the (old, new) entries it could not classify.
        if len(old) == len(new) == 1:
            # One-to-one: either unchanged or a change event.
            old_entry = old.pop()
            new_entry = new.pop()
            if old_entry == new_entry:
                pass
            else:
                self.on_bookmark_changed(old_entry, new_entry)
            return ([], [])
        elif len(old) == 0:
            return ([], new)
        elif len(new) == 0:
            return (old, [])
        else:
            try:
                groups = {}
                for entry in old:
                    group = groups.setdefault(
                        entry.secondary[level],
                        ([], [])
                    )
                    group[0].append(entry)
                for entry in new:
                    group = groups.setdefault(
                        entry.secondary[level],
                        ([], [])
                    )
                    group[1].append(entry)
            except IndexError:
                # Ran out of secondary keys: the common prefix must be
                # identical; return the unmatched tails.
                common = min(len(old), len(new))
                assert old[:common] == new[:common]
                return (old[common:], new[common:])

            old_unhandled, new_unhandled = [], []
            for old, new in groups.values():
                unhandled = subdivide(level+1, old, new)
                old_unhandled += unhandled[0]
                new_unhandled += unhandled[1]

            # Pair up remaining entries positionally as "changed".
            i = -1
            for i, (old_entry, new_entry) in enumerate(
                    zip(old_unhandled, new_unhandled)):
                self.logger.debug("changed %s -> %s", old_entry, new_entry)
                self.on_bookmark_changed(old_entry, new_entry)
            i += 1
            return old_unhandled[i:], new_unhandled[i:]

    # Group cached and new bookmarks by (type, primary key).
    changable_groups = {}
    for item in self._bookmark_cache:
        group = changable_groups.setdefault(
            (type(item), item.primary),
            ([], [])
        )
        group[0].append(item)
    for item in new_bookmarks:
        group = changable_groups.setdefault(
            (type(item), item.primary),
            ([], [])
        )
        group[1].append(item)

    for old, new in changable_groups.values():
        if len(old) == len(new) == 1:
            old_entry = old.pop()
            new_entry = new.pop()
            if old_entry == new_entry:
                pass
            else:
                self.logger.debug("changed %s -> %s", old_entry, new_entry)
                self.on_bookmark_changed(old_entry, new_entry)
        elif len(new) == 0:
            for removed in old:
                self.logger.debug("removed %s", removed)
                self.on_bookmark_removed(removed)
        elif len(old) == 0:
            for added in new:
                self.logger.debug("added %s", added)
                self.on_bookmark_added(added)
        else:
            # Ambiguous group: subdivide, then treat leftovers on one
            # side as pure removals/additions.
            old, new = subdivide(0, old, new)
            assert len(old) == 0 or len(new) == 0
            for removed in old:
                self.logger.debug("removed %s", removed)
                self.on_bookmark_removed(removed)
            for added in new:
                self.logger.debug("added %s", added)
                self.on_bookmark_added(added)

    self._bookmark_cache = new_bookmarks
Diff the bookmark cache and the new bookmark state, emit signals as needed and set the bookmark cache to the new data.
def memorize(func):
    """Cache results of *func* in the module-level ``__cache__`` dict.

    The cache key is built from the call arguments: positional args
    alone key on the args tuple; positional plus keyword args key on
    the args tuple combined with a frozenset of the keyword items (the
    original implementation built an unhashable ``list`` + ``dict``
    key here, raising ``TypeError`` on any call that mixed positional
    and keyword arguments); calls with no positional args fall back to
    the function name, preserving the original (coarse) behavior.

    Note: a cached result of ``None`` is indistinguishable from a miss
    and will be recomputed.
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        if len(args) > 0 and len(kwargs) > 0:
            # frozenset of items is hashable, unlike the dict itself.
            cacheKey = (args, frozenset(kwargs.items()))
        elif len(args) > 0:
            cacheKey = args
        else:
            cacheKey = func.__name__
        global __cache__
        result = __cache__.get(cacheKey)
        if result is None:
            result = func(*args, **kwargs)
            __cache__[cacheKey] = result
        return result
    return wrapped_func
Memoize the calculated results of :data:`func`. All results computed by the decorated method/function are cached in a global `dict`, keyed by the call arguments; subsequent calls with the same arguments return the cached value directly.
def resolve_path(schema, fragment):
    """Return the sub-schema addressed by a JSON-pointer *fragment*.

    Path is unescaped according to https://tools.ietf.org/html/rfc6901:
    numeric parts index into lists, other parts into dicts.

    Raises:
        JsonSchemaException: when a path part cannot be resolved.
    """
    # NOTE(review): the separator/escape string literals appear
    # stripped in this copy (lstrip(), split(), replace(, )) -- the
    # '#' strip, '/' split and '~'-escape replacements must be
    # restored before running.
    fragment = fragment.lstrip()
    parts = unquote(fragment).split() if fragment else []
    for part in parts:
        part = part.replace(, ).replace(, )
        if isinstance(schema, list):
            schema = schema[int(part)]
        elif part in schema:
            schema = schema[part]
        else:
            raise JsonSchemaException(.format(part))
    return schema
Return definition from path. Path is unescaped according https://tools.ietf.org/html/rfc6901
def delete_grade_entry(self, grade_entry_id):
    """Delete the ``GradeEntry`` identified by the given ``Id``.

    arg: grade_entry_id (osid.id.Id): the Id of the GradeEntry to delete
    raise: NotFound -- no GradeEntry found for the Id (via find_one)
    raise: InvalidArgument -- grade_entry_id is not an Id
    """
    # NOTE(review): the database/collection name literals and the
    # '_id' query keys appear stripped in this copy -- restore before
    # running.
    collection = JSONClientValidated(,
                                     collection=,
                                     runtime=self._runtime)
    if not isinstance(grade_entry_id, ABCId):
        raise errors.InvalidArgument()
    # Fetch the map first so the object's _delete() hook runs before
    # the document is removed from the collection.
    grade_entry_map = collection.find_one(
        dict({: ObjectId(grade_entry_id.get_identifier())},
             **self._view_filter()))

    objects.GradeEntry(osid_object_map=grade_entry_map,
                       runtime=self._runtime,
                       proxy=self._proxy)._delete()
    collection.delete_one({: ObjectId(grade_entry_id.get_identifier())})
Deletes the ``GradeEntry`` identified by the given ``Id``. arg: grade_entry_id (osid.id.Id): the ``Id`` of the ``GradeEntry`` to delete raise: NotFound - a ``GradeEntry`` was not found identified by the given ``Id`` raise: NullArgument - ``grade_entry_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def fetchmany(self, size=None):
    """Fetch up to *size* rows (default: ``self.arraysize``).

    The returned result set may be smaller than requested; an empty
    result triggers the warning check.
    """
    self._check_executed()
    requested = size or self.arraysize
    rows = self._fetch_row(requested)
    self.rownumber += len(rows)
    if not rows:
        self._warning_check()
    return rows
Fetch up to size rows from the cursor. Result set may be smaller than size. If size is not defined, cursor.arraysize is used.
def clean_indicators(indicators):
    """Remove any extra details from indicators and dedupe them.

    For each indicator: remove the configured junk substrings, trim
    it, and keep only the first whitespace-separated token; the result
    is de-duplicated (original ordering is not preserved).
    """
    # NOTE(review): the substrings to strip and the replacement/strip
    # argument literals appear stripped in this copy ("strip = [, ]",
    # "replace(item, )") -- restore before running.
    output = list()
    for indicator in indicators:
        strip = [, ]
        for item in strip:
            indicator = indicator.replace(item, )
        indicator = indicator.strip().strip()
        parts = indicator.split()
        if len(parts) > 0:
            # Keep only the leading token.
            indicator = parts.pop(0)
        output.append(indicator)
    # set() dedupes but discards ordering.
    output = list(set(output))
    return output
Remove any extra details from indicators.
def listen(bot, receivers=None, token=None, port=10245, status_report=False,
           status_receiver=None, status_interval=DEFAULT_REPORT_TIME):
    """Start the wechat_sender tornado service around a wxpy Bot.

    Registers the HTTP handlers, optionally schedules a periodic
    status report, stores shared state in the module-level ``glb``
    and then blocks in the tornado IOLoop.  See the original (Chinese)
    docstring for the full parameter description; ``token`` should
    match the one used when constructing ``Sender()``.
    """
    global glb
    periodic_list = []
    app = Application()
    wxbot = WxBot(bot, receivers, status_receiver)
    register_listener_handle(wxbot)
    # Process handle used for runtime/status reporting.
    process = psutil.Process()
    app.listen(port)
    if status_report:
        # A timedelta interval is converted to milliseconds for tornado.
        if isinstance(status_interval, datetime.timedelta):
            status_interval = status_interval.seconds * 1000
        check_periodic = tornado.ioloop.PeriodicCallback(
            functools.partial(check_bot, SYSTEM_TASK), status_interval)
        check_periodic.start()
        periodic_list.append(check_periodic)
    glb = Global(wxbot=wxbot, run_info=process, periodic_list=periodic_list,
                 ioloop=tornado.ioloop.IOLoop.instance(), token=token)
    # Blocks until the IOLoop is stopped.
    tornado.ioloop.IOLoop.current().start()
传入 bot 实例并启动 wechat_sender 服务 :param bot: (必填|Bot对象) - wxpy 的 Bot 对象实例 :param receivers: (选填|wxpy.Chat 对象|Chat 对象列表) - 消息接收者,wxpy 的 Chat 对象实例, 或 Chat 对象列表,如果为 list 第一个 Chat 为默认接收者。如果为 Chat 对象,则默认接收者也是此对象。 不填为当前 bot 对象的文件接收者 :param token: (选填|str) - 信令,防止 receiver 被非法滥用,建议加上 token 防止非法使用,如果使用 token 请在初始化 `Sender()` 时也使用统一 token,否则无法发送。token 建议为 32 位及以上的无规律字符串 :param port: (选填|int) - 监听端口, 监听端口默认为 10245 ,如有冲突或特殊需要请自行指定,需要和 `Sender()` 统一 :param status_report: (选填|bool) - 是否开启状态报告,如果开启,wechat_sender 将会定时发送状态信息到 status_receiver :param status_receiver: (选填|Chat 对象) - 指定 status_receiver,不填将会发送状态消息给默认接收者 :param status_interval: (选填|int|datetime.timedelta) - 指定状态报告发送间隔时间,为 integer 时代表毫秒
def convert_values(self, value, field):
    """Coerce a backend-returned value to match the Django field type.

    SQL Server < 2008 has no separate Date and Time data types, so
    date/time values round-trip as datetimes and are narrowed back
    here.
    """
    # NOTE(review): the field-type name literals compared against
    # get_internal_type() appear stripped in this copy ("== :") --
    # from the branch bodies they presumably were the DateTime, Date,
    # Time and Float field types; restore before use.
    if value is None:
        return None
    if field and field.get_internal_type() == :
        if isinstance(value, string_types) and value:
            value = parse_datetime(value)
        return value
    elif field and field.get_internal_type() == :
        if isinstance(value, datetime.datetime):
            value = value.date()
        elif isinstance(value, string_types):
            value = parse_date(value)
    elif field and field.get_internal_type() == :
        # Times come back stored on the epoch date 1900-01-01.
        if (isinstance(value, datetime.datetime) and
                value.year == 1900 and value.month == value.day == 1):
            value = value.time()
        elif isinstance(value, string_types):
            value = parse_time(value)
    elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
        # Midnight datetimes with no narrower field collapse to dates.
        value = value.date()
    elif value is not None and field and field.get_internal_type() == :
        value = float(value)
    return value
Coerce the value returned by the database backend into a consistent type that is compatible with the field type. In our case, cater for the fact that SQL Server < 2008 has no separate Date and Time data types. TODO: See how we'll handle this for SQL Server >= 2008
def get_config_value(self, overrides, skip_environment=False):
    """Resolve this item's value from the given overrides.

    Searches the ``(label, config-dict)`` overrides -- and optionally
    the environment -- for a value, falls back to the default,
    converts the result to the declared type and validates it.

    Args:
        overrides: list of (label, dict) tuples.
        skip_environment: skip looking through the environment.

    Returns:
        The converted, validated value (``None`` passes straight
        through without conversion).

    Raises:
        YapconfItemNotFound: required item with no value and no default.
    """
    # NOTE(review): the error/log message literals appear stripped in
    # this copy (".format(...)" on empty strings) -- restore before use.
    label, override, key = self._search_overrides(
        overrides, skip_environment
    )

    if override is None and self.default is None and self.required:
        raise YapconfItemNotFound(
            .format(self.fq_name), self
        )

    if override is None:
        self.logger.debug(
            .format(self.name)
        )
        value = self.default
    else:
        value = override[key]

    if value is None:
        # None bypasses conversion and validation entirely.
        return value

    converted_value = self.convert_config_value(value, label)
    self._validate_value(converted_value)
    return converted_value
Get the configuration value from all overrides. Iterates over all overrides given to see if a value can be pulled out from them. It will convert each of these values to ensure they are the correct type. Args: overrides: A list of tuples where each tuple is a label and a dictionary representing a configuration. skip_environment: Skip looking through the environment. Returns: The converted configuration value. Raises: YapconfItemNotFound: If an item is required but could not be found in the configuration. YapconfItemError: If a possible value was found but the type cannot be determined. YapconfValueError: If a possible value is found but during conversion, an exception was raised.
def binned_bitsets_by_chrom( f, chrom, chrom_col=0, start_col=1, end_col=2):
    """Read interval lines for one chromosome from *f* into a BinnedBitSet.

    Lines whose chrom column equals *chrom* have their [start, end)
    range set in the bitset.
    """
    # NOTE(review): this copy is garbled -- the comment-skip guard
    # below lost its string literal and line break (it presumably was
    # `if line.startswith("#"): continue`); restore before running.
    bitset = BinnedBitSet( MAX )
    for line in f:
        if line.startswith("
        fields = line.split()
        if fields[chrom_col] == chrom:
            start, end = int( fields[start_col] ), int( fields[end_col] )
            bitset.set_range( start, end-start )
    return bitset
Read a file by chrom name into a bitset
def CAS_from_any(ID, autoload=False):
    """Look up the CAS number for a chemical identifier string.

    *ID* may be a name/synonym, an 'InChI=1S/' or 'InChI=1/' string,
    an 'InChIKey=' string, a 'PubChem=' CID, a 'SMILES=' string, a CAS
    number, a bare SMILES/formula, or (for elements) an atomic symbol
    or atomic number.  Raises on failure; on a first miss the lookup
    is retried once with ``autoload=True``.
    """
    # NOTE(review): the prefix/separator string literals throughout
    # this copy are stripped (e.g. the 'inchi=1s/', 'inchikey=',
    # 'pubchem=', 'smiles=' prefixes, the space/dash replacements and
    # the trailing-')' mixture check) and the exception messages are
    # empty -- restore them before running.
    ID = ID.strip()
    ID_lower = ID.lower()
    # Element symbols/numbers/CAS handled directly via the periodic table.
    if ID in periodic_table:
        if periodic_table[ID].number not in homonuclear_elemental_gases:
            return periodic_table[ID].CAS
        else:
            for i in [periodic_table.symbol_to_elements, periodic_table.number_to_elements,
                      periodic_table.CAS_to_elements]:
                if i == periodic_table.number_to_elements:
                    if int(ID in i):
                        return periodic_table[int(ID)].CAS
                else:
                    if ID in i:
                        return periodic_table[ID].CAS
    # Valid CAS numbers: verify against the database (obsolete numbers
    # may resolve to a current one via the name search).
    if checkCAS(ID):
        CAS_lookup = pubchem_db.search_CAS(ID, autoload)
        if CAS_lookup:
            return CAS_lookup.CASs
        CAS_alternate_loopup = pubchem_db.search_name(ID, autoload)
        if CAS_alternate_loopup:
            return CAS_alternate_loopup.CASs
        if not autoload:
            return CAS_from_any(ID, autoload=True)
        raise Exception()
    ID_len = len(ID)
    # Prefixed identifier forms, longest prefixes first.
    if ID_len > 9:
        inchi_search = False
        if ID_lower[0:9] == :
            inchi_search = ID[9:]
        elif ID_lower[0:8] == :
            inchi_search = ID[8:]
        if inchi_search:
            inchi_lookup = pubchem_db.search_InChI(inchi_search, autoload)
            if inchi_lookup:
                return inchi_lookup.CASs
            else:
                if not autoload:
                    return CAS_from_any(ID, autoload=True)
                raise Exception()
        if ID_lower[0:9] == :
            inchi_key_lookup = pubchem_db.search_InChI_key(ID[9:], autoload)
            if inchi_key_lookup:
                return inchi_key_lookup.CASs
            else:
                if not autoload:
                    return CAS_from_any(ID, autoload=True)
                raise Exception()
    if ID_len > 8:
        if ID_lower[0:8] == :
            pubchem_lookup = pubchem_db.search_pubchem(ID[8:], autoload)
            if pubchem_lookup:
                return pubchem_lookup.CASs
            else:
                if not autoload:
                    return CAS_from_any(ID, autoload=True)
                raise Exception()
    if ID_len > 7:
        if ID_lower[0:7] == :
            smiles_lookup = pubchem_db.search_smiles(ID[7:], autoload)
            if smiles_lookup:
                return smiles_lookup.CASs
            else:
                if not autoload:
                    return CAS_from_any(ID, autoload=True)
                raise Exception()
    # Un-prefixed fallbacks: bare SMILES, then formula, then name.
    smiles_lookup = pubchem_db.search_smiles(ID, autoload)
    if smiles_lookup:
        return smiles_lookup.CASs
    try:
        formula_query = pubchem_db.search_formula(serialize_formula(ID), autoload)
        if formula_query and type(formula_query) == ChemicalMetadata:
            return formula_query.CASs
    except:
        pass
    name_lookup = pubchem_db.search_name(ID, autoload)
    if name_lookup:
        return name_lookup.CASs
    # Retry name variants with separators removed and lowercased.
    ID_no_space = ID.replace(, )
    ID_no_space_dash = ID_no_space.replace(, )
    for name in [ID, ID_no_space, ID_no_space_dash]:
        for name2 in [name, name.lower()]:
            name_lookup = pubchem_db.search_name(name2, autoload)
            if name_lookup:
                return name_lookup.CASs
    # Identifier pairs (e.g. "name (synonym)") must agree with each other.
    if ID[-1] == and in ID:
        first_identifier, second_identifier = ID[0:-1].split(, 1)
        try:
            CAS1 = CAS_from_any(first_identifier)
            CAS2 = CAS_from_any(second_identifier)
            assert CAS1 == CAS2
            return CAS1
        except:
            pass
    if not autoload:
        return CAS_from_any(ID, autoload=True)
    raise Exception()
Looks up the CAS number of a chemical by searching and testing for the string being any of the following types of chemical identifiers: * Name, in IUPAC form or common form or a synonym registered in PubChem * InChI name, prefixed by 'InChI=1S/' or 'InChI=1/' * InChI key, prefixed by 'InChIKey=' * PubChem CID, prefixed by 'PubChem=' * SMILES (prefix with 'SMILES=' to ensure smiles parsing; ex. 'C' will return Carbon as it is an element whereas the SMILES interpretation for 'C' is methane) * CAS number (obsolete numbers may point to the current number) If the input is an ID representing an element, the following additional inputs may be specified as * Atomic symbol (ex 'Na') * Atomic number (as a string) Parameters ---------- ID : str One of the name formats described above Returns ------- CASRN : string A three-piece, dash-separated set of numbers Notes ----- An exception is raised if the name cannot be identified. The PubChem database includes a wide variety of other synonyms, but these may not be present for all chemcials. Examples -------- >>> CAS_from_any('water') '7732-18-5' >>> CAS_from_any('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3') '64-17-5' >>> CAS_from_any('CCCCCCCCCC') '124-18-5' >>> CAS_from_any('InChIKey=LFQSCWFLJHTTHZ-UHFFFAOYSA-N') '64-17-5' >>> CAS_from_any('pubchem=702') '64-17-5' >>> CAS_from_any('O') # only elements can be specified by symbol '17778-80-2'
def get_recipients(self, name):
    """Return the formatted e-mail addresses stored under *name* in ``self.data``.

    The raw value is first rendered through ``self.render_string``, then parsed
    with :func:`email.utils.getaddresses` and re-serialized with
    :func:`email.utils.formataddr`.  Empty entries are dropped.

    For example ``get_recipients('to')``.
    """
    rendered = self.render_string(self.data[name])
    recipients = []
    for addr_pair in email.utils.getaddresses([rendered]):
        formatted = email.utils.formataddr(addr_pair)
        if formatted:
            recipients.append(formatted)
    return recipients
def get_network_by_name(self, nwk_name):
    """Search for an OpenStack network by name.

    :param nwk_name: network name to match exactly
    :returns: list of network dicts whose 'name' equals *nwk_name*
        (empty list on error or when nothing matches)
    """
    # String literals reconstructed: neutron's list_networks() wraps its
    # result as {'networks': [...]}, and the log-format keys follow from
    # the %(name)s / %(exc)s placeholders in the message.
    ret_net_lst = []
    try:
        body = {}
        net_list = self.neutronclient.list_networks(body=body)
        net_list = net_list.get('networks')
        for net in net_list:
            if net.get('name') == nwk_name:
                ret_net_lst.append(net)
    except Exception as exc:
        LOG.error("Failed to get network by name %(name)s, "
                  "Exc %(exc)s",
                  {'name': nwk_name, 'exc': str(exc)})
    return ret_net_lst
def edit(self, body):
    """:calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_

    :param body: string
    :rtype: None
    """
    assert isinstance(body, (str, unicode)), body
    payload = {
        "body": body,
    }
    headers, data = self._requester.requestJsonAndCheck(
        "PATCH",
        self.url,
        input=payload,
    )
    # Refresh this object's attributes from the server response.
    self._useAttributes(data)
def _closeElements(childs, HTMLElement): out = [] for e in childs: if not e.isTag(): out.append(e) continue if not e.isNonPairTag() and not e.isEndTag() and not e.isComment() \ and e.endtag is None: e.childs = _closeElements(e.childs, HTMLElement) out.append(e) out.append(HTMLElement("</" + e.getTagName() + ">")) e.endtag = out[-1] out[-1].openertag = e else: out.append(e) return out
Create `endtags` to elements which looks like openers, but doesn't have proper :attr:`HTMLElement.endtag`. Args: childs (list): List of childs (:class:`HTMLElement` obj) - typically from :attr:`HTMLElement.childs` property. Returns: list: List of closed elements.
def load_external_types(self, path):
    """Load a python module or package from *path* and inject its types.

    The loaded module is handed to ``self.load_type_module`` which searches
    it for defined variables and injects them into the type system.

    Args:
        path (str): Path to a python package directory or module file.  If it
            points to a module it should not contain the trailing ``.py``.

    Raises:
        ArgumentError: If the module could not be found or imported.
    """
    # The `imp` module used previously was deprecated since 3.4 and removed
    # in Python 3.12; importlib is the supported replacement.
    import importlib.util

    folder, filename = os.path.split(path)

    # A package directory is loaded via its __init__.py; a module via <path>.py.
    if os.path.isdir(path):
        source_path = os.path.join(path, '__init__.py')
    else:
        source_path = path + '.py'

    try:
        spec = importlib.util.spec_from_file_location(filename, source_path)
        if spec is None or spec.loader is None:
            raise ImportError("no loadable module at %s" % source_path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
    except (ImportError, OSError) as exc:
        raise ArgumentError("could not import module in order to load external types",
                            module_path=path, parent_directory=folder,
                            module_name=filename, error=str(exc))

    self.load_type_module(mod)
def process_jwt(jwt):
    """Process a JSON Web Token without verifying it.

    Call this before :func:`verify_jwt` if you need access to the header or
    claims in the token before verifying it. For example, the claims might
    identify the issuer such that you can retrieve the appropriate public key.

    :param jwt: The JSON Web Token to inspect.
    :type jwt: str or unicode
    :rtype: tuple
    :returns: ``(header, claims)``
    """
    # A compact JWS is exactly three base64url segments joined by '.'
    # (header.claims.signature); the signature is deliberately ignored here.
    # Bug fix: the separator string had been lost (`split()` splits on
    # whitespace, which never occurs in a compact JWT).
    header, claims, _ = jwt.split('.')
    parsed_header = json_decode(base64url_decode(header))
    parsed_claims = json_decode(base64url_decode(claims))
    return parsed_header, parsed_claims
def validate_revocation_request(self, request):
    """Ensure the revocation request is valid (RFC 7009).

    Runs the three validation checks in order: the ``token`` parameter must
    be present, the client must authenticate successfully, and the token type
    (if hinted) must be supported.  Each check raises on failure.

    :param request: the incoming revocation request object
    """
    for check in (self._raise_on_missing_token,
                  self._raise_on_invalid_client,
                  self._raise_on_unsupported_token):
        check(request)
def percent_overlapping_calls(records, min_gab=300):
    """Return the percentage of calls that overlap with the next call.

    A call overlaps when its end time, minus *min_gab* seconds, is at or
    after the start of the following call.

    Parameters
    ----------
    records : list
        The records for a single user; only records whose ``interaction`` is
        ``"call"`` are considered.
    min_gab : int
        Number of seconds that the calls must overlap to be considered an
        issue.  Defaults to 5 minutes.
    """
    calls = [r for r in records if r.interaction == "call"]
    if not calls:
        return 0.
    # Compare each call with its immediate successor.
    overlap_count = sum(
        1 for cur, nxt in zip(calls, calls[1:])
        if cur.datetime + timedelta(seconds=cur.call_duration - min_gab) >= nxt.datetime
    )
    return float(overlap_count) / len(calls)
def set_cache_implementation(self, cache_name, impl_name, maxsize, **kwargs):
    """Change the cache implementation for the named cache.

    :param cache_name: name of the cache to reconfigure
    :param impl_name: name of the implementation to switch to
    :param maxsize: maximum size for the new implementation
    :param kwargs: extra options forwarded to the implementation
    """
    cache = self._get_cache(cache_name)
    cache.set_cache_impl(impl_name, maxsize, **kwargs)
def _inject_synthetic_target(self, vt, sources):
    """Create, inject, and return a synthetic target for the given target and workdir.

    :param vt: A codegen input VersionedTarget to inject a synthetic target for.
    :param sources: A FilesetWithSpec to inject for the target.
    """
    # NOTE(review): two string literals in this method appear to have been
    # stripped from the source (the MismatchedExtraExports message passed to
    # .format(), and the copied_attributes key for the export specs) --
    # restore them before this method can run.
    target = vt.target
    # Resolve where the generated code lives plus the type and extra
    # dependencies the synthetic target should carry.
    synthetic_target_dir = self.synthetic_target_dir(target, vt.results_dir)
    synthetic_target_type = self.synthetic_target_type(target)
    synthetic_extra_dependencies = self.synthetic_target_extra_dependencies(target, synthetic_target_dir)
    # Copy selected attributes over from the original target verbatim.
    copied_attributes = {}
    for attribute in self._copy_target_attributes:
        copied_attributes[attribute] = getattr(target, attribute)
    if self._supports_exports(synthetic_target_type):
        extra_exports = self.synthetic_target_extra_exports(target, synthetic_target_dir)
        # Every extra export must also appear among the extra dependencies.
        extra_exports_not_in_extra_dependencies = set(extra_exports).difference( set(synthetic_extra_dependencies))
        if len(extra_exports_not_in_extra_dependencies) > 0:
            raise self.MismatchedExtraExports( .format(extra_exports_not_in_extra_dependencies, target))
        # Merge the original target's export specs with the extra ones.
        extra_export_specs = {e.address.spec for e in extra_exports}
        original_export_specs = self._original_export_specs(target)
        union = set(original_export_specs).union(extra_export_specs)
        copied_attributes[] = sorted(union)
    synthetic_target = self.context.add_new_target( address=self._get_synthetic_address(target, synthetic_target_dir), target_type=synthetic_target_type, dependencies=synthetic_extra_dependencies, sources=sources, derived_from=target, **copied_attributes )
    build_graph = self.context.build_graph
    # Re-point everything that depended on the original target at the
    # synthetic target as well.
    for dependent_address in build_graph.dependents_of(target.address):
        build_graph.inject_dependency( dependent=dependent_address, dependency=synthetic_target.address, )
    # Give the synthetic target all of the original target's dependencies.
    for concrete_dependency_address in build_graph.dependencies_of(target.address):
        build_graph.inject_dependency( dependent=synthetic_target.address, dependency=concrete_dependency_address, )
    # Keep target roots in sync so the synthetic target is visible there too.
    if target in self.context.target_roots:
        self.context.target_roots.append(synthetic_target)
    return synthetic_target
def sync(self, raw_data, row_change_callback=None):
    """Like :meth:`inject`, but also delete spreadsheet rows whose key is
    absent from *raw_data*.

    Args:
        raw_data (dict): See inject method.
        row_change_callback (Optional) (func): See inject method.

    Returns:
        UpdateResults (object): See inject method.
    """
    result = self._update(raw_data, row_change_callback, delete_rows=True)
    return result
def synchronize_simultaneous(self, node_ip):
    """Keep the passive simultaneous node's connection recent for *node_ip*.

    Adjacent NAT mappings for certain NAT types can be stolen by other
    connections, so if any connected candidate is too old relative to the
    node's last simultaneous connection we ask the passive node to
    reconnect; otherwise the candidates are cleaned up and propagated for
    the TCP hole punching attempt.
    """
    for candidate in self.factory.candidates[node_ip]:
        # Only consider candidates that are still connected.
        if not candidate["con"].connected:
            continue
        node_time = self.factory.nodes["simultaneous"][node_ip]["time"]
        if candidate["time"] - node_time > self.challege_timeout:
            # Mapping may be stale -- ask the passive node to reconnect.
            msg = "RECONNECT"
            self.factory.nodes["simultaneous"][node_ip]["con"].send_line(msg)
            return
    self.cleanup_candidates(node_ip)
    self.propogate_candidates(node_ip)
def _init_metadata(self):
    """Initialize metadata for this answer form record.

    Delegates to each mixed-in form record's ``_init_metadata()`` first,
    then defines this record's tolerance-mode metadata mapping.
    """
    # NOTE(review): the dict keys and several string values in the metadata
    # mapping below appear to have been stripped from the source -- restore
    # them before this method can run.
    DecimalValuesFormRecord._init_metadata(self)
    IntegerValuesFormRecord._init_metadata(self)
    TextAnswerFormRecord._init_metadata(self)
    super(MultiLanguageCalculationInteractionFeedbackAndFilesAnswerFormRecord, self)._init_metadata()
    self._tolerance_mode_metadata = { : Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, ), : , : , : True, : False, : False, : False, : [{ : , : str(DEFAULT_LANGUAGE_TYPE), : str(DEFAULT_SCRIPT_TYPE), : str(DEFAULT_FORMAT_TYPE), }], : , : 0, : 1024, : [] }