Dataset columns:
  content — string, lengths 35 to 762k
  sha1 — string, length 40
  id — int64, 0 to 3.66M
from typing import Optional


def gamescriptToJson(title: str, version: Optional[str] = None) -> Optional[dict]:
    """Get game script hierarchy as a dictionary (for saving as json, etc.)."""
    scripts = GameScript.objects.all().filter(title=title)
    if version:
        scripts = scripts.filter(version=version)
    if len(scripts) == 0:
        print("No title with that name and version")
        return None
    if len(scripts) > 1:
        print("The following titles with versions were found. Please choose one.")
        print([script.title for script in scripts])
        return None
    script = scripts[0]
    return script.toJson()
c76779b76b69fb1816f9e96136fdee903212d831
3,659,400
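A minimal usage sketch for the function above, assuming a Django model GameScript with a toJson() method as the snippet implies; the title string and output file are hypothetical:

import json

# Hypothetical title/version; GameScript.toJson() is assumed from the snippet above.
script_dict = gamescriptToJson("Chrono Quest", version="1.2")
if script_dict is not None:
    with open("chrono_quest.json", "w") as fh:
        json.dump(script_dict, fh, indent=2)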
import face_recognition


def is_ignored_faces(faces):
    """Check if the faces are ignored faces.

    Args:
        faces: Encoded faces from face_recognition.

    Returns:
        bool: If a not-ignored face appeared, return False, otherwise True.
    """
    global ignored_faces
    for face in faces:
        matches = face_recognition.compare_faces(ignored_faces, face)
        # A face counts as ignored only if it matches at least one known
        # ignored face; otherwise a not-ignored face has appeared.
        if True not in matches:
            return False
    return True
bda7703cfb471ac5c95cb6aa30f6d758129ae8a5
3,659,401
from typing import Optional

import pulumi


def get_prediction_model_status(hub_name: Optional[str] = None,
                                prediction_name: Optional[str] = None,
                                resource_group_name: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPredictionModelStatusResult:
    """
    The prediction model status.

    :param str hub_name: The name of the hub.
    :param str prediction_name: The name of the Prediction.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['hubName'] = hub_name
    __args__['predictionName'] = prediction_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:customerinsights/v20170426:getPredictionModelStatus',
                                    __args__, opts=opts, typ=GetPredictionModelStatusResult).value

    return AwaitableGetPredictionModelStatusResult(
        message=__ret__.message,
        model_version=__ret__.model_version,
        prediction_guid_id=__ret__.prediction_guid_id,
        prediction_name=__ret__.prediction_name,
        signals_used=__ret__.signals_used,
        status=__ret__.status,
        tenant_id=__ret__.tenant_id,
        test_set_count=__ret__.test_set_count,
        training_accuracy=__ret__.training_accuracy,
        training_set_count=__ret__.training_set_count,
        validation_set_count=__ret__.validation_set_count)
14ff24d3f7edf674c5cd29a643ae28a1a3d8ed99
3,659,402
def build_2d_grid(ir):
    """Build simple grid with a column for each gate."""
    grid = []
    for g in ir.gates:
        step = [None] * ir.ngates
        if g.is_single():
            step[g.idx0] = g
        if g.is_ctl():
            step[g.ctl] = g.ctl
            step[g.idx1] = g
        grid.append(step)
    return grid
55c17327fb530301ca505b42cdb8d47426491374
3,659,403
import argparse
import os


def parse_args():
    """Wrapper function of argument parsing process."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--save_loc', type=str, default='.',
        help='where to save results'
    )
    parser.add_argument(
        '--log_dir', type=str,
        default=os.path.join('logs', f'{_current_file_name}.{_time}.log'),
        help='the directory of the log file'
    )
    args = parser.parse_args()
    return args
2083d01de27f7f3b4481fe90dbc39614257aeb5d
3,659,404
import copy
from collections import defaultdict
from typing import Any, Dict, List, Tuple, Union

from torch import Tensor


def emmental_collate_fn(
    batch: Union[List[Tuple[Dict[str, Any], Dict[str, Tensor]]], List[Dict[str, Any]]],
    min_data_len: int = 0,
    max_data_len: int = 0,
) -> Union[Tuple[Dict[str, Any], Dict[str, Tensor]], Dict[str, Any]]:
    """Collate function.

    Args:
        batch: The batch to collate.
        min_data_len: The minimal data sequence length, defaults to 0.
        max_data_len: The maximal data sequence length (0 means no limit),
            defaults to 0.

    Returns:
        The collated batch.
    """
    X_batch: defaultdict = defaultdict(list)
    Y_batch: defaultdict = defaultdict(list)

    for item in batch:
        # Check if batch is (x_dict, y_dict) pair
        if isinstance(item, dict):
            x_dict = item
            y_dict: Dict[str, Any] = dict()
        else:
            x_dict, y_dict = item
        for field_name, value in x_dict.items():
            if isinstance(value, list):
                X_batch[field_name] += value
            else:
                X_batch[field_name].append(value)
        for label_name, value in y_dict.items():
            if isinstance(value, list):
                Y_batch[label_name] += value
            else:
                Y_batch[label_name].append(value)

    field_names = copy.deepcopy(list(X_batch.keys()))

    for field_name in field_names:
        values = X_batch[field_name]
        # Only merge list of tensors
        if isinstance(values[0], Tensor):
            item_tensor, item_mask_tensor = list_to_tensor(
                values,
                min_len=min_data_len,
                max_len=max_data_len,
            )
            X_batch[field_name] = item_tensor
            if item_mask_tensor is not None:
                X_batch[f"{field_name}_mask"] = item_mask_tensor

    for label_name, values in Y_batch.items():
        Y_batch[label_name] = list_to_tensor(
            values,
            min_len=min_data_len,
            max_len=max_data_len,
        )[0]

    if len(Y_batch) != 0:
        return dict(X_batch), dict(Y_batch)
    else:
        return dict(X_batch)
b18c7ebf50f5554055da5de8a2ddce9e758ea1ef
3,659,405
import numpy as np


def trap_jac_factory(j, dt):
    """Factory function to return a function for evaluating the Jacobian
    of the trapezoidal formula. This returns a function of x_n (x at this
    time step).

    :param j: Jacobian of the function of x.
    :param dt: time step.
    :returns: trap_jac, callable which takes x_n and evaluates the
        Jacobian of the trapezoidal formula.
    """
    def trap_jac(x_n):
        """Compute the Jacobian of the implicit trapezoidal equation."""
        return np.identity(x_n.shape[0]) - dt / 2 * j(x_n)

    return trap_jac
5e6c365b6b92c13577249d34e7580827dc894604
3,659,406
from pathlib import Path


def get_position_object(file_path: FilePathType):
    """
    Read position data from a .bin or .pos file and convert it to
    pynwb.behavior.SpatialSeries objects. When possible, prefer reading
    position data from the `.bin` file to ensure samples are locked to
    ecephys time courses.

    Parameters
    ----------
    file_path (Path or str):
        Full file_path of Axona file with any extension.

    Returns
    -------
    position (pynwb.behavior.Position)
    """
    position = Position()

    position_channel_names = [
        "time(ms)",
        "X",
        "Y",
        "x",
        "y",
        "PX",
        "px",
        "px_total",
        "unused",
    ]

    if Path(file_path).suffix == ".bin":
        position_data = read_bin_file_position_data(file_path)
    else:
        position_data = read_pos_file_position_data(file_path)

    position_timestamps = position_data[:, 0]

    for ichan in range(0, position_data.shape[1]):
        spatial_series = SpatialSeries(
            name=position_channel_names[ichan],
            timestamps=position_timestamps,
            data=position_data[:, ichan],
            reference_frame="start of raw acquisition (.bin file)",
        )
        position.add_spatial_series(spatial_series)

    return position
2d20e5b0a4f7077748650e7a3e3054c79b68185c
3,659,407
import random


def throw_dice(n):
    """Throw `n` dice, returns list of integers"""
    results = []
    while n > 0:
        results += [random.randint(1, 6)]
        n = n - 1
    return results
68c56b468ecd1eff59932099dd4620bae9581f45
3,659,408
import json

import flask
from jwcrypto import jwk, jwt


def verify_token_signature(token):
    """Verify the signature of the token and return the claims
    such as subject/username on valid signature"""
    key = jwk.JWK.from_password(flask.current_app.config.get("SECRET_KEY"))
    try:
        jwttoken = jwt.JWT(key=key, jwt=token, algs=["HS256"])
        return json.loads(jwttoken.claims)
    except jwt.JWTExpired:
        # jwt dependency uses a 60 seconds leeway to check exp
        # it also prints out a stack trace for it, so we handle it here
        raise AuthenticationError(message="Expired token")
d93233acb8a26ba0552ddc26777ccab4e40c4306
3,659,409
import numpy as np


def logtime_r2(t, y, ppd):
    """
    Convert y=f(t) data from linear in time to logarithmic in time.

    Args:
        t: is the input time vector, linearly spaced
        y: is the input vector of y values
        ppd: number of points per decade for the output

    Returns:
        A 3-tuple (tout, yout, wt) where tout and yout are
        logarithmically-spaced versions of t and y and wt is a vector of
        weights giving the number of points averaged for each yout value.
    """
    zt = len(t)
    zy = len(y)
    assert zt == zy

    # Find the index of t = 0 by taking the index where t^2 is minimum.
    indzero = np.argmin(np.power(t, 2))
    if t[indzero] < 0:
        indzero += 1

    # tmin is minimum nonzero value of t after start.
    tmin = t[indzero]
    tmax = np.max(t)
    if tmin == 0:
        tmin = t[indzero + 1]
    ltmin = np.log10(tmin)
    ltmax = np.log10(tmax)
    tt = np.arange(ltmin, ltmax, 1 / (2 * ppd))
    tt = np.power(10, tt)
    ztt = tt.size

    # Perform resampling from indzero to end, forward in time.
    icount, jcount = indzero, 0
    tout, yout, wt = np.zeros(ztt), np.zeros(ztt), np.zeros(ztt)
    for i in np.arange(1, ztt, 2):
        # Accumulate points until we reach the end of the interval.
        while icount < zt and t[icount] < tt[i]:
            tout[jcount] = tout[jcount] + t[icount]
            yout[jcount] = yout[jcount] + y[icount]
            wt[jcount] += 1
            icount += 1
        # If we accumulated data points, then average by the number of points.
        if wt[jcount] > 0:
            tout[jcount] = tout[jcount] / wt[jcount]
            yout[jcount] = yout[jcount] / wt[jcount]
        jcount += 1

    # Purposely allocated too much space at the start. Trim zeroes from the end.
    yout = np.trim_zeros(yout, 'b')
    tout = tout[:yout.size]
    wt = wt[:yout.size]

    # If we started at the beginning, then we are done.
    if indzero == 0:
        return (tout, yout, wt)

    # If not, perform resampling from indzero backwards in time.
    tmp_t, tmp_y = -t[indzero - 1::-1], y[indzero - 1::-1]
    tmp_zt = len(tmp_t)
    icount, jcount = 0, 0
    tmp_tout, tmp_yout, tmp_wt = np.zeros(ztt), np.zeros(ztt), np.zeros(ztt)
    for i in np.arange(1, ztt, 2):
        while icount < tmp_zt and tmp_t[icount] < tt[i]:
            tmp_tout[jcount] = tmp_tout[jcount] + tmp_t[icount]
            tmp_yout[jcount] = tmp_yout[jcount] + tmp_y[icount]
            tmp_wt[jcount] += 1
            icount += 1
        if tmp_wt[jcount] > 0:
            tmp_tout[jcount] = tmp_tout[jcount] / tmp_wt[jcount]
            tmp_yout[jcount] = tmp_yout[jcount] / tmp_wt[jcount]
        jcount += 1

    # Purposely allocated too much space at the start. Trim zeroes from the end.
    tmp_yout = np.trim_zeros(tmp_yout, 'b')
    tmp_tout = tmp_tout[:tmp_yout.size]
    tmp_wt = tmp_wt[:tmp_yout.size]

    # Concat results and return.
    return (np.concatenate([-tmp_tout[::-1], tout]),
            np.concatenate([tmp_yout[::-1], yout]),
            np.concatenate([tmp_wt[::-1], wt]))
ed77d7665488d3620d5cb62f4ba443b2361944b4
3,659,410
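A minimal usage sketch for the logtime_r2 resampler above; the synthetic decay data are made up for illustration:

import numpy as np

# Synthetic example: an exponential decay sampled linearly in time.
t = np.linspace(0, 100.0, 100001)
y = np.exp(-t / 5.0)

# Resample to 10 points per decade; wt holds how many raw samples
# were averaged into each output point.
tout, yout, wt = logtime_r2(t, y, ppd=10)
print(tout.size, "log-spaced points from", t.size, "linear samples")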
def parcours_serpentin(n):
    """Return the list of (column, row) indices (careful: row and column
    are deliberately inverted here!) of the cells of an n x n array
    visited in a spiral traversal.

    E.g. for T = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] the traversal visits
    the cells 1, 2, 3, 6, 9, 8, 7, 4, 5, so the function returns the
    index list [(0, 0), (1, 0), (2, 0), (2, 1), ...].
    """
    indices = []
    left, right, top, bottom = 0, n - 1, 0, n - 1
    while left <= right and top <= bottom:
        for col in range(left, right + 1):            # top row, left to right
            indices.append((col, top))
        for row in range(top + 1, bottom + 1):        # right column, downwards
            indices.append((right, row))
        if top < bottom:
            for col in range(right - 1, left - 1, -1):    # bottom row, right to left
                indices.append((col, bottom))
        if left < right:
            for row in range(bottom - 1, top, -1):        # left column, upwards
                indices.append((left, row))
        left, top, right, bottom = left + 1, top + 1, right - 1, bottom - 1
    return indices
189e486ad82d75923244daf51c223254f7b29fcc
3,659,411
def bdev_rbd_unregister_cluster(client, name):
    """Remove Rados cluster object from the system.

    Args:
        name: name of Rados cluster object to unregister
    """
    params = {'name': name}
    return client.call('bdev_rbd_unregister_cluster', params)
03bf70352b8df65044eba1c9ece4b156590e11bc
3,659,412
def get_rndc_secret():
    """Use the singleton from the DesignateBindCharm to retrieve the
    RNDC secret.

    :returns: str or None. Secret if available, None if not.
    """
    return DesignateBindCharm.singleton.get_rndc_secret()
b6fb5aebd272a6bc4db7d6541112566109e28195
3,659,413
import pandas as pd


def transform_tweet(source_tweet):
    """
    Perform transformation on one tweet, producing a new, transformed tweet.

    :param source_tweet: Tweet text to transform
    :type source_tweet: str
    :return: Transformed tweet text
    :rtype: str
    """
    no_emojis = replace_emojis(source_tweet)
    as_tokens = tokenize_string(no_emojis)
    result = ' '.join(as_tokens)
    if not result:
        return pd.NaT
    else:
        return result
9c4722200c7c85157aefca0c65946b6dd0e264d5
3,659,414
import json

import pandas as pd


def pdFrame(file):
    """Creates a pandas data frame from a json log file.

    Args:
        file: json log file to read

    Returns:
        pandas data frame
    """
    logger.debug("creating pandas data frame from {}".format(file))
    data = []
    with open(file) as f:
        for line in f:
            tmp = []
            log = json.loads(line)
            try:
                tmp.append(pd.Timestamp(log['timestamp']))
            except KeyError:
                tmp.append('no value')
            try:
                tmp.append(str(log['resource']['type']))
            except KeyError:
                tmp.append('no value')
            try:
                tmp.append(str(log['severity']))
            except KeyError:
                tmp.append('no value')
            try:
                tmp.append(str(log['protoPayload']['authenticationInfo']['principalEmail']))
            except KeyError:
                tmp.append('no value')
            data.append(tmp)

    fieldNames = ['timestamp', 'resourceType', 'severity', 'account']
    logs = pd.DataFrame(data, columns=fieldNames)
    return logs
bfb299820e4cd3001de89f3598a664a11988edc4
3,659,415
import time

import scipy
import scipy.io
import scipy.optimize


def fitDataBFGSM2(M, val, c_w_l, init=None, nozero=True, k=3e34, lam=1.,
                  name='W_Abundances_grid_puestu_adpak_fitscaling_74_0.00000_5.00000_1000_idlsave'):
    """Function for determining the optimal fit given the desired
    parabolic regularization.

    init is the three initial values of the gaussian needed to fit the data.
    """
    # Initialize start position.
    temp = scipy.io.readsav(name)
    init = temp['abundance'][:, 36]
    reg = gen2Diff(init)

    bndarray = scipy.ones((len(init), 2))
    bndarray[:, 0] = 1e-10
    bndarray[:, 1] = 1e10

    Te = temp['en']
    y = time.time()
    output = scipy.optimize.minimize(fullObjectiveLog,
                                     init,
                                     args=(val, c_w_l, M / k),
                                     jac=objectiveLogJac2,
                                     bounds=bndarray)
    print(time.time() - y)
    return output
35ddd0690e2ed60d6271f9be232cea3d808d562f
3,659,416
import pytest


def set_complete_cfg_spacy(false_or_true: str):
    """Set all SpaCy configuration parameters to the same logical value."""
    return pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_SPACY,
        [
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_CLUSTER, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_DEP_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_DOC, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_IOB_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_KB_ID_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_TYPE_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_HEAD, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_I, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IDX, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_ALPHA, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_ASCII, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_BRACKET, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_CURRENCY, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_DIGIT, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_LEFT_PUNCT, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_LOWER, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_OOV, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_PUNCT, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_QUOTE, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_RIGHT_PUNCT, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SENT_END, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SENT_START, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SPACE, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_STOP, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_TITLE, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_UPPER, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LANG_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEFT_EDGE, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEMMA_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEX, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEX_ID, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_EMAIL, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_NUM, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_URL, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LOWER_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_MORPH, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_NORM_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ORTH_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_POS_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_PREFIX_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_PROB, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_RANK, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_RIGHT_EDGE, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SENT, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SENTIMENT, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SHAPE_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SUFFIX_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TAG_, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TENSOR, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TEXT, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TEXT_WITH_WS, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_VOCAB, false_or_true),
            (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_WHITESPACE_, false_or_true),
        ],
    )
10ac74714e11b8c8492de7ec1d2809323819b8eb
3,659,417
import sys
import traceback


def guard_unexpected_errors(func):
    """Decorator to be used in PyObjC callbacks where an error bubbling up
    would cause a crash. Instead of crashing, print the error to stderr and
    prevent passing to PyObjC layer.

    For Python 3, print the exception using chaining. Accomplished by setting
    the cause of :exc:`rumps.exceptions.InternalRumpsError` to the exception.

    For Python 2, emulate exception chaining by printing the original
    exception followed by :exc:`rumps.exceptions.InternalRumpsError`.
    """
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            internal_error = exceptions.InternalRumpsError(
                'an unexpected error occurred within an internal callback'
            )
            if compat.PY2:
                traceback.print_exc()
                print('\nThe above exception was the direct cause of the following exception:\n',
                      file=sys.stderr)
                traceback.print_exception(exceptions.InternalRumpsError, internal_error, None)
            else:
                internal_error.__cause__ = e
                traceback.print_exception(exceptions.InternalRumpsError, internal_error, None)

    return wrapper
42b8e4ce05cca51272679ab5024d07798dafb357
3,659,418
def get_lun_ids(service_instance=None):
    """
    Return a list of LUN (Logical Unit Number) NAA (Network Addressing
    Authority) IDs.
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)

    hosts = utils_esxi.get_hosts(service_instance=service_instance, get_all_hosts=True)
    ids = []
    for host in hosts:
        for datastore in host.datastore:
            for extent in datastore.info.vmfs.extent:
                ids.append(extent.diskName)
    return ids
6194a8f73a71730d928391a492d5e8fe0fdb3f50
3,659,419
def parse_midi_file(midi_file,
                    max_notes=float('Inf'),
                    max_time_signatures=1,
                    max_tempos=1,
                    ignore_polyphonic_notes=True,
                    convert_to_drums=False,
                    steps_per_quarter=16):
    """Parse a MIDI file into a quantized, monophonic NoteSequence.

    Parameters
    ----------
    midi_file : str
        Path of the MIDI file to parse.
    max_notes : float, optional
        Maximum number of notes to keep; extra notes are dropped.
    max_time_signatures : int, optional
        Maximum number of time signatures to keep.
    max_tempos : int, optional
        Maximum number of tempos to keep.
    ignore_polyphonic_notes : bool, optional
        If True, convert the sequence to monophonic.
    convert_to_drums : bool, optional
        If True, set every note's program to a drum program.
    steps_per_quarter : int, optional
        Quantization resolution in steps per quarter note.

    Returns
    -------
    (seq, qpm)
        The processed NoteSequence and its tempo in quarter notes per minute.
    """
    seq = midi_io.midi_file_to_sequence_proto(midi_file)
    while len(seq.notes) > max_notes:
        seq.notes.pop()
    while len(seq.time_signatures) > max_time_signatures:
        seq.time_signatures.pop()
    while len(seq.tempos) > max_tempos:
        seq.tempos.pop()
    if convert_to_drums:
        for note_i in range(len(seq.notes)):
            seq.notes[note_i].program = 10
    if ignore_polyphonic_notes:
        convert_to_monophonic(seq)
    seq = sequences_lib.quantize_note_sequence(
        seq, steps_per_quarter=steps_per_quarter)
    if seq.tempos:
        qpm = seq.tempos[0].qpm
    else:
        qpm = 120
    melody = Melody()
    melody.from_quantized_sequence(
        seq, ignore_polyphonic_notes=ignore_polyphonic_notes)
    seq = melody.to_sequence(qpm=qpm)
    return seq, qpm
6c3ce0135bf45a8992f94197f5b10ab472407f40
3,659,420
import numpy as np


def filter_prediction(disable_valid_filter, disable_extra_one_word_filter, pred_token_2dlist_stemmed):
    """
    Remove the duplicate predictions; can optionally remove invalid
    predictions and extra one-word predictions.

    :param disable_valid_filter:
    :param disable_extra_one_word_filter:
    :param pred_token_2dlist_stemmed:
    :return:
    """
    num_predictions = len(pred_token_2dlist_stemmed)
    is_unique_mask = check_duplicate_keyphrases(pred_token_2dlist_stemmed)  # boolean array, 1=unique, 0=duplicate
    pred_filter = is_unique_mask
    if not disable_valid_filter:
        is_valid_mask = check_valid_keyphrases(pred_token_2dlist_stemmed)
        pred_filter = pred_filter * is_valid_mask
    if not disable_extra_one_word_filter:
        extra_one_word_seqs_mask, num_one_word_seqs = compute_extra_one_word_seqs_mask(pred_token_2dlist_stemmed)
        pred_filter = pred_filter * extra_one_word_seqs_mask
    filtered_stemmed_pred_str_list = [word_list for word_list, is_keep in
                                      zip(pred_token_2dlist_stemmed, pred_filter) if is_keep]
    num_duplicated_predictions = num_predictions - np.sum(is_unique_mask)
    return filtered_stemmed_pred_str_list, num_duplicated_predictions, is_unique_mask
8cbeb93c6fdfdc64cfa5819baa903699544ccb3d
3,659,421
def simple_dict_event_extractor(row, condition_for_creating_event, id_field, timestamp_field, name_of_event):
    """
    Takes a row of the data df and returns an event record
    {id, event, timestamp} if the row satisfies the condition
    (i.e. condition_for_creating_event(row) returns True)
    """
    if condition_for_creating_event(row):
        return {'id': row[id_field], 'event': name_of_event, 'timestamp': row[timestamp_field]}
2195acf5df6f465fdf3160df3abbac54e5ac0320
3,659,422
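A usage sketch for the extractor above, applied row-wise over a pandas DataFrame; the column names and event condition are made up for illustration:

import pandas as pd

# Hypothetical event log; DataFrame.apply passes extra keyword args to the function.
df = pd.DataFrame({
    "user_id": [1, 2, 3],
    "ts": ["2021-01-01", "2021-01-02", "2021-01-03"],
    "amount": [0, 50, 120],
})

# Emit a "large_purchase" event for rows with amount > 100; rows that do
# not satisfy the condition return None and are dropped.
events = df.apply(
    simple_dict_event_extractor,
    axis=1,
    condition_for_creating_event=lambda r: r["amount"] > 100,
    id_field="user_id",
    timestamp_field="ts",
    name_of_event="large_purchase",
).dropna()
print(list(events))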
def split_fused_prelu(input_graph_def: util.GraphDef) -> util.GraphDef:
    """
    This function looks for fused operations that include a
    'Prelu'-activation. Matching nodes will be split into individual
    operations.

    TFJS uses fused operations for performance.
    Some fused activations aren't supported by TF (e.g. 'Prelu'), so we
    need to split the fused ops back into individual ops and replace
    unsupported functions by equivalent supported constructs later.

    Args:
        input_graph_def: TF graph definition to examine

    Returns:
        Updated copy of the input graph with matching nodes replaced by
        individual operations
    """
    def _predicate(node):
        return (util.is_fused_conv2d(node, b'Prelu')
                or util.is_fused_matmul(node, b'Prelu'))

    return util.replace_matching_nodes(input_graph_def, _predicate, _split_fused_op)
36b22afa67dd9259aae9f7be8ec6c4ffdf7c1167
3,659,423
import gc


def test_harvest_lost_resources(pool):
    """Test unreferenced resources are returned to the pool."""
    def get_resource_id():
        """
        Ensures ``Resource`` falls out of scope before calling
        ``_harvest_lost_resources()``.
        """
        return id(pool.get_resource()._resource)

    r_id = get_resource_id()

    # Run garbage collection to ensure ``Resource`` created in
    # ``get_resource_id()`` is destroyed.
    gc.collect()
    pool._harvest_lost_resources()

    assert r_id == id(pool.get_resource()._resource)
04b8b29520c2ae9c2c47cef412659e9c567c6a8a
3,659,424
def __call__for_keras_init_v1(self, shape, dtype=None, partition_info=None):
    """Making keras VarianceScaling initializers v1 support dynamic shape."""
    if dtype is None:
        dtype = self.dtype
    scale = self.scale
    scale_shape = shape
    if partition_info is not None:
        scale_shape = partition_info.full_shape
    fan_in, fan_out = _compute_fans_for_keras_init_v1_v2(scale_shape)
    fan_in = math_ops.cast(fan_in, dtype=dtype)
    fan_out = math_ops.cast(fan_out, dtype=dtype)
    if self.mode == "fan_in":
        scale /= math_ops.maximum(1., fan_in)
    elif self.mode == "fan_out":
        scale /= math_ops.maximum(1., fan_out)
    else:
        scale /= math_ops.maximum(1., (fan_in + fan_out) / 2.)
    if self.distribution == "normal" or self.distribution == "truncated_normal":
        # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
        stddev = math_ops.sqrt(scale) / .87962566103423978
        return random_ops.truncated_normal(shape, 0.0, stddev, dtype, seed=self.seed)
    elif self.distribution == "untruncated_normal":
        stddev = math_ops.sqrt(scale)
        return random_ops.random_normal(shape, 0.0, stddev, dtype, seed=self.seed)
    else:
        limit = math_ops.sqrt(3.0 * scale)
        return random_ops.random_uniform(shape, -limit, limit, dtype, seed=self.seed)
860dc27ecd133b5bb193c4856736f9bb1a52d243
3,659,425
def create_line(net, from_bus, to_bus, length_km, std_type, name=None, index=None, geodata=None,
                df=1., parallel=1, in_service=True, max_loading_percent=nan):
    """
    create_line(net, from_bus, to_bus, length_km, std_type, name=None, index=None, \
                geodata=None, df=1., parallel=1, in_service=True, max_loading_percent=nan)

    Creates a line element in net["line"].
    The line parameters are defined through the standard type library.

    INPUT:
        **net** - The net within this line should be created

        **from_bus** (int) - ID of the bus on one side which the line will be connected with

        **to_bus** (int) - ID of the bus on the other side which the line will be connected with

        **length_km** (float) - The line length in km

        **std_type** (string) - The linetype of a standard line pre-defined in standard_linetypes.

    OPTIONAL:
        **name** (string) - A custom name for this line

        **index** (int, None) - Force a specified ID if it is available. If None, the index one \
            higher than the highest already existing index is selected.

        **geodata** (array, default None, shape=(,2L)) - The linegeodata of the line. The first row \
            should be the coordinates of bus a and the last should be the coordinates of bus b. The \
            points in the middle represent the bending points of the line

        **in_service** (boolean) - True for in_service or False for out of service

        **df** (float) - derating factor: maximal current of line in relation to nominal current \
            of line (from 0 to 1)

        **parallel** (integer) - number of parallel line systems

        **max_loading_percent** (float) - maximum current loading (only needed for OPF)

    OUTPUT:
        **index** (int) - The unique ID of the created line

    EXAMPLE:
        create_line(net, "line1", from_bus=0, to_bus=1, length_km=0.1, std_type="NAYY 4x50 SE")
    """

    # check if bus exist to attach the line to
    for b in [from_bus, to_bus]:
        if b not in net["bus"].index.values:
            raise UserWarning("Line %s tries to attach to non-existing bus %s" % (name, b))

    if index is None:
        index = get_free_id(net["line"])

    if index in net["line"].index:
        raise UserWarning("A line with index %s already exists" % index)

    v = {
        "name": name, "length_km": length_km, "from_bus": from_bus,
        "to_bus": to_bus, "in_service": bool(in_service), "std_type": std_type,
        "df": df, "parallel": parallel
    }

    lineparam = load_std_type(net, std_type, "line")
    v.update({
        "r_ohm_per_km": lineparam["r_ohm_per_km"],
        "x_ohm_per_km": lineparam["x_ohm_per_km"],
        "c_nf_per_km": lineparam["c_nf_per_km"],
        "max_i_ka": lineparam["max_i_ka"]
    })
    if "type" in lineparam:
        v.update({"type": lineparam["type"]})

    # store dtypes
    dtypes = net.line.dtypes

    net.line.loc[index, list(v.keys())] = list(v.values())

    # and preserve dtypes
    _preserve_dtypes(net.line, dtypes)

    if geodata is not None:
        net["line_geodata"].loc[index, "coords"] = geodata

    if not isnan(max_loading_percent):
        if "max_loading_percent" not in net.line.columns:
            net.line.loc[:, "max_loading_percent"] = pd.Series()

        net.line.loc[index, "max_loading_percent"] = float(max_loading_percent)

    return index
218a3a16bce0d746465991c0992f614bddf98892
3,659,426
import numpy as np
from scipy import linalg


def get_initmap(X, A=None, standardize=False, cov_func=None):
    """Give back parameters such that we have the L U decomposition of the
    product with A (if given, or the PCA scores if not). That is, we will
    get back:
        X[:, perm]*L*U + b = ((X-meanvec)/stdvec)*A
    where A are PCA directions if not given, L, U are the LU decomposition,
    and meanvec, stdvec are zeros, ones vectors if not standardizing.

    Args:
        X: N x d array of training data
        A: d x d linear map to decompose, XA+b (uses identity if None
            given with no cov_func).
        standardize: boolean that indicates to standardize the dimensions
            of X after applying the linear map.
        cov_func: function that yields a linear map given the covariance
            matrix of X.

    Returns:
        init_mat: d x d matrix where the strictly lower triangle
            corresponds to L and the upper triangle corresponds to U.
        b: d length vector of offset
        perm: permutation of dimensions of X
    """
    N, d = X.shape
    if A is None:
        if cov_func is None:
            A = np.eye(d)
            b = np.zeros((1, d))
        else:
            b = -np.mean(X, 0, keepdims=True)
            M = X + b  # Has mean zero.
            cov = np.matmul(M.T, M) / N
            A = cov_func(cov)
            b = np.matmul(b, A)
    if standardize:
        z = np.matmul(X, A) + b
        mean_vec = np.mean(z, 0, keepdims=True)
        # Standardizing may lead to outliers; better to get things in [-1, 1].
        std_vec = np.maximum(np.max(np.abs(z - mean_vec), 0, keepdims=True),
                             np.ones((1, d)), dtype=np.float32)
    else:
        mean_vec = np.zeros((1, d))
        std_vec = np.ones((1, d))
    AS = np.divide(A, std_vec)
    P, L, U = linalg.lu(AS)
    perm = np.concatenate([np.flatnonzero(P[:, i]) for i in range(P.shape[1])])
    init_mat = np.tril(L, -1) + U
    init_b = np.squeeze((b - mean_vec) / std_vec)
    return np.float32(init_mat), np.float32(init_b), perm
53ec26f8efe4c0869b4e4423419db32ed08128e0
3,659,427
import numpy as np


def read_FQ_matlab(file_open):
    """Opens FISH-quant result files generated with Matlab (tab-delimited text file).

    Args:
        file_open (string): string containing the full file name.

    Returns:
        dictionary containing outlines of cells, and if present the
        detected spots.
    """
    # Open file
    with open(file_open, "r") as fh:
        data = fh.readlines()

    # Strip white space characters
    data = [x.strip() for x in data]

    # Loop over read-in data
    fq_dict = {'cells': {}, 'file_names': {}, 'settings': {}}
    iLine = 0

    while iLine < len(data):
        line = data[iLine]

        # READ FILE NAMES
        if 'IMG_Raw' in line:
            img_name = line.split('\t')
            if len(img_name) == 2:
                fq_dict['file_names'].update({'smFISH': img_name[1]})
        if 'IMG_Filtered' in line:
            img_name = line.split('\t')
            if len(img_name) == 2:
                fq_dict['file_names'].update({'smFISH_filt': img_name[1]})
        if 'IMG_DAPI' in line:
            img_name = line.split('\t')
            if len(img_name) == 2:
                fq_dict['file_names'].update({'DAPI': img_name[1]})
        if 'FILE_settings' in line:
            img_name = line.split('\t')
            if len(img_name) == 2:
                fq_dict['file_names'].update({'settings': img_name[1]})

        # READ IMAGE PARAMETERS
        if 'PARAMETERS' in line:
            iLine += 2
            par_microscope = data[iLine].split('\t')
            fq_dict['settings'].update({'microscope': {'pix_xy': float(par_microscope[0]),
                                                       'pix_z': float(par_microscope[1]),
                                                       'RI': float(par_microscope[2]),
                                                       'EX': float(par_microscope[3]),
                                                       'EM': float(par_microscope[4]),
                                                       'NA': float(par_microscope[5]),
                                                       'type': par_microscope[6]}})

        # New cell
        if 'CELL_START' in line:
            # Get name of cell
            cell_id = line.split('\t')[1]

            # POSITION OF CELL
            # Read X-POS
            iLine += 1
            pos_list = (data[iLine].replace('X_POS\t', '')).split('\t')
            x_pos = [int(s) for s in pos_list]
            # Read Y-POS
            iLine += 1
            pos_list = (data[iLine].replace('Y_POS\t', '')).split('\t')
            y_pos = [int(s) for s in pos_list]
            # Read Z-POS
            iLine += 1
            pos_list = (data[iLine].replace('Z_POS\t', '')).split('\t')
            if len(pos_list) > 1:
                z_pos = [int(s) for s in pos_list]
            else:
                z_pos = ['']

            fq_dict['cells'].update({cell_id: {'cell_pos': {'x': x_pos, 'y': y_pos, 'z': z_pos}}})

        # New nucleus
        if 'Nucleus_START' in line:
            # Get name of nucleus
            nuc_id = line.split('\t')[1]

            # POSITION OF NUCLEUS
            # Read X-POS
            iLine += 1
            pos_list = (data[iLine].replace('X_POS\t', '')).split('\t')
            x_pos = [int(s) for s in pos_list]
            # Read Y-POS
            iLine += 1
            pos_list = (data[iLine].replace('Y_POS\t', '')).split('\t')
            y_pos = [int(s) for s in pos_list]
            # Read Z-POS
            iLine += 1
            pos_list = (data[iLine].replace('Z_POS\t', '')).split('\t')
            if len(pos_list) > 1:
                z_pos = [int(s) for s in pos_list]
            else:
                z_pos = ['']

            fq_dict['cells'][cell_id].update({nuc_id: {'nuc_pos': {'x': x_pos, 'y': y_pos, 'z': z_pos}}})

        # Position of detected RNAs
        if 'SPOTS_START' in line:
            iLine += 2  # Move over header

            RNA_prop = []
            while not ('SPOTS_END' in data[iLine]):
                RNA_prop.append([float(s) for s in data[iLine].split('\t')])
                iLine += 1

            # Assign to dictionary
            fq_dict['cells'][cell_id].update({'spots': np.array(RNA_prop)})

        # Update line counter
        iLine += 1

    return fq_dict
01c2c2263573e754c216c69496f648a883bb1843
3,659,428
import numpy as np


def create_default_reporting_options(embedded=True, config={}):
    """
    config must follow this scheme:
    {
        `table_name`: {
            `option1`: `value1`
        }
    }

    The different options will depend on the table role.

    - for ALL tables:
        {
            'data': {
                'remove_columns': ['column_name1'],
                'subsampling_factor': 1.0,
                'keep_last_n_rows': 1000
            }
        }

    - For role `data_samples`:
        {
            'default': {
                'Scatter X Axis': value,
                'Scatter Y Axis': value,
                'Color by': value,
                'Color scheme': value,
                'Binning X Axis': value,
                'Binning Y Axis': value,
                'Label with': value,
                'Display with': value,
            }
        }
    """
    o = Object()
    o.image_size = 80
    o.font_size = 19
    o.frame_size_x = None
    o.frame_size_y = 768

    o.data_samples = Object()
    o.data_samples.display_tabular = True
    o.data_samples.display_scatter = True
    o.data_samples.max_numpy_display = 10  # if array below this size, the content will be displayed
    o.data_samples.resize_heterogeneous_numpy = True  # if True, numpy arrays of different shape will be resized to common average size

    o.db_root = None
    o.embedded = embedded

    o.style = Object()
    o.style.color_by_line_width = 1
    o.style.scatter_aspect_ratio = 1.5
    o.style.tool_window_size_x = 200
    o.style.tool_window_size_y = 500
    o.style.sorted_legend = True
    o.style.category_margin = 0.2
    o.style.scatter_continuous_factor = 10

    o.data = Object()
    o.data.refresh_time = 5.0
    o.data.unpack_numpy_arrays_with_less_than_x_columns = 15
    o.data.types_to_discard = [
        np.dtype('|S1'),  # binary string
    ]

    o.config = config
    return o
cc7d341a0d63979bbf3223a241c5707acf057547
3,659,429
def get_patient_note(state, patient_id, note_id, *args, **kwargs):
    """
    Return a note for a patient.
    ---
    tags: ["FHIR"]
    parameters:
      - name: patient_id
        in: path
        description: ID of the patient of interest
        required: true
        schema:
          type: string
      - name: note_id
        in: path
        description: ID of the note of interest
        required: true
        schema:
          type: string
    responses:
      200:
        description: "Note returned"
        content:
          application/json:
            schema:
              type: array
              items:
                type: object
      404:
        description: "No patient or note exists with identifier"
        content:
          text/plain:
            schema:
              type: string
      428:
        description: "No FHIR data currently in application state"
        content:
          text/plain:
            schema:
              type: string
    """
    p = state.patients.get(patient_id)
    if p is None:
        return (
            f'No patient exists with identifier "{patient_id}".',
            404,
            {'Content-Type': 'text/plain'}
        )
    n = p.notes.get(note_id)
    if n is None:
        return (
            f'No note exists with identifier "{note_id}".',
            404,
            {'Content-Type': 'text/plain'}
        )
    return jsonify(n.to_dict())
399212c31d2ae34b96a5617ca73063745c22621c
3,659,430
def _html_build_item(tag: str, text: str, attributes: dict = None, include_tags=True) -> str:
    """Builds an HTML inline element and returns the HTML output.

    :param str tag: the HTML tag
    :param str text: the text between the HTML tags
    :param dict attributes: map of attributes
    :param bool include_tags: True if the tags should be part of the output
    """
    attributes = attributes if attributes is not None else {}
    opening_tag = "<" + tag + _html_build_attributes(attributes) + ">"
    closing_tag = "</" + tag + ">"
    if include_tags:
        return opening_tag + text + closing_tag
    else:
        return text
13b165a98679c2ebaf9a1dec7619a3297c729a63
3,659,431
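A short usage sketch for the helper above, assuming _html_build_attributes renders ' key="value"' pairs as the call implies:

# Expected output (assuming _html_build_attributes renders ' href="/docs"'):
# <a href="/docs">Documentation</a>
link = _html_build_item("a", "Documentation", {"href": "/docs"})
print(link)

# With include_tags=False only the inner text is returned:
print(_html_build_item("a", "Documentation", {"href": "/docs"}, include_tags=False))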
import random
from typing import Dict, Optional


def sim_sample(
    out_prefix: str,
    sample_id: int,
    chrom_start: int = 0,
    chrom_end: int = 10000,
    start_rate: float = 0.001,
    end_rate: float = 0.01,
    mut_rate: float = 0.01,
) -> Dict[str, File]:
    """
    Simulate sequencing data for one sample (assume one chromosome).

    regions are sequenced intervals of a chromosome.
    muts are SNP locations, assume heterozygous.
    """
    regions = []
    muts = []
    region_start: Optional[int]

    # Sample initial state.
    non_seq_len = 1 / start_rate
    seq_len = 1 / end_rate
    if random.random() < seq_len / (seq_len + non_seq_len):
        region_start = chrom_start
    else:
        region_start = None

    # Use poisson process to sample regions and mutation sites.
    pos = chrom_start
    while pos < chrom_end:
        pos += 1
        if region_start is None:
            pos += int(sample_exponential(start_rate))
            if pos >= chrom_end:
                break
            region_start = pos
        else:
            region_end = min(pos + int(sample_exponential(end_rate)), chrom_end - 1)
            mut_pos = pos + int(sample_exponential(mut_rate))
            if region_end <= mut_pos:
                regions.append((region_start, region_end, 2))
                region_start = None
                pos = region_end
            else:
                pos = mut_pos
                muts.append((mut_pos, 1))

    return {
        "regions": write_regions(f"{out_prefix}/regions/{sample_id}.regions", regions),
        "mutations": write_mutations(f"{out_prefix}/muts/{sample_id}.muts", muts),
    }
d8a858b3f8099dd57cdc7abb4f1473e238038536
3,659,432
def vif_col(X, y, col_name):
    """Compute the VIF (variance inflation factor) for a specific column.

    A common threshold is 5 or 10; a value above that indicates
    collinearity.

    Attributes:
        X (pd.DataFrame): independent variables
        y (pd.Series): dependent variable
        col_name (str): the column to evaluate

    References:
        James, Gareth, Daniela Witten, Trevor Hastie, and Robert
        Tibshirani. An Introduction to Statistical Learning. pp. 112,
        Vol. 112: Springer, 2013.
    """
    r_square_minus = model(X.loc[:, X.columns != col_name].values, y).rsquared
    return 1 / (1 - r_square_minus)
6d9c88d928934d60182b597a89c6da6d1f7d1194
3,659,433
def get_mesh_stat(stat_id_start_str, attr_value, xmin, ymin, xmax, ymax):
    """
    Fetch statistics for regional meshes.

    @param stat_id_start_str leading characters of the statistics ID;
        every ID starting with this string is fetched.
    @param attr_value value to filter on in cat01
    @param xmin bounding box of the query
    @param ymin bounding box of the query
    @param xmax bounding box of the query
    @param ymax bounding box of the query
    """
    rows = database_proxy.get_conn().execute("""
    SELECT
        statValue.value,
        AsGeoJson(MapArea.Geometry)
    FROM
        MapArea
        inner join idx_MapArea_Geometry ON pkid = MapArea.id
            AND xmin > ? AND ymin > ? AND xmax < ? AND ymax < ?
        inner join statValueAttr ON MapArea.stat_val_attr_id = statValueAttr.id
        inner join statValueAttr AS b ON b.stat_value_id = statValueAttr.stat_value_id
            AND b.attr_value = ?
        inner join statValue ON statValue.id = b.stat_value_id
    WHERE
        MapArea.stat_id like ?;
    """, (xmin, ymin, xmax, ymax, attr_value, stat_id_start_str + '%'))
    ret = []
    for r in rows:
        ret.append({
            'value': r[0],
            'geometory': r[1]
        })
    return ret
9a861925436c2cf10eb4773be9dfa79c901d43f4
3,659,434
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added.  If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
       keywords.  For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.

    .. versionchanged:: 2.7
       A `silent` option can now be provided.  If set to `False` template
       syntax errors are propagated instead of being ignored.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples. (comments will be empty currently)
    """
    extensions = set()
    for extension in options.get('extensions', '').split(','):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)

    def getbool(options, key, default=False):
        return options.get(key, str(default)).lower() in \
            ('1', 'on', 'yes', 'true')

    silent = getbool(options, 'silent', True)
    environment = Environment(
        options.get('block_start_string', BLOCK_START_STRING),
        options.get('block_end_string', BLOCK_END_STRING),
        options.get('variable_start_string', VARIABLE_START_STRING),
        options.get('variable_end_string', VARIABLE_END_STRING),
        options.get('comment_start_string', COMMENT_START_STRING),
        options.get('comment_end_string', COMMENT_END_STRING),
        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
        options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
        getbool(options, 'trim_blocks', TRIM_BLOCKS),
        getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
        NEWLINE_SEQUENCE,
        getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
        frozenset(extensions),
        cache_size=0,
        auto_reload=False
    )

    if getbool(options, 'trimmed'):
        environment.policies['ext.i18n.trimmed'] = True
    if getbool(options, 'newstyle_gettext'):
        environment.newstyle_gettext = True

    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
    try:
        node = environment.parse(source)
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError:
        if not silent:
            raise
        # skip templates with syntax errors
        return

    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
35ee7c05ee91afc1ccf7c752bdff72e3c3d30d78
3,659,435
import os
import warnings


def main(directory='.', verbose=True):
    """Lists "data" files recursively in a given directory; tar files are
    extracted.

    The "data" files have :file:`info` and :file:`pickle` extensions.

    TODO: not only recognize .tar and .tar.gz and .tgz but .zip...
    """
    filelist = list()
    directory = get_directory(directory, True)
    # Search through the directory and all its subfolders.
    for root, _dirs, files in os.walk(directory):
        if verbose:
            print('Searching in %s ...' % root)
        for elem in files:
            if elem.endswith('.info') or elem.endswith('.pickle') or elem.endswith('.pickle.gz'):
                filelist.append(os.path.join(root, elem))
    if verbose:
        print('Found %d file(s).' % (len(filelist)))
    if not filelist:
        warnings.warn('Could not find any file of interest in %s!' % root)
    return filelist
40cf44878b88e2a0ea312602e98ea7c6821c4c03
3,659,436
import numpy


def onedthreegaussian(x, H, A1, dx1, w1, A2, dx2, w2, A3, dx3, w3):
    """
    Returns the sum of three 1-dimensional gaussians of form
    H + A*numpy.exp(-(x-dx)**2 / (2*w**2))
    """
    g1 = A1 * numpy.exp(-(x - dx1)**2 / (2 * w1**2))
    g2 = A2 * numpy.exp(-(x - dx2)**2 / (2 * w2**2))
    g3 = A3 * numpy.exp(-(x - dx3)**2 / (2 * w3**2))
    return H + g1 + g2 + g3
f93ea1339fe1498fdaeaee91f75b7ba316455646
3,659,437
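A minimal fitting sketch using the three-Gaussian model above with scipy.optimize.curve_fit; the synthetic data and initial guesses are made up for illustration:

import numpy
from scipy.optimize import curve_fit

# Synthetic spectrum: three overlapping peaks plus a flat baseline.
x = numpy.linspace(-10, 10, 500)
true_params = (0.5, 2.0, -4.0, 1.0, 3.0, 0.0, 0.8, 1.5, 5.0, 1.2)
y = onedthreegaussian(x, *true_params) + numpy.random.normal(0, 0.05, x.size)

# Initial guess: rough baseline, amplitudes, centers, and widths.
p0 = (0.0, 1.0, -3.0, 1.0, 1.0, 0.0, 1.0, 1.0, 4.0, 1.0)
popt, pcov = curve_fit(onedthreegaussian, x, y, p0=p0)
print("fitted baseline H =", popt[0])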
def match_any_if_key_matches(audit_id, result_to_compare, args):
    """
    We want to compare things only if we found our interested key.
    Even if the list does not have my interested name, it will pass.

    Match dictionary elements dynamically. Match from a list of available
    dictionaries. There is an argument: match_key. Match only when we
    found this key in result_to_compare.

    True, if match_key found and the mentioned attributes match.
    True, if match_key NOT found; does not even try to match anything else.
    False, if match_key found and attributes do not match.

    comparator:
        type: dict
        match_any_if_key_matches:
            match_key: name
            args:
                - name: abc
                  running: false
                - name: xyz
                  running: false

    Input: {name: hjk, running: false}
    Output: True, as didn't find name: hjk

    Input: {name: abc, running: false}
    Output: True, as found name: abc and matched running: false

    :param result_to_compare:
        Dictionary values to compare
    :param args:
        Comparator dictionary as mentioned in the check.
    """
    log.debug('Running dict::match_any_if_key_matches for audit_id: {0}'.format(audit_id))
    key_name = args['match_any_if_key_matches']['match_key']
    if key_name not in result_to_compare:
        log.debug("Required key '%s' is not found in '%s' for audit_id '%s'",
                  key_name, result_to_compare, audit_id)
        return True, "pass_as_key_not_found"

    key_found_once = False
    for to_match_dict in args['match_any_if_key_matches']['args']:
        errors = []
        if result_to_compare[key_name] == to_match_dict[key_name]:
            key_found_once = True
            _compare_dictionary(audit_id, result_to_compare, to_match_dict, errors)
            if not errors:
                # found a match
                log.debug("dictionary comparison successful."
                          " '%s' matches '%s'", to_match_dict, result_to_compare)
                return True, "Dictionary comparison passed"
            else:
                log.debug("dictionary comparison is not successful."
                          " '%s' does not match '%s'", to_match_dict, result_to_compare)
                return False, "Dictionary comparison failed in dict::match_any_if_key_matches, " \
                              "errors={0}".format(str(errors))

    if not key_found_once:
        error_message = "key '{0}' exists in dict '{1}', " \
                        "but does not match intended values".format(key_name, result_to_compare)
        log.debug(error_message)
        return False, error_message
2fc5f4dea92fdc231f496b1cbe4d78554a32e930
3,659,438
from typing import Optional, Union

import numpy as np


def confusion_matrix_by_prob(true: np.ndarray,
                             predicted_prob: np.ndarray,
                             thresholds: Optional[Union[list, tuple, np.ndarray]] = None,
                             pos_label: Union[bool, str, int] = _DEFAULT_POS_LABEL,
                             output_metrics: Optional[list] = None,
                             table: bool = True,
                             **kwargs):
    """
    Confusion matrix for binary classification according to a given set
    of thresholds.

    :param true: numpy.ndarray(shape=(m), ), an array of true classes;
    :param predicted_prob: numpy.ndarray(shape=(m), ), an array of predicted
        probabilities of being the positive class;
    :param thresholds: [list, tuple, np.array, None] the thresholds set on
        predicted probabilities such that any predicted probability greater
        or equal to the threshold will be classified as the positive class;
    :param pos_label: [str, bool, int], positive class label, label that is
        considered as the positive class;
    :param output_metrics: [list, None], metrics to be outputted if selected;
    :param table: bool, if exported as a pd table;
    :param kwargs:
        :param metric_order: [list, None], if table is selected to be the
            output, metric order specifies the order of metrics presented
            in the table;
    :return: dict, a set of confusion matrices,
        {threshold: {metric_name: metric_value, ...}, ...};
    """
    # convert true series to positive series
    true = true == pos_label

    # select output:
    if isinstance(output_metrics, list):
        for selected_metric in output_metrics:
            if selected_metric not in _FULL_METRICS:
                raise KeyError(f"metric {selected_metric} is not recognized.")
    elif output_metrics == 'confusion':
        output_metrics = ['TP', 'FN', 'FP', 'TN', 'Recall', 'FNR', 'FPR', 'TNR',
                          'Precision', 'FOR', 'FDR', 'NPV', 'Prevalence', 'Accuracy']
    else:
        output_metrics = _FULL_METRICS

    metrics_by_thresholds = dict()
    for threshold in thresholds:
        metrics_by_threshold = dict()
        predicted = predicted_prob >= threshold
        confusion_matrix_dict = confusion_matrix(true=true, predicted=predicted, normalize=False)
        confusion_matrix_nor_true = normalize_confusion_matrix(
            confusion_matrix_dict=confusion_matrix_dict, normalize_index=0)
        confusion_matrix_nor_predicted = normalize_confusion_matrix(
            confusion_matrix_dict=confusion_matrix_dict, normalize_index=1)

        if 'TP' in output_metrics:
            metrics_by_threshold['TP'] = confusion_matrix_dict[(True, True)]
        if 'FN' in output_metrics:
            metrics_by_threshold['FN'] = confusion_matrix_dict[(True, False)]
        if 'FP' in output_metrics:
            metrics_by_threshold['FP'] = confusion_matrix_dict[(False, True)]
        if 'TN' in output_metrics:
            metrics_by_threshold['TN'] = confusion_matrix_dict[(False, False)]
        if 'Recall' in output_metrics:
            metrics_by_threshold['Recall'] = confusion_matrix_nor_true[(True, True)]
        if 'FNR' in output_metrics:
            metrics_by_threshold['FNR'] = confusion_matrix_nor_true[(True, False)]
        if 'FPR' in output_metrics:
            metrics_by_threshold['FPR'] = confusion_matrix_nor_true[(False, True)]
        if 'TNR' in output_metrics:
            metrics_by_threshold['TNR'] = confusion_matrix_nor_true[(False, False)]
        if 'Precision' in output_metrics:
            metrics_by_threshold['Precision'] = confusion_matrix_nor_predicted[(True, True)]
        if 'FOR' in output_metrics:
            metrics_by_threshold['FOR'] = confusion_matrix_nor_predicted[(True, False)]
        if 'FDR' in output_metrics:
            metrics_by_threshold['FDR'] = confusion_matrix_nor_predicted[(False, True)]
        if 'NPV' in output_metrics:
            metrics_by_threshold['NPV'] = confusion_matrix_nor_predicted[(False, False)]
        if 'Prevalence' in output_metrics:
            metrics_by_threshold['Prevalence'] = \
                (confusion_matrix_dict[(True, True)] + confusion_matrix_dict[(True, False)]) / \
                sum(confusion_matrix_dict.values())
        if 'Accuracy' in output_metrics:
            metrics_by_threshold['Accuracy'] = \
                (confusion_matrix_dict[(True, True)] + confusion_matrix_dict[(False, False)]) / \
                sum(confusion_matrix_dict.values())
        if 'LR+' in output_metrics:
            # positive likelihood ratio:
            try:
                metrics_by_threshold['LR+'] = confusion_matrix_nor_true[(True, True)] / \
                    confusion_matrix_nor_true[(False, True)]
            except ZeroDivisionError:
                metrics_by_threshold['LR+'] = '-'
        if 'LR-' in output_metrics:
            # negative likelihood ratio:
            try:
                metrics_by_threshold['LR-'] = confusion_matrix_nor_true[(True, False)] / \
                    confusion_matrix_nor_true[(False, False)]
            except ZeroDivisionError:
                metrics_by_threshold['LR-'] = '-'
        if 'DOR' in output_metrics:
            # diagnostic odds ratio:
            try:
                metrics_by_threshold['DOR'] = \
                    (confusion_matrix_nor_true[(True, True)] / confusion_matrix_nor_true[(False, True)]) / \
                    (confusion_matrix_nor_true[(True, False)] / confusion_matrix_nor_true[(False, False)])
            except ZeroDivisionError:
                metrics_by_threshold['DOR'] = '-'
        if 'F1' in output_metrics:
            # F1 score:
            try:
                metrics_by_threshold['F1'] = \
                    2 * (confusion_matrix_nor_true[(True, True)] *
                         confusion_matrix_nor_predicted[(True, True)]) / \
                    (confusion_matrix_nor_true[(True, True)] +
                     confusion_matrix_nor_predicted[(True, True)])
            except ZeroDivisionError:
                metrics_by_threshold['F1'] = '-'

        metrics_by_thresholds[threshold] = metrics_by_threshold

    if table:
        if 'metric_order' in kwargs:
            metric_order = kwargs['metric_order']
        else:
            metric_order = None
        metrics_by_thresholds = \
            convert_confusion_matrix_by_prob_to_table_with_reformat_precision(
                metrics_by_thresholds=metrics_by_thresholds,
                metric_order=metric_order)

    return metrics_by_thresholds
29bc8808ae1f35f13e52ac26e4e1993c423c6dc6
3,659,439
import itertools
from typing import Mapping, Optional, Sequence


def group_slaves_by_key_func(
    key_func: _GenericNodeGroupingFunctionT,
    slaves: Sequence[_GenericNodeT],
    sort_func: Optional[_GenericNodeSortFunctionT] = None,
) -> Mapping[_KeyFuncRetT, Sequence[_GenericNodeT]]:
    """
    Given a function for grouping slaves, return a dict where keys are
    the unique values returned by the key_func and the values are all
    those slaves which have that specific value.

    :param key_func: a function which consumes a slave and returns a value
    :param slaves: a list of slaves
    :returns: a dict of key: [slaves]
    """
    sorted_slaves: Sequence[_GenericNodeT]
    if sort_func is None:
        sorted_slaves = sorted(slaves, key=key_func)
    else:
        sorted_slaves = sort_func(slaves)

    return {k: list(v) for k, v in itertools.groupby(sorted_slaves, key=key_func)}
c3e286d2ff618758cd86c16f1b6685faea4b4d7a
3,659,440
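A small usage sketch for the grouping helper above, with plain dicts standing in for slave objects (the generic type aliases are internal to the source project):

# Group fake nodes by their 'pool' attribute; itertools.groupby requires
# the input to be sorted by the grouping key, which the helper handles.
nodes = [
    {"name": "n1", "pool": "default"},
    {"name": "n2", "pool": "gpu"},
    {"name": "n3", "pool": "default"},
]
by_pool = group_slaves_by_key_func(lambda n: n["pool"], nodes)
print(sorted(by_pool))          # ['default', 'gpu']
print(len(by_pool["default"]))  # 2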
def init_clfs():
    """init classifiers to train

    Returns:
        dict, clfs
    """
    clfs = dict()
    # clfs['xgb'] = XGBClassifier(n_jobs=-1)
    clfs['lsvc'] = LinearSVC()
    return clfs
4725656eda4e6991cc215bcd5a209ff23171eea6
3,659,441
def get_field_types():
    """Get a dict with all registration field types."""
    return get_field_definitions(RegistrationFormFieldBase)
a9fe05535a541a7a5ada74dc9138a6c2ab29f528
3,659,442
def get_md_links(filepath):
    """Get markdown links from a md file.

    The links' order of appearance in the file IS preserved in the output.

    This is to check for syntax of the format [...](...).
    The returned 'links' inside the () are not checked for validity or
    subtle differences (e.g. '/' vs no '/' at the end of a URL).

    Args:
        filepath (pathlib Path): Path object representing the file from
            which info will be extracted.

    Returns:
        list of strings
    """
    text_str = _get_ascii_plaintext_from_md_file(filepath)
    links = _get_all_md_link_info_from_ascii_plaintext(text_str)
    if links:  # links only, not their text
        return [t[-1] for t in links]
    else:
        return links
3076f77802965cb281101530f4ab360e5996f627
3,659,443
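A usage sketch for get_md_links above, assuming the two private helpers behave as the docstring describes; the file path is hypothetical:

from pathlib import Path

# Hypothetical README; each element is the URL part of a [text](url) link.
links = get_md_links(Path("README.md"))
for url in links:
    print(url)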
from tqdm import tqdm


def dask_to_zarr(df, z, loc, chunk_size, nthreads: int, msg: str = None):
    # TODO: perhaps change name of Dask array so it does not get confused
    # with a dataframe
    """
    Creates a Zarr hierarchy from a Dask array.

    Args:
        df: Dask array.
        z: Zarr hierarchy.
        loc: Location to write data/Zarr hierarchy to.
        chunk_size: Size of chunks to load into memory and process.
        nthreads (int): Number of threads to use.
        msg (str): Message to use with progress bar
            (Default: f"Writing data to {loc}").
    """
    if msg is None:
        msg = f"Writing data to {loc}"
    og = create_zarr_dataset(z, loc, chunk_size, 'float64', df.shape)
    pos_start, pos_end = 0, 0
    for i in tqdm(df.blocks, total=df.numblocks[0], desc=msg):
        pos_end += i.shape[0]
        og[pos_start:pos_end, :] = controlled_compute(i, nthreads)
        pos_start = pos_end
    return None
aa05321183cf086f6a397f6a3cb1f3493eb6689d
3,659,444
def get_reactor_logs(project_id, application_id, api_key=None, **request_kwargs):
    """
    Get the logs of a Reactor script.

    :param project_id: The Project of the Application.
    :type project_id: str
    :param application_id: The Application to get the script logs for.
    :type application_id: str
    :param api_key: The API key to authorize request against.
    :type api_key: str
    :return:
    """
    url = '/projects/{}/applications/{}/reactorLogs'.format(
        project_id, application_id)
    return utils.request('GET', url, api_key=api_key, accept=True, **request_kwargs)
82743619292f387708e7b1dc3fe93c59e232d1cf
3,659,445
import os


def bids_init(bids_src_dir, overwrite=False):
    """
    Initialize BIDS source directory.

    :param bids_src_dir: string
        BIDS source directory
    :param overwrite: string
        Overwrite flag
    :return: True
    """
    # Create template JSON dataset description
    datadesc_json = os.path.join(bids_src_dir, 'dataset_description.json')
    meta_dict = dict({
        'BIDSVersion': "1.0.0",
        'License': "This data is made available under the Creative Commons BY-SA 4.0 International License.",
        'Name': "The dataset name goes here",
        'ReferencesAndLinks': "References and links for this dataset go here"})

    # Write JSON file
    bids_write_json(datadesc_json, meta_dict, overwrite)

    return True
3f728dbeaabf575fb6395a28175e8d94d4260e68
3,659,446
def summation_i_squared(n):
    """Summation without for loop"""
    if not isinstance(n, int) or n < 1:
        return None
    return int((n * (n + 1) * (2 * n + 1)) / 6)
dec0aba274bcaf3e3a821db5962af51d39835438
3,659,447
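The closed form used above is the standard identity sum_{i=1}^{n} i^2 = n(n+1)(2n+1)/6; a quick brute-force check:

# Verify the closed form against an explicit sum for small n.
for n in range(1, 20):
    assert summation_i_squared(n) == sum(i * i for i in range(1, n + 1))
print(summation_i_squared(5))  # 55 = 1 + 4 + 9 + 16 + 25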
def str_to_number(this):
    """Convert string to a Number"""
    try:
        return mknumber(int(this.value))
    except ValueError:
        return mknumber(float(this.value))
e67df9c0de5a5cdbc76a3026f7e31cd3190013c4
3,659,448
import logging


def _LinterRunCommand(cmd, debug, **kwargs):
    """Run the linter with common RunCommand args set as higher levels expect."""
    return cros_build_lib.RunCommand(cmd, error_code_ok=True, print_cmd=debug,
                                     debug_level=logging.NOTICE, **kwargs)
a48355f692b9c75d8ad14bf899f2e9a305bd25a2
3,659,449
import seaborn as sns


def plotTSNE(Xdata, target=None, useMulti=True, num=2500, savename=None,
             njobs=4, size=4, cmap=None, dim=(12, 8)):
    """
    Plot TSNE for training data.

    Inputs:
      > Xdata: The training feature data (DataFrame)
      > target: The training target data (Series)
      > num (2500 by default): The number of rows to use

    Output: (projection, tsneFeatures, tsneTarget), or None on invalid input
    """
    sns.set(style="ticks")

    if Xdata is None:
        print("Xdata is NONE in plotTSNE!")
        return None
    if not isDataFrame(Xdata):
        print("Xdata is not a Pandas DataFrame!")
        return None
    if target is not None:
        if not isSeries(target):
            print("target is not a Pandas Series!")
            return None

    print("Computing TSNE for {0} events with {1} features".format(num, Xdata.shape[1]))
    projection, tsneFeatures, tsneTarget = computeTSNE(Xdata=Xdata, target=target,
                                                       useMulti=useMulti, num=num, njobs=njobs)
    print("Plotting TSNE for {0} events".format(num))
    showTSNE(projection=projection, target=target, savename=savename,
             title="TSNE", size=size, cmap=cmap, dim=dim)
    return projection, tsneFeatures, tsneTarget
9751f861df2d67516e93218000d23e23ba0ad4fe
3,659,450
import os

import cv2
import numpy as np


def _get_distance(captcha_url):
    """
    Get the gap distance (offset of the slider notch).

    :param captcha_url: captcha URL
    :return:
    """
    save_path = os.path.abspath('...') + '\\' + 'images'
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    img_path = _pic_download(captcha_url, 'captcha')

    img1 = cv2.imread(img_path, 0)
    img2 = cv2.imread(save_path + '\\' + "slider.jpg", 0)

    res = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= 0.6)
    for pt in zip(*loc[::-1]):
        p = pt
    try:
        cv2.imshow('Detected', img1[p[1]:, p[0]:])
        cv2.waitKey(3000)
    except Exception as e:
        print(e.args)
        return None
    res = cv2.resize(img1, (255, int(300 * (255 / 600))), interpolation=cv2.INTER_CUBIC)
    cv2.imshow("res", res[:, int(p[0] * (255 / 600) + 15):])
    # cv2.waitKey(3000)
    return int(p[0] * (290 / 600))
a1e79e775bf2c298992b1a0f986318b2ca70edd8
3,659,451
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image: Contrast adjusted image.
    """
    if not is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(contrast_factor)
    return img
aedd8bb489df64138189626585228ffc086e2428
3,659,452
def matplotlib_view(gviz: Digraph): """ Views the diagram using Matplotlib Parameters --------------- gviz Graphviz """ return gview.matplotlib_view(gviz)
9eb0a686c6d01a7d24273bbbc6ddb9b4ee7cb9ac
3,659,453
import numpy as np


def shuf_repeat(lst, count):
    """ Xiaolong's code expects LMDBs with the train list shuffled and
        repeated, so creating that here to avoid multiple steps.
    """
    final_list = []
    # list() is required: np.random.shuffle permutes in place and a bare range is immutable
    ordering = list(range(len(lst)))
    for _ in range(count):
        np.random.shuffle(ordering)
        final_list += [lst[i] for i in ordering]
    assert len(final_list) == count * len(lst)
    return final_list
fea9478aaa37f5b1c58d4a41126055d9cfa4b035
3,659,454
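Assumed usage (not part of the original file): three repetitions of a two-item list, each repetition independently shuffled:

import numpy as np

np.random.seed(0)  # make the shuffles reproducible
print(shuf_repeat(['a', 'b'], 3))  # e.g. ['b', 'a', 'a', 'b', 'b', 'a']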
def create_query(table_name, schema_dict):
    """
    see datatypes documentation here:
    https://www.postgresql.org/docs/11/datatype.html
    """
    # was `db_schema[table_name]`, an undefined global; use the schema_dict argument
    columns = schema_dict[table_name]
    return (
        f"goodbooks_{table_name}",
        [f"{column} {value}" for column, value in columns.items()],
    )
3b330d57f45ca053cfbe90952adc7aa1658ab76d
3,659,455
from typing import Any from typing import Tuple def new_document( source_path: str, settings: Any = None ) -> Tuple[nodes.document, JSONReporter]: """Return a new empty document object. Replicates ``docutils.utils.new_document``, but uses JSONReporter, which is also returned Parameters ---------- source_path : str The path to or description of the source text of the document. settings : optparse.Values Runtime settings. If none are provided, a default core set will be used. If you will use the document object with any Docutils components, you must provide their default settings as well. For example, if parsing, at least provide the parser settings, obtainable as follows:: settings = docutils.frontend.OptionParser( components=(docutils.parsers.rst.Parser,) ).get_default_values() """ # TODO cache creation, as in sphinx.util.docutils.new_document, possibly using a # 'partial' lru_cache, as in https://stackoverflow.com/a/37611009/5033292 if settings is None: settings = OptionParser().get_default_values() # TODO can probably remove decode_path, given python 3 only support source_path = decode_path(source_path) reporter = JSONReporter( source_path, settings.report_level, settings.halt_level, stream=settings.warning_stream, debug=settings.debug, encoding=settings.error_encoding, error_handler=settings.error_encoding_error_handler, ) document = nodes.document(settings, reporter, source=source_path) document.note_source(source_path, -1) return document, reporter
9ec26dd8f8b9c7a2e3a4bc56520b7872e7b53a7a
3,659,456
import requests


def delete_repleciation(zfssrcfs, repel_uuid):
    """ZFS replication action status

    accepts: an existing ZFS action uuid (id).
    returns: the ZFS return status code.
    """
    r = requests.delete(
        "%s/api/storage/v1/replication/actions/%s" % (url, repel_uuid),
        auth=zfsauth,
        verify=False,
        headers=jsonheader,
    )
    logger.info("Deleting local replication for %s(%s)", repel_uuid, zfssrcfs)
    return r.status_code
f62ad1ec3e31ac7c54cf749982690631bb7b72d2
3,659,457
import random
import sys
from pathlib import Path
from pprint import pformat  # needed for the available-checkpoints error message below

import numpy as np  # needed to restore the numpy RNG state below
import torch


def load_checkpoint(
    neox_args, model, optimizer, lr_scheduler, inference=False, iteration=None
):
    """Load a model checkpoint and return the iteration."""
    if neox_args.deepspeed:
        load_optim_and_scheduler = (
            not neox_args.no_load_optim
        )  # TODO: These should be configured by separate args
        if neox_args.finetune:
            load_optim_and_scheduler = False
        if iteration is not None:
            tag = f"global_step{iteration}"
        else:
            tag = None
        checkpoint_name, state_dict = model.load_checkpoint(
            neox_args.load,
            load_optimizer_states=load_optim_and_scheduler,
            load_lr_scheduler_states=load_optim_and_scheduler,
            tag=tag,
        )
        if checkpoint_name is None:
            # if an iteration is specified, we want to raise an error here rather than
            # continuing silently, since we are trying to load a specific checkpoint
            if iteration is not None:
                available_checkpoints = sorted(
                    [
                        int(i.name.replace("global_step", ""))
                        for i in Path(neox_args.load).glob("global_step*")
                    ]
                )
                raise ValueError(
                    f"Unable to load checkpoint for iteration {iteration}. \nAvailable iterations: {pformat(available_checkpoints)}"
                )
            if mpu.get_data_parallel_rank() == 0:
                print("Unable to load checkpoint.")
            return 0  # iteration 0, if no checkpoint loaded
    else:
        raise ValueError("Must be using deepspeed to use neox")

    # Set iteration.
    if neox_args.finetune:
        iteration = 0
    else:
        iteration = state_dict.get("iteration") or state_dict.get(
            "total_iters"
        )  # total_iters backward compatible with older checkpoints
        if iteration is None:
            raise ValueError(
                f"Unable to load iteration from checkpoint {checkpoint_name} with keys {state_dict.keys()}, exiting"
            )

    # Check arguments.
    if "args" in state_dict:
        checkpoint_args = state_dict["args"]
        check_checkpoint_args(neox_args=neox_args, checkpoint_args=checkpoint_args)
        print_rank_0(
            " > validated currently set args with arguments in the checkpoint ..."
        )
    else:
        print_rank_0(" > could not find arguments in the checkpoint for validation...")

    # Check loaded checkpoint with forward pass
    if neox_args.checkpoint_validation_with_forward_pass:
        if "checkpoint_validation_logits" in state_dict:
            check_forward_pass(
                neox_args=neox_args,
                model=model,
                checkpoint_logits=state_dict["checkpoint_validation_logits"],
                inference=inference,
            )
            print_rank_0(" > validated loaded checkpoint with forward pass ...")
        else:
            if mpu.get_data_parallel_rank() == 0:
                print(
                    " > WARNING: checkpoint_validation_with_forward_pass is configured but no checkpoint validation data available in checkpoint {}".format(
                        checkpoint_name
                    )
                )

    # rng states.
    if not neox_args.finetune and not neox_args.no_load_rng:
        try:
            random.setstate(state_dict["random_rng_state"])
            np.random.set_state(state_dict["np_rng_state"])
            torch.set_rng_state(state_dict["torch_rng_state"])
            torch.cuda.set_rng_state(state_dict["cuda_rng_state"])
            mpu.get_cuda_rng_tracker().set_states(state_dict["rng_tracker_states"])
        except KeyError:
            print_rank_0(
                "Unable to load optimizer from checkpoint {}. "
                "Specify --no-load-rng or --finetune to prevent "
                "attempting to load the optimizer state, "
                "exiting ...".format(checkpoint_name)
            )
            sys.exit()

    torch.distributed.barrier()
    if mpu.get_data_parallel_rank() == 0:
        print(" successfully loaded {}".format(checkpoint_name))

    return iteration
7395cc48a1be6c86cf15cd4576257d7f3b5c0f19
3,659,458
import aiohttp def get_logged_in_session(websession: aiohttp.ClientSession) -> RenaultSession: """Get initialised RenaultSession.""" return RenaultSession( websession=websession, country=TEST_COUNTRY, locale_details=TEST_LOCALE_DETAILS, credential_store=get_logged_in_credential_store(), )
87a5a439c5ca583c01151f340ce79f2f4a79558c
3,659,459
def __getStationName(name, id): """Construct a station name.""" name = name.replace("Meetstation", "") name = name.strip() name += " (%s)" % id return name
daab36ed8020536c8dd2c073c352634696a63f3e
3,659,460
import os

import torch
from scipy import io  # scipy.io provides loadmat; the stdlib io module does not


def load_hist(path):
    """ load spatial histogram """
    # load all hist properties
    logpYX = io.loadmat(os.path.join(path, 'logpYX'))['value']
    xlab = io.loadmat(os.path.join(path, 'xlab'))['value']
    ylab = io.loadmat(os.path.join(path, 'ylab'))['value']
    rg_bin = io.loadmat(os.path.join(path, 'rg_bin'))['value']
    prior_count = io.loadmat(os.path.join(path, 'prior_count'))['value']
    # fix some of the properties, convert to torch tensors
    logpYX = torch.tensor(logpYX, dtype=torch.float)
    xlab = torch.tensor(xlab[0], dtype=torch.float)
    ylab = torch.tensor(ylab[0], dtype=torch.float)
    rg_bin = torch.tensor(rg_bin[0], dtype=torch.float)
    prior_count = prior_count.item()
    # build the SpatialHist instance
    H = SpatialHist()
    H.set_properties(logpYX, xlab, ylab, rg_bin, prior_count)
    return H
e494f5e351c8c098b26bd0e7f417ec634a15d9c3
3,659,461
def post_url(url): """Post url argument type :param str url: the post url :rtype: str :returns: the post url """ url = url.strip() if len(url) == 0: raise ArgumentTypeError("A url is required") elif len(url) > Url.URL_LENGTH: raise ArgumentTypeError("The url length is over the maximum allowed") return url
65d3c670580d6abfcfefcc8bcff35ca4e7d51f5c
3,659,462
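Assumed usage as an argparse type validator (Url.URL_LENGTH comes from the surrounding project; here it is just whatever maximum the model allows). argparse turns the ArgumentTypeError raised above into a usage error:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('url', type=post_url)  # strips whitespace and validates length

args = parser.parse_args(['  https://example.com/post  '])
print(args.url)  # https://example.com/post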
def create_planner(request): """Create a new planner and redirect to new planner page.""" user = request.user plan = Plan.objects.create(author=user) plan.save() return HttpResponseRedirect(reverse('planner:edit_plan', args=[plan.id], ))
ab22dfa950208b44c308690dcff6e0f228faa406
3,659,463
import numpy as np
import pandas as pd


def rule_matching_evaluation(df, model, seed_num, rein_num, eval_num, label_map, refer_label,
                             lime_flag=True, scan_flag=False, content_direction='forward',
                             xcol_name='text', n_cores=20):
    """An integrated rule extraction, refinement and validation process.

    On the dataset, sample based methods are used. Seed rules are extracted, and
    unmatched samples in the reinforcement set are re-fed into the extraction
    procedure. Validation is conducted in loops until a certain condition is met.

    Args:
        df: dataframe to be explained.
        model: model that can classify instances.
        seed_num: sample size for seed rule generation.
        rein_num: sample size for reinforcement procedure.
        eval_num: sample size for evaluation procedure.
        label_map: label text and value mappings.
        refer_label: the reference label for lime.
        lime_flag: on-off flag for lime based inference rules.
        scan_flag: on-off flag for LCS based scan rules.
        content_direction: cut out sequences from 'forward' or 'backward'
        xcol_name: column name for content to be explained in df.
        n_cores: number of cores to utilize.

    Returns:
        match_result: match result on evaluation test sets.
        rules_tobe_validate: final rules generated.
        matched_rules: rules hit by evaluation test samples.
    """
    # shuffle dataset; sample() returns a copy, so assign the result back
    df = df.sample(frac=1, random_state=1)

    # generate seed rules
    df_for_seed = df[df['target'] == label_map['malicious']].sample(seed_num, random_state=2)
    rules_seed = get_rules(df_for_seed, model, label_map, 'malicious', lime_flag=lime_flag,
                           scan_flag=scan_flag, content_direction=content_direction, n_cores=n_cores)
    print(rules_seed)

    # reinforce rules
    max_iter_times = 2
    df_split = np.array_split(df, max_iter_times)
    rules_tobe_validate = rules_seed
    for i in range(0, max_iter_times):
        print('-' * 104)
        print('Reinforce iteration loop %d' % (i + 1))
        print('Seed rules number: %d' % rules_tobe_validate.shape[0])
        df_for_reinforce = df_split[i].sample(rein_num, random_state=3)
        match_result, rules_tobe_validate = rule_validation(df_for_reinforce, rules_tobe_validate, n_cores=n_cores)
        # # make duplicate removal for each validation
        # rules_tobe_validate = rule_deduplicate(rules_tobe_validate)
        metrics = get_metrics(match_result)
        print(metrics)
        if float(metrics['acc']) > 0.98:
            print("Validation finished, metrics is fine.")
            break
        else:
            # Reinforce with the unrecognized malicious flows according to validation results
            df_rein = match_result.loc[(match_result.match == 0) & (match_result.target == label_map['malicious'])][['text', 'target']]
            df_rein['text'] = df_rein['text'].astype(str)
            result_rein = get_rules(df_rein, model, label_map, 'malicious', lime_flag=lime_flag,
                                    scan_flag=scan_flag, content_direction=content_direction, n_cores=n_cores)
            result_final = pd.concat([rules_tobe_validate, result_rein])
            # index start from 1
            result_final.index = np.arange(1, len(result_final) + 1)
            rules_tobe_validate = result_final
            print('New rein rules number: %d' % result_rein.shape[0])
            print('-' * 104)

    df_for_final_eval = df.sample(seed_num, random_state=4)
    match_result, rules_tobe_validate = rule_validation(df_for_final_eval, rules_tobe_validate,
                                                        final_flag=True, n_cores=n_cores)
    if rules_tobe_validate.shape[0] == 0:
        print("Rule extraction failed!!!!!")
        return 0, 0, 0
    else:
        print('The final results are:')
        print('@' * 104)
        matched_rules = get_final_rules(match_result, rules_tobe_validate)
        metrics = get_metrics(match_result)
        print(metrics)
        print(matched_rules)
        print('@' * 104)
        print("Final validation finished")
        return match_result, rules_tobe_validate, matched_rules
9ed0d5653797544de384c41ef6d9e402d2a57403
3,659,464
def login(): """ Typical login page """ # if current user is already logged in, then don't log in again if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): user = User.query.filter_by(username=form.username.data).first() if user is None or not user.check_password(form.password.data): flash('Invalid username or password') return redirect(url_for('login')) # user exists and password is correct login_user(user, remember=form.remember_me.data) # if user came from a local page, then return them to that # page after authentication ... else go to /index next_page = request.args.get('next') if not next_page or url_parse(next_page).netloc != '': next_page = url_for('index') return redirect(next_page) # GET just renders the empty login screen return render_template('login.html', title='Sign In', form=form)
e4114979a6b5b5845f32442bb66ee0798357f4e7
3,659,465
def create_timeperiod_map( start: spec.Timestamp = None, end: spec.Timestamp = None, length: spec.Timelength = None, ) -> spec.TimeperiodMap: """create Timeperiod with representation TimeperiodMap ## Inputs - start: Timestamp - end: Timestamp - length: Timelength ## Returns - TimeperiodMap """ start, end = compute_start_end(start=start, end=end, length=length) return {'start': start, 'end': end}
c8087ea252e86b97c55376bfb21b93c2b50e3b19
3,659,466
import requests async def patched_send_async(self, *args, **kwargs): """Patched send function that push to queue idx of server to which request is routed.""" buf = args[0] if buf and len(buf) >= 6: op_code = int.from_bytes(buf[4:6], byteorder=PROTOCOL_BYTE_ORDER) # Filter only caches operation. if 1000 <= op_code < 1100: requests.append(self.port % 100) return await old_send_async(self, *args, **kwargs)
c78c9b437547266b4bfa82627c45e3c7c6450049
3,659,467
from datetime import datetime def add_event_records(df, event_type, event_date): """Add event records for the event type.""" log(f'Adding {DATASET_ID} event records for {event_type}') this_year = datetime.now().year df = df.loc[df[event_date].notnull(), :].copy() df['event_id'] = db.create_ids(df, 'events') df['dataset_id'] = DATASET_ID df['year'] = df[event_date].dt.strftime('%Y').astype(int) df['year'] = df['year'].apply(lambda x: x - 100 if x > this_year else x) df['day'] = df[event_date].dt.strftime('%j').astype(int) df['event_type'] = event_type df['event_json'] = util.json_object(df, EVENT_FIELDS) df.loc[:, db.EVENT_FIELDS].to_sql( 'events', db.connect(), if_exists='append', index=False) return df
d3e804d9b24274e5a87e1e470f1f758214e1f805
3,659,468
def _renderPath(path,drawFuncs,countOnly=False,forceClose=False): """Helper function for renderers.""" # this could be a method of Path... points = path.points i = 0 hadClosePath = 0 hadMoveTo = 0 active = not countOnly for op in path.operators: if op == _MOVETO: if forceClose: if hadMoveTo and pop!=_CLOSEPATH: hadClosePath += 1 if active: drawFuncs[_CLOSEPATH]() hadMoveTo += 1 nArgs = _PATH_OP_ARG_COUNT[op] j = i + nArgs drawFuncs[op](*points[i:j]) i = j if op == _CLOSEPATH: hadClosePath += 1 pop = op if forceClose and hadMoveTo and pop!=_CLOSEPATH: hadClosePath += 1 if active: drawFuncs[_CLOSEPATH]() return hadMoveTo == hadClosePath
17a2fc3224b2ba80de9dee0110468c4d934281b7
3,659,469
def _search_focus(s, code=None): """ Search for a particular module / presentation. The search should return only a single item. """ if not code: code = input("Module code (e.g. TM129-17J): ") results = _search_by_code(s, code) if not len(results): print('Nothing found for "{}"'.format(code)) elif len(results) > 1: print( "Please be more specific:\n\t{}\n".format( "\n\t".join([r[0].split(" ")[0] for r in results]) ) ) else: return results[0] return (None, None)
8eec36dbe48c1825d742c9834776a7a0705429b6
3,659,470
import numpy as np


def parse_line(sample):
    """Parse an ndjson line and return ink (as np array) and classname."""
    class_name = sample["word"]
    inkarray = sample["drawing"]
    stroke_lengths = [len(stroke[0]) for stroke in inkarray]
    total_points = sum(stroke_lengths)
    np_ink = np.zeros((total_points, 3), dtype=np.float32)
    current_t = 0
    for stroke in inkarray:
        for i in [0, 1]:
            np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i]
        current_t += len(stroke[0])
        np_ink[current_t - 1, 2] = 1  # stroke_end
    # Preprocessing.
    # 1. Size normalization.
    lower = np.min(np_ink[:, 0:2], axis=0)
    upper = np.max(np_ink[:, 0:2], axis=0)
    scale = upper - lower
    scale[scale == 0] = 1
    np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale
    # 2. Compute deltas.
    np_ink = np_ink[1:, 0:2] - np_ink[0:-1, 0:2]
    return np_ink, class_name
19d20f7e67b58d699c0aea47f1f03095a957f757
3,659,471
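Assumed usage on a toy Quick, Draw!-style record (two strokes of three points each); the output shapes follow from the code above:

sample = {
    "word": "line",
    "drawing": [
        [[0, 5, 10], [0, 0, 0]],    # stroke 1: x coords, y coords
        [[10, 5, 0], [10, 10, 10]]  # stroke 2
    ],
}
ink, label = parse_line(sample)
print(label)      # 'line'
print(ink.shape)  # (5, 2): 6 points -> 5 deltas, x/y only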
def evalRPN(self, tokens):  # evaluate Reverse Polish Notation, mainly using a stack
    """
    :type tokens: List[str]
    :rtype: int
    """
    stack = []
    for item in tokens:
        # print(stack)
        if item.isdigit():
            stack.append(int(item))
        if item[0] == '-' and len(item) > 1 and item[1:].isdigit():
            stack.append(int(item))
        if item == '*':
            num1 = stack.pop()
            num2 = stack.pop()
            stack.append(num1 * num2)
        if item == '/':
            num1 = stack.pop()
            num2 = stack.pop()
            stack.append(int(num2 / num1))
        if item == '+':
            num1 = stack.pop()
            num2 = stack.pop()
            stack.append(num1 + num2)
        if item == '-':
            num1 = stack.pop()
            num2 = stack.pop()
            stack.append(num2 - num1)
    return stack[0]
6b2050f6f635324878116371cd81a6d25ea31240
3,659,472
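Assumed usage with LeetCode 150-style inputs; since `self` is unused, passing None works for a quick standalone check:

print(evalRPN(None, ["2", "1", "+", "3", "*"]))   # ((2 + 1) * 3) = 9
print(evalRPN(None, ["4", "13", "5", "/", "+"]))  # 4 + (13 / 5) = 6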
def _validate_flags(): """Returns True if flag values are valid or prints error and returns False.""" if FLAGS.list_ports: print("Input ports: '%s'" % ( "', '".join(midi_hub.get_available_input_ports()))) print("Ouput ports: '%s'" % ( "', '".join(midi_hub.get_available_output_ports()))) return False if FLAGS.bundle_files is None: print('--bundle_files must be specified.') return False if (len(FLAGS.bundle_files.split(',')) > 1 and FLAGS.generator_select_control_number is None): tf.logging.warning( 'You have specified multiple bundle files (generators), without ' 'setting `--generator_select_control_number`. You will only be able to ' 'use the first generator (%s).', FLAGS.bundle_files[0]) return True
812791a8c71cc354a1ebe32f3fa9a3cc0f1c0182
3,659,473
def proto_test(test): """ If test is a ProtoTest, I just return it. Otherwise I create a ProtoTest out of test and return it. """ if isinstance(test, ProtoTest): return test else: return ProtoTest(test)
3326ea07ae5e4f90d3ae49cedee7b16aa97a3c65
3,659,474
def get_frames():
    """Get frames for an episode

    Params:
        episode: int
        The episode for which the frames shall be returned

    Returns:
        frames: dict
        The frames for an episode per timestep
    """
    # was request.args.get('user'), which contradicted the documented parameter name
    episode = int(request.args.get('episode'))
    frames = data_preprocessor.get_frames_for_episode(episode)
    return frames, 200, JSON_TYPE
1180c38175ef07f5e58ce8b77d748f6c1c1ab17b
3,659,475
def remove(s1, s2):
    """
    Returns a copy of s1, with all characters in s2 removed.

    Examples:
        remove('abc','ab') returns 'c'
        remove('abc','xy') returns 'abc'
        remove('hello world','ol') returns 'he wrd'

    Parameter s1: the string to copy
    Precondition: s1 is a string

    Parameter s2: the characters to remove
    Precondition: s2 is a string
    """
    # isinstance takes the type as a second argument; `isinstance(s1) == str` is a TypeError
    assert isinstance(s1, str)
    assert isinstance(s2, str)
    result = ''
    for x in s1:
        if not x in s2:
            result = result + x
    return result
089107767063309d1cc34360ae290e7fa74133e7
3,659,476
import re import os def get_firebase_db_url(): """Grabs the databaseURL from the Firebase config snippet. Regex looks scary, but all it is doing is pulling the 'databaseURL' field from the Firebase javascript snippet""" regex = re.compile(r'\bdatabaseURL\b.*?["\']([^"\']+)') cwd = os.path.dirname(__file__) try: with open(os.path.join(cwd, 'templates', config.FIREBASE_CONFIG)) as f: url = next(regex.search(line) for line in f if regex.search(line)) except StopIteration: raise ValueError( 'Error parsing databaseURL. Please copy Firebase web snippet ' 'into templates/{}'.format(config.FIREBASE_CONFIG)) return url.group(1)
aafc688c20adc060046ebd96b047741bedae600f
3,659,477
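The regex above pulls the quoted value that follows the databaseURL key; a quick self-contained demonstration (the sample line mimics a Firebase web config snippet, and the URL value is an example, not from the original project):

import re

regex = re.compile(r'\bdatabaseURL\b.*?["\']([^"\']+)')
line = '  databaseURL: "https://my-app.firebaseio.com",'
match = regex.search(line)
print(match.group(1))  # https://my-app.firebaseio.com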
def get_issuer_plan_ids(issuer): """Given an issuer id, return all of the plan ids registered to that issuer.""" df = pd.read_csv(PATH_TO_PLANS) df = df[df.IssuerId.astype(str) == issuer] return set(df.StandardComponentId.unique())
b41b36b70000736acde63673961f92231a62f9a4
3,659,478
def add_args(parser): """ parser : argparse.ArgumentParser return a parser added with args required by fit """ # Training settings parser.add_argument('--model', type=str, default='mobilenet', metavar='N', help='neural network used in training') parser.add_argument('--dataset', type=str, default='cifar10', metavar='N', help='dataset used for training') parser.add_argument('--data_dir', type=str, default='./../../../data/cifar10', help='data directory') parser.add_argument('--partition_method', type=str, default='hetero', metavar='N', help='how to partition the dataset on local workers') parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA', help='partition alpha (default: 0.5)') parser.add_argument('--defense_type', type=str, default='weak_dp', metavar='N', help='the robust aggregation method to use on the server side') parser.add_argument('--norm_bound', type=str, default=30.0, metavar='N', help='the norm bound of the weight difference in norm clipping defense.') parser.add_argument('--stddev', type=str, default=0.025, metavar='N', help='the standard deviation of the Gaussian noise added in weak DP defense.') parser.add_argument('--client_num_in_total', type=int, default=1000, metavar='NN', help='number of workers in a distributed cluster') parser.add_argument('--client_num_per_round', type=int, default=4, metavar='NN', help='number of workers') #parser.add_argument('--attack_method', type=str, default="blackbox", # help='describe the attack type: blackbox|pgd|graybox|no-attack|') parser.add_argument('--poison_type', type=str, default='southwest', help='specify source of data poisoning: |ardis|(for EMNIST), |southwest|howto|(for CIFAR-10)') # TODO(hwang): we will add PGD attack soon, stay tuned! #parser.add_argument('--adv_lr', type=float, default=0.02, # help='learning rate for adv in PGD setting') parser.add_argument('--attack_freq', type=int, default=10, help='a single adversary per X federated learning rounds e.g. 10 means there will be an attacker in each 10 FL rounds.') parser.add_argument('--batch_size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)') parser.add_argument('--client_optimizer', type=str, default='adam', help='SGD with momentum; adam') parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.001)') parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.001) parser.add_argument('--epochs', type=int, default=5, metavar='EP', help='how many epochs will be trained locally') parser.add_argument('--comm_round', type=int, default=10, help='how many round of communications we shoud use') parser.add_argument('--is_mobile', type=int, default=0, help='whether the program is running on the FedML-Mobile server side') parser.add_argument('--frequency_of_the_test', type=int, default=1, help='the frequency of the algorithms') parser.add_argument('--gpu_server_num', type=int, default=1, help='gpu_server_num') parser.add_argument('--gpu_num_per_server', type=int, default=4, help='gpu_num_per_server') args = parser.parse_args() return args
e1e2d1e61976b8f2dea6d6ab5f928b72bcdd15a5
3,659,479
def parse_coords(lines): """Parse skbio's ordination results file into coords, labels, eigvals, pct_explained. Returns: - list of sample labels in order - array of coords (rows = samples, cols = axes in descending order) - list of eigenvalues - list of percent variance explained For the file format check skbio.math.stats.ordination.OrdinationResults.from_file Strategy: read the file using skbio's parser and return the objects we want """ try: pcoa_results = OrdinationResults.from_file(lines) return (pcoa_results.site_ids, pcoa_results.site, pcoa_results.eigvals, pcoa_results.proportion_explained) except FileFormatError: if type(lines) == file: lines.seek(0) return qiime_parse_coords(lines)
fec53839f5f995f94f07120cac5bab1ba66f7b4c
3,659,480
def run_ann(model, train, test, params_save_path, iteration, optimizer, loss, callbacks=None, valid=None, shuffle_training=True, batch_size=16, num_epochs=30): """ Run analog network with cross-validation :param batch_size: batch size during training :param model: reference to the tensorflow model :param train: pair of training data (x_train, y_train) :param valid: pair of validation data (x_val, y_val) :param test: pair of testing data (x_test, y_test) :param params_save_path: output path to save weights of the network :param iteration: number of the iteration in CV :param shuffle_training: shuffle samples :param num_epochs: number of epochs to train for :return: accuracy, precision, recall, f1 and confusion matrix from the testing data """ x_train, y_train = train[0], train[1] x_test, y_test = test[0], test[1] if valid is not None: x_valid, y_valid = valid[0], valid[1] converter = nengo_dl.Converter(model) with nengo_dl.Simulator(converter.net, minibatch_size=batch_size) as simulator: simulator.compile(optimizer=optimizer, loss=loss, metrics=['accuracy']) input_layer = converter.inputs[model.get_layer('input_layer')] # get the input layer reference output_layer = converter.outputs[model.get_layer('output_layer')] # get the output layer reference # fit the model with the training data simulator.fit( x={input_layer: x_train}, y={output_layer: y_train}, validation_data=( {input_layer: x_valid}, {output_layer: y_valid} ) if valid is not None else None, epochs=num_epochs, shuffle=shuffle_training, callbacks=callbacks # early stop to avoid overfitting ) simulator.save_params(params_save_path) # save weights to the file # Get the statistics accuracy, precision, recall, f1, confusion_matrix = get_metrics(simulator, output_layer, x_test, y_test, batch_size, f'{iteration}. CNN') return { 'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'confusion_matrix': confusion_matrix }
9df68d8c6cdf6df08177bd1cc5d3116c10ae073e
3,659,481
def get_sector(session, sector_name=None, sector_id=None): """ Get a sector by it's name or id. """ return get_by_name_or_id(session, Sector, model_id=sector_id, name=sector_name)
69de99bbdd630fb0cc5412c2b3124dff819287ed
3,659,482
def is_valid_pre_6_2_version(xml): """Returns whether the given XML object corresponds to an XML output file of Quantum ESPRESSO pw.x pre v6.2 :param xml: a parsed XML output file :return: boolean, True when the XML was produced by Quantum ESPRESSO with the old XML format """ element_header = xml.find('HEADER') if element_header is None: return False element_format = element_header.find('FORMAT') if element_format is None: return False try: name = element_format.attrib['NAME'] except KeyError: return False if name != 'QEXML': return False return True
80bda73addc68a88b2a1dc5828c0553cbaf7e6f2
3,659,483
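A quick self-contained check using xml.etree (assumed usage; the toy XML mirrors the HEADER/FORMAT layout the function inspects):

import xml.etree.ElementTree as ET

old_xml = ET.fromstring('<Root><HEADER><FORMAT NAME="QEXML"/></HEADER></Root>')
new_xml = ET.fromstring('<Root/>')

print(is_valid_pre_6_2_version(old_xml))  # True
print(is_valid_pre_6_2_version(new_xml))  # False (no HEADER element)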
import warnings


def exportdf(df=None, refout: str = None, to: str = None, savepath: str = None,
             modname: str = '_wexported_', reset_index: bool = True):
    """
    Export dataframe ``df`` to `refout` files. `refout` file can be an
    Excel sheet file or a '.json' file. To get more details about the
    `writef` decorator, see :doc:`watex.utils.decorator.writef`.

    :param refout: Output filename. If not given, will be created
        referring to the export date.
    :param to: Export type; can be `.xlsx`, `.csv`, `.json` and else.
    :param savepath: Path to save the `refout` filename. If not given,
        will be created.
    :param modname: Folder to hold the `refout` file. Change it accordingly.

    :returns:
        - `df_`: new dataframe to be exported.
    """
    if df is None:
        warnings.warn(
            'When the ``df`` argument of the decorator :class:`~decorator.writef`'
            ' is selected, the main type of file ready to be written MUST be '
            'a pd.DataFrame format. If not, an error raises. Please refer to '
            ':doc:`~.utils.decorator.writef` for more details.')
        raise Wex.WATexError_file_handling(
            'No dataframe detected. Please provide your dataFrame.')

    df_ = df.copy(deep=True)
    if reset_index is True:
        df_.reset_index(inplace=True)
    if savepath is None:
        savepath = savePath(modname)

    return df_, to, refout, savepath, reset_index
0bc6d2750f236c5f3e529b2489be47658ddbf2d9
3,659,484
def clean_bpoa_seniority_list(csv): """Clean a digitized BPOA seniority list.""" dirty = pd.read_csv(csv) clean = pd.DataFrame() clean["job_title"] = dirty["Rank"] clean["last_name"] = dirty["Last name"] clean["first_name"] = dirty["First Name"] clean = clean.apply(correct_name, axis=1) clean["star_no"] = dirty["Badge No."] clean["employment_date"] = dirty["Hire Date"].apply(pd.to_datetime) return clean
b1af748d92c4cdced4a77fd3799dada318c0f57e
3,659,485
def topk(table, metric, dimensions, is_asc, k, **kwargs): """ This function returns both the results according to the intent as well as the debiasing suggestions. Some of the oversights considered in this intent are- 1. Regression to the mean 2. Looking at tails to find causes - TODO Args: table: Type-pandas.dataframe It has the contents of the csv file metric: Type-string It is the name of the column according to which we sort, and in the case when grouping has to be done, summary operator is applied on metric. Metric could a column containing strings, if we are applying count operator on it. dimensions: Type-list of str It is the name of column we want. In query:'top 5 batsman according to runs', dimension is 'batsman'. When summary_operator is not None, we group by dimensions. is_asc: Type-Bool Denotes the sort order, True for ascending, False for Descending k: Type-int It is the number of entries to be taken date_range: Type-tuple Tuple of start_date and end_date date_column_name: Type-str It is the name of column which contains date date_format: Type-str It is required by datetime.strp_time to parse the date in the format Format Codes https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior slices: Type-List of tuples Tuple represents the conditon to keep the row. (column_name, filter, value) column_name - is the value of the column that the condition is applied upon. filter - Filters enum members, ex. Filters.IN summary_operator: Type-summary_operators enum members It denotes the summary operator, after grouping by dimensions. ex. SummaryOperators.MAX, SummaryOperators.SUM Note-summary_operator is always applied on metric column passed, and only when grouping is done Returns: The function will return both suggestions and the results in a tuple. (results, suggestions) results: Type - pandas dataframe, The results of the intended top-k suggestions: Type - List of strings, List of suggestions. """ date_column_name = kwargs.get('date_column_name', 'date') date_range = kwargs.get('date_range', None) date_format = kwargs.get('date_format', '%Y-%m-%d') slices = kwargs.get('slices', None) summary_operator = kwargs.get('summary_operator', None) result_table = topk_results(table, metric, dimensions, is_asc, k, date_column_name=date_column_name, date_range=date_range, date_format=date_format, slices=slices, summary_operator=summary_operator) suggestions = [] duplicates_in_topk_suggestion = duplicates_in_topk(result_table, dimensions) if duplicates_in_topk_suggestion is not None: suggestions.append(duplicates_in_topk_suggestion) else: # Check for RMT suggestion only when no duplicates present. rmt_suggestion = regression_to_mean(table, metric, dimensions, is_asc, k, date_column_name=date_column_name, date_range=date_range, date_format=date_format, slices=slices, summary_operator=summary_operator) if rmt_suggestion is not None: suggestions.append(rmt_suggestion) results_without_k_condition = topk_results(table, metric, dimensions, is_asc, -1, date_column_name=date_column_name, date_range=date_range, date_format=date_format, slices=slices, summary_operator=summary_operator) more_than_just_topk_suggestion = more_than_just_topk(results_without_k_condition, k, metric) if more_than_just_topk_suggestion is not None: suggestions.append(more_than_just_topk_suggestion) looking_at_tails_suggestion = looking_at_tails(results_without_k_condition, k, metric) if looking_at_tails_suggestion is not None: suggestions.append(looking_at_tails_suggestion) return (result_table, suggestions)
be8387b349da558d07fdb86fc8261f9153869028
3,659,486
def addMovieElement(findings, data): """ Helper Function which handles unavailable information for each movie""" if len(findings) != 0: data.append(findings[0]) else: data.append("") return data
af3c45c8b8d4c0cb7ba1cac4925d0f5998affe93
3,659,487
from typing import Optional def get_bst_using_min_and_max_value(preorder: list) -> Node: """ time complexity: O(n) space complexity: O(n) """ def construct_tree(min_: int, max_: int) -> Optional[Node]: nonlocal pre_index nonlocal l if pre_index >= l: return None value = preorder[pre_index] if min_ < value < max_: node = Node(value) pre_index += 1 node.left = construct_tree(min_, value) node.right = construct_tree(value, max_) return node return None pre_index: int = 0 l: int = len(preorder) return construct_tree(-1_000_000, 1_000_000)
809c74967e73c82a428f317d8551432bb392d5ea
3,659,488
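Assumed usage with a minimal Node class (the original file defines its own Node elsewhere); the preorder sequence is a classic BST example:

class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

root = get_bst_using_min_and_max_value([10, 5, 1, 7, 40, 50])
print(root.value, root.left.value, root.right.value)  # 10 5 40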
import math


def qwtStepSize(intervalSize, maxSteps, base):
    """This version often doesn't find the best ticks: e.g. for 15: 5, 10"""
    minStep = divideInterval(intervalSize, maxSteps, base)
    if minStep != 0.0:
        # # ticks per interval
        numTicks = math.ceil(abs(intervalSize / minStep)) - 1
        # Do the minor steps fit into the interval?
        if (
            qwtFuzzyCompare(
                (numTicks + 1) * abs(minStep), abs(intervalSize), intervalSize
            )
            > 0
        ):
            # The minor steps don't fit into the interval
            return 0.5 * intervalSize
    return minStep
57d1c4140e32dbf4a8bd0e306b9c10d4e9dae9bd
3,659,489
def get_trimmed_glyph_name(gname, num): """ Glyph names cannot have more than 31 characters. See https://docs.microsoft.com/en-us/typography/opentype/spec/... recom#39post39-table Trims an input string and appends a number to it. """ suffix = '_{}'.format(num) return gname[:31 - len(suffix)] + suffix
a5e90163d15bd4fc0b315414fffd2ac227768ab0
3,659,490
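A quick check of the 31-character limit (assumed usage):

long_name = "a_very_long_glyph_name_that_exceeds_the_spec_limit"
trimmed = get_trimmed_glyph_name(long_name, 7)
print(trimmed)       # first 29 chars + '_7'
print(len(trimmed))  # 31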
def vmatrix(ddir, file_prefix): """ generate vmatrix DataFile """ name = autofile.name.vmatrix(file_prefix) writer_ = autofile.write.vmatrix reader_ = autofile.read.vmatrix return factory.DataFile(ddir=ddir, name=name, writer_=writer_, reader_=reader_)
b9303e08f10e0604fde7b40116b74e66aac553dc
3,659,491
import os def fetch_precision_overlay(precision): """ Returns the overlay for the given precision value as cv2 image. """ overlay_folder = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../assets/precision_overlays' ) img_path = os.path.join( overlay_folder, f'overlay_{str(int(precision*100)).zfill(3)}.png' ) assert os.path.isfile(img_path), f'overlay does not exist at {img_path}' return cv2.imread(img_path)
7d8e8c82676bc4686f9b08b171a6deb60fb60a9e
3,659,492
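The zero-padded filename scheme above maps precision values to asset names; a quick illustration of the path construction (assumed values):

for precision in (0.5, 0.75, 1.0):
    print(f'overlay_{str(int(precision * 100)).zfill(3)}.png')
# overlay_050.png
# overlay_075.png
# overlay_100.png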
import ast from typing import Callable from typing import MutableMapping from typing import Union import inspect def get_argument_sources( source: Source, node: ast.Call, func: Callable, vars_only: bool, pos_only: bool ) -> MutableMapping[str, Union[ast.AST, str]]: """Get the sources for argument from an ast.Call node >>> def func(a, b, c, d=4): >>> ... >>> x = y = z = 1 >>> func(y, x, c=z) >>> # argument_sources = {'a': 'y', 'b', 'x', 'c': 'z'} >>> func(y, x, c=1) >>> # argument_sources = {'a': 'y', 'b', 'x', 'c': ast.Num(n=1)} """ # <Signature (a, b, c, d=4)> signature = inspect.signature(func, follow_wrapped=False) # func(y, x, c=z) # ['y', 'x'], {'c': 'z'} arg_sources = [argnode_source(source, argnode, vars_only) for argnode in node.args] kwarg_sources = { argnode.arg: argnode_source(source, argnode.value, vars_only) for argnode in node.keywords } if not pos_only else {} bound_args = signature.bind_partial(*arg_sources, **kwarg_sources) argument_sources = bound_args.arguments # see if *args and **kwargs have anything assigned # if not, assign () and {} to them for parameter in signature.parameters.values(): if parameter.kind == inspect.Parameter.VAR_POSITIONAL: argument_sources.setdefault(parameter.name, ()) if parameter.kind == inspect.Parameter.VAR_KEYWORD: argument_sources.setdefault(parameter.name, {}) return argument_sources
1ab344b5ccf9754ade06210e74540db51fe8c671
3,659,493
def _register_dataset(service, dataset, compression): """Registers a dataset with the tf.data service. This transformation is similar to `register_dataset`, but supports additional parameters which we do not yet want to add to the public Python API. Args: service: A string or a tuple indicating how to connect to the tf.data service. If it's a string, it should be in the format `[<protocol>://]<address>`, where `<address>` identifies the dispatcher address and `<protocol>` can optionally be used to override the default protocol to use. If it's a tuple, it should be (protocol, address). dataset: A `tf.data.Dataset` to register with the tf.data service. compression: How to compress the dataset's elements before transferring them over the network. "AUTO" leaves the decision of how to compress up to the tf.data service runtime. `None` indicates not to compress. Returns: A scalar int64 tensor of the registered dataset's id. """ _validate_compression(compression) if isinstance(service, tuple): protocol, address = service else: protocol, address = _parse_service(service) external_state_policy = dataset.options().experimental_external_state_policy if external_state_policy is None: external_state_policy = ExternalStatePolicy.WARN encoded_spec = "" if context.executing_eagerly(): coder = nested_structure_coder.StructureCoder() encoded_spec = coder.encode_structure( dataset.element_spec).SerializeToString() if compression == COMPRESSION_AUTO: dataset = dataset.map( lambda *x: compression_ops.compress(x), num_parallel_calls=dataset_ops.AUTOTUNE) dataset = dataset.prefetch(dataset_ops.AUTOTUNE) dataset = dataset._apply_debug_options() # pylint: disable=protected-access dataset_id = gen_experimental_dataset_ops.register_dataset( dataset._variant_tensor, # pylint: disable=protected-access address=address, protocol=protocol, external_state_policy=external_state_policy.value, element_spec=encoded_spec) return dataset_id
e95edfeaccc324bf7d732658846a3ef25c1a371c
3,659,494
def rivers_by_station_number(stations, N):
    """Uses stations_by_rivers to build a river -> stations dictionary, then
    iterates over the rivers, collecting (river, station count) tuples and
    returning the N rivers with the most stations (extended past ties)."""
    stationsOfRivers = stations_by_rivers(stations)
    listOfNumberStations = []
    for river in stationsOfRivers:
        listOfNumberStations.append((river, len(stationsOfRivers[river])))
    listofNumberStationsSorted = sorted_by_key(listOfNumberStations, 1, True)
    # extend N past ties so rivers with equal counts are not cut off arbitrarily
    boo = True
    while boo == True:
        if listofNumberStationsSorted[N - 1][1] == listofNumberStationsSorted[N][1]:
            N += 1
        else:
            boo = False
    return listofNumberStationsSorted[:N]
ca159843f10cbadf5a35529c45656121672972e0
3,659,495
import itertools


def generate_itoa_dict(
        bucket_values=[-0.33, 0, 0.33], valid_movement_direction=[1, 1, 1, 1]):
    """
    Set cartesian product to generate action combination
    spaces for the fetch environments.

    valid_movement_direction: per-axis mask; 1 keeps the bucket values for
    that axis, 0 pins the axis to zero.
    """
    action_space_extended = [bucket_values if m == 1 else [0]
                             for m in valid_movement_direction]
    return list(itertools.product(*action_space_extended))
b8264174857aeb9d64226cce1cd1625f7e65b726
3,659,496
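Assumed usage: two free axes with three bucket values each, the other two axes frozen at zero:

actions = generate_itoa_dict(
    bucket_values=[-0.33, 0, 0.33],
    valid_movement_direction=[1, 1, 0, 0],
)
print(len(actions))  # 9 = 3 * 3 * 1 * 1
print(actions[0])    # (-0.33, -0.33, 0, 0)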
import datetime
from decimal import Decimal  # needed below; missing from the original imports

import dateutil.parser


def try_convert(value, datetime_to_ms=False, precise=False):
    """Convert a str into a more useful python type (datetime, float, int, bool), if possible

    Some precision may be lost (e.g. Decimal converted to a float)

    >>> try_convert('false')
    False
    >>> try_convert('123456789.123456')
    123456789.123456
    >>> try_convert('1234')
    1234
    >>> try_convert(1234)
    1234
    >>> try_convert(['1234'])
    ['1234']
    >>> try_convert('12345678901234567890123456789012345678901234567890', precise=True)
    12345678901234567890123456789012345678901234567890L
    >>> try_convert('12345678901234567890123456789012345678901234567890.1', precise=True)
    Decimal('12345678901234567890123456789012345678901234567890.1')
    """
    if not isinstance(value, basestring):
        return value
    if value in db.YES_VALUES or value in db.TRUE_VALUES:
        return True
    elif value in db.NO_VALUES or value in db.FALSE_VALUES:
        return False
    elif value in db.NULL_VALUES:
        return None
    try:
        if not precise:
            try:
                return int(value)
            except:
                try:
                    return float(value)
                except:
                    pass
        else:
            dec, i, f = None, None, None
            try:
                dec = Decimal(value)
            except:
                return try_convert(value, precise=False)
            try:
                i = int(value)
            except:
                try:
                    f = float(value)
                except:
                    pass
            if dec is not None:
                if dec == i:
                    return i
                elif dec == f:
                    return f
                return dec
    except:
        pass
    try:
        # was `dateutil.parse(value)`; the parse function lives in the dateutil.parser submodule
        dt = dateutil.parser.parse(value)
        if dt and isinstance(dt, datetime.datetime) and (3000 >= dt.year >= 1900):
            if datetime_to_ms:
                return db.datetime_in_milliseconds(dt)
            return dt
    except:
        pass
    return value
59f8a16310e4ac6604a145dcff1ff390df259da9
3,659,497
def signin(request, auth_form=AuthenticationForm, template_name='accounts/signin_form.html', redirect_field_name=REDIRECT_FIELD_NAME, redirect_signin_function=signin_redirect, extra_context=None): """ Signin using email or username with password. Signs a user in by combining email/username with password. If the combination is correct and the user :func:`is_active` the :func:`redirect_signin_function` is called with the arguments ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is is trying the login. The returned value of the function will be the URL that is redirected to. A user can also select to be remembered for ``ACCOUNTS_REMEMBER_DAYS``. :param auth_form: Form to use for signing the user in. Defaults to the :class:`AuthenticationForm` supplied by accounts. :param template_name: String defining the name of the template to use. Defaults to ``accounts/signin_form.html``. :param redirect_field_name: Form field name which contains the value for a redirect to the succeeding page. Defaults to ``next`` and is set in ``REDIRECT_FIELD_NAME`` setting. :param redirect_signin_function: Function which handles the redirect. This functions gets the value of ``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It must return a string which specifies the URI to redirect to. :param extra_context: A dictionary containing extra variables that should be passed to the rendered template. The ``form`` key is always the ``auth_form``. **Context** ``form`` Form used for authentication supplied by ``auth_form``. """ form = auth_form() if request.method == 'POST': form = auth_form(request.POST, request.FILES) if form.is_valid(): identification = form.cleaned_data['identification'] password = form.cleaned_data['password'] remember_me = form.cleaned_data['remember_me'] user = authenticate(identification=identification, password=password) if user.is_active: login(request, user) if remember_me: request.session.set_expiry(accounts_settings.ACCOUNTS_REMEMBER_ME_DAYS[1] * 86400) else: request.session.set_expiry(0) if accounts_settings.ACCOUNTS_USE_MESSAGES: messages.success(request, _('You have been signed in.'), fail_silently=True) # Whereto now? redirect_to = redirect_signin_function( request.GET.get(redirect_field_name), user) return redirect(redirect_to) else: return redirect(reverse('accounts_disabled', kwargs={'username': user.username})) if not extra_context: extra_context = dict() extra_context.update({ 'form': form, 'next': request.GET.get(redirect_field_name), }) return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
6a8536fb3a0c551ae4cdb7f01de622c012d0734c
3,659,498
import random def run_syncdb(database_info): """Make sure that the database tables are created. database_info -- a dictionary specifying the database info as dictated by Django; if None then the default database is used Return the identifier the import process should use. """ django.setup() dataset_identifier = 'default' if database_info: # create an entry in DATABASES if database_info is present dataset_identifier = '12345' while dataset_identifier in settings.DATABASES: dataset_identifier = str(random.randint(1, 2000000)) settings.DATABASES[dataset_identifier] = database_info call_command('migrate', database=dataset_identifier) return dataset_identifier
19da3e97226363fbee885ff8ee24c7abe0489d3c
3,659,499