code
stringlengths
57
46.3k
quality_prob
float64
0.7
0.99
learning_prob
float64
0.5
1
import torch


def zero_loss(*args, **kwargs):
    """Dummy loss that ignores every input and always evaluates to zero.

    Parameters
    ----------
    args : tuple
        Arbitrary positional arguments (unused).
    kwargs : dict
        Arbitrary keyword arguments (unused).

    Returns
    -------
    torch.Tensor
        A scalar tensor holding the value 0.
    """
    zero = torch.tensor(0)
    return zero
0.807916
0.547162
def improved_euler_method(f, t0: float, x0: float, timestep: float, end: float, exact_solution=None):
    """Numerically solve the autonomous ODE x' = f(x) with the improved Euler
    (Heun) scheme.

    Parameters
    ----------
    f : callable
        Right-hand side of the differential equation, evaluated as ``f(x)``.
    t0 : float
        Initial time.
    x0 : float
        Initial condition, i.e. x(t0).
    timestep : float
        Step size used by the integrator.
    end : float
        Final time up to which the solution is advanced (the last step may
        overshoot ``end`` by less than one ``timestep``).
    exact_solution : callable, optional
        If given, it is evaluated at every time step so convergence of the
        numerical solution can be checked.

    Returns
    -------
    (list, list, list or None)
        The time steps, the numerical values, and the exact values (or
        ``None`` when no exact solution was supplied).

    Raises
    ------
    ValueError
        If ``end`` lies before ``t0``.
    """
    if end < t0:
        raise ValueError("Initial time is larger than the end time!")

    time_steps = [t0]
    values = [x0]
    # Track the reference solution alongside, when one was provided.
    exact_values = [exact_solution(t0)] if exact_solution else None

    t, x = t0, x0
    while t < end:
        t = t + timestep
        # Predictor: plain Euler step; corrector: trapezoidal average of slopes.
        predictor = x + f(x) * timestep
        x = x + 0.5 * (f(x) + f(predictor)) * timestep
        time_steps.append(t)
        values.append(x)
        if exact_solution:
            exact_values.append(exact_solution(t))

    return time_steps, values, exact_values
0.928287
0.921922
def normalize(data):
    """
    Normalize data set to have zero mean and unit variance.

    Args
        data: A numpy array of arrays containing input or target data.

    Returns
        A normalized numpy array of arrays.
    """
    # NOTE(review): dividing by the variance only yields *unit variance* when
    # the variance happens to equal 1; unit variance would require dividing by
    # the standard deviation (``data.std(axis=0)``). Behavior kept as-is —
    # confirm which normalization downstream code was fitted against.
    return (data - data.mean(axis=0)) / data.var(axis=0)
0.897718
0.930363
def axesMovementsDict(T_axes, point):
    """A dictionary to compute movement in Tonnetz Space."""
    # `point` is the grid position of the previous note; each entry of
    # `T_axes` is the pitch interval attached to one axis of the Tonnetz.
    x, y = point
    moves = {0: (x, y)}
    moves[T_axes[0]] = (x, y + 1)
    moves[T_axes[1]] = (x + 1, y)
    moves[T_axes[2]] = (x - 1, y - 1)
    # Complementary intervals (mod 12) move in the opposite directions.
    moves[12 - T_axes[0]] = (x, y - 1)
    moves[12 - T_axes[1]] = (x - 1, y)
    moves[12 - T_axes[2]] = (x + 1, y + 1)
    return moves
0.753013
0.81928
def axesMovementsDictDistancePlus(T_axes, point):
    """A dict to compute movement in Tonnetz for distances bigger than one."""
    x, y = point
    # the point represents the position of the previous note
    # and T_axes represent the distance.
    # NOTE(review): the last two entries of the literal use the *same* key
    # expression, ``(12 - T_axes[2]) + T_axes[1]``, so the later value
    # ``(x + 1, y + 2)`` silently overwrites ``(x + 2, y + 1)``. Confirm
    # which mapping (or which distinct keys) was intended.
    movementsDict = {
        (T_axes[0] * 2) % 12: (x, y + 2),
        ((12 - T_axes[2]) * 2) % 12: (x + 2, y + 2),
        (T_axes[1] - T_axes[0]) % 12: (x + 1, y - 1),
        (12 - T_axes[2]) + T_axes[1]: (x + 2, y + 1),
        (12 - T_axes[2]) + T_axes[1]: (x + 1, y + 2),
    }
    return movementsDict
0.774754
0.850531
def _unit2coef(strUnit): """ Function returns a unit coefficient based on a unit symbol. Available unit names, symbols and coefficients: (femto): 'f' = 1e-15 (pico): 'p' = 1e-12 (nano): 'n' = 1e-9 (micro): 'u' = 1e-6 (mili): 'm' = 1e-3 (none): ' ' = 1 (kilo): 'k' = 1e3 (Mega): 'M' = 1e6 (Giga): 'G' = 1e9 (Tera): 'T' = 1e12 (hour): 'h' = 3600 Args: strUnit (string): key of the unit Returns: iCoef (int): unit coefficient """ # The name of the function (for error purposes) strFunc = 'rxcs.console._unit2coef' # ---------------------------------------------------------------- # femto if strUnit == 'f': iCoef = 1e-15 # pico elif strUnit == 'p': iCoef = 1e-12 # nano elif strUnit == 'n': iCoef = 1e-9 # micro elif strUnit == 'u': iCoef = 1e-6 # mili elif strUnit == 'm': iCoef = 1e-3 # none elif strUnit == ' ': iCoef = 1 # kilo elif strUnit == 'k': iCoef = 1e3 # Mega elif strUnit == 'M': iCoef = 1e6 # Giga elif strUnit == 'G': iCoef = 1e9 # Tera elif strUnit == 'T': iCoef = 1e12 # hour elif strUnit == 'h': iCoef = 3600 # ---------------------------------------------------------------- # Unknown unit else: strErr = strFunc + ' : ' strErr = strErr + ('> %s < is an unknown unit symbol') % (strUnit) raise Exception(strErr) # ---------------------------------------------------------------- return iCoef
0.70028
0.617628
def upper(x):
    """Helper function: argument x must be a dot. Returns dot above (actually below) x."""
    col, row = x
    return (col, row + 1)
0.708213
0.978467
def identity(x):
    """Identity function: return *x* completely unchanged.

    >>> identity(3)
    3
    """
    return x
0.723602
0.625724
def fill_gaps(df, gap_filler_dict):
    """Fill empty cells of a dataframe with per-column default values.

    Parameters:
        df (pd.DataFrame): the dataframe containing the csv data
        gap_filler_dict (dict): a dictionary with column name and value to
            fill gaps as key value pairs, e.g. {"Age": "All", "Sex": "T"}

    Returns:
        pd.DataFrame: A dataframe with all cells full
    """
    # A dict passed to fillna already maps column name -> fill value along
    # axis 0; combining a dict with axis=1 raises NotImplementedError in
    # pandas, so the axis argument was removed.
    return df.fillna(gap_filler_dict)
0.775435
0.843057
def clean_episode(episode):
    """Standardize the properties of an episode record.

    Different podcasts and podcast parsing logic can result in minor
    differences in the description of episodes. This tries to standardize
    reporting of the podcast attributes.

    @param episode: Dictionary describing a single episode that should be
        standardized.
    @type episode: dict
    @return: Dictionary describing the same episode after standardizing
        reporting of episode properties.
    @rtype: dict
    """
    return {
        'name': episode['name'],
        'date': episode['date'],
        'loc': episode['loc'],
        'duration': episode['duration'],
        # Materialize as a list: under Python 3, map() returns a lazy
        # iterator that would be exhausted after one traversal and cannot
        # be serialized, contradicting the documented dict-of-values return.
        'tags': [tag.lower() for tag in episode['tags']]
    }
0.754553
0.543651
def inv_mod_p(a, p):
    """
    Returns the inverse of a mod p

    Parameters
    ----------
    a : int
        Number to compute the inverse mod p
    p : int(prime)

    Returns
    -------
    m : int
        Integer such that m * a = 1 (mod p)

    Raises
    ------
    ValueError
        If p is not a prime number (detected when gcd(a, p) != 1)
    """
    # Extended Euclidean algorithm: track Bezout coefficients for a and p.
    x0, x1 = 0, 1
    y0, y1 = 1, 0
    b = p
    while a != 0:
        q, b, a = b // a, a, b % a
        y0, y1 = y1, y0 - q * y1
        x0, x1 = x1, x0 - q * x1
    if b != 1:
        # gcd != 1: no inverse exists. Raise with the message instead of
        # printing and raising an empty ValueError.
        raise ValueError("Error:{} is not a prime number".format(p))
    # Normalize the coefficient into the range [0, p).
    if x0 < 0:
        x0 = x0 + p
    return x0
0.909384
0.678689
import torch


def sigmoid_threshold(tensor, threshold=0.5):
    """Apply the sigmoid to ``tensor`` and threshold the result:
    ``out = sigmoid(tensor) > threshold``.

    Arguments:
        tensor (torch.Tensor): the tensor to threshold.
        threshold (scalar or array-like): the threshold value or values.
            Can be a list, tuple, NumPy ndarray, scalar, and other types.
            If array-like, the size must match the size of `tensor`.
            Default: 0.5.

    Returns:
        torch.Tensor: same shape as the input with values {0, 1}.
    """
    cutoff = torch.tensor(threshold, dtype=tensor.dtype).to(tensor.device)
    activated = torch.sigmoid(tensor)
    return activated > cutoff
0.89389
0.81549
def _CalcThroughput(samples): """Calculates the throughput in MiB/second. @type samples: sequence @param samples: List of samples, each consisting of a (timestamp, mbytes) tuple @rtype: float or None @return: Throughput in MiB/second """ if len(samples) < 2: # Can't calculate throughput return None (start_time, start_mbytes) = samples[0] (end_time, end_mbytes) = samples[-1] return (float(end_mbytes) - start_mbytes) / (float(end_time) - start_time)
0.814201
0.557243
def decimate(data, decimation_ratio):
    """Decimate a 1D array: values are dropped, not averaged.

    @param 1d numpy array data : The data to decimate
    @param int decimation_ratio: The number of old values per new value
    @return 1d numpy array : The decimated array
    """
    step = int(decimation_ratio)
    # Trim the tail so only complete groups of `step` samples contribute.
    usable = (len(data) // step) * step
    return data[:usable:step]
0.857231
0.861887
def get_intercept(x, y, slope):
    """Calculate intercept by taking first value."""
    x0 = x[0]
    y0 = y[0]
    return y0 - slope * x0
0.738198
0.562026
def lorentzian(x, gamma, mu, A, B):
    """Lorentzian line shape: A is the maximum of the peak, mu the center,
    gamma the FWHM, and B a constant background."""
    reduced = 2 * (x - mu) / gamma
    return 1 / (1 + reduced ** 2) * A + B
0.774498
0.915583
def lmfit_lorentzian(x, amplitude, center, sigma):
    """Lorentzian with lmfit-style parameter names: ``amplitude`` is the
    maximum of the peak, ``center`` the center and ``sigma`` the FWHM."""
    reduced = 2 * (x - center) / sigma
    return 1 / (1 + reduced ** 2) * amplitude
0.804713
0.723065
def differential_reflectance(signal, reference):
    """Return '(signal - reference) / signal'."""
    difference = signal - reference
    return difference / signal
0.734215
0.887058
def contrast_reflectance(signal, reference):
    """Return '(signal - reference) / (signal + reference)'."""
    numerator = signal - reference
    denominator = signal + reference
    return numerator / denominator
0.758332
0.731994
def get_parameters(filename):
    """Return a dictionary with all the parameters contained in the filename
    given, following the established regex. The file extension must be
    removed from the filename.

    Parameters looked for in the filename
    --------------------------------------
    temperature : in kelvin
    laser_wavelength : in nanometer
    power : in micro watt
    wavelength : the central wavelength of the spectro in nm
    grooves : number of line per mm on the diffraction grating
    tacq : acquisition time in seconds
    slit : width of the slit in micrometers
    """
    # Fields are positional, separated by underscores. The slice offsets
    # below strip fixed-width unit suffixes/prefixes (e.g. trailing 'K',
    # 'nm', 'uW') — assumes the naming convention is followed exactly;
    # a malformed name raises ValueError/IndexError here. TODO confirm
    # the exact filename template against the acquisition software.
    list_params = filename.split('_')
    # Get the parameters.
    temperature = int(list_params[0][:-1])           # e.g. '10K' -> 10
    sample = list_params[1]
    laser_wavelength = float(list_params[2][5:-2])   # skips a 5-char prefix, drops unit
    power = float(list_params[3][:-2])
    wavelength = float(list_params[4][:-2])
    grooves = float(list_params[5][:-7])
    time = list_params[6]
    # Acquisition field looks like '<count>x<tacq>...' with a 3-char suffix.
    items = time[:-3].split('x')
    number_acq = int(items[0])
    tacq = float(items[1])
    slit = float(list_params[7][4:])                 # skips a 4-char prefix
    filter_ = list_params[8]
    calibration = list_params[9]
    try:
        position = list_params[10][:]
        # I keep the 'P' before the number, ex: 'P2' for position 2.
    except Exception:
        # Position field is optional; 0 marks "not specified".
        position = 0
    return {'temperature': temperature, 'sample': sample, 'laser_wavelength': laser_wavelength, 'power': power, 'wavelength': wavelength, 'grooves': grooves, 'number_acq': number_acq, 'tacq': tacq, 'slit': slit, 'filter': filter_, 'calibration': calibration, 'position': position}
0.835886
0.592784
def _is_paired(fastq, fastq2, single_end): """Determines the workflow based on file inputs. Args: """ if fastq and fastq2: paired_end = True interleaved = False elif single_end: paired_end = False interleaved = False else: paired_end = True interleaved = True return paired_end, interleaved
0.701917
0.601828
def self_neighbors(matches):
    """Build a boolean mask flagging matches that cross between two images.

    Each row is True if it is not matched to a point in the same image
    (good) and False if it is (bad).

    Parameters
    ----------
    matches : dataframe
        the matches dataframe stored along the edge of the graph containing
        matched points with columns containing: matched image name, query
        index, train index, and descriptor distance

    Returns
    -------
    : dataseries
        Intended to mask the matches dataframe. True means the row is not
        matched to a point in the same image and false the row is.
    """
    mask = matches.source_image != matches.destination_image
    return mask
0.875721
0.859015
def mirroring_test(matches):
    """ Compute and return a mask for the matches dataframe on each edge of
    the graph which will keep only entries in which there is both a
    source -> destination match and a destination -> source match.

    Parameters
    ----------
    matches : dataframe
        the matches dataframe stored along the edge of the graph containing
        matched points with columns containing: matched image name, query
        index, train index, and descriptor distance

    Returns
    -------
    duplicates : dataseries
        Intended to mask the matches dataframe. Rows are True if the
        associated keypoint passes the mirroring test and false otherwise.
        That is, if 1->2, 2->1, both rows will be True, otherwise, they will
        be false. Keypoints with only one match will be False. Removes
        duplicate rows.
    """
    # NOTE(review): pandas duplicated(keep='last') marks every occurrence of
    # a duplicate group True *except the last one* — so for a mirrored pair
    # only the earlier row is True, not both as the docstring states (which
    # also de-duplicates the pair, per the last docstring line). Confirm the
    # intended semantics before changing either the code or the doc.
    duplicate_mask = matches.duplicated(subset=['source_idx', 'destination_idx', 'distance'], keep='last')
    return duplicate_mask
0.875121
0.922552
import torch


def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028

    Arguments:
        netD (network)              -- discriminator network
        real_data (tensor array)    -- real images
        fake_data (tensor array)    -- generated images from the generator
        device (str)                -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        type (str)                  -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)            -- the constant used in formula ( | |gradient||_2 - constant)^2
        lambda_gp (float)           -- weight for this loss

    Returns the gradient penalty loss
    """
    if lambda_gp > 0.0:
        if type == 'real':
            # either use real images, fake images, or a linear interpolation of two.
            interpolatesv = real_data
        elif type == 'fake':
            interpolatesv = fake_data
        elif type == 'mixed':
            # One interpolation coefficient per sample, broadcast to the full
            # sample shape via expand + reshape.
            alpha = torch.rand(real_data.shape[0], 1)
            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
            alpha = alpha.to(device)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError('{} not implemented'.format(type))
        # Gradients must flow back to the interpolated inputs themselves.
        interpolatesv.requires_grad_(True)
        disc_interpolates = netD(interpolatesv)
        # create_graph/retain_graph keep the graph alive so the penalty term
        # itself can be backpropagated through during training.
        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                                        create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flat the data
        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp        # added eps
        return gradient_penalty, gradients
    else:
        # Penalty disabled: return a zero loss and no gradients.
        return 0.0, None
0.882504
0.532364
def as_array(array, dtype, transpose=False):
    """
    Given an array and dtype, array will be converted to dtype if and only
    if array.dtype != dtype. If transpose is set to True then array will be
    transposed before returning.

    :param array: A NumPy array.
    :param dtype: The type to return the array as.
    :type dtype: A NumPy data type (e.g. ``numpy.float32``).
    :param transpose: If set then array will be transposed before returning.
        Useful for passing arrays into Fortran routines. Default is False.
    :type transpose: Bool.

    :return: A :py:class:`numpy.ndarray` of type ``dtype`` with the same
        dimensions as array.
    """
    result = array if array.dtype == dtype else array.astype(dtype)
    if transpose:
        result = result.transpose()
    return result
0.837952
0.872836
def multiplier(price):
    """
    The "multiplier" is a number that indicates what to multiply price
    difference by to get pips.

    Examples: The pip distance between 1.25661 and 1.10896 on EUR/USD chart
    is 1476.5 pips: 1.25661 - 1.10896 = 0.14765, then multiply 0.14765 by
    10000. The pip distance between 114.234 and 114.212 =
    abs(price1 - price2) * 100.

    The multiplier for prices with 3 digits after decimal = 100
    The multiplier for prices with 4 or 5 digits after decimal = 10000

    :param price: a floating point number that will have 3, 4, or 5 digits
        after decimal. E.g. 112.321
    :return: 100 or 10000 depending on decimal precision
    :raises Exception: if the price has an unsupported number of decimals
    """
    before, after = str(price).split('.')
    decimals = len(after)
    if decimals == 3:
        return 100
    # 4- and 5-digit quotes share the same multiplier; merged branches.
    if decimals in (4, 5):
        return 10000
    # Fixed "multipler" typo in the error message.
    raise Exception(f"unable to calculate multiplier for price {price}.")
0.908696
0.844088
def signed_distance_to_plane(pp, plane):
    """
    Find the signed distances of the given 3D points to the given plane.
    Note that the distances are signed, and can thus be negative.
    """
    a, b, c, d = plane
    # Normalize by the length of the plane's normal vector.
    normal_length = (a ** 2 + b ** 2 + c ** 2) ** 0.5
    numerator = a * pp[:, 0] + b * pp[:, 1] + c * pp[:, 2] + d
    return numerator / normal_length
0.790045
0.991178
def interval(mean, std, z):
    """Return the (lower, upper) interval bounds mean +/- z*std."""
    margin = std * z
    lower = mean - margin
    upper = mean + margin
    return (lower, upper)
0.778986
0.54958
def control_metric(name):
    """Returns the (mode, metric) pair in History for the given control."""
    metric_key = "training/{}".format(name)
    return ("train", metric_key)
0.701509
0.523847
def maybe_flip(value, flip):
    """Flips a control (or not).

    Meant to translate controls that naturally take values close to 1
    (e.g. momentum) to a space where multiplication makes sense
    (i.e. close to 0).

    Args:
        value: float or numpy array, value of the control.
        flip: bool, whether to flip or not.

    Returns:
        Either value or 1 - value based on flip.
    """
    return 1 - value if flip else value
0.811601
0.718002
def resolve_filter_value(v):
    """Resolve a filter value to one that the search API can handle.

    We can't pass model instances for example, so an object exposing a
    ``pk`` attribute is replaced by that attribute's value.
    """
    _absent = object()
    pk = getattr(v, 'pk', _absent)
    return v if pk is _absent else pk
0.708011
0.539165
def check_diagonal(diagonal):
    r"""
    Checks that the diagonal length used to normalize the images' size is
    ``>= 20``.

    Parameters
    ----------
    diagonal : `int`
        The value to check.

    Returns
    -------
    diagonal : `int`
        The value if it's correct.

    Raises
    ------
    ValueError
        diagonal must be >= 20 or None
    """
    # None is explicitly allowed; only a provided value below 20 is invalid.
    if diagonal is None or diagonal >= 20:
        return diagonal
    raise ValueError("diagonal must be >= 20 or None")
0.923992
0.85183
def get_roi_params(separation: str = "uplc", instrument: str = "qtof"):
    """
    Creates a dictionary with recommended parameters for the make_roi
    function in different use cases.

    Parameters
    ----------
    separation : {"uplc", "hplc"}
        Mode in which the data was acquired. Used to set minimum length of
        the roi and number of missing values.
    instrument : {"qtof", "orbitrap"}
        Type of MS instrument. Used to set the tolerance.

    Returns
    -------
    roi_parameters : dict
    """
    min_length_for = {"uplc": 10, "hplc": 20}
    tolerance_for = {"qtof": 0.01, "orbitrap": 0.005}

    if separation not in min_length_for:
        raise ValueError("valid `separation` are uplc and hplc")
    if instrument not in tolerance_for:
        raise ValueError("valid `instrument` are qtof and orbitrap")

    return {
        "min_intensity": 500,
        "multiple_match": "reduce",
        "max_missing": 1,
        "min_length": min_length_for[separation],
        "tolerance": tolerance_for[instrument],
        "mode": separation,
    }
0.83612
0.600013
def _get_find_centroid_params(instrument: str): """ Set default parameters to find_centroid method using instrument information. Parameters ---------- instrument : {"qtof", "orbitrap"} Returns ------- params : dict """ params = {"snr": 10} if instrument == "qtof": md = 0.01 else: # valid values for instrument are qtof or orbitrap md = 0.005 params["min_distance"] = md return params
0.758868
0.517449
def get_average_brightness(block, size, gradation_step):
    """Get average brightness of an image block, snapped to a gray gradation.

    param block: mosaic block
    param size: edge length of the sub-tile to average
    param gradation_step: gradation of gray
    return int

    >>> get_average_brightness(np.ones((3, 3, 3)) * 200, 2, 15)
    195
    >>> get_average_brightness(np.ones((3, 3, 3)) * 100, 2, 15)
    90
    """
    # Mean over the 3 channels of the top-left size x size tile.
    channel_mean = block[:size, :size].sum() / 3
    per_pixel = channel_mean // size ** 2
    # Snap down to the nearest multiple of the gradation step.
    return int(per_pixel // gradation_step) * gradation_step
0.892767
0.73029
def mean(values):
    """Calculate the mean.

    Parameters
    ----------
    values : list
        Values to find the mean of

    Returns
    -------
    out : float
        The mean, or 0.0 for an empty input
    """
    if not values:
        return 0.0
    return sum(values) / float(len(values))
0.809125
0.916484
def f1_score(precision, recall):
    """Compute the harmonic mean of precision and recall (f1 score)."""
    if precision == recall == 0:
        # Avoid 0/0 when there are neither true positives nor predictions.
        return 0
    return 2 * precision * recall / (precision + recall)
0.762159
0.603727
def filter_scalar_input_data(scalars_in, scenario_select, scenario_overwrite):
    r"""
    Updates Scalars.csv DataFrame to contain only scenario-specific values of
    parameters.

    Needed to properly process Scalars.csv rows starting with something like
    'FlexMex1UC2'. (FlexMex-specific function)

    Parameters
    ----------
    scalars_in : pandas.DataFrame
        Scalars.csv DataFrame from input data
    scenario_select : list of str
        'Scenario' values of parameters to keep, drop everything else
    scenario_overwrite : list of str
        'Scenario' values who's parameter values will overwrite those in
        'scenario_select'

    Returns
    -------
    Scalars.csv DataFrame
    """
    scalars = scalars_in.copy()
    scalars_overwrite = scalars_in.copy()

    # NOTE(review): `== scenario_overwrite` compares the Series against the
    # list; pandas treats that as an elementwise row-by-row comparison (and
    # raises unless lengths match) rather than a membership test. If
    # membership was intended, `.isin(scenario_overwrite)` (as used below
    # for scenario_select) would be the analogue — confirm against callers.
    scalars_overwrite = scalars_overwrite.loc[
        scalars_overwrite["Scenario"] == scenario_overwrite
    ]
    scalars = scalars.loc[scalars["Scenario"].isin(scenario_select), :]

    # Save column order before setting and resetting index
    columns = scalars.columns

    # Align both frames on (Region, Parameter) so update() matches rows by
    # key, then let the overwrite values win.
    scalars.set_index(["Region", "Parameter"], inplace=True)
    scalars_overwrite.set_index(["Region", "Parameter"], inplace=True)
    scalars.update(scalars_overwrite)
    scalars = scalars.reset_index()

    # Restore column order
    scalars = scalars[columns]

    return scalars
0.739799
0.691667
def normalize_inputs(data):
    """
    Normalizes the inputs to [-1, 1]

    :param data: input data array with values in [0, 255]
    :return: normalized data in [-1, 1]
    """
    return (data - 127.5) / 127.5
0.804367
0.810066
def translate_bbox(bbox, y_offset=0, x_offset=0):
    """Translate bounding boxes.

    Mainly used together with image transforms such as padding and cropping,
    which move the image origin to :math:`(y_{offset}, x_{offset})`. Boxes
    are a :math:`(R, 4)` array of
    :math:`(y_{min}, x_{min}, y_{max}, x_{max})` corner coordinates.

    Args:
        bbox (~numpy.ndarray): Bounding boxes to be transformed, shape
            :math:`(R, 4)`.
        y_offset (int or float): The offset along y axis.
        x_offset (int or float): The offset along x axis.

    Returns:
        ~numpy.ndarray: Bounding boxes translated according to the given
        offsets.
    """
    shift = (y_offset, x_offset)
    moved = bbox.copy()
    moved[:, :2] += shift  # top-left corner
    moved[:, 2:] += shift  # bottom-right corner
    return moved
0.954732
0.929055
def resize_bbox(bbox, in_size, out_size):
    """Resize bounding boxes according to image resize.

    Boxes are a :math:`(R, 4)` array of
    :math:`(y_{min}, x_{min}, y_{max}, x_{max})` corner coordinates.

    Args:
        bbox (~numpy.ndarray): An array whose shape is :math:`(R, 4)`.
        in_size (tuple): (height, width) of the image before resizing.
        out_size (tuple): (height, width) of the image after resizing.

    Returns:
        ~numpy.ndarray: Bounding boxes rescaled according to the given image
        shapes.
    """
    scaled = bbox.copy()
    y_scale = float(out_size[0]) / in_size[0]
    x_scale = float(out_size[1]) / in_size[1]
    # Columns 0 and 2 are y coordinates, 1 and 3 are x coordinates.
    scaled[:, 0::2] = scaled[:, 0::2] * y_scale
    scaled[:, 1::2] = scaled[:, 1::2] * x_scale
    return scaled
0.940298
0.992334
def flip_bbox(bbox, size, y_flip=False, x_flip=False):
    """Flip bounding boxes accordingly.

    Boxes are a :math:`(R, 4)` array of
    :math:`(y_{min}, x_{min}, y_{max}, x_{max})` corner coordinates.

    Args:
        bbox (~numpy.ndarray): An array whose shape is :math:`(R, 4)`.
        size (tuple): (height, width) of the image before resizing.
        y_flip (bool): Flip bounding box according to a vertical flip of an
            image.
        x_flip (bool): Flip bounding box according to a horizontal flip of
            an image.

    Returns:
        ~numpy.ndarray: Bounding boxes flipped according to the given flips.
    """
    H, W = size
    flipped = bbox.copy()
    if y_flip:
        # Mirroring swaps min and max: new y_min comes from the old y_max.
        flipped[:, 0], flipped[:, 2] = H - bbox[:, 2], H - bbox[:, 0]
    if x_flip:
        flipped[:, 1], flipped[:, 3] = W - bbox[:, 3], W - bbox[:, 1]
    return flipped
0.95112
0.971966
def hms(s):
    """Convert a duration in seconds into an 'H:M:S.ss' string.

    :param s: duration in seconds (int or float)
    :return: str formatted as hours:minutes:seconds with 2 decimal places
    """
    hours = int(s) // 3600
    remainder = s % 3600
    minutes = int(remainder) // 60
    seconds = remainder % 60
    return '{:d}:{:d}:{:.2f}'.format(hours, minutes, seconds)
0.713831
0.52342
def find_average_record(sen_set, voting_dict):
    """
    Input: a set of last names, a voting dictionary
    Output: a vector containing the average components of the voting
        records of the senators in the input set. Neither input is mutated.

    Example:
        >>> voting_dict = {'Klein': [-1,0,1], 'Fox-Epstein': [-1,-1,-1], 'Ravella': [0,0,1]}
        >>> find_average_record({'Fox-Epstein','Ravella'}, voting_dict)
        [-0.5, -0.5, 0.0]
        >>> d = {'c': [-1,-1,0], 'b': [0,1,1], 'a': [0,1,1], 'e': [-1,-1,1], 'd': [-1,1,1]}
        >>> find_average_record({'a','c','e'}, d)
        [-0.6666666666666666, -0.3333333333333333, 0.6666666666666666]
        >>> find_average_record({'a','c','e','b'}, d)
        [-0.5, 0.0, 0.75]
        >>> find_average_record({'a'}, d)
        [0.0, 1.0, 1.0]
    """
    # The original left this as a `return ...` stub; implemented per the
    # docstring: componentwise mean over the selected senators' records.
    records = [voting_dict[name] for name in sen_set]
    count = len(records)
    return [sum(component) / count for component in zip(*records)]
0.812756
0.656631
def bitter_rivals(voting_dict):
    """
    Input: a dictionary mapping senator names to lists representing their
        voting records
    Output: a tuple containing the two senators who most strongly disagree
        with one another (the pair whose voting records have the smallest
        dot product).

    Example:
        >>> voting_dict = {'Klein':[-1,0,1], 'Fox-Epstein':[-1,-1,-1], 'Ravella':[0,0,1], 'Oyakawa':[1,1,1], 'Loery':[1,1,0]}
        >>> br = bitter_rivals(voting_dict)
        >>> br == ('Fox-Epstein', 'Oyakawa') or br == ('Oyakawa', 'Fox-Epstein')
        True
    """
    # The original left this as a `return (..., ...)` stub; implemented as
    # an exhaustive pairwise search minimizing record agreement.
    def agreement(record_a, record_b):
        # Dot product of two voting records: lower means more disagreement.
        return sum(a * b for a, b in zip(record_a, record_b))

    names = list(voting_dict)
    worst_pair = (None, None)
    worst_score = None
    for i, first in enumerate(names):
        for second in names[i + 1:]:
            score = agreement(voting_dict[first], voting_dict[second])
            if worst_score is None or score < worst_score:
                worst_score = score
                worst_pair = (first, second)
    return worst_pair
0.746693
0.6771
def _proportion(graph, degree):
    """
    Calculates the proportion of intersection types in a graph.

    Returns a collections.Counter mapping each node value to its count.
    """
    import collections
    # NOTE(review): `degree` is forwarded as the positional argument of
    # graph.nodes(...); in a networkx-style API that position is `data=`,
    # i.e. the node-attribute name whose values are tallied — confirm
    # against the graph implementation actually used here.
    values = list(dict(graph.nodes(degree)).values())
    counts = collections.Counter(values)
    return counts
0.719876
0.579936
def sqrt(number):
    """Returns the square root of the specified number as an int, rounded
    down (the integer square root).

    Raises
    ------
    ValueError
        If ``number`` is negative.
    """
    # `assert` is stripped when Python runs with -O, so validate with a
    # real exception instead.
    if number < 0:
        raise ValueError("number must be non-negative")
    # Find the highest power of two not exceeding the root...
    offset = 1
    while offset ** 2 <= number:
        offset *= 2
    # ...then binary-search downwards, accumulating bits of the answer.
    count = 0
    while offset > 0:
        if (count + offset) ** 2 <= number:
            count += offset
        offset //= 2
    return count
0.760206
0.91708
def get_geom_type(geom):
    """Returns the HoloViews geometry type.

    Args:
        geom: A shapely geometry

    Returns:
        A string representing type of the geometry ('Point', 'Line',
        'Ring' or 'Polygon'), or None for unrecognized geometries.
    """
    # shapely.geometry exposes LinearRing, not Ring — the original import
    # raised ImportError on every call.
    from shapely.geometry import (
        Point, LineString, Polygon, LinearRing, MultiPoint,
        MultiPolygon, MultiLineString
    )
    if isinstance(geom, (Point, MultiPoint)):
        return 'Point'
    # LinearRing subclasses LineString, so it must be tested first or
    # every ring would be classified as 'Line'.
    elif isinstance(geom, LinearRing):
        return 'Ring'
    elif isinstance(geom, (LineString, MultiLineString)):
        return 'Line'
    elif isinstance(geom, (Polygon, MultiPolygon)):
        return 'Polygon'
0.856602
0.825695
def HHV_modified_Dulong(mass_fractions):
    r"""
    Return higher heating value [HHV; in J/g] based on the modified
    Dulong's equation [1]_.

    Parameters
    ----------
    mass_fractions : dict[str, float]
        Dictionary of atomic mass fractions [-].

    Returns
    -------
    HHV : float
        Higher heating value [J/mol].

    Notes
    -----
    The heat of combustion in J/mol is given by Dulong's equation [1]_:

    .. math::
        Hc (J/mol) = MW \cdot (338C + 1428(H - O/8)+ 95S)

    This equation is only good for <10 wt. % Oxygen content. Variables C,
    H, O, and S are atom weight fractions.

    Examples
    --------
    Dry bituminous coal:

    >>> HHV_modified_Dulong({'C': 0.716, 'H': 0.054, 'S': 0.016, 'N': 0.016, 'O': 0.093, 'Ash': 0.105})
    -304.0395

    References
    ----------
    .. [1] <NAME>. Waste management. In Perry's Chemical Engineers'
        Handbook, 9 ed.; McGraw-Hill Education, 2018
    """
    fraction = mass_fractions.get
    C = fraction('C', 0.)
    H = fraction('H', 0.)
    O = fraction('O', 0.)
    S = fraction('S', 0.)
    if O > 0.105:
        raise ValueError("Dulong's formula is only valid at 10 wt. %% Oxygen "
                         "or less (%s given)" %(O))
    return - (338.*C + 1428.*(H - O/8.)+ 95.*S)
0.9255
0.757436
def LHV_from_HHV(HHV, N_H2O):
    r"""
    Return the lower heating value [LHV; in J/mol] of a chemical given the
    higher heating value [HHV; in J/mol] and the number of water molecules
    formed per molecule burned.

    Parameters
    ----------
    HHV : float
        Higher heating value [J/mol].
    N_H2O : int
        Number of water molecules produced [-].

    Returns
    -------
    LHV : float
        Lower heating value [J/mol].

    Notes
    -----
    The LHV is calculated as follows:

    .. math::
        LHV = HHV + H_{vap} \cdot H_2O

        H_{vap} = 44011.496 \frac{J}{mol H_2O}

        H_2O = \frac{mol H_2O}{mol}

    Examples
    --------
    Methanol lower heat of combustion:

    >>> LHV_from_HHV(-726024.0, 2)
    -638001.008
    """
    # Heat of vaporization of water, J per mol H2O.
    Hvap_H2O = 44011.496
    return HHV + Hvap_H2O * N_H2O
0.871666
0.782704
def categorical_error(pred, label):
    """
    Compute categorical error given score vectors and labels as
    numpy.ndarray: the fraction of rows whose argmax differs from the label.
    """
    predicted_labels = pred.argmax(1)
    mismatches = predicted_labels != label.flat
    return mismatches.mean()
0.742702
0.804675
def find_neighbors_from_file(input_vtk):
    """
    Generate the list of unique, sorted indices of neighboring vertices for
    all vertices in the faces of a triangular mesh in a VTK file.

    Parameters
    ----------
    input_vtk : string
        name of input VTK file containing surface mesh

    Returns
    -------
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex
    """
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.mio.vtks import read_faces_points

    faces, points, npoints = read_faces_points(input_vtk)
    return find_neighbors(faces, npoints)
0.920901
0.74772
def dilate(indices, nedges, neighbor_lists):
    """Grow a surface-mesh region outward by ``nedges`` edges.

    Parameters
    ----------
    indices : list of integers
        indices of the vertices forming the region to dilate
    nedges : integer
        number of edges to dilate across
    neighbor_lists : list of lists of integers
        per-vertex lists of neighboring vertex indices

    Returns
    -------
    dilated_indices : list of integers
        the original vertices followed by the newly reached neighborhood
        vertices
    """
    from mindboggle.guts.mesh import find_neighborhood

    # Vertices reachable within nedges steps that are not already inside.
    halo = find_neighborhood(neighbor_lists, indices, nedges)

    return list(indices) + halo
0.929087
0.715759
def erode(indices, nedges, neighbor_lists):
    """Shrink a surface-mesh region inward by ``nedges`` edges.

    Parameters
    ----------
    indices : list of integers
        indices of the vertices forming the region to erode
    nedges : integer
        number of edges to erode across
    neighbor_lists : list of lists of integers
        per-vertex lists of neighboring vertex indices

    Returns
    -------
    eroded_indices : list of integers
        the original vertices minus those within ``nedges`` edges of the
        region boundary
    """
    from mindboggle.guts.mesh import find_neighborhood

    # One-edge ring just outside the region, then everything within
    # nedges steps of that ring; those vertices are stripped away.
    outer_ring = find_neighborhood(neighbor_lists, indices, nedges=1)
    removal_zone = find_neighborhood(neighbor_lists, outer_ring, nedges)

    return list(frozenset(indices).difference(removal_zone))
0.921719
0.68875
def extract_edge(indices, neighbor_lists):
    """Extract the one-edge-wide boundary of a surface-mesh region.

    Parameters
    ----------
    indices : list of integers
        indices of the vertices forming the region
    neighbor_lists : list of lists of integers
        per-vertex lists of neighboring vertex indices

    Returns
    -------
    edge_indices : list of integers
        region vertices that lie on the region's edge
    """
    from mindboggle.guts.mesh import find_neighborhood

    # Step one edge out of the region, then one edge back in; the
    # re-entered vertices are exactly the region's boundary.
    outside_ring = find_neighborhood(neighbor_lists, indices, nedges=1)
    bounce_back = find_neighborhood(neighbor_lists, outside_ring, nedges=1)

    return list(set(bounce_back).intersection(indices))
0.924505
0.748306
def binary_cross_entropy_with_logits(input, target, weight=None, size_average=True, reduce=True):
    r"""Numerically stable binary cross entropy computed on raw logits.

    See :class:`~torch.nn.BCEWithLogitsLoss` for details.

    Args:
        input: Variable of arbitrary shape holding unnormalized logits
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight
                if provided it's repeated to match input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if the field
                :attr:`size_average` is set to ``False``, the losses are instead
                summed for each minibatch. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged or summed over
                observations for each minibatch depending on :attr:`size_average`.
                When :attr:`reduce` is ``False``, returns a loss per input/target
                element instead and ignores :attr:`size_average`. Default: ``True``

    Examples::

         >>> input = torch.randn(3, requires_grad=True)
         >>> target = torch.FloatTensor(3).random_(2)
         >>> loss = F.binary_cross_entropy_with_logits(input, target)
         >>> loss.backward()
    """
    if target.size() != input.size():
        raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))

    # Log-sum-exp trick: subtract max(0, -x) inside the exponentials so
    # neither exp can overflow for large-magnitude logits.
    max_val = (-input).clamp(min=0)
    loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()

    if weight is not None:
        loss = loss * weight

    if not reduce:
        return loss
    if size_average:
        return loss.mean()
    return loss.sum()
0.961271
0.732161
def batched_notes_to_notes(batched_notes):
    """
    Split batch-friendly notes into separate pitch and interval arrays.

    Parameters
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and pitches by row; each row is
      [onset, offset, pitch]
      N - number of notes

    Returns
    ----------
    pitches : ndarray (N)
      Array of pitches corresponding to notes
      N - number of notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs corresponding to notes
      N - number of notes
    """
    # Last column holds the pitch; the first two hold the interval.
    pitches = batched_notes[..., 2]
    intervals = batched_notes[:, :2]

    return pitches, intervals
0.918567
0.780286
def threshold_activations(activations, threshold=0.5):
    """
    Binarize an array of activations in place.

    Elements greater than or equal to ``threshold`` become 1; all other
    elements become 0. The input array is modified in place and also
    returned for convenience.

    Parameters
    ----------
    activations : ndarray
      Provided activations (modified in place)
    threshold : float
      Value under which activations are negative

    Returns
    ----------
    activations : ndarray
      Thresholded activations (the same array object as the input)
    """
    # Compute the mask before writing: the previous two-pass approach
    # (zero out < threshold, then set != 0 to one) left elements that are
    # exactly 0 at zero even when a threshold <= 0 should accept them.
    positive = activations >= threshold
    activations[~positive] = 0
    activations[positive] = 1

    return activations
0.905042
0.781247
def tensor_to_array(tensor):
    """
    Convert a PyTorch tensor into a NumPy array.

    Parameters
    ----------
    tensor : PyTorch tensor
      Tensor to convert to array

    Returns
    ----------
    array : NumPy ndarray
      Converted array
    """
    # Detach from the autograd graph and move to host memory before
    # converting, since .numpy() requires a CPU tensor without grad.
    return tensor.detach().cpu().numpy()
0.893762
0.985314
import torch


def array_to_tensor(array, device=None):
    """
    Convert a NumPy array into a PyTorch tensor.

    Parameters
    ----------
    array : NumPy ndarray
      Array to convert to tensor
    device : string, or None (optional)
      Add tensor to this device, if specified

    Returns
    ----------
    tensor : PyTorch tensor
      Converted tensor
    """
    tensor = torch.from_numpy(array)

    if device is None:
        return tensor

    return tensor.to(device)
0.829112
0.726983
def typename(atype):
    """Convert a type object into a fully qualified typename.

    Parameters
    ----------
    atype : type
        The type to convert

    Returns
    -------
    typename : str
        The string typename. For example,

    >>> typename(type(1))
    'int'

    >>> import numpy
    >>> x = numpy.array([1,2,3], numpy.float32)
    >>> typename(type(x))
    'numpy.ndarray'

    Raises
    ------
    TypeError
        If ``atype`` is not a type object.
    """
    if not isinstance(atype, type):
        # Raise the specific builtin exception rather than a bare
        # Exception; existing callers catching Exception still match.
        raise TypeError('Argument is not a type')
    modulename = atype.__module__
    typename = atype.__name__
    # Builtins are left unqualified; everything else gets a module prefix.
    if modulename not in ['__builtin__', 'builtins']:
        typename = modulename + '.' + typename
    return typename
0.870583
0.542197
def compute_ga_days_for_charttime(charttime, intime, ga_days_birth):
    """Compute the gestational age in days at a specific charttime

    Args:
        charttime (datetime.datetime): The charttime
        intime (datetime.datetime): The admission time of the ICU stay
        ga_days_birth: The gestational age in days at birth

    Returns:
        (int) Gestational age in days
    """
    # Whole days elapsed since ICU admission, added to the age at birth.
    days_since_admission = (charttime - intime).days
    return round(days_since_admission + ga_days_birth)
0.756358
0.662452
def pad(value, digits, to_right=False):
    """Only use for positive binary numbers given as strings.

    Pads ``value`` with zeros up to ``digits`` characters -- to the left
    by default, or to the right using the to_right flag.

    Inputs:
       value -- string of bits
       digits -- number of bits in representation
       to_right -- Boolean, direction of padding

    Output: string of bits of length 'digits'

    Raises ValueError if value is larger than digits in length.

    Example:
    pad('0010', 6) -> '000010'
    pad('0010', 6, True) -> '001000'
    """
    len_val = len(value)
    if len_val > digits:
        # Raise instead of assert so the check survives `python -O`.
        raise ValueError(
            "value %r has %d bits, which does not fit in %d digits"
            % (value, len_val, digits))
    filler = "0" * (digits - len_val)
    return value + filler if to_right else filler + value
0.869673
0.789599
def Aligned(value, alignment):
    """Helper utility to calculate aligned numbers.

    Rounds ``value`` up to the next multiple of ``alignment``.

    Args:
      value: an integer as original value.
      alignment: an integer for alignment.
    """
    leftover = value % alignment
    if not leftover:
        # Already aligned; nothing to add.
        return value
    return value + alignment - leftover
0.854308
0.571169
def descale(data, data_max, data_min):
    """Reverse min-max normalization.

    Args:
        data (np.array): Normalized data
        data_max (float): max value before normalization
        data_min (float): min value before normalization

    Returns:
        [np.array]: Reverse-Normalized data
    """
    # Stretch back to the original span, then shift by the original minimum.
    span = data_max - data_min
    return data * span + data_min
0.812235
0.773772
def extract_capa_units(string):
    """
    Map a capacity-unit string onto its unit model representation.
    Temporary capacity unit model.

    Usage::

        end_value = extract_capa_units('mAh/g')
        print(end_value)
        # "Gram^(-1.0) Hour^(1.0) MilliAmpere^(1.0)"

    :param str string: A representation of the units as a string
    :returns: The unit model
    :rtype: string
    """
    if string in ("Ah/kg", "Ahkg-1"):
        return "Ampere^(1.0) Hour^(1.0) KiloGram^(-1.0)"
    if string in ("Ah/g", "Ahg-1"):
        return "Ampere^(1.0) Gram^(-1.0) Hour^(1.0)"
    if string in ("mAh/kg", "mAhkg-1"):
        return "Hour^(1.0) KiloGram^(-1.0) MilliAmpere^(1.0)"
    # Default covers mAh/g as well as any unrecognized spelling.
    return "Gram^(-1.0) Hour^(1.0) MilliAmpere^(1.0)"
0.794664
0.793306
def extract_volt_units(string):
    """
    Map a voltage-unit string onto its unit model representation.
    Temporary voltage unit model.

    Usage::

        end_value = extract_volt_units('mV')
        print(end_value)
        # "MilliVolt^(1.0)"

    :param str string: A representation of the units as a string
    :returns: The unit model
    :rtype: string
    """
    # Anything that is not explicitly millivolts is treated as volts.
    return "MilliVolt^(1.0)" if string == "mV" else "Volt^(1.0)"
0.781872
0.813387
def extract_coul_units(string):
    """
    Map a coulombic-efficiency unit string onto its unit model.

    Every input maps to percent; the argument is accepted only to keep the
    interface consistent with the other unit extractors.

    :param str string: A representation of the units as a string
    :returns: The unit model
    :rtype: string
    """
    return "Percent^(1.0)"
0.78287
0.863392
def extract_cond_units(string):
    """
    Map a conductivity-unit string onto its unit model representation.
    Temporary conductivity unit model.

    Usage::

        end_value = extract_cond_units('mS/cm')
        print(end_value)
        # "CentiMeter^(-1.0) MilliSiemens^(1.0)"

    :param str string: A representation of the units as a string
    :returns: The unit model
    :rtype: string
    """
    if string in ("mS/cm", "mScm-1"):
        return "CentiMeter^(-1.0) MilliSiemens^(1.0)"
    if string in ("mS/m", "mSm-1"):
        return "Meter^(-1.0) MilliSiemens^(1.0)"
    # Default covers S/cm as well as any unrecognized spelling.
    return "CentiMeter^(-1.0) Siemens^(1.0)"
0.778902
0.757884
def extract_ener_units(string):
    """
    Map an energy-density unit string onto its unit model representation.
    Temporary energy unit model.

    Usage::

        end_value = extract_ener_units('Wh/g')
        print(end_value)
        # "Gram^(-1.0) WattHour^(1.0)"

    :param str string: A representation of the units as a string
    :returns: The unit model
    :rtype: string
    """
    # Per-gram spellings are special-cased; everything else is per-kilogram.
    if string in ("Wh/g", "Whg-1"):
        return "Gram^(-1.0) WattHour^(1.0)"
    return "KiloGram^(-1.0) WattHour^(1.0)"
0.796015
0.78016
def get_neighbor(hex, direction):
    """
    Return the neighbor, in the direction specified, of the hexagon.

    :param hex: Cube coordinates of the hexagon.
    :param direction: A direction from the DIR class.
    :return: The location of the neighbor in cube coordinates.
    """
    neighbor = hex + direction
    return neighbor
0.8156
0.832169
def cartesian_vector(i):
    """
    Return the i-th axis-aligned 3D unit vector.

    Maps the indices 0..5 onto the six unit directions
    (+x, -x, +y, -y, +z, -z). Note: despite what the name and the old
    docstring suggested, the mapping is deterministic, not random.

    :param i: integer index in [0, 5]
    :return: tuple (x, y, z)
    :raises ValueError: if i is not an integer in [0, 5]
    """
    directions = (
        (1, 0, 0),
        (-1, 0, 0),
        (0, 1, 0),
        (0, -1, 0),
        (0, 0, 1),
        (0, 0, -1),
    )
    # Raise instead of printing 'Bad input' and implicitly returning
    # None, which silently propagated a non-vector to callers.
    if i not in range(6):
        raise ValueError('Bad input: i must be an integer in [0, 5]')
    return directions[i]
0.716219
0.689737
def tastes_like_gdal(seq):
    """Return True if `seq` matches the GDAL geotransform pattern."""
    # A GDAL geotransform has zero rotation terms (indices 2 and 4),
    # a positive pixel width (index 1) and a negative pixel height (index 5).
    no_rotation = seq[2] == 0.0 and seq[4] == 0.0
    return no_rotation and seq[1] > 0 and seq[5] < 0
0.73848
0.562898
def round_to(value: float, target: float):
    """
    Round price to price tick value.
    """
    # Number of whole ticks closest to the value, then back to price units.
    ticks = int(round(value / target))
    return ticks * target
0.798344
0.727153
import torch


def video_to_tensor(pic):
    """Convert a ``numpy.ndarray`` video to a tensor.

    Reorders a numpy.ndarray (T x H x W x C)
    into a torch tensor of shape (C x T x H x W).

    Args:
        pic (numpy.ndarray): Video to be converted to tensor.
    Returns:
        Tensor: Converted video.
    """
    # Move the channel axis to the front, keeping T/H/W order intact.
    reordered = pic.transpose([3, 0, 1, 2])
    return torch.from_numpy(reordered)
0.833155
0.732927
import torch


def quat2mat(quat):
    """Convert quaternion coefficients to rotation matrix.

    Args:
        quat: size = [B, 4] 4 <===>(w, x, y, z)
    Returns:
        Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
    """
    # Normalize so the quaternion represents a pure rotation.
    unit = quat / quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = unit[:, 0], unit[:, 1], unit[:, 2], unit[:, 3]

    batch = quat.size(0)

    # Pairwise products appearing in the standard conversion formula.
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    # Matrix entries listed row-major, then reshaped to [B, 3, 3].
    entries = [
        ww + xx - yy - zz, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
        2 * wz + 2 * xy, ww - xx + yy - zz, 2 * yz - 2 * wx,
        2 * xz - 2 * wy, 2 * wx + 2 * yz, ww - xx - yy + zz,
    ]
    return torch.stack(entries, dim=1).view(batch, 3, 3)
0.843444
0.917893
def orthographic_projection(X, camera):
    """Perform orthographic projection of 3D points X using the camera parameters

    Args:
        X: size = [B, N, 3]
        camera: size = [B, 3] -- per batch element (scale, trans_x, trans_y)
    Returns:
        Projected 2D points -- size = [B, N, 2]
    """
    cam = camera.view(-1, 1, 3)
    # Translate the xy coordinates, then apply the per-batch scale.
    translated = X[:, :, :2] + cam[:, :, 1:]
    flat = translated.view(translated.shape[0], -1)
    return (cam[:, :, 0] * flat).view(translated.shape)
0.898053
0.966474
def vortex_position_in_panel(P1, P2, P3, P4):
    """
    For a given panel defined by points P1, P2, P3 and P4 returns the
    position of the horseshoe vortex defined by points A, B and its
    control point P.

    ^ y                Points defining the panel
    |                  are named clockwise.
    P3--B--|-----P4
    |      |     |
    |      |     |
    |      |     |
    T1     +--P  T2---->
    |      |     |     x
    |      |     |
    P2--A--------P1

    Parameters
    ----------
    P1, P2, P3, P4 : array_like
        Points that define the panel

    Returns
    -------
    P, A, B : tuple of array_like
        P - control point where the boundary condition V*n = 0
        is applied according to the Vortice Lattice Method.
        A, B - points that define the horseshoe position
    """
    # Bound vortex A-B sits at the quarter chord of each side edge.
    A = P2 + (P1 - P2) / 4
    B = P3 + (P4 - P3) / 4

    # Mid-points of the side edges span the panel's mid-chord line.
    T1 = P2 + (P3 - P2) / 2
    T2 = P1 + (P4 - P1) / 2

    # Control point at three quarters along the mid-chord line.
    P = T1 + (3 / 4) * (T2 - T1)

    return P, A, B
0.894605
0.795896
def affix(request):
    """Parameterizes over the different variations of a local_snapshot property.

    These include ``property``, ``ghost_property``, and
    ``property_with_ghosts``.

    Parameters
    ----------
    request : pytest fixture request object
        Supplies the current parameter via ``request.param``.

    Returns
    -------
    The parameter value for the current fixture invocation.
    """
    return request.param
0.718496
0.614886
def clamp(value, min_value, max_value):
    """
    Return *value* clipped to the range *min_value* to *max_value* inclusive.
    """
    # Apply the lower bound first, then the upper bound.
    lower_bounded = max(min_value, value)
    return min(max_value, lower_bounded)
0.789193
0.574932
def is_empty(value: str):
    """
    Returns True if the specified string is empty.

    :param value: the string to check
    :return: True if the specified string is empty, False otherwise
             (including for non-string arguments)
    """
    # A plain == comparison always yields a real bool; the previous
    # '"".__eq__(value)' returned the truthy NotImplemented singleton
    # whenever value was not a string.
    return value == ""
0.728362
0.565299
def is_blank(value: str):
    """
    Returns True if the specified string is whitespace or empty.

    :param value: the string to check
    :return: True if the specified string is whitespace or empty,
             False for non-string arguments
    """
    try:
        # Comparing with == (rather than '"".__eq__') guarantees a real
        # bool even when value.strip() is not a str (e.g. bytes, whose
        # comparison previously yielded the truthy NotImplemented).
        return value.strip() == ""
    except AttributeError:
        # Not string-like, so it cannot be blank.
        return False
0.746416
0.736247
def is_not_blank(value: str):
    """
    Returns True if the specified string is not whitespace or empty.

    :param value: the string to check
    :return: True if the specified string is not whitespace or empty;
             True for non-string arguments
    """
    try:
        # != yields a real bool; the previous 'not "".__eq__(...)' could
        # negate the truthy NotImplemented singleton for bytes-like input,
        # wrongly reporting such values as blank.
        return value.strip() != ""
    except AttributeError:
        # Not string-like, so it is trivially "not blank".
        return True
0.719581
0.716045
def strings_match_at_indices(string_a, index_a, string_b, index_b):
    """Check if both strings match at given indices and indices aren't 0.

    Args:
        string_a (str): First string.
        index_a (int): Index of character in first string.
        string_b (str): Second string.
        index_b (int): Index of character in second string.

    Returns:
        boolean: If both strings match at given indices and indices aren't 0.
    """
    # A zero index has no preceding character to compare.
    if index_a == 0 or index_b == 0:
        return False
    return string_a[index_a - 1] == string_b[index_b - 1]
0.846356
0.72645
def prune_regulon(expr, regulon, regulon_size):
    """ Prunes regulon with secondary interactions that do not meet the
    necessary number of downstream interactions metric {regulon_size}

    Args:
        expr (pandas DataFrame obj): pandas DataFrame of [n_samples, n_features]
        regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight
            interactions between regulator and downstream members of its
            regulon, with at least the columns ['UpGene', 'DownGene']
        regulon_size (int) : number of downstream interactions required for a
            given regulator in order to calculate enrichment score

    Returns:
        filtered_regulon (:obj: `pandas DataFrame`) : the subset of `regulon`
            restricted to genes measured in `expr` and to regulators with at
            least `regulon_size` downstream interactions
    """
    # Keep only interactions whose regulator and target are both measured.
    measured = (regulon.UpGene.isin(expr.columns)
                & regulon.DownGene.isin(expr.columns))
    # .copy() so set_index does not mutate a view of the caller's frame
    # (the previous inplace set_index on a slice triggered
    # SettingWithCopyWarning).
    expr_filtered_regulon = regulon[measured].copy()
    expr_filtered_regulon.set_index('UpGene', inplace=True)

    # Regulators retaining at least regulon_size downstream interactions.
    counts = expr_filtered_regulon.index.value_counts()
    keep = counts[counts >= regulon_size].index

    filtered_regulon = expr_filtered_regulon.loc[keep]
    filtered_regulon.reset_index(inplace=True)

    return filtered_regulon
0.83772
0.793626
def timedelta_seconds(delta):
    """
    Converts the given timedelta to seconds.

    :type delta: timedelta
    :rtype: float
    """
    # Stdlib method is exactly equivalent to the manual
    # days/seconds/microseconds arithmetic it replaces.
    return delta.total_seconds()
0.765155
0.824815
def normalize_images(fixed_image, moving_image):
    """ Normalize image intensities by extracting the joint minimum and
    dividing by the joint maximum

    Note: the function operates inplace on both images

    fixed_image (Image): fixed image
    moving_image (Image): moving image
    return (Image, Image): normalized images
    """
    # Shift both images by their joint minimum ...
    joint_min = min(fixed_image.image.min(), moving_image.image.min())
    fixed_image.image -= joint_min
    moving_image.image -= joint_min

    # ... then scale both by the joint maximum of the shifted images.
    joint_max = max(fixed_image.image.max(), moving_image.image.max())
    fixed_image.image /= joint_max
    moving_image.image /= joint_max

    return (fixed_image, moving_image)
0.865636
0.728616
import numpy


def constant_potential_single_point(phi0, a, r, kappa):
    """
    It computes the potential in a point 'r' due to a spherical surface
    with constant potential phi0, immersed in water. Solution to the
    Poisson-Boltzmann problem.

    Arguments
    ----------
    phi0 : float, constant potential on the surface of the sphere.
    a    : float, radius of the sphere.
    r    : float, distance from the center of the sphere to the
                  evaluation point.
    kappa: float, reciprocal of Debye length.

    Returns
    --------
    phi  : float, potential.
    """
    # Screened-Coulomb decay away from the sphere surface.
    decay = numpy.exp(kappa * (a - r))
    return phi0 * (a / r) * decay
0.877948
0.611614
import numpy


def constant_charge_single_point(sigma0, a, r, kappa, epsilon):
    """
    It computes the potential in a point 'r' due to a spherical surface
    with constant charge sigma0 immersed in water. Solution to the
    Poisson-Boltzmann problem.

    Arguments
    ----------
    sigma0 : float, constant charge on the surface of the sphere.
    a      : float, radius of the sphere.
    r      : float, distance from the center of the sphere to the
                    evaluation point.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    phi    : float, potential.
    """
    # Surface normal derivative of the potential fixed by the charge.
    surface_gradient = sigma0 / epsilon
    decay = numpy.exp(kappa * (a - r))
    return surface_gradient * a * a * decay / ((1 + kappa * a) * r)
0.920883
0.616445
def constant_potential_single_charge(phi0, radius, kappa, epsilon):
    """
    It computes the surface charge of a sphere at constant potential,
    immersed in water.

    Arguments
    ----------
    phi0   : float, constant potential on the surface of the sphere.
    radius : float, radius of the sphere.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    sigma  : float, surface charge.
    """
    # Normal derivative of the screened-Coulomb potential at the surface;
    # the surface charge is epsilon times this gradient.
    gradient = phi0 * (1. + kappa * radius) / radius
    return epsilon * gradient
0.939203
0.609292
def constant_charge_single_potential(sigma0, radius, kappa, epsilon):
    """
    It computes the surface potential on a sphere at constant charge,
    immersed in water.

    Arguments
    ----------
    sigma0 : float, constant charge on the surface of the sphere.
    radius : float, radius of the sphere.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    phi    : float, potential.
    """
    # Potential gradient fixed by the surface charge density.
    surface_gradient = sigma0 / epsilon
    return surface_gradient * radius / (1. + kappa * radius)
0.916952
0.673205
import numpy


def Cext_analytical(radius, wavelength, diel_out, diel_in):
    """
    Calculates the analytical solution of the extinction cross section.
    This solution is valid when the nano particle involved is a sphere.

    Arguments
    ----------
    radius    : float, radius of the sphere in [nm].
    wavelength: float/array of floats, wavelength of the incident
                electric field in [nm].
    diel_out  : complex/array of complex, dielectric constant outside
                the surface.
    diel_in   : complex/array of complex, dielectric constant inside
                the surface.

    Returns
    --------
    Cext_an   : float/array of floats, extinction cross section.
    """
    wavenumber = 2 * numpy.pi * numpy.sqrt(diel_out) / wavelength
    # Clausius-Mossotti-style dielectric contrast factor.
    contrast = diel_in / diel_out
    C1 = wavenumber**2 * (contrast - 1) / (contrast + 2)
    return 4 * numpy.pi * radius**3 / wavenumber.real * C1.imag
0.886174
0.683763
def validate_with_errors(xml, xsd):
    """
    Returns a tuple with a boolean product of the XSD validation as the
    first element and the error log object as the second element.
    """
    is_valid = xsd.validate(xml)
    return is_valid, xsd.error_log
0.713631
0.892046
def between_percentiles_mean(scores, min_percentile=0.450, max_percentile=0.55):
    """
    Get the mean of the scores between the specified percentiles
    """
    import numpy

    ordered = numpy.sort(numpy.array(scores))
    n = len(ordered)
    # Slice out the window bounded by the two percentile positions.
    window = ordered[int(min_percentile * n):int(max_percentile * n)]
    return numpy.mean(window)
0.706393
0.752445
def flatten(image, char):
    """
    Given a layered image (typically (y, x, RGB)), return a plain 2D
    image (y, x) according to a spec.

    Args:
        image (np.ndarray): The image to flatten
        char (char): One of (R, G, B, or V (=value))

    Returns:
        np.ndarray - The 2D image.

    Raises:
        ValueError: If ``char`` is not one of R, G, B, V.
    """
    if image.ndim < 3:
        # Already flat; return unchanged.
        return image

    if char == "V":
        # Value = mean over the channel axis.
        return image.mean(axis=2)

    char2idx = dict(R=0, G=1, B=2)
    if char in char2idx:
        return image[:, :, char2idx[char]]

    # Raise instead of `assert False`: asserts are stripped under
    # `python -O`, which would make a bad spec silently return None.
    raise ValueError("Unhandled - invalid flat spec '%s'" % char)
0.924232
0.663206
def CWof(date):
    """The calendar week number of a date.

    @param date: python datetime
    """
    # isocalendar() yields (ISO year, ISO week number, ISO weekday).
    _, week, _ = date.isocalendar()
    return week
0.812979
0.66583
def prime_factors(number):
    """Finds prime factors of an integer (by trial-division).

    :param number: The integer to factor
    :type number: int
    :rtype: list of ints

    **Examples**

    >>> prime_factors(314)
    [2, 157]

    >>> prime_factors(31)
    [31]
    """
    factors = []
    candidate = 2
    remaining = number
    # Trial division: only candidates up to sqrt(remaining) can divide it.
    while candidate * candidate <= remaining:
        quotient, residue = divmod(remaining, candidate)
        if residue:
            candidate += 1
        else:
            remaining = quotient
            factors.append(candidate)
    # Whatever is left above 1 is itself prime.
    if remaining > 1:
        factors.append(remaining)
    return factors
0.840488
0.806205
def pad_to(x, k=8):
    """Pad int value up to divisor of k.

    Examples:
        >>> pad_to(31, 8)
        32
    """
    # (-x) % k is the distance to the next multiple of k
    # (0 when x is already aligned).
    return x + (-x) % k
0.853791
0.560072
def correlation(df, tag_a, tag_b):
    """Determine the probability of correlation/association."""
    a = df[tag_a]
    b = df[tag_b]

    # Counts of rows where both tags hold, and where exactly one holds.
    both = (a & b).sum()
    only_a = (a & ~b).sum()
    only_b = (b & ~a).sum()

    # Jaccard-style ratio: rows with both tags over rows with at least one.
    return both / (both + only_a + only_b)
0.892422
0.647304
def velocity_to_bin(velocity, step=4):
    """
    Velocity in a midi file can take on any integer value in the range
    (0, 127). So that each vector in the midiparser has fewer dimensions
    than it otherwise would, without really losing any resolution in
    dynamics, the velocity is quantized down to the previous multiple
    of step.

    Args:
        velocity: integer MIDI velocity in [0, 127]
        step: bin width; must evenly divide 128

    Returns:
        The index of the bin into which the velocity falls.

    Raises:
        ValueError: if step does not divide 128 or velocity is out of range.
    """
    # Raise instead of assert so validation survives `python -O`.
    if 128 % step != 0:
        raise ValueError("128 possible midi velocities must be divisible into the number of bins")
    if not 0 <= velocity <= 127:
        raise ValueError(f"velocity must be between 0 and 127, not {velocity}")
    # bins[i] is the ith multiple of step; integer division picks the bin.
    return velocity // step
0.820146
0.623448