column             dtype            range
id                 int32            0 - 252k
repo               stringlengths    7 - 55
path               stringlengths    4 - 127
func_name          stringlengths    1 - 88
original_string    stringlengths    75 - 19.8k
language           stringclasses    1 value
code               stringlengths    75 - 19.8k
code_tokens        sequencelengths  20 - 707
docstring          stringlengths    3 - 17.3k
docstring_tokens   sequencelengths  3 - 222
sha                stringlengths    40 - 40
url                stringlengths    87 - 242
docstring_summary  stringclasses    1 value
parameters         stringclasses    1 value
return_statement   stringclasses    1 value
argument_list      stringclasses    1 value
identifier         stringclasses    1 value
nwo                stringclasses    1 value
score              float32          -1 - -1
500
wiheto/teneto
teneto/utils/utils.py
load_parcellation_coords
python

def load_parcellation_coords(parcellation_name):
    """
    Loads coordinates of included parcellations.

    Parameters
    ----------
    parcellation_name : str
        options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.

    Returns
    -------
    parc : array
        parcellation coordinates
    """
    path = tenetopath[0] + '/data/parcellation/' + parcellation_name + '.csv'
    parc = np.loadtxt(path, skiprows=1, delimiter=',', usecols=[1, 2, 3])
    return parc
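A minimal usage sketch, assuming teneto is installed and re-exports this helper as teneto.utils.load_parcellation_coords (an assumption; this dump only shows the function body). 'power2012_264' is one of the options named in the docstring:

import teneto

# Load the (x, y, z) coordinates of the bundled Power et al. 2012 parcellation.
parc = teneto.utils.load_parcellation_coords('power2012_264')
print(parc.shape)  # expected (264, 3): one row per region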
[ "def", "load_parcellation_coords", "(", "parcellation_name", ")", ":", "path", "=", "tenetopath", "[", "0", "]", "+", "'/data/parcellation/'", "+", "parcellation_name", "+", "'.csv'", "parc", "=", "np", ".", "loadtxt", "(", "path", ",", "skiprows", "=", "1", ",", "delimiter", "=", "','", ",", "usecols", "=", "[", "1", ",", "2", ",", "3", "]", ")", "return", "parc" ]
Loads coordinates of included parcellations.

Parameters
----------
parcellation_name : str
    options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.

Returns
-------
parc : array
    parcellation coordinates
[ "Loads", "coordinates", "of", "included", "parcellations", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L789-L809
-1
501
wiheto/teneto
teneto/utils/utils.py
create_traj_ranges
python

def create_traj_ranges(start, stop, N):
    """
    Fills in the trajectory range.

    # Adapted from https://stackoverflow.com/a/40624614
    """
    steps = (1.0 / (N - 1)) * (stop - start)
    if np.isscalar(steps):
        return steps * np.arange(N) + start
    else:
        return steps[:, None] * np.arange(N) + start[:, None]
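The broadcasting trick is the same linear spacing as np.linspace, extended to vector endpoints; a standalone restatement of the formula above:

import numpy as np

# Scalar endpoints: identical to np.linspace(0, 10, 5).
steps = (1.0 / (5 - 1)) * (10 - 0)
print(steps * np.arange(5))  # [ 0.   2.5  5.   7.5 10. ]

# Vector endpoints: one evenly spaced trajectory per (start, stop) pair.
start = np.array([0.0, 1.0])
stop = np.array([4.0, 9.0])
steps = (1.0 / (5 - 1)) * (stop - start)
print(steps[:, None] * np.arange(5) + start[:, None])  # rows [0..4] and [1,3,5,7,9]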
[ "def", "create_traj_ranges", "(", "start", ",", "stop", ",", "N", ")", ":", "steps", "=", "(", "1.0", "/", "(", "N", "-", "1", ")", ")", "*", "(", "stop", "-", "start", ")", "if", "np", ".", "isscalar", "(", "steps", ")", ":", "return", "steps", "*", "np", ".", "arange", "(", "N", ")", "+", "start", "else", ":", "return", "steps", "[", ":", ",", "None", "]", "*", "np", ".", "arange", "(", "N", ")", "+", "start", "[", ":", ",", "None", "]" ]
Fills in the trajectory range. # Adapted from https://stackoverflow.com/a/40624614
[ "Fills", "in", "the", "trajectory", "range", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L893-L903
-1
502
wiheto/teneto
teneto/utils/utils.py
get_dimord
python

def get_dimord(measure, calc=None, community=None):
    """
    Get the dimension order of a network measure.

    Parameters
    ----------
    measure : str
        Name of function in teneto.networkmeasures.
    calc : str, default=None
        Calc parameter for the function.
    community : bool, default=None
        If not None, the measure is assumed to have community-level output.

    Returns
    -------
    dimord : str
        Dimension order. So "node,node,time" would define the dimensions of the network measure.
    """
    if not calc:
        calc = ''
    else:
        calc = '_' + calc
    if not community:
        community = ''
    else:
        community = 'community'
    if 'community' in calc and 'community' in community:
        community = ''
    if calc == 'community_avg' or calc == 'community_pairs':
        community = ''
    dimord_dict = {
        'temporal_closeness_centrality': 'node',
        'temporal_degree_centrality': 'node',
        'temporal_degree_centrality_avg': 'node',
        'temporal_degree_centrality_time': 'node,time',
        'temporal_efficiency': 'global',
        'temporal_efficiency_global': 'global',
        'temporal_efficiency_node': 'node',
        'temporal_efficiency_to': 'node',
        'sid_global': 'global,time',
        'community_pairs': 'community,community,time',
        'community_avg': 'community,time',
        'sid': 'community,community,time',
        'reachability_latency_global': 'global',
        'reachability_latency': 'global',
        'reachability_latency_node': 'node',
        'fluctuability': 'node',
        'fluctuability_global': 'global',
        'bursty_coeff': 'edge,edge',
        'bursty_coeff_edge': 'edge,edge',
        'bursty_coeff_node': 'node',
        'bursty_coeff_meanEdgePerNode': 'node',
        'volatility_global': 'time',
    }
    if measure + calc + community in dimord_dict:
        return dimord_dict[measure + calc + community]
    else:
        print('WARNING: get_dimord() returned unknown dimension labels')
        return 'unknown'
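A short lookup sketch, assuming the function is importable from teneto.utils (an assumption based on the file path above):

from teneto.utils import get_dimord

print(get_dimord('temporal_degree_centrality'))  # 'node'
print(get_dimord('volatility', calc='global'))   # key 'volatility_global' -> 'time'
print(get_dimord('not_a_measure'))               # prints a warning, returns 'unknown'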
[ "def", "get_dimord", "(", "measure", ",", "calc", "=", "None", ",", "community", "=", "None", ")", ":", "if", "not", "calc", ":", "calc", "=", "''", "else", ":", "calc", "=", "'_'", "+", "calc", "if", "not", "community", ":", "community", "=", "''", "else", ":", "community", "=", "'community'", "if", "'community'", "in", "calc", "and", "'community'", "in", "community", ":", "community", "=", "''", "if", "calc", "==", "'community_avg'", "or", "calc", "==", "'community_pairs'", ":", "community", "=", "''", "dimord_dict", "=", "{", "'temporal_closeness_centrality'", ":", "'node'", ",", "'temporal_degree_centrality'", ":", "'node'", ",", "'temporal_degree_centralit_avg'", ":", "'node'", ",", "'temporal_degree_centrality_time'", ":", "'node,time'", ",", "'temporal_efficiency'", ":", "'global'", ",", "'temporal_efficiency_global'", ":", "'global'", ",", "'temporal_efficiency_node'", ":", "'node'", ",", "'temporal_efficiency_to'", ":", "'node'", ",", "'sid_global'", ":", "'global,time'", ",", "'community_pairs'", ":", "'community,community,time'", ",", "'community_avg'", ":", "'community,time'", ",", "'sid'", ":", "'community,community,time'", ",", "'reachability_latency_global'", ":", "'global'", ",", "'reachability_latency'", ":", "'global'", ",", "'reachability_latency_node'", ":", "'node'", ",", "'fluctuability'", ":", "'node'", ",", "'fluctuability_global'", ":", "'global'", ",", "'bursty_coeff'", ":", "'edge,edge'", ",", "'bursty_coeff_edge'", ":", "'edge,edge'", ",", "'bursty_coeff_node'", ":", "'node'", ",", "'bursty_coeff_meanEdgePerNode'", ":", "'node'", ",", "'volatility_global'", ":", "'time'", ",", "}", "if", "measure", "+", "calc", "+", "community", "in", "dimord_dict", ":", "return", "dimord_dict", "[", "measure", "+", "calc", "+", "community", "]", "else", ":", "print", "(", "'WARNINGL: get_dimord() returned unknown dimension labels'", ")", "return", "'unknown'" ]
Get the dimension order of a network measure.

Parameters
----------
measure : str
    Name of function in teneto.networkmeasures.
calc : str, default=None
    Calc parameter for the function.
community : bool, default=None
    If not None, the measure is assumed to have community-level output.

Returns
-------
dimord : str
    Dimension order. So "node,node,time" would define the dimensions of the network measure.
[ "Get", "the", "dimension", "order", "of", "a", "network", "measure", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L906-L969
-1
503
wiheto/teneto
teneto/utils/utils.py
create_supraadjacency_matrix
python

def create_supraadjacency_matrix(tnet, intersliceweight=1):
    """
    Returns a supraadjacency matrix from a temporal network structure.

    Parameters
    --------
    tnet : TemporalNetwork
        Temporal network (any network type)
    intersliceweight : int
        Weight that links the same node from adjacent time-points

    Returns
    --------
    supranet : dataframe
        Supraadjacency matrix
    """
    newnetwork = tnet.network.copy()
    newnetwork['i'] = (tnet.network['i']) + \
        ((tnet.netshape[0]) * (tnet.network['t']))
    newnetwork['j'] = (tnet.network['j']) + \
        ((tnet.netshape[0]) * (tnet.network['t']))
    if 'weight' not in newnetwork.columns:
        newnetwork['weight'] = 1
    newnetwork.drop('t', axis=1, inplace=True)
    timepointconns = pd.DataFrame()
    timepointconns['i'] = np.arange(0, (tnet.N*tnet.T)-tnet.N)
    timepointconns['j'] = np.arange(tnet.N, (tnet.N*tnet.T))
    timepointconns['weight'] = intersliceweight
    supranet = pd.concat([newnetwork, timepointconns]).reset_index(drop=True)
    return supranet
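A minimal sketch, assuming teneto.TemporalNetwork accepts an (i, j, t) edge list via its from_edgelist argument (an assumption; only this function's body is shown here):

import teneto

# 3 nodes, 2 time points, binary edges given as [i, j, t] triples.
tnet = teneto.TemporalNetwork(from_edgelist=[[0, 1, 0], [0, 1, 1], [1, 2, 1]])
supra = teneto.utils.create_supraadjacency_matrix(tnet, intersliceweight=1)
# Node k at time t becomes supra-node k + N*t; the appended rows couple
# each node to itself at the next time point with the interslice weight.
print(supra)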
[ "def", "create_supraadjacency_matrix", "(", "tnet", ",", "intersliceweight", "=", "1", ")", ":", "newnetwork", "=", "tnet", ".", "network", ".", "copy", "(", ")", "newnetwork", "[", "'i'", "]", "=", "(", "tnet", ".", "network", "[", "'i'", "]", ")", "+", "(", "(", "tnet", ".", "netshape", "[", "0", "]", ")", "*", "(", "tnet", ".", "network", "[", "'t'", "]", ")", ")", "newnetwork", "[", "'j'", "]", "=", "(", "tnet", ".", "network", "[", "'j'", "]", ")", "+", "(", "(", "tnet", ".", "netshape", "[", "0", "]", ")", "*", "(", "tnet", ".", "network", "[", "'t'", "]", ")", ")", "if", "'weight'", "not", "in", "newnetwork", ".", "columns", ":", "newnetwork", "[", "'weight'", "]", "=", "1", "newnetwork", ".", "drop", "(", "'t'", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "timepointconns", "=", "pd", ".", "DataFrame", "(", ")", "timepointconns", "[", "'i'", "]", "=", "np", ".", "arange", "(", "0", ",", "(", "tnet", ".", "N", "*", "tnet", ".", "T", ")", "-", "tnet", ".", "N", ")", "timepointconns", "[", "'j'", "]", "=", "np", ".", "arange", "(", "tnet", ".", "N", ",", "(", "tnet", ".", "N", "*", "tnet", ".", "T", ")", ")", "timepointconns", "[", "'weight'", "]", "=", "intersliceweight", "supranet", "=", "pd", ".", "concat", "(", "[", "newnetwork", ",", "timepointconns", "]", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "return", "supranet" ]
Returns a supraadjacency matrix from a temporal network structure.

Parameters
--------
tnet : TemporalNetwork
    Temporal network (any network type)
intersliceweight : int
    Weight that links the same node from adjacent time-points

Returns
--------
supranet : dataframe
    Supraadjacency matrix
[ "Returns", "a", "supraadjacency", "matrix", "from", "a", "temporal", "network", "structure" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L1092-L1121
-1
504
wiheto/teneto
teneto/utils/io.py
tnet_to_nx
python

def tnet_to_nx(df, t=None):
    """ Creates undirected networkx object """
    if t is not None:
        df = get_network_when(df, t=t)
    if 'weight' in df.columns:
        nxobj = nx.from_pandas_edgelist(
            df, source='i', target='j', edge_attr='weight')
    else:
        nxobj = nx.from_pandas_edgelist(df, source='i', target='j')
    return nxobj
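A minimal sketch with the function above in scope; the edge list just needs the i/j (and optionally weight) column names the function expects:

import pandas as pd

df = pd.DataFrame({'i': [0, 0, 1], 'j': [1, 2, 2], 'weight': [0.5, 1.0, 0.2]})
g = tnet_to_nx(df)          # undirected nx.Graph, weights kept as edge attributes
print(g.edges(data=True))   # [(0, 1, {'weight': 0.5}), (0, 2, {'weight': 1.0}), ...]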
[ "def", "tnet_to_nx", "(", "df", ",", "t", "=", "None", ")", ":", "if", "t", "is", "not", "None", ":", "df", "=", "get_network_when", "(", "df", ",", "t", "=", "t", ")", "if", "'weight'", "in", "df", ".", "columns", ":", "nxobj", "=", "nx", ".", "from_pandas_edgelist", "(", "df", ",", "source", "=", "'i'", ",", "target", "=", "'j'", ",", "edge_attr", "=", "'weight'", ")", "else", ":", "nxobj", "=", "nx", ".", "from_pandas_edgelist", "(", "df", ",", "source", "=", "'i'", ",", "target", "=", "'j'", ")", "return", "nxobj" ]
Creates undirected networkx object
[ "Creates", "undirected", "networkx", "object" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/io.py#L5-L16
-1
505
wiheto/teneto
teneto/communitydetection/louvain.py
temporal_louvain
python

def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore',
                     randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1):
    r"""
    Louvain clustering for a temporal network.

    Parameters
    -----------
    tnet : array, dict, TemporalNetwork
        Input network
    resolution : int
        resolution of Louvain clustering ($\gamma$)
    intersliceweight : int
        interslice weight of multilayer clustering ($\omega$). Must be positive.
    n_iter : int
        Number of iterations to run Louvain for
    randomseed : int
        Set for reproducibility
    negativeedge : str
        If there are negative edges, what should be done with them.
        Options: 'ignore' (i.e. set to 0). More options to be added.
    consensus_threshold : float (default 0.5)
        When creating the consensus matrix that averages over the iterations,
        keep values where the consensus reaches this proportion.

    Returns
    -------
    communities : array (node,time)
        node,time array of community assignment

    Notes
    -------

    References
    ----------
    """
    tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
    # Divide resolution by the number of timepoints
    resolution = resolution / tnet.T
    supranet = create_supraadjacency_matrix(
        tnet, intersliceweight=intersliceweight)
    if negativeedge == 'ignore':
        supranet = supranet[supranet['weight'] > 0]
    nxsupra = tnet_to_nx(supranet)
    np.random.seed(randomseed)
    while True:
        comtmp = []
        with ProcessPoolExecutor(max_workers=njobs) as executor:
            job = {executor.submit(_run_louvain, nxsupra, resolution, tnet.N, tnet.T)
                   for n in range(n_iter)}
            for j in as_completed(job):
                comtmp.append(j.result())
        comtmp = np.stack(comtmp)
        comtmp = comtmp.transpose()
        comtmp = np.reshape(comtmp, [tnet.N, tnet.T, n_iter], order='F')
        if n_iter == 1:
            break
        nxsupra_old = nxsupra
        nxsupra = make_consensus_matrix(comtmp, consensus_threshold)
        # If there was no consensus, there are no communities possible, return
        if nxsupra is None:
            break
        if (nx.to_numpy_array(nxsupra, nodelist=np.arange(tnet.N*tnet.T)) ==
                nx.to_numpy_array(nxsupra_old, nodelist=np.arange(tnet.N*tnet.T))).all():
            break
    communities = comtmp[:, :, 0]
    if temporal_consensus:
        communities = make_temporal_consensus(communities)
    return communities
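A hedged usage sketch, assuming the function is exposed as teneto.communitydetection.temporal_louvain and that a positive (node, node, time) array is accepted by process_input (both are assumptions here):

import numpy as np
import teneto

np.random.seed(2022)
G = np.random.rand(8, 8, 5)         # weighted (node, node, time) array
G = (G + G.transpose(1, 0, 2)) / 2  # symmetrize each time slice
communities = teneto.communitydetection.temporal_louvain(G, n_iter=10, randomseed=10)
print(communities.shape)            # (8, 5): one label per node and time point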
[ "def", "temporal_louvain", "(", "tnet", ",", "resolution", "=", "1", ",", "intersliceweight", "=", "1", ",", "n_iter", "=", "100", ",", "negativeedge", "=", "'ignore'", ",", "randomseed", "=", "None", ",", "consensus_threshold", "=", "0.5", ",", "temporal_consensus", "=", "True", ",", "njobs", "=", "1", ")", ":", "tnet", "=", "process_input", "(", "tnet", ",", "[", "'C'", ",", "'G'", ",", "'TN'", "]", ",", "'TN'", ")", "# Divide resolution by the number of timepoints", "resolution", "=", "resolution", "/", "tnet", ".", "T", "supranet", "=", "create_supraadjacency_matrix", "(", "tnet", ",", "intersliceweight", "=", "intersliceweight", ")", "if", "negativeedge", "==", "'ignore'", ":", "supranet", "=", "supranet", "[", "supranet", "[", "'weight'", "]", ">", "0", "]", "nxsupra", "=", "tnet_to_nx", "(", "supranet", ")", "np", ".", "random", ".", "seed", "(", "randomseed", ")", "while", "True", ":", "comtmp", "=", "[", "]", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "njobs", ")", "as", "executor", ":", "job", "=", "{", "executor", ".", "submit", "(", "_run_louvain", ",", "nxsupra", ",", "resolution", ",", "tnet", ".", "N", ",", "tnet", ".", "T", ")", "for", "n", "in", "range", "(", "n_iter", ")", "}", "for", "j", "in", "as_completed", "(", "job", ")", ":", "comtmp", ".", "append", "(", "j", ".", "result", "(", ")", ")", "comtmp", "=", "np", ".", "stack", "(", "comtmp", ")", "comtmp", "=", "comtmp", ".", "transpose", "(", ")", "comtmp", "=", "np", ".", "reshape", "(", "comtmp", ",", "[", "tnet", ".", "N", ",", "tnet", ".", "T", ",", "n_iter", "]", ",", "order", "=", "'F'", ")", "if", "n_iter", "==", "1", ":", "break", "nxsupra_old", "=", "nxsupra", "nxsupra", "=", "make_consensus_matrix", "(", "comtmp", ",", "consensus_threshold", ")", "# If there was no consensus, there are no communities possible, return", "if", "nxsupra", "is", "None", ":", "break", "if", "(", "nx", ".", "to_numpy_array", "(", "nxsupra", ",", "nodelist", "=", "np", ".", "arange", "(", "tnet", ".", "N", "*", "tnet", ".", "T", ")", ")", "==", "nx", ".", "to_numpy_array", "(", "nxsupra_old", ",", "nodelist", "=", "np", ".", "arange", "(", "tnet", ".", "N", "*", "tnet", ".", "T", ")", ")", ")", ".", "all", "(", ")", ":", "break", "communities", "=", "comtmp", "[", ":", ",", ":", ",", "0", "]", "if", "temporal_consensus", "==", "True", ":", "communities", "=", "make_temporal_consensus", "(", "communities", ")", "return", "communities" ]
r""" Louvain clustering for a temporal network. Parameters ----------- tnet : array, dict, TemporalNetwork Input network resolution : int resolution of Louvain clustering ($\gamma$) intersliceweight : int interslice weight of multilayer clustering ($\omega$). Must be positive. n_iter : int Number of iterations to run louvain for randomseed : int Set for reproduceability negativeedge : str If there are negative edges, what should be done with them. Options: 'ignore' (i.e. set to 0). More options to be added. consensus : float (0.5 default) When creating consensus matrix to average over number of iterations, keep values when the consensus is this amount. Returns ------- communities : array (node,time) node,time array of community assignment Notes ------- References ----------
[ "r", "Louvain", "clustering", "for", "a", "temporal", "network", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/communitydetection/louvain.py#L11-L75
-1
506
wiheto/teneto
teneto/communitydetection/louvain.py
make_temporal_consensus
python

def make_temporal_consensus(com_membership):
    r"""
    Matches community labels across time-points.

    Jaccard matching is done in a greedy fashion, matching the largest community
    at t with the communities at t-1.

    Parameters
    ----------
    com_membership : array
        Shape should be node, time.

    Returns
    -------
    com_membership : array
        Community membership with labels matched across time-points using Jaccard distance.
    """
    com_membership = np.array(com_membership)
    # Relabel the first time point's communities as consecutive integers starting at 0.
    com_membership[:, 0] = clean_community_indexes(com_membership[:, 0])
    # Loop over all time points, greedily matching each community (largest first)
    # to the previous time point's community with the smallest Jaccard distance.
    for t in range(1, com_membership.shape[1]):
        ct, counts_t = np.unique(com_membership[:, t], return_counts=True)
        ct = ct[np.argsort(counts_t)[::-1]]
        c1back = np.unique(com_membership[:, t-1])
        new_index = np.zeros(com_membership.shape[0])
        for n in ct:
            if len(c1back) > 0:
                d = np.ones(int(c1back.max())+1)
                for m in c1back:
                    v1 = np.zeros(com_membership.shape[0])
                    v2 = np.zeros(com_membership.shape[0])
                    v1[com_membership[:, t] == n] = 1
                    v2[com_membership[:, t-1] == m] = 1
                    d[int(m)] = jaccard(v1, v2)
                bestval = np.argmin(d)
            else:
                bestval = new_index.max() + 1
            new_index[com_membership[:, t] == n] = bestval
            c1back = np.array(np.delete(c1back, np.where(c1back == bestval)))
        com_membership[:, t] = new_index
    return com_membership
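A small sketch with the function above in scope (and its clean_community_indexes and jaccard helpers importable). At t=1 the labels are a permutation of those at t=0, so matching should restore them:

import numpy as np

com = np.array([[0, 1],
                [0, 1],
                [1, 0]])
print(make_temporal_consensus(com))
# expected: [[0 0]
#            [0 0]
#            [1 1]]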
[ "def", "make_temporal_consensus", "(", "com_membership", ")", ":", "com_membership", "=", "np", ".", "array", "(", "com_membership", ")", "# make first indicies be between 0 and 1.", "com_membership", "[", ":", ",", "0", "]", "=", "clean_community_indexes", "(", "com_membership", "[", ":", ",", "0", "]", ")", "# loop over all timepoints, get jacccard distance in greedy manner for largest community to time period before", "for", "t", "in", "range", "(", "1", ",", "com_membership", ".", "shape", "[", "1", "]", ")", ":", "ct", ",", "counts_t", "=", "np", ".", "unique", "(", "com_membership", "[", ":", ",", "t", "]", ",", "return_counts", "=", "True", ")", "ct", "=", "ct", "[", "np", ".", "argsort", "(", "counts_t", ")", "[", ":", ":", "-", "1", "]", "]", "c1back", "=", "np", ".", "unique", "(", "com_membership", "[", ":", ",", "t", "-", "1", "]", ")", "new_index", "=", "np", ".", "zeros", "(", "com_membership", ".", "shape", "[", "0", "]", ")", "for", "n", "in", "ct", ":", "if", "len", "(", "c1back", ")", ">", "0", ":", "d", "=", "np", ".", "ones", "(", "int", "(", "c1back", ".", "max", "(", ")", ")", "+", "1", ")", "for", "m", "in", "c1back", ":", "v1", "=", "np", ".", "zeros", "(", "com_membership", ".", "shape", "[", "0", "]", ")", "v2", "=", "np", ".", "zeros", "(", "com_membership", ".", "shape", "[", "0", "]", ")", "v1", "[", "com_membership", "[", ":", ",", "t", "]", "==", "n", "]", "=", "1", "v2", "[", "com_membership", "[", ":", ",", "t", "-", "1", "]", "==", "m", "]", "=", "1", "d", "[", "int", "(", "m", ")", "]", "=", "jaccard", "(", "v1", ",", "v2", ")", "bestval", "=", "np", ".", "argmin", "(", "d", ")", "else", ":", "bestval", "=", "new_index", ".", "max", "(", ")", "+", "1", "new_index", "[", "com_membership", "[", ":", ",", "t", "]", "==", "n", "]", "=", "bestval", "c1back", "=", "np", ".", "array", "(", "np", ".", "delete", "(", "c1back", ",", "np", ".", "where", "(", "c1back", "==", "bestval", ")", ")", ")", "com_membership", "[", ":", ",", "t", "]", "=", "new_index", "return", "com_membership" ]
r""" Matches community labels accross time-points Jaccard matching is in a greedy fashiong. Matching the largest community at t with the community at t-1. Parameters ---------- com_membership : array Shape should be node, time. Returns ------- D : array temporal consensus matrix using Jaccard distance
[ "r", "Matches", "community", "labels", "accross", "time", "-", "points" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/communitydetection/louvain.py#L123-L167
-1
507
wiheto/teneto
teneto/temporalcommunity/flexibility.py
flexibility
python

def flexibility(communities):
    """
    Amount a node changes community.

    Parameters
    ----------
    communities : array
        Community array of shape (node,time)

    Returns
    --------
    flex : array
        Flexibility of each node.

    Notes
    -----
    Flexibility calculates the number of times a node switches its community
    label during a time series. It is normalized by the number of possible
    changes which could occur. It is important to make sure that the different
    community labels across time points are not arbitrary.

    References
    -----------
    Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST.
    Dynamic reconfiguration of human brain networks during learning.
    PNAS, 2011, 108(18):7641-6.
    """
    # Preallocate
    flex = np.zeros(communities.shape[0])
    # Go from the second time point to last, compare with time-point before
    for t in range(1, communities.shape[1]):
        flex[communities[:, t] != communities[:, t-1]] += 1
    # Normalize
    flex = flex / (communities.shape[1] - 1)
    return flex
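A worked sketch with the function above in scope. Node 0 switches community once out of two possible switch points; node 1 never switches:

import numpy as np

communities = np.array([[0, 0, 1],
                        [1, 1, 1]])
print(flexibility(communities))  # expected [0.5 0. ]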
[ "def", "flexibility", "(", "communities", ")", ":", "# Preallocate", "flex", "=", "np", ".", "zeros", "(", "communities", ".", "shape", "[", "0", "]", ")", "# Go from the second time point to last, compare with time-point before", "for", "t", "in", "range", "(", "1", ",", "communities", ".", "shape", "[", "1", "]", ")", ":", "flex", "[", "communities", "[", ":", ",", "t", "]", "!=", "communities", "[", ":", ",", "t", "-", "1", "]", "]", "+=", "1", "# Normalize", "flex", "=", "flex", "/", "(", "communities", ".", "shape", "[", "1", "]", "-", "1", ")", "return", "flex" ]
Amount a node changes community.

Parameters
----------
communities : array
    Community array of shape (node,time)

Returns
--------
flex : array
    Flexibility of each node.

Notes
-----
Flexibility calculates the number of times a node switches its community
label during a time series. It is normalized by the number of possible
changes which could occur. It is important to make sure that the different
community labels across time points are not arbitrary.

References
-----------
Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST.
Dynamic reconfiguration of human brain networks during learning.
PNAS, 2011, 108(18):7641-6.
[ "Amount", "a", "node", "changes", "community" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/flexibility.py#L4-L34
-1
508
wiheto/teneto
teneto/utils/bidsutils.py
load_tabular_file
python

def load_tabular_file(fname, return_meta=False, header=True, index_col=True):
    """
    Given a file name, loads it as a pandas data frame.

    Parameters
    ----------
    fname : str
        file name and path. Must be tsv.
    return_meta : bool (default False)
        if True, also load the accompanying json sidecar file.
    header : bool (default True)
        if there is a header in the tsv file, true will use first row in file.
    index_col : bool (default True)
        if there is an index column in the csv or tsv file, true will use the first column in file.

    Returns
    -------
    df : pandas
        The loaded file
    info : pandas, if return_meta=True
        Meta information in json file (if specified)
    """
    if index_col:
        index_col = 0
    else:
        index_col = None
    if header:
        header = 0
    else:
        header = None
    df = pd.read_csv(fname, header=header, index_col=index_col, sep='\t')
    if return_meta:
        json_fname = fname.replace('tsv', 'json')
        meta = pd.read_json(json_fname)
        return df, meta
    else:
        return df
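A usage sketch with the function above in scope; the file names are hypothetical:

df = load_tabular_file('timeseries.tsv')
# With return_meta=True, a 'timeseries.json' sidecar is read as well.
df, meta = load_tabular_file('timeseries.tsv', return_meta=True)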
[ "def", "load_tabular_file", "(", "fname", ",", "return_meta", "=", "False", ",", "header", "=", "True", ",", "index_col", "=", "True", ")", ":", "if", "index_col", ":", "index_col", "=", "0", "else", ":", "index_col", "=", "None", "if", "header", ":", "header", "=", "0", "else", ":", "header", "=", "None", "df", "=", "pd", ".", "read_csv", "(", "fname", ",", "header", "=", "header", ",", "index_col", "=", "index_col", ",", "sep", "=", "'\\t'", ")", "if", "return_meta", ":", "json_fname", "=", "fname", ".", "replace", "(", "'tsv'", ",", "'json'", ")", "meta", "=", "pd", ".", "read_json", "(", "json_fname", ")", "return", "df", ",", "meta", "else", ":", "return", "df" ]
Given a file name, loads it as a pandas data frame.

Parameters
----------
fname : str
    file name and path. Must be tsv.
return_meta : bool (default False)
    if True, also load the accompanying json sidecar file.
header : bool (default True)
    if there is a header in the tsv file, true will use first row in file.
index_col : bool (default True)
    if there is an index column in the csv or tsv file, true will use the first column in file.

Returns
-------
df : pandas
    The loaded file
info : pandas, if return_meta=True
    Meta information in json file (if specified)
[ "Given", "a", "file", "name", "loads", "as", "a", "pandas", "data", "frame" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/bidsutils.py#L77-L115
-1
509
wiheto/teneto
teneto/utils/bidsutils.py
get_sidecar
python

def get_sidecar(fname, allowedfileformats='default'):
    """ Loads sidecar or creates one """
    if allowedfileformats == 'default':
        allowedfileformats = ['.tsv', '.nii.gz']
    for f in allowedfileformats:
        fname = fname.split(f)[0]
    fname += '.json'
    if os.path.exists(fname):
        with open(fname) as fs:
            sidecar = json.load(fs)
    else:
        sidecar = {}
    if 'filestatus' not in sidecar:
        sidecar['filestatus'] = {}
        sidecar['filestatus']['reject'] = False
        sidecar['filestatus']['reason'] = []
    return sidecar
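A usage sketch with the function above in scope; the file name is hypothetical:

sidecar = get_sidecar('sub-01_task-rest_bold.tsv')
# Looks for 'sub-01_task-rest_bold.json'; if absent, a fresh sidecar is returned.
print(sidecar['filestatus'])  # {'reject': False, 'reason': []} when newly created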
[ "def", "get_sidecar", "(", "fname", ",", "allowedfileformats", "=", "'default'", ")", ":", "if", "allowedfileformats", "==", "'default'", ":", "allowedfileformats", "=", "[", "'.tsv'", ",", "'.nii.gz'", "]", "for", "f", "in", "allowedfileformats", ":", "fname", "=", "fname", ".", "split", "(", "f", ")", "[", "0", "]", "fname", "+=", "'.json'", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "with", "open", "(", "fname", ")", "as", "fs", ":", "sidecar", "=", "json", ".", "load", "(", "fs", ")", "else", ":", "sidecar", "=", "{", "}", "if", "'filestatus'", "not", "in", "sidecar", ":", "sidecar", "[", "'filestatus'", "]", "=", "{", "}", "sidecar", "[", "'filestatus'", "]", "[", "'reject'", "]", "=", "False", "sidecar", "[", "'filestatus'", "]", "[", "'reason'", "]", "=", "[", "]", "return", "sidecar" ]
Loads sidecar or creates one
[ "Loads", "sidecar", "or", "creates", "one" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/bidsutils.py#L118-L136
-1
510
wiheto/teneto
teneto/utils/bidsutils.py
process_exclusion_criteria
python

def process_exclusion_criteria(exclusion_criteria):
    """
    Parses an exclusion criteria string to get the function and threshold.

    Parameters
    ----------
    exclusion_criteria : list
        list of strings where each string is of the format [relation][threshold].
        E.g. '<0.5' or '>=1'

    Returns
    -------
    relfun : list
        list of numpy functions for the exclusion criteria
    threshold : list
        list of floats for threshold for each relfun
    """
    relfun = []
    threshold = []
    for ec in exclusion_criteria:
        if ec[0:2] == '>=':
            relfun.append(np.greater_equal)
            threshold.append(float(ec[2:]))
        elif ec[0:2] == '<=':
            relfun.append(np.less_equal)
            threshold.append(float(ec[2:]))
        elif ec[0] == '>':
            relfun.append(np.greater)
            threshold.append(float(ec[1:]))
        elif ec[0] == '<':
            relfun.append(np.less)
            threshold.append(float(ec[1:]))
        else:
            raise ValueError('exclusion criteria must begin with >, <, >= or <=')
    return relfun, threshold
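A worked sketch with the function above in scope:

import numpy as np

relfun, threshold = process_exclusion_criteria(['>=0.5', '<2'])
print(relfun[0] is np.greater_equal, threshold[0])    # True 0.5
print(relfun[1](np.array([1.5, 3.0]), threshold[1]))  # [ True False]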
[ "def", "process_exclusion_criteria", "(", "exclusion_criteria", ")", ":", "relfun", "=", "[", "]", "threshold", "=", "[", "]", "for", "ec", "in", "exclusion_criteria", ":", "if", "ec", "[", "0", ":", "2", "]", "==", "'>='", ":", "relfun", ".", "append", "(", "np", ".", "greater_equal", ")", "threshold", ".", "append", "(", "float", "(", "ec", "[", "2", ":", "]", ")", ")", "elif", "ec", "[", "0", ":", "2", "]", "==", "'<='", ":", "relfun", ".", "append", "(", "np", ".", "less_equal", ")", "threshold", ".", "append", "(", "float", "(", "ec", "[", "2", ":", "]", ")", ")", "elif", "ec", "[", "0", "]", "==", "'>'", ":", "relfun", ".", "append", "(", "np", ".", "greater", ")", "threshold", ".", "append", "(", "float", "(", "ec", "[", "1", ":", "]", ")", ")", "elif", "ec", "[", "0", "]", "==", "'<'", ":", "relfun", ".", "append", "(", "np", ".", "less", ")", "threshold", ".", "append", "(", "float", "(", "ec", "[", "1", ":", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "'exclusion crieria must being with >,<,>= or <='", ")", "return", "relfun", ",", "threshold" ]
Parses an exclusion criteria string to get the function and threshold.

Parameters
----------
exclusion_criteria : list
    list of strings where each string is of the format [relation][threshold].
    E.g. '<0.5' or '>=1'

Returns
-------
relfun : list
    list of numpy functions for the exclusion criteria
threshold : list
    list of floats for threshold for each relfun
[ "Parses", "an", "exclusion", "critera", "string", "to", "get", "the", "function", "and", "threshold", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/bidsutils.py#L166-L201
-1
511
wiheto/teneto
teneto/networkmeasures/reachability_latency.py
reachability_latency
python

def reachability_latency(tnet=None, paths=None, rratio=1, calc='global'):
    """
    Reachability latency. This is the r-th longest temporal path.

    Parameters
    ---------
    tnet : array or dict
        Can either be a network (graphlet or contact), binary undirected only.
        Alternatively, can be a paths dictionary
        (output of teneto.networkmeasure.shortest_temporal_path)
    rratio : float (default: 1)
        reachability ratio that the latency is calculated in relation to.
        Value must be over 0 and up to 1.
        1 (default) - all nodes must be reached.
        Other values (e.g. .5) imply that only that proportion of nodes (e.g. 50%) is reached.
        This is rounded to the nearest integer number of nodes.
        E.g. if there are 6 nodes [1,2,3,4,5,6], it will be node 4 (due to rounding upwards).
    calc : str
        what to calculate. Alternatives: 'global' entire network; 'nodes': for each node.

    Returns
    --------
    reach_lat : array
        Reachability latency

    Notes
    ------
    Reachability latency calculates the time it takes for the temporal paths
    to reach the specified proportion of nodes.
    """
    if tnet is not None and paths is not None:
        raise ValueError('Only network or path input allowed.')
    if tnet is None and paths is None:
        raise ValueError('No input.')
    # if shortest paths are not calculated, calculate them
    if tnet is not None:
        paths = shortest_temporal_path(tnet)
    pathmat = np.zeros([paths[['from', 'to']].max().max()+1,
                        paths[['from', 'to']].max().max()+1,
                        paths[['t_start']].max().max()+1]) * np.nan
    pathmat[paths['from'].values, paths['to'].values,
            paths['t_start'].values] = paths['temporal-distance']
    netshape = pathmat.shape
    # Cast to int so the result can be used as an index below.
    edges_to_reach = int(netshape[0] - np.round(netshape[0] * rratio))
    reach_lat = np.zeros([netshape[1], netshape[2]]) * np.nan
    for t_ind in range(0, netshape[2]):
        paths_sort = -np.sort(-pathmat[:, :, t_ind], axis=1)
        reach_lat[:, t_ind] = paths_sort[:, edges_to_reach]
    if calc == 'global':
        reach_lat = np.nansum(reach_lat)
        reach_lat = reach_lat / ((netshape[0]) * netshape[2])
    elif calc == 'nodes':
        reach_lat = np.nansum(reach_lat, axis=1)
        reach_lat = reach_lat / (netshape[2])
    return reach_lat
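A hedged sketch, assuming the function is exposed as teneto.networkmeasures.reachability_latency (an assumption). With every edge present at every time point, each node reaches all others in one step:

import numpy as np
import teneto

G = np.ones([4, 4, 5])                 # fully connected binary network
G[np.arange(4), np.arange(4), :] = 0   # zero the diagonal (no self-edges)
print(teneto.networkmeasures.reachability_latency(G, calc='global'))  # expected 1.0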
[ "def", "reachability_latency", "(", "tnet", "=", "None", ",", "paths", "=", "None", ",", "rratio", "=", "1", ",", "calc", "=", "'global'", ")", ":", "if", "tnet", "is", "not", "None", "and", "paths", "is", "not", "None", ":", "raise", "ValueError", "(", "'Only network or path input allowed.'", ")", "if", "tnet", "is", "None", "and", "paths", "is", "None", ":", "raise", "ValueError", "(", "'No input.'", ")", "# if shortest paths are not calculated, calculate them", "if", "tnet", "is", "not", "None", ":", "paths", "=", "shortest_temporal_path", "(", "tnet", ")", "pathmat", "=", "np", ".", "zeros", "(", "[", "paths", "[", "[", "'from'", ",", "'to'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", ",", "paths", "[", "[", "'from'", ",", "'to'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", ",", "paths", "[", "[", "'t_start'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", "]", ")", "*", "np", ".", "nan", "pathmat", "[", "paths", "[", "'from'", "]", ".", "values", ",", "paths", "[", "'to'", "]", ".", "values", ",", "paths", "[", "'t_start'", "]", ".", "values", "]", "=", "paths", "[", "'temporal-distance'", "]", "netshape", "=", "pathmat", ".", "shape", "edges_to_reach", "=", "netshape", "[", "0", "]", "-", "np", ".", "round", "(", "netshape", "[", "0", "]", "*", "rratio", ")", "reach_lat", "=", "np", ".", "zeros", "(", "[", "netshape", "[", "1", "]", ",", "netshape", "[", "2", "]", "]", ")", "*", "np", ".", "nan", "for", "t_ind", "in", "range", "(", "0", ",", "netshape", "[", "2", "]", ")", ":", "paths_sort", "=", "-", "np", ".", "sort", "(", "-", "pathmat", "[", ":", ",", ":", ",", "t_ind", "]", ",", "axis", "=", "1", ")", "reach_lat", "[", ":", ",", "t_ind", "]", "=", "paths_sort", "[", ":", ",", "edges_to_reach", "]", "if", "calc", "==", "'global'", ":", "reach_lat", "=", "np", ".", "nansum", "(", "reach_lat", ")", "reach_lat", "=", "reach_lat", "/", "(", "(", "netshape", "[", "0", "]", ")", "*", "netshape", "[", "2", "]", ")", "elif", "calc", "==", "'nodes'", ":", "reach_lat", "=", "np", ".", "nansum", "(", "reach_lat", ",", "axis", "=", "1", ")", "reach_lat", "=", "reach_lat", "/", "(", "netshape", "[", "2", "]", ")", "return", "reach_lat" ]
Reachability latency. This is the r-th longest temporal path.

Parameters
---------
tnet : array or dict
    Can either be a network (graphlet or contact), binary undirected only.
    Alternatively, can be a paths dictionary
    (output of teneto.networkmeasure.shortest_temporal_path)
rratio : float (default: 1)
    reachability ratio that the latency is calculated in relation to.
    Value must be over 0 and up to 1.
    1 (default) - all nodes must be reached.
    Other values (e.g. .5) imply that only that proportion of nodes (e.g. 50%) is reached.
    This is rounded to the nearest integer number of nodes.
    E.g. if there are 6 nodes [1,2,3,4,5,6], it will be node 4 (due to rounding upwards).
calc : str
    what to calculate. Alternatives: 'global' entire network; 'nodes': for each node.

Returns
--------
reach_lat : array
    Reachability latency

Notes
------
Reachability latency calculates the time it takes for the temporal paths
to reach the specified proportion of nodes.
[ "Reachability", "latency", ".", "This", "is", "the", "r", "-", "th", "longest", "temporal", "path", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/reachability_latency.py#L9-L72
-1
512
wiheto/teneto
teneto/temporalcommunity/recruitment.py
recruitment
python

def recruitment(temporalcommunities, staticcommunities):
    """
    Calculates recruitment coefficient for each node. Recruitment coefficient
    is the average probability of nodes from the same static communities being
    in the same temporal communities at other time-points or during different tasks.

    Parameters:
    ------------
    temporalcommunities : array
        temporal communities vector (node,time)
    staticcommunities : array
        Static communities vector for each node

    Returns:
    -------
    Rcoeff : array
        recruitment coefficient for each node

    References:
    -----------
    Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
    Learning-Induced Autonomy of Sensorimotor Systems.
    Nat Neurosci. 2015 May;18(5):744-51.

    Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
    A Functional Cartography of Cognitive Systems.
    PLoS Comput Biol. 2015 Dec 2;11(12):e1004533.
    """
    # make sure the static and temporal communities have the same number of nodes
    if staticcommunities.shape[0] != temporalcommunities.shape[0]:
        raise ValueError(
            'Temporal and static communities have different dimensions')
    alleg = allegiance(temporalcommunities)
    Rcoeff = np.zeros(len(staticcommunities))
    for i, statcom in enumerate(staticcommunities):
        Rcoeff[i] = np.mean(alleg[i, staticcommunities == statcom])
    return Rcoeff
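A hedged sketch with the function above (and its allegiance helper) in scope; no exact values are asserted, since they depend on how allegiance handles the diagonal:

import numpy as np

temporalcommunities = np.array([[0, 0, 1],
                                [0, 0, 1],
                                [1, 1, 0],
                                [1, 1, 0]])
staticcommunities = np.array([0, 0, 1, 1])
print(recruitment(temporalcommunities, staticcommunities))  # one value per node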
[ "def", "recruitment", "(", "temporalcommunities", ",", "staticcommunities", ")", ":", "# make sure the static and temporal communities have the same number of nodes", "if", "staticcommunities", ".", "shape", "[", "0", "]", "!=", "temporalcommunities", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'Temporal and static communities have different dimensions'", ")", "alleg", "=", "allegiance", "(", "temporalcommunities", ")", "Rcoeff", "=", "np", ".", "zeros", "(", "len", "(", "staticcommunities", ")", ")", "for", "i", ",", "statcom", "in", "enumerate", "(", "staticcommunities", ")", ":", "Rcoeff", "[", "i", "]", "=", "np", ".", "mean", "(", "alleg", "[", "i", ",", "staticcommunities", "==", "statcom", "]", ")", "return", "Rcoeff" ]
Calculates recruitment coefficient for each node. Recruitment coefficient
is the average probability of nodes from the same static communities being
in the same temporal communities at other time-points or during different tasks.

Parameters:
------------
temporalcommunities : array
    temporal communities vector (node,time)
staticcommunities : array
    Static communities vector for each node

Returns:
-------
Rcoeff : array
    recruitment coefficient for each node

References:
-----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems.
Nat Neurosci. 2015 May;18(5):744-51.

Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems.
PLoS Comput Biol. 2015 Dec 2;11(12):e1004533.
[ "Calculates", "recruitment", "coefficient", "for", "each", "node", ".", "Recruitment", "coefficient", "is", "the", "average", "probability", "of", "nodes", "from", "the", "same", "static", "communities", "being", "in", "the", "same", "temporal", "communities", "at", "other", "time", "-", "points", "or", "during", "different", "tasks", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/recruitment.py#L5-L44
-1
513
wiheto/teneto
teneto/temporalcommunity/integration.py
integration
python

def integration(temporalcommunities, staticcommunities):
    """
    Calculates the integration coefficient for each node. Measures the average
    probability that a node is in the same community as nodes from other systems.

    Parameters:
    ------------
    temporalcommunities : array
        temporal communities vector (node,time)
    staticcommunities : array
        Static communities vector for each node

    Returns:
    -------
    Icoeff : array
        integration coefficient for each node

    References:
    ----------
    Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
    Learning-Induced Autonomy of Sensorimotor Systems.
    Nat Neurosci. 2015 May;18(5):744-51.

    Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
    A Functional Cartography of Cognitive Systems.
    PLoS Comput Biol. 2015 Dec 2;11(12):e1004533.
    """
    # make sure the static and temporal communities have the same number of nodes
    if staticcommunities.shape[0] != temporalcommunities.shape[0]:
        raise ValueError(
            'Temporal and static communities have different dimensions')
    alleg = allegiance(temporalcommunities)
    Icoeff = np.zeros(len(staticcommunities))
    # calc integration for each node
    for i, statcom in enumerate(staticcommunities):
        Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
    return Icoeff
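The same inputs work here: integration averages allegiance over nodes with a different static label, so it complements recruitment. A hedged sketch with the function above (and its allegiance helper) in scope; no exact values asserted:

import numpy as np

temporalcommunities = np.array([[0, 0, 1],
                                [0, 0, 1],
                                [1, 1, 0],
                                [1, 1, 0]])
staticcommunities = np.array([0, 0, 1, 1])
print(integration(temporalcommunities, staticcommunities))  # one value per node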
[ "def", "integration", "(", "temporalcommunities", ",", "staticcommunities", ")", ":", "# make sure the static and temporal communities have the same number of nodes", "if", "staticcommunities", ".", "shape", "[", "0", "]", "!=", "temporalcommunities", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'Temporal and static communities have different dimensions'", ")", "alleg", "=", "allegiance", "(", "temporalcommunities", ")", "Icoeff", "=", "np", ".", "zeros", "(", "len", "(", "staticcommunities", ")", ")", "# calc integration for each node", "for", "i", ",", "statcom", "in", "enumerate", "(", "len", "(", "staticcommunities", ")", ")", ":", "Icoeff", "[", "i", "]", "=", "np", ".", "mean", "(", "alleg", "[", "i", ",", "staticcommunities", "!=", "statcom", "]", ")", "return", "Icoeff" ]
Calculates the integration coefficient for each node. Measures the average
probability that a node is in the same community as nodes from other systems.

Parameters:
------------
temporalcommunities : array
    temporal communities vector (node,time)
staticcommunities : array
    Static communities vector for each node

Returns:
-------
Icoeff : array
    integration coefficient for each node

References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems.
Nat Neurosci. 2015 May;18(5):744-51.

Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems.
PLoS Comput Biol. 2015 Dec 2;11(12):e1004533.
[ "Calculates", "the", "integration", "coefficient", "for", "each", "node", ".", "Measures", "the", "average", "probability", "that", "a", "node", "is", "in", "the", "same", "community", "as", "nodes", "from", "other", "systems", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/integration.py#L5-L45
-1
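A minimal usage sketch of the record above. The toy arrays are invented for illustration, and the import path is assumed from the file's location (teneto/temporalcommunity/integration.py); integration calls teneto's allegiance internally, so that module must be importable too.

import numpy as np
from teneto.temporalcommunity.integration import integration  # import path assumed

# toy temporal communities: 4 nodes x 3 time-points of community labels
temporalcommunities = np.array([[0, 0, 1],
                                [0, 0, 1],
                                [1, 1, 1],
                                [1, 1, 0]])
# one static community label per node
staticcommunities = np.array([0, 0, 1, 1])
icoeff = integration(temporalcommunities, staticcommunities)
# icoeff[i] is the mean allegiance of node i with nodes outside its static community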
514
wiheto/teneto
teneto/networkmeasures/intercontacttimes.py
intercontacttimes
def intercontacttimes(tnet):
    """
    Calculates the intercontacttimes of each edge in a network.

    Parameters
    -----------

    tnet : array, dict
        Temporal network (graphlet or contact). Nettype: 'bu', 'bd'

    Returns
    ---------

    contacts : dict
        Intercontact times as numpy array in dictionary. contacts['intercontacttimes']

    Notes
    ------

    The inter-contact times are calculated as the time between consecutive "active" edges (where active means that the value is 1 in a binary network).

    Examples
    --------

    This example goes through how inter-contact times are calculated.

    >>> import teneto
    >>> import numpy as np

    Make a network with 2 nodes and 10 time-points with 4 edges spaced out.

    >>> G = np.zeros([2,2,10])
    >>> edge_on = [1,3,5,9]
    >>> G[0,1,edge_on] = 1

    The network visualised below makes it clear what the inter-contact times are between the two nodes:

    .. plot::

        import teneto
        import numpy as np
        import matplotlib.pyplot as plt
        G = np.zeros([2,2,10])
        edge_on = [1,3,5,9]
        G[0,1,edge_on] = 1
        fig, ax = plt.subplots(1, figsize=(4,2))
        teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
        ax.set_ylim(-0.25, 1.25)
        plt.tight_layout()
        fig.show()

    Calculating the inter-contact times of these edges becomes: 2,2,4 between nodes 0 and 1.

    >>> ict = teneto.networkmeasures.intercontacttimes(G)

    The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN. So the icts between nodes 0 and 1 are found by:

    >>> ict['intercontacttimes'][0,1]
    array([2, 2, 4])

    """
    # Process input
    tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
    if tnet.nettype[0] == 'w':
        print('WARNING: assuming connections to be binary when computing intercontacttimes')
    # Each time series is padded with a 0 at the start and end. Then t[0:-1]-[t:].
    # Then discard the noninformative ones (done automatically)
    # Finally return back as np array
    contacts = np.array([[None] * tnet.netshape[0]] * tnet.netshape[0])
    if tnet.nettype[1] == 'u':
        for i in range(0, tnet.netshape[0]):
            for j in range(i + 1, tnet.netshape[0]):
                edge_on = tnet.get_network_when(i=i, j=j)['t'].values
                if len(edge_on) > 0:
                    edge_on_diff = edge_on[1:] - edge_on[:-1]
                    contacts[i, j] = np.array(edge_on_diff)
                    contacts[j, i] = np.array(edge_on_diff)
                else:
                    contacts[i, j] = []
                    contacts[j, i] = []
    elif tnet.nettype[1] == 'd':
        for i in range(0, tnet.netshape[0]):
            for j in range(0, tnet.netshape[0]):
                edge_on = tnet.get_network_when(i=i, j=j)['t'].values
                if len(edge_on) > 0:
                    edge_on_diff = edge_on[1:] - edge_on[:-1]
                    contacts[i, j] = np.array(edge_on_diff)
                else:
                    contacts[i, j] = []
    out = {}
    out['intercontacttimes'] = contacts
    out['nettype'] = tnet.nettype
    return out
python
def intercontacttimes(tnet):
    """
    Calculates the intercontacttimes of each edge in a network.

    Parameters
    -----------

    tnet : array, dict
        Temporal network (graphlet or contact). Nettype: 'bu', 'bd'

    Returns
    ---------

    contacts : dict
        Intercontact times as numpy array in dictionary. contacts['intercontacttimes']

    Notes
    ------

    The inter-contact times are calculated as the time between consecutive "active" edges (where active means that the value is 1 in a binary network).

    Examples
    --------

    This example goes through how inter-contact times are calculated.

    >>> import teneto
    >>> import numpy as np

    Make a network with 2 nodes and 10 time-points with 4 edges spaced out.

    >>> G = np.zeros([2,2,10])
    >>> edge_on = [1,3,5,9]
    >>> G[0,1,edge_on] = 1

    The network visualised below makes it clear what the inter-contact times are between the two nodes:

    .. plot::

        import teneto
        import numpy as np
        import matplotlib.pyplot as plt
        G = np.zeros([2,2,10])
        edge_on = [1,3,5,9]
        G[0,1,edge_on] = 1
        fig, ax = plt.subplots(1, figsize=(4,2))
        teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
        ax.set_ylim(-0.25, 1.25)
        plt.tight_layout()
        fig.show()

    Calculating the inter-contact times of these edges becomes: 2,2,4 between nodes 0 and 1.

    >>> ict = teneto.networkmeasures.intercontacttimes(G)

    The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN. So the icts between nodes 0 and 1 are found by:

    >>> ict['intercontacttimes'][0,1]
    array([2, 2, 4])

    """
    # Process input
    tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
    if tnet.nettype[0] == 'w':
        print('WARNING: assuming connections to be binary when computing intercontacttimes')
    # Each time series is padded with a 0 at the start and end. Then t[0:-1]-[t:].
    # Then discard the noninformative ones (done automatically)
    # Finally return back as np array
    contacts = np.array([[None] * tnet.netshape[0]] * tnet.netshape[0])
    if tnet.nettype[1] == 'u':
        for i in range(0, tnet.netshape[0]):
            for j in range(i + 1, tnet.netshape[0]):
                edge_on = tnet.get_network_when(i=i, j=j)['t'].values
                if len(edge_on) > 0:
                    edge_on_diff = edge_on[1:] - edge_on[:-1]
                    contacts[i, j] = np.array(edge_on_diff)
                    contacts[j, i] = np.array(edge_on_diff)
                else:
                    contacts[i, j] = []
                    contacts[j, i] = []
    elif tnet.nettype[1] == 'd':
        for i in range(0, tnet.netshape[0]):
            for j in range(0, tnet.netshape[0]):
                edge_on = tnet.get_network_when(i=i, j=j)['t'].values
                if len(edge_on) > 0:
                    edge_on_diff = edge_on[1:] - edge_on[:-1]
                    contacts[i, j] = np.array(edge_on_diff)
                else:
                    contacts[i, j] = []
    out = {}
    out['intercontacttimes'] = contacts
    out['nettype'] = tnet.nettype
    return out
[ "def", "intercontacttimes", "(", "tnet", ")", ":", "# Process input", "tnet", "=", "process_input", "(", "tnet", ",", "[", "'C'", ",", "'G'", ",", "'TN'", "]", ",", "'TN'", ")", "if", "tnet", ".", "nettype", "[", "0", "]", "==", "'w'", ":", "print", "(", "'WARNING: assuming connections to be binary when computing intercontacttimes'", ")", "# Each time series is padded with a 0 at the start and end. Then t[0:-1]-[t:].", "# Then discard the noninformative ones (done automatically)", "# Finally return back as np array", "contacts", "=", "np", ".", "array", "(", "[", "[", "None", "]", "*", "tnet", ".", "netshape", "[", "0", "]", "]", "*", "tnet", ".", "netshape", "[", "0", "]", ")", "if", "tnet", ".", "nettype", "[", "1", "]", "==", "'u'", ":", "for", "i", "in", "range", "(", "0", ",", "tnet", ".", "netshape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "tnet", ".", "netshape", "[", "0", "]", ")", ":", "edge_on", "=", "tnet", ".", "get_network_when", "(", "i", "=", "i", ",", "j", "=", "j", ")", "[", "'t'", "]", ".", "values", "if", "len", "(", "edge_on", ")", ">", "0", ":", "edge_on_diff", "=", "edge_on", "[", "1", ":", "]", "-", "edge_on", "[", ":", "-", "1", "]", "contacts", "[", "i", ",", "j", "]", "=", "np", ".", "array", "(", "edge_on_diff", ")", "contacts", "[", "j", ",", "i", "]", "=", "np", ".", "array", "(", "edge_on_diff", ")", "else", ":", "contacts", "[", "i", ",", "j", "]", "=", "[", "]", "contacts", "[", "j", ",", "i", "]", "=", "[", "]", "elif", "tnet", ".", "nettype", "[", "1", "]", "==", "'d'", ":", "for", "i", "in", "range", "(", "0", ",", "tnet", ".", "netshape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "0", ",", "tnet", ".", "netshape", "[", "0", "]", ")", ":", "edge_on", "=", "tnet", ".", "get_network_when", "(", "i", "=", "i", ",", "j", "=", "j", ")", "[", "'t'", "]", ".", "values", "if", "len", "(", "edge_on", ")", ">", "0", ":", "edge_on_diff", "=", "edge_on", "[", "1", ":", "]", "-", "edge_on", "[", ":", "-", "1", "]", "contacts", "[", "i", ",", "j", "]", "=", "np", ".", "array", "(", "edge_on_diff", ")", "else", ":", "contacts", "[", "i", ",", "j", "]", "=", "[", "]", "out", "=", "{", "}", "out", "[", "'intercontacttimes'", "]", "=", "contacts", "out", "[", "'nettype'", "]", "=", "tnet", ".", "nettype", "return", "out" ]
Calculates the intercontacttimes of each edge in a network.

Parameters
-----------

tnet : array, dict
    Temporal network (graphlet or contact). Nettype: 'bu', 'bd'

Returns
---------

contacts : dict
    Intercontact times as numpy array in dictionary. contacts['intercontacttimes']

Notes
------

The inter-contact times are calculated as the time between consecutive "active" edges (where active means that the value is 1 in a binary network).

Examples
--------

This example goes through how inter-contact times are calculated.

>>> import teneto
>>> import numpy as np

Make a network with 2 nodes and 10 time-points with 4 edges spaced out.

>>> G = np.zeros([2,2,10])
>>> edge_on = [1,3,5,9]
>>> G[0,1,edge_on] = 1

The network visualised below makes it clear what the inter-contact times are between the two nodes:

.. plot::

    import teneto
    import numpy as np
    import matplotlib.pyplot as plt
    G = np.zeros([2,2,10])
    edge_on = [1,3,5,9]
    G[0,1,edge_on] = 1
    fig, ax = plt.subplots(1, figsize=(4,2))
    teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
    ax.set_ylim(-0.25, 1.25)
    plt.tight_layout()
    fig.show()

Calculating the inter-contact times of these edges becomes: 2,2,4 between nodes 0 and 1.

>>> ict = teneto.networkmeasures.intercontacttimes(G)

The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN. So the icts between nodes 0 and 1 are found by:

>>> ict['intercontacttimes'][0,1]
array([2, 2, 4])
[ "Calculates", "the", "intercontacttimes", "of", "each", "edge", "in", "a", "network", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/intercontacttimes.py#L9-L108
-1
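The ICT arrays returned above are the building block for burstiness-style summaries (teneto's bursty_coeff works from the same inter-contact times). A short sketch reproducing the docstring network and aggregating one edge's ICT distribution:

import numpy as np
import teneto

G = np.zeros([2, 2, 10])
G[0, 1, [1, 3, 5, 9]] = 1  # edge (0,1) active at t = 1, 3, 5, 9
ict = teneto.networkmeasures.intercontacttimes(G)
mean_ict = np.mean(ict['intercontacttimes'][0, 1])  # (2 + 2 + 4) / 3 = 2.67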
515
wiheto/teneto
teneto/timeseries/report.py
gen_report
def gen_report(report, sdir='./', report_name='report.html'):
    """
    Generates report of derivation and postprocess steps in teneto.derive
    """
    # Create report directory
    if not os.path.exists(sdir):
        os.makedirs(sdir)
    # Add a slash to file directory if not included to avoid DirNameFileName
    # instead of DirName/FileName being created
    if sdir[-1] != '/':
        sdir += '/'
    report_html = '<html><body>'
    if 'method' in report.keys():
        report_html += "<h1>Method: " + report['method'] + "</h1><p>"
        for i in report[report['method']]:
            if i == 'taper_window':
                fig, ax = plt.subplots(1)
                ax.plot(report[report['method']]['taper_window'],
                        report[report['method']]['taper'])
                ax.set_xlabel('Window (time). 0 in middle of window.')
                ax.set_title(
                    'Taper from ' + report[report['method']]['distribution'] + ' distribution (PDF).')
                fig.savefig(sdir + 'taper.png')
                report_html += "<img src='./taper.png' width=500>" + "<p>"
            else:
                report_html += "- <b>" + i + "</b>: " + \
                    str(report[report['method']][i]) + "<br>"
    if 'postprocess' in report.keys():
        report_html += "<p><h2>Postprocessing:</h2><p>"
        report_html += "<b>Pipeline: </b>"
        for i in report['postprocess']:
            report_html += " " + i + ","
        for i in report['postprocess']:
            report_html += "<p><h3>" + i + "</h3><p>"
            for j in report[i]:
                if j == 'lambda':
                    report_html += "- <b>" + j + "</b>: " + "<br>"
                    lambda_val = np.array(report['boxcox']['lambda'])
                    fig, ax = plt.subplots(1)
                    ax.hist(lambda_val[:, -1])
                    ax.set_xlabel('lambda')
                    ax.set_ylabel('frequency')
                    ax.set_title('Histogram of lambda parameter')
                    fig.savefig(sdir + 'boxcox_lambda.png')
                    report_html += "<img src='./boxcox_lambda.png' width=500>" + "<p>"
                    report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>"
                    np.savetxt(sdir + "boxcox_lambda.csv",
                               lambda_val, delimiter=",")
                else:
                    report_html += "- <b>" + j + "</b>: " + \
                        str(report[i][j]) + "<br>"
    report_html += '</body></html>'
    with open(sdir + report_name, 'w') as file:
        file.write(report_html)
    file.close()
python
def gen_report(report, sdir='./', report_name='report.html'):
    """
    Generates report of derivation and postprocess steps in teneto.derive
    """
    # Create report directory
    if not os.path.exists(sdir):
        os.makedirs(sdir)
    # Add a slash to file directory if not included to avoid DirNameFileName
    # instead of DirName/FileName being created
    if sdir[-1] != '/':
        sdir += '/'
    report_html = '<html><body>'
    if 'method' in report.keys():
        report_html += "<h1>Method: " + report['method'] + "</h1><p>"
        for i in report[report['method']]:
            if i == 'taper_window':
                fig, ax = plt.subplots(1)
                ax.plot(report[report['method']]['taper_window'],
                        report[report['method']]['taper'])
                ax.set_xlabel('Window (time). 0 in middle of window.')
                ax.set_title(
                    'Taper from ' + report[report['method']]['distribution'] + ' distribution (PDF).')
                fig.savefig(sdir + 'taper.png')
                report_html += "<img src='./taper.png' width=500>" + "<p>"
            else:
                report_html += "- <b>" + i + "</b>: " + \
                    str(report[report['method']][i]) + "<br>"
    if 'postprocess' in report.keys():
        report_html += "<p><h2>Postprocessing:</h2><p>"
        report_html += "<b>Pipeline: </b>"
        for i in report['postprocess']:
            report_html += " " + i + ","
        for i in report['postprocess']:
            report_html += "<p><h3>" + i + "</h3><p>"
            for j in report[i]:
                if j == 'lambda':
                    report_html += "- <b>" + j + "</b>: " + "<br>"
                    lambda_val = np.array(report['boxcox']['lambda'])
                    fig, ax = plt.subplots(1)
                    ax.hist(lambda_val[:, -1])
                    ax.set_xlabel('lambda')
                    ax.set_ylabel('frequency')
                    ax.set_title('Histogram of lambda parameter')
                    fig.savefig(sdir + 'boxcox_lambda.png')
                    report_html += "<img src='./boxcox_lambda.png' width=500>" + "<p>"
                    report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>"
                    np.savetxt(sdir + "boxcox_lambda.csv",
                               lambda_val, delimiter=",")
                else:
                    report_html += "- <b>" + j + "</b>: " + \
                        str(report[i][j]) + "<br>"
    report_html += '</body></html>'
    with open(sdir + report_name, 'w') as file:
        file.write(report_html)
    file.close()
[ "def", "gen_report", "(", "report", ",", "sdir", "=", "'./'", ",", "report_name", "=", "'report.html'", ")", ":", "# Create report directory", "if", "not", "os", ".", "path", ".", "exists", "(", "sdir", ")", ":", "os", ".", "makedirs", "(", "sdir", ")", "# Add a slash to file directory if not included to avoid DirNameFleName", "# instead of DirName/FileName being creaated", "if", "sdir", "[", "-", "1", "]", "!=", "'/'", ":", "sdir", "+=", "'/'", "report_html", "=", "'<html><body>'", "if", "'method'", "in", "report", ".", "keys", "(", ")", ":", "report_html", "+=", "\"<h1>Method: \"", "+", "report", "[", "'method'", "]", "+", "\"</h1><p>\"", "for", "i", "in", "report", "[", "report", "[", "'method'", "]", "]", ":", "if", "i", "==", "'taper_window'", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ")", "ax", ".", "plot", "(", "report", "[", "report", "[", "'method'", "]", "]", "[", "'taper_window'", "]", ",", "report", "[", "report", "[", "'method'", "]", "]", "[", "'taper'", "]", ")", "ax", ".", "set_xlabel", "(", "'Window (time). 0 in middle of window.'", ")", "ax", ".", "set_title", "(", "'Taper from '", "+", "report", "[", "report", "[", "'method'", "]", "]", "[", "'distribution'", "]", "+", "' distribution (PDF).'", ")", "fig", ".", "savefig", "(", "sdir", "+", "'taper.png'", ")", "report_html", "+=", "\"<img src='./taper.png' width=500>\"", "+", "\"<p>\"", "else", ":", "report_html", "+=", "\"- <b>\"", "+", "i", "+", "\"</b>: \"", "+", "str", "(", "report", "[", "report", "[", "'method'", "]", "]", "[", "i", "]", ")", "+", "\"<br>\"", "if", "'postprocess'", "in", "report", ".", "keys", "(", ")", ":", "report_html", "+=", "\"<p><h2>Postprocessing:</h2><p>\"", "report_html", "+=", "\"<b>Pipeline: </b>\"", "for", "i", "in", "report", "[", "'postprocess'", "]", ":", "report_html", "+=", "\" \"", "+", "i", "+", "\",\"", "for", "i", "in", "report", "[", "'postprocess'", "]", ":", "report_html", "+=", "\"<p><h3>\"", "+", "i", "+", "\"</h3><p>\"", "for", "j", "in", "report", "[", "i", "]", ":", "if", "j", "==", "'lambda'", ":", "report_html", "+=", "\"- <b>\"", "+", "j", "+", "\"</b>: \"", "+", "\"<br>\"", "lambda_val", "=", "np", ".", "array", "(", "report", "[", "'boxcox'", "]", "[", "'lambda'", "]", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ")", "ax", ".", "hist", "(", "lambda_val", "[", ":", ",", "-", "1", "]", ")", "ax", ".", "set_xlabel", "(", "'lambda'", ")", "ax", ".", "set_ylabel", "(", "'frequency'", ")", "ax", ".", "set_title", "(", "'Histogram of lambda parameter'", ")", "fig", ".", "savefig", "(", "sdir", "+", "'boxcox_lambda.png'", ")", "report_html", "+=", "\"<img src='./boxcox_lambda.png' width=500>\"", "+", "\"<p>\"", "report_html", "+=", "\"Data located in \"", "+", "sdir", "+", "\"boxcox_lambda.csv <p>\"", "np", ".", "savetxt", "(", "sdir", "+", "\"boxcox_lambda.csv\"", ",", "lambda_val", ",", "delimiter", "=", "\",\"", ")", "else", ":", "report_html", "+=", "\"- <b>\"", "+", "j", "+", "\"</b>: \"", "+", "str", "(", "report", "[", "i", "]", "[", "j", "]", ")", "+", "\"<br>\"", "report_html", "+=", "'</body></html>'", "with", "open", "(", "sdir", "+", "report_name", ",", "'w'", ")", "as", "file", ":", "file", ".", "write", "(", "report_html", ")", "file", ".", "close", "(", ")" ]
Generates report of derivation and postprocess steps in teneto.derive
[ "Generates", "report", "of", "derivation", "and", "postprocess", "steps", "in", "teneto", ".", "derive" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/report.py#L10-L92
-1
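A hedged calling sketch: the report dict below is a hand-built stand-in for what teneto.timeseries.derive_temporalnetwork produces with report=True, so the exact keys ('method' plus a per-method parameter dict) are assumptions based only on how gen_report reads them above. The import path is assumed from this record's file location.

from teneto.timeseries.report import gen_report  # import path assumed

report = {'method': 'slidingwindow',
          'slidingwindow': {'windowsize': 10}}
gen_report(report, sdir='./tvc_report/', report_name='report.html')
# writes ./tvc_report/report.html listing the method and each parameter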
516
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.add_history
def add_history(self, fname, fargs, init=0):
    """
    Adds a processing step to TenetoBIDS.history.
    """
    if init == 1:
        self.history = []
    self.history.append([fname, fargs])
python
def add_history(self, fname, fargs, init=0):
    """
    Adds a processing step to TenetoBIDS.history.
    """
    if init == 1:
        self.history = []
    self.history.append([fname, fargs])
[ "def", "add_history", "(", "self", ",", "fname", ",", "fargs", ",", "init", "=", "0", ")", ":", "if", "init", "==", "1", ":", "self", ".", "history", "=", "[", "]", "self", ".", "history", ".", "append", "(", "[", "fname", ",", "fargs", "]", ")" ]
Adds a processing step to TenetoBIDS.history.
[ "Adds", "a", "processing", "step", "to", "TenetoBIDS", ".", "history", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L129-L135
-1
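Elsewhere in this dump the method is invoked as self.add_history(inspect.stack()[0][3], locals(), 1): the caller logs its own name and arguments and resets the history with init=1. A standalone sketch of that pattern (class and method names invented for illustration):

import inspect

class MiniBIDS:
    def add_history(self, fname, fargs, init=0):
        if init == 1:
            self.history = []
        self.history.append([fname, fargs])

    def select(self, task='rest'):
        # log this call as the first history entry
        self.add_history(inspect.stack()[0][3], locals(), 1)

m = MiniBIDS()
m.select(task='rest')
print(m.history[0][0])  # 'select'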
517
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.derive_temporalnetwork
def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True):
    """
    Derive time-varying connectivity on the selected files.

    Parameters
    ----------
    params : dict.
        See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary.
        Assumes dimord is time,node (output of other TenetoBIDS functions)

    update_pipeline : bool
        If true, the object updates the selected files with those derived here.

    njobs : int
        How many parallel jobs to run

    confound_corr_report : bool
        If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.

    tag : str
        any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'

    Returns
    -------
    dfc : files
        saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)

    files = self.get_selected_files(quiet=1)
    confound_files = self.get_selected_files(quiet=1, pipeline='confound')
    if confound_files:
        confounds_exist = True
    else:
        confounds_exist = False
    if not confound_corr_report:
        confounds_exist = False

    if not tag:
        tag = ''
    else:
        tag = 'desc-' + tag

    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params,
                               confounds_exist, confound_files) for i, f in enumerate(files) if f}
        for j in as_completed(job):
            j.result()

    if update_pipeline == True:
        if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
            self.set_confound_pipeline(self.pipeline)
        self.set_pipeline('teneto_' + teneto.__version__)
        self.set_pipeline_subdir('tvc')
        self.set_bids_suffix('tvcconn')
python
def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True):
    """
    Derive time-varying connectivity on the selected files.

    Parameters
    ----------
    params : dict.
        See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary.
        Assumes dimord is time,node (output of other TenetoBIDS functions)

    update_pipeline : bool
        If true, the object updates the selected files with those derived here.

    njobs : int
        How many parallel jobs to run

    confound_corr_report : bool
        If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.

    tag : str
        any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'

    Returns
    -------
    dfc : files
        saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)

    files = self.get_selected_files(quiet=1)
    confound_files = self.get_selected_files(quiet=1, pipeline='confound')
    if confound_files:
        confounds_exist = True
    else:
        confounds_exist = False
    if not confound_corr_report:
        confounds_exist = False

    if not tag:
        tag = ''
    else:
        tag = 'desc-' + tag

    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params,
                               confounds_exist, confound_files) for i, f in enumerate(files) if f}
        for j in as_completed(job):
            j.result()

    if update_pipeline == True:
        if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
            self.set_confound_pipeline(self.pipeline)
        self.set_pipeline('teneto_' + teneto.__version__)
        self.set_pipeline_subdir('tvc')
        self.set_bids_suffix('tvcconn')
[ "def", "derive_temporalnetwork", "(", "self", ",", "params", ",", "update_pipeline", "=", "True", ",", "tag", "=", "None", ",", "njobs", "=", "1", ",", "confound_corr_report", "=", "True", ")", ":", "if", "not", "njobs", ":", "njobs", "=", "self", ".", "njobs", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ")", "confound_files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ",", "pipeline", "=", "'confound'", ")", "if", "confound_files", ":", "confounds_exist", "=", "True", "else", ":", "confounds_exist", "=", "False", "if", "not", "confound_corr_report", ":", "confounds_exist", "=", "False", "if", "not", "tag", ":", "tag", "=", "''", "else", ":", "tag", "=", "'desc-'", "+", "tag", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "njobs", ")", "as", "executor", ":", "job", "=", "{", "executor", ".", "submit", "(", "self", ".", "_derive_temporalnetwork", ",", "f", ",", "i", ",", "tag", ",", "params", ",", "confounds_exist", ",", "confound_files", ")", "for", "i", ",", "f", "in", "enumerate", "(", "files", ")", "if", "f", "}", "for", "j", "in", "as_completed", "(", "job", ")", ":", "j", ".", "result", "(", ")", "if", "update_pipeline", "==", "True", ":", "if", "not", "self", ".", "confound_pipeline", "and", "len", "(", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ",", "pipeline", "=", "'confound'", ")", ")", ">", "0", ":", "self", ".", "set_confound_pipeline", "=", "self", ".", "pipeline", "self", ".", "set_pipeline", "(", "'teneto_'", "+", "teneto", ".", "__version__", ")", "self", ".", "set_pipeline_subdir", "(", "'tvc'", ")", "self", ".", "set_bids_suffix", "(", "'tvcconn'", ")" ]
Derive time-varying connectivity on the selected files.

Parameters
----------
params : dict.
    See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary.
    Assumes dimord is time,node (output of other TenetoBIDS functions)

update_pipeline : bool
    If true, the object updates the selected files with those derived here.

njobs : int
    How many parallel jobs to run

confound_corr_report : bool
    If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.

tag : str
    any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'

Returns
-------
dfc : files
    saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy
[ "Derive", "time", "-", "varying", "connectivity", "on", "the", "selected", "files", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L164-L219
-1
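A usage sketch under assumed inputs: the BIDS path is hypothetical, and the params dict follows the structure that teneto.timeseries.derive_temporalnetwork documents (which this record defers to), so the keys shown are illustrative.

import teneto

tnb = teneto.TenetoBIDS('/path/to/bids', pipeline='fmriprep')  # path hypothetical
params = {'method': 'jackknife'}  # see teneto.timeseries.derive_temporalnetwork
tnb.derive_temporalnetwork(params, update_pipeline=True, njobs=2)
# outputs land in .../derivatives/teneto_<version>/sub-*/.../tvc/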
518
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.make_functional_connectivity
def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None):
    """
    Makes connectivity matrix for each of the subjects.

    Parameters
    ----------
    returngroup : bool, default=False
        If true, returns the group average connectivity matrix.
    njobs : int
        How many parallel jobs to run
    file_idx : bool
        Default False, set to True to ignore the index column in the loaded file.
    file_hdr : bool
        Default False, set to True to ignore the header row in the loaded file.

    Returns
    -------
    Saves data in derivatives/teneto_<version>/.../fc/
    R_group : array
        if returngroup is true, the average connectivity matrix is returned.
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)
    files = self.get_selected_files(quiet=1)

    R_group = []

    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(
            self._run_make_functional_connectivity, f, file_hdr, file_idx) for f in files}
        for j in as_completed(job):
            R_group.append(j.result())

    if returngroup:
        # Fisher transform -> mean -> inverse fisher transform
        R_group = np.tanh(np.mean(np.arctanh(np.array(R_group)), axis=0))
        return np.array(R_group)
python
def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None):
    """
    Makes connectivity matrix for each of the subjects.

    Parameters
    ----------
    returngroup : bool, default=False
        If true, returns the group average connectivity matrix.
    njobs : int
        How many parallel jobs to run
    file_idx : bool
        Default False, set to True to ignore the index column in the loaded file.
    file_hdr : bool
        Default False, set to True to ignore the header row in the loaded file.

    Returns
    -------
    Saves data in derivatives/teneto_<version>/.../fc/
    R_group : array
        if returngroup is true, the average connectivity matrix is returned.
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)
    files = self.get_selected_files(quiet=1)

    R_group = []

    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(
            self._run_make_functional_connectivity, f, file_hdr, file_idx) for f in files}
        for j in as_completed(job):
            R_group.append(j.result())

    if returngroup:
        # Fisher transform -> mean -> inverse fisher transform
        R_group = np.tanh(np.mean(np.arctanh(np.array(R_group)), axis=0))
        return np.array(R_group)
[ "def", "make_functional_connectivity", "(", "self", ",", "njobs", "=", "None", ",", "returngroup", "=", "False", ",", "file_hdr", "=", "None", ",", "file_idx", "=", "None", ")", ":", "if", "not", "njobs", ":", "njobs", "=", "self", ".", "njobs", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ")", "R_group", "=", "[", "]", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "njobs", ")", "as", "executor", ":", "job", "=", "{", "executor", ".", "submit", "(", "self", ".", "_run_make_functional_connectivity", ",", "f", ",", "file_hdr", ",", "file_idx", ")", "for", "f", "in", "files", "}", "for", "j", "in", "as_completed", "(", "job", ")", ":", "R_group", ".", "append", "(", "j", ".", "result", "(", ")", ")", "if", "returngroup", ":", "# Fisher tranform -> mean -> inverse fisher tranform", "R_group", "=", "np", ".", "tanh", "(", "np", ".", "mean", "(", "np", ".", "arctanh", "(", "np", ".", "array", "(", "R_group", ")", ")", ",", "axis", "=", "0", ")", ")", "return", "np", ".", "array", "(", "R_group", ")" ]
Makes connectivity matrix for each of the subjects.

Parameters
----------
returngroup : bool, default=False
    If true, returns the group average connectivity matrix.
njobs : int
    How many parallel jobs to run
file_idx : bool
    Default False, set to True to ignore the index column in the loaded file.
file_hdr : bool
    Default False, set to True to ignore the header row in the loaded file.

Returns
-------
Saves data in derivatives/teneto_<version>/.../fc/
R_group : array
    if returngroup is true, the average connectivity matrix is returned.
[ "Makes", "connectivity", "matrix", "for", "each", "of", "the", "subjects", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L360-L398
-1
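The returngroup branch above is worth isolating: correlation matrices should not be averaged directly, so each subject's matrix is Fisher z-transformed, averaged, then transformed back. A self-contained sketch with toy data:

import numpy as np

# 5 subjects x 10 x 10 toy connectivity matrices (values in (-1, 1))
rng = np.random.default_rng(42)
R = rng.uniform(-0.5, 0.9, size=(5, 10, 10))
# Fisher transform -> mean -> inverse Fisher transform
R_group = np.tanh(np.mean(np.arctanh(R), axis=0))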
519
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS._save_namepaths_bids_derivatives
def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None):
    """
    Creates output directory and output name

    Parameters
    ----------
    f : str
        input files, includes the file bids_suffix
    tag : str
        what should be added to f in the output file.
    save_directory : str
        additional directory that the output file should go in
    suffix : str
        add new suffix to data

    Returns
    -------
    save_name : str
        previous filename with new tag
    save_dir : str
        directory where it will be saved
    base_dir : str
        subject-specific base directory (i.e. derivatives/teneto/func[/anythingelse/])
    """
    file_name = f.split('/')[-1].split('.')[0]
    if tag != '':
        tag = '_' + tag
    if suffix:
        file_name, _ = drop_bids_suffix(file_name)
        save_name = file_name + tag
        save_name += '_' + suffix
    else:
        save_name = file_name + tag
    paths_post_pipeline = f.split(self.pipeline)
    if self.pipeline_subdir:
        paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[0]
    else:
        paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
    base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + \
        teneto.__version__ + '/' + paths_post_pipeline + '/'
    save_dir = base_dir + '/' + save_directory + '/'
    if not os.path.exists(save_dir):
        # A case has happened where this has been done in parallel and an error was raised. So do try/except
        try:
            os.makedirs(save_dir)
        except:
            # Wait 2 seconds so that the error does not try and save something in the directory before it is created
            time.sleep(2)
    if not os.path.exists(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json'):
        try:
            with open(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json', 'w') as fs:
                json.dump(self.tenetoinfo, fs)
        except:
            # Same as above, just in case parallel does duplication
            time.sleep(2)
    return save_name, save_dir, base_dir
python
def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None):
    """
    Creates output directory and output name

    Parameters
    ----------
    f : str
        input files, includes the file bids_suffix
    tag : str
        what should be added to f in the output file.
    save_directory : str
        additional directory that the output file should go in
    suffix : str
        add new suffix to data

    Returns
    -------
    save_name : str
        previous filename with new tag
    save_dir : str
        directory where it will be saved
    base_dir : str
        subject-specific base directory (i.e. derivatives/teneto/func[/anythingelse/])
    """
    file_name = f.split('/')[-1].split('.')[0]
    if tag != '':
        tag = '_' + tag
    if suffix:
        file_name, _ = drop_bids_suffix(file_name)
        save_name = file_name + tag
        save_name += '_' + suffix
    else:
        save_name = file_name + tag
    paths_post_pipeline = f.split(self.pipeline)
    if self.pipeline_subdir:
        paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[0]
    else:
        paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
    base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + \
        teneto.__version__ + '/' + paths_post_pipeline + '/'
    save_dir = base_dir + '/' + save_directory + '/'
    if not os.path.exists(save_dir):
        # A case has happened where this has been done in parallel and an error was raised. So do try/except
        try:
            os.makedirs(save_dir)
        except:
            # Wait 2 seconds so that the error does not try and save something in the directory before it is created
            time.sleep(2)
    if not os.path.exists(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json'):
        try:
            with open(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json', 'w') as fs:
                json.dump(self.tenetoinfo, fs)
        except:
            # Same as above, just in case parallel does duplication
            time.sleep(2)
    return save_name, save_dir, base_dir
[ "def", "_save_namepaths_bids_derivatives", "(", "self", ",", "f", ",", "tag", ",", "save_directory", ",", "suffix", "=", "None", ")", ":", "file_name", "=", "f", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "if", "tag", "!=", "''", ":", "tag", "=", "'_'", "+", "tag", "if", "suffix", ":", "file_name", ",", "_", "=", "drop_bids_suffix", "(", "file_name", ")", "save_name", "=", "file_name", "+", "tag", "save_name", "+=", "'_'", "+", "suffix", "else", ":", "save_name", "=", "file_name", "+", "tag", "paths_post_pipeline", "=", "f", ".", "split", "(", "self", ".", "pipeline", ")", "if", "self", ".", "pipeline_subdir", ":", "paths_post_pipeline", "=", "paths_post_pipeline", "[", "1", "]", ".", "split", "(", "self", ".", "pipeline_subdir", ")", "[", "0", "]", "else", ":", "paths_post_pipeline", "=", "paths_post_pipeline", "[", "1", "]", ".", "split", "(", "file_name", ")", "[", "0", "]", "base_dir", "=", "self", ".", "BIDS_dir", "+", "'/derivatives/'", "+", "'teneto_'", "+", "teneto", ".", "__version__", "+", "'/'", "+", "paths_post_pipeline", "+", "'/'", "save_dir", "=", "base_dir", "+", "'/'", "+", "save_directory", "+", "'/'", "if", "not", "os", ".", "path", ".", "exists", "(", "save_dir", ")", ":", "# A case has happened where this has been done in parallel and an error was raised. So do try/except", "try", ":", "os", ".", "makedirs", "(", "save_dir", ")", "except", ":", "# Wait 2 seconds so that the error does not try and save something in the directory before it is created", "time", ".", "sleep", "(", "2", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "BIDS_dir", "+", "'/derivatives/'", "+", "'teneto_'", "+", "teneto", ".", "__version__", "+", "'/dataset_description.json'", ")", ":", "try", ":", "with", "open", "(", "self", ".", "BIDS_dir", "+", "'/derivatives/'", "+", "'teneto_'", "+", "teneto", ".", "__version__", "+", "'/dataset_description.json'", ",", "'w'", ")", "as", "fs", ":", "json", ".", "dump", "(", "self", ".", "tenetoinfo", ",", "fs", ")", "except", ":", "# Same as above, just in case parallel does duplicaiton", "time", ".", "sleep", "(", "2", ")", "return", "save_name", ",", "save_dir", ",", "base_dir" ]
Creates output directory and output name

Parameters
----------
f : str
    input files, includes the file bids_suffix
tag : str
    what should be added to f in the output file.
save_directory : str
    additional directory that the output file should go in
suffix : str
    add new suffix to data

Returns
-------
save_name : str
    previous filename with new tag
save_dir : str
    directory where it will be saved
base_dir : str
    subject-specific base directory (i.e. derivatives/teneto/func[/anythingelse/])
[ "Creates", "output", "directory", "and", "output", "name" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L409-L466
-1
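A pure-string sketch of the naming branch above when a new suffix is given (filename invented; drop_bids_suffix is the teneto helper this record imports, shown here only by its effect):

f = 'sub-01_task-rest_bold'   # file_name after stripping path and extension
stem = 'sub-01_task-rest'     # what drop_bids_suffix(f) would return
save_name = stem + '_desc-clean' + '_roi'
# -> 'sub-01_task-rest_desc-clean_roi'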
520
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.get_tags
def get_tags(self, tag, quiet=1):
    """
    Returns which tag alternatives can be identified in the BIDS derivatives structure.
    """
    if not self.pipeline:
        print('Please set pipeline first.')
        self.get_pipeline_alternatives(quiet)
    else:
        if tag == 'sub':
            datapath = self.BIDS_dir + '/derivatives/' + self.pipeline + '/'
            tag_alternatives = [
                f.split('sub-')[1] for f in os.listdir(datapath) if os.path.isdir(datapath + f) and 'sub-' in f]
        elif tag == 'ses':
            tag_alternatives = []
            for sub in self.bids_tags['sub']:
                tag_alternatives += [f.split('ses-')[1] for f in os.listdir(
                    self.BIDS_dir + '/derivatives/' + self.pipeline + '/' + 'sub-' + sub) if 'ses' in f]
            tag_alternatives = set(tag_alternatives)
        else:
            files = self.get_selected_files(quiet=1)
            tag_alternatives = []
            for f in files:
                f = f.split('.')[0]
                f = f.split('/')[-1]
                tag_alternatives += [t.split('-')[1]
                                     for t in f.split('_') if t.split('-')[0] == tag]
            tag_alternatives = set(tag_alternatives)
        if quiet == 0:
            print(tag + ' alternatives: ' + ', '.join(tag_alternatives))
        return list(tag_alternatives)
python
def get_tags(self, tag, quiet=1):
    """
    Returns which tag alternatives can be identified in the BIDS derivatives structure.
    """
    if not self.pipeline:
        print('Please set pipeline first.')
        self.get_pipeline_alternatives(quiet)
    else:
        if tag == 'sub':
            datapath = self.BIDS_dir + '/derivatives/' + self.pipeline + '/'
            tag_alternatives = [
                f.split('sub-')[1] for f in os.listdir(datapath) if os.path.isdir(datapath + f) and 'sub-' in f]
        elif tag == 'ses':
            tag_alternatives = []
            for sub in self.bids_tags['sub']:
                tag_alternatives += [f.split('ses-')[1] for f in os.listdir(
                    self.BIDS_dir + '/derivatives/' + self.pipeline + '/' + 'sub-' + sub) if 'ses' in f]
            tag_alternatives = set(tag_alternatives)
        else:
            files = self.get_selected_files(quiet=1)
            tag_alternatives = []
            for f in files:
                f = f.split('.')[0]
                f = f.split('/')[-1]
                tag_alternatives += [t.split('-')[1]
                                     for t in f.split('_') if t.split('-')[0] == tag]
            tag_alternatives = set(tag_alternatives)
        if quiet == 0:
            print(tag + ' alternatives: ' + ', '.join(tag_alternatives))
        return list(tag_alternatives)
[ "def", "get_tags", "(", "self", ",", "tag", ",", "quiet", "=", "1", ")", ":", "if", "not", "self", ".", "pipeline", ":", "print", "(", "'Please set pipeline first.'", ")", "self", ".", "get_pipeline_alternatives", "(", "quiet", ")", "else", ":", "if", "tag", "==", "'sub'", ":", "datapath", "=", "self", ".", "BIDS_dir", "+", "'/derivatives/'", "+", "self", ".", "pipeline", "+", "'/'", "tag_alternatives", "=", "[", "f", ".", "split", "(", "'sub-'", ")", "[", "1", "]", "for", "f", "in", "os", ".", "listdir", "(", "datapath", ")", "if", "os", ".", "path", ".", "isdir", "(", "datapath", "+", "f", ")", "and", "'sub-'", "in", "f", "]", "elif", "tag", "==", "'ses'", ":", "tag_alternatives", "=", "[", "]", "for", "sub", "in", "self", ".", "bids_tags", "[", "'sub'", "]", ":", "tag_alternatives", "+=", "[", "f", ".", "split", "(", "'ses-'", ")", "[", "1", "]", "for", "f", "in", "os", ".", "listdir", "(", "self", ".", "BIDS_dir", "+", "'/derivatives/'", "+", "self", ".", "pipeline", "+", "'/'", "+", "'sub-'", "+", "sub", ")", "if", "'ses'", "in", "f", "]", "tag_alternatives", "=", "set", "(", "tag_alternatives", ")", "else", ":", "files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ")", "tag_alternatives", "=", "[", "]", "for", "f", "in", "files", ":", "f", "=", "f", ".", "split", "(", "'.'", ")", "[", "0", "]", "f", "=", "f", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "tag_alternatives", "+=", "[", "t", ".", "split", "(", "'-'", ")", "[", "1", "]", "for", "t", "in", "f", ".", "split", "(", "'_'", ")", "if", "t", ".", "split", "(", "'-'", ")", "[", "0", "]", "==", "tag", "]", "tag_alternatives", "=", "set", "(", "tag_alternatives", ")", "if", "quiet", "==", "0", ":", "print", "(", "tag", "+", "' alternatives: '", "+", "', '", ".", "join", "(", "tag_alternatives", ")", ")", "return", "list", "(", "tag_alternatives", ")" ]
Returns which tag alternatives can be identified in the BIDS derivatives structure.
[ "Returns", "which", "tag", "alternatives", "can", "be", "identified", "in", "the", "BIDS", "derivatives", "structure", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L468-L497
-1
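Typical interactive use, assuming a TenetoBIDS object has been pointed at a dataset (path and directory contents hypothetical):

tnb = teneto.TenetoBIDS('/path/to/bids', pipeline='fmriprep')  # path hypothetical
subs = tnb.get_tags('sub')             # e.g. ['01', '02']
tasks = tnb.get_tags('task', quiet=0)  # also prints 'task alternatives: ...'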
521
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.set_exclusion_file
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'):
    """
    Excludes subjects given a certain exclusion criteria.

    Parameters
    ----------
    confound : str or list
        string or list of confound name(s) from confound files
    exclusion_criteria : str or list
        for each confound, an exclusion_criteria should be expressed as a string. It starts with >,<,>= or <= then the numerical threshold. Ex. '>0.2' means every subject whose average confound value is greater than 0.2 will be rejected.
    confound_stat : str or list
        Can be median, mean, std. How the confound data is aggregated (so if there is a measure per time-point, this is averaged over all time-points). If multiple confounds are specified, this has to be a list.

    Returns
    --------
    calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria.
    """
    self.add_history(inspect.stack()[0][3], locals(), 1)
    if isinstance(confound, str):
        confound = [confound]
    if isinstance(exclusion_criteria, str):
        exclusion_criteria = [exclusion_criteria]
    if isinstance(confound_stat, str):
        confound_stat = [confound_stat]
    if len(exclusion_criteria) != len(confound):
        raise ValueError(
            'Same number of confound names and exclusion criteria must be given')
    if len(confound_stat) != len(confound):
        raise ValueError(
            'Same number of confound names and confound stats must be given')
    relex, crit = process_exclusion_criteria(exclusion_criteria)
    files = sorted(self.get_selected_files(quiet=1))
    confound_files = sorted(
        self.get_selected_files(quiet=1, pipeline='confound'))
    files, confound_files = confound_matching(files, confound_files)
    bad_files = []
    bs = 0
    foundconfound = []
    foundreason = []
    for s, cfile in enumerate(confound_files):
        df = load_tabular_file(cfile, index_col=None)
        found_bad_subject = False
        for i, _ in enumerate(confound):
            if confound_stat[i] == 'median':
                if relex[i](df[confound[i]].median(), crit[i]):
                    found_bad_subject = True
            elif confound_stat[i] == 'mean':
                if relex[i](df[confound[i]].mean(), crit[i]):
                    found_bad_subject = True
            elif confound_stat[i] == 'std':
                if relex[i](df[confound[i]].std(), crit[i]):
                    found_bad_subject = True
            if found_bad_subject:
                foundconfound.append(confound[i])
                foundreason.append(exclusion_criteria[i])
        if found_bad_subject:
            bad_files.append(files[s])
            bs += 1
    self.set_bad_files(
        bad_files, reason='excluded file (confound over specified stat threshold)')
    for i, f in enumerate(bad_files):
        sidecar = get_sidecar(f)
        sidecar['file_exclusion'] = {}
        sidecar['confound'] = foundconfound[i]
        sidecar['threshold'] = foundreason[i]
        for af in ['.tsv', '.nii.gz']:
            f = f.split(af)[0]
        f += '.json'
        with open(f, 'w') as fs:
            json.dump(sidecar, fs)
    print('Removed ' + str(bs) + ' files from inclusion.')
python
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'):
    """
    Excludes subjects given a certain exclusion criteria.

    Parameters
    ----------
    confound : str or list
        string or list of confound name(s) from confound files
    exclusion_criteria : str or list
        for each confound, an exclusion_criteria should be expressed as a string. It starts with >,<,>= or <= then the numerical threshold. Ex. '>0.2' means every subject whose average confound value is greater than 0.2 will be rejected.
    confound_stat : str or list
        Can be median, mean, std. How the confound data is aggregated (so if there is a measure per time-point, this is averaged over all time-points). If multiple confounds are specified, this has to be a list.

    Returns
    --------
    calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria.
    """
    self.add_history(inspect.stack()[0][3], locals(), 1)
    if isinstance(confound, str):
        confound = [confound]
    if isinstance(exclusion_criteria, str):
        exclusion_criteria = [exclusion_criteria]
    if isinstance(confound_stat, str):
        confound_stat = [confound_stat]
    if len(exclusion_criteria) != len(confound):
        raise ValueError(
            'Same number of confound names and exclusion criteria must be given')
    if len(confound_stat) != len(confound):
        raise ValueError(
            'Same number of confound names and confound stats must be given')
    relex, crit = process_exclusion_criteria(exclusion_criteria)
    files = sorted(self.get_selected_files(quiet=1))
    confound_files = sorted(
        self.get_selected_files(quiet=1, pipeline='confound'))
    files, confound_files = confound_matching(files, confound_files)
    bad_files = []
    bs = 0
    foundconfound = []
    foundreason = []
    for s, cfile in enumerate(confound_files):
        df = load_tabular_file(cfile, index_col=None)
        found_bad_subject = False
        for i, _ in enumerate(confound):
            if confound_stat[i] == 'median':
                if relex[i](df[confound[i]].median(), crit[i]):
                    found_bad_subject = True
            elif confound_stat[i] == 'mean':
                if relex[i](df[confound[i]].mean(), crit[i]):
                    found_bad_subject = True
            elif confound_stat[i] == 'std':
                if relex[i](df[confound[i]].std(), crit[i]):
                    found_bad_subject = True
            if found_bad_subject:
                foundconfound.append(confound[i])
                foundreason.append(exclusion_criteria[i])
        if found_bad_subject:
            bad_files.append(files[s])
            bs += 1
    self.set_bad_files(
        bad_files, reason='excluded file (confound over specified stat threshold)')
    for i, f in enumerate(bad_files):
        sidecar = get_sidecar(f)
        sidecar['file_exclusion'] = {}
        sidecar['confound'] = foundconfound[i]
        sidecar['threshold'] = foundreason[i]
        for af in ['.tsv', '.nii.gz']:
            f = f.split(af)[0]
        f += '.json'
        with open(f, 'w') as fs:
            json.dump(sidecar, fs)
    print('Removed ' + str(bs) + ' files from inclusion.')
[ "def", "set_exclusion_file", "(", "self", ",", "confound", ",", "exclusion_criteria", ",", "confound_stat", "=", "'mean'", ")", ":", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "if", "isinstance", "(", "confound", ",", "str", ")", ":", "confound", "=", "[", "confound", "]", "if", "isinstance", "(", "exclusion_criteria", ",", "str", ")", ":", "exclusion_criteria", "=", "[", "exclusion_criteria", "]", "if", "isinstance", "(", "confound_stat", ",", "str", ")", ":", "confound_stat", "=", "[", "confound_stat", "]", "if", "len", "(", "exclusion_criteria", ")", "!=", "len", "(", "confound", ")", ":", "raise", "ValueError", "(", "'Same number of confound names and exclusion criteria must be given'", ")", "if", "len", "(", "confound_stat", ")", "!=", "len", "(", "confound", ")", ":", "raise", "ValueError", "(", "'Same number of confound names and confound stats must be given'", ")", "relex", ",", "crit", "=", "process_exclusion_criteria", "(", "exclusion_criteria", ")", "files", "=", "sorted", "(", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ")", ")", "confound_files", "=", "sorted", "(", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ",", "pipeline", "=", "'confound'", ")", ")", "files", ",", "confound_files", "=", "confound_matching", "(", "files", ",", "confound_files", ")", "bad_files", "=", "[", "]", "bs", "=", "0", "foundconfound", "=", "[", "]", "foundreason", "=", "[", "]", "for", "s", ",", "cfile", "in", "enumerate", "(", "confound_files", ")", ":", "df", "=", "load_tabular_file", "(", "cfile", ",", "index_col", "=", "None", ")", "found_bad_subject", "=", "False", "for", "i", ",", "_", "in", "enumerate", "(", "confound", ")", ":", "if", "confound_stat", "[", "i", "]", "==", "'median'", ":", "if", "relex", "[", "i", "]", "(", "df", "[", "confound", "[", "i", "]", "]", ".", "median", "(", ")", ",", "crit", "[", "i", "]", ")", ":", "found_bad_subject", "=", "True", "elif", "confound_stat", "[", "i", "]", "==", "'mean'", ":", "if", "relex", "[", "i", "]", "(", "df", "[", "confound", "[", "i", "]", "]", ".", "mean", "(", ")", ",", "crit", "[", "i", "]", ")", ":", "found_bad_subject", "=", "True", "elif", "confound_stat", "[", "i", "]", "==", "'std'", ":", "if", "relex", "[", "i", "]", "(", "df", "[", "i", "]", "[", "confound", "[", "i", "]", "]", ".", "std", "(", ")", ",", "crit", "[", "i", "]", ")", ":", "found_bad_subject", "=", "True", "if", "found_bad_subject", ":", "foundconfound", ".", "append", "(", "confound", "[", "i", "]", ")", "foundreason", ".", "append", "(", "exclusion_criteria", "[", "i", "]", ")", "if", "found_bad_subject", ":", "bad_files", ".", "append", "(", "files", "[", "s", "]", ")", "bs", "+=", "1", "self", ".", "set_bad_files", "(", "bad_files", ",", "reason", "=", "'excluded file (confound over specfied stat threshold)'", ")", "for", "i", ",", "f", "in", "enumerate", "(", "bad_files", ")", ":", "sidecar", "=", "get_sidecar", "(", "f", ")", "sidecar", "[", "'file_exclusion'", "]", "=", "{", "}", "sidecar", "[", "'confound'", "]", "=", "foundconfound", "[", "i", "]", "sidecar", "[", "'threshold'", "]", "=", "foundreason", "[", "i", "]", "for", "af", "in", "[", "'.tsv'", ",", "'.nii.gz'", "]", ":", "f", "=", "f", ".", "split", "(", "af", ")", "[", "0", "]", "f", "+=", "'.json'", "with", "open", "(", "f", ",", "'w'", ")", "as", "fs", ":", "json", ".", "dump", "(", "sidecar", ",", "fs", ")", "print", "(", "'Removed '", "+", "str", "(", "bs", ")", "+", "' 
files from inclusion.'", ")" ]
Excludes subjects given a certain exclusion criteria.

Parameters
----------
confound : str or list
    string or list of confound name(s) from confound files
exclusion_criteria : str or list
    for each confound, an exclusion_criteria should be expressed as a string. It starts with >,<,>= or <= then the numerical threshold. Ex. '>0.2' means every subject whose average confound value is greater than 0.2 will be rejected.
confound_stat : str or list
    Can be median, mean, std. How the confound data is aggregated (so if there is a measure per time-point, this is averaged over all time-points). If multiple confounds are specified, this has to be a list.

Returns
--------
calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria.
[ "Excludes", "subjects", "given", "a", "certain", "exclusion", "criteria", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L650-L719
-1
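A sketch matching the docstring's own '>0.2' example; the confound column name is an assumption (framewise_displacement is a common fmriprep confound, but any column present in the confound files works):

# reject any file whose mean framewise_displacement exceeds 0.2
tnb.set_exclusion_file('framewise_displacement', '>0.2', confound_stat='mean')
# matching files are passed to set_bad_files and noted in their JSON sidecars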
522
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.make_parcellation
def make_parcellation(self, parcellation, parc_type=None, parc_params=None, network='defaults', update_pipeline=True, removeconfounds=False, tag=None, njobs=None, clean_params=None, yeonetworkn=None):
    """
    Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end.

    Parameters
    -----------
    parcellation : str
        specify which parcellation that you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
    parc_type : str
        can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
    parc_params : dict
        **kwargs for nilearn functions
    network : str
        if "defaults", it selects static parcellation, _if available_ (other options will be made available soon).
    removeconfounds : bool
        if true, regresses out confounds that are specified in self.set_confounds with linear regression.
    update_pipeline : bool
        TenetoBIDS gets updated with the parcellated files being selected.
    tag : str or list
        any additional tag that must be in the file name. After the tag there must either be an underscore or period (following bids).
    clean_params : dict
        **kwargs for nilearn function nilearn.signal.clean
    yeonetworkn : int (7 or 17)
        Only relevant for when parcellation is schaefer2018. Use 7 or 17 template networks
    njobs : int
        number of processes to run. Overrides TenetoBIDS.njobs

    Returns
    -------
    Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
    To load these files call TenetoBIDS.load_parcellation.

    NOTE
    ----
    These functions make use of nilearn. Please cite nilearn if used in a publication.
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)
    parc_name = parcellation.split('_')[0].lower()
    # Check confounds have been specified
    if not self.confounds and removeconfounds:
        raise ValueError(
            'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first.')
    # Check that the pipeline will be updated if confounds are to be removed
    if update_pipeline == False and removeconfounds:
        raise ValueError(
            'Pipeline must be updated in order to remove confounds within this function.')
    # In theory these should be the same. So at the moment, it goes through each element and checks they are matched.
    # A matching algorithm may be needed if cases arise where this isn't the case
    files = self.get_selected_files(quiet=1)
    # Load network communities, if possible.
    self.set_network_communities(parcellation, netn=yeonetworkn)
    if not tag:
        tag = ''
    else:
        tag = 'desc-' + tag
    if not parc_params:
        parc_params = {}
    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(self._run_make_parcellation, f, i, tag, parcellation,
                               parc_name, parc_type, parc_params) for i, f in enumerate(files)}
        for j in as_completed(job):
            j.result()

    if update_pipeline == True:
        if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
            self.set_confound_pipeline(self.pipeline)
        self.set_pipeline('teneto_' + teneto.__version__)
        self.set_pipeline_subdir('parcellation')
        if tag:
            self.set_bids_tags({'desc': tag.split('-')[1]})
        self.set_bids_suffix('roi')
        if removeconfounds:
            self.removeconfounds(
                clean_params=clean_params, transpose=None, njobs=njobs)
python
def make_parcellation(self, parcellation, parc_type=None, parc_params=None, network='defaults', update_pipeline=True, removeconfounds=False, tag=None, njobs=None, clean_params=None, yeonetworkn=None):
    """
    Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end.

    Parameters
    -----------
    parcellation : str
        specify which parcellation that you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
    parc_type : str
        can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
    parc_params : dict
        **kwargs for nilearn functions
    network : str
        if "defaults", it selects static parcellation, _if available_ (other options will be made available soon).
    removeconfounds : bool
        if true, regresses out confounds that are specified in self.set_confounds with linear regression.
    update_pipeline : bool
        TenetoBIDS gets updated with the parcellated files being selected.
    tag : str or list
        any additional tag that must be in the file name. After the tag there must either be an underscore or period (following bids).
    clean_params : dict
        **kwargs for nilearn function nilearn.signal.clean
    yeonetworkn : int (7 or 17)
        Only relevant for when parcellation is schaefer2018. Use 7 or 17 template networks
    njobs : int
        number of processes to run. Overrides TenetoBIDS.njobs

    Returns
    -------
    Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
    To load these files call TenetoBIDS.load_parcellation.

    NOTE
    ----
    These functions make use of nilearn. Please cite nilearn if used in a publication.
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)
    parc_name = parcellation.split('_')[0].lower()
    # Check confounds have been specified
    if not self.confounds and removeconfounds:
        raise ValueError(
            'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first.')
    # Check that the pipeline will be updated if confounds are to be removed
    if update_pipeline == False and removeconfounds:
        raise ValueError(
            'Pipeline must be updated in order to remove confounds within this function.')
    # In theory these should be the same. So at the moment, it goes through each element and checks they are matched.
    # A matching algorithm may be needed if cases arise where this isn't the case
    files = self.get_selected_files(quiet=1)
    # Load network communities, if possible.
    self.set_network_communities(parcellation, netn=yeonetworkn)
    if not tag:
        tag = ''
    else:
        tag = 'desc-' + tag
    if not parc_params:
        parc_params = {}
    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(self._run_make_parcellation, f, i, tag, parcellation,
                               parc_name, parc_type, parc_params) for i, f in enumerate(files)}
        for j in as_completed(job):
            j.result()

    if update_pipeline == True:
        if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
            self.set_confound_pipeline(self.pipeline)
        self.set_pipeline('teneto_' + teneto.__version__)
        self.set_pipeline_subdir('parcellation')
        if tag:
            self.set_bids_tags({'desc': tag.split('-')[1]})
        self.set_bids_suffix('roi')
        if removeconfounds:
            self.removeconfounds(
                clean_params=clean_params, transpose=None, njobs=njobs)
[ "def", "make_parcellation", "(", "self", ",", "parcellation", ",", "parc_type", "=", "None", ",", "parc_params", "=", "None", ",", "network", "=", "'defaults'", ",", "update_pipeline", "=", "True", ",", "removeconfounds", "=", "False", ",", "tag", "=", "None", ",", "njobs", "=", "None", ",", "clean_params", "=", "None", ",", "yeonetworkn", "=", "None", ")", ":", "if", "not", "njobs", ":", "njobs", "=", "self", ".", "njobs", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "parc_name", "=", "parcellation", ".", "split", "(", "'_'", ")", "[", "0", "]", ".", "lower", "(", ")", "# Check confounds have been specified", "if", "not", "self", ".", "confounds", "and", "removeconfounds", ":", "raise", "ValueError", "(", "'Specified confounds are not found. Make sure that you have run self.set_confunds([\\'Confound1\\',\\'Confound2\\']) first.'", ")", "# Check confounds have been specified", "if", "update_pipeline", "==", "False", "and", "removeconfounds", ":", "raise", "ValueError", "(", "'Pipeline must be updated in order to remove confounds within this funciton.'", ")", "# In theory these should be the same. So at the moment, it goes through each element and checks they are matched.", "# A matching algorithem may be needed if cases arise where this isnt the case", "files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ")", "# Load network communities, if possible.", "self", ".", "set_network_communities", "(", "parcellation", ",", "netn", "=", "yeonetworkn", ")", "if", "not", "tag", ":", "tag", "=", "''", "else", ":", "tag", "=", "'desc-'", "+", "tag", "if", "not", "parc_params", ":", "parc_params", "=", "{", "}", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "njobs", ")", "as", "executor", ":", "job", "=", "{", "executor", ".", "submit", "(", "self", ".", "_run_make_parcellation", ",", "f", ",", "i", ",", "tag", ",", "parcellation", ",", "parc_name", ",", "parc_type", ",", "parc_params", ")", "for", "i", ",", "f", "in", "enumerate", "(", "files", ")", "}", "for", "j", "in", "as_completed", "(", "job", ")", ":", "j", ".", "result", "(", ")", "if", "update_pipeline", "==", "True", ":", "if", "not", "self", ".", "confound_pipeline", "and", "len", "(", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ",", "pipeline", "=", "'confound'", ")", ")", ">", "0", ":", "self", ".", "set_confound_pipeline", "(", "self", ".", "pipeline", ")", "self", ".", "set_pipeline", "(", "'teneto_'", "+", "teneto", ".", "__version__", ")", "self", ".", "set_pipeline_subdir", "(", "'parcellation'", ")", "if", "tag", ":", "self", ".", "set_bids_tags", "(", "{", "'desc'", ":", "tag", ".", "split", "(", "'-'", ")", "[", "1", "]", "}", ")", "self", ".", "set_bids_suffix", "(", "'roi'", ")", "if", "removeconfounds", ":", "self", ".", "removeconfounds", "(", "clean_params", "=", "clean_params", ",", "transpose", "=", "None", ",", "njobs", "=", "njobs", ")" ]
Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end.

Parameters
-----------

parcellation : str
    specify which parcellation you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
parc_type : str
    can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
    **kwargs for nilearn functions
network : str
    if "defaults", it selects static parcellation, _if available_ (other options will be made available soon).
removeconfounds : bool
    if true, regresses out confounds that are specified in self.set_confounds with linear regression.
update_pipeline : bool
    TenetoBIDS gets updated with the parcellated files being selected.
tag : str or list
    any additional tag that must be in the file name. After the tag there must either be an underscore or a period (following bids).
clean_params : dict
    **kwargs for the nilearn function nilearn.signal.clean
yeonetworkn : int (7 or 17)
    Only relevant when parcellation is schaeffer2018. Use 7 or 17 template networks.
njobs : int
    number of processes to run. Overrides TenetoBIDS.njobs

Returns
-------
Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
To load these files call TenetoBIDS.load_parcellation.

NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication.
[ "Reduces", "the", "data", "from", "voxel", "to", "parcellation", "space", ".", "Files", "get", "saved", "in", "a", "teneto", "folder", "in", "the", "derivatives", "with", "a", "roi", "tag", "at", "the", "end", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L847-L930
-1
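A minimal usage sketch for make_parcellation above. The BIDS path, pipeline name, and confound column names are hypothetical; the constructor pattern and the make_parcellation signature come from this record and the load_frompickle record below.

import teneto

# Hypothetical BIDS layout and derivative pipeline; adjust to your data.
tnet = teneto.TenetoBIDS('/data/mybids/', pipeline='fmriprep')
# Assumed confound column names; must be set before removeconfounds=True.
tnet.set_confounds(['trans_x', 'trans_y'])
# Reduce voxel data to the Gordon parcellation and regress out confounds.
tnet.make_parcellation('gordon2014_333', parc_type='region',
                       removeconfounds=True, tag='cleaned')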
523
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.communitydetection
def communitydetection(self, community_detection_params, community_type='temporal', tag=None, file_hdr=False, file_idx=False, njobs=None): """ Calls temporal_louvain_with_consensus on connectivity data Parameters ---------- community_detection_params : dict kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus community_type : str Either 'temporal' or 'static'. If temporal, community is made per time-point for each timepoint. file_idx : bool (default false) if true, index column present in data and this will be ignored file_hdr : bool (default false) if true, header row present in data and this will be ignored njobs : int number of processes to run. Overrides TenetoBIDS.njobs Note ---- All non-positive edges are made to zero. Returns ------- List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/ """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) if not tag: tag = '' else: tag = 'desc-' + tag if community_type == 'temporal': files = self.get_selected_files(quiet=True) # Run check to make sure files are tvc input for f in files: if 'tvc' not in f: raise ValueError( 'tvc tag not found in filename. TVC data must be used in communitydetection (perhaps run TenetoBIDS.derive first?).') elif community_type == 'static': files = self.get_selected_files( quiet=True, pipeline='functionalconnectivity') with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit(self._run_communitydetection, f, community_detection_params, community_type, file_hdr, file_idx, tag) for i, f in enumerate(files) if all([t + '_' in f or t + '.' in f for t in tag])} for j in as_completed(job): j.result()
python
def communitydetection(self, community_detection_params, community_type='temporal', tag=None, file_hdr=False, file_idx=False, njobs=None): """ Calls temporal_louvain_with_consensus on connectivity data Parameters ---------- community_detection_params : dict kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus community_type : str Either 'temporal' or 'static'. If temporal, community is made per time-point for each timepoint. file_idx : bool (default false) if true, index column present in data and this will be ignored file_hdr : bool (default false) if true, header row present in data and this will be ignored njobs : int number of processes to run. Overrides TenetoBIDS.njobs Note ---- All non-positive edges are made to zero. Returns ------- List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/ """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) if not tag: tag = '' else: tag = 'desc-' + tag if community_type == 'temporal': files = self.get_selected_files(quiet=True) # Run check to make sure files are tvc input for f in files: if 'tvc' not in f: raise ValueError( 'tvc tag not found in filename. TVC data must be used in communitydetection (perhaps run TenetoBIDS.derive first?).') elif community_type == 'static': files = self.get_selected_files( quiet=True, pipeline='functionalconnectivity') with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit(self._run_communitydetection, f, community_detection_params, community_type, file_hdr, file_idx, tag) for i, f in enumerate(files) if all([t + '_' in f or t + '.' in f for t in tag])} for j in as_completed(job): j.result()
[ "def", "communitydetection", "(", "self", ",", "community_detection_params", ",", "community_type", "=", "'temporal'", ",", "tag", "=", "None", ",", "file_hdr", "=", "False", ",", "file_idx", "=", "False", ",", "njobs", "=", "None", ")", ":", "if", "not", "njobs", ":", "njobs", "=", "self", ".", "njobs", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "if", "not", "tag", ":", "tag", "=", "''", "else", ":", "tag", "=", "'desc-'", "+", "tag", "if", "community_type", "==", "'temporal'", ":", "files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "True", ")", "# Run check to make sure files are tvc input", "for", "f", "in", "files", ":", "if", "'tvc'", "not", "in", "f", ":", "raise", "ValueError", "(", "'tvc tag not found in filename. TVC data must be used in communitydetection (perhaps run TenetoBIDS.derive first?).'", ")", "elif", "community_type", "==", "'static'", ":", "files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "True", ",", "pipeline", "=", "'functionalconnectivity'", ")", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "njobs", ")", "as", "executor", ":", "job", "=", "{", "executor", ".", "submit", "(", "self", ".", "_run_communitydetection", ",", "f", ",", "community_detection_params", ",", "community_type", ",", "file_hdr", ",", "file_idx", ",", "tag", ")", "for", "i", ",", "f", "in", "enumerate", "(", "files", ")", "if", "all", "(", "[", "t", "+", "'_'", "in", "f", "or", "t", "+", "'.'", "in", "f", "for", "t", "in", "tag", "]", ")", "}", "for", "j", "in", "as_completed", "(", "job", ")", ":", "j", ".", "result", "(", ")" ]
Calls temporal_louvain_with_consensus on connectivity data

Parameters
----------

community_detection_params : dict
    kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus
community_type : str
    Either 'temporal' or 'static'. If temporal, a community partition is estimated for each time point.
file_idx : bool (default false)
    if true, an index column is present in the data and will be ignored
file_hdr : bool (default false)
    if true, a header row is present in the data and will be ignored
njobs : int
    number of processes to run. Overrides TenetoBIDS.njobs

Note
----
All non-positive edges are set to zero.

Returns
-------
List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/
[ "Calls", "temporal_louvain_with_consensus", "on", "connectivity", "data" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L950-L1001
-1
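A hedged sketch of communitydetection, assuming time-varying connectivity has already been derived (e.g. with TenetoBIDS.derive) so the selected files carry the required tvc tag. The 'resolution' key is an assumed kwarg of temporal_louvain_with_consensus.

# tnet is the TenetoBIDS instance from the earlier sketch.
community_detection_params = {'resolution': 1}   # assumed kwarg
tnet.communitydetection(community_detection_params,
                        community_type='temporal', tag='louvain')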
524
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.removeconfounds
def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None): """ Removes specified confounds using nilearn.signal.clean Parameters ---------- confounds : list List of confounds. Can be prespecified in set_confounds clean_params : dict Dictionary of kawgs to pass to nilearn.signal.clean transpose : bool (default False) Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal. njobs : int Number of jobs. Otherwise tenetoBIDS.njobs is run. update_pipeline : bool update pipeline with '_clean' tag for new files created overwrite : bool tag : str Returns ------- Says all TenetBIDS.get_selected_files with confounds removed with _rmconfounds at the end. Note ---- There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depeneds on when you clean the data. """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) if not self.confounds and not confounds: raise ValueError( 'Specified confounds are not found. Make sure that you have run self.set_confunds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.') if not tag: tag = '' else: tag = 'desc-' + tag if confounds: self.set_confounds(confounds) files = sorted(self.get_selected_files(quiet=1)) confound_files = sorted( self.get_selected_files(quiet=1, pipeline='confound')) files, confound_files = confound_matching(files, confound_files) if not clean_params: clean_params = {} with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit( self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)} for j in as_completed(job): j.result() self.set_pipeline('teneto_' + teneto.__version__) self.set_bids_suffix('roi') if tag: self.set_bids_tags({'desc': tag.split('-')[1]})
python
def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None): """ Removes specified confounds using nilearn.signal.clean Parameters ---------- confounds : list List of confounds. Can be prespecified in set_confounds clean_params : dict Dictionary of kawgs to pass to nilearn.signal.clean transpose : bool (default False) Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal. njobs : int Number of jobs. Otherwise tenetoBIDS.njobs is run. update_pipeline : bool update pipeline with '_clean' tag for new files created overwrite : bool tag : str Returns ------- Says all TenetBIDS.get_selected_files with confounds removed with _rmconfounds at the end. Note ---- There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depeneds on when you clean the data. """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) if not self.confounds and not confounds: raise ValueError( 'Specified confounds are not found. Make sure that you have run self.set_confunds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.') if not tag: tag = '' else: tag = 'desc-' + tag if confounds: self.set_confounds(confounds) files = sorted(self.get_selected_files(quiet=1)) confound_files = sorted( self.get_selected_files(quiet=1, pipeline='confound')) files, confound_files = confound_matching(files, confound_files) if not clean_params: clean_params = {} with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit( self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)} for j in as_completed(job): j.result() self.set_pipeline('teneto_' + teneto.__version__) self.set_bids_suffix('roi') if tag: self.set_bids_tags({'desc': tag.split('-')[1]})
[ "def", "removeconfounds", "(", "self", ",", "confounds", "=", "None", ",", "clean_params", "=", "None", ",", "transpose", "=", "None", ",", "njobs", "=", "None", ",", "update_pipeline", "=", "True", ",", "overwrite", "=", "True", ",", "tag", "=", "None", ")", ":", "if", "not", "njobs", ":", "njobs", "=", "self", ".", "njobs", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "if", "not", "self", ".", "confounds", "and", "not", "confounds", ":", "raise", "ValueError", "(", "'Specified confounds are not found. Make sure that you have run self.set_confunds([\\'Confound1\\',\\'Confound2\\']) first or pass confounds as input to function.'", ")", "if", "not", "tag", ":", "tag", "=", "''", "else", ":", "tag", "=", "'desc-'", "+", "tag", "if", "confounds", ":", "self", ".", "set_confounds", "(", "confounds", ")", "files", "=", "sorted", "(", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ")", ")", "confound_files", "=", "sorted", "(", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ",", "pipeline", "=", "'confound'", ")", ")", "files", ",", "confound_files", "=", "confound_matching", "(", "files", ",", "confound_files", ")", "if", "not", "clean_params", ":", "clean_params", "=", "{", "}", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "njobs", ")", "as", "executor", ":", "job", "=", "{", "executor", ".", "submit", "(", "self", ".", "_run_removeconfounds", ",", "f", ",", "confound_files", "[", "i", "]", ",", "clean_params", ",", "transpose", ",", "overwrite", ",", "tag", ")", "for", "i", ",", "f", "in", "enumerate", "(", "files", ")", "}", "for", "j", "in", "as_completed", "(", "job", ")", ":", "j", ".", "result", "(", ")", "self", ".", "set_pipeline", "(", "'teneto_'", "+", "teneto", ".", "__version__", ")", "self", ".", "set_bids_suffix", "(", "'roi'", ")", "if", "tag", ":", "self", ".", "set_bids_tags", "(", "{", "'desc'", ":", "tag", ".", "split", "(", "'-'", ")", "[", "1", "]", "}", ")" ]
Removes specified confounds using nilearn.signal.clean

Parameters
----------
confounds : list
    List of confounds. Can be prespecified in set_confounds
clean_params : dict
    Dictionary of kwargs to pass to nilearn.signal.clean
transpose : bool (default False)
    By default removeconfounds works on (time, node) dimensions. Pass transpose=True to transpose the data before and after confound removal.
njobs : int
    Number of jobs. Otherwise TenetoBIDS.njobs is used.
update_pipeline : bool
    update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str

Returns
-------
Saves all TenetoBIDS.get_selected_files with confounds removed, with _rmconfounds at the end.

Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depends on when you clean the data.
[ "Removes", "specified", "confounds", "using", "nilearn", ".", "signal", ".", "clean" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1036-L1094
-1
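A sketch of removeconfounds. The confound names are assumptions about the confound file; the clean_params keys are standard nilearn.signal.clean arguments.

# Regress out two assumed confound columns, detrending and high-pass
# filtering through nilearn.signal.clean (t_r in seconds).
tnet.removeconfounds(confounds=['csf', 'white_matter'],
                     clean_params={'detrend': True,
                                   'high_pass': 0.01,
                                   't_r': 2.0})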
525
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.networkmeasures
def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None): """ Calculates a network measure For available funcitons see: teneto.networkmeasures Parameters ---------- measure : str or list Mame of function(s) from teneto.networkmeasures that will be run. measure_params : dict or list of dctionaries) Containing kwargs for the argument in measure. See note regarding Communities key. tag : str Add additional tag to saved filenames. Note ---- In measure_params, if communities can equal 'template', 'static', or 'temporal'. These options must be precalculated. If template, Teneto tries to load default for parcellation. If static, loads static communities in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads static communities in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy Returns ------- Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func//temporalnetwork/MEASURE/ Load the measure with tenetoBIDS.load_network_measure """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) # measure can be string or list if isinstance(measure, str): measure = [measure] # measure_params can be dictionaary or list of dictionaries if isinstance(measure_params, dict): measure_params = [measure_params] if measure_params and len(measure) != len(measure_params): raise ValueError('Number of identified measure_params (' + str(len(measure_params)) + ') differs from number of identified measures (' + str(len(measure)) + '). Leave black dictionary if default methods are wanted') files = self.get_selected_files(quiet=1) if not tag: tag = '' else: tag = 'desc-' + tag with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit( self._run_networkmeasures, f, tag, measure, measure_params) for f in files} for j in as_completed(job): j.result()
python
def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None): """ Calculates a network measure For available funcitons see: teneto.networkmeasures Parameters ---------- measure : str or list Mame of function(s) from teneto.networkmeasures that will be run. measure_params : dict or list of dctionaries) Containing kwargs for the argument in measure. See note regarding Communities key. tag : str Add additional tag to saved filenames. Note ---- In measure_params, if communities can equal 'template', 'static', or 'temporal'. These options must be precalculated. If template, Teneto tries to load default for parcellation. If static, loads static communities in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads static communities in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy Returns ------- Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func//temporalnetwork/MEASURE/ Load the measure with tenetoBIDS.load_network_measure """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) # measure can be string or list if isinstance(measure, str): measure = [measure] # measure_params can be dictionaary or list of dictionaries if isinstance(measure_params, dict): measure_params = [measure_params] if measure_params and len(measure) != len(measure_params): raise ValueError('Number of identified measure_params (' + str(len(measure_params)) + ') differs from number of identified measures (' + str(len(measure)) + '). Leave black dictionary if default methods are wanted') files = self.get_selected_files(quiet=1) if not tag: tag = '' else: tag = 'desc-' + tag with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit( self._run_networkmeasures, f, tag, measure, measure_params) for f in files} for j in as_completed(job): j.result()
[ "def", "networkmeasures", "(", "self", ",", "measure", "=", "None", ",", "measure_params", "=", "None", ",", "tag", "=", "None", ",", "njobs", "=", "None", ")", ":", "if", "not", "njobs", ":", "njobs", "=", "self", ".", "njobs", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "# measure can be string or list", "if", "isinstance", "(", "measure", ",", "str", ")", ":", "measure", "=", "[", "measure", "]", "# measure_params can be dictionaary or list of dictionaries", "if", "isinstance", "(", "measure_params", ",", "dict", ")", ":", "measure_params", "=", "[", "measure_params", "]", "if", "measure_params", "and", "len", "(", "measure", ")", "!=", "len", "(", "measure_params", ")", ":", "raise", "ValueError", "(", "'Number of identified measure_params ('", "+", "str", "(", "len", "(", "measure_params", ")", ")", "+", "') differs from number of identified measures ('", "+", "str", "(", "len", "(", "measure", ")", ")", "+", "'). Leave black dictionary if default methods are wanted'", ")", "files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ")", "if", "not", "tag", ":", "tag", "=", "''", "else", ":", "tag", "=", "'desc-'", "+", "tag", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "njobs", ")", "as", "executor", ":", "job", "=", "{", "executor", ".", "submit", "(", "self", ".", "_run_networkmeasures", ",", "f", ",", "tag", ",", "measure", ",", "measure_params", ")", "for", "f", "in", "files", "}", "for", "j", "in", "as_completed", "(", "job", ")", ":", "j", ".", "result", "(", ")" ]
Calculates a network measure

For available functions see: teneto.networkmeasures

Parameters
----------

measure : str or list
    Name of function(s) from teneto.networkmeasures that will be run.

measure_params : dict or list of dictionaries
    Containing kwargs for the arguments in measure.
    See note regarding the communities key.

tag : str
    Add additional tag to saved filenames.

Note
----
In measure_params, the communities key can equal 'template', 'static', or 'temporal'. These options must be precalculated. If template, Teneto tries to load the default for the parcellation. If static, loads static communities in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads temporal communities in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy

Returns
-------
Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func/temporalnetwork/MEASURE/
Load the measure with tenetoBIDS.load_network_measure
[ "Calculates", "a", "network", "measure" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1153-L1209
-1
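A sketch of networkmeasures computing two measures in one call; measure and measure_params are matched by position, which is what the length check in the code above enforces. Both measure names exist in teneto.networkmeasures; the 'calc' kwarg is an assumption about temporal_degree_centrality.

tnet.networkmeasures(measure=['temporal_degree_centrality', 'volatility'],
                     measure_params=[{'calc': 'time'}, {}])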
526
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.set_bids_suffix
def set_bids_suffix(self, bids_suffix): """ The last analysis step is the final tag that is present in files. """ self.add_history(inspect.stack()[0][3], locals(), 1) self.bids_suffix = bids_suffix
python
def set_bids_suffix(self, bids_suffix): """ The last analysis step is the final tag that is present in files. """ self.add_history(inspect.stack()[0][3], locals(), 1) self.bids_suffix = bids_suffix
[ "def", "set_bids_suffix", "(", "self", ",", "bids_suffix", ")", ":", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "self", ".", "bids_suffix", "=", "bids_suffix" ]
The last analysis step is the final tag that is present in files.
[ "The", "last", "analysis", "step", "is", "the", "final", "tag", "that", "is", "present", "in", "files", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1426-L1431
-1
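A one-line sketch; 'roi' is the suffix that make_parcellation itself sets, so calling this manually is only needed when selecting already-parcellated files.

tnet.set_bids_suffix('roi')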
527
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.set_pipeline
def set_pipeline(self, pipeline): """ Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string. """ self.add_history(inspect.stack()[0][3], locals(), 1) if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline): print('Specified derivative directory not found.') self.get_pipeline_alternatives() else: # Todo: perform check that pipeline is valid self.pipeline = pipeline
python
def set_pipeline(self, pipeline): """ Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string. """ self.add_history(inspect.stack()[0][3], locals(), 1) if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline): print('Specified derivative directory not found.') self.get_pipeline_alternatives() else: # Todo: perform check that pipeline is valid self.pipeline = pipeline
[ "def", "set_pipeline", "(", "self", ",", "pipeline", ")", ":", "self", ".", "add_history", "(", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ",", "locals", "(", ")", ",", "1", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "BIDS_dir", "+", "'/derivatives/'", "+", "pipeline", ")", ":", "print", "(", "'Specified direvative directory not found.'", ")", "self", ".", "get_pipeline_alternatives", "(", ")", "else", ":", "# Todo: perform check that pipeline is valid", "self", ".", "pipeline", "=", "pipeline" ]
Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string.
[ "Specify", "the", "pipeline", ".", "See", "get_pipeline_alternatives", "to", "see", "what", "are", "avaialble", ".", "Input", "should", "be", "a", "string", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1433-L1443
-1
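A sketch, assuming an 'fmriprep' folder exists under BIDS_dir/derivatives/; otherwise, per the code above, the method prints the alternatives and leaves the pipeline unchanged.

tnet.set_pipeline('fmriprep')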
528
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.load_frompickle
def load_frompickle(cls, fname, reload_object=False): """ Load a saved TenetoBIDS instance. fname : str path to pickle object (output of TenetoBIDS.save_aspickle) reload_object : bool (default False) reloads object by calling teneto.TenetoBIDS (some information lost, for development) Returns ------- self : TenetoBIDS instance """ if fname[-4:] != '.pkl': fname += '.pkl' with open(fname, 'rb') as f: tnet = pickle.load(f) if reload_object: reloadnet = teneto.TenetoBIDS(tnet.BIDS_dir, pipeline=tnet.pipeline, pipeline_subdir=tnet.pipeline_subdir, bids_tags=tnet.bids_tags, bids_suffix=tnet.bids_suffix, bad_subjects=tnet.bad_subjects, confound_pipeline=tnet.confound_pipeline, raw_data_exists=tnet.raw_data_exists, njobs=tnet.njobs) # fix: assign to 'history' (the original code wrote to a misspelled # 'histroy' attribute, silently dropping the saved history) reloadnet.history = tnet.history tnet = reloadnet return tnet
python
def load_frompickle(cls, fname, reload_object=False): """ Load a saved TenetoBIDS instance. fname : str path to pickle object (output of TenetoBIDS.save_aspickle) reload_object : bool (default False) reloads object by calling teneto.TenetoBIDS (some information lost, for development) Returns ------- self : TenetoBIDS instance """ if fname[-4:] != '.pkl': fname += '.pkl' with open(fname, 'rb') as f: tnet = pickle.load(f) if reload_object: reloadnet = teneto.TenetoBIDS(tnet.BIDS_dir, pipeline=tnet.pipeline, pipeline_subdir=tnet.pipeline_subdir, bids_tags=tnet.bids_tags, bids_suffix=tnet.bids_suffix, bad_subjects=tnet.bad_subjects, confound_pipeline=tnet.confound_pipeline, raw_data_exists=tnet.raw_data_exists, njobs=tnet.njobs) # fix: assign to 'history' (the original code wrote to a misspelled # 'histroy' attribute, silently dropping the saved history) reloadnet.history = tnet.history tnet = reloadnet return tnet
[ "def", "load_frompickle", "(", "cls", ",", "fname", ",", "reload_object", "=", "False", ")", ":", "if", "fname", "[", "-", "4", ":", "]", "!=", "'.pkl'", ":", "fname", "+=", "'.pkl'", "with", "open", "(", "fname", ",", "'rb'", ")", "as", "f", ":", "tnet", "=", "pickle", ".", "load", "(", "f", ")", "if", "reload_object", ":", "reloadnet", "=", "teneto", ".", "TenetoBIDS", "(", "tnet", ".", "BIDS_dir", ",", "pipeline", "=", "tnet", ".", "pipeline", ",", "pipeline_subdir", "=", "tnet", ".", "pipeline_subdir", ",", "bids_tags", "=", "tnet", ".", "bids_tags", ",", "bids_suffix", "=", "tnet", ".", "bids_suffix", ",", "bad_subjects", "=", "tnet", ".", "bad_subjects", ",", "confound_pipeline", "=", "tnet", ".", "confound_pipeline", ",", "raw_data_exists", "=", "tnet", ".", "raw_data_exists", ",", "njobs", "=", "tnet", ".", "njobs", ")", "reloadnet", ".", "histroy", "=", "tnet", ".", "history", "tnet", "=", "reloadnet", "return", "tnet" ]
Load a saved TenetoBIDS instance.

fname : str
    path to pickle object (output of TenetoBIDS.save_aspickle)
reload_object : bool (default False)
    reloads object by calling teneto.TenetoBIDS (some information lost, for development)

Returns
-------
self : TenetoBIDS instance
[ "Loaded", "saved", "instance", "of" ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1541-L1564
-1
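A round-trip sketch; the filename is hypothetical and save_aspickle is the counterpart method named in the docstring. load_frompickle is assumed to be exposed as a classmethod (it takes cls).

tnet.save_aspickle('analysis_state.pkl')
# '.pkl' is appended automatically when missing:
tnet2 = teneto.TenetoBIDS.load_frompickle('analysis_state')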
529
wiheto/teneto
teneto/networkmeasures/temporal_closeness_centrality.py
temporal_closeness_centrality
def temporal_closeness_centrality(tnet=None, paths=None): ''' Returns temporal closeness centrality per node. Parameters ----------- Input should be *either* tnet or paths. data : array or dict Temporal network input (graphlet or contact). nettype: 'bu', 'bd'. paths : pandas dataframe Output of TenetoBIDS.networkmeasure.shortest_temporal_paths Returns -------- :close: array temporal closness centrality (nodal measure) ''' if tnet is not None and paths is not None: raise ValueError('Only network or path input allowed.') if tnet is None and paths is None: raise ValueError('No input.') # if shortest paths are not calculated, calculate them if tnet is not None: paths = shortest_temporal_path(tnet) pathmat = np.zeros([paths[['from', 'to']].max().max( )+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan pathmat[paths['from'].values, paths['to'].values, paths['t_start'].values] = paths['temporal-distance'] closeness = np.nansum(1 / np.nanmean(pathmat, axis=2), axis=1) / (pathmat.shape[1] - 1) return closeness
python
def temporal_closeness_centrality(tnet=None, paths=None): ''' Returns temporal closeness centrality per node. Parameters ----------- Input should be *either* tnet or paths. data : array or dict Temporal network input (graphlet or contact). nettype: 'bu', 'bd'. paths : pandas dataframe Output of TenetoBIDS.networkmeasure.shortest_temporal_paths Returns -------- :close: array temporal closness centrality (nodal measure) ''' if tnet is not None and paths is not None: raise ValueError('Only network or path input allowed.') if tnet is None and paths is None: raise ValueError('No input.') # if shortest paths are not calculated, calculate them if tnet is not None: paths = shortest_temporal_path(tnet) pathmat = np.zeros([paths[['from', 'to']].max().max( )+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan pathmat[paths['from'].values, paths['to'].values, paths['t_start'].values] = paths['temporal-distance'] closeness = np.nansum(1 / np.nanmean(pathmat, axis=2), axis=1) / (pathmat.shape[1] - 1) return closeness
[ "def", "temporal_closeness_centrality", "(", "tnet", "=", "None", ",", "paths", "=", "None", ")", ":", "if", "tnet", "is", "not", "None", "and", "paths", "is", "not", "None", ":", "raise", "ValueError", "(", "'Only network or path input allowed.'", ")", "if", "tnet", "is", "None", "and", "paths", "is", "None", ":", "raise", "ValueError", "(", "'No input.'", ")", "# if shortest paths are not calculated, calculate them", "if", "tnet", "is", "not", "None", ":", "paths", "=", "shortest_temporal_path", "(", "tnet", ")", "pathmat", "=", "np", ".", "zeros", "(", "[", "paths", "[", "[", "'from'", ",", "'to'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", ",", "paths", "[", "[", "'from'", ",", "'to'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", ",", "paths", "[", "[", "'t_start'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", "]", ")", "*", "np", ".", "nan", "pathmat", "[", "paths", "[", "'from'", "]", ".", "values", ",", "paths", "[", "'to'", "]", ".", "values", ",", "paths", "[", "'t_start'", "]", ".", "values", "]", "=", "paths", "[", "'temporal-distance'", "]", "closeness", "=", "np", ".", "nansum", "(", "1", "/", "np", ".", "nanmean", "(", "pathmat", ",", "axis", "=", "2", ")", ",", "axis", "=", "1", ")", "/", "(", "pathmat", ".", "shape", "[", "1", "]", "-", "1", ")", "return", "closeness" ]
Returns temporal closeness centrality per node.

Parameters
-----------

Input should be *either* tnet or paths.

tnet : array or dict
    Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.

paths : pandas dataframe
    Output of TenetoBIDS.networkmeasure.shortest_temporal_paths

Returns
--------

close : array
    temporal closeness centrality (nodal measure)
[ "Returns", "temporal", "closeness", "centrality", "per", "node", "." ]
80d7a83a9adc1714589b020627c45bd5b66248ab
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/temporal_closeness_centrality.py#L9-L52
-1
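A self-contained sketch of temporal_closeness_centrality on a small random binary undirected temporal network, assuming a (node, node, time) graphlet array is accepted as the docstring states.

import numpy as np
from teneto.networkmeasures import temporal_closeness_centrality

np.random.seed(0)
G = (np.random.rand(4, 4, 5) > 0.6).astype(int)
G = np.maximum(G, G.transpose(1, 0, 2))     # make the network undirected
for t in range(G.shape[-1]):
    np.fill_diagonal(G[:, :, t], 0)         # remove self-loops
close = temporal_closeness_centrality(tnet=G)
print(close)                                # one closeness value per node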
530
ianlini/flatten-dict
flatten_dict/flatten_dict.py
flatten
def flatten(d, reducer='tuple', inverse=False): """Flatten dict-like object. Parameters ---------- d: dict-like object The dict that will be flattened. reducer: {'tuple', 'path', function} (default: 'tuple') The key joining method. If a function is given, the function will be used to reduce. 'tuple': The resulting key will be tuple of the original keys 'path': Use ``os.path.join`` to join keys. inverse: bool (default: False) Whether you want invert the resulting key and value. Returns ------- flat_dict: dict """ if isinstance(reducer, str): reducer = REDUCER_DICT[reducer] flat_dict = {} def _flatten(d, parent=None): for key, value in six.viewitems(d): flat_key = reducer(parent, key) if isinstance(value, Mapping): _flatten(value, flat_key) else: if inverse: flat_key, value = value, flat_key if flat_key in flat_dict: raise ValueError("duplicated key '{}'".format(flat_key)) flat_dict[flat_key] = value _flatten(d) return flat_dict
python
def flatten(d, reducer='tuple', inverse=False): """Flatten dict-like object. Parameters ---------- d: dict-like object The dict that will be flattened. reducer: {'tuple', 'path', function} (default: 'tuple') The key joining method. If a function is given, the function will be used to reduce. 'tuple': The resulting key will be tuple of the original keys 'path': Use ``os.path.join`` to join keys. inverse: bool (default: False) Whether you want invert the resulting key and value. Returns ------- flat_dict: dict """ if isinstance(reducer, str): reducer = REDUCER_DICT[reducer] flat_dict = {} def _flatten(d, parent=None): for key, value in six.viewitems(d): flat_key = reducer(parent, key) if isinstance(value, Mapping): _flatten(value, flat_key) else: if inverse: flat_key, value = value, flat_key if flat_key in flat_dict: raise ValueError("duplicated key '{}'".format(flat_key)) flat_dict[flat_key] = value _flatten(d) return flat_dict
[ "def", "flatten", "(", "d", ",", "reducer", "=", "'tuple'", ",", "inverse", "=", "False", ")", ":", "if", "isinstance", "(", "reducer", ",", "str", ")", ":", "reducer", "=", "REDUCER_DICT", "[", "reducer", "]", "flat_dict", "=", "{", "}", "def", "_flatten", "(", "d", ",", "parent", "=", "None", ")", ":", "for", "key", ",", "value", "in", "six", ".", "viewitems", "(", "d", ")", ":", "flat_key", "=", "reducer", "(", "parent", ",", "key", ")", "if", "isinstance", "(", "value", ",", "Mapping", ")", ":", "_flatten", "(", "value", ",", "flat_key", ")", "else", ":", "if", "inverse", ":", "flat_key", ",", "value", "=", "value", ",", "flat_key", "if", "flat_key", "in", "flat_dict", ":", "raise", "ValueError", "(", "\"duplicated key '{}'\"", ".", "format", "(", "flat_key", ")", ")", "flat_dict", "[", "flat_key", "]", "=", "value", "_flatten", "(", "d", ")", "return", "flat_dict" ]
Flatten dict-like object.

Parameters
----------
d: dict-like object
    The dict that will be flattened.
reducer: {'tuple', 'path', function} (default: 'tuple')
    The key joining method. If a function is given, the function will be
    used to reduce.
    'tuple': The resulting key will be a tuple of the original keys.
    'path': Use ``os.path.join`` to join keys.
inverse: bool (default: False)
    Whether you want to invert the resulting key and value.

Returns
-------
flat_dict: dict
[ "Flatten", "dict", "-", "like", "object", "." ]
77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa
https://github.com/ianlini/flatten-dict/blob/77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa/flatten_dict/flatten_dict.py#L20-L56
-1
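A quick demonstration of flatten with the two built-in reducers; the import path matches this repo's package name.

from flatten_dict import flatten

nested = {'a': {'b': 1, 'c': {'d': 2}}}
print(flatten(nested))
# {('a', 'b'): 1, ('a', 'c', 'd'): 2}
print(flatten(nested, reducer='path'))
# {'a/b': 1, 'a/c/d': 2} on POSIX (os.path.join is platform dependent)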
531
ianlini/flatten-dict
flatten_dict/flatten_dict.py
nested_set_dict
def nested_set_dict(d, keys, value): """Set a value to a sequence of nested keys Parameters ---------- d: Mapping keys: Sequence[str] value: Any """ assert keys key = keys[0] if len(keys) == 1: if key in d: raise ValueError("duplicated key '{}'".format(key)) d[key] = value return d = d.setdefault(key, {}) nested_set_dict(d, keys[1:], value)
python
def nested_set_dict(d, keys, value): """Set a value to a sequence of nested keys Parameters ---------- d: Mapping keys: Sequence[str] value: Any """ assert keys key = keys[0] if len(keys) == 1: if key in d: raise ValueError("duplicated key '{}'".format(key)) d[key] = value return d = d.setdefault(key, {}) nested_set_dict(d, keys[1:], value)
[ "def", "nested_set_dict", "(", "d", ",", "keys", ",", "value", ")", ":", "assert", "keys", "key", "=", "keys", "[", "0", "]", "if", "len", "(", "keys", ")", "==", "1", ":", "if", "key", "in", "d", ":", "raise", "ValueError", "(", "\"duplicated key '{}'\"", ".", "format", "(", "key", ")", ")", "d", "[", "key", "]", "=", "value", "return", "d", "=", "d", ".", "setdefault", "(", "key", ",", "{", "}", ")", "nested_set_dict", "(", "d", ",", "keys", "[", "1", ":", "]", ",", "value", ")" ]
Set a value to a sequence of nested keys Parameters ---------- d: Mapping keys: Sequence[str] value: Any
[ "Set", "a", "value", "to", "a", "sequence", "of", "nested", "keys" ]
77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa
https://github.com/ianlini/flatten-dict/blob/77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa/flatten_dict/flatten_dict.py#L59-L76
-1
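A sketch of the helper's behavior; it is used internally by unflatten below, and a duplicated leaf key raises ValueError.

from flatten_dict.flatten_dict import nested_set_dict

d = {}
nested_set_dict(d, ('a', 'b', 'c'), 1)
nested_set_dict(d, ('a', 'b', 'e'), 2)
print(d)   # {'a': {'b': {'c': 1, 'e': 2}}}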
532
ianlini/flatten-dict
flatten_dict/flatten_dict.py
unflatten
def unflatten(d, splitter='tuple', inverse=False): """Unflatten dict-like object. Parameters ---------- d: dict-like object The dict that will be unflattened. splitter: {'tuple', 'path', function} (default: 'tuple') The key splitting method. If a function is given, the function will be used to split. 'tuple': Use each element in the tuple key as the key of the unflattened dict. 'path': Use ``pathlib.Path.parts`` to split keys. inverse: bool (default: False) Whether you want to invert the key and value before flattening. Returns ------- unflattened_dict: dict """ if isinstance(splitter, str): splitter = SPLITTER_DICT[splitter] unflattened_dict = {} for flat_key, value in six.viewitems(d): if inverse: flat_key, value = value, flat_key key_tuple = splitter(flat_key) nested_set_dict(unflattened_dict, key_tuple, value) return unflattened_dict
python
def unflatten(d, splitter='tuple', inverse=False): """Unflatten dict-like object. Parameters ---------- d: dict-like object The dict that will be unflattened. splitter: {'tuple', 'path', function} (default: 'tuple') The key splitting method. If a function is given, the function will be used to split. 'tuple': Use each element in the tuple key as the key of the unflattened dict. 'path': Use ``pathlib.Path.parts`` to split keys. inverse: bool (default: False) Whether you want to invert the key and value before flattening. Returns ------- unflattened_dict: dict """ if isinstance(splitter, str): splitter = SPLITTER_DICT[splitter] unflattened_dict = {} for flat_key, value in six.viewitems(d): if inverse: flat_key, value = value, flat_key key_tuple = splitter(flat_key) nested_set_dict(unflattened_dict, key_tuple, value) return unflattened_dict
[ "def", "unflatten", "(", "d", ",", "splitter", "=", "'tuple'", ",", "inverse", "=", "False", ")", ":", "if", "isinstance", "(", "splitter", ",", "str", ")", ":", "splitter", "=", "SPLITTER_DICT", "[", "splitter", "]", "unflattened_dict", "=", "{", "}", "for", "flat_key", ",", "value", "in", "six", ".", "viewitems", "(", "d", ")", ":", "if", "inverse", ":", "flat_key", ",", "value", "=", "value", ",", "flat_key", "key_tuple", "=", "splitter", "(", "flat_key", ")", "nested_set_dict", "(", "unflattened_dict", ",", "key_tuple", ",", "value", ")", "return", "unflattened_dict" ]
Unflatten dict-like object.

Parameters
----------
d: dict-like object
    The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
    The key splitting method. If a function is given, the function will be
    used to split.
    'tuple': Use each element in the tuple key as the key of the unflattened dict.
    'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
    Whether you want to invert the key and value before unflattening.

Returns
-------
unflattened_dict: dict
[ "Unflatten", "dict", "-", "like", "object", "." ]
77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa
https://github.com/ianlini/flatten-dict/blob/77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa/flatten_dict/flatten_dict.py#L79-L108
-1
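A round-trip sketch combining flatten and unflatten; the splitter mirrors the reducer used when flattening (shown here for 'path' on a POSIX system).

from flatten_dict import flatten, unflatten

nested = {'a': {'b': 1, 'c': 2}}
flat = flatten(nested, reducer='path')      # {'a/b': 1, 'a/c': 2}
assert unflatten(flat, splitter='path') == nested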
533
salu133445/pypianoroll
pypianoroll/plot.py
plot_track
def plot_track(track, filename=None, beat_resolution=None, downbeats=None, preset='default', cmap='Blues', xtick='auto', ytick='octave', xticklabel=True, yticklabel='auto', tick_loc=None, tick_direction='in', label='both', grid='both', grid_linestyle=':', grid_linewidth=.5): """ Plot the pianoroll or save a plot of the pianoroll. Parameters ---------- filename : The filename to which the plot is saved. If None, save nothing. beat_resolution : int The number of time steps used to represent a beat. Required and only effective when `xtick` is 'beat'. downbeats : list An array that indicates whether the time step contains a downbeat (i.e., the first time step of a bar). preset : {'default', 'plain', 'frame'} A string that indicates the preset theme to use. - In 'default' preset, the ticks, grid and labels are on. - In 'frame' preset, the ticks and grid are both off. - In 'plain' preset, the x- and y-axis are both off. cmap : `matplotlib.colors.Colormap` The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to 'Blues'. Only effective when `pianoroll` is 2D. xtick : {'auto', 'beat', 'step', 'off'} A string that indicates what to use as ticks along the x-axis. If 'auto' is given, automatically set to 'beat' if `beat_resolution` is also given and set to 'step', otherwise. Defaults to 'auto'. ytick : {'octave', 'pitch', 'off'} A string that indicates what to use as ticks along the y-axis. Defaults to 'octave'. xticklabel : bool Whether to add tick labels along the x-axis. Only effective when `xtick` is not 'off'. yticklabel : {'auto', 'name', 'number', 'off'} If 'name', use octave name and pitch name (key name when `is_drum` is True) as tick labels along the y-axis. If 'number', use pitch number. If 'auto', set to 'name' when `ytick` is 'octave' and 'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective when `ytick` is not 'off'. tick_loc : tuple or list The locations to put the ticks. Availables elements are 'bottom', 'top', 'left' and 'right'. Defaults to ('bottom', 'left'). tick_direction : {'in', 'out', 'inout'} A string that indicates where to put the ticks. Defaults to 'in'. Only effective when one of `xtick` and `ytick` is on. label : {'x', 'y', 'both', 'off'} A string that indicates whether to add labels to the x-axis and y-axis. Defaults to 'both'. grid : {'x', 'y', 'both', 'off'} A string that indicates whether to add grids to the x-axis, y-axis, both or neither. Defaults to 'both'. grid_linestyle : str Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle' argument. grid_linewidth : float Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth' argument. Returns ------- fig : `matplotlib.figure.Figure` object A :class:`matplotlib.figure.Figure` object. ax : `matplotlib.axes.Axes` object A :class:`matplotlib.axes.Axes` object. """ if not HAS_MATPLOTLIB: raise ImportError("matplotlib package is required for plotting " "supports.") fig, ax = plt.subplots() plot_pianoroll(ax, track.pianoroll, track.is_drum, beat_resolution, downbeats, preset=preset, cmap=cmap, xtick=xtick, ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel, tick_loc=tick_loc, tick_direction=tick_direction, label=label, grid=grid, grid_linestyle=grid_linestyle, grid_linewidth=grid_linewidth) if filename is not None: plt.savefig(filename) return fig, ax
python
def plot_track(track, filename=None, beat_resolution=None, downbeats=None, preset='default', cmap='Blues', xtick='auto', ytick='octave', xticklabel=True, yticklabel='auto', tick_loc=None, tick_direction='in', label='both', grid='both', grid_linestyle=':', grid_linewidth=.5): """ Plot the pianoroll or save a plot of the pianoroll. Parameters ---------- filename : The filename to which the plot is saved. If None, save nothing. beat_resolution : int The number of time steps used to represent a beat. Required and only effective when `xtick` is 'beat'. downbeats : list An array that indicates whether the time step contains a downbeat (i.e., the first time step of a bar). preset : {'default', 'plain', 'frame'} A string that indicates the preset theme to use. - In 'default' preset, the ticks, grid and labels are on. - In 'frame' preset, the ticks and grid are both off. - In 'plain' preset, the x- and y-axis are both off. cmap : `matplotlib.colors.Colormap` The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to 'Blues'. Only effective when `pianoroll` is 2D. xtick : {'auto', 'beat', 'step', 'off'} A string that indicates what to use as ticks along the x-axis. If 'auto' is given, automatically set to 'beat' if `beat_resolution` is also given and set to 'step', otherwise. Defaults to 'auto'. ytick : {'octave', 'pitch', 'off'} A string that indicates what to use as ticks along the y-axis. Defaults to 'octave'. xticklabel : bool Whether to add tick labels along the x-axis. Only effective when `xtick` is not 'off'. yticklabel : {'auto', 'name', 'number', 'off'} If 'name', use octave name and pitch name (key name when `is_drum` is True) as tick labels along the y-axis. If 'number', use pitch number. If 'auto', set to 'name' when `ytick` is 'octave' and 'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective when `ytick` is not 'off'. tick_loc : tuple or list The locations to put the ticks. Availables elements are 'bottom', 'top', 'left' and 'right'. Defaults to ('bottom', 'left'). tick_direction : {'in', 'out', 'inout'} A string that indicates where to put the ticks. Defaults to 'in'. Only effective when one of `xtick` and `ytick` is on. label : {'x', 'y', 'both', 'off'} A string that indicates whether to add labels to the x-axis and y-axis. Defaults to 'both'. grid : {'x', 'y', 'both', 'off'} A string that indicates whether to add grids to the x-axis, y-axis, both or neither. Defaults to 'both'. grid_linestyle : str Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle' argument. grid_linewidth : float Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth' argument. Returns ------- fig : `matplotlib.figure.Figure` object A :class:`matplotlib.figure.Figure` object. ax : `matplotlib.axes.Axes` object A :class:`matplotlib.axes.Axes` object. """ if not HAS_MATPLOTLIB: raise ImportError("matplotlib package is required for plotting " "supports.") fig, ax = plt.subplots() plot_pianoroll(ax, track.pianoroll, track.is_drum, beat_resolution, downbeats, preset=preset, cmap=cmap, xtick=xtick, ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel, tick_loc=tick_loc, tick_direction=tick_direction, label=label, grid=grid, grid_linestyle=grid_linestyle, grid_linewidth=grid_linewidth) if filename is not None: plt.savefig(filename) return fig, ax
[ "def", "plot_track", "(", "track", ",", "filename", "=", "None", ",", "beat_resolution", "=", "None", ",", "downbeats", "=", "None", ",", "preset", "=", "'default'", ",", "cmap", "=", "'Blues'", ",", "xtick", "=", "'auto'", ",", "ytick", "=", "'octave'", ",", "xticklabel", "=", "True", ",", "yticklabel", "=", "'auto'", ",", "tick_loc", "=", "None", ",", "tick_direction", "=", "'in'", ",", "label", "=", "'both'", ",", "grid", "=", "'both'", ",", "grid_linestyle", "=", "':'", ",", "grid_linewidth", "=", ".5", ")", ":", "if", "not", "HAS_MATPLOTLIB", ":", "raise", "ImportError", "(", "\"matplotlib package is required for plotting \"", "\"supports.\"", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "plot_pianoroll", "(", "ax", ",", "track", ".", "pianoroll", ",", "track", ".", "is_drum", ",", "beat_resolution", ",", "downbeats", ",", "preset", "=", "preset", ",", "cmap", "=", "cmap", ",", "xtick", "=", "xtick", ",", "ytick", "=", "ytick", ",", "xticklabel", "=", "xticklabel", ",", "yticklabel", "=", "yticklabel", ",", "tick_loc", "=", "tick_loc", ",", "tick_direction", "=", "tick_direction", ",", "label", "=", "label", ",", "grid", "=", "grid", ",", "grid_linestyle", "=", "grid_linestyle", ",", "grid_linewidth", "=", "grid_linewidth", ")", "if", "filename", "is", "not", "None", ":", "plt", ".", "savefig", "(", "filename", ")", "return", "fig", ",", "ax" ]
Plot the pianoroll or save a plot of the pianoroll.

Parameters
----------
filename : str
    The filename to which the plot is saved. If None, save nothing.
beat_resolution : int
    The number of time steps used to represent a beat. Required and only
    effective when `xtick` is 'beat'.
downbeats : list
    An array that indicates whether the time step contains a downbeat
    (i.e., the first time step of a bar).
preset : {'default', 'plain', 'frame'}
    A string that indicates the preset theme to use.
    - In 'default' preset, the ticks, grid and labels are on.
    - In 'frame' preset, the ticks and grid are both off.
    - In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
    The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
    'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
    A string that indicates what to use as ticks along the x-axis. If
    'auto' is given, automatically set to 'beat' if `beat_resolution` is
    also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
    A string that indicates what to use as ticks along the y-axis.
    Defaults to 'octave'.
xticklabel : bool
    Whether to add tick labels along the x-axis. Only effective when
    `xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
    If 'name', use octave name and pitch name (key name when `is_drum` is
    True) as tick labels along the y-axis. If 'number', use pitch number.
    If 'auto', set to 'name' when `ytick` is 'octave' and 'number' when
    `ytick` is 'pitch'. Defaults to 'auto'. Only effective when `ytick`
    is not 'off'.
tick_loc : tuple or list
    The locations to put the ticks. Available elements are 'bottom',
    'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
    A string that indicates the direction of the ticks. Defaults to 'in'.
    Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
    A string that indicates whether to add labels to the x-axis and
    y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
    A string that indicates whether to add grids to the x-axis, y-axis,
    both or neither. Defaults to 'both'.
grid_linestyle : str
    Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
    argument.
grid_linewidth : float
    Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
    argument.

Returns
-------
fig : `matplotlib.figure.Figure` object
    A :class:`matplotlib.figure.Figure` object.
ax : `matplotlib.axes.Axes` object
    A :class:`matplotlib.axes.Axes` object.
[ "Plot", "the", "pianoroll", "or", "save", "a", "plot", "of", "the", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/plot.py#L216-L303
-1
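A sketch plotting a tiny two-note pianoroll; matplotlib must be installed, beat_resolution=24 is an assumption about the encoding, and the Track constructor signature is taken from the append_track record below.

import numpy as np
from pypianoroll import Track
from pypianoroll.plot import plot_track

pianoroll = np.zeros((96, 128), dtype=bool)
pianoroll[:48, 60] = True    # C4 for the first two beats
pianoroll[48:, 64] = True    # E4 for the last two
track = Track(pianoroll=pianoroll, program=0, name='melody')
fig, ax = plot_track(track, filename='melody.png',
                     beat_resolution=24, xtick='beat')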
534
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.append_track
def append_track(self, track=None, pianoroll=None, program=0, is_drum=False, name='unknown'): """ Append a multitrack.Track instance to the track list or create a new multitrack.Track object and append it to the track list. Parameters ---------- track : pianoroll.Track A :class:`pypianoroll.Track` instance to be appended to the track list. pianoroll : np.ndarray, shape=(n_time_steps, 128) A pianoroll matrix. The first and second dimension represents time and pitch, respectively. Available datatypes are bool, int and float. Only effective when `track` is None. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). Only effective when `track` is None. is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. Only effective when `track` is None. name : str The name of the track. Defaults to 'unknown'. Only effective when `track` is None. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set """ if track is not None: if not isinstance(track, Track): raise TypeError("`track` must be a pypianoroll.Track instance.") track.check_validity() else: track = Track(pianoroll, program, is_drum, name) self.tracks.append(track)
python
def append_track(self, track=None, pianoroll=None, program=0, is_drum=False, name='unknown'): """ Append a multitrack.Track instance to the track list or create a new multitrack.Track object and append it to the track list. Parameters ---------- track : pianoroll.Track A :class:`pypianoroll.Track` instance to be appended to the track list. pianoroll : np.ndarray, shape=(n_time_steps, 128) A pianoroll matrix. The first and second dimension represents time and pitch, respectively. Available datatypes are bool, int and float. Only effective when `track` is None. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). Only effective when `track` is None. is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. Only effective when `track` is None. name : str The name of the track. Defaults to 'unknown'. Only effective when `track` is None. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set """ if track is not None: if not isinstance(track, Track): raise TypeError("`track` must be a pypianoroll.Track instance.") track.check_validity() else: track = Track(pianoroll, program, is_drum, name) self.tracks.append(track)
[ "def", "append_track", "(", "self", ",", "track", "=", "None", ",", "pianoroll", "=", "None", ",", "program", "=", "0", ",", "is_drum", "=", "False", ",", "name", "=", "'unknown'", ")", ":", "if", "track", "is", "not", "None", ":", "if", "not", "isinstance", "(", "track", ",", "Track", ")", ":", "raise", "TypeError", "(", "\"`track` must be a pypianoroll.Track instance.\"", ")", "track", ".", "check_validity", "(", ")", "else", ":", "track", "=", "Track", "(", "pianoroll", ",", "program", ",", "is_drum", ",", "name", ")", "self", ".", "tracks", ".", "append", "(", "track", ")" ]
Append a multitrack.Track instance to the track list or create a new multitrack.Track object and append it to the track list.

Parameters
----------
track : pypianoroll.Track
    A :class:`pypianoroll.Track` instance to be appended to the track list.
pianoroll : np.ndarray, shape=(n_time_steps, 128)
    A pianoroll matrix. The first and second dimensions represent time and pitch, respectively. Available datatypes are bool, int and float. Only effective when `track` is None.
program : int
    A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). Only effective when `track` is None.
is_drum : bool
    A boolean value that indicates whether it is a percussion track. Defaults to False. Only effective when `track` is None.
name : str
    The name of the track. Defaults to 'unknown'. Only effective when `track` is None.

References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
[ "Append", "a", "multitrack", ".", "Track", "instance", "to", "the", "track", "list", "or", "create", "a", "new", "multitrack", ".", "Track", "object", "and", "append", "it", "to", "the", "track", "list", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L143-L180
-1
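A sketch that exercises both branches of append_track: passing a ready Track, then building one from keyword arguments. The Multitrack beat_resolution kwarg is an assumption; GM program 33 is Electric Bass (finger) in 0-indexed numbering.

import numpy as np
from pypianoroll import Multitrack, Track

multitrack = Multitrack(beat_resolution=24)   # kwarg assumed
drums = Track(pianoroll=np.zeros((96, 128), dtype=bool),
              is_drum=True, name='drums')
multitrack.append_track(track=drums)
multitrack.append_track(pianoroll=np.zeros((96, 128), dtype=bool),
                        program=33, name='bass')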
535
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.check_validity
def check_validity(self): """ Raise an error if any invalid attribute found. Raises ------ TypeError If an attribute has an invalid type. ValueError If an attribute has an invalid value (of the correct type). """ # tracks for track in self.tracks: if not isinstance(track, Track): raise TypeError("`tracks` must be a list of " "`pypianoroll.Track` instances.") track.check_validity() # tempo if not isinstance(self.tempo, np.ndarray): raise TypeError("`tempo` must be int or a numpy array.") elif not np.issubdtype(self.tempo.dtype, np.number): raise TypeError("Data type of `tempo` must be a subdtype of " "np.number.") elif self.tempo.ndim != 1: raise ValueError("`tempo` must be a 1D numpy array.") if np.any(self.tempo <= 0.0): raise ValueError("`tempo` should contain only positive numbers.") # downbeat if self.downbeat is not None: if not isinstance(self.downbeat, np.ndarray): raise TypeError("`downbeat` must be a numpy array.") if not np.issubdtype(self.downbeat.dtype, np.bool_): raise TypeError("Data type of `downbeat` must be bool.") if self.downbeat.ndim != 1: raise ValueError("`downbeat` must be a 1D numpy array.") # beat_resolution if not isinstance(self.beat_resolution, int): raise TypeError("`beat_resolution` must be int.") if self.beat_resolution < 1: raise ValueError("`beat_resolution` must be a positive integer.") # name if not isinstance(self.name, string_types): raise TypeError("`name` must be a string.")
python
def check_validity(self): """ Raise an error if any invalid attribute found. Raises ------ TypeError If an attribute has an invalid type. ValueError If an attribute has an invalid value (of the correct type). """ # tracks for track in self.tracks: if not isinstance(track, Track): raise TypeError("`tracks` must be a list of " "`pypianoroll.Track` instances.") track.check_validity() # tempo if not isinstance(self.tempo, np.ndarray): raise TypeError("`tempo` must be int or a numpy array.") elif not np.issubdtype(self.tempo.dtype, np.number): raise TypeError("Data type of `tempo` must be a subdtype of " "np.number.") elif self.tempo.ndim != 1: raise ValueError("`tempo` must be a 1D numpy array.") if np.any(self.tempo <= 0.0): raise ValueError("`tempo` should contain only positive numbers.") # downbeat if self.downbeat is not None: if not isinstance(self.downbeat, np.ndarray): raise TypeError("`downbeat` must be a numpy array.") if not np.issubdtype(self.downbeat.dtype, np.bool_): raise TypeError("Data type of `downbeat` must be bool.") if self.downbeat.ndim != 1: raise ValueError("`downbeat` must be a 1D numpy array.") # beat_resolution if not isinstance(self.beat_resolution, int): raise TypeError("`beat_resolution` must be int.") if self.beat_resolution < 1: raise ValueError("`beat_resolution` must be a positive integer.") # name if not isinstance(self.name, string_types): raise TypeError("`name` must be a string.")
[ "def", "check_validity", "(", "self", ")", ":", "# tracks", "for", "track", "in", "self", ".", "tracks", ":", "if", "not", "isinstance", "(", "track", ",", "Track", ")", ":", "raise", "TypeError", "(", "\"`tracks` must be a list of \"", "\"`pypianoroll.Track` instances.\"", ")", "track", ".", "check_validity", "(", ")", "# tempo", "if", "not", "isinstance", "(", "self", ".", "tempo", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"`tempo` must be int or a numpy array.\"", ")", "elif", "not", "np", ".", "issubdtype", "(", "self", ".", "tempo", ".", "dtype", ",", "np", ".", "number", ")", ":", "raise", "TypeError", "(", "\"Data type of `tempo` must be a subdtype of \"", "\"np.number.\"", ")", "elif", "self", ".", "tempo", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"`tempo` must be a 1D numpy array.\"", ")", "if", "np", ".", "any", "(", "self", ".", "tempo", "<=", "0.0", ")", ":", "raise", "ValueError", "(", "\"`tempo` should contain only positive numbers.\"", ")", "# downbeat", "if", "self", ".", "downbeat", "is", "not", "None", ":", "if", "not", "isinstance", "(", "self", ".", "downbeat", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"`downbeat` must be a numpy array.\"", ")", "if", "not", "np", ".", "issubdtype", "(", "self", ".", "downbeat", ".", "dtype", ",", "np", ".", "bool_", ")", ":", "raise", "TypeError", "(", "\"Data type of `downbeat` must be bool.\"", ")", "if", "self", ".", "downbeat", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"`downbeat` must be a 1D numpy array.\"", ")", "# beat_resolution", "if", "not", "isinstance", "(", "self", ".", "beat_resolution", ",", "int", ")", ":", "raise", "TypeError", "(", "\"`beat_resolution` must be int.\"", ")", "if", "self", ".", "beat_resolution", "<", "1", ":", "raise", "ValueError", "(", "\"`beat_resolution` must be a positive integer.\"", ")", "# name", "if", "not", "isinstance", "(", "self", ".", "name", ",", "string_types", ")", ":", "raise", "TypeError", "(", "\"`name` must be a string.\"", ")" ]
Raise an error if any invalid attribute found. Raises ------ TypeError If an attribute has an invalid type. ValueError If an attribute has an invalid value (of the correct type).
[ "Raise", "an", "error", "if", "any", "invalid", "attribute", "found", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L210-L253
-1
536
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.clip
def clip(self, lower=0, upper=127): """ Clip the pianorolls of all tracks by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianorolls. Defaults to 0. upper : int or float The upper bound to clip the pianorolls. Defaults to 127. """ for track in self.tracks: track.clip(lower, upper)
python
def clip(self, lower=0, upper=127): """ Clip the pianorolls of all tracks by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianorolls. Defaults to 0. upper : int or float The upper bound to clip the pianorolls. Defaults to 127. """ for track in self.tracks: track.clip(lower, upper)
[ "def", "clip", "(", "self", ",", "lower", "=", "0", ",", "upper", "=", "127", ")", ":", "for", "track", "in", "self", ".", "tracks", ":", "track", ".", "clip", "(", "lower", ",", "upper", ")" ]
Clip the pianorolls of all tracks by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianorolls. Defaults to 0. upper : int or float The upper bound to clip the pianorolls. Defaults to 127.
[ "Clip", "the", "pianorolls", "of", "all", "tracks", "by", "the", "given", "lower", "and", "upper", "bounds", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L255-L268
-1
537
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.get_downbeat_steps
def get_downbeat_steps(self): """ Return the indices of time steps that contain downbeats. Returns ------- downbeat_steps : list The indices of time steps that contain downbeats. """ if self.downbeat is None: return [] downbeat_steps = np.nonzero(self.downbeat)[0].tolist() return downbeat_steps
python
def get_downbeat_steps(self): """ Return the indices of time steps that contain downbeats. Returns ------- downbeat_steps : list The indices of time steps that contain downbeats. """ if self.downbeat is None: return [] downbeat_steps = np.nonzero(self.downbeat)[0].tolist() return downbeat_steps
[ "def", "get_downbeat_steps", "(", "self", ")", ":", "if", "self", ".", "downbeat", "is", "None", ":", "return", "[", "]", "downbeat_steps", "=", "np", ".", "nonzero", "(", "self", ".", "downbeat", ")", "[", "0", "]", ".", "tolist", "(", ")", "return", "downbeat_steps" ]
Return the indices of time steps that contain downbeats. Returns ------- downbeat_steps : list The indices of time steps that contain downbeats.
[ "Return", "the", "indices", "of", "time", "steps", "that", "contain", "downbeats", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L324-L337
-1
538
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.get_empty_tracks
def get_empty_tracks(self): """ Return the indices of tracks with empty pianorolls. Returns ------- empty_track_indices : list The indices of tracks with empty pianorolls. """ empty_track_indices = [idx for idx, track in enumerate(self.tracks) if not np.any(track.pianoroll)] return empty_track_indices
python
def get_empty_tracks(self): """ Return the indices of tracks with empty pianorolls. Returns ------- empty_track_indices : list The indices of tracks with empty pianorolls. """ empty_track_indices = [idx for idx, track in enumerate(self.tracks) if not np.any(track.pianoroll)] return empty_track_indices
[ "def", "get_empty_tracks", "(", "self", ")", ":", "empty_track_indices", "=", "[", "idx", "for", "idx", ",", "track", "in", "enumerate", "(", "self", ".", "tracks", ")", "if", "not", "np", ".", "any", "(", "track", ".", "pianoroll", ")", "]", "return", "empty_track_indices" ]
Return the indices of tracks with empty pianorolls. Returns ------- empty_track_indices : list The indices of tracks with empty pianorolls.
[ "Return", "the", "indices", "of", "tracks", "with", "empty", "pianorolls", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L339-L351
-1
539
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.get_merged_pianoroll
def get_merged_pianoroll(self, mode='sum'): """ Return the merged pianoroll. Parameters ---------- mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of all the pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among all the pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the pianorolls has nonzero value at that pixel; False if all pianorolls are inactive (zero-valued) at that pixel. Returns ------- merged : np.ndarray, shape=(n_time_steps, 128) The merged pianoroll. """ stacked = self.get_stacked_pianoroll() if mode == 'any': merged = np.any(stacked, axis=2) elif mode == 'sum': merged = np.sum(stacked, axis=2) elif mode == 'max': merged = np.max(stacked, axis=2) else: raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.") return merged
python
def get_merged_pianoroll(self, mode='sum'): """ Return the merged pianoroll. Parameters ---------- mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of all the pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among all the pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the pianorolls has nonzero value at that pixel; False if all pianorolls are inactive (zero-valued) at that pixel. Returns ------- merged : np.ndarray, shape=(n_time_steps, 128) The merged pianoroll. """ stacked = self.get_stacked_pianoroll() if mode == 'any': merged = np.any(stacked, axis=2) elif mode == 'sum': merged = np.sum(stacked, axis=2) elif mode == 'max': merged = np.max(stacked, axis=2) else: raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.") return merged
[ "def", "get_merged_pianoroll", "(", "self", ",", "mode", "=", "'sum'", ")", ":", "stacked", "=", "self", ".", "get_stacked_pianoroll", "(", ")", "if", "mode", "==", "'any'", ":", "merged", "=", "np", ".", "any", "(", "stacked", ",", "axis", "=", "2", ")", "elif", "mode", "==", "'sum'", ":", "merged", "=", "np", ".", "sum", "(", "stacked", ",", "axis", "=", "2", ")", "elif", "mode", "==", "'max'", ":", "merged", "=", "np", ".", "max", "(", "stacked", ",", "axis", "=", "2", ")", "else", ":", "raise", "ValueError", "(", "\"`mode` must be one of {'max', 'sum', 'any'}.\"", ")", "return", "merged" ]
Return the merged pianoroll. Parameters ---------- mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of all the pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among all the pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the pianorolls has nonzero value at that pixel; False if all pianorolls are inactive (zero-valued) at that pixel. Returns ------- merged : np.ndarray, shape=(n_time_steps, 128) The merged pianoroll.
[ "Return", "the", "merged", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L371-L407
-1
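The three merging modes above differ only in the reduction applied along the track axis. A small pure-numpy sketch (building the stacked array directly, since the `get_stacked_pianoroll` record is not shown here) illustrates the difference:

import numpy as np

stacked = np.zeros((4, 128, 2), np.uint8)  # (time, pitch, track)
stacked[0, 60, 0] = 40
stacked[0, 60, 1] = 90

print(np.sum(stacked, axis=2)[0, 60])  # 130  -- 'sum' mode adds velocities
print(np.max(stacked, axis=2)[0, 60])  # 90   -- 'max' mode keeps the loudest
print(np.any(stacked, axis=2)[0, 60])  # True -- 'any' mode only tracks activity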
540
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.merge_tracks
def merge_tracks(self, track_indices=None, mode='sum', program=0, is_drum=False, name='merged', remove_merged=False): """ Merge pianorolls of the tracks specified by `track_indices`. The merged track will have program number as given by `program` and drum indicator as given by `is_drum`. The merged track will be appended at the end of the track list. Parameters ---------- track_indices : list The indices of tracks to be merged. Defaults to all the tracks. mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of the collected pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among the collected pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the collected pianorolls has nonzero value at that pixel; False if all the collected pianorolls are inactive (zero-valued) at that pixel. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. name : str A name to be assigned to the merged track. Defaults to 'merged'. remove_merged : bool True to remove the source tracks from the track list. False to keep them. Defaults to False. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set """ if mode not in ('max', 'sum', 'any'): raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.") merged = self[track_indices].get_merged_pianoroll(mode) merged_track = Track(merged, program, is_drum, name) self.append_track(merged_track) if remove_merged: self.remove_tracks(track_indices)
python
def merge_tracks(self, track_indices=None, mode='sum', program=0, is_drum=False, name='merged', remove_merged=False): """ Merge pianorolls of the tracks specified by `track_indices`. The merged track will have program number as given by `program` and drum indicator as given by `is_drum`. The merged track will be appended at the end of the track list. Parameters ---------- track_indices : list The indices of tracks to be merged. Defaults to all the tracks. mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of the collected pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among the collected pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the collected pianorolls has nonzero value at that pixel; False if all the collected pianorolls are inactive (zero-valued) at that pixel. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. name : str A name to be assigned to the merged track. Defaults to 'merged'. remove_merged : bool True to remove the source tracks from the track list. False to keep them. Defaults to False. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set """ if mode not in ('max', 'sum', 'any'): raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.") merged = self[track_indices].get_merged_pianoroll(mode) merged_track = Track(merged, program, is_drum, name) self.append_track(merged_track) if remove_merged: self.remove_tracks(track_indices)
[ "def", "merge_tracks", "(", "self", ",", "track_indices", "=", "None", ",", "mode", "=", "'sum'", ",", "program", "=", "0", ",", "is_drum", "=", "False", ",", "name", "=", "'merged'", ",", "remove_merged", "=", "False", ")", ":", "if", "mode", "not", "in", "(", "'max'", ",", "'sum'", ",", "'any'", ")", ":", "raise", "ValueError", "(", "\"`mode` must be one of {'max', 'sum', 'any'}.\"", ")", "merged", "=", "self", "[", "track_indices", "]", ".", "get_merged_pianoroll", "(", "mode", ")", "merged_track", "=", "Track", "(", "merged", ",", "program", ",", "is_drum", ",", "name", ")", "self", ".", "append_track", "(", "merged_track", ")", "if", "remove_merged", ":", "self", ".", "remove_tracks", "(", "track_indices", ")" ]
Merge pianorolls of the tracks specified by `track_indices`. The merged track will have program number as given by `program` and drum indicator as given by `is_drum`. The merged track will be appended at the end of the track list. Parameters ---------- track_indices : list The indices of tracks to be merged. Defaults to all the tracks. mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of the collected pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among the collected pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the collected pianorolls has nonzero value at that pixel; False if all the collected pianorolls are inactive (zero-valued) at that pixel. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. name : str A name to be assigned to the merged track. Defaults to 'merged'. remove_merged : bool True to remove the source tracks from the track list. False to keep them. Defaults to False. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set
[ "Merge", "pianorolls", "of", "the", "tracks", "specified", "by", "track_indices", ".", "The", "merged", "track", "will", "have", "program", "number", "as", "given", "by", "program", "and", "drum", "indicator", "as", "given", "by", "is_drum", ".", "The", "merged", "track", "will", "be", "appended", "at", "the", "end", "of", "the", "track", "list", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L486-L538
-1
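A sketch of `merge_tracks` in use, under the same assumed `tracks` keyword as above (note also that the method indexes `self[track_indices]` internally, so the class must support `__getitem__`, which is not among the records shown):

import numpy as np
from pypianoroll import Multitrack

roll_a = np.zeros((96, 128), np.uint8)
roll_a[:24, 60] = 60
roll_b = np.zeros((96, 128), np.uint8)
roll_b[:24, 64] = 80

multitrack = Multitrack(tracks=[])  # `tracks` keyword assumed
multitrack.append_track(pianoroll=roll_a, name='lead')
multitrack.append_track(pianoroll=roll_b, name='harmony')

# Merge the two tracks with 'max' and drop the sources afterwards.
multitrack.merge_tracks([0, 1], mode='max', name='piano', remove_merged=True)
print(len(multitrack.tracks))  # 1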
541
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.pad_to_same
def pad_to_same(self): """Pad shorter pianorolls with zeros at the end along the time axis to make the resulting pianoroll lengths the same as the maximum pianoroll length among all the tracks.""" max_length = self.get_max_length() for track in self.tracks: if track.pianoroll.shape[0] < max_length: track.pad(max_length - track.pianoroll.shape[0])
python
def pad_to_same(self): """Pad shorter pianorolls with zeros at the end along the time axis to make the resulting pianoroll lengths the same as the maximum pianoroll length among all the tracks.""" max_length = self.get_max_length() for track in self.tracks: if track.pianoroll.shape[0] < max_length: track.pad(max_length - track.pianoroll.shape[0])
[ "def", "pad_to_same", "(", "self", ")", ":", "max_length", "=", "self", ".", "get_max_length", "(", ")", "for", "track", "in", "self", ".", "tracks", ":", "if", "track", ".", "pianoroll", ".", "shape", "[", "0", "]", "<", "max_length", ":", "track", ".", "pad", "(", "max_length", "-", "track", ".", "pianoroll", ".", "shape", "[", "0", "]", ")" ]
Pad shorter pianorolls with zeros at the end along the time axis to make the resulting pianoroll lengths the same as the maximum pianoroll length among all the tracks.
[ "Pad", "shorter", "pianorolls", "with", "zeros", "at", "the", "end", "along", "the", "time", "axis", "to", "make", "the", "resulting", "pianoroll", "lengths", "the", "same", "as", "the", "maximum", "pianoroll", "length", "among", "all", "the", "tracks", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L581-L588
-1
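A sketch of `pad_to_same`, again with the assumed `tracks` keyword (`get_max_length` is called internally; its record is not shown here):

import numpy as np
from pypianoroll import Multitrack

multitrack = Multitrack(tracks=[])  # `tracks` keyword assumed
multitrack.append_track(pianoroll=np.zeros((48, 128), bool))
multitrack.append_track(pianoroll=np.zeros((96, 128), bool))

multitrack.pad_to_same()
print([t.pianoroll.shape[0] for t in multitrack.tracks])  # [96, 96]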
542
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.remove_tracks
def remove_tracks(self, track_indices): """ Remove tracks specified by `track_indices`. Parameters ---------- track_indices : list The indices of the tracks to be removed. """ if isinstance(track_indices, int): track_indices = [track_indices] self.tracks = [track for idx, track in enumerate(self.tracks) if idx not in track_indices]
python
def remove_tracks(self, track_indices): """ Remove tracks specified by `track_indices`. Parameters ---------- track_indices : list The indices of the tracks to be removed. """ if isinstance(track_indices, int): track_indices = [track_indices] self.tracks = [track for idx, track in enumerate(self.tracks) if idx not in track_indices]
[ "def", "remove_tracks", "(", "self", ",", "track_indices", ")", ":", "if", "isinstance", "(", "track_indices", ",", "int", ")", ":", "track_indices", "=", "[", "track_indices", "]", "self", ".", "tracks", "=", "[", "track", "for", "idx", ",", "track", "in", "enumerate", "(", "self", ".", "tracks", ")", "if", "idx", "not", "in", "track_indices", "]" ]
Remove tracks specified by `track_indices`. Parameters ---------- track_indices : list The indices of the tracks to be removed.
[ "Remove", "tracks", "specified", "by", "track_indices", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L815-L828
-1
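`remove_tracks` combines naturally with `get_empty_tracks` above to prune silent parts; a sketch under the same constructor assumption:

import numpy as np
from pypianoroll import Multitrack

silent = np.zeros((48, 128), bool)
active = np.zeros((48, 128), bool)
active[0, 60] = True

multitrack = Multitrack(tracks=[])  # `tracks` keyword assumed
multitrack.append_track(pianoroll=silent, name='silent')
multitrack.append_track(pianoroll=active, name='active')

# Drop every track whose pianoroll has no nonzero entry.
multitrack.remove_tracks(multitrack.get_empty_tracks())
print([t.name for t in multitrack.tracks])  # ['active']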
543
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.transpose
def transpose(self, semitone): """ Transpose the pianorolls of all tracks by a number of semitones, where positive values are for higher key, while negative values are for lower key. The drum tracks are ignored. Parameters ---------- semitone : int The number of semitones to transpose the pianorolls. """ for track in self.tracks: if not track.is_drum: track.transpose(semitone)
python
def transpose(self, semitone): """ Transpose the pianorolls of all tracks by a number of semitones, where positive values are for higher key, while negative values are for lower key. The drum tracks are ignored. Parameters ---------- semitone : int The number of semitones to transpose the pianorolls. """ for track in self.tracks: if not track.is_drum: track.transpose(semitone)
[ "def", "transpose", "(", "self", ",", "semitone", ")", ":", "for", "track", "in", "self", ".", "tracks", ":", "if", "not", "track", ".", "is_drum", ":", "track", ".", "transpose", "(", "semitone", ")" ]
Transpose the pianorolls of all tracks by a number of semitones, where positive values are for higher key, while negative values are for lower key. The drum tracks are ignored. Parameters ---------- semitone : int The number of semitones to transpose the pianorolls.
[ "Transpose", "the", "pianorolls", "of", "all", "tracks", "by", "a", "number", "of", "semitones", "where", "positive", "values", "are", "for", "higher", "key", "while", "negative", "values", "are", "for", "lower", "key", ".", "The", "drum", "tracks", "are", "ignored", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L954-L968
-1
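A sketch of `transpose`, which shifts every non-drum pianoroll along the pitch axis (the per-track `Track.transpose` it delegates to is not among the records shown, so the +2 -> pitch 62 outcome is an assumption about that method):

import numpy as np
from pypianoroll import Multitrack

piano = np.zeros((4, 128), bool)
piano[0, 60] = True   # middle C
drums = np.zeros((4, 128), bool)
drums[0, 36] = True   # kick drum

multitrack = Multitrack(tracks=[])  # `tracks` keyword assumed
multitrack.append_track(pianoroll=piano, name='piano')
multitrack.append_track(pianoroll=drums, is_drum=True, name='drums')

multitrack.transpose(2)  # up a whole tone
print(multitrack.tracks[0].pianoroll[0, 62])  # True: the piano note moved
print(multitrack.tracks[1].pianoroll[0, 36])  # True: the drum hit did not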
544
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.trim_trailing_silence
def trim_trailing_silence(self): """Trim the trailing silences of the pianorolls of all tracks. Trailing silences are considered globally.""" active_length = self.get_active_length() for track in self.tracks: track.pianoroll = track.pianoroll[:active_length]
python
def trim_trailing_silence(self): """Trim the trailing silences of the pianorolls of all tracks. Trailing silences are considered globally.""" active_length = self.get_active_length() for track in self.tracks: track.pianoroll = track.pianoroll[:active_length]
[ "def", "trim_trailing_silence", "(", "self", ")", ":", "active_length", "=", "self", ".", "get_active_length", "(", ")", "for", "track", "in", "self", ".", "tracks", ":", "track", ".", "pianoroll", "=", "track", ".", "pianoroll", "[", ":", "active_length", "]" ]
Trim the trailing silences of the pianorolls of all tracks. Trailing silences are considered globally.
[ "Trim", "the", "trailing", "silences", "of", "the", "pianorolls", "of", "all", "tracks", ".", "Trailing", "silences", "are", "considered", "globally", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L970-L975
-1
545
salu133445/pypianoroll
pypianoroll/multitrack.py
Multitrack.write
def write(self, filename): """ Write the multitrack pianoroll to a MIDI file. Parameters ---------- filename : str The name of the MIDI file to which the multitrack pianoroll is written. """ if not filename.endswith(('.mid', '.midi', '.MID', '.MIDI')): filename = filename + '.mid' pm = self.to_pretty_midi() pm.write(filename)
python
def write(self, filename): """ Write the multitrack pianoroll to a MIDI file. Parameters ---------- filename : str The name of the MIDI file to which the multitrack pianoroll is written. """ if not filename.endswith(('.mid', '.midi', '.MID', '.MIDI')): filename = filename + '.mid' pm = self.to_pretty_midi() pm.write(filename)
[ "def", "write", "(", "self", ",", "filename", ")", ":", "if", "not", "filename", ".", "endswith", "(", "(", "'.mid'", ",", "'.midi'", ",", "'.MID'", ",", "'.MIDI'", ")", ")", ":", "filename", "=", "filename", "+", "'.mid'", "pm", "=", "self", ".", "to_pretty_midi", "(", ")", "pm", ".", "write", "(", "filename", ")" ]
Write the multitrack pianoroll to a MIDI file. Parameters ---------- filename : str The name of the MIDI file to which the multitrack pianoroll is written.
[ "Write", "the", "multitrack", "pianoroll", "to", "a", "MIDI", "file", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L977-L991
-1
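A sketch of `write`, assuming `to_pretty_midi` (not shown in these records) succeeds for a simple single-track object:

import numpy as np
from pypianoroll import Multitrack

multitrack = Multitrack(tracks=[])  # `tracks` keyword assumed
roll = np.zeros((96, 128), np.uint8)
roll[:48, 60] = 100
multitrack.append_track(pianoroll=roll, program=0, name='piano')

multitrack.write('example')  # '.mid' is appended automatically -> example.mid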
546
salu133445/pypianoroll
pypianoroll/utilities.py
check_pianoroll
def check_pianoroll(arr): """ Return True if the array is a standard piano-roll matrix. Otherwise, return False. Raise TypeError if the input object is not a numpy array. """ if not isinstance(arr, np.ndarray): raise TypeError("`arr` must be of np.ndarray type") if not (np.issubdtype(arr.dtype, np.bool_) or np.issubdtype(arr.dtype, np.number)): return False if arr.ndim != 2: return False if arr.shape[1] != 128: return False return True
python
def check_pianoroll(arr): """ Return True if the array is a standard piano-roll matrix. Otherwise, return False. Raise TypeError if the input object is not a numpy array. """ if not isinstance(arr, np.ndarray): raise TypeError("`arr` must be of np.ndarray type") if not (np.issubdtype(arr.dtype, np.bool_) or np.issubdtype(arr.dtype, np.number)): return False if arr.ndim != 2: return False if arr.shape[1] != 128: return False return True
[ "def", "check_pianoroll", "(", "arr", ")", ":", "if", "not", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"`arr` must be of np.ndarray type\"", ")", "if", "not", "(", "np", ".", "issubdtype", "(", "arr", ".", "dtype", ",", "np", ".", "bool_", ")", "or", "np", ".", "issubdtype", "(", "arr", ".", "dtype", ",", "np", ".", "number", ")", ")", ":", "return", "False", "if", "arr", ".", "ndim", "!=", "2", ":", "return", "False", "if", "arr", ".", "shape", "[", "1", "]", "!=", "128", ":", "return", "False", "return", "True" ]
Return True if the array is a standard piano-roll matrix. Otherwise, return False. Raise TypeError if the input object is not a numpy array.
[ "Return", "True", "if", "the", "array", "is", "a", "standard", "piano", "-", "roll", "matrix", ".", "Otherwise", "return", "False", ".", "Raise", "TypeError", "if", "the", "input", "object", "is", "not", "a", "numpy", "array", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/utilities.py#L20-L35
-1
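`check_pianoroll` can be exercised directly against the rules in its body; a quick sketch:

import numpy as np
from pypianoroll.utilities import check_pianoroll

print(check_pianoroll(np.zeros((96, 128), bool)))  # True
print(check_pianoroll(np.zeros((96, 64))))         # False: second axis is not 128
print(check_pianoroll(np.zeros((2, 96, 128))))     # False: not two-dimensional
try:
    check_pianoroll([[0] * 128])                   # a list, not an ndarray
except TypeError as err:
    print(err)                                     # `arr` must be of np.ndarray type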
547
salu133445/pypianoroll
pypianoroll/utilities.py
pad
def pad(obj, pad_length): """ Return a copy of the object with piano-roll padded with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad along the time axis with zeros. """ _check_supported(obj) copied = deepcopy(obj) copied.pad(pad_length) return copied
python
def pad(obj, pad_length): """ Return a copy of the object with piano-roll padded with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad along the time axis with zeros. """ _check_supported(obj) copied = deepcopy(obj) copied.pad(pad_length) return copied
[ "def", "pad", "(", "obj", ",", "pad_length", ")", ":", "_check_supported", "(", "obj", ")", "copied", "=", "deepcopy", "(", "obj", ")", "copied", ".", "pad", "(", "pad_length", ")", "return", "copied" ]
Return a copy of the object with piano-roll padded with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad along the time axis with zeros.
[ "Return", "a", "copy", "of", "the", "object", "with", "piano", "-", "roll", "padded", "with", "zeros", "at", "the", "end", "along", "the", "time", "axis", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/utilities.py#L107-L121
-1
548
salu133445/pypianoroll
pypianoroll/utilities.py
pad_to_multiple
def pad_to_multiple(obj, factor):
    """
    Return a copy of the object with its piano-roll padded with zeros at the
    end along the time axis with the minimal length that makes the length of
    the resulting piano-roll a multiple of `factor`.

    Parameters
    ----------
    factor : int
        The value which the length of the resulting piano-roll will be a
        multiple of.

    """
    _check_supported(obj)
    copied = deepcopy(obj)
    copied.pad_to_multiple(factor)
    return copied
python
def pad_to_multiple(obj, factor):
    """
    Return a copy of the object with its piano-roll padded with zeros at the
    end along the time axis with the minimal length that makes the length of
    the resulting piano-roll a multiple of `factor`.

    Parameters
    ----------
    factor : int
        The value which the length of the resulting piano-roll will be a
        multiple of.

    """
    _check_supported(obj)
    copied = deepcopy(obj)
    copied.pad_to_multiple(factor)
    return copied
[ "def", "pad_to_multiple", "(", "obj", ",", "factor", ")", ":", "_check_supported", "(", "obj", ")", "copied", "=", "deepcopy", "(", "obj", ")", "copied", ".", "pad_to_multiple", "(", "factor", ")", "return", "copied" ]
Return a copy of the object with its piano-roll padded with zeros at the
end along the time axis with the minimal length that makes the length of
the resulting piano-roll a multiple of `factor`.

Parameters
----------
factor : int
    The value which the length of the resulting piano-roll will be a
    multiple of.
[ "Return", "a", "copy", "of", "the", "object", "with", "its", "piano", "-", "roll", "padded", "with", "zeros", "at", "the", "end", "along", "the", "time", "axis", "with", "the", "minimal", "length", "that", "make", "the", "length", "of", "the", "resulting", "piano", "-", "roll", "a", "multiple", "of", "factor", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/utilities.py#L123-L139
-1
549
salu133445/pypianoroll
pypianoroll/utilities.py
pad_to_same
def pad_to_same(obj): """ Return a copy of the object with shorter piano-rolls padded with zeros at the end along the time axis to the length of the piano-roll with the maximal length. """ if not isinstance(obj, Multitrack): raise TypeError("Support only `pypianoroll.Multitrack` class objects") copied = deepcopy(obj) copied.pad_to_same() return copied
python
def pad_to_same(obj): """ Return a copy of the object with shorter piano-rolls padded with zeros at the end along the time axis to the length of the piano-roll with the maximal length. """ if not isinstance(obj, Multitrack): raise TypeError("Support only `pypianoroll.Multitrack` class objects") copied = deepcopy(obj) copied.pad_to_same() return copied
[ "def", "pad_to_same", "(", "obj", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "Multitrack", ")", ":", "raise", "TypeError", "(", "\"Support only `pypianoroll.Multitrack` class objects\"", ")", "copied", "=", "deepcopy", "(", "obj", ")", "copied", ".", "pad_to_same", "(", ")", "return", "copied" ]
Return a copy of the object with shorter piano-rolls padded with zeros at the end along the time axis to the length of the piano-roll with the maximal length.
[ "Return", "a", "copy", "of", "the", "object", "with", "shorter", "piano", "-", "rolls", "padded", "with", "zeros", "at", "the", "end", "along", "the", "time", "axis", "to", "the", "length", "of", "the", "piano", "-", "roll", "with", "the", "maximal", "length", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/utilities.py#L141-L152
-1
550
salu133445/pypianoroll
pypianoroll/utilities.py
save
def save(filepath, obj, compressed=True): """ Save the object to a .npz file. Parameters ---------- filepath : str The path to save the file. obj: `pypianoroll.Multitrack` objects The object to be saved. """ if not isinstance(obj, Multitrack): raise TypeError("Support only `pypianoroll.Multitrack` class objects") obj.save(filepath, compressed)
python
def save(filepath, obj, compressed=True): """ Save the object to a .npz file. Parameters ---------- filepath : str The path to save the file. obj: `pypianoroll.Multitrack` objects The object to be saved. """ if not isinstance(obj, Multitrack): raise TypeError("Support only `pypianoroll.Multitrack` class objects") obj.save(filepath, compressed)
[ "def", "save", "(", "filepath", ",", "obj", ",", "compressed", "=", "True", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "Multitrack", ")", ":", "raise", "TypeError", "(", "\"Support only `pypianoroll.Multitrack` class objects\"", ")", "obj", ".", "save", "(", "filepath", ",", "compressed", ")" ]
Save the object to a .npz file. Parameters ---------- filepath : str The path to save the file. obj: `pypianoroll.Multitrack` objects The object to be saved.
[ "Save", "the", "object", "to", "a", ".", "npz", "file", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/utilities.py#L178-L192
-1
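A sketch of the module-level `save` helper (it forwards to `Multitrack.save`, whose record is not shown here; the `Multitrack` construction assumptions are as above):

import numpy as np
from pypianoroll import Multitrack
from pypianoroll.utilities import save

multitrack = Multitrack(tracks=[])  # `tracks` keyword assumed
multitrack.append_track(pianoroll=np.zeros((96, 128), bool), name='piano')

save('song.npz', multitrack)                       # compressed by default
save('song_raw.npz', multitrack, compressed=False)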
551
salu133445/pypianoroll
pypianoroll/utilities.py
write
def write(obj, filepath): """ Write the object to a MIDI file. Parameters ---------- filepath : str The path to write the MIDI file. """ if not isinstance(obj, Multitrack): raise TypeError("Support only `pypianoroll.Multitrack` class objects") obj.write(filepath)
python
def write(obj, filepath): """ Write the object to a MIDI file. Parameters ---------- filepath : str The path to write the MIDI file. """ if not isinstance(obj, Multitrack): raise TypeError("Support only `pypianoroll.Multitrack` class objects") obj.write(filepath)
[ "def", "write", "(", "obj", ",", "filepath", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "Multitrack", ")", ":", "raise", "TypeError", "(", "\"Support only `pypianoroll.Multitrack` class objects\"", ")", "obj", ".", "write", "(", "filepath", ")" ]
Write the object to a MIDI file. Parameters ---------- filepath : str The path to write the MIDI file.
[ "Write", "the", "object", "to", "a", "MIDI", "file", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/utilities.py#L222-L234
-1
552
salu133445/pypianoroll
pypianoroll/metrics.py
_validate_pianoroll
def _validate_pianoroll(pianoroll): """Raise an error if the input array is not a standard pianoroll.""" if not isinstance(pianoroll, np.ndarray): raise TypeError("`pianoroll` must be of np.ndarray type.") if not (np.issubdtype(pianoroll.dtype, np.bool_) or np.issubdtype(pianoroll.dtype, np.number)): raise TypeError("The data type of `pianoroll` must be np.bool_ or a " "subdtype of np.number.") if pianoroll.ndim != 2: raise ValueError("`pianoroll` must have exactly two dimensions.") if pianoroll.shape[1] != 128: raise ValueError("The length of the second axis of `pianoroll` must be " "128.")
python
def _validate_pianoroll(pianoroll): """Raise an error if the input array is not a standard pianoroll.""" if not isinstance(pianoroll, np.ndarray): raise TypeError("`pianoroll` must be of np.ndarray type.") if not (np.issubdtype(pianoroll.dtype, np.bool_) or np.issubdtype(pianoroll.dtype, np.number)): raise TypeError("The data type of `pianoroll` must be np.bool_ or a " "subdtype of np.number.") if pianoroll.ndim != 2: raise ValueError("`pianoroll` must have exactly two dimensions.") if pianoroll.shape[1] != 128: raise ValueError("The length of the second axis of `pianoroll` must be " "128.")
[ "def", "_validate_pianoroll", "(", "pianoroll", ")", ":", "if", "not", "isinstance", "(", "pianoroll", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"`pianoroll` must be of np.ndarray type.\"", ")", "if", "not", "(", "np", ".", "issubdtype", "(", "pianoroll", ".", "dtype", ",", "np", ".", "bool_", ")", "or", "np", ".", "issubdtype", "(", "pianoroll", ".", "dtype", ",", "np", ".", "number", ")", ")", ":", "raise", "TypeError", "(", "\"The data type of `pianoroll` must be np.bool_ or a \"", "\"subdtype of np.number.\"", ")", "if", "pianoroll", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"`pianoroll` must have exactly two dimensions.\"", ")", "if", "pianoroll", ".", "shape", "[", "1", "]", "!=", "128", ":", "raise", "ValueError", "(", "\"The length of the second axis of `pianoroll` must be \"", "\"128.\"", ")" ]
Raise an error if the input array is not a standard pianoroll.
[ "Raise", "an", "error", "if", "the", "input", "array", "is", "not", "a", "standard", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/metrics.py#L7-L19
-1
553
salu133445/pypianoroll
pypianoroll/metrics.py
_to_chroma
def _to_chroma(pianoroll):
    """Return the unnormalized chroma features of a pianoroll."""
    _validate_pianoroll(pianoroll)
    # Fold the ten full octaves (pitches 0-119) onto the twelve pitch classes.
    chroma = pianoroll[:, :120].reshape(-1, 10, 12).sum(axis=1)
    # Pitches 120-127 belong to pitch classes 0-7.
    chroma[:, :8] += pianoroll[:, 120:]
    return chroma
python
def _to_chroma(pianoroll):
    """Return the unnormalized chroma features of a pianoroll."""
    _validate_pianoroll(pianoroll)
    # Fold the ten full octaves (pitches 0-119) onto the twelve pitch classes.
    chroma = pianoroll[:, :120].reshape(-1, 10, 12).sum(axis=1)
    # Pitches 120-127 belong to pitch classes 0-7.
    chroma[:, :8] += pianoroll[:, 120:]
    return chroma
[ "def", "_to_chroma", "(", "pianoroll", ")", ":", "_validate_pianoroll", "(", "pianoroll", ")", "reshaped", "=", "pianoroll", "[", ":", ",", ":", "120", "]", ".", "reshape", "(", "-", "1", ",", "12", ",", "10", ")", "reshaped", "[", "...", ",", ":", "8", "]", "+=", "pianoroll", "[", ":", ",", "120", ":", "]", ".", "reshape", "(", "-", "1", ",", "1", ",", "8", ")", "return", "np", ".", "sum", "(", "reshaped", ",", "1", ")" ]
Return the unnormalized chroma features of a pianoroll.
[ "Return", "the", "unnormalized", "chroma", "features", "of", "a", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/metrics.py#L21-L26
-1
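With the reshape corrected as above, `_to_chroma` folds the pitch axis onto twelve pitch-class bins; a quick shape check (the leading underscore marks the helper as private, so this is illustration only):

import numpy as np
from pypianoroll.metrics import _to_chroma

pianoroll = np.zeros((4, 128), int)
pianoroll[0, 60] = 1   # C4
pianoroll[0, 72] = 1   # C5, the same pitch class

chroma = _to_chroma(pianoroll)
print(chroma.shape)  # (4, 12)
print(chroma[0, 0])  # 2: both Cs land in the same chroma bin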
554
salu133445/pypianoroll
pypianoroll/metrics.py
empty_beat_rate
def empty_beat_rate(pianoroll, beat_resolution):
    """Return the ratio of empty beats to the total number of beats in a
    pianoroll."""
    _validate_pianoroll(pianoroll)
    reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
    # A beat is empty when it contains no nonzero entry.
    n_empty_beats = len(reshaped) - np.count_nonzero(reshaped.any(1))
    return n_empty_beats / len(reshaped)
python
def empty_beat_rate(pianoroll, beat_resolution):
    """Return the ratio of empty beats to the total number of beats in a
    pianoroll."""
    _validate_pianoroll(pianoroll)
    reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
    # A beat is empty when it contains no nonzero entry.
    n_empty_beats = len(reshaped) - np.count_nonzero(reshaped.any(1))
    return n_empty_beats / len(reshaped)
[ "def", "empty_beat_rate", "(", "pianoroll", ",", "beat_resolution", ")", ":", "_validate_pianoroll", "(", "pianoroll", ")", "reshaped", "=", "pianoroll", ".", "reshape", "(", "-", "1", ",", "beat_resolution", "*", "pianoroll", ".", "shape", "[", "1", "]", ")", "n_empty_beats", "=", "np", ".", "count_nonzero", "(", "reshaped", ".", "any", "(", "1", ")", ")", "return", "n_empty_beats", "/", "len", "(", "reshaped", ")" ]
Return the ratio of empty beats to the total number of beats in a pianoroll.
[ "Return", "the", "ratio", "of", "empty", "beats", "to", "the", "total", "number", "of", "beats", "in", "a", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/metrics.py#L28-L34
-1
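A sketch of `empty_beat_rate` with the corrected count, on a four-beat pianoroll where only the first beat sounds:

import numpy as np
from pypianoroll.metrics import empty_beat_rate

beat_resolution = 24
pianoroll = np.zeros((4 * beat_resolution, 128), bool)  # four beats
pianoroll[0, 60] = True                                 # a note in beat 0 only

print(empty_beat_rate(pianoroll, beat_resolution))      # 0.75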
555
salu133445/pypianoroll
pypianoroll/metrics.py
n_pitche_classes_used
def n_pitche_classes_used(pianoroll): """Return the number of unique pitch classes used in a pianoroll.""" _validate_pianoroll(pianoroll) chroma = _to_chroma(pianoroll) return np.count_nonzero(np.any(chroma, 0))
python
def n_pitche_classes_used(pianoroll): """Return the number of unique pitch classes used in a pianoroll.""" _validate_pianoroll(pianoroll) chroma = _to_chroma(pianoroll) return np.count_nonzero(np.any(chroma, 0))
[ "def", "n_pitche_classes_used", "(", "pianoroll", ")", ":", "_validate_pianoroll", "(", "pianoroll", ")", "chroma", "=", "_to_chroma", "(", "pianoroll", ")", "return", "np", ".", "count_nonzero", "(", "np", ".", "any", "(", "chroma", ",", "0", ")", ")" ]
Return the number of unique pitch classes used in a pianoroll.
[ "Return", "the", "number", "of", "unique", "pitch", "classes", "used", "in", "a", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/metrics.py#L41-L45
-1
556
salu133445/pypianoroll
pypianoroll/metrics.py
polyphonic_rate
def polyphonic_rate(pianoroll, threshold=2): """Return the ratio of the number of time steps where the number of pitches being played is larger than `threshold` to the total number of time steps in a pianoroll.""" _validate_pianoroll(pianoroll) n_poly = np.count_nonzero(np.count_nonzero(pianoroll, 1) > threshold) return n_poly / len(pianoroll)
python
def polyphonic_rate(pianoroll, threshold=2): """Return the ratio of the number of time steps where the number of pitches being played is larger than `threshold` to the total number of time steps in a pianoroll.""" _validate_pianoroll(pianoroll) n_poly = np.count_nonzero(np.count_nonzero(pianoroll, 1) > threshold) return n_poly / len(pianoroll)
[ "def", "polyphonic_rate", "(", "pianoroll", ",", "threshold", "=", "2", ")", ":", "_validate_pianoroll", "(", "pianoroll", ")", "n_poly", "=", "np", ".", "count_nonzero", "(", "np", ".", "count_nonzero", "(", "pianoroll", ",", "1", ")", ">", "threshold", ")", "return", "n_poly", "/", "len", "(", "pianoroll", ")" ]
Return the ratio of the number of time steps where the number of pitches being played is larger than `threshold` to the total number of time steps in a pianoroll.
[ "Return", "the", "ratio", "of", "the", "number", "of", "time", "steps", "where", "the", "number", "of", "pitches", "being", "played", "is", "larger", "than", "threshold", "to", "the", "total", "number", "of", "time", "steps", "in", "a", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/metrics.py#L60-L66
-1
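A sketch of `polyphonic_rate`; note that `threshold` is exclusive, so the default counts time steps with three or more active pitches:

import numpy as np
from pypianoroll.metrics import polyphonic_rate

pianoroll = np.zeros((4, 128), bool)
pianoroll[0, [60, 64, 67]] = True  # a three-note chord at step 0
pianoroll[1, 60] = True            # a single pitch at step 1

print(polyphonic_rate(pianoroll))               # 0.25: only step 0 exceeds two pitches
print(polyphonic_rate(pianoroll, threshold=0))  # 0.5: steps 0 and 1 have any pitch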
557
salu133445/pypianoroll
pypianoroll/metrics.py
in_scale_rate
def in_scale_rate(pianoroll, key=3, kind='major'):
    """Return the ratio of the number of nonzero entries that lie in a
    specific scale to the total number of nonzero entries in a pianoroll.
    Default to C major scale."""
    if not isinstance(key, int):
        raise TypeError("`key` must be an integer.")
    if key > 11 or key < 0:
        raise ValueError("`key` must be an integer between 0 and 11.")
    if kind not in ('major', 'minor'):
        raise ValueError("`kind` must be one of 'major' or 'minor'.")
    _validate_pianoroll(pianoroll)

    def _scale_mask(key, kind):
        """Return a scale mask for the given key. Default to C major scale."""
        if kind == 'major':
            a_scale_mask = np.array([0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1], bool)
        else:
            a_scale_mask = np.array([1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], bool)
        return np.roll(a_scale_mask, key)

    chroma = _to_chroma(pianoroll)
    scale_mask = _scale_mask(key, kind)
    n_in_scale = np.sum(scale_mask.reshape(-1, 12) * chroma)
    return n_in_scale / np.count_nonzero(pianoroll)
python
def in_scale_rate(pianoroll, key=3, kind='major'):
    """Return the ratio of the number of nonzero entries that lie in a
    specific scale to the total number of nonzero entries in a pianoroll.
    Default to C major scale."""
    if not isinstance(key, int):
        raise TypeError("`key` must be an integer.")
    if key > 11 or key < 0:
        raise ValueError("`key` must be an integer between 0 and 11.")
    if kind not in ('major', 'minor'):
        raise ValueError("`kind` must be one of 'major' or 'minor'.")
    _validate_pianoroll(pianoroll)

    def _scale_mask(key, kind):
        """Return a scale mask for the given key. Default to C major scale."""
        if kind == 'major':
            a_scale_mask = np.array([0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1], bool)
        else:
            a_scale_mask = np.array([1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], bool)
        return np.roll(a_scale_mask, key)

    chroma = _to_chroma(pianoroll)
    scale_mask = _scale_mask(key, kind)
    n_in_scale = np.sum(scale_mask.reshape(-1, 12) * chroma)
    return n_in_scale / np.count_nonzero(pianoroll)
[ "def", "in_scale_rate", "(", "pianoroll", ",", "key", "=", "3", ",", "kind", "=", "'major'", ")", ":", "if", "not", "isinstance", "(", "key", ",", "int", ")", ":", "raise", "TypeError", "(", "\"`key` must an integer.\"", ")", "if", "key", ">", "11", "or", "key", "<", "0", ":", "raise", "ValueError", "(", "\"`key` must be in an integer in between 0 and 11.\"", ")", "if", "kind", "not", "in", "(", "'major'", ",", "'minor'", ")", ":", "raise", "ValueError", "(", "\"`kind` must be one of 'major' or 'minor'.\"", ")", "_validate_pianoroll", "(", "pianoroll", ")", "def", "_scale_mask", "(", "key", ",", "kind", ")", ":", "\"\"\"Return a scale mask for the given key. Default to C major scale.\"\"\"", "if", "kind", "==", "'major'", ":", "a_scale_mask", "=", "np", ".", "array", "(", "[", "0", ",", "1", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "1", ",", "0", ",", "1", "]", ",", "bool", ")", "else", ":", "a_scale_mask", "=", "np", ".", "array", "(", "[", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", "]", ",", "bool", ")", "return", "np", ".", "roll", "(", "a_scale_mask", ",", "key", ")", "chroma", "=", "_to_chroma", "(", "pianoroll", ")", "scale_mask", "=", "_scale_mask", "(", "key", ",", "kind", ")", "n_in_scale", "=", "np", ".", "sum", "(", "scale_mask", ".", "reshape", "(", "-", "1", ",", "12", ")", "*", "chroma", ")", "return", "n_in_scale", "/", "np", ".", "count_nonzero", "(", "pianoroll", ")" ]
Return the ratio of the number of nonzero entries that lie in a specific scale to the total number of nonzero entries in a pianoroll. Default to C major scale.
[ "Return", "the", "ratio", "of", "the", "number", "of", "nonzero", "entries", "that", "lie", "in", "a", "specific", "scale", "to", "the", "total", "number", "of", "nonzero", "entries", "in", "a", "pianoroll", ".", "Default", "to", "C", "major", "scale", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/metrics.py#L100-L123
-1
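A sketch of `in_scale_rate` (which depends on the corrected `_to_chroma`); rolling the hard-coded base mask by the default `key=3` yields the C major pitch classes, matching the docstring:

import numpy as np
from pypianoroll.metrics import in_scale_rate, n_pitche_classes_used

pianoroll = np.zeros((4, 128), int)
pianoroll[0, [60, 62, 64]] = 1  # C, D, E

print(n_pitche_classes_used(pianoroll))  # 3
print(in_scale_rate(pianoroll))          # 1.0: C, D and E are all in C major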
558
salu133445/pypianoroll
pypianoroll/track.py
Track.assign_constant
def assign_constant(self, value, dtype=None):
    """
    Assign a constant value to all nonzeros in the pianoroll. If the
    pianoroll is not binarized, its data type will be preserved. If the
    pianoroll is binarized, it will be cast to the type of `value`.

    Arguments
    ---------
    value : int or float
        The constant value to be assigned to all the nonzeros in the
        pianoroll.

    """
    if not self.is_binarized():
        self.pianoroll[self.pianoroll.nonzero()] = value
        return
    if dtype is None:
        if isinstance(value, int):
            dtype = int
        elif isinstance(value, float):
            dtype = float
    nonzero = self.pianoroll.nonzero()
    self.pianoroll = np.zeros(self.pianoroll.shape, dtype)
    self.pianoroll[nonzero] = value
python
def assign_constant(self, value, dtype=None):
    """
    Assign a constant value to all nonzeros in the pianoroll. If the
    pianoroll is not binarized, its data type will be preserved. If the
    pianoroll is binarized, it will be cast to the type of `value`.

    Arguments
    ---------
    value : int or float
        The constant value to be assigned to all the nonzeros in the
        pianoroll.

    """
    if not self.is_binarized():
        self.pianoroll[self.pianoroll.nonzero()] = value
        return
    if dtype is None:
        if isinstance(value, int):
            dtype = int
        elif isinstance(value, float):
            dtype = float
    nonzero = self.pianoroll.nonzero()
    self.pianoroll = np.zeros(self.pianoroll.shape, dtype)
    self.pianoroll[nonzero] = value
[ "def", "assign_constant", "(", "self", ",", "value", ",", "dtype", "=", "None", ")", ":", "if", "not", "self", ".", "is_binarized", "(", ")", ":", "self", ".", "pianoroll", "[", "self", ".", "pianoroll", ".", "nonzero", "(", ")", "]", "=", "value", "return", "if", "dtype", "is", "None", ":", "if", "isinstance", "(", "value", ",", "int", ")", ":", "dtype", "=", "int", "elif", "isinstance", "(", "value", ",", "float", ")", ":", "dtype", "=", "float", "nonzero", "=", "self", ".", "pianoroll", ".", "nonzero", "(", ")", "self", ".", "pianoroll", "=", "np", ".", "zeros", "(", "self", ".", "pianoroll", ".", "shape", ",", "dtype", ")", "self", ".", "pianoroll", "[", "nonzero", "]", "=", "value" ]
Assign a constant value to all nonzeros in the pianoroll. If the
pianoroll is not binarized, its data type will be preserved. If the
pianoroll is binarized, it will be cast to the type of `value`.

Arguments
---------
value : int or float
    The constant value to be assigned to all the nonzeros in the
    pianoroll.
[ "Assign", "a", "constant", "value", "to", "all", "nonzeros", "in", "the", "pianoroll", ".", "If", "the", "pianoroll", "is", "not", "binarized", "its", "data", "type", "will", "be", "preserved", ".", "If", "the", "pianoroll", "is", "binarized", "it", "will", "be", "casted", "to", "the", "type", "of", "value", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L76-L99
-1
559
salu133445/pypianoroll
pypianoroll/track.py
Track.binarize
def binarize(self, threshold=0): """ Binarize the pianoroll. Parameters ---------- threshold : int or float A threshold used to binarize the pianorolls. Defaults to zero. """ if not self.is_binarized(): self.pianoroll = (self.pianoroll > threshold)
python
def binarize(self, threshold=0): """ Binarize the pianoroll. Parameters ---------- threshold : int or float A threshold used to binarize the pianorolls. Defaults to zero. """ if not self.is_binarized(): self.pianoroll = (self.pianoroll > threshold)
[ "def", "binarize", "(", "self", ",", "threshold", "=", "0", ")", ":", "if", "not", "self", ".", "is_binarized", "(", ")", ":", "self", ".", "pianoroll", "=", "(", "self", ".", "pianoroll", ">", "threshold", ")" ]
Binarize the pianoroll. Parameters ---------- threshold : int or float A threshold used to binarize the pianorolls. Defaults to zero.
[ "Binarize", "the", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L101-L112
-1
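`binarize` and `assign_constant` above interact through the pianoroll's dtype; a sketch assuming the positional `Track(pianoroll)` constructor used inside `append_track`:

import numpy as np
from pypianoroll import Track

track = Track(np.zeros((4, 128), np.uint8))
track.pianoroll[0, 60] = 37

track.binarize()               # values above the threshold become True
print(track.pianoroll.dtype)   # bool

track.assign_constant(100)     # a binarized roll is re-cast to the value's type
print(track.pianoroll[0, 60])  # 100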
560
salu133445/pypianoroll
pypianoroll/track.py
Track.check_validity
def check_validity(self):
    """Raise an error if any invalid attribute is found."""
    # pianoroll
    if not isinstance(self.pianoroll, np.ndarray):
        raise TypeError("`pianoroll` must be a numpy array.")
    if not (np.issubdtype(self.pianoroll.dtype, np.bool_)
            or np.issubdtype(self.pianoroll.dtype, np.number)):
        raise TypeError("The data type of `pianoroll` must be np.bool_ or "
                        "a subdtype of np.number.")
    if self.pianoroll.ndim != 2:
        raise ValueError("`pianoroll` must have exactly two dimensions.")
    if self.pianoroll.shape[1] != 128:
        raise ValueError("The length of the second axis of `pianoroll` "
                         "must be 128.")
    # program
    if not isinstance(self.program, int):
        raise TypeError("`program` must be int.")
    if self.program < 0 or self.program > 127:
        raise ValueError("`program` must be between 0 and 127.")
    # is_drum
    if not isinstance(self.is_drum, bool):
        raise TypeError("`is_drum` must be bool.")
    # name
    if not isinstance(self.name, string_types):
        raise TypeError("`name` must be a string.")
python
def check_validity(self):
    """Raise an error if any invalid attribute is found."""
    # pianoroll
    if not isinstance(self.pianoroll, np.ndarray):
        raise TypeError("`pianoroll` must be a numpy array.")
    if not (np.issubdtype(self.pianoroll.dtype, np.bool_)
            or np.issubdtype(self.pianoroll.dtype, np.number)):
        raise TypeError("The data type of `pianoroll` must be np.bool_ or "
                        "a subdtype of np.number.")
    if self.pianoroll.ndim != 2:
        raise ValueError("`pianoroll` must have exactly two dimensions.")
    if self.pianoroll.shape[1] != 128:
        raise ValueError("The length of the second axis of `pianoroll` "
                         "must be 128.")
    # program
    if not isinstance(self.program, int):
        raise TypeError("`program` must be int.")
    if self.program < 0 or self.program > 127:
        raise ValueError("`program` must be between 0 and 127.")
    # is_drum
    if not isinstance(self.is_drum, bool):
        raise TypeError("`is_drum` must be bool.")
    # name
    if not isinstance(self.name, string_types):
        raise TypeError("`name` must be a string.")
[ "def", "check_validity", "(", "self", ")", ":", "# pianoroll", "if", "not", "isinstance", "(", "self", ".", "pianoroll", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"`pianoroll` must be a numpy array.\"", ")", "if", "not", "(", "np", ".", "issubdtype", "(", "self", ".", "pianoroll", ".", "dtype", ",", "np", ".", "bool_", ")", "or", "np", ".", "issubdtype", "(", "self", ".", "pianoroll", ".", "dtype", ",", "np", ".", "number", ")", ")", ":", "raise", "TypeError", "(", "\"The data type of `pianoroll` must be np.bool_ or \"", "\"a subdtype of np.number.\"", ")", "if", "self", ".", "pianoroll", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"`pianoroll` must have exactly two dimensions.\"", ")", "if", "self", ".", "pianoroll", ".", "shape", "[", "1", "]", "!=", "128", ":", "raise", "ValueError", "(", "\"The length of the second axis of `pianoroll` \"", "\"must be 128.\"", ")", "# program", "if", "not", "isinstance", "(", "self", ".", "program", ",", "int", ")", ":", "raise", "TypeError", "(", "\"`program` must be int.\"", ")", "if", "self", ".", "program", "<", "0", "or", "self", ".", "program", ">", "127", ":", "raise", "ValueError", "(", "\"`program` must be in between 0 to 127.\"", ")", "# is_drum", "if", "not", "isinstance", "(", "self", ".", "is_drum", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"`is_drum` must be bool.\"", ")", "# name", "if", "not", "isinstance", "(", "self", ".", "name", ",", "string_types", ")", ":", "raise", "TypeError", "(", "\"`name` must be a string.\"", ")" ]
Raise an error if any invalid attribute is found.
[ "Raise", "error", "if", "any", "invalid", "attribute", "found", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L114-L138
-1
561
salu133445/pypianoroll
pypianoroll/track.py
Track.clip
def clip(self, lower=0, upper=127): """ Clip the pianoroll by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianoroll. Defaults to 0. upper : int or float The upper bound to clip the pianoroll. Defaults to 127. """ self.pianoroll = self.pianoroll.clip(lower, upper)
python
def clip(self, lower=0, upper=127): """ Clip the pianoroll by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianoroll. Defaults to 0. upper : int or float The upper bound to clip the pianoroll. Defaults to 127. """ self.pianoroll = self.pianoroll.clip(lower, upper)
[ "def", "clip", "(", "self", ",", "lower", "=", "0", ",", "upper", "=", "127", ")", ":", "self", ".", "pianoroll", "=", "self", ".", "pianoroll", ".", "clip", "(", "lower", ",", "upper", ")" ]
Clip the pianoroll by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianoroll. Defaults to 0. upper : int or float The upper bound to clip the pianoroll. Defaults to 127.
[ "Clip", "the", "pianoroll", "by", "the", "given", "lower", "and", "upper", "bounds", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L140-L152
-1
562
salu133445/pypianoroll
pypianoroll/track.py
Track.is_binarized
def is_binarized(self): """ Return True if the pianoroll is already binarized. Otherwise, return False. Returns ------- is_binarized : bool True if the pianoroll is already binarized; otherwise, False. """ is_binarized = np.issubdtype(self.pianoroll.dtype, np.bool_) return is_binarized
python
def is_binarized(self): """ Return True if the pianoroll is already binarized. Otherwise, return False. Returns ------- is_binarized : bool True if the pianoroll is already binarized; otherwise, False. """ is_binarized = np.issubdtype(self.pianoroll.dtype, np.bool_) return is_binarized
[ "def", "is_binarized", "(", "self", ")", ":", "is_binarized", "=", "np", ".", "issubdtype", "(", "self", ".", "pianoroll", ".", "dtype", ",", "np", ".", "bool_", ")", "return", "is_binarized" ]
Return True if the pianoroll is already binarized. Otherwise, return False. Returns ------- is_binarized : bool True if the pianoroll is already binarized; otherwise, False.
[ "Return", "True", "if", "the", "pianoroll", "is", "already", "binarized", ".", "Otherwise", "return", "False", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L225-L237
-1
563
salu133445/pypianoroll
pypianoroll/track.py
Track.pad
def pad(self, pad_length): """ Pad the pianoroll with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad with zeros along the time axis. """ self.pianoroll = np.pad( self.pianoroll, ((0, pad_length), (0, 0)), 'constant')
python
def pad(self, pad_length): """ Pad the pianoroll with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad with zeros along the time axis. """ self.pianoroll = np.pad( self.pianoroll, ((0, pad_length), (0, 0)), 'constant')
[ "def", "pad", "(", "self", ",", "pad_length", ")", ":", "self", ".", "pianoroll", "=", "np", ".", "pad", "(", "self", ".", "pianoroll", ",", "(", "(", "0", ",", "pad_length", ")", ",", "(", "0", ",", "0", ")", ")", ",", "'constant'", ")" ]
Pad the pianoroll with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad with zeros along the time axis.
[ "Pad", "the", "pianoroll", "with", "zeros", "at", "the", "end", "along", "the", "time", "axis", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L239-L250
-1
564
salu133445/pypianoroll
pypianoroll/track.py
Track.pad_to_multiple
def pad_to_multiple(self, factor): """ Pad the pianoroll with zeros at the end along the time axis with the minimum length that makes the resulting pianoroll length a multiple of `factor`. Parameters ---------- factor : int The value which the length of the resulting pianoroll will be a multiple of. """ remainder = self.pianoroll.shape[0] % factor if remainder: pad_width = ((0, (factor - remainder)), (0, 0)) self.pianoroll = np.pad(self.pianoroll, pad_width, 'constant')
python
def pad_to_multiple(self, factor): """ Pad the pianoroll with zeros at the end along the time axis with the minimum length that makes the resulting pianoroll length a multiple of `factor`. Parameters ---------- factor : int The value which the length of the resulting pianoroll will be a multiple of. """ remainder = self.pianoroll.shape[0] % factor if remainder: pad_width = ((0, (factor - remainder)), (0, 0)) self.pianoroll = np.pad(self.pianoroll, pad_width, 'constant')
[ "def", "pad_to_multiple", "(", "self", ",", "factor", ")", ":", "remainder", "=", "self", ".", "pianoroll", ".", "shape", "[", "0", "]", "%", "factor", "if", "remainder", ":", "pad_width", "=", "(", "(", "0", ",", "(", "factor", "-", "remainder", ")", ")", ",", "(", "0", ",", "0", ")", ")", "self", ".", "pianoroll", "=", "np", ".", "pad", "(", "self", ".", "pianoroll", ",", "pad_width", ",", "'constant'", ")" ]
Pad the pianoroll with zeros at the end along the time axis with the minimum length that makes the resulting pianoroll length a multiple of `factor`. Parameters ---------- factor : int The value which the length of the resulting pianoroll will be a multiple of.
[ "Pad", "the", "pianoroll", "with", "zeros", "at", "the", "end", "along", "the", "time", "axis", "with", "the", "minimum", "length", "that", "makes", "the", "resulting", "pianoroll", "length", "a", "multiple", "of", "factor", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L252-L268
-1
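The padding arithmetic above is worth seeing with numbers; a quick sketch with plain numpy (not library code), padding a 13-step pianoroll to a multiple of 4:

import numpy as np

pianoroll = np.zeros((13, 128), dtype=bool)
factor = 4

remainder = pianoroll.shape[0] % factor  # 13 % 4 == 1
if remainder:
    # append factor - remainder == 3 empty time steps at the end
    pianoroll = np.pad(pianoroll, ((0, factor - remainder), (0, 0)), 'constant')

print(pianoroll.shape)  # (16, 128), and 16 is a multiple of 4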
565
salu133445/pypianoroll
pypianoroll/track.py
Track.transpose
def transpose(self, semitone): """ Transpose the pianoroll by a number of semitones, where positive values are for a higher key, while negative values are for a lower key. Parameters ---------- semitone : int The number of semitones to transpose the pianoroll. """ if semitone > 0 and semitone < 128: self.pianoroll[:, semitone:] = self.pianoroll[:, :(128 - semitone)] self.pianoroll[:, :semitone] = 0 elif semitone < 0 and semitone > -128: self.pianoroll[:, :(128 + semitone)] = self.pianoroll[:, -semitone:] self.pianoroll[:, (128 + semitone):] = 0
python
def transpose(self, semitone): """ Transpose the pianoroll by a number of semitones, where positive values are for a higher key, while negative values are for a lower key. Parameters ---------- semitone : int The number of semitones to transpose the pianoroll. """ if semitone > 0 and semitone < 128: self.pianoroll[:, semitone:] = self.pianoroll[:, :(128 - semitone)] self.pianoroll[:, :semitone] = 0 elif semitone < 0 and semitone > -128: self.pianoroll[:, :(128 + semitone)] = self.pianoroll[:, -semitone:] self.pianoroll[:, (128 + semitone):] = 0
[ "def", "transpose", "(", "self", ",", "semitone", ")", ":", "if", "semitone", ">", "0", "and", "semitone", "<", "128", ":", "self", ".", "pianoroll", "[", ":", ",", "semitone", ":", "]", "=", "self", ".", "pianoroll", "[", ":", ",", ":", "(", "128", "-", "semitone", ")", "]", "self", ".", "pianoroll", "[", ":", ",", ":", "semitone", "]", "=", "0", "elif", "semitone", "<", "0", "and", "semitone", ">", "-", "128", ":", "self", ".", "pianoroll", "[", ":", ",", ":", "(", "128", "+", "semitone", ")", "]", "=", "self", ".", "pianoroll", "[", ":", ",", "-", "semitone", ":", "]", "self", ".", "pianoroll", "[", ":", ",", "(", "128", "+", "semitone", ")", ":", "]", "=", "0" ]
Transpose the pianoroll by a number of semitones, where positive values are for a higher key, while negative values are for a lower key. Parameters ---------- semitone : int The number of semitones to transpose the pianoroll.
[ "Transpose", "the", "pianoroll", "by", "a", "number", "of", "semitones", "where", "positive", "values", "are", "for", "higher", "key", "while", "negative", "values", "are", "for", "lower", "key", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L275-L291
-1
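The transpose above moves whole pitch columns rather than changing values; the same slice assignment on a toy array shows the mechanics (a sketch, assuming nothing beyond numpy):

import numpy as np

roll = np.zeros((4, 128), dtype=np.uint8)
roll[:, 60] = 80  # a note at MIDI pitch 60

semitone = 2  # transpose up two semitones
roll[:, semitone:] = roll[:, :128 - semitone]
roll[:, :semitone] = 0  # the bottom columns are cleared, not wrapped

print(np.flatnonzero(roll[0]))  # [62]: the note now sits at pitch 62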
566
salu133445/pypianoroll
pypianoroll/track.py
Track.trim_trailing_silence
def trim_trailing_silence(self): """Trim the trailing silence of the pianoroll.""" length = self.get_active_length() self.pianoroll = self.pianoroll[:length]
python
def trim_trailing_silence(self): """Trim the trailing silence of the pianoroll.""" length = self.get_active_length() self.pianoroll = self.pianoroll[:length]
[ "def", "trim_trailing_silence", "(", "self", ")", ":", "length", "=", "self", ".", "get_active_length", "(", ")", "self", ".", "pianoroll", "=", "self", ".", "pianoroll", "[", ":", "length", "]" ]
Trim the trailing silence of the pianoroll.
[ "Trim", "the", "trailing", "silence", "of", "the", "pianoroll", "." ]
6224dc1e29222de2124d249acb80f3d072166917
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L293-L296
-1
567
dnouri/nolearn
nolearn/lasagne/visualize.py
plot_conv_weights
def plot_conv_weights(layer, figsize=(6, 6)): """Plot the weights of a specific layer. Only really makes sense with convolutional layers. Parameters ---------- layer : lasagne.layers.Layer """ W = layer.W.get_value() shape = W.shape nrows = np.ceil(np.sqrt(shape[0])).astype(int) ncols = nrows for feature_map in range(shape[1]): figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False) for ax in axes.flatten(): ax.set_xticks([]) ax.set_yticks([]) ax.axis('off') for i, (r, c) in enumerate(product(range(nrows), range(ncols))): if i >= shape[0]: break axes[r, c].imshow(W[i, feature_map], cmap='gray', interpolation='none') return plt
python
def plot_conv_weights(layer, figsize=(6, 6)): """Plot the weights of a specific layer. Only really makes sense with convolutional layers. Parameters ---------- layer : lasagne.layers.Layer """ W = layer.W.get_value() shape = W.shape nrows = np.ceil(np.sqrt(shape[0])).astype(int) ncols = nrows for feature_map in range(shape[1]): figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False) for ax in axes.flatten(): ax.set_xticks([]) ax.set_yticks([]) ax.axis('off') for i, (r, c) in enumerate(product(range(nrows), range(ncols))): if i >= shape[0]: break axes[r, c].imshow(W[i, feature_map], cmap='gray', interpolation='none') return plt
[ "def", "plot_conv_weights", "(", "layer", ",", "figsize", "=", "(", "6", ",", "6", ")", ")", ":", "W", "=", "layer", ".", "W", ".", "get_value", "(", ")", "shape", "=", "W", ".", "shape", "nrows", "=", "np", ".", "ceil", "(", "np", ".", "sqrt", "(", "shape", "[", "0", "]", ")", ")", ".", "astype", "(", "int", ")", "ncols", "=", "nrows", "for", "feature_map", "in", "range", "(", "shape", "[", "1", "]", ")", ":", "figs", ",", "axes", "=", "plt", ".", "subplots", "(", "nrows", ",", "ncols", ",", "figsize", "=", "figsize", ",", "squeeze", "=", "False", ")", "for", "ax", "in", "axes", ".", "flatten", "(", ")", ":", "ax", ".", "set_xticks", "(", "[", "]", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "ax", ".", "axis", "(", "'off'", ")", "for", "i", ",", "(", "r", ",", "c", ")", "in", "enumerate", "(", "product", "(", "range", "(", "nrows", ")", ",", "range", "(", "ncols", ")", ")", ")", ":", "if", "i", ">=", "shape", "[", "0", "]", ":", "break", "axes", "[", "r", ",", "c", "]", ".", "imshow", "(", "W", "[", "i", ",", "feature_map", "]", ",", "cmap", "=", "'gray'", ",", "interpolation", "=", "'none'", ")", "return", "plt" ]
Plot the weights of a specific layer. Only really makes sense with convolutional layers. Parameters ---------- layer : lasagne.layers.Layer
[ "Plot", "the", "weights", "of", "a", "specific", "layer", "." ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L26-L54
-1
568
dnouri/nolearn
nolearn/lasagne/visualize.py
plot_conv_activity
def plot_conv_activity(layer, x, figsize=(6, 8)): """Plot the activities of a specific layer. Only really makes sense with layers that work on 2D data (2D convolutional layers, 2D pooling layers ...). Parameters ---------- layer : lasagne.layers.Layer x : numpy.ndarray Only takes one sample at a time, i.e. x.shape[0] == 1. """ if x.shape[0] != 1: raise ValueError("Only one sample can be plotted at a time.") # compile theano function xs = T.tensor4('xs').astype(theano.config.floatX) get_activity = theano.function([xs], get_output(layer, xs)) activity = get_activity(x) shape = activity.shape nrows = np.ceil(np.sqrt(shape[1])).astype(int) ncols = nrows figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False) axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray', interpolation='none') axes[0, ncols // 2].set_title('original') for ax in axes.flatten(): ax.set_xticks([]) ax.set_yticks([]) ax.axis('off') for i, (r, c) in enumerate(product(range(nrows), range(ncols))): if i >= shape[1]: break ndim = activity[0][i].ndim if ndim != 2: raise ValueError("Wrong number of dimensions, image data should " "have 2, instead got {}".format(ndim)) axes[r + 1, c].imshow(-activity[0][i], cmap='gray', interpolation='none') return plt
python
def plot_conv_activity(layer, x, figsize=(6, 8)): """Plot the activities of a specific layer. Only really makes sense with layers that work on 2D data (2D convolutional layers, 2D pooling layers ...). Parameters ---------- layer : lasagne.layers.Layer x : numpy.ndarray Only takes one sample at a time, i.e. x.shape[0] == 1. """ if x.shape[0] != 1: raise ValueError("Only one sample can be plotted at a time.") # compile theano function xs = T.tensor4('xs').astype(theano.config.floatX) get_activity = theano.function([xs], get_output(layer, xs)) activity = get_activity(x) shape = activity.shape nrows = np.ceil(np.sqrt(shape[1])).astype(int) ncols = nrows figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False) axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray', interpolation='none') axes[0, ncols // 2].set_title('original') for ax in axes.flatten(): ax.set_xticks([]) ax.set_yticks([]) ax.axis('off') for i, (r, c) in enumerate(product(range(nrows), range(ncols))): if i >= shape[1]: break ndim = activity[0][i].ndim if ndim != 2: raise ValueError("Wrong number of dimensions, image data should " "have 2, instead got {}".format(ndim)) axes[r + 1, c].imshow(-activity[0][i], cmap='gray', interpolation='none') return plt
[ "def", "plot_conv_activity", "(", "layer", ",", "x", ",", "figsize", "=", "(", "6", ",", "8", ")", ")", ":", "if", "x", ".", "shape", "[", "0", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"Only one sample can be plotted at a time.\"", ")", "# compile theano function", "xs", "=", "T", ".", "tensor4", "(", "'xs'", ")", ".", "astype", "(", "theano", ".", "config", ".", "floatX", ")", "get_activity", "=", "theano", ".", "function", "(", "[", "xs", "]", ",", "get_output", "(", "layer", ",", "xs", ")", ")", "activity", "=", "get_activity", "(", "x", ")", "shape", "=", "activity", ".", "shape", "nrows", "=", "np", ".", "ceil", "(", "np", ".", "sqrt", "(", "shape", "[", "1", "]", ")", ")", ".", "astype", "(", "int", ")", "ncols", "=", "nrows", "figs", ",", "axes", "=", "plt", ".", "subplots", "(", "nrows", "+", "1", ",", "ncols", ",", "figsize", "=", "figsize", ",", "squeeze", "=", "False", ")", "axes", "[", "0", ",", "ncols", "//", "2", "]", ".", "imshow", "(", "1", "-", "x", "[", "0", "]", "[", "0", "]", ",", "cmap", "=", "'gray'", ",", "interpolation", "=", "'none'", ")", "axes", "[", "0", ",", "ncols", "//", "2", "]", ".", "set_title", "(", "'original'", ")", "for", "ax", "in", "axes", ".", "flatten", "(", ")", ":", "ax", ".", "set_xticks", "(", "[", "]", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "ax", ".", "axis", "(", "'off'", ")", "for", "i", ",", "(", "r", ",", "c", ")", "in", "enumerate", "(", "product", "(", "range", "(", "nrows", ")", ",", "range", "(", "ncols", ")", ")", ")", ":", "if", "i", ">=", "shape", "[", "1", "]", ":", "break", "ndim", "=", "activity", "[", "0", "]", "[", "i", "]", ".", "ndim", "if", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"Wrong number of dimensions, image data should \"", "\"have 2, instead got {}\"", ".", "format", "(", "ndim", ")", ")", "axes", "[", "r", "+", "1", ",", "c", "]", ".", "imshow", "(", "-", "activity", "[", "0", "]", "[", "i", "]", ",", "cmap", "=", "'gray'", ",", "interpolation", "=", "'none'", ")", "return", "plt" ]
Plot the activities of a specific layer. Only really makes sense with layers that work on 2D data (2D convolutional layers, 2D pooling layers ...). Parameters ---------- layer : lasagne.layers.Layer x : numpy.ndarray Only takes one sample at a time, i.e. x.shape[0] == 1.
[ "Plot", "the", "acitivities", "of", "a", "specific", "layer", "." ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L57-L102
-1
569
dnouri/nolearn
nolearn/lasagne/visualize.py
occlusion_heatmap
def occlusion_heatmap(net, x, target, square_length=7): """An occlusion test that checks an image for its critical parts. In this function, a square part of the image is occluded (i.e. set to 0) and then the net is tested for its propensity to predict the correct label. One should expect that this propensity shrinks if critical parts of the image are occluded. If not, this indicates overfitting. Depending on the depth of the net and the size of the image, this function may take a while to finish, since one prediction for each pixel of the image is made. Currently, all color channels are occluded at the same time. Also, this does not really work if images are randomly distorted by the batch iterator. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. x : np.array The input data, should be of shape (1, c, x, y). Only makes sense with image data. target : int The true value of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. square_length : int (default=7) The length of the side of the square that occludes the image. Must be an odd number. Returns ------- heat_array : np.array (with same size as image) A 2D np.array that at each point (i, j) contains the predicted probability of the correct class if the image is occluded by a square with center (i, j). """ if (x.ndim != 4) or x.shape[0] != 1: raise ValueError("This function requires the input data to be of " "shape (1, c, x, y), instead got {}".format(x.shape)) if square_length % 2 == 0: raise ValueError("Square length has to be an odd number, instead " "got {}.".format(square_length)) num_classes = get_output_shape(net.layers_[-1])[1] img = x[0].copy() bs, col, s0, s1 = x.shape heat_array = np.zeros((s0, s1)) pad = square_length // 2 + 1 x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype) probs = np.zeros((s0, s1, num_classes)) # generate occluded images for i in range(s0): # batch s1 occluded images for faster prediction for j in range(s1): x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant') x_pad[:, i:i + square_length, j:j + square_length] = 0. x_occluded[j] = x_pad[:, pad:-pad, pad:-pad] y_proba = net.predict_proba(x_occluded) probs[i] = y_proba.reshape(s1, num_classes) # from predicted probabilities, pick only those of target class for i in range(s0): for j in range(s1): heat_array[i, j] = probs[i, j, target] return heat_array
python
def occlusion_heatmap(net, x, target, square_length=7): """An occlusion test that checks an image for its critical parts. In this function, a square part of the image is occluded (i.e. set to 0) and then the net is tested for its propensity to predict the correct label. One should expect that this propensity shrinks if critical parts of the image are occluded. If not, this indicates overfitting. Depending on the depth of the net and the size of the image, this function may take a while to finish, since one prediction for each pixel of the image is made. Currently, all color channels are occluded at the same time. Also, this does not really work if images are randomly distorted by the batch iterator. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. x : np.array The input data, should be of shape (1, c, x, y). Only makes sense with image data. target : int The true value of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. square_length : int (default=7) The length of the side of the square that occludes the image. Must be an odd number. Returns ------- heat_array : np.array (with same size as image) A 2D np.array that at each point (i, j) contains the predicted probability of the correct class if the image is occluded by a square with center (i, j). """ if (x.ndim != 4) or x.shape[0] != 1: raise ValueError("This function requires the input data to be of " "shape (1, c, x, y), instead got {}".format(x.shape)) if square_length % 2 == 0: raise ValueError("Square length has to be an odd number, instead " "got {}.".format(square_length)) num_classes = get_output_shape(net.layers_[-1])[1] img = x[0].copy() bs, col, s0, s1 = x.shape heat_array = np.zeros((s0, s1)) pad = square_length // 2 + 1 x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype) probs = np.zeros((s0, s1, num_classes)) # generate occluded images for i in range(s0): # batch s1 occluded images for faster prediction for j in range(s1): x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant') x_pad[:, i:i + square_length, j:j + square_length] = 0. x_occluded[j] = x_pad[:, pad:-pad, pad:-pad] y_proba = net.predict_proba(x_occluded) probs[i] = y_proba.reshape(s1, num_classes) # from predicted probabilities, pick only those of target class for i in range(s0): for j in range(s1): heat_array[i, j] = probs[i, j, target] return heat_array
[ "def", "occlusion_heatmap", "(", "net", ",", "x", ",", "target", ",", "square_length", "=", "7", ")", ":", "if", "(", "x", ".", "ndim", "!=", "4", ")", "or", "x", ".", "shape", "[", "0", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"This function requires the input data to be of \"", "\"shape (1, c, x, y), instead got {}\"", ".", "format", "(", "x", ".", "shape", ")", ")", "if", "square_length", "%", "2", "==", "0", ":", "raise", "ValueError", "(", "\"Square length has to be an odd number, instead \"", "\"got {}.\"", ".", "format", "(", "square_length", ")", ")", "num_classes", "=", "get_output_shape", "(", "net", ".", "layers_", "[", "-", "1", "]", ")", "[", "1", "]", "img", "=", "x", "[", "0", "]", ".", "copy", "(", ")", "bs", ",", "col", ",", "s0", ",", "s1", "=", "x", ".", "shape", "heat_array", "=", "np", ".", "zeros", "(", "(", "s0", ",", "s1", ")", ")", "pad", "=", "square_length", "//", "2", "+", "1", "x_occluded", "=", "np", ".", "zeros", "(", "(", "s1", ",", "col", ",", "s0", ",", "s1", ")", ",", "dtype", "=", "img", ".", "dtype", ")", "probs", "=", "np", ".", "zeros", "(", "(", "s0", ",", "s1", ",", "num_classes", ")", ")", "# generate occluded images", "for", "i", "in", "range", "(", "s0", ")", ":", "# batch s1 occluded images for faster prediction", "for", "j", "in", "range", "(", "s1", ")", ":", "x_pad", "=", "np", ".", "pad", "(", "img", ",", "(", "(", "0", ",", "0", ")", ",", "(", "pad", ",", "pad", ")", ",", "(", "pad", ",", "pad", ")", ")", ",", "'constant'", ")", "x_pad", "[", ":", ",", "i", ":", "i", "+", "square_length", ",", "j", ":", "j", "+", "square_length", "]", "=", "0.", "x_occluded", "[", "j", "]", "=", "x_pad", "[", ":", ",", "pad", ":", "-", "pad", ",", "pad", ":", "-", "pad", "]", "y_proba", "=", "net", ".", "predict_proba", "(", "x_occluded", ")", "probs", "[", "i", "]", "=", "y_proba", ".", "reshape", "(", "s1", ",", "num_classes", ")", "# from predicted probabilities, pick only those of target class", "for", "i", "in", "range", "(", "s0", ")", ":", "for", "j", "in", "range", "(", "s1", ")", ":", "heat_array", "[", "i", ",", "j", "]", "=", "probs", "[", "i", ",", "j", ",", "target", "]", "return", "heat_array" ]
An occlusion test that checks an image for its critical parts. In this function, a square part of the image is occluded (i.e. set to 0) and then the net is tested for its propensity to predict the correct label. One should expect that this propensity shrinks if critical parts of the image are occluded. If not, this indicates overfitting. Depending on the depth of the net and the size of the image, this function may take a while to finish, since one prediction for each pixel of the image is made. Currently, all color channels are occluded at the same time. Also, this does not really work if images are randomly distorted by the batch iterator. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. x : np.array The input data, should be of shape (1, c, x, y). Only makes sense with image data. target : int The true value of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. square_length : int (default=7) The length of the side of the square that occludes the image. Must be an odd number. Returns ------- heat_array : np.array (with same size as image) A 2D np.array that at each point (i, j) contains the predicted probability of the correct class if the image is occluded by a square with center (i, j).
[ "An", "occlusion", "test", "that", "checks", "an", "image", "for", "its", "critical", "parts", "." ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L105-L180
-1
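A hedged usage sketch for the occlusion test above; `net` stands for a trained nolearn NeuralNet for 28x28 grayscale inputs and `target` is a made-up true class, both assumptions rather than values from this record:

import numpy as np
from nolearn.lasagne.visualize import occlusion_heatmap

x = np.random.rand(1, 1, 28, 28).astype('float32')  # one fake image, shape (1, c, x, y)
target = 3                                          # hypothetical true class

# `net` is assumed: a trained NeuralNet whose last layer outputs class probabilities
heat = occlusion_heatmap(net, x, target, square_length=7)
print(heat.shape)  # (28, 28): one probability per occluder center

# low values mark regions whose occlusion destroys the correct prediction,
# which is what the docstring means by "critical parts"
i, j = np.unravel_index(heat.argmin(), heat.shape)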
570
dnouri/nolearn
nolearn/lasagne/visualize.py
plot_occlusion
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)): """Plot which parts of an image are particularly important for the net to classify the image correctly. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. X : numpy.array The input data, should be of shape (b, c, 0, 1). Only makes sense with image data. target : list or numpy.array of ints The true values of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. If more than one sample is passed to X, each of them needs its own target. square_length : int (default=7) The length of the side of the square that occludes the image. Must be an odd number. figsize : tuple (int, int) Size of the figure. Plots ----- Figure with 3 subplots: the original image, the occlusion heatmap, and both images super-imposed. """ return _plot_heat_map( net, X, figsize, lambda net, X, n: occlusion_heatmap( net, X, target[n], square_length))
python
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)): """Plot which parts of an image are particularly important for the net to classify the image correctly. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. X : numpy.array The input data, should be of shape (b, c, 0, 1). Only makes sense with image data. target : list or numpy.array of ints The true values of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. If more than one sample is passed to X, each of them needs its own target. square_length : int (default=7) The length of the side of the square that occludes the image. Must be an odd number. figsize : tuple (int, int) Size of the figure. Plots ----- Figure with 3 subplots: the original image, the occlusion heatmap, and both images super-imposed. """ return _plot_heat_map( net, X, figsize, lambda net, X, n: occlusion_heatmap( net, X, target[n], square_length))
[ "def", "plot_occlusion", "(", "net", ",", "X", ",", "target", ",", "square_length", "=", "7", ",", "figsize", "=", "(", "9", ",", "None", ")", ")", ":", "return", "_plot_heat_map", "(", "net", ",", "X", ",", "figsize", ",", "lambda", "net", ",", "X", ",", "n", ":", "occlusion_heatmap", "(", "net", ",", "X", ",", "target", "[", "n", "]", ",", "square_length", ")", ")" ]
Plot which parts of an image are particularly important for the net to classify the image correctly. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. X : numpy.array The input data, should be of shape (b, c, 0, 1). Only makes sense with image data. target : list or numpy.array of ints The true values of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. If more than one sample is passed to X, each of them needs its own target. square_length : int (default=7) The length of the side of the square that occludes the image. Must be an odd number. figsize : tuple (int, int) Size of the figure. Plots ----- Figure with 3 subplots: the original image, the occlusion heatmap, and both images super-imposed.
[ "Plot", "which", "parts", "of", "an", "image", "are", "particularly", "import", "for", "the", "net", "to", "classify", "the", "image", "correctly", "." ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L214-L250
-1
571
dnouri/nolearn
nolearn/metrics.py
multiclass_logloss
def multiclass_logloss(actual, predicted, eps=1e-15): """Multi class version of Logarithmic Loss metric. :param actual: Array containing the actual target classes :param predicted: Matrix with class predictions, one probability per class """ # Convert 'actual' to a binary array if it's not already: if len(actual.shape) == 1: actual2 = np.zeros((actual.shape[0], predicted.shape[1])) for i, val in enumerate(actual): actual2[i, val] = 1 actual = actual2 clip = np.clip(predicted, eps, 1 - eps) rows = actual.shape[0] vsota = np.sum(actual * np.log(clip)) return -1.0 / rows * vsota
python
def multiclass_logloss(actual, predicted, eps=1e-15): """Multi class version of Logarithmic Loss metric. :param actual: Array containing the actual target classes :param predicted: Matrix with class predictions, one probability per class """ # Convert 'actual' to a binary array if it's not already: if len(actual.shape) == 1: actual2 = np.zeros((actual.shape[0], predicted.shape[1])) for i, val in enumerate(actual): actual2[i, val] = 1 actual = actual2 clip = np.clip(predicted, eps, 1 - eps) rows = actual.shape[0] vsota = np.sum(actual * np.log(clip)) return -1.0 / rows * vsota
[ "def", "multiclass_logloss", "(", "actual", ",", "predicted", ",", "eps", "=", "1e-15", ")", ":", "# Convert 'actual' to a binary array if it's not already:", "if", "len", "(", "actual", ".", "shape", ")", "==", "1", ":", "actual2", "=", "np", ".", "zeros", "(", "(", "actual", ".", "shape", "[", "0", "]", ",", "predicted", ".", "shape", "[", "1", "]", ")", ")", "for", "i", ",", "val", "in", "enumerate", "(", "actual", ")", ":", "actual2", "[", "i", ",", "val", "]", "=", "1", "actual", "=", "actual2", "clip", "=", "np", ".", "clip", "(", "predicted", ",", "eps", ",", "1", "-", "eps", ")", "rows", "=", "actual", ".", "shape", "[", "0", "]", "vsota", "=", "np", ".", "sum", "(", "actual", "*", "np", ".", "log", "(", "clip", ")", ")", "return", "-", "1.0", "/", "rows", "*", "vsota" ]
Multi class version of Logarithmic Loss metric. :param actual: Array containing the actual target classes :param predicted: Matrix with class predictions, one probability per class
[ "Multi", "class", "version", "of", "Logarithmic", "Loss", "metric", "." ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/metrics.py#L8-L24
-1
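A worked example of the metric above, assuming multiclass_logloss is importable from nolearn.metrics as in this record: with integer labels the function first one-hot encodes `actual`, then averages -sum(y * log(p)) over rows:

import numpy as np
from nolearn.metrics import multiclass_logloss

actual = np.array([0, 2])               # integer class labels
predicted = np.array([[0.7, 0.2, 0.1],  # one probability per class, per row
                      [0.1, 0.1, 0.8]])

# only the probability of the true class survives the one-hot product:
# loss = -(log 0.7 + log 0.8) / 2 ~= 0.2899
print(multiclass_logloss(actual, predicted))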
572
dnouri/nolearn
nolearn/lasagne/base.py
objective
def objective(layers, loss_function, target, aggregate=aggregate, deterministic=False, l1=0, l2=0, get_output_kw=None): """ Default implementation of the NeuralNet objective. :param layers: The underlying layers of the NeuralNetwork :param loss_function: The callable loss function to use :param target: the expected output :param aggregate: the aggregation function to use :param deterministic: Whether or not to get a deterministic output :param l1: Optional l1 regularization parameter :param l2: Optional l2 regularization parameter :param get_output_kw: optional kwargs to pass to :meth:`NeuralNetwork.get_output` :return: The total calculated loss """ if get_output_kw is None: get_output_kw = {} output_layer = layers[-1] network_output = get_output( output_layer, deterministic=deterministic, **get_output_kw) loss = aggregate(loss_function(network_output, target)) if l1: loss += regularization.regularize_layer_params( layers.values(), regularization.l1) * l1 if l2: loss += regularization.regularize_layer_params( layers.values(), regularization.l2) * l2 return loss
python
def objective(layers, loss_function, target, aggregate=aggregate, deterministic=False, l1=0, l2=0, get_output_kw=None): """ Default implementation of the NeuralNet objective. :param layers: The underlying layers of the NeuralNetwork :param loss_function: The callable loss function to use :param target: the expected output :param aggregate: the aggregation function to use :param deterministic: Whether or not to get a deterministic output :param l1: Optional l1 regularization parameter :param l2: Optional l2 regularization parameter :param get_output_kw: optional kwargs to pass to :meth:`NeuralNetwork.get_output` :return: The total calculated loss """ if get_output_kw is None: get_output_kw = {} output_layer = layers[-1] network_output = get_output( output_layer, deterministic=deterministic, **get_output_kw) loss = aggregate(loss_function(network_output, target)) if l1: loss += regularization.regularize_layer_params( layers.values(), regularization.l1) * l1 if l2: loss += regularization.regularize_layer_params( layers.values(), regularization.l2) * l2 return loss
[ "def", "objective", "(", "layers", ",", "loss_function", ",", "target", ",", "aggregate", "=", "aggregate", ",", "deterministic", "=", "False", ",", "l1", "=", "0", ",", "l2", "=", "0", ",", "get_output_kw", "=", "None", ")", ":", "if", "get_output_kw", "is", "None", ":", "get_output_kw", "=", "{", "}", "output_layer", "=", "layers", "[", "-", "1", "]", "network_output", "=", "get_output", "(", "output_layer", ",", "deterministic", "=", "deterministic", ",", "*", "*", "get_output_kw", ")", "loss", "=", "aggregate", "(", "loss_function", "(", "network_output", ",", "target", ")", ")", "if", "l1", ":", "loss", "+=", "regularization", ".", "regularize_layer_params", "(", "layers", ".", "values", "(", ")", ",", "regularization", ".", "l1", ")", "*", "l1", "if", "l2", ":", "loss", "+=", "regularization", ".", "regularize_layer_params", "(", "layers", ".", "values", "(", ")", ",", "regularization", ".", "l2", ")", "*", "l2", "return", "loss" ]
Default implementation of the NeuralNet objective. :param layers: The underlying layers of the NeuralNetwork :param loss_function: The callable loss function to use :param target: the expected output :param aggregate: the aggregation function to use :param deterministic: Whether or not to get a deterministic output :param l1: Optional l1 regularization parameter :param l2: Optional l2 regularization parameter :param get_output_kw: optional kwargs to pass to :meth:`NeuralNetwork.get_output` :return: The total calculated loss
[ "Default", "implementation", "of", "the", "NeuralNet", "objective", "." ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L166-L202
-1
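The l1/l2 handling above simply adds scaled penalty sums to the aggregated data loss; the same arithmetic in plain numpy (a sketch of the idea, not lasagne code):

import numpy as np

params = [np.array([0.5, -1.0]), np.array([2.0])]  # stand-ins for layer weights
data_loss = 0.40
l1, l2 = 0.01, 0.001

loss = data_loss
if l1:
    loss += l1 * sum(np.abs(w).sum() for w in params)  # 0.01 * 3.5
if l2:
    loss += l2 * sum((w ** 2).sum() for w in params)   # 0.001 * 5.25
print(loss)  # 0.40 + 0.035 + 0.00525 ~= 0.44025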
573
dnouri/nolearn
nolearn/lasagne/base.py
NeuralNet.initialize
def initialize(self): """Initializes the network. Checks that no extra kwargs were passed to the constructor, and compiles the train, predict, and evaluation functions. Subsequent calls to this function will return without any action. """ if getattr(self, '_initialized', False): return out = getattr(self, '_output_layers', None) if out is None: self.initialize_layers() self._check_for_unused_kwargs() iter_funcs = self._create_iter_funcs( self.layers_, self.objective, self.update, self.y_tensor_type, ) self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs self._initialized = True
python
def initialize(self): """Initializes the network. Checks that no extra kwargs were passed to the constructor, and compiles the train, predict, and evaluation functions. Subsequent calls to this function will return without any action. """ if getattr(self, '_initialized', False): return out = getattr(self, '_output_layers', None) if out is None: self.initialize_layers() self._check_for_unused_kwargs() iter_funcs = self._create_iter_funcs( self.layers_, self.objective, self.update, self.y_tensor_type, ) self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs self._initialized = True
[ "def", "initialize", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'_initialized'", ",", "False", ")", ":", "return", "out", "=", "getattr", "(", "self", ",", "'_output_layers'", ",", "None", ")", "if", "out", "is", "None", ":", "self", ".", "initialize_layers", "(", ")", "self", ".", "_check_for_unused_kwargs", "(", ")", "iter_funcs", "=", "self", ".", "_create_iter_funcs", "(", "self", ".", "layers_", ",", "self", ".", "objective", ",", "self", ".", "update", ",", "self", ".", "y_tensor_type", ",", ")", "self", ".", "train_iter_", ",", "self", ".", "eval_iter_", ",", "self", ".", "predict_iter_", "=", "iter_funcs", "self", ".", "_initialized", "=", "True" ]
Initializes the network. Checks that no extra kwargs were passed to the constructor, and compiles the train, predict, and evaluation functions. Subsequent calls to this function will return without any action.
[ "Initializes", "the", "network", ".", "Checks", "that", "no", "extra", "kwargs", "were", "passed", "to", "the", "constructor", "and", "compiles", "the", "train", "predict", "and", "evaluation", "functions", "." ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L473-L493
-1
574
dnouri/nolearn
nolearn/lasagne/base.py
NeuralNet.fit
def fit(self, X, y, epochs=None): """ Runs the training loop for a given number of epochs :param X: The input data :param y: The ground truth :param epochs: The number of epochs to run, if `None` runs for the network's :attr:`max_epochs` :return: This instance """ if self.check_input: X, y = self._check_good_input(X, y) if self.use_label_encoder: self.enc_ = LabelEncoder() y = self.enc_.fit_transform(y).astype(np.int32) self.classes_ = self.enc_.classes_ self.initialize() try: self.train_loop(X, y, epochs=epochs) except KeyboardInterrupt: pass return self
python
def fit(self, X, y, epochs=None): """ Runs the training loop for a given number of epochs :param X: The input data :param y: The ground truth :param epochs: The number of epochs to run, if `None` runs for the network's :attr:`max_epochs` :return: This instance """ if self.check_input: X, y = self._check_good_input(X, y) if self.use_label_encoder: self.enc_ = LabelEncoder() y = self.enc_.fit_transform(y).astype(np.int32) self.classes_ = self.enc_.classes_ self.initialize() try: self.train_loop(X, y, epochs=epochs) except KeyboardInterrupt: pass return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ",", "epochs", "=", "None", ")", ":", "if", "self", ".", "check_input", ":", "X", ",", "y", "=", "self", ".", "_check_good_input", "(", "X", ",", "y", ")", "if", "self", ".", "use_label_encoder", ":", "self", ".", "enc_", "=", "LabelEncoder", "(", ")", "y", "=", "self", ".", "enc_", ".", "fit_transform", "(", "y", ")", ".", "astype", "(", "np", ".", "int32", ")", "self", ".", "classes_", "=", "self", ".", "enc_", ".", "classes_", "self", ".", "initialize", "(", ")", "try", ":", "self", ".", "train_loop", "(", "X", ",", "y", ",", "epochs", "=", "epochs", ")", "except", "KeyboardInterrupt", ":", "pass", "return", "self" ]
Runs the training loop for a given number of epochs :param X: The input data :param y: The ground truth :param epochs: The number of epochs to run, if `None` runs for the network's :attr:`max_epochs` :return: This instance
[ "Runs", "the", "training", "loop", "for", "a", "given", "number", "of", "epochs" ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L680-L703
-1
575
dnouri/nolearn
nolearn/lasagne/base.py
NeuralNet.partial_fit
def partial_fit(self, X, y, classes=None): """ Runs a single epoch using the provided data :return: This instance """ return self.fit(X, y, epochs=1)
python
def partial_fit(self, X, y, classes=None): """ Runs a single epoch using the provided data :return: This instance """ return self.fit(X, y, epochs=1)
[ "def", "partial_fit", "(", "self", ",", "X", ",", "y", ",", "classes", "=", "None", ")", ":", "return", "self", ".", "fit", "(", "X", ",", "y", ",", "epochs", "=", "1", ")" ]
Runs a single epoch using the provided data :return: This instance
[ "Runs", "a", "single", "epoch", "using", "the", "provided", "data" ]
2ef346c869e80fc90247916e4aea5cfa7cf2edda
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L705-L711
-1
576
pennersr/django-trackstats
trackstats/models.py
ByDateQuerySetMixin.narrow
def narrow(self, **kwargs): """Up to and including""" from_date = kwargs.pop('from_date', None) to_date = kwargs.pop('to_date', None) date = kwargs.pop('date', None) qs = self if from_date: qs = qs.filter(date__gte=from_date) if to_date: qs = qs.filter(date__lte=to_date) if date: qs = qs.filter(date=date) return super(ByDateQuerySetMixin, qs).narrow(**kwargs)
python
def narrow(self, **kwargs): """Up to and including""" from_date = kwargs.pop('from_date', None) to_date = kwargs.pop('to_date', None) date = kwargs.pop('date', None) qs = self if from_date: qs = qs.filter(date__gte=from_date) if to_date: qs = qs.filter(date__lte=to_date) if date: qs = qs.filter(date=date) return super(ByDateQuerySetMixin, qs).narrow(**kwargs)
[ "def", "narrow", "(", "self", ",", "*", "*", "kwargs", ")", ":", "from_date", "=", "kwargs", ".", "pop", "(", "'from_date'", ",", "None", ")", "to_date", "=", "kwargs", ".", "pop", "(", "'to_date'", ",", "None", ")", "date", "=", "kwargs", ".", "pop", "(", "'date'", ",", "None", ")", "qs", "=", "self", "if", "from_date", ":", "qs", "=", "qs", ".", "filter", "(", "date__gte", "=", "from_date", ")", "if", "to_date", ":", "qs", "=", "qs", ".", "filter", "(", "date__lte", "=", "to_date", ")", "if", "date", ":", "qs", "=", "qs", ".", "filter", "(", "date", "=", "date", ")", "return", "super", "(", "ByDateQuerySetMixin", ",", "qs", ")", ".", "narrow", "(", "*", "*", "kwargs", ")" ]
Up to and including
[ "Up", "-", "to", "including" ]
4c36e769cb02017675a86de405afcd4e10ed3356
https://github.com/pennersr/django-trackstats/blob/4c36e769cb02017675a86de405afcd4e10ed3356/trackstats/models.py#L230-L242
-1
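A usage sketch for the mixin above; `SomeStat` is a hypothetical model whose manager builds its queryset on ByDateQuerySetMixin:

from datetime import date

qs = SomeStat.objects.all()

# from_date/to_date become date__gte / date__lte, i.e. an inclusive range
week = qs.narrow(from_date=date(2024, 1, 1), to_date=date(2024, 1, 7))

# date pins a single day; leftover kwargs pass through to super().narrow()
one_day = qs.narrow(date=date(2024, 1, 3))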
577
HDE/python-lambda-local
lambda_local/environment_variables.py
set_environment_variables
def set_environment_variables(json_file_path): """ Read and set environment variables from a flat json file. Bear in mind that env vars set this way and later on read using the `os.getenv` function will be strings since, after all, env vars are just that - plain strings. Json file example: ``` { "FOO": "bar", "BAZ": true } ``` :param json_file_path: path to flat json file :type json_file_path: str """ if json_file_path: with open(json_file_path) as json_file: env_vars = json.loads(json_file.read()) export_variables(env_vars)
python
def set_environment_variables(json_file_path): """ Read and set environment variables from a flat json file. Bear in mind that env vars set this way and later on read using the `os.getenv` function will be strings since, after all, env vars are just that - plain strings. Json file example: ``` { "FOO": "bar", "BAZ": true } ``` :param json_file_path: path to flat json file :type json_file_path: str """ if json_file_path: with open(json_file_path) as json_file: env_vars = json.loads(json_file.read()) export_variables(env_vars)
[ "def", "set_environment_variables", "(", "json_file_path", ")", ":", "if", "json_file_path", ":", "with", "open", "(", "json_file_path", ")", "as", "json_file", ":", "env_vars", "=", "json", ".", "loads", "(", "json_file", ".", "read", "(", ")", ")", "export_variables", "(", "env_vars", ")" ]
Read and set environment variables from a flat json file. Bear in mind that env vars set this way and later on read using the `os.getenv` function will be strings since, after all, env vars are just that - plain strings. Json file example: ``` { "FOO": "bar", "BAZ": true } ``` :param json_file_path: path to flat json file :type json_file_path: str
[ "Read", "and", "set", "environment", "variables", "from", "a", "flat", "json", "file", "." ]
49ad011a039974f1d8f904435eb8db895558d2d9
https://github.com/HDE/python-lambda-local/blob/49ad011a039974f1d8f904435eb8db895558d2d9/lambda_local/environment_variables.py#L10-L33
-1
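A self-contained usage sketch for the helper above, assuming it is importable from the module path shown in this record:

import json
import os
import tempfile
from lambda_local.environment_variables import set_environment_variables

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({"FOO": "bar"}, f)
    path = f.name

set_environment_variables(path)
print(os.getenv("FOO"))  # 'bar' -- read back, env vars are always strings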
578
HDE/python-lambda-local
lambda_local/context.py
millis_interval
def millis_interval(start, end): """start and end are datetime instances""" diff = end - start millis = diff.days * 24 * 60 * 60 * 1000 millis += diff.seconds * 1000 millis += diff.microseconds / 1000 return millis
python
def millis_interval(start, end): """start and end are datetime instances""" diff = end - start millis = diff.days * 24 * 60 * 60 * 1000 millis += diff.seconds * 1000 millis += diff.microseconds / 1000 return millis
[ "def", "millis_interval", "(", "start", ",", "end", ")", ":", "diff", "=", "end", "-", "start", "millis", "=", "diff", ".", "days", "*", "24", "*", "60", "*", "60", "*", "1000", "millis", "+=", "diff", ".", "seconds", "*", "1000", "millis", "+=", "diff", ".", "microseconds", "/", "1000", "return", "millis" ]
start and end are datetime instances
[ "start", "and", "end", "are", "datetime", "instances" ]
49ad011a039974f1d8f904435eb8db895558d2d9
https://github.com/HDE/python-lambda-local/blob/49ad011a039974f1d8f904435eb8db895558d2d9/lambda_local/context.py#L49-L55
-1
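Checking the arithmetic above with concrete datetimes (days, seconds and microseconds are the only fields a timedelta stores, which is why all three terms are summed); the import path follows this record:

from datetime import datetime
from lambda_local.context import millis_interval

start = datetime(2024, 1, 1, 12, 0, 0)
end = datetime(2024, 1, 2, 12, 0, 1, 500000)  # 1 day, 1 s, 500 ms later

# 1*86400000 + 1*1000 + 500000/1000 = 86401500.0
print(millis_interval(start, end))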
579
locationlabs/mockredis
mockredis/script.py
Script._import_lua
def _import_lua(load_dependencies=True): """ Import lua and dependencies. :param load_dependencies: should Lua library dependencies be loaded? :raises: RuntimeError if Lua is not available """ try: import lua except ImportError: raise RuntimeError("Lua not installed") lua_globals = lua.globals() if load_dependencies: Script._import_lua_dependencies(lua, lua_globals) return lua, lua_globals
python
def _import_lua(load_dependencies=True): """ Import lua and dependencies. :param load_dependencies: should Lua library dependencies be loaded? :raises: RuntimeError if Lua is not available """ try: import lua except ImportError: raise RuntimeError("Lua not installed") lua_globals = lua.globals() if load_dependencies: Script._import_lua_dependencies(lua, lua_globals) return lua, lua_globals
[ "def", "_import_lua", "(", "load_dependencies", "=", "True", ")", ":", "try", ":", "import", "lua", "except", "ImportError", ":", "raise", "RuntimeError", "(", "\"Lua not installed\"", ")", "lua_globals", "=", "lua", ".", "globals", "(", ")", "if", "load_dependencies", ":", "Script", ".", "_import_lua_dependencies", "(", "lua", ",", "lua_globals", ")", "return", "lua", ",", "lua_globals" ]
Import lua and dependencies. :param load_dependencies: should Lua library dependencies be loaded? :raises: RuntimeError if Lua is not available
[ "Import", "lua", "and", "dependencies", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/script.py#L54-L69
-1
580
locationlabs/mockredis
mockredis/script.py
Script._import_lua_dependencies
def _import_lua_dependencies(lua, lua_globals): """ Imports lua dependencies that are supported by redis lua scripts. The current implementation is fragile to the target platform and lua version and may be disabled if these imports are not needed. Included: - cjson lib. Pending: - base lib. - table lib. - string lib. - math lib. - debug lib. - cmsgpack lib. """ if sys.platform not in ('darwin', 'windows'): import ctypes ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL) try: lua_globals.cjson = lua.eval('require "cjson"') except RuntimeError: raise RuntimeError("cjson not installed")
python
def _import_lua_dependencies(lua, lua_globals): """ Imports lua dependencies that are supported by redis lua scripts. The current implementation is fragile to the target platform and lua version and may be disabled if these imports are not needed. Included: - cjson lib. Pending: - base lib. - table lib. - string lib. - math lib. - debug lib. - cmsgpack lib. """ if sys.platform not in ('darwin', 'windows'): import ctypes ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL) try: lua_globals.cjson = lua.eval('require "cjson"') except RuntimeError: raise RuntimeError("cjson not installed")
[ "def", "_import_lua_dependencies", "(", "lua", ",", "lua_globals", ")", ":", "if", "sys", ".", "platform", "not", "in", "(", "'darwin'", ",", "'windows'", ")", ":", "import", "ctypes", "ctypes", ".", "CDLL", "(", "'liblua5.2.so'", ",", "mode", "=", "ctypes", ".", "RTLD_GLOBAL", ")", "try", ":", "lua_globals", ".", "cjson", "=", "lua", ".", "eval", "(", "'require \"cjson\"'", ")", "except", "RuntimeError", ":", "raise", "RuntimeError", "(", "\"cjson not installed\"", ")" ]
Imports lua dependencies that are supported by redis lua scripts. The current implementation is fragile to the target platform and lua version and may be disabled if these imports are not needed. Included: - cjson lib. Pending: - base lib. - table lib. - string lib. - math lib. - debug lib. - cmsgpack lib.
[ "Imports", "lua", "dependencies", "that", "are", "supported", "by", "redis", "lua", "scripts", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/script.py#L72-L96
-1
581
locationlabs/mockredis
mockredis/client.py
MockRedis.lock
def lock(self, key, timeout=0, sleep=0): """Emulate lock.""" return MockRedisLock(self, key, timeout, sleep)
python
def lock(self, key, timeout=0, sleep=0): """Emulate lock.""" return MockRedisLock(self, key, timeout, sleep)
[ "def", "lock", "(", "self", ",", "key", ",", "timeout", "=", "0", ",", "sleep", "=", "0", ")", ":", "return", "MockRedisLock", "(", "self", ",", "key", ",", "timeout", ",", "sleep", ")" ]
Emulate lock.
[ "Emulate", "lock", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L77-L79
-1
582
locationlabs/mockredis
mockredis/client.py
MockRedis.keys
def keys(self, pattern='*'): """Emulate keys.""" # making sure the pattern is unicode/str. try: pattern = pattern.decode('utf-8') # This throws an AttributeError in python 3, or an # UnicodeEncodeError in python 2 except (AttributeError, UnicodeEncodeError): pass # Make regex out of glob styled pattern. regex = fnmatch.translate(pattern) regex = re.compile(re.sub(r'(^|[^\\])\.', r'\1[^/]', regex)) # Find every key that matches the pattern return [key for key in self.redis.keys() if regex.match(key.decode('utf-8'))]
python
def keys(self, pattern='*'): """Emulate keys.""" # making sure the pattern is unicode/str. try: pattern = pattern.decode('utf-8') # This throws an AttributeError in python 3, or an # UnicodeEncodeError in python 2 except (AttributeError, UnicodeEncodeError): pass # Make regex out of glob styled pattern. regex = fnmatch.translate(pattern) regex = re.compile(re.sub(r'(^|[^\\])\.', r'\1[^/]', regex)) # Find every key that matches the pattern return [key for key in self.redis.keys() if regex.match(key.decode('utf-8'))]
[ "def", "keys", "(", "self", ",", "pattern", "=", "'*'", ")", ":", "# making sure the pattern is unicode/str.", "try", ":", "pattern", "=", "pattern", ".", "decode", "(", "'utf-8'", ")", "# This throws an AttributeError in python 3, or an", "# UnicodeEncodeError in python 2", "except", "(", "AttributeError", ",", "UnicodeEncodeError", ")", ":", "pass", "# Make regex out of glob styled pattern.", "regex", "=", "fnmatch", ".", "translate", "(", "pattern", ")", "regex", "=", "re", ".", "compile", "(", "re", ".", "sub", "(", "r'(^|[^\\\\])\\.'", ",", "r'\\1[^/]'", ",", "regex", ")", ")", "# Find every key that matches the pattern", "return", "[", "key", "for", "key", "in", "self", ".", "redis", ".", "keys", "(", ")", "if", "regex", ".", "match", "(", "key", ".", "decode", "(", "'utf-8'", ")", ")", "]" ]
Emulate keys.
[ "Emulate", "keys", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L154-L169
-1
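The glob handling above can be exercised on its own: fnmatch.translate builds a fullmatch-style regex from the glob, and the re.sub narrows each unescaped `.` to `[^/]` (a sketch of the same two steps, outside the class):

import fnmatch
import re

pattern = 'user:*:name'
regex = fnmatch.translate(pattern)  # e.g. '(?s:user:.*:name)\\Z'
regex = re.compile(re.sub(r'(^|[^\\])\.', r'\1[^/]', regex))

print(bool(regex.match('user:42:name')))   # True
print(bool(regex.match('user:42:email')))  # False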
583
locationlabs/mockredis
mockredis/client.py
MockRedis.delete
def delete(self, *keys): """Emulate delete.""" key_counter = 0 for key in map(self._encode, keys): if key in self.redis: del self.redis[key] key_counter += 1 if key in self.timeouts: del self.timeouts[key] return key_counter
python
def delete(self, *keys): """Emulate delete.""" key_counter = 0 for key in map(self._encode, keys): if key in self.redis: del self.redis[key] key_counter += 1 if key in self.timeouts: del self.timeouts[key] return key_counter
[ "def", "delete", "(", "self", ",", "*", "keys", ")", ":", "key_counter", "=", "0", "for", "key", "in", "map", "(", "self", ".", "_encode", ",", "keys", ")", ":", "if", "key", "in", "self", ".", "redis", ":", "del", "self", ".", "redis", "[", "key", "]", "key_counter", "+=", "1", "if", "key", "in", "self", ".", "timeouts", ":", "del", "self", ".", "timeouts", "[", "key", "]", "return", "key_counter" ]
Emulate delete.
[ "Emulate", "delete", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L171-L180
-1
584
locationlabs/mockredis
mockredis/client.py
MockRedis.do_expire
def do_expire(self): """ Expire objects assuming now == time """ # Deep copy to avoid RuntimeError: dictionary changed size during iteration _timeouts = deepcopy(self.timeouts) for key, value in _timeouts.items(): if value - self.clock.now() < timedelta(0): del self.timeouts[key] # removing the expired key if key in self.redis: self.redis.pop(key, None)
python
def do_expire(self): """ Expire objects assuming now == time """ # Deep copy to avoid RuntimeError: dictionary changed size during iteration _timeouts = deepcopy(self.timeouts) for key, value in _timeouts.items(): if value - self.clock.now() < timedelta(0): del self.timeouts[key] # removing the expired key if key in self.redis: self.redis.pop(key, None)
[ "def", "do_expire", "(", "self", ")", ":", "# Deep copy to avoid RuntimeError: dictionary changed size during iteration", "_timeouts", "=", "deepcopy", "(", "self", ".", "timeouts", ")", "for", "key", ",", "value", "in", "_timeouts", ".", "items", "(", ")", ":", "if", "value", "-", "self", ".", "clock", ".", "now", "(", ")", "<", "timedelta", "(", "0", ")", ":", "del", "self", ".", "timeouts", "[", "key", "]", "# removing the expired key", "if", "key", "in", "self", ".", "redis", ":", "self", ".", "redis", ".", "pop", "(", "key", ",", "None", ")" ]
Expire objects assuming now == time
[ "Expire", "objects", "assuming", "now", "==", "time" ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L258-L269
-1
585
locationlabs/mockredis
mockredis/client.py
MockRedis.set
def set(self, key, value, ex=None, px=None, nx=False, xx=False): """ Set the ``value`` for the ``key`` in the context of the provided kwargs. As per the behavior of the redis-py lib: If nx and xx are both set, the function does nothing and None is returned. If px and ex are both set, the preference is given to px. If the key is not set for some reason, the lib function returns None. """ key = self._encode(key) value = self._encode(value) if nx and xx: return None mode = "nx" if nx else "xx" if xx else None if self._should_set(key, mode): expire = None if ex is not None: expire = ex if isinstance(ex, timedelta) else timedelta(seconds=ex) if px is not None: expire = px if isinstance(px, timedelta) else timedelta(milliseconds=px) if expire is not None and expire.total_seconds() <= 0: raise ResponseError("invalid expire time in SETEX") result = self._set(key, value) if expire: self._expire(key, expire) return result
python
def set(self, key, value, ex=None, px=None, nx=False, xx=False): """ Set the ``value`` for the ``key`` in the context of the provided kwargs. As per the behavior of the redis-py lib: If nx and xx are both set, the function does nothing and None is returned. If px and ex are both set, the preference is given to px. If the key is not set for some reason, the lib function returns None. """ key = self._encode(key) value = self._encode(value) if nx and xx: return None mode = "nx" if nx else "xx" if xx else None if self._should_set(key, mode): expire = None if ex is not None: expire = ex if isinstance(ex, timedelta) else timedelta(seconds=ex) if px is not None: expire = px if isinstance(px, timedelta) else timedelta(milliseconds=px) if expire is not None and expire.total_seconds() <= 0: raise ResponseError("invalid expire time in SETEX") result = self._set(key, value) if expire: self._expire(key, expire) return result
[ "def", "set", "(", "self", ",", "key", ",", "value", ",", "ex", "=", "None", ",", "px", "=", "None", ",", "nx", "=", "False", ",", "xx", "=", "False", ")", ":", "key", "=", "self", ".", "_encode", "(", "key", ")", "value", "=", "self", ".", "_encode", "(", "value", ")", "if", "nx", "and", "xx", ":", "return", "None", "mode", "=", "\"nx\"", "if", "nx", "else", "\"xx\"", "if", "xx", "else", "None", "if", "self", ".", "_should_set", "(", "key", ",", "mode", ")", ":", "expire", "=", "None", "if", "ex", "is", "not", "None", ":", "expire", "=", "ex", "if", "isinstance", "(", "ex", ",", "timedelta", ")", "else", "timedelta", "(", "seconds", "=", "ex", ")", "if", "px", "is", "not", "None", ":", "expire", "=", "px", "if", "isinstance", "(", "px", ",", "timedelta", ")", "else", "timedelta", "(", "milliseconds", "=", "px", ")", "if", "expire", "is", "not", "None", "and", "expire", ".", "total_seconds", "(", ")", "<=", "0", ":", "raise", "ResponseError", "(", "\"invalid expire time in SETEX\"", ")", "result", "=", "self", ".", "_set", "(", "key", ",", "value", ")", "if", "expire", ":", "self", ".", "_expire", "(", "key", ",", "expire", ")", "return", "result" ]
Set the ``value`` for the ``key`` in the context of the provided kwargs. As per the behavior of the redis-py lib: If nx and xx are both set, the function does nothing and None is returned. If px and ex are both set, the preference is given to px. If the key is not set for some reason, the lib function returns None.
[ "Set", "the", "value", "for", "the", "key", "in", "the", "context", "of", "the", "provided", "kwargs", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L313-L342
-1
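The `set` record above encodes the redis-py contract in three rules: `nx` and `xx` together are a no-op returning None, `px` takes precedence over `ex` (it is simply assigned last), and a non-positive TTL raises `ResponseError`. A hedged usage sketch — `mock_redis_client` is the factory mockredis documents, and the outcomes follow the docstring above:

```python
from mockredis import mock_redis_client

r = mock_redis_client()

r.set("k", "v", nx=True)            # key absent, so the set happens
r.set("k", "w", nx=True)            # key exists -> None, value unchanged
r.set("k", "w", xx=True)            # key exists, so the overwrite happens
r.set("k", "x", nx=True, xx=True)   # contradictory flags -> None, no-op

r.set("t", "v", ex=10, px=500)      # px is applied last: effective TTL 500 ms
# r.set("t", "v", ex=0)             # would raise ResponseError("invalid expire time in SETEX")
```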
586
locationlabs/mockredis
mockredis/client.py
MockRedis._should_set
def _should_set(self, key, mode): """ Determine if it is okay to set a key. If the mode is None, returns True, otherwise, returns True or False based on the value of ``key`` and the ``mode`` (nx | xx). """ if mode is None or mode not in ["nx", "xx"]: return True if mode == "nx": if key in self.redis: # nx means set only if key is absent # false if the key already exists return False elif key not in self.redis: # at this point mode can only be xx # xx means set only if the key already exists # false if is absent return False # for all other cases, return true return True
python
def _should_set(self, key, mode): """ Determine if it is okay to set a key. If the mode is None, returns True, otherwise, returns True or False based on the value of ``key`` and the ``mode`` (nx | xx). """ if mode is None or mode not in ["nx", "xx"]: return True if mode == "nx": if key in self.redis: # nx means set only if key is absent # false if the key already exists return False elif key not in self.redis: # at this point mode can only be xx # xx means set only if the key already exists # false if is absent return False # for all other cases, return true return True
[ "def", "_should_set", "(", "self", ",", "key", ",", "mode", ")", ":", "if", "mode", "is", "None", "or", "mode", "not", "in", "[", "\"nx\"", ",", "\"xx\"", "]", ":", "return", "True", "if", "mode", "==", "\"nx\"", ":", "if", "key", "in", "self", ".", "redis", ":", "# nx means set only if key is absent", "# false if the key already exists", "return", "False", "elif", "key", "not", "in", "self", ".", "redis", ":", "# at this point mode can only be xx", "# xx means set only if the key already exists", "# false if is absent", "return", "False", "# for all other cases, return true", "return", "True" ]
Determine if it is okay to set a key. If the mode is None, returns True, otherwise, returns True or False based on the value of ``key`` and the ``mode`` (nx | xx).
[ "Determine", "if", "it", "is", "okay", "to", "set", "a", "key", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L359-L381
-1
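In `_should_set`, the `mode is None` test is redundant (`None` is never in `["nx", "xx"]`), and the branching collapses to a single comparison against key presence. A condensed equivalent, offered as a sketch rather than a proposed patch:

```python
def _should_set(self, key, mode):
    # No mode (or an unrecognized one): always set.
    if mode not in ("nx", "xx"):
        return True
    # "nx" sets only when the key is absent; "xx" only when it is present.
    return (key not in self.redis) if mode == "nx" else (key in self.redis)
```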
587
locationlabs/mockredis
mockredis/client.py
MockRedis.setex
def setex(self, name, time, value): """ Set the value of ``name`` to ``value`` that expires in ``time`` seconds. ``time`` can be represented by an integer or a Python timedelta object. """ if not self.strict: # when not strict mode swap value and time args order time, value = value, time return self.set(name, value, ex=time)
python
def setex(self, name, time, value): """ Set the value of ``name`` to ``value`` that expires in ``time`` seconds. ``time`` can be represented by an integer or a Python timedelta object. """ if not self.strict: # when not strict mode swap value and time args order time, value = value, time return self.set(name, value, ex=time)
[ "def", "setex", "(", "self", ",", "name", ",", "time", ",", "value", ")", ":", "if", "not", "self", ".", "strict", ":", "# when not strict mode swap value and time args order", "time", ",", "value", "=", "value", ",", "time", "return", "self", ".", "set", "(", "name", ",", "value", ",", "ex", "=", "time", ")" ]
Set the value of ``name`` to ``value`` that expires in ``time`` seconds. ``time`` can be represented by an integer or a Python timedelta object.
[ "Set", "the", "value", "of", "name", "to", "value", "that", "expires", "in", "time", "seconds", ".", "time", "can", "be", "represented", "by", "an", "integer", "or", "a", "Python", "timedelta", "object", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L383-L392
-1
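`setex` is where mockredis papers over the legacy `Redis` vs `StrictRedis` argument orders: in non-strict mode the time and value arguments arrive swapped and are swapped back before delegating to `set`. Assuming the two client factories mockredis exposes, usage looks like:

```python
from mockredis import mock_redis_client, mock_strict_redis_client

strict = mock_strict_redis_client()
strict.setex("k", 10, "v")    # StrictRedis order: (name, time, value)

legacy = mock_redis_client()
legacy.setex("k", "v", 10)    # legacy order: (name, value, time); swapped internally

# Both calls end up as self.set(name, value, ex=time).
```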
588
locationlabs/mockredis
mockredis/client.py
MockRedis.psetex
def psetex(self, key, time, value): """ Set the value of ``key`` to ``value`` that expires in ``time`` milliseconds. ``time`` can be represented by an integer or a Python timedelta object. """ return self.set(key, value, px=time)
python
def psetex(self, key, time, value): """ Set the value of ``key`` to ``value`` that expires in ``time`` milliseconds. ``time`` can be represented by an integer or a Python timedelta object. """ return self.set(key, value, px=time)
[ "def", "psetex", "(", "self", ",", "key", ",", "time", ",", "value", ")", ":", "return", "self", ".", "set", "(", "key", ",", "value", ",", "px", "=", "time", ")" ]
Set the value of ``key`` to ``value`` that expires in ``time`` milliseconds. ``time`` can be represented by an integer or a Python timedelta object.
[ "Set", "the", "value", "of", "key", "to", "value", "that", "expires", "in", "time", "milliseconds", ".", "time", "can", "be", "represented", "by", "an", "integer", "or", "a", "Python", "timedelta", "object", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L394-L400
-1
589
locationlabs/mockredis
mockredis/client.py
MockRedis.setnx
def setnx(self, key, value): """Set the value of ``key`` to ``value`` if key doesn't exist""" return self.set(key, value, nx=True)
python
def setnx(self, key, value): """Set the value of ``key`` to ``value`` if key doesn't exist""" return self.set(key, value, nx=True)
[ "def", "setnx", "(", "self", ",", "key", ",", "value", ")", ":", "return", "self", ".", "set", "(", "key", ",", "value", ",", "nx", "=", "True", ")" ]
Set the value of ``key`` to ``value`` if key doesn't exist
[ "Set", "the", "value", "of", "key", "to", "value", "if", "key", "doesn", "t", "exist" ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L402-L404
-1
590
locationlabs/mockredis
mockredis/client.py
MockRedis.setbit
def setbit(self, key, offset, value): """ Set the bit at ``offset`` in ``key`` to ``value``. """ key = self._encode(key) index, bits, mask = self._get_bits_and_offset(key, offset) if index >= len(bits): bits.extend(b"\x00" * (index + 1 - len(bits))) prev_val = 1 if (bits[index] & mask) else 0 if value: bits[index] |= mask else: bits[index] &= ~mask self.redis[key] = bytes(bits) return prev_val
python
def setbit(self, key, offset, value): """ Set the bit at ``offset`` in ``key`` to ``value``. """ key = self._encode(key) index, bits, mask = self._get_bits_and_offset(key, offset) if index >= len(bits): bits.extend(b"\x00" * (index + 1 - len(bits))) prev_val = 1 if (bits[index] & mask) else 0 if value: bits[index] |= mask else: bits[index] &= ~mask self.redis[key] = bytes(bits) return prev_val
[ "def", "setbit", "(", "self", ",", "key", ",", "offset", ",", "value", ")", ":", "key", "=", "self", ".", "_encode", "(", "key", ")", "index", ",", "bits", ",", "mask", "=", "self", ".", "_get_bits_and_offset", "(", "key", ",", "offset", ")", "if", "index", ">=", "len", "(", "bits", ")", ":", "bits", ".", "extend", "(", "b\"\\x00\"", "*", "(", "index", "+", "1", "-", "len", "(", "bits", ")", ")", ")", "prev_val", "=", "1", "if", "(", "bits", "[", "index", "]", "&", "mask", ")", "else", "0", "if", "value", ":", "bits", "[", "index", "]", "|=", "mask", "else", ":", "bits", "[", "index", "]", "&=", "~", "mask", "self", ".", "redis", "[", "key", "]", "=", "bytes", "(", "bits", ")", "return", "prev_val" ]
Set the bit at ``offset`` in ``key`` to ``value``.
[ "Set", "the", "bit", "at", "offset", "in", "key", "to", "value", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L465-L484
-1
591
locationlabs/mockredis
mockredis/client.py
MockRedis.getbit
def getbit(self, key, offset): """ Returns the bit value at ``offset`` in ``key``. """ key = self._encode(key) index, bits, mask = self._get_bits_and_offset(key, offset) if index >= len(bits): return 0 return 1 if (bits[index] & mask) else 0
python
def getbit(self, key, offset): """ Returns the bit value at ``offset`` in ``key``. """ key = self._encode(key) index, bits, mask = self._get_bits_and_offset(key, offset) if index >= len(bits): return 0 return 1 if (bits[index] & mask) else 0
[ "def", "getbit", "(", "self", ",", "key", ",", "offset", ")", ":", "key", "=", "self", ".", "_encode", "(", "key", ")", "index", ",", "bits", ",", "mask", "=", "self", ".", "_get_bits_and_offset", "(", "key", ",", "offset", ")", "if", "index", ">=", "len", "(", "bits", ")", ":", "return", "0", "return", "1", "if", "(", "bits", "[", "index", "]", "&", "mask", ")", "else", "0" ]
Returns the bit value at ``offset`` in ``key``.
[ "Returns", "the", "bit", "value", "at", "offset", "in", "key", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L486-L496
-1
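`setbit` and `getbit` share `_get_bits_and_offset` (not shown in this section) to map a bit offset to a byte index and mask. Reading past the end of the value returns 0 rather than raising, while writing past the end zero-pads the string, matching Redis. A sketch, reusing the client from the earlier examples:

```python
r = mock_redis_client()

r.setbit("bits", 7, 1)     # returns the previous bit value: 0
r.getbit("bits", 7)        # -> 1
r.getbit("bits", 100)      # offset beyond the value -> 0, no error
r.setbit("bits", 100, 1)   # value is zero-padded out to byte index 12 (100 // 8)
```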
592
locationlabs/mockredis
mockredis/client.py
MockRedis.hexists
def hexists(self, hashkey, attribute): """Emulate hexists.""" redis_hash = self._get_hash(hashkey, 'HEXISTS') return self._encode(attribute) in redis_hash
python
def hexists(self, hashkey, attribute): """Emulate hexists.""" redis_hash = self._get_hash(hashkey, 'HEXISTS') return self._encode(attribute) in redis_hash
[ "def", "hexists", "(", "self", ",", "hashkey", ",", "attribute", ")", ":", "redis_hash", "=", "self", ".", "_get_hash", "(", "hashkey", ",", "'HEXISTS'", ")", "return", "self", ".", "_encode", "(", "attribute", ")", "in", "redis_hash" ]
Emulate hexists.
[ "Emulate", "hexists", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L506-L510
-1
593
locationlabs/mockredis
mockredis/client.py
MockRedis.hget
def hget(self, hashkey, attribute): """Emulate hget.""" redis_hash = self._get_hash(hashkey, 'HGET') return redis_hash.get(self._encode(attribute))
python
def hget(self, hashkey, attribute): """Emulate hget.""" redis_hash = self._get_hash(hashkey, 'HGET') return redis_hash.get(self._encode(attribute))
[ "def", "hget", "(", "self", ",", "hashkey", ",", "attribute", ")", ":", "redis_hash", "=", "self", ".", "_get_hash", "(", "hashkey", ",", "'HGET'", ")", "return", "redis_hash", ".", "get", "(", "self", ".", "_encode", "(", "attribute", ")", ")" ]
Emulate hget.
[ "Emulate", "hget", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L512-L516
-1
594
locationlabs/mockredis
mockredis/client.py
MockRedis.hmset
def hmset(self, hashkey, value): """Emulate hmset.""" redis_hash = self._get_hash(hashkey, 'HMSET', create=True) for key, value in value.items(): attribute = self._encode(key) redis_hash[attribute] = self._encode(value) return True
python
def hmset(self, hashkey, value): """Emulate hmset.""" redis_hash = self._get_hash(hashkey, 'HMSET', create=True) for key, value in value.items(): attribute = self._encode(key) redis_hash[attribute] = self._encode(value) return True
[ "def", "hmset", "(", "self", ",", "hashkey", ",", "value", ")", ":", "redis_hash", "=", "self", ".", "_get_hash", "(", "hashkey", ",", "'HMSET'", ",", "create", "=", "True", ")", "for", "key", ",", "value", "in", "value", ".", "items", "(", ")", ":", "attribute", "=", "self", ".", "_encode", "(", "key", ")", "redis_hash", "[", "attribute", "]", "=", "self", ".", "_encode", "(", "value", ")", "return", "True" ]
Emulate hmset.
[ "Emulate", "hmset", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L543-L550
-1
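Together the hash records above cover the basic read/write surface. Note that `hmset` iterates `value.items()` while rebinding the loop variable `value`, shadowing the parameter; it works because the dict is captured before the loop starts, but it is easy to misread. A usage sketch — the byte results assume `_encode` round-trips to bytes, as redis-py does; upstream redis-py has since deprecated `hmset` in favor of `hset` with a `mapping` argument, which this mock predates:

```python
r.hmset("h", {"a": 1, "b": 2})       # -> True
r.hmget("h", "a", "b", "missing")    # -> [b'1', b'2', None]
r.hget("h", "a")                     # -> b'1'
r.hexists("h", "a")                  # -> True
```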
595
locationlabs/mockredis
mockredis/client.py
MockRedis.hmget
def hmget(self, hashkey, keys, *args): """Emulate hmget.""" redis_hash = self._get_hash(hashkey, 'HMGET') attributes = self._list_or_args(keys, args) return [redis_hash.get(self._encode(attribute)) for attribute in attributes]
python
def hmget(self, hashkey, keys, *args): """Emulate hmget.""" redis_hash = self._get_hash(hashkey, 'HMGET') attributes = self._list_or_args(keys, args) return [redis_hash.get(self._encode(attribute)) for attribute in attributes]
[ "def", "hmget", "(", "self", ",", "hashkey", ",", "keys", ",", "*", "args", ")", ":", "redis_hash", "=", "self", ".", "_get_hash", "(", "hashkey", ",", "'HMGET'", ")", "attributes", "=", "self", ".", "_list_or_args", "(", "keys", ",", "args", ")", "return", "[", "redis_hash", ".", "get", "(", "self", ".", "_encode", "(", "attribute", ")", ")", "for", "attribute", "in", "attributes", "]" ]
Emulate hmget.
[ "Emulate", "hmget", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L552-L557
-1
596
locationlabs/mockredis
mockredis/client.py
MockRedis.hset
def hset(self, hashkey, attribute, value): """Emulate hset.""" redis_hash = self._get_hash(hashkey, 'HSET', create=True) attribute = self._encode(attribute) attribute_present = attribute in redis_hash redis_hash[attribute] = self._encode(value) return long(0) if attribute_present else long(1)
python
def hset(self, hashkey, attribute, value): """Emulate hset.""" redis_hash = self._get_hash(hashkey, 'HSET', create=True) attribute = self._encode(attribute) attribute_present = attribute in redis_hash redis_hash[attribute] = self._encode(value) return long(0) if attribute_present else long(1)
[ "def", "hset", "(", "self", ",", "hashkey", ",", "attribute", ",", "value", ")", ":", "redis_hash", "=", "self", ".", "_get_hash", "(", "hashkey", ",", "'HSET'", ",", "create", "=", "True", ")", "attribute", "=", "self", ".", "_encode", "(", "attribute", ")", "attribute_present", "=", "attribute", "in", "redis_hash", "redis_hash", "[", "attribute", "]", "=", "self", ".", "_encode", "(", "value", ")", "return", "long", "(", "0", ")", "if", "attribute_present", "else", "long", "(", "1", ")" ]
Emulate hset.
[ "Emulate", "hset", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L559-L566
-1
597
locationlabs/mockredis
mockredis/client.py
MockRedis.hsetnx
def hsetnx(self, hashkey, attribute, value): """Emulate hsetnx.""" redis_hash = self._get_hash(hashkey, 'HSETNX', create=True) attribute = self._encode(attribute) if attribute in redis_hash: return long(0) else: redis_hash[attribute] = self._encode(value) return long(1)
python
def hsetnx(self, hashkey, attribute, value): """Emulate hsetnx.""" redis_hash = self._get_hash(hashkey, 'HSETNX', create=True) attribute = self._encode(attribute) if attribute in redis_hash: return long(0) else: redis_hash[attribute] = self._encode(value) return long(1)
[ "def", "hsetnx", "(", "self", ",", "hashkey", ",", "attribute", ",", "value", ")", ":", "redis_hash", "=", "self", ".", "_get_hash", "(", "hashkey", ",", "'HSETNX'", ",", "create", "=", "True", ")", "attribute", "=", "self", ".", "_encode", "(", "attribute", ")", "if", "attribute", "in", "redis_hash", ":", "return", "long", "(", "0", ")", "else", ":", "redis_hash", "[", "attribute", "]", "=", "self", ".", "_encode", "(", "value", ")", "return", "long", "(", "1", ")" ]
Emulate hsetnx.
[ "Emulate", "hsetnx", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L568-L577
-1
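`hset` and `hsetnx` share Redis's return convention: 1 when a field is created, 0 otherwise (`long` is mockredis's Python 2/3 compatibility alias; under Python 3 it is effectively `int`). The difference is that `hsetnx` leaves an existing field untouched:

```python
r.hset("h2", "f", "v")      # field created -> 1
r.hset("h2", "f", "v2")     # field overwritten -> 0
r.hsetnx("h2", "f", "v3")   # field exists -> 0, value stays b'v2'
r.hsetnx("h2", "g", "v")    # field absent -> 1
```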
598
locationlabs/mockredis
mockredis/client.py
MockRedis.hincrby
def hincrby(self, hashkey, attribute, increment=1): """Emulate hincrby.""" return self._hincrby(hashkey, attribute, 'HINCRBY', long, increment)
python
def hincrby(self, hashkey, attribute, increment=1): """Emulate hincrby.""" return self._hincrby(hashkey, attribute, 'HINCRBY', long, increment)
[ "def", "hincrby", "(", "self", ",", "hashkey", ",", "attribute", ",", "increment", "=", "1", ")", ":", "return", "self", ".", "_hincrby", "(", "hashkey", ",", "attribute", ",", "'HINCRBY'", ",", "long", ",", "increment", ")" ]
Emulate hincrby.
[ "Emulate", "hincrby", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L579-L582
-1
599
locationlabs/mockredis
mockredis/client.py
MockRedis.hincrbyfloat
def hincrbyfloat(self, hashkey, attribute, increment=1.0): """Emulate hincrbyfloat.""" return self._hincrby(hashkey, attribute, 'HINCRBYFLOAT', float, increment)
python
def hincrbyfloat(self, hashkey, attribute, increment=1.0): """Emulate hincrbyfloat.""" return self._hincrby(hashkey, attribute, 'HINCRBYFLOAT', float, increment)
[ "def", "hincrbyfloat", "(", "self", ",", "hashkey", ",", "attribute", ",", "increment", "=", "1.0", ")", ":", "return", "self", ".", "_hincrby", "(", "hashkey", ",", "attribute", ",", "'HINCRBYFLOAT'", ",", "float", ",", "increment", ")" ]
Emulate hincrbyfloat.
[ "Emulate", "hincrbyfloat", "." ]
fd4e3117066ff0c24e86ebca007853a8092e3254
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L584-L587
-1
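Both increment helpers delegate to a shared `_hincrby` (not shown in this section), parameterized only by the numeric type. Assuming it follows Redis semantics, where a missing field counts as zero, counters can be built without initialization:

```python
r.hincrby("stats", "hits")              # missing field starts at 0 -> 1
r.hincrby("stats", "hits", 5)           # -> 6
r.hincrbyfloat("stats", "ratio", 0.5)   # -> 0.5
r.hincrbyfloat("stats", "ratio", 0.25)  # -> 0.75
```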