Dataset columns (min/max per column; string lengths in characters, sequence lengths in tokens):

    column                       type        min        max
    repository_name              string      7          55
    func_path_in_repository     string      4          223
    func_name                    string      1          134
    whole_func_string            string      75         104k
    language                     string      1 value (python)
    func_code_string             string      75         104k
    func_code_tokens             sequence    19         28.4k
    func_documentation_string    string      1          46.9k
    func_documentation_tokens    sequence    1          1.97k
    split_name                   string      1 value (train)
    func_code_url                string      87         315

repository_name: lacava/few
func_path_in_repository: few/variation.py
func_name: VariationMixin.mutate
language: python
whole_func_string:

def mutate(self, p_i, func_set, term_set):  # , max_depth=2
    """point mutation, addition, removal"""
    self.point_mutate(p_i, func_set, term_set)
func_documentation_string: point mutation, addition, removal
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/variation.py#L174-L176

repository_name: lacava/few
func_path_in_repository: few/variation.py
func_name: VariationMixin.point_mutate
language: python
whole_func_string:

def point_mutate(self, p_i, func_set, term_set):
    """point mutation on individual p_i"""
    # point mutation
    x = self.random_state.randint(len(p_i))
    arity = p_i[x].arity[p_i[x].in_type]
    # find eligible replacements based on arity and type
    reps = [n for n in func_set + term_set
            if n.arity[n.in_type] == arity
            and n.out_type == p_i[x].out_type
            and n.in_type == p_i[x].in_type]
    tmp = reps[self.random_state.randint(len(reps))]
    tmp_p = p_i[:]
    p_i[x] = tmp
    if not self.is_valid_program(p_i):
        print("old:", tmp_p)
        print("new:", p_i)
        raise ValueError('Mutation produced an invalid program.')
func_documentation_string: point mutation on individual p_i
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/variation.py#L178-L194
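
The eligibility filter in point_mutate keeps mutation type-safe: a node may only be swapped for another node with the same arity and the same input/output types. A minimal sketch of that filter, using a namedtuple stand-in for few's node class (an assumption for illustration; the real class carries more state):

from collections import namedtuple

# hypothetical stand-in for few's node class
Node = namedtuple('Node', ['name', 'arity', 'in_type', 'out_type'])

plus = Node('+', {'f': 2}, 'f', 'f')
minus = Node('-', {'f': 2}, 'f', 'f')
sin = Node('sin', {'f': 1}, 'f', 'f')
x0 = Node('x0', {'f': 0}, 'f', 'f')

func_set, term_set = [plus, minus, sin], [x0]
target = plus  # the node chosen for mutation
arity = target.arity[target.in_type]

# eligible replacements must match arity and input/output types,
# exactly as in the list comprehension inside point_mutate
reps = [n for n in func_set + term_set
        if n.arity[n.in_type] == arity
        and n.out_type == target.out_type
        and n.in_type == target.in_type]
print([n.name for n in reps])  # ['+', '-']: sin and x0 are filtered out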

repository_name: lacava/few
func_path_in_repository: few/variation.py
func_name: VariationMixin.is_valid_program
language: python
whole_func_string:

def is_valid_program(self, p):
    """checks whether program p makes a syntactically valid tree.

    checks that the accumulated program length is always greater than the
    accumulated arities, indicating that the appropriate number of arguments
    is always present for functions. It then checks that the sum of arities
    + 1 exactly equals the length of the stack, indicating that there are no
    missing arguments.
    """
    # print("p:",p)
    arities = list(a.arity[a.in_type] for a in p)
    accu_arities = list(accumulate(arities))
    accu_len = list(np.arange(len(p)) + 1)
    check = list(a < b for a, b in zip(accu_arities, accu_len))
    # print("accu_arities:",accu_arities)
    # print("accu_len:",accu_len)
    # print("accu_arities < accu_len:",accu_arities<accu_len)
    return (all(check) and sum(a.arity[a.in_type] for a in p) + 1 == len(p)
            and len(p) > 0)
func_documentation_string: checks whether program p makes a syntactically valid tree. Checks that the accumulated program length is always greater than the accumulated arities, indicating that the appropriate number of arguments is always present for functions. It then checks that the sum of arities + 1 exactly equals the length of the stack, indicating that there are no missing arguments.
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/variation.py#L206-L223
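
The arity arithmetic in that docstring can be traced by hand on a tiny postfix program. A minimal sketch, again with a hypothetical namedtuple node:

from collections import namedtuple
from itertools import accumulate

Node = namedtuple('Node', ['name', 'arity', 'in_type', 'out_type'])
x = Node('x', {'f': 0}, 'f', 'f')     # terminal: consumes no arguments
plus = Node('+', {'f': 2}, 'f', 'f')  # binary function

p = [x, x, plus]  # postfix program for x + x

arities = [a.arity[a.in_type] for a in p]   # [0, 0, 2]
accu_arities = list(accumulate(arities))    # [0, 0, 2]
accu_len = list(range(1, len(p) + 1))       # [1, 2, 3]

# every prefix must hold more nodes than arguments consumed so far...
prefix_ok = all(a < b for a, b in zip(accu_arities, accu_len))
# ...and the whole stack must balance: total arity + 1 == length
complete = sum(arities) + 1 == len(p)       # 2 + 1 == 3
print(prefix_ok and complete and len(p) > 0)  # True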

repository_name: lacava/few
func_path_in_repository: few/population.py
func_name: run_MDR
language: python
whole_func_string:

def run_MDR(n, stack_float, labels=None):
    """run utility function for MDR nodes."""
    # need to check that tmp is categorical
    x1 = stack_float.pop()
    x2 = stack_float.pop()
    # check data is categorical
    if len(np.unique(x1)) <= 3 and len(np.unique(x2)) <= 3:
        tmp = np.vstack((x1, x2)).transpose()
        if labels is None:  # prediction
            return n.model.transform(tmp)[:, 0]
        else:  # training
            out = n.model.fit_transform(tmp, labels)[:, 0]
            return out
    else:
        return np.zeros(x1.shape[0])
func_documentation_string: run utility function for MDR nodes.
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/population.py#L49-L66
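
The categorical gate in run_MDR is easy to probe in isolation; this sketch (plain numpy arrays, no MDR model involved) shows which operand pairs reach the model:

import numpy as np

x1 = np.array([0, 1, 2, 0, 1])             # 3 distinct levels: passes
x2 = np.array([0.0, 0.5, 1.1, 2.2, 3.3])   # 5 unique values: fails

gate = len(np.unique(x1)) <= 3 and len(np.unique(x2)) <= 3
print(gate)  # False -> run_MDR would return np.zeros(x1.shape[0])

x2 = np.array([0, 0, 1, 1, 2])             # recoded to 3 levels
gate = len(np.unique(x1)) <= 3 and len(np.unique(x2)) <= 3
print(gate)  # True -> the node's MDR model is fit or applied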

repository_name: lacava/few
func_path_in_repository: few/population.py
func_name: PopMixin.stack_2_eqn
language: python
whole_func_string:

def stack_2_eqn(self, p):
    """returns equation string for program stack"""
    stack_eqn = []
    if p:  # if stack is not empty
        for n in p.stack:
            self.eval_eqn(n, stack_eqn)
        return stack_eqn[-1]
    return []
func_documentation_string: returns equation string for program stack
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/population.py#L190-L197

repository_name: lacava/few
func_path_in_repository: few/population.py
func_name: PopMixin.stacks_2_eqns
language: python
whole_func_string:

def stacks_2_eqns(self, stacks):
    """returns equation strings from stacks"""
    if stacks:
        return list(map(lambda p: self.stack_2_eqn(p), stacks))
    else:
        return []
func_documentation_string: returns equation strings from stacks
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/population.py#L199-L204

repository_name: lacava/few
func_path_in_repository: few/population.py
func_name: PopMixin.make_program
language: python
whole_func_string:

def make_program(self, stack, func_set, term_set, max_d, ntype):
    """makes a program stack"""
    # print("stack:",stack,"max d:",max_d)
    if max_d == 0:
        ts = [t for t in term_set if t.out_type == ntype]
        if not ts:
            raise ValueError('no ts. ntype:' + ntype + '. term_set out_types:'
                             + ','.join([t.out_type for t in term_set]))
        stack.append(ts[self.random_state.choice(len(ts))])
    else:
        fs = [f for f in func_set if (f.out_type == ntype
                                      and (f.in_type == 'f' or max_d > 1))]
        if len(fs) == 0:
            print('ntype:', ntype, '\nfunc_set:', [f.name for f in func_set])
        stack.append(fs[self.random_state.choice(len(fs))])
        tmp = copy.copy(stack[-1])
        for i in np.arange(tmp.arity['f']):
            self.make_program(stack, func_set, term_set, max_d - 1, 'f')
        for i in np.arange(tmp.arity['b']):
            self.make_program(stack, func_set, term_set, max_d - 1, 'b')
func_documentation_string: makes a program stack
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/population.py#L207-L229
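
Note that make_program appends each function before recursing into its arguments, so the stack comes out root-first; callers (init_pop in the next record) then reverse it into postfix order. A minimal sketch of that invariant, reusing the same hypothetical node stand-in:

from collections import namedtuple
from itertools import accumulate

Node = namedtuple('Node', ['name', 'arity', 'in_type', 'out_type'])
x = Node('x', {'f': 0}, 'f', 'f')
plus = Node('+', {'f': 2}, 'f', 'f')

# a depth-1 draw of '+' appends the function, then its two terminals:
stack = [plus, x, x]               # root-first (prefix) order
program = list(reversed(stack))    # [x, x, +]: postfix, ready to evaluate

# the reversed stack satisfies the postfix validity arithmetic
arities = [a.arity[a.in_type] for a in program]
assert all(a < b for a, b in
           zip(accumulate(arities), range(1, len(program) + 1)))
assert sum(arities) + 1 == len(program)
print([n.name for n in program])   # ['x', 'x', '+']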

repository_name: lacava/few
func_path_in_repository: few/population.py
func_name: PopMixin.init_pop
language: python
whole_func_string:

def init_pop(self):
    """initializes population of features as GP stacks."""
    pop = Pop(self.population_size)
    seed_with_raw_features = False
    # make programs
    if self.seed_with_ml:
        # initial population is the components of the default ml model
        if (self.ml_type == 'SVC' or self.ml_type == 'SVR'):
            # this is needed because svm has a bug that throws valueerror
            # on attribute check
            seed_with_raw_features = True
        elif (hasattr(self.pipeline.named_steps['ml'], 'coef_') or
              hasattr(self.pipeline.named_steps['ml'],
                      'feature_importances_')):
            # add model components with non-zero coefficients to initial
            # population, in order of coefficient size
            coef = (self.pipeline.named_steps['ml'].coef_ if
                    hasattr(self.pipeline.named_steps['ml'], 'coef_') else
                    self.pipeline.named_steps['ml'].feature_importances_)
            # compress multiple coefficients for each feature into single
            # numbers (occurs with multiclass classification)
            if len(coef.shape) > 1:
                coef = [np.mean(abs(c)) for c in coef.transpose()]
            # remove zeros
            coef = [c for c in coef if c != 0]
            # sort feature locations based on importance/coefficient
            locs = np.arange(len(coef))
            locs = locs[np.argsort(np.abs(coef))[::-1]]
            for i, p in enumerate(pop.individuals):
                if i < len(locs):
                    p.stack = [node('x', loc=locs[i])]
                else:  # make program if pop is bigger than n_features
                    self.make_program(p.stack, self.func_set, self.term_set,
                                      self.random_state.randint(
                                          self.min_depth, self.max_depth + 1),
                                      self.otype)
                    p.stack = list(reversed(p.stack))
        else:
            seed_with_raw_features = True
        # seed with random features if no importance info available
        if seed_with_raw_features:
            for i, p in enumerate(pop.individuals):
                if i < self.n_features:
                    p.stack = [node('x',
                                    loc=self.random_state.randint(
                                        self.n_features))]
                else:  # make program if pop is bigger than n_features
                    self.make_program(p.stack, self.func_set, self.term_set,
                                      self.random_state.randint(
                                          self.min_depth, self.max_depth + 1),
                                      self.otype)
                    p.stack = list(reversed(p.stack))
        # print initial population
        if self.verbosity > 2:
            print("seeded initial population:",
                  self.stacks_2_eqns(pop.individuals))
    else:  # don't seed with ML
        for I in pop.individuals:
            depth = self.random_state.randint(self.min_depth,
                                              self.max_depth_init)
            self.make_program(I.stack, self.func_set, self.term_set, depth,
                              self.otype)
            # print(I.stack)
            I.stack = list(reversed(I.stack))
    return pop
func_documentation_string: initializes population of features as GP stacks.
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/population.py#L231-L298
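
The coefficient seeding in init_pop is mostly index bookkeeping; this sketch traces it on a hypothetical multiclass coef_ matrix (plain numpy, no FEW objects). Note that, as in the original code, indices after zero-removal refer to positions in the filtered list:

import numpy as np

# hypothetical coef_ from a multiclass model: 3 classes x 4 features
coef = np.array([[ 0.5, -0.1, 0.0,  2.0],
                 [-0.3,  0.2, 0.0, -1.0],
                 [ 0.1, -0.4, 0.0,  0.5]])

# compress per-class coefficients into one number per feature
if len(coef.shape) > 1:
    coef = [np.mean(abs(c)) for c in coef.transpose()]
# drop features the model ignored entirely
coef = [c for c in coef if c != 0]
# order remaining features by importance, largest first
locs = np.arange(len(coef))
locs = locs[np.argsort(np.abs(coef))[::-1]]
print(locs)  # [2 0 1]: positions sorted by decreasing mean |coef|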

repository_name: lacava/few
func_path_in_repository: few/few.py
func_name: main
language: python
whole_func_string:

def main():
    """Main function that is called when FEW is run on the command line"""
    parser = argparse.ArgumentParser(description='A feature engineering '
                                     'wrapper for machine learning '
                                     'algorithms.', add_help=False)
    parser.add_argument('INPUT_FILE', type=str,
                        help='Data file to run FEW on; ensure that the '
                             'target/label column is labeled as "label" or '
                             '"class".')
    parser.add_argument('-h', '--help', action='help',
                        help='Show this help message and exit.')
    parser.add_argument('-is', action='store', dest='INPUT_SEPARATOR',
                        default=None, type=str,
                        help='Character separating columns in the input '
                             'file.')
    parser.add_argument('-o', action='store', dest='OUTPUT_FILE', default='',
                        type=str, help='File to export the final model.')
    parser.add_argument('-g', action='store', dest='GENERATIONS', default=100,
                        type=positive_integer,
                        help='Number of generations to run FEW.')
    parser.add_argument('-p', action='store', dest='POPULATION_SIZE',
                        default=50,
                        help='Number of individuals in the GP population. '
                             'Follow the number with x to set population '
                             'size as a multiple of raw feature size.')
    parser.add_argument('-mr', action='store', dest='MUTATION_RATE',
                        default=0.5, type=float_range,
                        help='GP mutation rate in the range [0.0, 1.0].')
    parser.add_argument('-xr', action='store', dest='CROSSOVER_RATE',
                        default=0.5, type=float_range,
                        help='GP crossover rate in the range [0.0, 1.0].')
    parser.add_argument('-ml', action='store', dest='MACHINE_LEARNER',
                        default=None,
                        choices=['lasso', 'svr', 'lsvr', 'lr', 'svc', 'rfc',
                                 'rfr', 'dtc', 'dtr', 'dc', 'knc', 'knr',
                                 'sgd'],
                        type=str,
                        help='ML algorithm to pair with features. '
                             'Default: Lasso (regression), '
                             'LogisticRegression (classification)')
    parser.add_argument('-min_depth', action='store', dest='MIN_DEPTH',
                        default=1, type=positive_integer,
                        help='Minimum length of GP programs.')
    parser.add_argument('-max_depth', action='store', dest='MAX_DEPTH',
                        default=2, type=positive_integer,
                        help='Maximum number of nodes in GP programs.')
    parser.add_argument('-max_depth_init', action='store',
                        dest='MAX_DEPTH_INIT', default=2,
                        type=positive_integer,
                        help='Maximum nodes in initial programs.')
    parser.add_argument('-op_weight', action='store', dest='OP_WEIGHT',
                        default=1, type=bool,
                        help='Weight attributes for inclusion in features '
                             'based on ML scores. Default: off')
    parser.add_argument('-ms', action='store', dest='MAX_STALL', default=100,
                        type=positive_integer,
                        help='If model CV does not improve for this many '
                             'generations, end optimization.')
    parser.add_argument('--weight_parents', action='store_true',
                        dest='WEIGHT_PARENTS', default=True,
                        help='Feature importance weights parent selection.')
    parser.add_argument('--lex_size', action='store_true', dest='LEX_SIZE',
                        default=False,
                        help='Size mediated parent selection for lexicase '
                             'survival.')
    parser.add_argument('-sel', action='store', dest='SEL',
                        default='epsilon_lexicase',
                        choices=['tournament', 'lexicase', 'epsilon_lexicase',
                                 'deterministic_crowding', 'random'],
                        type=str,
                        help='Selection method (Default: tournament)')
    parser.add_argument('-tourn_size', action='store', dest='TOURN_SIZE',
                        default=2, type=positive_integer,
                        help='Tournament size (Default: 2)')
    parser.add_argument('-fit', action='store', dest='FIT_CHOICE',
                        default=None,
                        choices=['mse', 'mae', 'r2', 'vaf', 'mse_rel',
                                 'mae_rel', 'r2_rel', 'vaf_rel', 'silhouette',
                                 'inertia', 'separation', 'fisher', 'random',
                                 'relief'],
                        type=str,
                        help='Fitness metric (Default: dependent on ml '
                             'used)')
    parser.add_argument('--no_seed', action='store_false',
                        dest='SEED_WITH_ML', default=True,
                        help='Turn off initial GP population seeding.')
    parser.add_argument('--elitism', action='store_true', dest='ELITISM',
                        default=False,
                        help='Force survival of best feature in GP '
                             'population.')
    parser.add_argument('--erc', action='store_true', dest='ERC',
                        default=False,
                        help='Use random constants in GP feature '
                             'construction.')
    parser.add_argument('--bool', action='store_true', dest='BOOLEAN',
                        default=False,
                        help='Include boolean operators in features.')
    parser.add_argument('-otype', action='store', dest='OTYPE', default='f',
                        choices=['f', 'b'], type=str,
                        help='Feature output type. f: float, b: boolean.')
    parser.add_argument('-ops', action='store', dest='OPS', default=None,
                        type=str,
                        help='Specify operators separated by commas')
    parser.add_argument('-dtypes', action='store', dest='DTYPES',
                        default=None, type=str,
                        help='Specify datafile types separated by a comma')
    parser.add_argument('--class', action='store_true', dest='CLASSIFICATION',
                        default=False,
                        help='Conduct classification rather than regression.')
    parser.add_argument('--mdr', action='store_true', dest='MDR',
                        default=False, help='Use MDR nodes.')
    parser.add_argument('--nonorm', action='store_false', dest='NORMALIZE',
                        default=True,
                        help='Disable standard scaler preprocessor.')
    parser.add_argument('--diversity', action='store_true',
                        dest='TRACK_DIVERSITY', default=False,
                        help='Store diversity of feature transforms each '
                             'gen.')
    parser.add_argument('--clean', action='store_true', dest='CLEAN',
                        default=False,
                        help='Clean input data of missing values.')
    parser.add_argument('--no_lib', action='store_false', dest='c',
                        default=True,
                        help="Don't use optimized c libraries.")
    parser.add_argument('-s', action='store', dest='RANDOM_STATE',
                        default=None, type=int,
                        help='Random number generator seed for '
                             'reproducibility. Note that using '
                             'multi-threading may make exact results '
                             'impossible to reproduce.')
    parser.add_argument('-v', action='store', dest='VERBOSITY', default=1,
                        choices=[0, 1, 2, 3], type=int,
                        help='How much information FEW communicates while it '
                             'is running: 0 = none, 1 = minimal, 2 = lots, '
                             '3 = all.')
    parser.add_argument('--no-update-check', action='store_true',
                        dest='DISABLE_UPDATE_CHECK', default=False,
                        help="Don't check the FEW version.")
    parser.add_argument('--version', action='version',
                        version='FEW {version}'.format(version=__version__),
                        help="Show FEW's version number and exit.")

    args = parser.parse_args()

    # if args.VERBOSITY >= 2:
    #     print('\nFEW settings:')
    #     for arg in sorted(args.__dict__):
    #         if arg == 'DISABLE_UPDATE_CHECK':
    #             continue
    #         print('{}\t=\t{}'.format(arg, args.__dict__[arg]))
    #     print('')

    # load data from csv file
    if args.INPUT_SEPARATOR is None:
        input_data = pd.read_csv(args.INPUT_FILE, sep=args.INPUT_SEPARATOR,
                                 engine='python')
    else:  # use c engine for read_csv if separator is specified
        input_data = pd.read_csv(args.INPUT_FILE, sep=args.INPUT_SEPARATOR)

    # if 'Label' in input_data.columns.values:
    input_data.rename(columns={'Label': 'label', 'Class': 'label',
                               'class': 'label', 'target': 'label'},
                      inplace=True)

    RANDOM_STATE = args.RANDOM_STATE
    train_i, test_i = train_test_split(input_data.index,
                                       stratify=None,
                                       # stratify=input_data['label'].values,
                                       train_size=0.75,
                                       test_size=0.25,
                                       random_state=RANDOM_STATE)

    training_features = input_data.loc[train_i].drop('label', axis=1).values
    training_labels = input_data.loc[train_i, 'label'].values
    testing_features = input_data.loc[test_i].drop('label', axis=1).values
    testing_labels = input_data.loc[test_i, 'label'].values

    learner = FEW(generations=args.GENERATIONS,
                  population_size=args.POPULATION_SIZE,
                  mutation_rate=args.MUTATION_RATE,
                  crossover_rate=args.CROSSOVER_RATE,
                  ml=ml_dict[args.MACHINE_LEARNER],
                  min_depth=args.MIN_DEPTH, max_depth=args.MAX_DEPTH,
                  sel=args.SEL, tourn_size=args.TOURN_SIZE,
                  seed_with_ml=args.SEED_WITH_ML, op_weight=args.OP_WEIGHT,
                  max_stall=args.MAX_STALL, erc=args.ERC,
                  random_state=args.RANDOM_STATE, verbosity=args.VERBOSITY,
                  disable_update_check=args.DISABLE_UPDATE_CHECK,
                  fit_choice=args.FIT_CHOICE, boolean=args.BOOLEAN,
                  classification=args.CLASSIFICATION, clean=args.CLEAN,
                  track_diversity=args.TRACK_DIVERSITY, mdr=args.MDR,
                  otype=args.OTYPE, c=args.c, lex_size=args.LEX_SIZE,
                  weight_parents=args.WEIGHT_PARENTS, operators=args.OPS,
                  normalize=args.NORMALIZE, dtypes=args.DTYPES)

    learner.fit(training_features, training_labels)
    # pdb.set_trace()
    if args.VERBOSITY >= 1:
        print('\nTraining accuracy: {:1.3f}'.format(
            learner.score(training_features, training_labels)))
        print('Test accuracy: {:1.3f}'.format(
            learner.score(testing_features, testing_labels)))

    if args.OUTPUT_FILE != '':
        learner.export(args.OUTPUT_FILE)
func_documentation_string: Main function that is called when FEW is run on the command line
split_name: train
func_code_url: https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L714-L937
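
The same workflow can be driven from Python instead of the command line; a minimal sketch mirroring main() above. The import path and the file data.csv are assumptions, and the constructor arguments shown are a subset of the ones main() passes through:

import pandas as pd
from sklearn.model_selection import train_test_split
from few import FEW  # assumed import path for the installed package

input_data = pd.read_csv('data.csv')  # hypothetical file with a 'label' column

train_i, test_i = train_test_split(input_data.index, train_size=0.75,
                                   test_size=0.25, random_state=42)
X_train = input_data.loc[train_i].drop('label', axis=1).values
y_train = input_data.loc[train_i, 'label'].values
X_test = input_data.loc[test_i].drop('label', axis=1).values
y_test = input_data.loc[test_i, 'label'].values

learner = FEW(generations=100, population_size=50, random_state=42)
learner.fit(X_train, y_train)
print('Test score: {:1.3f}'.format(learner.score(X_test, y_test)))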

repository_name: lacava/few
func_path_in_repository: few/few.py
func_name: FEW.fit
language: python
whole_func_string:

def fit(self, features, labels):
    """Fit model to data"""
    # setup data
    # imputation
    if self.clean:
        features = self.impute_data(features)
    # save the number of features
    self.n_features = features.shape[1]
    self.n_samples = features.shape[0]
    # set population size
    if type(self.population_size) is str:
        if 'x' in self.population_size:  # set pop size prop to features
            self.population_size = int(
                float(self.population_size[:-1])*features.shape[1])
        else:
            self.population_size = int(self.population_size)
    if self.verbosity > 0:
        print("population size:", self.population_size)
    # re-initialize pipeline (needs to be here rather than init for
    # GridSearchCV)
    if self.normalize:
        self.pipeline = Pipeline([('standardScaler', StandardScaler()),
                                  ('ml', self.ml)])
    else:
        self.pipeline = Pipeline([('ml', self.ml)])
    # set variable names if they haven't been set
    if self.names is None:
        self.names = ['x_'+str(i) for i in np.arange(features.shape[1])]
    # set variable data types if they haven't been set
    if self.dtypes is None:
        self.dtypes = ['f' for i in np.arange(features.shape[1])]
    # create terminal set
    for i in np.arange(self.n_features):
        self.term_set.append(node('x', loc=i, otype=self.dtypes[i]))  # features
    # add ephemeral random constants if flag
    if self.erc:  # ephemeral random constants
        self.term_set.append(node('k', value=self.random_state.rand()))
    # edit function set if boolean
    if self.boolean or self.otype == 'b':  # include boolean functions
        self.func_set += [node('!'), node('&'), node('|'), node('=='),
                          node('>_f'), node('<_f'), node('>=_f'),
                          node('<=_f'), node('>_b'), node('<_b'),
                          node('>=_b'), node('<=_b'), node('xor_b'),
                          node('xor_f')]
    # add mdr if specified
    if self.mdr:
        self.func_set += [node('mdr2')]
    # print few settings
    if self.verbosity > 1:
        for arg in self.get_params():
            print('{}\t=\t{}'.format(arg, self.get_params()[arg]))
        print('')
    ######################################################### initial model
    # fit to original data
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if self.scoring_function == roc_auc_score:
            self._best_score = self.roc_auc_cv(features, labels)
        else:
            self._best_score = np.mean(
                [self.scoring_function(labels[test],
                     self.pipeline.fit(features[train], labels[train]).
                     predict(features[test]))
                 for train, test in KFold().split(features, labels)])
    initial_score = self._best_score
    if self.verbosity > 0:
        print("initial ML CV: {:1.3f}".format(self._best_score))
    ############################################# Create initial population
    # for now, force seed_with_ml to be off if otype is 'b', since data
    # types are assumed to be float
    if self.otype == 'b':
        self.seed_with_ml = False
    self.pop = self.init_pop()
    # check that uuids are unique in population
    uuids = [p.id for p in self.pop.individuals]
    if len(uuids) != len(set(uuids)):
        pdb.set_trace()
    # Evaluate the entire population
    # X represents a matrix of the population outputs (number of samples x
    # population size)
    # single thread
    self.X = self.transform(features, self.pop.individuals,
                            labels).transpose()
    # pdb.set_trace()
    # parallel:
    # X = np.asarray(Parallel(n_jobs=-1)(
    #     delayed(out)(I,features,self.otype,labels)
    #     for I in self.pop.individuals), order='F')
    # calculate fitness of individuals
    # fitnesses = list(map(lambda I: fitness(I,labels,self.pipeline),X))
    self.F = self.calc_fitness(self.X, labels, self.fit_choice, self.sel)
    # pdb.set_trace()
    # with Parallel(n_jobs=10) as parallel:
    ####################
    self.diversity = []
    # progress bar
    pbar = tqdm(total=self.generations, disable=self.verbosity == 0,
                desc='Internal CV: {:1.3f}'.format(self._best_score))
    stall_count = 0
    ######################################################### main GP loop
    for g in np.arange(self.generations):
        if stall_count == self.max_stall:
            if self.verbosity > 0:
                print('max stall count reached.')
            break
        if self.track_diversity:
            self.get_diversity(self.X)
        # mid verbosity printouts
        if self.verbosity > 1:
            print("generation", str(g))
            print("median fitness pop: %0.2f" % np.median(
                [np.mean(f) for f in self.F]))
            print("best fitness pop: %0.2f" % np.min(
                [np.mean(f) for f in self.F]))
            if self.track_diversity:
                print("feature diversity: %0.2f" % self.diversity[-1])
        # high verbosity printouts
        if self.verbosity > 2:
            eqns = self.stacks_2_eqns(self.pop.individuals)
            fs = [np.mean(f) for f in self.F]
            print("population:", [("%0.2f" % f, eqns[i]) for f, i in
                                  zip(np.sort(fs), np.argsort(fs))])
            # print("pop fitnesses:", ["%0.2f" % np.mean(f) for f in self.F])
        ###################################################### fit ml model
        if self.verbosity > 1:
            print("ml fitting...")
        tmp_score = 0
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            try:
                if self.valid_loc():
                    if self.scoring_function == roc_auc_score:
                        tmp_score = self.roc_auc_cv(
                            self.X[self.valid_loc(), :].transpose(), labels)
                    else:
                        tmp_score = np.mean(
                            [self.scoring_function(labels[test],
                                 self.pipeline.fit(
                                     self.X[self.valid_loc(), :]
                                         .transpose()[train],
                                     labels[train]).
                                 predict(self.X[self.valid_loc(), :]
                                         .transpose()[test]))
                             for train, test in
                             KFold().split(features, labels)])
            except ValueError as detail:
                print("warning: ValueError in ml fit. X.shape:",
                      self.X[:, self.valid_loc()].transpose().shape,
                      "labels shape:", labels.shape)
                print("First ten entries X:",
                      self.X[self.valid_loc(), :].transpose()[:10])
                print("First ten entries labels:", labels[:10])
                print("equations:", self.stacks_2_eqns(self.pop.individuals))
                print("FEW parameters:", self.get_params())
                print("---\ndetailed error message:", detail)
                raise(detail)
        if self.verbosity > 1:
            print("current ml validation score:", tmp_score)
        ################################################### save best model
        if self.valid_loc() and tmp_score > self._best_score:
            self._best_estimator = copy.deepcopy(self.pipeline)
            self._best_score = tmp_score
            stall_count = 0
            self._best_inds = copy.deepcopy(self.valid())
            if self.verbosity > 1:
                print("updated best internal CV:", self._best_score)
        else:
            stall_count = stall_count + 1
        ######################################################### variation
        if self.verbosity > 2:
            print("variation...")
        offspring, elite, elite_index = self.variation(self.pop.individuals)
        ################################################ evaluate offspring
        if self.verbosity > 2:
            print("output...")
        X_offspring = self.transform(features, offspring).transpose()
        if self.verbosity > 2:
            print("fitness...")
        F_offspring = self.calc_fitness(X_offspring, labels,
                                        self.fit_choice, self.sel)
        ########################################################## survival
        if self.verbosity > 2:
            print("survival..")
        survivors, survivor_index = self.survival(self.pop.individuals,
                                                  offspring, elite,
                                                  elite_index, X=self.X,
                                                  X_O=X_offspring, F=self.F,
                                                  F_O=F_offspring)
        # set survivors
        self.pop.individuals[:] = survivors
        self.X = np.vstack((self.X, X_offspring))[survivor_index]
        if 'lexicase' in self.sel:
            self.F = np.asarray(
                np.vstack((self.F, F_offspring))[survivor_index], order='F')
        else:
            self.F = np.asarray(
                np.hstack((self.F, F_offspring))[survivor_index], order='F')
        if self.verbosity > 2:
            print("median fitness survivors: %0.2f" % np.median(
                [np.mean(f) for f in self.F]))
        if self.verbosity > 2:
            print("best features:",
                  self.stacks_2_eqns(self._best_inds) if self._best_inds
                  else 'original')
        pbar.set_description('Internal CV: {:1.3f}'.format(self._best_score))
        pbar.update(1)
    # end of main GP loop ####################
    if self.verbosity > 0:
        print('finished. best internal val score:'
              ' {:1.3f}'.format(self._best_score))
    if self.verbosity > 0:
        print("final model:\n", self.print_model())
    if not self._best_estimator:
        # if no better model found, just return underlying method fit to
        # the training data
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self._best_estimator = self.pipeline.fit(features, labels)
    else:
        # fit final estimator to all the training data
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self._best_estimator.fit(self.transform(features), labels)
    return self
python
def fit(self, features, labels): """Fit model to data""" # setup data # imputation if self.clean: features = self.impute_data(features) # save the number of features self.n_features = features.shape[1] self.n_samples = features.shape[0] # set population size if type(self.population_size) is str: if 'x' in self.population_size: # set pop size prop to features self.population_size = int( float(self.population_size[:-1])*features.shape[1]) else: self.population_size = int(self.population_size) if self.verbosity >0: print("population size:",self.population_size) # re-initialize pipeline (needs to be here rather than init for GridSearchCV) if self.normalize: self.pipeline = Pipeline([('standardScaler',StandardScaler()), ('ml', self.ml)]) else: self.pipeline = Pipeline([('ml',self.ml)]) # set variable names if they haven't been set if self.names is None: self.names = ['x_'+str(i) for i in np.arange(features.shape[1])] # set variable data types if they haven't been set if self.dtypes is None: self.dtypes = ['f' for i in np.arange(features.shape[1])] # create terminal set for i in np.arange(self.n_features): self.term_set.append(node('x',loc=i,otype=self.dtypes[i])) # features # add ephemeral random constants if flag if self.erc: # ephemeral random constants self.term_set.append(node('k',value=self.random_state.rand())) # edit function set if boolean if self.boolean or self.otype=='b': # include boolean functions self.func_set += [node('!'), node('&'), node('|'), node('=='), node('>_f'), node('<_f'), node('>=_f'), node('<=_f'), node('>_b'), node('<_b'), node('>=_b'), node('<=_b'), node('xor_b'), node('xor_f')] # add mdr if specified if self.mdr: self.func_set += [node('mdr2')] # print few settings if self.verbosity > 1: for arg in self.get_params(): print('{}\t=\t{}'.format(arg, self.get_params()[arg])) print('') ######################################################### initial model # fit to original data with warnings.catch_warnings(): warnings.simplefilter("ignore") if self.scoring_function == roc_auc_score: self._best_score = self.roc_auc_cv(features,labels) else: self._best_score = np.mean( [self.scoring_function(labels[test], self.pipeline.fit(features[train],labels[train]). 
predict(features[test])) for train, test in KFold().split(features, labels)]) initial_score = self._best_score if self.verbosity > 0: print("initial ML CV: {:1.3f}".format(self._best_score)) ############################################# Create initial population # for now, force seed_with_ml to be off if otype is 'b', since data # types are assumed to be float if self.otype=='b': self.seed_with_ml = False self.pop = self.init_pop() # check that uuids are unique in population uuids = [p.id for p in self.pop.individuals] if len(uuids) != len(set(uuids)): pdb.set_trace() # Evaluate the entire population # X represents a matrix of the population outputs (number of samples x # population size) # single thread self.X = self.transform(features,self.pop.individuals,labels).transpose() # pdb.set_trace() # parallel: # X = np.asarray(Parallel(n_jobs=-1)( # delayed(out)(I,features,self.otype,labels) for I in self.pop.individuals), # order = 'F') # calculate fitness of individuals # fitnesses = list(map(lambda I: fitness(I,labels,self.pipeline),X)) self.F = self.calc_fitness(self.X,labels,self.fit_choice,self.sel) #pdb.set_trace() #with Parallel(n_jobs=10) as parallel: #################### self.diversity=[] # progress bar pbar = tqdm(total=self.generations,disable = self.verbosity==0, desc='Internal CV: {:1.3f}'.format(self._best_score)) stall_count = 0 ########################################################### main GP loop for g in np.arange(self.generations): if stall_count == self.max_stall: if self.verbosity > 0: print('max stall count reached.') break if self.track_diversity: self.get_diversity(self.X) # mid verbosity printouts if self.verbosity > 1: print("generation", str(g)) print("median fitness pop: %0.2f" % np.median( [np.mean(f) for f in self.F])) print("best fitness pop: %0.2f" % np.min( [np.mean(f) for f in self.F])) if self.track_diversity: print("feature diversity: %0.2f" % self.diversity[-1]) # high verbosity printouts if self.verbosity > 2: eqns = self.stacks_2_eqns(self.pop.individuals) fs = [np.mean(f) for f in self.F] print("population:",[("%0.2f" % f, eqns[i]) for f,i in zip(np.sort(fs), np.argsort(fs))]) #print("pop fitnesses:", ["%0.2f" % np.mean(f) for f in self.F]) ####################################################### fit ml model if self.verbosity > 1: print("ml fitting...") tmp_score=0 with warnings.catch_warnings(): warnings.simplefilter("ignore") try: if self.valid_loc(): if self.scoring_function == roc_auc_score: tmp_score = self.roc_auc_cv(self.X[self.valid_loc(),:].transpose(), labels) else: tmp_score = np.mean( [self.scoring_function(labels[test], self.pipeline.fit( self.X[self.valid_loc(),:].transpose()[train], labels[train]). predict(self.X[self.valid_loc(),:].transpose()[test])) for train, test in KFold().split(features, labels)]) except ValueError as detail: print("warning: ValueError in ml fit. 
X.shape:", self.X[:,self.valid_loc()].transpose().shape, "labels shape:",labels.shape) print("First ten entries X:", self.X[self.valid_loc(),:].transpose()[:10]) print("First ten entries labels:",labels[:10]) print("equations:",self.stacks_2_eqns(self.pop.individuals)) print("FEW parameters:",self.get_params()) print("---\ndetailed error message:", detail) raise(detail) if self.verbosity > 1: print("current ml validation score:",tmp_score) #################################################### save best model if self.valid_loc() and tmp_score > self._best_score: self._best_estimator = copy.deepcopy(self.pipeline) self._best_score = tmp_score stall_count = 0; self._best_inds = copy.deepcopy(self.valid()) if self.verbosity > 1: print("updated best internal CV:",self._best_score) else: stall_count = stall_count + 1 ########################################################## variation if self.verbosity > 2: print("variation...") offspring,elite,elite_index = self.variation(self.pop.individuals) ################################################# evaluate offspring if self.verbosity > 2: print("output...") X_offspring = self.transform(features,offspring).transpose() if self.verbosity > 2: print("fitness...") F_offspring = self.calc_fitness(X_offspring, labels,self.fit_choice,self.sel) ########################################################### survival if self.verbosity > 2: print("survival..") survivors,survivor_index = self.survival(self.pop.individuals, offspring, elite, elite_index, X = self.X, X_O=X_offspring, F=self.F, F_O=F_offspring) # set survivors self.pop.individuals[:] = survivors self.X = np.vstack((self.X, X_offspring))[survivor_index] if 'lexicase' in self.sel: self.F = np.asarray( np.vstack((self.F, F_offspring))[survivor_index], order='F') else: self.F = np.asarray( np.hstack((self.F,F_offspring))[survivor_index], order='F') if self.verbosity > 2: print("median fitness survivors: %0.2f" % np.median( [np.mean(f) for f in self.F])) if self.verbosity>2: print("best features:", self.stacks_2_eqns(self._best_inds) if self._best_inds else 'original') pbar.set_description('Internal CV: {:1.3f}'.format(self._best_score)) pbar.update(1) # end of main GP loop #################### if self.verbosity > 0: print('finished. best internal val score:' ' {:1.3f}'.format(self._best_score)) if self.verbosity > 0: print("final model:\n",self.print_model()) if not self._best_estimator: # if no better model found, just return underlying method fit to the # training data with warnings.catch_warnings(): warnings.simplefilter("ignore") self._best_estimator = self.pipeline.fit(features,labels) else: # fit final estimator to all the training data with warnings.catch_warnings(): warnings.simplefilter("ignore") self._best_estimator.fit(self.transform(features),labels) return self
[ "def", "fit", "(", "self", ",", "features", ",", "labels", ")", ":", "# setup data", "# imputation", "if", "self", ".", "clean", ":", "features", "=", "self", ".", "impute_data", "(", "features", ")", "# save the number of features", "self", ".", "n_features", "=", "features", ".", "shape", "[", "1", "]", "self", ".", "n_samples", "=", "features", ".", "shape", "[", "0", "]", "# set population size", "if", "type", "(", "self", ".", "population_size", ")", "is", "str", ":", "if", "'x'", "in", "self", ".", "population_size", ":", "# set pop size prop to features", "self", ".", "population_size", "=", "int", "(", "float", "(", "self", ".", "population_size", "[", ":", "-", "1", "]", ")", "*", "features", ".", "shape", "[", "1", "]", ")", "else", ":", "self", ".", "population_size", "=", "int", "(", "self", ".", "population_size", ")", "if", "self", ".", "verbosity", ">", "0", ":", "print", "(", "\"population size:\"", ",", "self", ".", "population_size", ")", "# re-initialize pipeline (needs to be here rather than init for GridSearchCV)", "if", "self", ".", "normalize", ":", "self", ".", "pipeline", "=", "Pipeline", "(", "[", "(", "'standardScaler'", ",", "StandardScaler", "(", ")", ")", ",", "(", "'ml'", ",", "self", ".", "ml", ")", "]", ")", "else", ":", "self", ".", "pipeline", "=", "Pipeline", "(", "[", "(", "'ml'", ",", "self", ".", "ml", ")", "]", ")", "# set variable names if they haven't been set", "if", "self", ".", "names", "is", "None", ":", "self", ".", "names", "=", "[", "'x_'", "+", "str", "(", "i", ")", "for", "i", "in", "np", ".", "arange", "(", "features", ".", "shape", "[", "1", "]", ")", "]", "# set variable data types if they haven't been set", "if", "self", ".", "dtypes", "is", "None", ":", "self", ".", "dtypes", "=", "[", "'f'", "for", "i", "in", "np", ".", "arange", "(", "features", ".", "shape", "[", "1", "]", ")", "]", "# create terminal set", "for", "i", "in", "np", ".", "arange", "(", "self", ".", "n_features", ")", ":", "self", ".", "term_set", ".", "append", "(", "node", "(", "'x'", ",", "loc", "=", "i", ",", "otype", "=", "self", ".", "dtypes", "[", "i", "]", ")", ")", "# features", "# add ephemeral random constants if flag", "if", "self", ".", "erc", ":", "# ephemeral random constants", "self", ".", "term_set", ".", "append", "(", "node", "(", "'k'", ",", "value", "=", "self", ".", "random_state", ".", "rand", "(", ")", ")", ")", "# edit function set if boolean", "if", "self", ".", "boolean", "or", "self", ".", "otype", "==", "'b'", ":", "# include boolean functions", "self", ".", "func_set", "+=", "[", "node", "(", "'!'", ")", ",", "node", "(", "'&'", ")", ",", "node", "(", "'|'", ")", ",", "node", "(", "'=='", ")", ",", "node", "(", "'>_f'", ")", ",", "node", "(", "'<_f'", ")", ",", "node", "(", "'>=_f'", ")", ",", "node", "(", "'<=_f'", ")", ",", "node", "(", "'>_b'", ")", ",", "node", "(", "'<_b'", ")", ",", "node", "(", "'>=_b'", ")", ",", "node", "(", "'<=_b'", ")", ",", "node", "(", "'xor_b'", ")", ",", "node", "(", "'xor_f'", ")", "]", "# add mdr if specified", "if", "self", ".", "mdr", ":", "self", ".", "func_set", "+=", "[", "node", "(", "'mdr2'", ")", "]", "# print few settings", "if", "self", ".", "verbosity", ">", "1", ":", "for", "arg", "in", "self", ".", "get_params", "(", ")", ":", "print", "(", "'{}\\t=\\t{}'", ".", "format", "(", "arg", ",", "self", ".", "get_params", "(", ")", "[", "arg", "]", ")", ")", "print", "(", "''", ")", "######################################################### initial model", "# fit to original data", "with", 
"warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "if", "self", ".", "scoring_function", "==", "roc_auc_score", ":", "self", ".", "_best_score", "=", "self", ".", "roc_auc_cv", "(", "features", ",", "labels", ")", "else", ":", "self", ".", "_best_score", "=", "np", ".", "mean", "(", "[", "self", ".", "scoring_function", "(", "labels", "[", "test", "]", ",", "self", ".", "pipeline", ".", "fit", "(", "features", "[", "train", "]", ",", "labels", "[", "train", "]", ")", ".", "predict", "(", "features", "[", "test", "]", ")", ")", "for", "train", ",", "test", "in", "KFold", "(", ")", ".", "split", "(", "features", ",", "labels", ")", "]", ")", "initial_score", "=", "self", ".", "_best_score", "if", "self", ".", "verbosity", ">", "0", ":", "print", "(", "\"initial ML CV: {:1.3f}\"", ".", "format", "(", "self", ".", "_best_score", ")", ")", "############################################# Create initial population", "# for now, force seed_with_ml to be off if otype is 'b', since data", "# types are assumed to be float", "if", "self", ".", "otype", "==", "'b'", ":", "self", ".", "seed_with_ml", "=", "False", "self", ".", "pop", "=", "self", ".", "init_pop", "(", ")", "# check that uuids are unique in population", "uuids", "=", "[", "p", ".", "id", "for", "p", "in", "self", ".", "pop", ".", "individuals", "]", "if", "len", "(", "uuids", ")", "!=", "len", "(", "set", "(", "uuids", ")", ")", ":", "pdb", ".", "set_trace", "(", ")", "# Evaluate the entire population", "# X represents a matrix of the population outputs (number of samples x", "# population size)", "# single thread", "self", ".", "X", "=", "self", ".", "transform", "(", "features", ",", "self", ".", "pop", ".", "individuals", ",", "labels", ")", ".", "transpose", "(", ")", "# pdb.set_trace()", "# parallel:", "# X = np.asarray(Parallel(n_jobs=-1)(", "# delayed(out)(I,features,self.otype,labels) for I in self.pop.individuals),", "# order = 'F')", "# calculate fitness of individuals", "# fitnesses = list(map(lambda I: fitness(I,labels,self.pipeline),X))", "self", ".", "F", "=", "self", ".", "calc_fitness", "(", "self", ".", "X", ",", "labels", ",", "self", ".", "fit_choice", ",", "self", ".", "sel", ")", "#pdb.set_trace()", "#with Parallel(n_jobs=10) as parallel:", "####################", "self", ".", "diversity", "=", "[", "]", "# progress bar", "pbar", "=", "tqdm", "(", "total", "=", "self", ".", "generations", ",", "disable", "=", "self", ".", "verbosity", "==", "0", ",", "desc", "=", "'Internal CV: {:1.3f}'", ".", "format", "(", "self", ".", "_best_score", ")", ")", "stall_count", "=", "0", "########################################################### main GP loop", "for", "g", "in", "np", ".", "arange", "(", "self", ".", "generations", ")", ":", "if", "stall_count", "==", "self", ".", "max_stall", ":", "if", "self", ".", "verbosity", ">", "0", ":", "print", "(", "'max stall count reached.'", ")", "break", "if", "self", ".", "track_diversity", ":", "self", ".", "get_diversity", "(", "self", ".", "X", ")", "# mid verbosity printouts", "if", "self", ".", "verbosity", ">", "1", ":", "print", "(", "\"generation\"", ",", "str", "(", "g", ")", ")", "print", "(", "\"median fitness pop: %0.2f\"", "%", "np", ".", "median", "(", "[", "np", ".", "mean", "(", "f", ")", "for", "f", "in", "self", ".", "F", "]", ")", ")", "print", "(", "\"best fitness pop: %0.2f\"", "%", "np", ".", "min", "(", "[", "np", ".", "mean", "(", "f", ")", "for", "f", "in", "self", ".", "F", "]", ")", ")", "if", "self", 
".", "track_diversity", ":", "print", "(", "\"feature diversity: %0.2f\"", "%", "self", ".", "diversity", "[", "-", "1", "]", ")", "# high verbosity printouts ", "if", "self", ".", "verbosity", ">", "2", ":", "eqns", "=", "self", ".", "stacks_2_eqns", "(", "self", ".", "pop", ".", "individuals", ")", "fs", "=", "[", "np", ".", "mean", "(", "f", ")", "for", "f", "in", "self", ".", "F", "]", "print", "(", "\"population:\"", ",", "[", "(", "\"%0.2f\"", "%", "f", ",", "eqns", "[", "i", "]", ")", "for", "f", ",", "i", "in", "zip", "(", "np", ".", "sort", "(", "fs", ")", ",", "np", ".", "argsort", "(", "fs", ")", ")", "]", ")", "#print(\"pop fitnesses:\", [\"%0.2f\" % np.mean(f) for f in self.F])", "####################################################### fit ml model", "if", "self", ".", "verbosity", ">", "1", ":", "print", "(", "\"ml fitting...\"", ")", "tmp_score", "=", "0", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "try", ":", "if", "self", ".", "valid_loc", "(", ")", ":", "if", "self", ".", "scoring_function", "==", "roc_auc_score", ":", "tmp_score", "=", "self", ".", "roc_auc_cv", "(", "self", ".", "X", "[", "self", ".", "valid_loc", "(", ")", ",", ":", "]", ".", "transpose", "(", ")", ",", "labels", ")", "else", ":", "tmp_score", "=", "np", ".", "mean", "(", "[", "self", ".", "scoring_function", "(", "labels", "[", "test", "]", ",", "self", ".", "pipeline", ".", "fit", "(", "self", ".", "X", "[", "self", ".", "valid_loc", "(", ")", ",", ":", "]", ".", "transpose", "(", ")", "[", "train", "]", ",", "labels", "[", "train", "]", ")", ".", "predict", "(", "self", ".", "X", "[", "self", ".", "valid_loc", "(", ")", ",", ":", "]", ".", "transpose", "(", ")", "[", "test", "]", ")", ")", "for", "train", ",", "test", "in", "KFold", "(", ")", ".", "split", "(", "features", ",", "labels", ")", "]", ")", "except", "ValueError", "as", "detail", ":", "print", "(", "\"warning: ValueError in ml fit. 
X.shape:\"", ",", "self", ".", "X", "[", ":", ",", "self", ".", "valid_loc", "(", ")", "]", ".", "transpose", "(", ")", ".", "shape", ",", "\"labels shape:\"", ",", "labels", ".", "shape", ")", "print", "(", "\"First ten entries X:\"", ",", "self", ".", "X", "[", "self", ".", "valid_loc", "(", ")", ",", ":", "]", ".", "transpose", "(", ")", "[", ":", "10", "]", ")", "print", "(", "\"First ten entries labels:\"", ",", "labels", "[", ":", "10", "]", ")", "print", "(", "\"equations:\"", ",", "self", ".", "stacks_2_eqns", "(", "self", ".", "pop", ".", "individuals", ")", ")", "print", "(", "\"FEW parameters:\"", ",", "self", ".", "get_params", "(", ")", ")", "print", "(", "\"---\\ndetailed error message:\"", ",", "detail", ")", "raise", "(", "detail", ")", "if", "self", ".", "verbosity", ">", "1", ":", "print", "(", "\"current ml validation score:\"", ",", "tmp_score", ")", "#################################################### save best model", "if", "self", ".", "valid_loc", "(", ")", "and", "tmp_score", ">", "self", ".", "_best_score", ":", "self", ".", "_best_estimator", "=", "copy", ".", "deepcopy", "(", "self", ".", "pipeline", ")", "self", ".", "_best_score", "=", "tmp_score", "stall_count", "=", "0", "self", ".", "_best_inds", "=", "copy", ".", "deepcopy", "(", "self", ".", "valid", "(", ")", ")", "if", "self", ".", "verbosity", ">", "1", ":", "print", "(", "\"updated best internal CV:\"", ",", "self", ".", "_best_score", ")", "else", ":", "stall_count", "=", "stall_count", "+", "1", "########################################################## variation", "if", "self", ".", "verbosity", ">", "2", ":", "print", "(", "\"variation...\"", ")", "offspring", ",", "elite", ",", "elite_index", "=", "self", ".", "variation", "(", "self", ".", "pop", ".", "individuals", ")", "################################################# evaluate offspring", "if", "self", ".", "verbosity", ">", "2", ":", "print", "(", "\"output...\"", ")", "X_offspring", "=", "self", ".", "transform", "(", "features", ",", "offspring", ")", ".", "transpose", "(", ")", "if", "self", ".", "verbosity", ">", "2", ":", "print", "(", "\"fitness...\"", ")", "F_offspring", "=", "self", ".", "calc_fitness", "(", "X_offspring", ",", "labels", ",", "self", ".", "fit_choice", ",", "self", ".", "sel", ")", "########################################################### survival", "if", "self", ".", "verbosity", ">", "2", ":", "print", "(", "\"survival..\"", ")", "survivors", ",", "survivor_index", "=", "self", ".", "survival", "(", "self", ".", "pop", ".", "individuals", ",", "offspring", ",", "elite", ",", "elite_index", ",", "X", "=", "self", ".", "X", ",", "X_O", "=", "X_offspring", ",", "F", "=", "self", ".", "F", ",", "F_O", "=", "F_offspring", ")", "# set survivors", "self", ".", "pop", ".", "individuals", "[", ":", "]", "=", "survivors", "self", ".", "X", "=", "np", ".", "vstack", "(", "(", "self", ".", "X", ",", "X_offspring", ")", ")", "[", "survivor_index", "]", "if", "'lexicase'", "in", "self", ".", "sel", ":", "self", ".", "F", "=", "np", ".", "asarray", "(", "np", ".", "vstack", "(", "(", "self", ".", "F", ",", "F_offspring", ")", ")", "[", "survivor_index", "]", ",", "order", "=", "'F'", ")", "else", ":", "self", ".", "F", "=", "np", ".", "asarray", "(", "np", ".", "hstack", "(", "(", "self", ".", "F", ",", "F_offspring", ")", ")", "[", "survivor_index", "]", ",", "order", "=", "'F'", ")", "if", "self", ".", "verbosity", ">", "2", ":", "print", "(", "\"median fitness survivors: %0.2f\"", "%", "np", ".", "median", "(", "[", "np", 
".", "mean", "(", "f", ")", "for", "f", "in", "self", ".", "F", "]", ")", ")", "if", "self", ".", "verbosity", ">", "2", ":", "print", "(", "\"best features:\"", ",", "self", ".", "stacks_2_eqns", "(", "self", ".", "_best_inds", ")", "if", "self", ".", "_best_inds", "else", "'original'", ")", "pbar", ".", "set_description", "(", "'Internal CV: {:1.3f}'", ".", "format", "(", "self", ".", "_best_score", ")", ")", "pbar", ".", "update", "(", "1", ")", "# end of main GP loop", "####################", "if", "self", ".", "verbosity", ">", "0", ":", "print", "(", "'finished. best internal val score:'", "' {:1.3f}'", ".", "format", "(", "self", ".", "_best_score", ")", ")", "if", "self", ".", "verbosity", ">", "0", ":", "print", "(", "\"final model:\\n\"", ",", "self", ".", "print_model", "(", ")", ")", "if", "not", "self", ".", "_best_estimator", ":", "# if no better model found, just return underlying method fit to the", "# training data", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "self", ".", "_best_estimator", "=", "self", ".", "pipeline", ".", "fit", "(", "features", ",", "labels", ")", "else", ":", "# fit final estimator to all the training data", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "self", ".", "_best_estimator", ".", "fit", "(", "self", ".", "transform", "(", "features", ")", ",", "labels", ")", "return", "self" ]
Fit model to data
[ "Fit", "model", "to", "data" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L172-L417
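Since fit() above drives the whole evolutionary pipeline, a minimal usage sketch may help; the import path, constructor arguments, and synthetic data below are assumptions for illustration, not the verified API.

# Hypothetical usage sketch for FEW.fit(); the constructor arguments and
# import path are inferred from the attributes referenced above.
import numpy as np
from few import FEW

rng = np.random.RandomState(42)
X = rng.rand(100, 5)                                 # 100 samples, 5 features
y = X[:, 0] + 2.0*X[:, 1] + rng.normal(0, 0.1, 100)

learner = FEW(generations=10, population_size='2x', verbosity=1)
learner.fit(X, y)                  # runs the GP feature-engineering loop
print(learner.print_model())       # engineered features with ML weights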
lacava/few
few/few.py
FEW.transform
def transform(self,x,inds=None,labels = None):
        """return a transformation of x using population outputs"""
        if inds:
            # return np.asarray(Parallel(n_jobs=10)(
            #     delayed(self.out)(I,x,labels,self.otype)
            #     for I in inds)).transpose()
            return np.asarray(
                [self.out(I,x,labels,self.otype) for I in inds]).transpose()
        elif self._best_inds:
            # return np.asarray(Parallel(n_jobs=10)(
            #     delayed(self.out)(I,x,labels,self.otype)
            #     for I in self._best_inds)).transpose()
            return np.asarray(
                [self.out(I,x,labels,self.otype)
                 for I in self._best_inds]).transpose()
        else:
            return x
python
def transform(self,x,inds=None,labels = None):
        """return a transformation of x using population outputs"""
        if inds:
            # return np.asarray(Parallel(n_jobs=10)(
            #     delayed(self.out)(I,x,labels,self.otype)
            #     for I in inds)).transpose()
            return np.asarray(
                [self.out(I,x,labels,self.otype) for I in inds]).transpose()
        elif self._best_inds:
            # return np.asarray(Parallel(n_jobs=10)(
            #     delayed(self.out)(I,x,labels,self.otype)
            #     for I in self._best_inds)).transpose()
            return np.asarray(
                [self.out(I,x,labels,self.otype)
                 for I in self._best_inds]).transpose()
        else:
            return x
[ "def", "transform", "(", "self", ",", "x", ",", "inds", "=", "None", ",", "labels", "=", "None", ")", ":", "if", "inds", ":", "# return np.asarray(Parallel(n_jobs=10)(delayed(self.out)(I,x,labels,self.otype) ", "# for I in inds)).transpose()", "return", "np", ".", "asarray", "(", "[", "self", ".", "out", "(", "I", ",", "x", ",", "labels", ",", "self", ".", "otype", ")", "for", "I", "in", "inds", "]", ")", ".", "transpose", "(", ")", "elif", "self", ".", "_best_inds", ":", "# return np.asarray(Parallel(n_jobs=10)(delayed(self.out)(I,x,labels,self.otype) ", "# for I in self._best_inds)).transpose()", "return", "np", ".", "asarray", "(", "[", "self", ".", "out", "(", "I", ",", "x", ",", "labels", ",", "self", ".", "otype", ")", "for", "I", "in", "self", ".", "_best_inds", "]", ")", ".", "transpose", "(", ")", "else", ":", "return", "x" ]
return a transformation of x using population outputs
[ "return", "a", "transformation", "of", "x", "using", "population", "outputs" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L419-L432
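A small shape sketch of the matrix transform() assembles; plain arrays stand in for the per-individual outputs produced by self.out.

# Shape sketch: each individual's output forms one row before the final
# transpose, so callers receive an (n_samples, n_individuals) matrix.
import numpy as np

outputs = [np.array([1.0, 2.0, 3.0]),   # individual 1 evaluated on 3 samples
           np.array([4.0, 5.0, 6.0])]   # individual 2 evaluated on 3 samples
X_transform = np.asarray(outputs).transpose()
print(X_transform.shape)                # (3, 2)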
lacava/few
few/few.py
FEW.impute_data
def impute_data(self,x):
        """Imputes a data set containing NaN values"""
        imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
        return imp.fit_transform(x)
python
def impute_data(self,x):
        """Imputes a data set containing NaN values"""
        imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
        return imp.fit_transform(x)
[ "def", "impute_data", "(", "self", ",", "x", ")", ":", "imp", "=", "Imputer", "(", "missing_values", "=", "'NaN'", ",", "strategy", "=", "'mean'", ",", "axis", "=", "0", ")", "return", "imp", ".", "fit_transform", "(", "x", ")" ]
Imputes a data set containing NaN values
[ "Imputes", "data", "set", "containing", "Nan", "values" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L434-L437
lacava/few
few/few.py
FEW.clean
def clean(self,x):
        """remove rows of x that contain NaN or inf values"""
        return x[~np.any(np.isnan(x) | np.isinf(x),axis=1)]
python
def clean(self,x):
        """remove rows of x that contain NaN or inf values"""
        return x[~np.any(np.isnan(x) | np.isinf(x),axis=1)]
[ "def", "clean", "(", "self", ",", "x", ")", ":", "return", "x", "[", "~", "np", ".", "any", "(", "np", ".", "isnan", "(", "x", ")", "|", "np", ".", "isinf", "(", "x", ")", ",", "axis", "=", "1", ")", "]" ]
remove rows of x that contain NaN or inf values
[ "remove", "nan", "and", "inf", "rows", "from", "x" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L439-L441
lacava/few
few/few.py
FEW.clean_with_zeros
def clean_with_zeros(self,x):
        """set rows of x that contain NaN or inf values to zero"""
        x[np.any(np.isnan(x) | np.isinf(x),axis=1)] = 0
        return x
python
def clean_with_zeros(self,x):
        """set rows of x that contain NaN or inf values to zero"""
        x[np.any(np.isnan(x) | np.isinf(x),axis=1)] = 0
        return x
[ "def", "clean_with_zeros", "(", "self", ",", "x", ")", ":", "x", "[", "~", "np", ".", "any", "(", "np", ".", "isnan", "(", "x", ")", "|", "np", ".", "isinf", "(", "x", ")", ",", "axis", "=", "1", ")", "]", "=", "0", "return", "x" ]
set rows of x that contain NaN or inf values to zero
[ "set", "nan", "and", "inf", "rows", "from", "x", "to", "zero" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L443-L446
lacava/few
few/few.py
FEW.predict
def predict(self, testing_features):
        """predict on a holdout data set."""
        # print("best_inds:",self._best_inds)
        # print("best estimator size:",self._best_estimator.coef_.shape)
        if self.clean:
            testing_features = self.impute_data(testing_features)
        if self._best_inds:
            X_transform = self.transform(testing_features)
            try:
                return self._best_estimator.predict(
                    self.transform(testing_features))
            except ValueError as detail:
                # pdb.set_trace()
                print('shape of X:',testing_features.shape)
                print('shape of X_transform:',X_transform.transpose().shape)
                print('best inds:',self.stacks_2_eqns(self._best_inds))
                print('valid locs:',self.valid_loc(self._best_inds))
                raise ValueError(detail)
        else:
            return self._best_estimator.predict(testing_features)
python
def predict(self, testing_features):
        """predict on a holdout data set."""
        # print("best_inds:",self._best_inds)
        # print("best estimator size:",self._best_estimator.coef_.shape)
        if self.clean:
            testing_features = self.impute_data(testing_features)
        if self._best_inds:
            X_transform = self.transform(testing_features)
            try:
                return self._best_estimator.predict(
                    self.transform(testing_features))
            except ValueError as detail:
                # pdb.set_trace()
                print('shape of X:',testing_features.shape)
                print('shape of X_transform:',X_transform.transpose().shape)
                print('best inds:',self.stacks_2_eqns(self._best_inds))
                print('valid locs:',self.valid_loc(self._best_inds))
                raise ValueError(detail)
        else:
            return self._best_estimator.predict(testing_features)
[ "def", "predict", "(", "self", ",", "testing_features", ")", ":", "# print(\"best_inds:\",self._best_inds)", "# print(\"best estimator size:\",self._best_estimator.coef_.shape)", "if", "self", ".", "clean", ":", "testing_features", "=", "self", ".", "impute_data", "(", "testing_features", ")", "if", "self", ".", "_best_inds", ":", "X_transform", "=", "self", ".", "transform", "(", "testing_features", ")", "try", ":", "return", "self", ".", "_best_estimator", ".", "predict", "(", "self", ".", "transform", "(", "testing_features", ")", ")", "except", "ValueError", "as", "detail", ":", "# pdb.set_trace()", "print", "(", "'shape of X:'", ",", "testing_features", ".", "shape", ")", "print", "(", "'shape of X_transform:'", ",", "X_transform", ".", "transpose", "(", ")", ".", "shape", ")", "print", "(", "'best inds:'", ",", "self", ".", "stacks_2_eqns", "(", "self", ".", "_best_inds", ")", ")", "print", "(", "'valid locs:'", ",", "self", ".", "valid_loc", "(", "self", ".", "_best_inds", ")", ")", "raise", "ValueError", "(", "detail", ")", "else", ":", "return", "self", ".", "_best_estimator", ".", "predict", "(", "testing_features", ")" ]
predict on a holdout data set.
[ "predict", "on", "a", "holdout", "data", "set", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L448-L467
lacava/few
few/few.py
FEW.fit_predict
def fit_predict(self, features, labels):
        """Convenience function that fits a pipeline then predicts on the
        provided features

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix
        labels: array-like {n_samples}
            List of class labels for prediction

        Returns
        ----------
        array-like: {n_samples}
            Predicted labels for the provided features
        """
        self.fit(features, labels)
        return self.predict(features)
python
def fit_predict(self, features, labels):
        """Convenience function that fits a pipeline then predicts on the
        provided features

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix
        labels: array-like {n_samples}
            List of class labels for prediction

        Returns
        ----------
        array-like: {n_samples}
            Predicted labels for the provided features
        """
        self.fit(features, labels)
        return self.predict(features)
[ "def", "fit_predict", "(", "self", ",", "features", ",", "labels", ")", ":", "self", ".", "fit", "(", "features", ",", "labels", ")", "return", "self", ".", "predict", "(", "features", ")" ]
Convenience function that fits a pipeline then predicts on the provided features Parameters ---------- features: array-like {n_samples, n_features} Feature matrix labels: array-like {n_samples} List of class labels for prediction Returns ---------- array-like: {n_samples} Predicted labels for the provided features
[ "Convenience", "function", "that", "fits", "a", "pipeline", "then", "predicts", "on", "the", "provided", "features" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L469-L487
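fit_predict is a thin wrapper around fit() followed by predict() on the same matrix; the sketch below shows the equivalent two-step form (the import path and constructor arguments are assumptions, and the two predictions only coincide under a fixed random seed).

# fit_predict(X, y) follows the same code path as the two-step form below.
import numpy as np
from few import FEW           # assumed import path

X = np.random.rand(50, 3)
y = np.random.rand(50)
yhat = FEW(generations=5).fit_predict(X, y)
# equivalent two-step form:
model = FEW(generations=5)
model.fit(X, y)
yhat2 = model.predict(X)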
lacava/few
few/few.py
FEW.score
def score(self, testing_features, testing_labels):
        """estimates the score on a testing set using the scoring
        function"""
        # print("test features shape:",testing_features.shape)
        # print("testing labels shape:",testing_labels.shape)
        yhat = self.predict(testing_features)
        return self.scoring_function(testing_labels,yhat)
python
def score(self, testing_features, testing_labels):
        """estimates the score on a testing set using the scoring
        function"""
        # print("test features shape:",testing_features.shape)
        # print("testing labels shape:",testing_labels.shape)
        yhat = self.predict(testing_features)
        return self.scoring_function(testing_labels,yhat)
[ "def", "score", "(", "self", ",", "testing_features", ",", "testing_labels", ")", ":", "# print(\"test features shape:\",testing_features.shape)", "# print(\"testing labels shape:\",testing_labels.shape)", "yhat", "=", "self", ".", "predict", "(", "testing_features", ")", "return", "self", ".", "scoring_function", "(", "testing_labels", ",", "yhat", ")" ]
estimates the score on a testing set using the scoring function
[ "estimates", "accuracy", "on", "testing", "set" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L489-L494
lacava/few
few/few.py
FEW.export
def export(self, output_file_name):
        """exports engineered features

        Parameters
        ----------
        output_file_name: string
            String containing the path and file name of the desired output
            file

        Returns
        -------
        None
        """
        if self._best_estimator is None:
            raise ValueError('A model has not been optimized. Please call'
                             ' fit() first.')
        # Write print_model() to file
        with open(output_file_name, 'w') as output_file:
            output_file.write(self.print_model())
        # if decision tree, print tree into dot file
        if 'DecisionTree' in self.ml_type:
            export_graphviz(self._best_estimator,
                            out_file=output_file_name+'.dot',
                            feature_names=self.stacks_2_eqns(self._best_inds)
                            if self._best_inds else None,
                            class_names=['True','False'],
                            filled=False,impurity=True,rotate=True)
python
def export(self, output_file_name):
        """exports engineered features

        Parameters
        ----------
        output_file_name: string
            String containing the path and file name of the desired output
            file

        Returns
        -------
        None
        """
        if self._best_estimator is None:
            raise ValueError('A model has not been optimized. Please call'
                             ' fit() first.')
        # Write print_model() to file
        with open(output_file_name, 'w') as output_file:
            output_file.write(self.print_model())
        # if decision tree, print tree into dot file
        if 'DecisionTree' in self.ml_type:
            export_graphviz(self._best_estimator,
                            out_file=output_file_name+'.dot',
                            feature_names=self.stacks_2_eqns(self._best_inds)
                            if self._best_inds else None,
                            class_names=['True','False'],
                            filled=False,impurity=True,rotate=True)
[ "def", "export", "(", "self", ",", "output_file_name", ")", ":", "if", "self", ".", "_best_estimator", "is", "None", ":", "raise", "ValueError", "(", "'A model has not been optimized. Please call fit()'", "' first.'", ")", "# Write print_model() to file", "with", "open", "(", "output_file_name", ",", "'w'", ")", "as", "output_file", ":", "output_file", ".", "write", "(", "self", ".", "print_model", "(", ")", ")", "# if decision tree, print tree into dot file", "if", "'DecisionTree'", "in", "self", ".", "ml_type", ":", "export_graphviz", "(", "self", ".", "_best_estimator", ",", "out_file", "=", "output_file_name", "+", "'.dot'", ",", "feature_names", "=", "self", ".", "stacks_2_eqns", "(", "self", ".", "_best_inds", ")", "if", "self", ".", "_best_inds", "else", "None", ",", "class_names", "=", "[", "'True'", ",", "'False'", "]", ",", "filled", "=", "False", ",", "impurity", "=", "True", ",", "rotate", "=", "True", ")" ]
exports engineered features Parameters ---------- output_file_name: string String containing the path and file name of the desired output file Returns ------- None
[ "exports", "engineered", "features" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L496-L523
lacava/few
few/few.py
FEW.print_model
def print_model(self,sep='\n'):
        """prints model contained in best inds, if ml has a coefficient
        property. otherwise, prints the features generated by FEW."""
        model = ''
        # print('ml type:',self.ml_type)
        # print('ml:',self._best_estimator)
        if self._best_inds:
            if self.ml_type == 'GridSearchCV':
                ml = self._best_estimator.named_steps['ml'].best_estimator_
            else:
                ml = self._best_estimator.named_steps['ml']
            if self.ml_type != 'SVC' and self.ml_type != 'SVR':
                # this check is needed because svm has a bug that throws a
                # ValueError on attribute check
                if hasattr(ml,'coef_'):
                    if len(ml.coef_.shape)==1:
                        s = np.argsort(np.abs(ml.coef_))[::-1]
                        scoef = ml.coef_[s]
                        bi = [self._best_inds[k] for k in s]
                        model = (' +' + sep).join(
                            [str(round(c,3))+'*'+self.stack_2_eqn(f)
                             for i,(f,c) in enumerate(zip(bi,scoef))
                             if round(scoef[i],3) != 0])
                    else:
                        # more than one decision function is fit. print all.
                        for j,coef in enumerate(ml.coef_):
                            s = np.argsort(np.abs(coef))[::-1]
                            scoef = coef[s]
                            bi = [self._best_inds[k] for k in s]
                            model += sep + 'class'+str(j)+' :'+' + '.join(
                                [str(round(c,3))+'*'+self.stack_2_eqn(f)
                                 for i,(f,c) in enumerate(zip(bi,coef))
                                 if coef[i] != 0])
                elif hasattr(ml,'feature_importances_'):
                    s = np.argsort(ml.feature_importances_)[::-1]
                    sfi = ml.feature_importances_[s]
                    bi = [self._best_inds[k] for k in s]
                    # model = 'importance:feature'+sep
                    model += sep.join(
                        [str(round(c,3))+':'+self.stack_2_eqn(f)
                         for i,(f,c) in enumerate(zip(bi,sfi))
                         if round(sfi[i],3) != 0])
                else:
                    return sep.join(self.stacks_2_eqns(self._best_inds))
            else:
                return sep.join(self.stacks_2_eqns(self._best_inds))
        else:
            return 'original features'
        return model
python
def print_model(self,sep='\n'):
        """prints model contained in best inds, if ml has a coefficient
        property. otherwise, prints the features generated by FEW."""
        model = ''
        # print('ml type:',self.ml_type)
        # print('ml:',self._best_estimator)
        if self._best_inds:
            if self.ml_type == 'GridSearchCV':
                ml = self._best_estimator.named_steps['ml'].best_estimator_
            else:
                ml = self._best_estimator.named_steps['ml']
            if self.ml_type != 'SVC' and self.ml_type != 'SVR':
                # this check is needed because svm has a bug that throws a
                # ValueError on attribute check
                if hasattr(ml,'coef_'):
                    if len(ml.coef_.shape)==1:
                        s = np.argsort(np.abs(ml.coef_))[::-1]
                        scoef = ml.coef_[s]
                        bi = [self._best_inds[k] for k in s]
                        model = (' +' + sep).join(
                            [str(round(c,3))+'*'+self.stack_2_eqn(f)
                             for i,(f,c) in enumerate(zip(bi,scoef))
                             if round(scoef[i],3) != 0])
                    else:
                        # more than one decision function is fit. print all.
                        for j,coef in enumerate(ml.coef_):
                            s = np.argsort(np.abs(coef))[::-1]
                            scoef = coef[s]
                            bi = [self._best_inds[k] for k in s]
                            model += sep + 'class'+str(j)+' :'+' + '.join(
                                [str(round(c,3))+'*'+self.stack_2_eqn(f)
                                 for i,(f,c) in enumerate(zip(bi,coef))
                                 if coef[i] != 0])
                elif hasattr(ml,'feature_importances_'):
                    s = np.argsort(ml.feature_importances_)[::-1]
                    sfi = ml.feature_importances_[s]
                    bi = [self._best_inds[k] for k in s]
                    # model = 'importance:feature'+sep
                    model += sep.join(
                        [str(round(c,3))+':'+self.stack_2_eqn(f)
                         for i,(f,c) in enumerate(zip(bi,sfi))
                         if round(sfi[i],3) != 0])
                else:
                    return sep.join(self.stacks_2_eqns(self._best_inds))
            else:
                return sep.join(self.stacks_2_eqns(self._best_inds))
        else:
            return 'original features'
        return model
[ "def", "print_model", "(", "self", ",", "sep", "=", "'\\n'", ")", ":", "model", "=", "''", "# print('ml type:',self.ml_type)", "# print('ml:',self._best_estimator)", "if", "self", ".", "_best_inds", ":", "if", "self", ".", "ml_type", "==", "'GridSearchCV'", ":", "ml", "=", "self", ".", "_best_estimator", ".", "named_steps", "[", "'ml'", "]", ".", "best_estimator_", "else", ":", "ml", "=", "self", ".", "_best_estimator", ".", "named_steps", "[", "'ml'", "]", "if", "self", ".", "ml_type", "!=", "'SVC'", "and", "self", ".", "ml_type", "!=", "'SVR'", ":", "# this is need because svm has a bug that throws valueerror on", "# attribute check", "if", "hasattr", "(", "ml", ",", "'coef_'", ")", ":", "if", "len", "(", "ml", ".", "coef_", ".", "shape", ")", "==", "1", ":", "s", "=", "np", ".", "argsort", "(", "np", ".", "abs", "(", "ml", ".", "coef_", ")", ")", "[", ":", ":", "-", "1", "]", "scoef", "=", "ml", ".", "coef_", "[", "s", "]", "bi", "=", "[", "self", ".", "_best_inds", "[", "k", "]", "for", "k", "in", "s", "]", "model", "=", "(", "' +'", "+", "sep", ")", ".", "join", "(", "[", "str", "(", "round", "(", "c", ",", "3", ")", ")", "+", "'*'", "+", "self", ".", "stack_2_eqn", "(", "f", ")", "for", "i", ",", "(", "f", ",", "c", ")", "in", "enumerate", "(", "zip", "(", "bi", ",", "scoef", ")", ")", "if", "round", "(", "scoef", "[", "i", "]", ",", "3", ")", "!=", "0", "]", ")", "else", ":", "# more than one decision function is fit. print all.", "for", "j", ",", "coef", "in", "enumerate", "(", "ml", ".", "coef_", ")", ":", "s", "=", "np", ".", "argsort", "(", "np", ".", "abs", "(", "coef", ")", ")", "[", ":", ":", "-", "1", "]", "scoef", "=", "coef", "[", "s", "]", "bi", "=", "[", "self", ".", "_best_inds", "[", "k", "]", "for", "k", "in", "s", "]", "model", "+=", "sep", "+", "'class'", "+", "str", "(", "j", ")", "+", "' :'", "+", "' + '", ".", "join", "(", "[", "str", "(", "round", "(", "c", ",", "3", ")", ")", "+", "'*'", "+", "self", ".", "stack_2_eqn", "(", "f", ")", "for", "i", ",", "(", "f", ",", "c", ")", "in", "enumerate", "(", "zip", "(", "bi", ",", "coef", ")", ")", "if", "coef", "[", "i", "]", "!=", "0", "]", ")", "elif", "hasattr", "(", "ml", ",", "'feature_importances_'", ")", ":", "s", "=", "np", ".", "argsort", "(", "ml", ".", "feature_importances_", ")", "[", ":", ":", "-", "1", "]", "sfi", "=", "ml", ".", "feature_importances_", "[", "s", "]", "bi", "=", "[", "self", ".", "_best_inds", "[", "k", "]", "for", "k", "in", "s", "]", "# model = 'importance:feature'+sep", "model", "+=", "sep", ".", "join", "(", "[", "str", "(", "round", "(", "c", ",", "3", ")", ")", "+", "':'", "+", "self", ".", "stack_2_eqn", "(", "f", ")", "for", "i", ",", "(", "f", ",", "c", ")", "in", "enumerate", "(", "zip", "(", "bi", ",", "sfi", ")", ")", "if", "round", "(", "sfi", "[", "i", "]", ",", "3", ")", "!=", "0", "]", ")", "else", ":", "return", "sep", ".", "join", "(", "self", ".", "stacks_2_eqns", "(", "self", ".", "_best_inds", ")", ")", "else", ":", "return", "sep", ".", "join", "(", "self", ".", "stacks_2_eqns", "(", "self", ".", "_best_inds", ")", ")", "else", ":", "return", "'original features'", "return", "model" ]
prints model contained in best inds, if ml has a coefficient property. otherwise, prints the features generated by FEW.
[ "prints", "model", "contained", "in", "best", "inds", "if", "ml", "has", "a", "coefficient", "property", ".", "otherwise", "prints", "the", "features", "generated", "by", "FEW", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L525-L579
lacava/few
few/few.py
FEW.valid_loc
def valid_loc(self,F=None): """returns the indices of individuals with valid fitness.""" if F is not None: return [i for i,f in enumerate(F) if np.all(f < self.max_fit) and np.all(f >= 0)] else: return [i for i,f in enumerate(self.F) if np.all(f < self.max_fit) and np.all(f >= 0)]
python
def valid_loc(self,F=None): """returns the indices of individuals with valid fitness.""" if F is not None: return [i for i,f in enumerate(F) if np.all(f < self.max_fit) and np.all(f >= 0)] else: return [i for i,f in enumerate(self.F) if np.all(f < self.max_fit) and np.all(f >= 0)]
[ "def", "valid_loc", "(", "self", ",", "F", "=", "None", ")", ":", "if", "F", "is", "not", "None", ":", "return", "[", "i", "for", "i", ",", "f", "in", "enumerate", "(", "F", ")", "if", "np", ".", "all", "(", "f", "<", "self", ".", "max_fit", ")", "and", "np", ".", "all", "(", "f", ">=", "0", ")", "]", "else", ":", "return", "[", "i", "for", "i", ",", "f", "in", "enumerate", "(", "self", ".", "F", ")", "if", "np", ".", "all", "(", "f", "<", "self", ".", "max_fit", ")", "and", "np", ".", "all", "(", "f", ">=", "0", ")", "]" ]
returns the indices of individuals with valid fitness.
[ "returns", "the", "indices", "of", "individuals", "with", "valid", "fitness", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L585-L590
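A standalone sketch of the validity filter used by valid_loc(); the max_fit bound here is illustrative.

# Keep indices whose fitness vectors lie entirely in [0, max_fit).
import numpy as np

max_fit = 99999.0             # illustrative bound
F = [np.array([0.5, 0.7]),    # valid
     np.array([1e10, 0.2]),   # too large -> invalid
     np.array([-1.0, 0.1])]   # negative -> invalid
valid = [i for i, f in enumerate(F)
         if np.all(f < max_fit) and np.all(f >= 0)]
print(valid)                  # [0]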
lacava/few
few/few.py
FEW.valid
def valid(self,individuals=None,F=None):
        """returns the sublist of individuals with valid fitness."""
        if F is not None:
            valid_locs = self.valid_loc(F)
        else:
            valid_locs = self.valid_loc(self.F)
        if individuals:
            return [ind for i,ind in enumerate(individuals)
                    if i in valid_locs]
        else:
            return [ind for i,ind in enumerate(self.pop.individuals)
                    if i in valid_locs]
python
def valid(self,individuals=None,F=None):
        """returns the sublist of individuals with valid fitness."""
        if F is not None:
            valid_locs = self.valid_loc(F)
        else:
            valid_locs = self.valid_loc(self.F)
        if individuals:
            return [ind for i,ind in enumerate(individuals)
                    if i in valid_locs]
        else:
            return [ind for i,ind in enumerate(self.pop.individuals)
                    if i in valid_locs]
[ "def", "valid", "(", "self", ",", "individuals", "=", "None", ",", "F", "=", "None", ")", ":", "if", "F", ":", "valid_locs", "=", "self", ".", "valid_loc", "(", "F", ")", "else", ":", "valid_locs", "=", "self", ".", "valid_loc", "(", "self", ".", "F", ")", "if", "individuals", ":", "return", "[", "ind", "for", "i", ",", "ind", "in", "enumerate", "(", "individuals", ")", "if", "i", "in", "valid_locs", "]", "else", ":", "return", "[", "ind", "for", "i", ",", "ind", "in", "enumerate", "(", "self", ".", "pop", ".", "individuals", ")", "if", "i", "in", "valid_locs", "]" ]
returns the sublist of individuals with valid fitness.
[ "returns", "the", "sublist", "of", "individuals", "with", "valid", "fitness", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L592-L602
lacava/few
few/few.py
FEW.get_diversity
def get_diversity(self,X):
        """compute mean diversity of individual outputs"""
        # diversity in terms of r^2 between each feature and the first feature
        feature_correlations = np.zeros(X.shape[0]-1)
        for i in np.arange(1,X.shape[0]-1):
            feature_correlations[i] = max(0.0,r2_score(X[0],X[i]))
        # pdb.set_trace()
        self.diversity.append(1-np.mean(feature_correlations))
python
def get_diversity(self,X):
        """compute mean diversity of individual outputs"""
        # diversity in terms of r^2 between each feature and the first feature
        feature_correlations = np.zeros(X.shape[0]-1)
        for i in np.arange(1,X.shape[0]-1):
            feature_correlations[i] = max(0.0,r2_score(X[0],X[i]))
        # pdb.set_trace()
        self.diversity.append(1-np.mean(feature_correlations))
[ "def", "get_diversity", "(", "self", ",", "X", ")", ":", "# diversity in terms of cosine distances between features", "feature_correlations", "=", "np", ".", "zeros", "(", "X", ".", "shape", "[", "0", "]", "-", "1", ")", "for", "i", "in", "np", ".", "arange", "(", "1", ",", "X", ".", "shape", "[", "0", "]", "-", "1", ")", ":", "feature_correlations", "[", "i", "]", "=", "max", "(", "0.0", ",", "r2_score", "(", "X", "[", "0", "]", ",", "X", "[", "i", "]", ")", ")", "# pdb.set_trace()", "self", ".", "diversity", ".", "append", "(", "1", "-", "np", ".", "mean", "(", "feature_correlations", ")", ")" ]
compute mean diversity of individual outputs
[ "compute", "mean", "diversity", "of", "individual", "outputs" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L622-L629
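A simplified standalone sketch of the diversity measure; unlike the method above, it scores every remaining feature against the first.

# One minus the mean clipped r^2 between the first feature's output and
# each other feature's output; higher means more diverse outputs.
import numpy as np
from sklearn.metrics import r2_score

rng = np.random.RandomState(0)
base = np.linspace(0, 1, 20)
X = np.vstack([base,                            # feature 0
               base + rng.normal(0, 0.01, 20),  # near-duplicate: r^2 ~ 1
               rng.rand(20)])                   # unrelated: clipped to 0
corrs = [max(0.0, r2_score(X[0], X[i])) for i in range(1, X.shape[0])]
print(1 - np.mean(corrs))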
lacava/few
few/few.py
FEW.roc_auc_cv
def roc_auc_cv(self,features,labels):
        """returns an roc auc score depending on the underlying estimator."""
        if callable(getattr(self.ml, "decision_function", None)):
            return np.mean([self.scoring_function(labels[test],
                    self.pipeline.fit(features[train],labels[train]).
                    decision_function(features[test]))
                    for train, test in KFold().split(features, labels)])
        elif callable(getattr(self.ml, "predict_proba", None)):
            return np.mean([self.scoring_function(labels[test],
                    self.pipeline.fit(features[train],labels[train]).
                    predict_proba(features[test])[:,1])
                    for train, test in KFold().split(features, labels)])
        else:
            raise ValueError("ROC AUC score won't work with " + self.ml_type
                             + ". No decision_function or predict_proba "
                             "method found for this learner.")
python
def roc_auc_cv(self,features,labels):
        """returns an roc auc score depending on the underlying estimator."""
        if callable(getattr(self.ml, "decision_function", None)):
            return np.mean([self.scoring_function(labels[test],
                    self.pipeline.fit(features[train],labels[train]).
                    decision_function(features[test]))
                    for train, test in KFold().split(features, labels)])
        elif callable(getattr(self.ml, "predict_proba", None)):
            return np.mean([self.scoring_function(labels[test],
                    self.pipeline.fit(features[train],labels[train]).
                    predict_proba(features[test])[:,1])
                    for train, test in KFold().split(features, labels)])
        else:
            raise ValueError("ROC AUC score won't work with " + self.ml_type
                             + ". No decision_function or predict_proba "
                             "method found for this learner.")
[ "def", "roc_auc_cv", "(", "self", ",", "features", ",", "labels", ")", ":", "if", "callable", "(", "getattr", "(", "self", ".", "ml", ",", "\"decision_function\"", ",", "None", ")", ")", ":", "return", "np", ".", "mean", "(", "[", "self", ".", "scoring_function", "(", "labels", "[", "test", "]", ",", "self", ".", "pipeline", ".", "fit", "(", "features", "[", "train", "]", ",", "labels", "[", "train", "]", ")", ".", "decision_function", "(", "features", "[", "test", "]", ")", ")", "for", "train", ",", "test", "in", "KFold", "(", ")", ".", "split", "(", "features", ",", "labels", ")", "]", ")", "elif", "callable", "(", "getattr", "(", "self", ".", "ml", ",", "\"predict_proba\"", ",", "None", ")", ")", ":", "return", "np", ".", "mean", "(", "[", "self", ".", "scoring_function", "(", "labels", "[", "test", "]", ",", "self", ".", "pipeline", ".", "fit", "(", "features", "[", "train", "]", ",", "labels", "[", "train", "]", ")", ".", "predict_proba", "(", "features", "[", "test", "]", ")", "[", ":", ",", "1", "]", ")", "for", "train", ",", "test", "in", "KFold", "(", ")", ".", "split", "(", "features", ",", "labels", ")", "]", ")", "else", ":", "raise", "ValueError", "(", "\"ROC AUC score won't work with \"", "+", "self", ".", "ml_type", "+", "\". No \"", "\"decision_function or predict_proba method found for this learner.\"", ")" ]
returns an roc auc score depending on the underlying estimator.
[ "returns", "an", "roc", "auc", "score", "depending", "on", "the", "underlying", "estimator", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/few.py#L631-L645
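A sketch of the two scoring paths roc_auc_cv() chooses between, using plain scikit-learn pieces; the logistic-regression stand-in and the data are illustrative.

# Prefer the decision_function margin, fall back to predict_proba.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold

rng = np.random.RandomState(0)
X = rng.rand(60, 4)
y = (X[:, 0] > 0.5).astype(int)
clf = LogisticRegression()
scores = []
for train, test in KFold().split(X, y):
    clf.fit(X[train], y[train])
    if callable(getattr(clf, "decision_function", None)):
        margin = clf.decision_function(X[test])
    else:
        margin = clf.predict_proba(X[test])[:, 1]
    scores.append(roc_auc_score(y[test], margin))
print(np.mean(scores))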
lacava/few
few/evaluation.py
divs
def divs(x,y): """safe division""" tmp = np.ones(x.shape) nonzero_y = y != 0 tmp[nonzero_y] = x[nonzero_y]/y[nonzero_y] return tmp
python
def divs(x,y): """safe division""" tmp = np.ones(x.shape) nonzero_y = y != 0 tmp[nonzero_y] = x[nonzero_y]/y[nonzero_y] return tmp
[ "def", "divs", "(", "x", ",", "y", ")", ":", "tmp", "=", "np", ".", "ones", "(", "x", ".", "shape", ")", "nonzero_y", "=", "y", "!=", "0", "tmp", "[", "nonzero_y", "]", "=", "x", "[", "nonzero_y", "]", "/", "y", "[", "nonzero_y", "]", "return", "tmp" ]
safe division
[ "safe", "division" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L19-L24
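A behavior sketch for the protected division above:

# Entries with a zero denominator are left at 1.0 instead of raising
# or producing inf.
import numpy as np

x = np.array([6.0, 5.0])
y = np.array([3.0, 0.0])
tmp = np.ones(x.shape)
nonzero_y = y != 0
tmp[nonzero_y] = x[nonzero_y]/y[nonzero_y]
print(tmp)                    # [2. 1.]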
lacava/few
few/evaluation.py
logs
def logs(x): """safe log""" tmp = np.ones(x.shape) nonzero_x = x != 0 tmp[nonzero_x] = np.log(np.abs(x[nonzero_x])) return tmp
python
def logs(x): """safe log""" tmp = np.ones(x.shape) nonzero_x = x != 0 tmp[nonzero_x] = np.log(np.abs(x[nonzero_x])) return tmp
[ "def", "logs", "(", "x", ")", ":", "tmp", "=", "np", ".", "ones", "(", "x", ".", "shape", ")", "nonzero_x", "=", "x", "!=", "0", "tmp", "[", "nonzero_x", "]", "=", "np", ".", "log", "(", "np", ".", "abs", "(", "x", "[", "nonzero_x", "]", ")", ")", "return", "tmp" ]
safe log
[ "safe", "log" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L27-L32
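A behavior sketch for the protected log above:

# The log of |x| is taken elementwise, and x == 0 maps to 1.0
# rather than -inf.
import numpy as np

x = np.array([1.0, -np.e, 0.0])
tmp = np.ones(x.shape)
nonzero_x = x != 0
tmp[nonzero_x] = np.log(np.abs(x[nonzero_x]))
print(tmp)                    # [0. 1. 1.]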
lacava/few
few/evaluation.py
r2_score_vec
def r2_score_vec(y_true,y_pred):
    """ returns non-aggregate version of r2 score.

    based on r2_score() function from sklearn (http://sklearn.org)
    """
    numerator = (y_true - y_pred) ** 2
    denominator = (y_true - np.average(y_true)) ** 2
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    output_scores = np.ones([y_true.shape[0]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    return output_scores
python
def r2_score_vec(y_true,y_pred):
    """ returns non-aggregate version of r2 score.

    based on r2_score() function from sklearn (http://sklearn.org)
    """
    numerator = (y_true - y_pred) ** 2
    denominator = (y_true - np.average(y_true)) ** 2
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    output_scores = np.ones([y_true.shape[0]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    return output_scores
[ "def", "r2_score_vec", "(", "y_true", ",", "y_pred", ")", ":", "numerator", "=", "(", "y_true", "-", "y_pred", ")", "**", "2", "denominator", "=", "(", "y_true", "-", "np", ".", "average", "(", "y_true", ")", ")", "**", "2", "nonzero_denominator", "=", "denominator", "!=", "0", "nonzero_numerator", "=", "numerator", "!=", "0", "valid_score", "=", "nonzero_denominator", "&", "nonzero_numerator", "output_scores", "=", "np", ".", "ones", "(", "[", "y_true", ".", "shape", "[", "0", "]", "]", ")", "output_scores", "[", "valid_score", "]", "=", "1", "-", "(", "numerator", "[", "valid_score", "]", "/", "denominator", "[", "valid_score", "]", ")", "# arbitrary set to zero to avoid -inf scores, having a constant", "# y_true is not interesting for scoring a regression anyway", "output_scores", "[", "nonzero_numerator", "&", "~", "nonzero_denominator", "]", "=", "0.", "return", "output_scores" ]
returns non-aggregate version of r2 score. based on r2_score() function from sklearn (http://sklearn.org)
[ "returns", "non", "-", "aggregate", "version", "of", "r2", "score", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L35-L54
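A worked numeric sketch of the per-sample scores produced above:

# Valid entries are 1 - residual^2/deviation^2; a zero residual keeps the
# initial 1.0, and a nonzero residual over a zero deviation is pinned to 0.
import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.0, 2.5, 3.0])
num = (y_true - y_pred) ** 2          # [0.  , 0.25, 0.  ]
den = (y_true - y_true.mean()) ** 2   # [1.  , 0.  , 1.  ]
# samples 0 and 2: zero residual -> score stays 1.0
# sample 1: den == 0 with num != 0 -> pinned to 0.0
# resulting vector: [1., 0., 1.]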
lacava/few
few/evaluation.py
inertia
def inertia(X,y,samples=False):
    """ return the within-class squared distance from the centroid"""
    # pdb.set_trace()
    if samples:
        # return within-class distance for each sample
        inertia = np.zeros(y.shape)
        for label in np.unique(y):
            inertia[y==label] = (X[y==label] - np.mean(X[y==label])) ** 2
    else:
        # return aggregate score
        inertia = 0
        for i,label in enumerate(np.unique(y)):
            inertia += np.sum((X[y==label] -
                               np.mean(X[y==label])) ** 2)/len(y[y==label])
        inertia = inertia/len(np.unique(y))
    return inertia
python
def inertia(X,y,samples=False):
    """ return the within-class squared distance from the centroid"""
    # pdb.set_trace()
    if samples:
        # return within-class distance for each sample
        inertia = np.zeros(y.shape)
        for label in np.unique(y):
            inertia[y==label] = (X[y==label] - np.mean(X[y==label])) ** 2
    else:
        # return aggregate score
        inertia = 0
        for i,label in enumerate(np.unique(y)):
            inertia += np.sum((X[y==label] -
                               np.mean(X[y==label])) ** 2)/len(y[y==label])
        inertia = inertia/len(np.unique(y))
    return inertia
[ "def", "inertia", "(", "X", ",", "y", ",", "samples", "=", "False", ")", ":", "# pdb.set_trace()", "if", "samples", ":", "# return within-class distance for each sample", "inertia", "=", "np", ".", "zeros", "(", "y", ".", "shape", ")", "for", "label", "in", "np", ".", "unique", "(", "y", ")", ":", "inertia", "[", "y", "==", "label", "]", "=", "(", "X", "[", "y", "==", "label", "]", "-", "np", ".", "mean", "(", "X", "[", "y", "==", "label", "]", ")", ")", "**", "2", "else", ":", "# return aggregate score", "inertia", "=", "0", "for", "i", ",", "label", "in", "enumerate", "(", "np", ".", "unique", "(", "y", ")", ")", ":", "inertia", "+=", "np", ".", "sum", "(", "(", "X", "[", "y", "==", "label", "]", "-", "np", ".", "mean", "(", "X", "[", "y", "==", "label", "]", ")", ")", "**", "2", ")", "/", "len", "(", "y", "[", "y", "==", "label", "]", ")", "inertia", "=", "inertia", "/", "len", "(", "np", ".", "unique", "(", "y", ")", ")", "return", "inertia" ]
return the within-class squared distance from the centroid
[ "return", "the", "within", "-", "class", "squared", "distance", "from", "the", "centroid" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L232-L247
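The aggregate branch above reduces to a short loop; here is a self-contained sketch on a hypothetical 1-D engineered feature X and binary labels y: average squared distance to the own-class centroid, then averaged over classes.

import numpy as np

X = np.array([0.0, 1.0, 4.0, 6.0])      # hypothetical feature values
y = np.array([0, 0, 1, 1])

agg = 0.0
for label in np.unique(y):
    members = X[y == label]
    agg += np.sum((members - members.mean()) ** 2) / len(members)
agg /= len(np.unique(y))
print(agg)   # 0.625: mean within-class squared distance from the centroid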
lacava/few
few/evaluation.py
separation
def separation(X,y,samples=False): """ return the sum of the between-class squared distance""" # pdb.set_trace() num_classes = len(np.unique(y)) total_dist = (X.max()-X.min())**2 if samples: # return intra-class distance for each sample separation = np.zeros(y.shape) for label in np.unique(y): for outsider in np.unique(y[y!=label]): separation[y==label] += (X[y==label] - np.mean(X[y==outsider])) ** 2 #normalize between 0 and 1 print('separation:',separation) print('num_classes:',num_classes) print('total_dist:',total_dist) separation = separation#/separation.max() print('separation after normalization:',separation) else: # return aggregate score separation = 0 for i,label in enumerate(np.unique(y)): for outsider in np.unique(y[y!=label]): separation += np.sum((X[y==label] - np.mean(X[y==outsider])) ** 2)/len(y[y==label]) separation = separation/len(np.unique(y)) return separation
python
def separation(X,y,samples=False): """ return the sum of the between-class squared distance""" # pdb.set_trace() num_classes = len(np.unique(y)) total_dist = (X.max()-X.min())**2 if samples: # return intra-class distance for each sample separation = np.zeros(y.shape) for label in np.unique(y): for outsider in np.unique(y[y!=label]): separation[y==label] += (X[y==label] - np.mean(X[y==outsider])) ** 2 #normalize between 0 and 1 print('separation:',separation) print('num_classes:',num_classes) print('total_dist:',total_dist) separation = separation#/separation.max() print('separation after normalization:',separation) else: # return aggregate score separation = 0 for i,label in enumerate(np.unique(y)): for outsider in np.unique(y[y!=label]): separation += np.sum((X[y==label] - np.mean(X[y==outsider])) ** 2)/len(y[y==label]) separation = separation/len(np.unique(y)) return separation
[ "def", "separation", "(", "X", ",", "y", ",", "samples", "=", "False", ")", ":", "# pdb.set_trace()", "num_classes", "=", "len", "(", "np", ".", "unique", "(", "y", ")", ")", "total_dist", "=", "(", "X", ".", "max", "(", ")", "-", "X", ".", "min", "(", ")", ")", "**", "2", "if", "samples", ":", "# return intra-class distance for each sample", "separation", "=", "np", ".", "zeros", "(", "y", ".", "shape", ")", "for", "label", "in", "np", ".", "unique", "(", "y", ")", ":", "for", "outsider", "in", "np", ".", "unique", "(", "y", "[", "y", "!=", "label", "]", ")", ":", "separation", "[", "y", "==", "label", "]", "+=", "(", "X", "[", "y", "==", "label", "]", "-", "np", ".", "mean", "(", "X", "[", "y", "==", "outsider", "]", ")", ")", "**", "2", "#normalize between 0 and 1", "print", "(", "'separation:'", ",", "separation", ")", "print", "(", "'num_classes:'", ",", "num_classes", ")", "print", "(", "'total_dist:'", ",", "total_dist", ")", "separation", "=", "separation", "#/separation.max()", "print", "(", "'separation after normalization:'", ",", "separation", ")", "else", ":", "# return aggregate score", "separation", "=", "0", "for", "i", ",", "label", "in", "enumerate", "(", "np", ".", "unique", "(", "y", ")", ")", ":", "for", "outsider", "in", "np", ".", "unique", "(", "y", "[", "y", "!=", "label", "]", ")", ":", "separation", "+=", "np", ".", "sum", "(", "(", "X", "[", "y", "==", "label", "]", "-", "np", ".", "mean", "(", "X", "[", "y", "==", "outsider", "]", ")", ")", "**", "2", ")", "/", "len", "(", "y", "[", "y", "==", "label", "]", ")", "separation", "=", "separation", "/", "len", "(", "np", ".", "unique", "(", "y", ")", ")", "return", "separation" ]
return the sum of the between-class squared distance
[ "return", "the", "sum", "of", "the", "between", "-", "class", "squared", "distance" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L249-L277
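A sketch of the aggregate branch of separation on the same toy data, assuming the debugging prints and the commented-out normalization in the stored function are incidental: each class is scored by its mean squared distance to every other class's centroid.

import numpy as np

X = np.array([0.0, 1.0, 4.0, 6.0])
y = np.array([0, 0, 1, 1])

sep = 0.0
for label in np.unique(y):
    for outsider in np.unique(y[y != label]):
        centroid = X[y == outsider].mean()
        sep += np.sum((X[y == label] - centroid) ** 2) / np.sum(y == label)
sep /= len(np.unique(y))
print(sep)   # 20.875: grows as the class clusters move apart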
lacava/few
few/evaluation.py
fisher
def fisher(yhat,y,samples=False): """Fisher criterion""" classes = np.unique(y) mu = np.zeros(len(classes)) v = np.zeros(len(classes)) # pdb.set_trace() for c in classes.astype(int): mu[c] = np.mean(yhat[y==c]) v[c] = np.var(yhat[y==c]) if not samples: fisher = 0 for c1,c2 in pairwise(classes.astype(int)): fisher += np.abs(mu[c1] - mu[c2])/np.sqrt(v[c1]+v[c2]) else: # lexicase version fisher = np.zeros(len(yhat)) # get closests classes to each class (min mu distance) mu_d = pairwise_distances(mu.reshape(-1,1)) min_mu=np.zeros(len(classes),dtype=int) for i in np.arange(len(min_mu)): min_mu[i] = np.argsort(mu_d[i])[1] # for c1, pairwise(classes.astype(int)): # min_mu[c1] = np.argmin() for i,l in enumerate(yhat.astype(int)): fisher[i] = np.abs(l - mu[min_mu[y[i]]])/np.sqrt(v[y[i]]+v[min_mu[y[i]]]) # pdb.set_trace() return fisher
python
def fisher(yhat,y,samples=False): """Fisher criterion""" classes = np.unique(y) mu = np.zeros(len(classes)) v = np.zeros(len(classes)) # pdb.set_trace() for c in classes.astype(int): mu[c] = np.mean(yhat[y==c]) v[c] = np.var(yhat[y==c]) if not samples: fisher = 0 for c1,c2 in pairwise(classes.astype(int)): fisher += np.abs(mu[c1] - mu[c2])/np.sqrt(v[c1]+v[c2]) else: # lexicase version fisher = np.zeros(len(yhat)) # get closests classes to each class (min mu distance) mu_d = pairwise_distances(mu.reshape(-1,1)) min_mu=np.zeros(len(classes),dtype=int) for i in np.arange(len(min_mu)): min_mu[i] = np.argsort(mu_d[i])[1] # for c1, pairwise(classes.astype(int)): # min_mu[c1] = np.argmin() for i,l in enumerate(yhat.astype(int)): fisher[i] = np.abs(l - mu[min_mu[y[i]]])/np.sqrt(v[y[i]]+v[min_mu[y[i]]]) # pdb.set_trace() return fisher
[ "def", "fisher", "(", "yhat", ",", "y", ",", "samples", "=", "False", ")", ":", "classes", "=", "np", ".", "unique", "(", "y", ")", "mu", "=", "np", ".", "zeros", "(", "len", "(", "classes", ")", ")", "v", "=", "np", ".", "zeros", "(", "len", "(", "classes", ")", ")", "# pdb.set_trace()", "for", "c", "in", "classes", ".", "astype", "(", "int", ")", ":", "mu", "[", "c", "]", "=", "np", ".", "mean", "(", "yhat", "[", "y", "==", "c", "]", ")", "v", "[", "c", "]", "=", "np", ".", "var", "(", "yhat", "[", "y", "==", "c", "]", ")", "if", "not", "samples", ":", "fisher", "=", "0", "for", "c1", ",", "c2", "in", "pairwise", "(", "classes", ".", "astype", "(", "int", ")", ")", ":", "fisher", "+=", "np", ".", "abs", "(", "mu", "[", "c1", "]", "-", "mu", "[", "c2", "]", ")", "/", "np", ".", "sqrt", "(", "v", "[", "c1", "]", "+", "v", "[", "c2", "]", ")", "else", ":", "# lexicase version", "fisher", "=", "np", ".", "zeros", "(", "len", "(", "yhat", ")", ")", "# get closests classes to each class (min mu distance)", "mu_d", "=", "pairwise_distances", "(", "mu", ".", "reshape", "(", "-", "1", ",", "1", ")", ")", "min_mu", "=", "np", ".", "zeros", "(", "len", "(", "classes", ")", ",", "dtype", "=", "int", ")", "for", "i", "in", "np", ".", "arange", "(", "len", "(", "min_mu", ")", ")", ":", "min_mu", "[", "i", "]", "=", "np", ".", "argsort", "(", "mu_d", "[", "i", "]", ")", "[", "1", "]", "# for c1, pairwise(classes.astype(int)):", "# min_mu[c1] = np.argmin()", "for", "i", ",", "l", "in", "enumerate", "(", "yhat", ".", "astype", "(", "int", ")", ")", ":", "fisher", "[", "i", "]", "=", "np", ".", "abs", "(", "l", "-", "mu", "[", "min_mu", "[", "y", "[", "i", "]", "]", "]", ")", "/", "np", ".", "sqrt", "(", "v", "[", "y", "[", "i", "]", "]", "+", "v", "[", "min_mu", "[", "y", "[", "i", "]", "]", "]", ")", "# pdb.set_trace()", "return", "fisher" ]
Fisher criterion
[ "Fisher", "criterion" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L286-L314
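The aggregate branch of the Fisher criterion in a standalone sketch. The stored function iterates a pairwise() helper over consecutive class labels, which zip(classes, classes[1:]) reproduces here; for two classes that is the single pair |mu0 - mu1| / sqrt(v0 + v1). The data is made up for illustration.

import numpy as np

yhat = np.array([0.1, 0.2, 0.9, 1.1])   # a program's outputs
y = np.array([0, 0, 1, 1])              # class labels

classes = np.unique(y).astype(int)
mu = np.array([yhat[y == c].mean() for c in classes])
v = np.array([yhat[y == c].var() for c in classes])
score = sum(np.abs(mu[c1] - mu[c2]) / np.sqrt(v[c1] + v[c2])
            for c1, c2 in zip(classes, classes[1:]))   # consecutive class pairs
print(score)   # ~7.6: well-separated, low-variance class means score high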
lacava/few
few/evaluation.py
EvaluationMixin.proper
def proper(self,x): """cleans fitness vector""" x[x < 0] = self.max_fit x[np.isnan(x)] = self.max_fit x[np.isinf(x)] = self.max_fit return x
python
def proper(self,x): """cleans fitness vector""" x[x < 0] = self.max_fit x[np.isnan(x)] = self.max_fit x[np.isinf(x)] = self.max_fit return x
[ "def", "proper", "(", "self", ",", "x", ")", ":", "x", "[", "x", "<", "0", "]", "=", "self", ".", "max_fit", "x", "[", "np", ".", "isnan", "(", "x", ")", "]", "=", "self", ".", "max_fit", "x", "[", "np", ".", "isinf", "(", "x", ")", "]", "=", "self", ".", "max_fit", "return", "x" ]
cleans fitness vector
[ "cleans", "fitness", "vector" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L149-L154
lacava/few
few/evaluation.py
EvaluationMixin.safe
def safe(self,x): """removes nans and infs from outputs.""" x[np.isinf(x)] = 1 x[np.isnan(x)] = 1 return x
python
def safe(self,x): """removes nans and infs from outputs.""" x[np.isinf(x)] = 1 x[np.isnan(x)] = 1 return x
[ "def", "safe", "(", "self", ",", "x", ")", ":", "x", "[", "np", ".", "isinf", "(", "x", ")", "]", "=", "1", "x", "[", "np", ".", "isnan", "(", "x", ")", "]", "=", "1", "return", "x" ]
removes nans and infs from outputs.
[ "removes", "nans", "and", "infs", "from", "outputs", "." ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L155-L159
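proper and safe above are two sides of the same guard; this sketch, with a hypothetical max_fit sentinel standing in for the estimator attribute, shows the intent: fitness vectors are clamped to the worst possible fitness, while program outputs just replace nan/inf with 1 so downstream operators keep working.

import numpy as np

MAX_FIT = 99999.0   # hypothetical sentinel; the real value lives on the estimator

def proper(x, max_fit=MAX_FIT):
    x[x < 0] = max_fit           # negative fitness is treated as invalid
    x[np.isnan(x)] = max_fit
    x[np.isinf(x)] = max_fit
    return x

def safe(x):
    x[np.isinf(x)] = 1           # keep outputs finite for later operators
    x[np.isnan(x)] = 1
    return x

print(proper(np.array([0.5, -1.0, np.nan, np.inf])))  # invalid entries become 99999.0
print(safe(np.array([2.0, np.nan, np.inf])))          # [2. 1. 1.]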
lacava/few
few/evaluation.py
EvaluationMixin.evaluate
def evaluate(self,n, features, stack_float, stack_bool,labels=None): """evaluate node in program""" np.seterr(all='ignore') if len(stack_float) >= n.arity['f'] and len(stack_bool) >= n.arity['b']: if n.out_type == 'f': stack_float.append( self.safe(self.eval_dict[n.name](n,features,stack_float, stack_bool,labels))) if (np.isnan(stack_float[-1]).any() or np.isinf(stack_float[-1]).any()): print("problem operator:",n) else: stack_bool.append(self.safe(self.eval_dict[n.name](n,features, stack_float, stack_bool, labels))) if np.isnan(stack_bool[-1]).any() or np.isinf(stack_bool[-1]).any(): print("problem operator:",n)
python
def evaluate(self,n, features, stack_float, stack_bool,labels=None): """evaluate node in program""" np.seterr(all='ignore') if len(stack_float) >= n.arity['f'] and len(stack_bool) >= n.arity['b']: if n.out_type == 'f': stack_float.append( self.safe(self.eval_dict[n.name](n,features,stack_float, stack_bool,labels))) if (np.isnan(stack_float[-1]).any() or np.isinf(stack_float[-1]).any()): print("problem operator:",n) else: stack_bool.append(self.safe(self.eval_dict[n.name](n,features, stack_float, stack_bool, labels))) if np.isnan(stack_bool[-1]).any() or np.isinf(stack_bool[-1]).any(): print("problem operator:",n)
[ "def", "evaluate", "(", "self", ",", "n", ",", "features", ",", "stack_float", ",", "stack_bool", ",", "labels", "=", "None", ")", ":", "np", ".", "seterr", "(", "all", "=", "'ignore'", ")", "if", "len", "(", "stack_float", ")", ">=", "n", ".", "arity", "[", "'f'", "]", "and", "len", "(", "stack_bool", ")", ">=", "n", ".", "arity", "[", "'b'", "]", ":", "if", "n", ".", "out_type", "==", "'f'", ":", "stack_float", ".", "append", "(", "self", ".", "safe", "(", "self", ".", "eval_dict", "[", "n", ".", "name", "]", "(", "n", ",", "features", ",", "stack_float", ",", "stack_bool", ",", "labels", ")", ")", ")", "if", "(", "np", ".", "isnan", "(", "stack_float", "[", "-", "1", "]", ")", ".", "any", "(", ")", "or", "np", ".", "isinf", "(", "stack_float", "[", "-", "1", "]", ")", ".", "any", "(", ")", ")", ":", "print", "(", "\"problem operator:\"", ",", "n", ")", "else", ":", "stack_bool", ".", "append", "(", "self", ".", "safe", "(", "self", ".", "eval_dict", "[", "n", ".", "name", "]", "(", "n", ",", "features", ",", "stack_float", ",", "stack_bool", ",", "labels", ")", ")", ")", "if", "np", ".", "isnan", "(", "stack_bool", "[", "-", "1", "]", ")", ".", "any", "(", ")", "or", "np", ".", "isinf", "(", "stack_bool", "[", "-", "1", "]", ")", ".", "any", "(", ")", ":", "print", "(", "\"problem operator:\"", ",", "n", ")" ]
evaluate node in program
[ "evaluate", "node", "in", "program" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L161-L178
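A toy version of the typed-stack dispatch above, with made-up node names and arities rather than FEW's actual node classes: a node fires only when enough float operands are on the stack, and its result is pushed back for the next node to consume.

import numpy as np

def eval_node(name, arity, features, stack_float):
    if len(stack_float) >= arity:               # mirror the arity guard above
        if name == 'x0':
            stack_float.append(features[:, 0])  # terminal: push a feature column
        elif name == '+':
            b, a = stack_float.pop(), stack_float.pop()
            stack_float.append(a + b)           # function: pop operands, push result

features = np.array([[1.0], [2.0], [3.0]])
stack = []
for name, arity in [('x0', 0), ('x0', 0), ('+', 2)]:
    eval_node(name, arity, features, stack)
print(stack[-1])   # [2. 4. 6.]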
lacava/few
few/evaluation.py
EvaluationMixin.all_finite
def all_finite(self,X): """returns true if X is finite, false, otherwise""" # Adapted from sklearn utils: _assert_all_finite(X) # First try an O(n) time, O(1) space solution for the common case that # everything is finite; fall back to O(n) space np.isfinite to prevent # false positives from overflow in sum method. # Note: this is basically here because sklearn tree.py uses float32 internally, # and float64's that are finite are not finite in float32. if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(np.asarray(X,dtype='float32').sum()) and not np.isfinite(np.asarray(X,dtype='float32')).all()): return False return True
python
def all_finite(self,X): """returns true if X is finite, false, otherwise""" # Adapted from sklearn utils: _assert_all_finite(X) # First try an O(n) time, O(1) space solution for the common case that # everything is finite; fall back to O(n) space np.isfinite to prevent # false positives from overflow in sum method. # Note: this is basically here because sklearn tree.py uses float32 internally, # and float64's that are finite are not finite in float32. if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(np.asarray(X,dtype='float32').sum()) and not np.isfinite(np.asarray(X,dtype='float32')).all()): return False return True
[ "def", "all_finite", "(", "self", ",", "X", ")", ":", "# Adapted from sklearn utils: _assert_all_finite(X)", "# First try an O(n) time, O(1) space solution for the common case that", "# everything is finite; fall back to O(n) space np.isfinite to prevent", "# false positives from overflow in sum method.", "# Note: this is basically here because sklearn tree.py uses float32 internally,", "# and float64's that are finite are not finite in float32.", "if", "(", "X", ".", "dtype", ".", "char", "in", "np", ".", "typecodes", "[", "'AllFloat'", "]", "and", "not", "np", ".", "isfinite", "(", "np", ".", "asarray", "(", "X", ",", "dtype", "=", "'float32'", ")", ".", "sum", "(", ")", ")", "and", "not", "np", ".", "isfinite", "(", "np", ".", "asarray", "(", "X", ",", "dtype", "=", "'float32'", ")", ")", ".", "all", "(", ")", ")", ":", "return", "False", "return", "True" ]
returns true if X is finite, false, otherwise
[ "returns", "true", "if", "X", "is", "finite", "false", "otherwise" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L180-L192
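The float32 subtlety that motivates all_finite, demonstrated directly: every element fits in float32, yet the float32 sum overflows, which is exactly the overflow false positive the O(1)-space sum check must guard against with the full np.isfinite fallback.

import numpy as np

X = np.array([3e38, 3e38])                       # finite, even as float32
x32 = X.astype('float32')
print(np.isfinite(x32).all())                    # True: each element fits
print(np.isfinite(x32.sum()))                    # False: the float32 sum overflows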
lacava/few
few/evaluation.py
EvaluationMixin.out
def out(self,I,features,labels=None,otype='f'): """computes the output for individual I""" stack_float = [] stack_bool = [] # print("stack:",I.stack) # evaulate stack over rows of features,labels # pdb.set_trace() for n in I.stack: self.evaluate(n,features,stack_float,stack_bool,labels) # print("stack_float:",stack_float) if otype=='f': return (stack_float[-1] if self.all_finite(stack_float[-1]) else np.zeros(len(features))) else: return (stack_bool[-1].astype(float) if self.all_finite(stack_bool[-1]) else np.zeros(len(features)))
python
def out(self,I,features,labels=None,otype='f'): """computes the output for individual I""" stack_float = [] stack_bool = [] # print("stack:",I.stack) # evaulate stack over rows of features,labels # pdb.set_trace() for n in I.stack: self.evaluate(n,features,stack_float,stack_bool,labels) # print("stack_float:",stack_float) if otype=='f': return (stack_float[-1] if self.all_finite(stack_float[-1]) else np.zeros(len(features))) else: return (stack_bool[-1].astype(float) if self.all_finite(stack_bool[-1]) else np.zeros(len(features)))
[ "def", "out", "(", "self", ",", "I", ",", "features", ",", "labels", "=", "None", ",", "otype", "=", "'f'", ")", ":", "stack_float", "=", "[", "]", "stack_bool", "=", "[", "]", "# print(\"stack:\",I.stack)", "# evaulate stack over rows of features,labels", "# pdb.set_trace()", "for", "n", "in", "I", ".", "stack", ":", "self", ".", "evaluate", "(", "n", ",", "features", ",", "stack_float", ",", "stack_bool", ",", "labels", ")", "# print(\"stack_float:\",stack_float)", "if", "otype", "==", "'f'", ":", "return", "(", "stack_float", "[", "-", "1", "]", "if", "self", ".", "all_finite", "(", "stack_float", "[", "-", "1", "]", ")", "else", "np", ".", "zeros", "(", "len", "(", "features", ")", ")", ")", "else", ":", "return", "(", "stack_bool", "[", "-", "1", "]", ".", "astype", "(", "float", ")", "if", "self", ".", "all_finite", "(", "stack_bool", "[", "-", "1", "]", ")", "else", "np", ".", "zeros", "(", "len", "(", "features", ")", ")", ")" ]
computes the output for individual I
[ "computes", "the", "output", "for", "individual", "I" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L194-L209
lacava/few
few/evaluation.py
EvaluationMixin.calc_fitness
def calc_fitness(self,X,labels,fit_choice,sel): """computes fitness of individual output yhat. yhat: output of a program. labels: correct outputs fit_choice: choice of fitness function """ if 'lexicase' in sel: # return list(map(lambda yhat: self.f_vec[fit_choice](labels,yhat),X)) return np.asarray( [self.proper(self.f_vec[fit_choice](labels, yhat)) for yhat in X], order='F') # return list(Parallel(n_jobs=-1)(delayed(self.f_vec[fit_choice])(labels,yhat) for yhat in X)) else: # return list(map(lambda yhat: self.f[fit_choice](labels,yhat),X)) return np.asarray([self.f[fit_choice](labels,yhat) for yhat in X], order='F').reshape(-1)
python
def calc_fitness(self,X,labels,fit_choice,sel): """computes fitness of individual output yhat. yhat: output of a program. labels: correct outputs fit_choice: choice of fitness function """ if 'lexicase' in sel: # return list(map(lambda yhat: self.f_vec[fit_choice](labels,yhat),X)) return np.asarray( [self.proper(self.f_vec[fit_choice](labels, yhat)) for yhat in X], order='F') # return list(Parallel(n_jobs=-1)(delayed(self.f_vec[fit_choice])(labels,yhat) for yhat in X)) else: # return list(map(lambda yhat: self.f[fit_choice](labels,yhat),X)) return np.asarray([self.f[fit_choice](labels,yhat) for yhat in X], order='F').reshape(-1)
[ "def", "calc_fitness", "(", "self", ",", "X", ",", "labels", ",", "fit_choice", ",", "sel", ")", ":", "if", "'lexicase'", "in", "sel", ":", "# return list(map(lambda yhat: self.f_vec[fit_choice](labels,yhat),X))", "return", "np", ".", "asarray", "(", "[", "self", ".", "proper", "(", "self", ".", "f_vec", "[", "fit_choice", "]", "(", "labels", ",", "yhat", ")", ")", "for", "yhat", "in", "X", "]", ",", "order", "=", "'F'", ")", "# return list(Parallel(n_jobs=-1)(delayed(self.f_vec[fit_choice])(labels,yhat) for yhat in X))", "else", ":", "# return list(map(lambda yhat: self.f[fit_choice](labels,yhat),X))", "return", "np", ".", "asarray", "(", "[", "self", ".", "f", "[", "fit_choice", "]", "(", "labels", ",", "yhat", ")", "for", "yhat", "in", "X", "]", ",", "order", "=", "'F'", ")", ".", "reshape", "(", "-", "1", ")" ]
computes fitness of individual output yhat. yhat: output of a program. labels: correct outputs fit_choice: choice of fitness function
[ "computes", "fitness", "of", "individual", "output", "yhat", ".", "yhat", ":", "output", "of", "a", "program", ".", "labels", ":", "correct", "outputs", "fit_choice", ":", "choice", "of", "fitness", "function" ]
train
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/evaluation.py#L211-L228
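A sketch of the two return shapes, with mse standing in (as an assumption) for the estimator's f / f_vec fitness dictionaries: aggregate selection gets one score per program, lexicase selection gets a programs-by-samples matrix of per-case errors.

import numpy as np

labels = np.array([1.0, 2.0, 3.0])
X = [np.array([1.0, 2.0, 4.0]),                  # yhat of program 1
     np.array([0.0, 2.0, 3.0])]                  # yhat of program 2

mse = lambda y, yhat: np.mean((y - yhat) ** 2)
mse_vec = lambda y, yhat: (y - yhat) ** 2

agg = np.asarray([mse(labels, yhat) for yhat in X])                 # shape (2,)
lex = np.asarray([mse_vec(labels, yhat) for yhat in X], order='F')  # shape (2, 3)
print(agg)   # one fitness per program
print(lex)   # one error per (program, training case)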
nion-software/nionswift-io
nionswift_plugin/DM_IO/dm3_image_utils.py
imagedatadict_to_ndarray
def imagedatadict_to_ndarray(imdict): """ Converts the ImageData dictionary, imdict, to an nd image. """ arr = imdict['Data'] im = None if isinstance(arr, parse_dm3.array.array): im = numpy.asarray(arr, dtype=arr.typecode) elif isinstance(arr, parse_dm3.structarray): t = tuple(arr.typecodes) im = numpy.frombuffer( arr.raw_data, dtype=structarray_to_np_map[t]) # print "Image has dmimagetype", imdict["DataType"], "numpy type is", im.dtype assert dm_image_dtypes[imdict["DataType"]][1] == im.dtype assert imdict['PixelDepth'] == im.dtype.itemsize im = im.reshape(imdict['Dimensions'][::-1]) if imdict["DataType"] == 23: # RGB im = im.view(numpy.uint8).reshape(im.shape + (-1, ))[..., :-1] # strip A # NOTE: RGB -> BGR would be [:, :, ::-1] return im
python
def imagedatadict_to_ndarray(imdict): """ Converts the ImageData dictionary, imdict, to an nd image. """ arr = imdict['Data'] im = None if isinstance(arr, parse_dm3.array.array): im = numpy.asarray(arr, dtype=arr.typecode) elif isinstance(arr, parse_dm3.structarray): t = tuple(arr.typecodes) im = numpy.frombuffer( arr.raw_data, dtype=structarray_to_np_map[t]) # print "Image has dmimagetype", imdict["DataType"], "numpy type is", im.dtype assert dm_image_dtypes[imdict["DataType"]][1] == im.dtype assert imdict['PixelDepth'] == im.dtype.itemsize im = im.reshape(imdict['Dimensions'][::-1]) if imdict["DataType"] == 23: # RGB im = im.view(numpy.uint8).reshape(im.shape + (-1, ))[..., :-1] # strip A # NOTE: RGB -> BGR would be [:, :, ::-1] return im
[ "def", "imagedatadict_to_ndarray", "(", "imdict", ")", ":", "arr", "=", "imdict", "[", "'Data'", "]", "im", "=", "None", "if", "isinstance", "(", "arr", ",", "parse_dm3", ".", "array", ".", "array", ")", ":", "im", "=", "numpy", ".", "asarray", "(", "arr", ",", "dtype", "=", "arr", ".", "typecode", ")", "elif", "isinstance", "(", "arr", ",", "parse_dm3", ".", "structarray", ")", ":", "t", "=", "tuple", "(", "arr", ".", "typecodes", ")", "im", "=", "numpy", ".", "frombuffer", "(", "arr", ".", "raw_data", ",", "dtype", "=", "structarray_to_np_map", "[", "t", "]", ")", "# print \"Image has dmimagetype\", imdict[\"DataType\"], \"numpy type is\", im.dtype", "assert", "dm_image_dtypes", "[", "imdict", "[", "\"DataType\"", "]", "]", "[", "1", "]", "==", "im", ".", "dtype", "assert", "imdict", "[", "'PixelDepth'", "]", "==", "im", ".", "dtype", ".", "itemsize", "im", "=", "im", ".", "reshape", "(", "imdict", "[", "'Dimensions'", "]", "[", ":", ":", "-", "1", "]", ")", "if", "imdict", "[", "\"DataType\"", "]", "==", "23", ":", "# RGB", "im", "=", "im", ".", "view", "(", "numpy", ".", "uint8", ")", ".", "reshape", "(", "im", ".", "shape", "+", "(", "-", "1", ",", ")", ")", "[", "...", ",", ":", "-", "1", "]", "# strip A", "# NOTE: RGB -> BGR would be [:, :, ::-1]", "return", "im" ]
Converts the ImageData dictionary, imdict, to an nd image.
[ "Converts", "the", "ImageData", "dictionary", "imdict", "to", "an", "nd", "image", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/DM_IO/dm3_image_utils.py#L69-L89
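The RGB unpacking at the end of the function is the non-obvious step; in isolation, with a dummy packed image standing in for imdict['Data']: each uint32 pixel is reinterpreted as four uint8 channels and the alpha byte is stripped.

import numpy as np

packed = np.zeros((2, 2), dtype=np.uint32)            # stand-in for a DataType-23 image
rgba = packed.view(np.uint8).reshape(packed.shape + (-1,))
rgb = rgba[..., :-1]                                  # strip A, as in the code above
print(rgba.shape, rgb.shape)                          # (2, 2, 4) (2, 2, 3)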
nion-software/nionswift-io
nionswift_plugin/DM_IO/dm3_image_utils.py
ndarray_to_imagedatadict
def ndarray_to_imagedatadict(nparr): """ Convert the numpy array nparr into a suitable ImageList entry dictionary. Returns a dictionary with the appropriate Data, DataType, PixelDepth to be inserted into a dm3 tag dictionary and written to a file. """ ret = {} dm_type = None for k, v in iter(dm_image_dtypes.items()): if v[1] == nparr.dtype.type: dm_type = k break if dm_type is None and nparr.dtype == numpy.uint8 and nparr.shape[-1] in (3, 4): ret["DataType"] = 23 ret["PixelDepth"] = 4 if nparr.shape[2] == 4: rgb_view = nparr.view(numpy.int32).reshape(nparr.shape[:-1]) # squash the color into uint32 else: assert nparr.shape[2] == 3 rgba_image = numpy.empty(nparr.shape[:-1] + (4,), numpy.uint8) rgba_image[:,:,0:3] = nparr rgba_image[:,:,3] = 255 rgb_view = rgba_image.view(numpy.int32).reshape(rgba_image.shape[:-1]) # squash the color into uint32 ret["Dimensions"] = list(rgb_view.shape[::-1]) ret["Data"] = parse_dm3.array.array(platform_independent_char(rgb_view.dtype), rgb_view.flatten()) else: ret["DataType"] = dm_type ret["PixelDepth"] = nparr.dtype.itemsize ret["Dimensions"] = list(nparr.shape[::-1]) if nparr.dtype.type in np_to_structarray_map: types = np_to_structarray_map[nparr.dtype.type] ret["Data"] = parse_dm3.structarray(types) ret["Data"].raw_data = bytes(numpy.array(nparr, copy=False).data) else: ret["Data"] = parse_dm3.array.array(platform_independent_char(nparr.dtype), numpy.array(nparr, copy=False).flatten()) return ret
python
def ndarray_to_imagedatadict(nparr): """ Convert the numpy array nparr into a suitable ImageList entry dictionary. Returns a dictionary with the appropriate Data, DataType, PixelDepth to be inserted into a dm3 tag dictionary and written to a file. """ ret = {} dm_type = None for k, v in iter(dm_image_dtypes.items()): if v[1] == nparr.dtype.type: dm_type = k break if dm_type is None and nparr.dtype == numpy.uint8 and nparr.shape[-1] in (3, 4): ret["DataType"] = 23 ret["PixelDepth"] = 4 if nparr.shape[2] == 4: rgb_view = nparr.view(numpy.int32).reshape(nparr.shape[:-1]) # squash the color into uint32 else: assert nparr.shape[2] == 3 rgba_image = numpy.empty(nparr.shape[:-1] + (4,), numpy.uint8) rgba_image[:,:,0:3] = nparr rgba_image[:,:,3] = 255 rgb_view = rgba_image.view(numpy.int32).reshape(rgba_image.shape[:-1]) # squash the color into uint32 ret["Dimensions"] = list(rgb_view.shape[::-1]) ret["Data"] = parse_dm3.array.array(platform_independent_char(rgb_view.dtype), rgb_view.flatten()) else: ret["DataType"] = dm_type ret["PixelDepth"] = nparr.dtype.itemsize ret["Dimensions"] = list(nparr.shape[::-1]) if nparr.dtype.type in np_to_structarray_map: types = np_to_structarray_map[nparr.dtype.type] ret["Data"] = parse_dm3.structarray(types) ret["Data"].raw_data = bytes(numpy.array(nparr, copy=False).data) else: ret["Data"] = parse_dm3.array.array(platform_independent_char(nparr.dtype), numpy.array(nparr, copy=False).flatten()) return ret
[ "def", "ndarray_to_imagedatadict", "(", "nparr", ")", ":", "ret", "=", "{", "}", "dm_type", "=", "None", "for", "k", ",", "v", "in", "iter", "(", "dm_image_dtypes", ".", "items", "(", ")", ")", ":", "if", "v", "[", "1", "]", "==", "nparr", ".", "dtype", ".", "type", ":", "dm_type", "=", "k", "break", "if", "dm_type", "is", "None", "and", "nparr", ".", "dtype", "==", "numpy", ".", "uint8", "and", "nparr", ".", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", ":", "ret", "[", "\"DataType\"", "]", "=", "23", "ret", "[", "\"PixelDepth\"", "]", "=", "4", "if", "nparr", ".", "shape", "[", "2", "]", "==", "4", ":", "rgb_view", "=", "nparr", ".", "view", "(", "numpy", ".", "int32", ")", ".", "reshape", "(", "nparr", ".", "shape", "[", ":", "-", "1", "]", ")", "# squash the color into uint32", "else", ":", "assert", "nparr", ".", "shape", "[", "2", "]", "==", "3", "rgba_image", "=", "numpy", ".", "empty", "(", "nparr", ".", "shape", "[", ":", "-", "1", "]", "+", "(", "4", ",", ")", ",", "numpy", ".", "uint8", ")", "rgba_image", "[", ":", ",", ":", ",", "0", ":", "3", "]", "=", "nparr", "rgba_image", "[", ":", ",", ":", ",", "3", "]", "=", "255", "rgb_view", "=", "rgba_image", ".", "view", "(", "numpy", ".", "int32", ")", ".", "reshape", "(", "rgba_image", ".", "shape", "[", ":", "-", "1", "]", ")", "# squash the color into uint32", "ret", "[", "\"Dimensions\"", "]", "=", "list", "(", "rgb_view", ".", "shape", "[", ":", ":", "-", "1", "]", ")", "ret", "[", "\"Data\"", "]", "=", "parse_dm3", ".", "array", ".", "array", "(", "platform_independent_char", "(", "rgb_view", ".", "dtype", ")", ",", "rgb_view", ".", "flatten", "(", ")", ")", "else", ":", "ret", "[", "\"DataType\"", "]", "=", "dm_type", "ret", "[", "\"PixelDepth\"", "]", "=", "nparr", ".", "dtype", ".", "itemsize", "ret", "[", "\"Dimensions\"", "]", "=", "list", "(", "nparr", ".", "shape", "[", ":", ":", "-", "1", "]", ")", "if", "nparr", ".", "dtype", ".", "type", "in", "np_to_structarray_map", ":", "types", "=", "np_to_structarray_map", "[", "nparr", ".", "dtype", ".", "type", "]", "ret", "[", "\"Data\"", "]", "=", "parse_dm3", ".", "structarray", "(", "types", ")", "ret", "[", "\"Data\"", "]", ".", "raw_data", "=", "bytes", "(", "numpy", ".", "array", "(", "nparr", ",", "copy", "=", "False", ")", ".", "data", ")", "else", ":", "ret", "[", "\"Data\"", "]", "=", "parse_dm3", ".", "array", ".", "array", "(", "platform_independent_char", "(", "nparr", ".", "dtype", ")", ",", "numpy", ".", "array", "(", "nparr", ",", "copy", "=", "False", ")", ".", "flatten", "(", ")", ")", "return", "ret" ]
Convert the numpy array nparr into a suitable ImageList entry dictionary. Returns a dictionary with the appropriate Data, DataType, PixelDepth to be inserted into a dm3 tag dictionary and written to a file.
[ "Convert", "the", "numpy", "array", "nparr", "into", "a", "suitable", "ImageList", "entry", "dictionary", ".", "Returns", "a", "dictionary", "with", "the", "appropriate", "Data", "DataType", "PixelDepth", "to", "be", "inserted", "into", "a", "dm3", "tag", "dictionary", "and", "written", "to", "a", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/DM_IO/dm3_image_utils.py#L102-L137
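The inverse RGB branch, sketched standalone on a dummy image: a (h, w, 3) uint8 array is padded with an opaque alpha channel and the four bytes of each pixel are squashed into one int32, matching the DataType 23 / PixelDepth 4 convention above.

import numpy as np

rgb = np.zeros((2, 2, 3), dtype=np.uint8)            # dummy RGB input
rgba = np.empty(rgb.shape[:-1] + (4,), np.uint8)
rgba[:, :, 0:3] = rgb
rgba[:, :, 3] = 255                                  # opaque alpha
packed = rgba.view(np.int32).reshape(rgba.shape[:-1])
print(packed.shape, packed.dtype)                    # (2, 2) int32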
nion-software/nionswift-io
nionswift_plugin/DM_IO/dm3_image_utils.py
load_image
def load_image(file) -> DataAndMetadata.DataAndMetadata: """ Loads the image from the file-like object or string file. If file is a string, the file is opened and then read. Returns a numpy ndarray of our best guess for the most important image in the file. """ if isinstance(file, str) or isinstance(file, str): with open(file, "rb") as f: return load_image(f) dmtag = parse_dm3.parse_dm_header(file) dmtag = fix_strings(dmtag) # display_keys(dmtag) img_index = -1 image_tags = dmtag['ImageList'][img_index] data = imagedatadict_to_ndarray(image_tags['ImageData']) calibrations = [] calibration_tags = image_tags['ImageData'].get('Calibrations', dict()) for dimension in calibration_tags.get('Dimension', list()): origin, scale, units = dimension.get('Origin', 0.0), dimension.get('Scale', 1.0), dimension.get('Units', str()) calibrations.append((-origin * scale, scale, units)) calibrations = tuple(reversed(calibrations)) if len(data.shape) == 3 and data.dtype != numpy.uint8: if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"): if data.shape[1] == 1: data = numpy.squeeze(data, 1) data = numpy.moveaxis(data, 0, 1) data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 1) calibrations = (calibrations[2], calibrations[0]) else: data = numpy.moveaxis(data, 0, 2) data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1) calibrations = tuple(calibrations[1:]) + (calibrations[0],) else: data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 2) elif len(data.shape) == 4 and data.dtype != numpy.uint8: # data = numpy.moveaxis(data, 0, 2) data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 2) elif data.dtype == numpy.uint8: data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape[:-1])) else: data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape)) brightness = calibration_tags.get('Brightness', dict()) origin, scale, units = brightness.get('Origin', 0.0), brightness.get('Scale', 1.0), brightness.get('Units', str()) intensity = -origin * scale, scale, units timestamp = None timezone = None timezone_offset = None title = image_tags.get('Name') properties = dict() if 'ImageTags' in image_tags: voltage = image_tags['ImageTags'].get('ImageScanned', dict()).get('EHT', dict()) if voltage: properties.setdefault("hardware_source", dict())["autostem"] = { "high_tension_v": float(voltage) } dm_metadata_signal = image_tags['ImageTags'].get('Meta Data', dict()).get('Signal') if dm_metadata_signal and dm_metadata_signal.lower() == "eels": properties.setdefault("hardware_source", dict())["signal_type"] = dm_metadata_signal if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"): data_descriptor.collection_dimension_count += data_descriptor.datum_dimension_count - 1 data_descriptor.datum_dimension_count = 1 if image_tags['ImageTags'].get('Meta Data', dict()).get("IsSequence", False) and data_descriptor.collection_dimension_count > 0: data_descriptor.is_sequence = True data_descriptor.collection_dimension_count -= 1 timestamp_str = image_tags['ImageTags'].get("Timestamp") if timestamp_str: timestamp = get_datetime_from_timestamp_str(timestamp_str) timezone = image_tags['ImageTags'].get("Timezone") timezone_offset = image_tags['ImageTags'].get("TimezoneOffset") # to avoid having duplicate copies in Swift, get rid of these tags image_tags['ImageTags'].pop("Timestamp", None) image_tags['ImageTags'].pop("Timezone", None) image_tags['ImageTags'].pop("TimezoneOffset", None) # put the image tags into properties properties.update(image_tags['ImageTags']) dimensional_calibrations = [Calibration.Calibration(c[0], c[1], c[2]) for c in calibrations] while len(dimensional_calibrations) < data_descriptor.expected_dimension_count: dimensional_calibrations.append(Calibration.Calibration()) intensity_calibration = Calibration.Calibration(intensity[0], intensity[1], intensity[2]) return DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor, dimensional_calibrations=dimensional_calibrations, intensity_calibration=intensity_calibration, metadata=properties, timestamp=timestamp, timezone=timezone, timezone_offset=timezone_offset)
python
def load_image(file) -> DataAndMetadata.DataAndMetadata: """ Loads the image from the file-like object or string file. If file is a string, the file is opened and then read. Returns a numpy ndarray of our best guess for the most important image in the file. """ if isinstance(file, str) or isinstance(file, str): with open(file, "rb") as f: return load_image(f) dmtag = parse_dm3.parse_dm_header(file) dmtag = fix_strings(dmtag) # display_keys(dmtag) img_index = -1 image_tags = dmtag['ImageList'][img_index] data = imagedatadict_to_ndarray(image_tags['ImageData']) calibrations = [] calibration_tags = image_tags['ImageData'].get('Calibrations', dict()) for dimension in calibration_tags.get('Dimension', list()): origin, scale, units = dimension.get('Origin', 0.0), dimension.get('Scale', 1.0), dimension.get('Units', str()) calibrations.append((-origin * scale, scale, units)) calibrations = tuple(reversed(calibrations)) if len(data.shape) == 3 and data.dtype != numpy.uint8: if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"): if data.shape[1] == 1: data = numpy.squeeze(data, 1) data = numpy.moveaxis(data, 0, 1) data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 1) calibrations = (calibrations[2], calibrations[0]) else: data = numpy.moveaxis(data, 0, 2) data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1) calibrations = tuple(calibrations[1:]) + (calibrations[0],) else: data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 2) elif len(data.shape) == 4 and data.dtype != numpy.uint8: # data = numpy.moveaxis(data, 0, 2) data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 2) elif data.dtype == numpy.uint8: data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape[:-1])) else: data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape)) brightness = calibration_tags.get('Brightness', dict()) origin, scale, units = brightness.get('Origin', 0.0), brightness.get('Scale', 1.0), brightness.get('Units', str()) intensity = -origin * scale, scale, units timestamp = None timezone = None timezone_offset = None title = image_tags.get('Name') properties = dict() if 'ImageTags' in image_tags: voltage = image_tags['ImageTags'].get('ImageScanned', dict()).get('EHT', dict()) if voltage: properties.setdefault("hardware_source", dict())["autostem"] = { "high_tension_v": float(voltage) } dm_metadata_signal = image_tags['ImageTags'].get('Meta Data', dict()).get('Signal') if dm_metadata_signal and dm_metadata_signal.lower() == "eels": properties.setdefault("hardware_source", dict())["signal_type"] = dm_metadata_signal if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"): data_descriptor.collection_dimension_count += data_descriptor.datum_dimension_count - 1 data_descriptor.datum_dimension_count = 1 if image_tags['ImageTags'].get('Meta Data', dict()).get("IsSequence", False) and data_descriptor.collection_dimension_count > 0: data_descriptor.is_sequence = True data_descriptor.collection_dimension_count -= 1 timestamp_str = image_tags['ImageTags'].get("Timestamp") if timestamp_str: timestamp = get_datetime_from_timestamp_str(timestamp_str) timezone = image_tags['ImageTags'].get("Timezone") timezone_offset = image_tags['ImageTags'].get("TimezoneOffset") # to avoid having duplicate copies in Swift, get rid of these tags image_tags['ImageTags'].pop("Timestamp", None) image_tags['ImageTags'].pop("Timezone", None) image_tags['ImageTags'].pop("TimezoneOffset", None) # put the image tags into properties properties.update(image_tags['ImageTags']) dimensional_calibrations = [Calibration.Calibration(c[0], c[1], c[2]) for c in calibrations] while len(dimensional_calibrations) < data_descriptor.expected_dimension_count: dimensional_calibrations.append(Calibration.Calibration()) intensity_calibration = Calibration.Calibration(intensity[0], intensity[1], intensity[2]) return DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor, dimensional_calibrations=dimensional_calibrations, intensity_calibration=intensity_calibration, metadata=properties, timestamp=timestamp, timezone=timezone, timezone_offset=timezone_offset)
[ "def", "load_image", "(", "file", ")", "->", "DataAndMetadata", ".", "DataAndMetadata", ":", "if", "isinstance", "(", "file", ",", "str", ")", "or", "isinstance", "(", "file", ",", "str", ")", ":", "with", "open", "(", "file", ",", "\"rb\"", ")", "as", "f", ":", "return", "load_image", "(", "f", ")", "dmtag", "=", "parse_dm3", ".", "parse_dm_header", "(", "file", ")", "dmtag", "=", "fix_strings", "(", "dmtag", ")", "# display_keys(dmtag)", "img_index", "=", "-", "1", "image_tags", "=", "dmtag", "[", "'ImageList'", "]", "[", "img_index", "]", "data", "=", "imagedatadict_to_ndarray", "(", "image_tags", "[", "'ImageData'", "]", ")", "calibrations", "=", "[", "]", "calibration_tags", "=", "image_tags", "[", "'ImageData'", "]", ".", "get", "(", "'Calibrations'", ",", "dict", "(", ")", ")", "for", "dimension", "in", "calibration_tags", ".", "get", "(", "'Dimension'", ",", "list", "(", ")", ")", ":", "origin", ",", "scale", ",", "units", "=", "dimension", ".", "get", "(", "'Origin'", ",", "0.0", ")", ",", "dimension", ".", "get", "(", "'Scale'", ",", "1.0", ")", ",", "dimension", ".", "get", "(", "'Units'", ",", "str", "(", ")", ")", "calibrations", ".", "append", "(", "(", "-", "origin", "*", "scale", ",", "scale", ",", "units", ")", ")", "calibrations", "=", "tuple", "(", "reversed", "(", "calibrations", ")", ")", "if", "len", "(", "data", ".", "shape", ")", "==", "3", "and", "data", ".", "dtype", "!=", "numpy", ".", "uint8", ":", "if", "image_tags", "[", "'ImageTags'", "]", ".", "get", "(", "'Meta Data'", ",", "dict", "(", ")", ")", ".", "get", "(", "\"Format\"", ",", "str", "(", ")", ")", ".", "lower", "(", ")", "in", "(", "\"spectrum\"", ",", "\"spectrum image\"", ")", ":", "if", "data", ".", "shape", "[", "1", "]", "==", "1", ":", "data", "=", "numpy", ".", "squeeze", "(", "data", ",", "1", ")", "data", "=", "numpy", ".", "moveaxis", "(", "data", ",", "0", ",", "1", ")", "data_descriptor", "=", "DataAndMetadata", ".", "DataDescriptor", "(", "False", ",", "1", ",", "1", ")", "calibrations", "=", "(", "calibrations", "[", "2", "]", ",", "calibrations", "[", "0", "]", ")", "else", ":", "data", "=", "numpy", ".", "moveaxis", "(", "data", ",", "0", ",", "2", ")", "data_descriptor", "=", "DataAndMetadata", ".", "DataDescriptor", "(", "False", ",", "2", ",", "1", ")", "calibrations", "=", "tuple", "(", "calibrations", "[", "1", ":", "]", ")", "+", "(", "calibrations", "[", "0", "]", ",", ")", "else", ":", "data_descriptor", "=", "DataAndMetadata", ".", "DataDescriptor", "(", "False", ",", "1", ",", "2", ")", "elif", "len", "(", "data", ".", "shape", ")", "==", "4", "and", "data", ".", "dtype", "!=", "numpy", ".", "uint8", ":", "# data = numpy.moveaxis(data, 0, 2)", "data_descriptor", "=", "DataAndMetadata", ".", "DataDescriptor", "(", "False", ",", "2", ",", "2", ")", "elif", "data", ".", "dtype", "==", "numpy", ".", "uint8", ":", "data_descriptor", "=", "DataAndMetadata", ".", "DataDescriptor", "(", "False", ",", "0", ",", "len", "(", "data", ".", "shape", "[", ":", "-", "1", "]", ")", ")", "else", ":", "data_descriptor", "=", "DataAndMetadata", ".", "DataDescriptor", "(", "False", ",", "0", ",", "len", "(", "data", ".", "shape", ")", ")", "brightness", "=", "calibration_tags", ".", "get", "(", "'Brightness'", ",", "dict", "(", ")", ")", "origin", ",", "scale", ",", "units", "=", "brightness", ".", "get", "(", "'Origin'", ",", "0.0", ")", ",", "brightness", ".", "get", "(", "'Scale'", ",", "1.0", ")", ",", "brightness", ".", "get", "(", "'Units'", ",", "str", "(", ")", ")", "intensity", "=", 
"-", "origin", "*", "scale", ",", "scale", ",", "units", "timestamp", "=", "None", "timezone", "=", "None", "timezone_offset", "=", "None", "title", "=", "image_tags", ".", "get", "(", "'Name'", ")", "properties", "=", "dict", "(", ")", "if", "'ImageTags'", "in", "image_tags", ":", "voltage", "=", "image_tags", "[", "'ImageTags'", "]", ".", "get", "(", "'ImageScanned'", ",", "dict", "(", ")", ")", ".", "get", "(", "'EHT'", ",", "dict", "(", ")", ")", "if", "voltage", ":", "properties", ".", "setdefault", "(", "\"hardware_source\"", ",", "dict", "(", ")", ")", "[", "\"autostem\"", "]", "=", "{", "\"high_tension_v\"", ":", "float", "(", "voltage", ")", "}", "dm_metadata_signal", "=", "image_tags", "[", "'ImageTags'", "]", ".", "get", "(", "'Meta Data'", ",", "dict", "(", ")", ")", ".", "get", "(", "'Signal'", ")", "if", "dm_metadata_signal", "and", "dm_metadata_signal", ".", "lower", "(", ")", "==", "\"eels\"", ":", "properties", ".", "setdefault", "(", "\"hardware_source\"", ",", "dict", "(", ")", ")", "[", "\"signal_type\"", "]", "=", "dm_metadata_signal", "if", "image_tags", "[", "'ImageTags'", "]", ".", "get", "(", "'Meta Data'", ",", "dict", "(", ")", ")", ".", "get", "(", "\"Format\"", ",", "str", "(", ")", ")", ".", "lower", "(", ")", "in", "(", "\"spectrum\"", ",", "\"spectrum image\"", ")", ":", "data_descriptor", ".", "collection_dimension_count", "+=", "data_descriptor", ".", "datum_dimension_count", "-", "1", "data_descriptor", ".", "datum_dimension_count", "=", "1", "if", "image_tags", "[", "'ImageTags'", "]", ".", "get", "(", "'Meta Data'", ",", "dict", "(", ")", ")", ".", "get", "(", "\"IsSequence\"", ",", "False", ")", "and", "data_descriptor", ".", "collection_dimension_count", ">", "0", ":", "data_descriptor", ".", "is_sequence", "=", "True", "data_descriptor", ".", "collection_dimension_count", "-=", "1", "timestamp_str", "=", "image_tags", "[", "'ImageTags'", "]", ".", "get", "(", "\"Timestamp\"", ")", "if", "timestamp_str", ":", "timestamp", "=", "get_datetime_from_timestamp_str", "(", "timestamp_str", ")", "timezone", "=", "image_tags", "[", "'ImageTags'", "]", ".", "get", "(", "\"Timezone\"", ")", "timezone_offset", "=", "image_tags", "[", "'ImageTags'", "]", ".", "get", "(", "\"TimezoneOffset\"", ")", "# to avoid having duplicate copies in Swift, get rid of these tags", "image_tags", "[", "'ImageTags'", "]", ".", "pop", "(", "\"Timestamp\"", ",", "None", ")", "image_tags", "[", "'ImageTags'", "]", ".", "pop", "(", "\"Timezone\"", ",", "None", ")", "image_tags", "[", "'ImageTags'", "]", ".", "pop", "(", "\"TimezoneOffset\"", ",", "None", ")", "# put the image tags into properties", "properties", ".", "update", "(", "image_tags", "[", "'ImageTags'", "]", ")", "dimensional_calibrations", "=", "[", "Calibration", ".", "Calibration", "(", "c", "[", "0", "]", ",", "c", "[", "1", "]", ",", "c", "[", "2", "]", ")", "for", "c", "in", "calibrations", "]", "while", "len", "(", "dimensional_calibrations", ")", "<", "data_descriptor", ".", "expected_dimension_count", ":", "dimensional_calibrations", ".", "append", "(", "Calibration", ".", "Calibration", "(", ")", ")", "intensity_calibration", "=", "Calibration", ".", "Calibration", "(", "intensity", "[", "0", "]", ",", "intensity", "[", "1", "]", ",", "intensity", "[", "2", "]", ")", "return", "DataAndMetadata", ".", "new_data_and_metadata", "(", "data", ",", "data_descriptor", "=", "data_descriptor", ",", "dimensional_calibrations", "=", "dimensional_calibrations", ",", "intensity_calibration", "=", "intensity_calibration", ",", 
"metadata", "=", "properties", ",", "timestamp", "=", "timestamp", ",", "timezone", "=", "timezone", ",", "timezone_offset", "=", "timezone_offset", ")" ]
Loads the image from the file-like object or string file. If file is a string, the file is opened and then read. Returns a numpy ndarray of our best guess for the most important image in the file.
[ "Loads", "the", "image", "from", "the", "file", "-", "like", "object", "or", "string", "file", ".", "If", "file", "is", "a", "string", "the", "file", "is", "opened", "and", "then", "read", ".", "Returns", "a", "numpy", "ndarray", "of", "our", "best", "guess", "for", "the", "most", "important", "image", "in", "the", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/DM_IO/dm3_image_utils.py#L171-L256
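A hedged usage sketch, with a hypothetical filename: per this record, load_image lives in nionswift_plugin/DM_IO/dm3_image_utils.py, accepts either a path or an open binary file, and returns a DataAndMetadata object carrying the array plus its descriptor and calibrations.

from nionswift_plugin.DM_IO.dm3_image_utils import load_image  # module path per this record

xdata = load_image("example_spectrum_image.dm3")     # hypothetical file
print(xdata.data.shape)
print(xdata.data_descriptor)                         # sequence/collection/datum layout
print(xdata.dimensional_calibrations)                # one Calibration per axis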
nion-software/nionswift-io
nionswift_plugin/DM_IO/dm3_image_utils.py
save_image
def save_image(xdata: DataAndMetadata.DataAndMetadata, file): """ Saves the nparray data to the file-like object (or string) file. """ # we need to create a basic DM tree suitable for an image # we'll try the minimum: just an data list # doesn't work. Do we need a ImageSourceList too? # and a DocumentObjectList? data = xdata.data data_descriptor = xdata.data_descriptor dimensional_calibrations = xdata.dimensional_calibrations intensity_calibration = xdata.intensity_calibration metadata = xdata.metadata modified = xdata.timestamp timezone = xdata.timezone timezone_offset = xdata.timezone_offset needs_slice = False is_sequence = False if len(data.shape) == 3 and data.dtype != numpy.uint8 and data_descriptor.datum_dimension_count == 1: data = numpy.moveaxis(data, 2, 0) dimensional_calibrations = (dimensional_calibrations[2],) + tuple(dimensional_calibrations[0:2]) if len(data.shape) == 2 and data.dtype != numpy.uint8 and data_descriptor.datum_dimension_count == 1: is_sequence = data_descriptor.is_sequence data = numpy.moveaxis(data, 1, 0) data = numpy.expand_dims(data, axis=1) dimensional_calibrations = (dimensional_calibrations[1], Calibration.Calibration(), dimensional_calibrations[0]) data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1) needs_slice = True data_dict = ndarray_to_imagedatadict(data) ret = {} ret["ImageList"] = [{"ImageData": data_dict}] if dimensional_calibrations and len(dimensional_calibrations) == len(data.shape): dimension_list = data_dict.setdefault("Calibrations", dict()).setdefault("Dimension", list()) for dimensional_calibration in reversed(dimensional_calibrations): dimension = dict() if dimensional_calibration.scale != 0.0: origin = -dimensional_calibration.offset / dimensional_calibration.scale else: origin = 0.0 dimension['Origin'] = origin dimension['Scale'] = dimensional_calibration.scale dimension['Units'] = dimensional_calibration.units dimension_list.append(dimension) if intensity_calibration: if intensity_calibration.scale != 0.0: origin = -intensity_calibration.offset / intensity_calibration.scale else: origin = 0.0 brightness = data_dict.setdefault("Calibrations", dict()).setdefault("Brightness", dict()) brightness['Origin'] = origin brightness['Scale'] = intensity_calibration.scale brightness['Units'] = intensity_calibration.units if modified: timezone_str = None if timezone_str is None and timezone: try: import pytz tz = pytz.timezone(timezone) timezone_str = tz.tzname(modified) except: pass if timezone_str is None and timezone_offset: timezone_str = timezone_offset timezone_str = " " + timezone_str if timezone_str is not None else "" date_str = modified.strftime("%x") time_str = modified.strftime("%X") + timezone_str ret["DataBar"] = {"Acquisition Date": date_str, "Acquisition Time": time_str} # I think ImageSource list creates a mapping between ImageSourceIds and Images ret["ImageSourceList"] = [{"ClassName": "ImageSource:Simple", "Id": [0], "ImageRef": 0}] # I think this lists the sources for the DocumentObjectlist. The source number is not # the indxe in the imagelist but is either the index in the ImageSourceList or the Id # from that list. We also need to set the annotation type to identify it as an data ret["DocumentObjectList"] = [{"ImageSource": 0, "AnnotationType": 20}] # finally some display options ret["Image Behavior"] = {"ViewDisplayID": 8} dm_metadata = copy.deepcopy(metadata) if metadata.get("hardware_source", dict()).get("signal_type", "").lower() == "eels": if len(data.shape) == 1 or (len(data.shape) == 2 and data.shape[0] == 1): dm_metadata.setdefault("Meta Data", dict())["Format"] = "Spectrum" dm_metadata.setdefault("Meta Data", dict())["Signal"] = "EELS" elif data_descriptor.collection_dimension_count == 2 and data_descriptor.datum_dimension_count == 1: dm_metadata.setdefault("Meta Data", dict())["Format"] = "Spectrum image" dm_metadata.setdefault("Meta Data", dict())["Signal"] = "EELS" elif data_descriptor.datum_dimension_count == 1: dm_metadata.setdefault("Meta Data", dict())["Format"] = "Spectrum" if (1 if data_descriptor.is_sequence else 0) + data_descriptor.collection_dimension_count == 1 or needs_slice: if data_descriptor.is_sequence or is_sequence: dm_metadata.setdefault("Meta Data", dict())["IsSequence"] = True ret["ImageSourceList"] = [{"ClassName": "ImageSource:Summed", "Do Sum": True, "Id": [0], "ImageRef": 0, "LayerEnd": 0, "LayerStart": 0, "Summed Dimension": len(data.shape) - 1}] if needs_slice: ret["DocumentObjectList"][0]["AnnotationGroupList"] = [{"AnnotationType": 23, "Name": "SICursor", "Rectangle": (0, 0, 1, 1)}] ret["DocumentObjectList"][0]["ImageDisplayType"] = 1 # display as an image if modified: dm_metadata["Timestamp"] = modified.isoformat() if timezone: dm_metadata["Timezone"] = timezone if timezone_offset: dm_metadata["TimezoneOffset"] = timezone_offset ret["ImageList"][0]["ImageTags"] = dm_metadata ret["InImageMode"] = True parse_dm3.parse_dm_header(file, ret)
python
def save_image(xdata: DataAndMetadata.DataAndMetadata, file): """ Saves the nparray data to the file-like object (or string) file. """ # we need to create a basic DM tree suitable for an image # we'll try the minimum: just an data list # doesn't work. Do we need a ImageSourceList too? # and a DocumentObjectList? data = xdata.data data_descriptor = xdata.data_descriptor dimensional_calibrations = xdata.dimensional_calibrations intensity_calibration = xdata.intensity_calibration metadata = xdata.metadata modified = xdata.timestamp timezone = xdata.timezone timezone_offset = xdata.timezone_offset needs_slice = False is_sequence = False if len(data.shape) == 3 and data.dtype != numpy.uint8 and data_descriptor.datum_dimension_count == 1: data = numpy.moveaxis(data, 2, 0) dimensional_calibrations = (dimensional_calibrations[2],) + tuple(dimensional_calibrations[0:2]) if len(data.shape) == 2 and data.dtype != numpy.uint8 and data_descriptor.datum_dimension_count == 1: is_sequence = data_descriptor.is_sequence data = numpy.moveaxis(data, 1, 0) data = numpy.expand_dims(data, axis=1) dimensional_calibrations = (dimensional_calibrations[1], Calibration.Calibration(), dimensional_calibrations[0]) data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1) needs_slice = True data_dict = ndarray_to_imagedatadict(data) ret = {} ret["ImageList"] = [{"ImageData": data_dict}] if dimensional_calibrations and len(dimensional_calibrations) == len(data.shape): dimension_list = data_dict.setdefault("Calibrations", dict()).setdefault("Dimension", list()) for dimensional_calibration in reversed(dimensional_calibrations): dimension = dict() if dimensional_calibration.scale != 0.0: origin = -dimensional_calibration.offset / dimensional_calibration.scale else: origin = 0.0 dimension['Origin'] = origin dimension['Scale'] = dimensional_calibration.scale dimension['Units'] = dimensional_calibration.units dimension_list.append(dimension) if intensity_calibration: if intensity_calibration.scale != 0.0: origin = -intensity_calibration.offset / intensity_calibration.scale else: origin = 0.0 brightness = data_dict.setdefault("Calibrations", dict()).setdefault("Brightness", dict()) brightness['Origin'] = origin brightness['Scale'] = intensity_calibration.scale brightness['Units'] = intensity_calibration.units if modified: timezone_str = None if timezone_str is None and timezone: try: import pytz tz = pytz.timezone(timezone) timezone_str = tz.tzname(modified) except: pass if timezone_str is None and timezone_offset: timezone_str = timezone_offset timezone_str = " " + timezone_str if timezone_str is not None else "" date_str = modified.strftime("%x") time_str = modified.strftime("%X") + timezone_str ret["DataBar"] = {"Acquisition Date": date_str, "Acquisition Time": time_str} # I think ImageSource list creates a mapping between ImageSourceIds and Images ret["ImageSourceList"] = [{"ClassName": "ImageSource:Simple", "Id": [0], "ImageRef": 0}] # I think this lists the sources for the DocumentObjectlist. The source number is not # the indxe in the imagelist but is either the index in the ImageSourceList or the Id # from that list. We also need to set the annotation type to identify it as an data ret["DocumentObjectList"] = [{"ImageSource": 0, "AnnotationType": 20}] # finally some display options ret["Image Behavior"] = {"ViewDisplayID": 8} dm_metadata = copy.deepcopy(metadata) if metadata.get("hardware_source", dict()).get("signal_type", "").lower() == "eels": if len(data.shape) == 1 or (len(data.shape) == 2 and data.shape[0] == 1): dm_metadata.setdefault("Meta Data", dict())["Format"] = "Spectrum" dm_metadata.setdefault("Meta Data", dict())["Signal"] = "EELS" elif data_descriptor.collection_dimension_count == 2 and data_descriptor.datum_dimension_count == 1: dm_metadata.setdefault("Meta Data", dict())["Format"] = "Spectrum image" dm_metadata.setdefault("Meta Data", dict())["Signal"] = "EELS" elif data_descriptor.datum_dimension_count == 1: dm_metadata.setdefault("Meta Data", dict())["Format"] = "Spectrum" if (1 if data_descriptor.is_sequence else 0) + data_descriptor.collection_dimension_count == 1 or needs_slice: if data_descriptor.is_sequence or is_sequence: dm_metadata.setdefault("Meta Data", dict())["IsSequence"] = True ret["ImageSourceList"] = [{"ClassName": "ImageSource:Summed", "Do Sum": True, "Id": [0], "ImageRef": 0, "LayerEnd": 0, "LayerStart": 0, "Summed Dimension": len(data.shape) - 1}] if needs_slice: ret["DocumentObjectList"][0]["AnnotationGroupList"] = [{"AnnotationType": 23, "Name": "SICursor", "Rectangle": (0, 0, 1, 1)}] ret["DocumentObjectList"][0]["ImageDisplayType"] = 1 # display as an image if modified: dm_metadata["Timestamp"] = modified.isoformat() if timezone: dm_metadata["Timezone"] = timezone if timezone_offset: dm_metadata["TimezoneOffset"] = timezone_offset ret["ImageList"][0]["ImageTags"] = dm_metadata ret["InImageMode"] = True parse_dm3.parse_dm_header(file, ret)
[ "def", "save_image", "(", "xdata", ":", "DataAndMetadata", ".", "DataAndMetadata", ",", "file", ")", ":", "# we need to create a basic DM tree suitable for an image", "# we'll try the minimum: just an data list", "# doesn't work. Do we need a ImageSourceList too?", "# and a DocumentObjectList?", "data", "=", "xdata", ".", "data", "data_descriptor", "=", "xdata", ".", "data_descriptor", "dimensional_calibrations", "=", "xdata", ".", "dimensional_calibrations", "intensity_calibration", "=", "xdata", ".", "intensity_calibration", "metadata", "=", "xdata", ".", "metadata", "modified", "=", "xdata", ".", "timestamp", "timezone", "=", "xdata", ".", "timezone", "timezone_offset", "=", "xdata", ".", "timezone_offset", "needs_slice", "=", "False", "is_sequence", "=", "False", "if", "len", "(", "data", ".", "shape", ")", "==", "3", "and", "data", ".", "dtype", "!=", "numpy", ".", "uint8", "and", "data_descriptor", ".", "datum_dimension_count", "==", "1", ":", "data", "=", "numpy", ".", "moveaxis", "(", "data", ",", "2", ",", "0", ")", "dimensional_calibrations", "=", "(", "dimensional_calibrations", "[", "2", "]", ",", ")", "+", "tuple", "(", "dimensional_calibrations", "[", "0", ":", "2", "]", ")", "if", "len", "(", "data", ".", "shape", ")", "==", "2", "and", "data", ".", "dtype", "!=", "numpy", ".", "uint8", "and", "data_descriptor", ".", "datum_dimension_count", "==", "1", ":", "is_sequence", "=", "data_descriptor", ".", "is_sequence", "data", "=", "numpy", ".", "moveaxis", "(", "data", ",", "1", ",", "0", ")", "data", "=", "numpy", ".", "expand_dims", "(", "data", ",", "axis", "=", "1", ")", "dimensional_calibrations", "=", "(", "dimensional_calibrations", "[", "1", "]", ",", "Calibration", ".", "Calibration", "(", ")", ",", "dimensional_calibrations", "[", "0", "]", ")", "data_descriptor", "=", "DataAndMetadata", ".", "DataDescriptor", "(", "False", ",", "2", ",", "1", ")", "needs_slice", "=", "True", "data_dict", "=", "ndarray_to_imagedatadict", "(", "data", ")", "ret", "=", "{", "}", "ret", "[", "\"ImageList\"", "]", "=", "[", "{", "\"ImageData\"", ":", "data_dict", "}", "]", "if", "dimensional_calibrations", "and", "len", "(", "dimensional_calibrations", ")", "==", "len", "(", "data", ".", "shape", ")", ":", "dimension_list", "=", "data_dict", ".", "setdefault", "(", "\"Calibrations\"", ",", "dict", "(", ")", ")", ".", "setdefault", "(", "\"Dimension\"", ",", "list", "(", ")", ")", "for", "dimensional_calibration", "in", "reversed", "(", "dimensional_calibrations", ")", ":", "dimension", "=", "dict", "(", ")", "if", "dimensional_calibration", ".", "scale", "!=", "0.0", ":", "origin", "=", "-", "dimensional_calibration", ".", "offset", "/", "dimensional_calibration", ".", "scale", "else", ":", "origin", "=", "0.0", "dimension", "[", "'Origin'", "]", "=", "origin", "dimension", "[", "'Scale'", "]", "=", "dimensional_calibration", ".", "scale", "dimension", "[", "'Units'", "]", "=", "dimensional_calibration", ".", "units", "dimension_list", ".", "append", "(", "dimension", ")", "if", "intensity_calibration", ":", "if", "intensity_calibration", ".", "scale", "!=", "0.0", ":", "origin", "=", "-", "intensity_calibration", ".", "offset", "/", "intensity_calibration", ".", "scale", "else", ":", "origin", "=", "0.0", "brightness", "=", "data_dict", ".", "setdefault", "(", "\"Calibrations\"", ",", "dict", "(", ")", ")", ".", "setdefault", "(", "\"Brightness\"", ",", "dict", "(", ")", ")", "brightness", "[", "'Origin'", "]", "=", "origin", "brightness", "[", "'Scale'", "]", "=", 
"intensity_calibration", ".", "scale", "brightness", "[", "'Units'", "]", "=", "intensity_calibration", ".", "units", "if", "modified", ":", "timezone_str", "=", "None", "if", "timezone_str", "is", "None", "and", "timezone", ":", "try", ":", "import", "pytz", "tz", "=", "pytz", ".", "timezone", "(", "timezone", ")", "timezone_str", "=", "tz", ".", "tzname", "(", "modified", ")", "except", ":", "pass", "if", "timezone_str", "is", "None", "and", "timezone_offset", ":", "timezone_str", "=", "timezone_offset", "timezone_str", "=", "\" \"", "+", "timezone_str", "if", "timezone_str", "is", "not", "None", "else", "\"\"", "date_str", "=", "modified", ".", "strftime", "(", "\"%x\"", ")", "time_str", "=", "modified", ".", "strftime", "(", "\"%X\"", ")", "+", "timezone_str", "ret", "[", "\"DataBar\"", "]", "=", "{", "\"Acquisition Date\"", ":", "date_str", ",", "\"Acquisition Time\"", ":", "time_str", "}", "# I think ImageSource list creates a mapping between ImageSourceIds and Images", "ret", "[", "\"ImageSourceList\"", "]", "=", "[", "{", "\"ClassName\"", ":", "\"ImageSource:Simple\"", ",", "\"Id\"", ":", "[", "0", "]", ",", "\"ImageRef\"", ":", "0", "}", "]", "# I think this lists the sources for the DocumentObjectlist. The source number is not", "# the indxe in the imagelist but is either the index in the ImageSourceList or the Id", "# from that list. We also need to set the annotation type to identify it as an data", "ret", "[", "\"DocumentObjectList\"", "]", "=", "[", "{", "\"ImageSource\"", ":", "0", ",", "\"AnnotationType\"", ":", "20", "}", "]", "# finally some display options", "ret", "[", "\"Image Behavior\"", "]", "=", "{", "\"ViewDisplayID\"", ":", "8", "}", "dm_metadata", "=", "copy", ".", "deepcopy", "(", "metadata", ")", "if", "metadata", ".", "get", "(", "\"hardware_source\"", ",", "dict", "(", ")", ")", ".", "get", "(", "\"signal_type\"", ",", "\"\"", ")", ".", "lower", "(", ")", "==", "\"eels\"", ":", "if", "len", "(", "data", ".", "shape", ")", "==", "1", "or", "(", "len", "(", "data", ".", "shape", ")", "==", "2", "and", "data", ".", "shape", "[", "0", "]", "==", "1", ")", ":", "dm_metadata", ".", "setdefault", "(", "\"Meta Data\"", ",", "dict", "(", ")", ")", "[", "\"Format\"", "]", "=", "\"Spectrum\"", "dm_metadata", ".", "setdefault", "(", "\"Meta Data\"", ",", "dict", "(", ")", ")", "[", "\"Signal\"", "]", "=", "\"EELS\"", "elif", "data_descriptor", ".", "collection_dimension_count", "==", "2", "and", "data_descriptor", ".", "datum_dimension_count", "==", "1", ":", "dm_metadata", ".", "setdefault", "(", "\"Meta Data\"", ",", "dict", "(", ")", ")", "[", "\"Format\"", "]", "=", "\"Spectrum image\"", "dm_metadata", ".", "setdefault", "(", "\"Meta Data\"", ",", "dict", "(", ")", ")", "[", "\"Signal\"", "]", "=", "\"EELS\"", "elif", "data_descriptor", ".", "datum_dimension_count", "==", "1", ":", "dm_metadata", ".", "setdefault", "(", "\"Meta Data\"", ",", "dict", "(", ")", ")", "[", "\"Format\"", "]", "=", "\"Spectrum\"", "if", "(", "1", "if", "data_descriptor", ".", "is_sequence", "else", "0", ")", "+", "data_descriptor", ".", "collection_dimension_count", "==", "1", "or", "needs_slice", ":", "if", "data_descriptor", ".", "is_sequence", "or", "is_sequence", ":", "dm_metadata", ".", "setdefault", "(", "\"Meta Data\"", ",", "dict", "(", ")", ")", "[", "\"IsSequence\"", "]", "=", "True", "ret", "[", "\"ImageSourceList\"", "]", "=", "[", "{", "\"ClassName\"", ":", "\"ImageSource:Summed\"", ",", "\"Do Sum\"", ":", "True", ",", "\"Id\"", ":", "[", "0", "]", ",", "\"ImageRef\"", ":", 
"0", ",", "\"LayerEnd\"", ":", "0", ",", "\"LayerStart\"", ":", "0", ",", "\"Summed Dimension\"", ":", "len", "(", "data", ".", "shape", ")", "-", "1", "}", "]", "if", "needs_slice", ":", "ret", "[", "\"DocumentObjectList\"", "]", "[", "0", "]", "[", "\"AnnotationGroupList\"", "]", "=", "[", "{", "\"AnnotationType\"", ":", "23", ",", "\"Name\"", ":", "\"SICursor\"", ",", "\"Rectangle\"", ":", "(", "0", ",", "0", ",", "1", ",", "1", ")", "}", "]", "ret", "[", "\"DocumentObjectList\"", "]", "[", "0", "]", "[", "\"ImageDisplayType\"", "]", "=", "1", "# display as an image", "if", "modified", ":", "dm_metadata", "[", "\"Timestamp\"", "]", "=", "modified", ".", "isoformat", "(", ")", "if", "timezone", ":", "dm_metadata", "[", "\"Timezone\"", "]", "=", "timezone", "if", "timezone_offset", ":", "dm_metadata", "[", "\"TimezoneOffset\"", "]", "=", "timezone_offset", "ret", "[", "\"ImageList\"", "]", "[", "0", "]", "[", "\"ImageTags\"", "]", "=", "dm_metadata", "ret", "[", "\"InImageMode\"", "]", "=", "True", "parse_dm3", ".", "parse_dm_header", "(", "file", ",", "ret", ")" ]
Saves the nparray data to the file-like object (or string) file.
[ "Saves", "the", "nparray", "data", "to", "the", "file", "-", "like", "object", "(", "or", "string", ")", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/DM_IO/dm3_image_utils.py#L259-L361
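A minimal usage sketch for the save_image record above; the nion.data import and the plug-in module path are assumptions about the install layout, not part of the record.

# Hypothetical usage: wrap a small 2D array as DataAndMetadata and
# write it to a .dm3 file (opened in binary mode, since save_image
# ends by serializing the tag tree with parse_dm_header).
import numpy
from nion.data import DataAndMetadata
from nionswift_plugin.DM_IO import dm3_image_utils

xdata = DataAndMetadata.new_data_and_metadata(
    numpy.random.rand(64, 64).astype(numpy.float32))
with open("example.dm3", "wb") as f:
    dm3_image_utils.save_image(xdata, f)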
nion-software/nionswift-io
nionswift_plugin/DM_IO/parse_dm3.py
parse_dm_header
def parse_dm_header(f, outdata=None):
    """
    This is the start of the DM file. We check for some magic values
    and then treat the next entry as a tag_root

    If outdata is supplied, we write instead of read using the dictionary
    outdata as a source

    Hopefully parse_dm_header(newf, outdata=parse_dm_header(f)) copies f to newf
    """
    # filesize is sizeondisk - 16. But we have 8 bytes of zero at the end of
    # the file.
    if outdata is not None:  # this means we're WRITING to the file
        if verbose:
            print("write_dm_header start", f.tell())
        ver, file_size, endianness = 3, -1, 1
        put_into_file(f, "> l l l", ver, file_size, endianness)
        start = f.tell()
        parse_dm_tag_root(f, outdata)
        end = f.tell()
        # start is end of 3 long header. We want to write 2nd long
        f.seek(start - 8)
        # the real file size. We started counting after 12-byte version,fs,end
        # and we need to subtract 16 total:
        put_into_file(f, "> l", end - start + 4)
        f.seek(end)
        enda, endb = 0, 0
        put_into_file(f, "> l l", enda, endb)
        if verbose:
            print("write_dm_header end", f.tell())
    else:
        if verbose:
            print("read_dm_header start", f.tell())
        ver = get_from_file(f, "> l")
        assert ver in [3,4], "Version must be 3 or 4, not %s" % ver
        # argh. why a global?
        global size_type, version
        if ver == 3:
            size_type = 'L'  # may be Q?
            version = 3
        if ver == 4:
            size_type = 'Q'  # may be Q?
            version = 4
        file_size, endianness = get_from_file(f, ">%c l" % size_type)
        assert endianness == 1, "Endianness must be 1, not %s"%endianness
        start = f.tell()
        ret = parse_dm_tag_root(f, outdata)
        end = f.tell()
        # print("fs", file_size, end - start, (end-start)%8)
        # mfm 2013-07-11 the file_size value is not always
        # end-start, sometimes there seems to be an extra 4 bytes,
        # other times not. Let's just ignore it for the moment
        # assert(file_size == end - start)
        enda, endb = get_from_file(f, "> l l")
        assert(enda == endb == 0)
        if verbose:
            print("read_dm_header end", f.tell())
        return ret
python
def parse_dm_header(f, outdata=None):
    """
    This is the start of the DM file. We check for some magic values
    and then treat the next entry as a tag_root

    If outdata is supplied, we write instead of read using the dictionary
    outdata as a source

    Hopefully parse_dm_header(newf, outdata=parse_dm_header(f)) copies f to newf
    """
    # filesize is sizeondisk - 16. But we have 8 bytes of zero at the end of
    # the file.
    if outdata is not None:  # this means we're WRITING to the file
        if verbose:
            print("write_dm_header start", f.tell())
        ver, file_size, endianness = 3, -1, 1
        put_into_file(f, "> l l l", ver, file_size, endianness)
        start = f.tell()
        parse_dm_tag_root(f, outdata)
        end = f.tell()
        # start is end of 3 long header. We want to write 2nd long
        f.seek(start - 8)
        # the real file size. We started counting after 12-byte version,fs,end
        # and we need to subtract 16 total:
        put_into_file(f, "> l", end - start + 4)
        f.seek(end)
        enda, endb = 0, 0
        put_into_file(f, "> l l", enda, endb)
        if verbose:
            print("write_dm_header end", f.tell())
    else:
        if verbose:
            print("read_dm_header start", f.tell())
        ver = get_from_file(f, "> l")
        assert ver in [3,4], "Version must be 3 or 4, not %s" % ver
        # argh. why a global?
        global size_type, version
        if ver == 3:
            size_type = 'L'  # may be Q?
            version = 3
        if ver == 4:
            size_type = 'Q'  # may be Q?
            version = 4
        file_size, endianness = get_from_file(f, ">%c l" % size_type)
        assert endianness == 1, "Endianness must be 1, not %s"%endianness
        start = f.tell()
        ret = parse_dm_tag_root(f, outdata)
        end = f.tell()
        # print("fs", file_size, end - start, (end-start)%8)
        # mfm 2013-07-11 the file_size value is not always
        # end-start, sometimes there seems to be an extra 4 bytes,
        # other times not. Let's just ignore it for the moment
        # assert(file_size == end - start)
        enda, endb = get_from_file(f, "> l l")
        assert(enda == endb == 0)
        if verbose:
            print("read_dm_header end", f.tell())
        return ret
[ "def", "parse_dm_header", "(", "f", ",", "outdata", "=", "None", ")", ":", "# filesize is sizeondisk - 16. But we have 8 bytes of zero at the end of", "# the file.", "if", "outdata", "is", "not", "None", ":", "# this means we're WRITING to the file", "if", "verbose", ":", "print", "(", "\"write_dm_header start\"", ",", "f", ".", "tell", "(", ")", ")", "ver", ",", "file_size", ",", "endianness", "=", "3", ",", "-", "1", ",", "1", "put_into_file", "(", "f", ",", "\"> l l l\"", ",", "ver", ",", "file_size", ",", "endianness", ")", "start", "=", "f", ".", "tell", "(", ")", "parse_dm_tag_root", "(", "f", ",", "outdata", ")", "end", "=", "f", ".", "tell", "(", ")", "# start is end of 3 long header. We want to write 2nd long", "f", ".", "seek", "(", "start", "-", "8", ")", "# the real file size. We started counting after 12-byte version,fs,end", "# and we need to subtract 16 total:", "put_into_file", "(", "f", ",", "\"> l\"", ",", "end", "-", "start", "+", "4", ")", "f", ".", "seek", "(", "end", ")", "enda", ",", "endb", "=", "0", ",", "0", "put_into_file", "(", "f", ",", "\"> l l\"", ",", "enda", ",", "endb", ")", "if", "verbose", ":", "print", "(", "\"write_dm_header end\"", ",", "f", ".", "tell", "(", ")", ")", "else", ":", "if", "verbose", ":", "print", "(", "\"read_dm_header start\"", ",", "f", ".", "tell", "(", ")", ")", "ver", "=", "get_from_file", "(", "f", ",", "\"> l\"", ")", "assert", "ver", "in", "[", "3", ",", "4", "]", ",", "\"Version must be 3 or 4, not %s\"", "%", "ver", "# argh. why a global?", "global", "size_type", ",", "version", "if", "ver", "==", "3", ":", "size_type", "=", "'L'", "# may be Q?", "version", "=", "3", "if", "ver", "==", "4", ":", "size_type", "=", "'Q'", "# may be Q?", "version", "=", "4", "file_size", ",", "endianness", "=", "get_from_file", "(", "f", ",", "\">%c l\"", "%", "size_type", ")", "assert", "endianness", "==", "1", ",", "\"Endianness must be 1, not %s\"", "%", "endianness", "start", "=", "f", ".", "tell", "(", ")", "ret", "=", "parse_dm_tag_root", "(", "f", ",", "outdata", ")", "end", "=", "f", ".", "tell", "(", ")", "# print(\"fs\", file_size, end - start, (end-start)%8)", "# mfm 2013-07-11 the file_size value is not always", "# end-start, sometimes there seems to be an extra 4 bytes,", "# other times not. Let's just ignore it for the moment", "# assert(file_size == end - start)", "enda", ",", "endb", "=", "get_from_file", "(", "f", ",", "\"> l l\"", ")", "assert", "(", "enda", "==", "endb", "==", "0", ")", "if", "verbose", ":", "print", "(", "\"read_dm_header end\"", ",", "f", ".", "tell", "(", ")", ")", "return", "ret" ]
This is the start of the DM file. We check for some magic values
and then treat the next entry as a tag_root

If outdata is supplied, we write instead of read using the dictionary
outdata as a source

Hopefully parse_dm_header(newf, outdata=parse_dm_header(f)) copies f to newf
[ "This", "is", "the", "start", "of", "the", "DM", "file", ".", "We", "check", "for", "some", "magic", "values", "and", "then", "treat", "the", "next", "entry", "as", "a", "tag_root" ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/DM_IO/parse_dm3.py#L96-L151
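The docstring's round-trip claim can be exercised directly; a small sketch, assuming parse_dm3 is importable from the plug-in package as below.

# Copy one DM3 file to another: read the tag tree (outdata=None means
# read mode), then write it back out (passing outdata means write mode).
from nionswift_plugin.DM_IO import parse_dm3

with open("in.dm3", "rb") as src, open("out.dm3", "wb") as dst:
    tags = parse_dm3.parse_dm_header(src)
    parse_dm3.parse_dm_header(dst, outdata=tags)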
nion-software/nionswift-io
nionswift_plugin/DM_IO/parse_dm3.py
get_structdmtypes_for_python_typeorobject
def get_structdmtypes_for_python_typeorobject(typeorobj):
    """
    Return structchar, dmtype for the python (or numpy)
    type or object typeorobj.
    For more complex types we only return the dm type
    """
    # not isinstance is probably a bit more lenient than 'is'
    # ie isinstance(x,str) is nicer than type(x) is str.
    # hence we use isinstance when available
    if isinstance(typeorobj, type):
        comparer = lambda test: test is typeorobj
    else:
        comparer = lambda test: isinstance(typeorobj, test)

    if comparer(int) and not -2**31 < typeorobj < 2**31 - 1:
        return 'q', 11
    for key, name, sc, types in dm_simple_names:
        for t in types:
            if comparer(t):
                return sc, key
    if comparer(str):
        return None, get_dmtype_for_name('array')  # treat all strings as arrays!
    elif comparer(unicode_type):
        return None, get_dmtype_for_name('array')  # treat all strings as arrays!
    elif comparer(array.array):
        return None, get_dmtype_for_name('array')
    elif comparer(tuple):
        return None, get_dmtype_for_name('struct')
    elif comparer(structarray):
        return None, get_dmtype_for_name('array')
    logging.warn("No appropriate DMType found for %s, %s",
                 typeorobj, type(typeorobj))
    return None
python
def get_structdmtypes_for_python_typeorobject(typeorobj):
    """
    Return structchar, dmtype for the python (or numpy)
    type or object typeorobj.
    For more complex types we only return the dm type
    """
    # not isinstance is probably a bit more lenient than 'is'
    # ie isinstance(x,str) is nicer than type(x) is str.
    # hence we use isinstance when available
    if isinstance(typeorobj, type):
        comparer = lambda test: test is typeorobj
    else:
        comparer = lambda test: isinstance(typeorobj, test)

    if comparer(int) and not -2**31 < typeorobj < 2**31 - 1:
        return 'q', 11
    for key, name, sc, types in dm_simple_names:
        for t in types:
            if comparer(t):
                return sc, key
    if comparer(str):
        return None, get_dmtype_for_name('array')  # treat all strings as arrays!
    elif comparer(unicode_type):
        return None, get_dmtype_for_name('array')  # treat all strings as arrays!
    elif comparer(array.array):
        return None, get_dmtype_for_name('array')
    elif comparer(tuple):
        return None, get_dmtype_for_name('struct')
    elif comparer(structarray):
        return None, get_dmtype_for_name('array')
    logging.warn("No appropriate DMType found for %s, %s",
                 typeorobj, type(typeorobj))
    return None
[ "def", "get_structdmtypes_for_python_typeorobject", "(", "typeorobj", ")", ":", "# not isinstance is probably a bit more lenient than 'is'", "# ie isinstance(x,str) is nicer than type(x) is str.", "# hence we use isinstance when available", "if", "isinstance", "(", "typeorobj", ",", "type", ")", ":", "comparer", "=", "lambda", "test", ":", "test", "is", "typeorobj", "else", ":", "comparer", "=", "lambda", "test", ":", "isinstance", "(", "typeorobj", ",", "test", ")", "if", "comparer", "(", "int", ")", "and", "not", "-", "2", "**", "31", "<", "typeorobj", "<", "2", "**", "31", "-", "1", ":", "return", "'q'", ",", "11", "for", "key", ",", "name", ",", "sc", ",", "types", "in", "dm_simple_names", ":", "for", "t", "in", "types", ":", "if", "comparer", "(", "t", ")", ":", "return", "sc", ",", "key", "if", "comparer", "(", "str", ")", ":", "return", "None", ",", "get_dmtype_for_name", "(", "'array'", ")", "# treat all strings as arrays!", "elif", "comparer", "(", "unicode_type", ")", ":", "return", "None", ",", "get_dmtype_for_name", "(", "'array'", ")", "# treat all strings as arrays!", "elif", "comparer", "(", "array", ".", "array", ")", ":", "return", "None", ",", "get_dmtype_for_name", "(", "'array'", ")", "elif", "comparer", "(", "tuple", ")", ":", "return", "None", ",", "get_dmtype_for_name", "(", "'struct'", ")", "elif", "comparer", "(", "structarray", ")", ":", "return", "None", ",", "get_dmtype_for_name", "(", "'array'", ")", "logging", ".", "warn", "(", "\"No appropriate DMType found for %s, %s\"", ",", "typeorobj", ",", "type", "(", "typeorobj", ")", ")", "return", "None" ]
Return structchar, dmtype for the python (or numpy)
type or object typeorobj.
For more complex types we only return the dm type
[ "Return", "structchar", "dmtype", "for", "the", "python", "(", "or", "numpy", ")", "type", "or", "object", "typeorobj", ".", "For", "more", "complex", "types", "we", "only", "return", "the", "dm", "type" ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/DM_IO/parse_dm3.py#L331-L363
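A sketch of what the function returns for a few inputs; the exact simple-type pair for a small int depends on the module's dm_simple_names table, so the first result is indicative only.

# Small ints resolve through dm_simple_names; ints outside the 32-bit
# range fall back to ('q', 11); strings and tuples get only a DM type.
from nionswift_plugin.DM_IO import parse_dm3

print(parse_dm3.get_structdmtypes_for_python_typeorobject(3))       # pair from dm_simple_names
print(parse_dm3.get_structdmtypes_for_python_typeorobject(2**40))   # ('q', 11)
print(parse_dm3.get_structdmtypes_for_python_typeorobject("text"))  # (None, dm type for 'array')
print(parse_dm3.get_structdmtypes_for_python_typeorobject((1, 2)))  # (None, dm type for 'struct')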
nion-software/nionswift-io
nionswift_plugin/DM_IO/parse_dm3.py
standard_dm_read
def standard_dm_read(datatype_num, desc):
    """
    datatype_num is the number of the data type, see dm_simple_names
    above. desc is a (nicename, struct_char) tuple.
    We return a function that parses the data for us.
    """
    nicename, structchar, types = desc

    def dm_read_x(f, outdata=None):
        """Reads (or write if outdata is given) a simple data type.
        returns the data if reading and the number of bytes of header
        """
        if outdata is not None:  # this means we're WRITING to the file
            if verbose:
                print("dm_write start", structchar, outdata, "at", f.tell())
            put_into_file(f, "<" + structchar, outdata)
            if verbose:
                print("dm_write end", f.tell())
            return 0
        else:
            if verbose:
                print("dm_read start", structchar, "at", f.tell())
            result = get_from_file(f, "<" + structchar)
            if verbose:
                print("dm_read end", f.tell())
            return result, 0
    return dm_read_x
python
def standard_dm_read(datatype_num, desc):
    """
    datatype_num is the number of the data type, see dm_simple_names
    above. desc is a (nicename, struct_char) tuple.
    We return a function that parses the data for us.
    """
    nicename, structchar, types = desc

    def dm_read_x(f, outdata=None):
        """Reads (or write if outdata is given) a simple data type.
        returns the data if reading and the number of bytes of header
        """
        if outdata is not None:  # this means we're WRITING to the file
            if verbose:
                print("dm_write start", structchar, outdata, "at", f.tell())
            put_into_file(f, "<" + structchar, outdata)
            if verbose:
                print("dm_write end", f.tell())
            return 0
        else:
            if verbose:
                print("dm_read start", structchar, "at", f.tell())
            result = get_from_file(f, "<" + structchar)
            if verbose:
                print("dm_read end", f.tell())
            return result, 0
    return dm_read_x
[ "def", "standard_dm_read", "(", "datatype_num", ",", "desc", ")", ":", "nicename", ",", "structchar", ",", "types", "=", "desc", "def", "dm_read_x", "(", "f", ",", "outdata", "=", "None", ")", ":", "\"\"\"Reads (or write if outdata is given) a simple data type.\n returns the data if reading and the number of bytes of header\n \"\"\"", "if", "outdata", "is", "not", "None", ":", "# this means we're WRITING to the file", "if", "verbose", ":", "print", "(", "\"dm_write start\"", ",", "structchar", ",", "outdata", ",", "\"at\"", ",", "f", ".", "tell", "(", ")", ")", "put_into_file", "(", "f", ",", "\"<\"", "+", "structchar", ",", "outdata", ")", "if", "verbose", ":", "print", "(", "\"dm_write end\"", ",", "f", ".", "tell", "(", ")", ")", "return", "0", "else", ":", "if", "verbose", ":", "print", "(", "\"dm_read start\"", ",", "structchar", ",", "\"at\"", ",", "f", ".", "tell", "(", ")", ")", "result", "=", "get_from_file", "(", "f", ",", "\"<\"", "+", "structchar", ")", "if", "verbose", ":", "print", "(", "\"dm_read end\"", ",", "f", ".", "tell", "(", ")", ")", "return", "result", ",", "0", "return", "dm_read_x" ]
datatype_num is the number of the data type, see dm_simple_names
above. desc is a (nicename, struct_char) tuple.
We return a function that parses the data for us.
[ "datatype_num", "is", "the", "number", "of", "the", "data", "type", "see", "dm_simple_names", "above", ".", "desc", "is", "a", "(", "nicename", "struct_char", ")", "tuple", ".", "We", "return", "a", "function", "that", "parses", "the", "data", "for", "us", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/DM_IO/parse_dm3.py#L380-L407
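A sketch of the reader-factory pattern against an in-memory stream; the datatype number and the desc tuple ("long", "l", (int,)) are illustrative assumptions, not values taken from the module's tables.

# Build a reader for a little-endian 32-bit integer and run it on a
# BytesIO buffer; dm_read_x returns (value, header_byte_count) when reading.
import io
import struct
from nionswift_plugin.DM_IO import parse_dm3

read_i32 = parse_dm3.standard_dm_read(3, ("long", "l", (int,)))
buf = io.BytesIO(struct.pack("<l", 42))
value, header_bytes = read_i32(buf)
print(value, header_bytes)  # 42 0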
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
imread
def imread(files, **kwargs):
    """Return image data from TIFF file(s) as numpy array.

    Refer to the TiffFile and TiffSequence classes and their asarray
    functions for documentation.

    Parameters
    ----------
    files : str, binary stream, or sequence
        File name, seekable binary stream, glob pattern, or sequence of
        file names.
    kwargs : dict
        Parameters 'name', 'offset', 'size', 'multifile', and 'is_ome'
        are passed to the TiffFile constructor.
        The 'pattern' parameter is passed to the TiffSequence constructor.
        Other parameters are passed to the asarray functions.
        The first image series in the file is returned if no arguments are
        provided.

    """
    kwargs_file = parse_kwargs(kwargs, 'is_ome', 'multifile', '_useframes',
                               'name', 'offset', 'size',
                               'multifile_close', 'fastij', 'movie')  # legacy
    kwargs_seq = parse_kwargs(kwargs, 'pattern')

    if kwargs.get('pages', None) is not None:
        if kwargs.get('key', None) is not None:
            raise TypeError(
                "the 'pages' and 'key' arguments cannot be used together")
        log.warning("imread: the 'pages' argument is deprecated")
        kwargs['key'] = kwargs.pop('pages')

    if isinstance(files, basestring) and any(i in files for i in '?*'):
        files = glob.glob(files)
        if not files:
            raise ValueError('no files found')
        if not hasattr(files, 'seek') and len(files) == 1:
            files = files[0]

    if isinstance(files, basestring) or hasattr(files, 'seek'):
        with TiffFile(files, **kwargs_file) as tif:
            return tif.asarray(**kwargs)
    else:
        with TiffSequence(files, **kwargs_seq) as imseq:
            return imseq.asarray(**kwargs)
python
def imread(files, **kwargs):
    """Return image data from TIFF file(s) as numpy array.

    Refer to the TiffFile and TiffSequence classes and their asarray
    functions for documentation.

    Parameters
    ----------
    files : str, binary stream, or sequence
        File name, seekable binary stream, glob pattern, or sequence of
        file names.
    kwargs : dict
        Parameters 'name', 'offset', 'size', 'multifile', and 'is_ome'
        are passed to the TiffFile constructor.
        The 'pattern' parameter is passed to the TiffSequence constructor.
        Other parameters are passed to the asarray functions.
        The first image series in the file is returned if no arguments are
        provided.

    """
    kwargs_file = parse_kwargs(kwargs, 'is_ome', 'multifile', '_useframes',
                               'name', 'offset', 'size',
                               'multifile_close', 'fastij', 'movie')  # legacy
    kwargs_seq = parse_kwargs(kwargs, 'pattern')

    if kwargs.get('pages', None) is not None:
        if kwargs.get('key', None) is not None:
            raise TypeError(
                "the 'pages' and 'key' arguments cannot be used together")
        log.warning("imread: the 'pages' argument is deprecated")
        kwargs['key'] = kwargs.pop('pages')

    if isinstance(files, basestring) and any(i in files for i in '?*'):
        files = glob.glob(files)
        if not files:
            raise ValueError('no files found')
        if not hasattr(files, 'seek') and len(files) == 1:
            files = files[0]

    if isinstance(files, basestring) or hasattr(files, 'seek'):
        with TiffFile(files, **kwargs_file) as tif:
            return tif.asarray(**kwargs)
    else:
        with TiffSequence(files, **kwargs_seq) as imseq:
            return imseq.asarray(**kwargs)
[ "def", "imread", "(", "files", ",", "*", "*", "kwargs", ")", ":", "kwargs_file", "=", "parse_kwargs", "(", "kwargs", ",", "'is_ome'", ",", "'multifile'", ",", "'_useframes'", ",", "'name'", ",", "'offset'", ",", "'size'", ",", "'multifile_close'", ",", "'fastij'", ",", "'movie'", ")", "# legacy", "kwargs_seq", "=", "parse_kwargs", "(", "kwargs", ",", "'pattern'", ")", "if", "kwargs", ".", "get", "(", "'pages'", ",", "None", ")", "is", "not", "None", ":", "if", "kwargs", ".", "get", "(", "'key'", ",", "None", ")", "is", "not", "None", ":", "raise", "TypeError", "(", "\"the 'pages' and 'key' arguments cannot be used together\"", ")", "log", ".", "warning", "(", "\"imread: the 'pages' argument is deprecated\"", ")", "kwargs", "[", "'key'", "]", "=", "kwargs", ".", "pop", "(", "'pages'", ")", "if", "isinstance", "(", "files", ",", "basestring", ")", "and", "any", "(", "i", "in", "files", "for", "i", "in", "'?*'", ")", ":", "files", "=", "glob", ".", "glob", "(", "files", ")", "if", "not", "files", ":", "raise", "ValueError", "(", "'no files found'", ")", "if", "not", "hasattr", "(", "files", ",", "'seek'", ")", "and", "len", "(", "files", ")", "==", "1", ":", "files", "=", "files", "[", "0", "]", "if", "isinstance", "(", "files", ",", "basestring", ")", "or", "hasattr", "(", "files", ",", "'seek'", ")", ":", "with", "TiffFile", "(", "files", ",", "*", "*", "kwargs_file", ")", "as", "tif", ":", "return", "tif", ".", "asarray", "(", "*", "*", "kwargs", ")", "else", ":", "with", "TiffSequence", "(", "files", ",", "*", "*", "kwargs_seq", ")", "as", "imseq", ":", "return", "imseq", ".", "asarray", "(", "*", "*", "kwargs", ")" ]
Return image data from TIFF file(s) as numpy array.

Refer to the TiffFile and TiffSequence classes and their asarray
functions for documentation.

Parameters
----------
files : str, binary stream, or sequence
    File name, seekable binary stream, glob pattern, or sequence of
    file names.
kwargs : dict
    Parameters 'name', 'offset', 'size', 'multifile', and 'is_ome'
    are passed to the TiffFile constructor.
    The 'pattern' parameter is passed to the TiffSequence constructor.
    Other parameters are passed to the asarray functions.
    The first image series in the file is returned if no arguments are
    provided.
[ "Return", "image", "data", "from", "TIFF", "file", "(", "s", ")", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L571-L615
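A minimal usage sketch; the vendored module path is an assumption (the standalone 'tifffile' package exposes the same imread), and the file name is a placeholder.

# Read the first image series, then a single page via the 'key' argument
# (forwarded to asarray, as the docstring describes).
from nionswift_plugin.TIFF_IO import tifffile

image = tifffile.imread("stack.tif")
first_page = tifffile.imread("stack.tif", key=0)
print(image.shape, image.dtype)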
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
imwrite
def imwrite(file, data=None, shape=None, dtype=None, **kwargs):
    """Write numpy array to TIFF file.

    Refer to the TiffWriter class and its asarray function for documentation.

    A BigTIFF file is created if the data size in bytes is larger than 4 GB
    minus 32 MB (for metadata), and 'bigtiff' is not specified, and 'imagej'
    or 'truncate' are not enabled.

    Parameters
    ----------
    file : str or binary stream
        File name or writable binary stream, such as an open file
        or BytesIO.
    data : array_like
        Input image. The last dimensions are assumed to be image depth,
        height, width, and samples.
        If None, an empty array of the specified shape and dtype is
        saved to file.
        Unless 'byteorder' is specified in 'kwargs', the TIFF file byte order
        is determined from the data's dtype or the dtype argument.
    shape : tuple
        If 'data' is None, shape of an empty array to save to the file.
    dtype : numpy.dtype
        If 'data' is None, data-type of an empty array to save to the file.
    kwargs : dict
        Parameters 'append', 'byteorder', 'bigtiff', and 'imagej', are passed
        to the TiffWriter constructor. Other parameters are passed to the
        TiffWriter.save function.

    Returns
    -------
    offset, bytecount : tuple or None
        If the image data are written contiguously, return offset and
        bytecount of image data in the file.

    """
    tifargs = parse_kwargs(kwargs, 'append', 'bigtiff', 'byteorder', 'imagej')
    if data is None:
        dtype = numpy.dtype(dtype)
        size = product(shape) * dtype.itemsize
        byteorder = dtype.byteorder
    else:
        try:
            size = data.nbytes
            byteorder = data.dtype.byteorder
        except Exception:
            size = 0
            byteorder = None
    bigsize = kwargs.pop('bigsize', 2**32-2**25)
    if 'bigtiff' not in tifargs and size > bigsize and not (
            tifargs.get('imagej', False) or tifargs.get('truncate', False)):
        tifargs['bigtiff'] = True
    if 'byteorder' not in tifargs:
        tifargs['byteorder'] = byteorder

    with TiffWriter(file, **tifargs) as tif:
        return tif.save(data, shape, dtype, **kwargs)
python
def imwrite(file, data=None, shape=None, dtype=None, **kwargs):
    """Write numpy array to TIFF file.

    Refer to the TiffWriter class and its asarray function for documentation.

    A BigTIFF file is created if the data size in bytes is larger than 4 GB
    minus 32 MB (for metadata), and 'bigtiff' is not specified, and 'imagej'
    or 'truncate' are not enabled.

    Parameters
    ----------
    file : str or binary stream
        File name or writable binary stream, such as an open file
        or BytesIO.
    data : array_like
        Input image. The last dimensions are assumed to be image depth,
        height, width, and samples.
        If None, an empty array of the specified shape and dtype is
        saved to file.
        Unless 'byteorder' is specified in 'kwargs', the TIFF file byte order
        is determined from the data's dtype or the dtype argument.
    shape : tuple
        If 'data' is None, shape of an empty array to save to the file.
    dtype : numpy.dtype
        If 'data' is None, data-type of an empty array to save to the file.
    kwargs : dict
        Parameters 'append', 'byteorder', 'bigtiff', and 'imagej', are passed
        to the TiffWriter constructor. Other parameters are passed to the
        TiffWriter.save function.

    Returns
    -------
    offset, bytecount : tuple or None
        If the image data are written contiguously, return offset and
        bytecount of image data in the file.

    """
    tifargs = parse_kwargs(kwargs, 'append', 'bigtiff', 'byteorder', 'imagej')
    if data is None:
        dtype = numpy.dtype(dtype)
        size = product(shape) * dtype.itemsize
        byteorder = dtype.byteorder
    else:
        try:
            size = data.nbytes
            byteorder = data.dtype.byteorder
        except Exception:
            size = 0
            byteorder = None
    bigsize = kwargs.pop('bigsize', 2**32-2**25)
    if 'bigtiff' not in tifargs and size > bigsize and not (
            tifargs.get('imagej', False) or tifargs.get('truncate', False)):
        tifargs['bigtiff'] = True
    if 'byteorder' not in tifargs:
        tifargs['byteorder'] = byteorder

    with TiffWriter(file, **tifargs) as tif:
        return tif.save(data, shape, dtype, **kwargs)
[ "def", "imwrite", "(", "file", ",", "data", "=", "None", ",", "shape", "=", "None", ",", "dtype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "tifargs", "=", "parse_kwargs", "(", "kwargs", ",", "'append'", ",", "'bigtiff'", ",", "'byteorder'", ",", "'imagej'", ")", "if", "data", "is", "None", ":", "dtype", "=", "numpy", ".", "dtype", "(", "dtype", ")", "size", "=", "product", "(", "shape", ")", "*", "dtype", ".", "itemsize", "byteorder", "=", "dtype", ".", "byteorder", "else", ":", "try", ":", "size", "=", "data", ".", "nbytes", "byteorder", "=", "data", ".", "dtype", ".", "byteorder", "except", "Exception", ":", "size", "=", "0", "byteorder", "=", "None", "bigsize", "=", "kwargs", ".", "pop", "(", "'bigsize'", ",", "2", "**", "32", "-", "2", "**", "25", ")", "if", "'bigtiff'", "not", "in", "tifargs", "and", "size", ">", "bigsize", "and", "not", "(", "tifargs", ".", "get", "(", "'imagej'", ",", "False", ")", "or", "tifargs", ".", "get", "(", "'truncate'", ",", "False", ")", ")", ":", "tifargs", "[", "'bigtiff'", "]", "=", "True", "if", "'byteorder'", "not", "in", "tifargs", ":", "tifargs", "[", "'byteorder'", "]", "=", "byteorder", "with", "TiffWriter", "(", "file", ",", "*", "*", "tifargs", ")", "as", "tif", ":", "return", "tif", ".", "save", "(", "data", ",", "shape", ",", "dtype", ",", "*", "*", "kwargs", ")" ]
Write numpy array to TIFF file.

Refer to the TiffWriter class and its asarray function for documentation.

A BigTIFF file is created if the data size in bytes is larger than 4 GB
minus 32 MB (for metadata), and 'bigtiff' is not specified, and 'imagej'
or 'truncate' are not enabled.

Parameters
----------
file : str or binary stream
    File name or writable binary stream, such as an open file
    or BytesIO.
data : array_like
    Input image. The last dimensions are assumed to be image depth,
    height, width, and samples.
    If None, an empty array of the specified shape and dtype is
    saved to file.
    Unless 'byteorder' is specified in 'kwargs', the TIFF file byte order
    is determined from the data's dtype or the dtype argument.
shape : tuple
    If 'data' is None, shape of an empty array to save to the file.
dtype : numpy.dtype
    If 'data' is None, data-type of an empty array to save to the file.
kwargs : dict
    Parameters 'append', 'byteorder', 'bigtiff', and 'imagej', are passed
    to the TiffWriter constructor. Other parameters are passed to the
    TiffWriter.save function.

Returns
-------
offset, bytecount : tuple or None
    If the image data are written contiguously, return offset and
    bytecount of image data in the file.
[ "Write", "numpy", "array", "to", "TIFF", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L618-L674
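A round-trip sketch under the same import-path assumption as above; the file name is a placeholder.

# Write an 8-bit volume and read it back; BigTIFF is switched on
# automatically only past the ~4 GB threshold described in the docstring.
import numpy
from nionswift_plugin.TIFF_IO import tifffile

data = (numpy.random.rand(4, 256, 256) * 255).astype(numpy.uint8)
tifffile.imwrite("volume.tif", data)
assert numpy.array_equal(tifffile.imread("volume.tif"), data)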
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
memmap
def memmap(filename, shape=None, dtype=None, page=None, series=0, mode='r+',
           **kwargs):
    """Return memory-mapped numpy array stored in TIFF file.

    Memory-mapping requires data stored in native byte order, without tiling,
    compression, predictors, etc.
    If 'shape' and 'dtype' are provided, existing files will be overwritten or
    appended to depending on the 'append' parameter.
    Otherwise the image data of a specified page or series in an existing
    file will be memory-mapped. By default, the image data of the first page
    series is memory-mapped.
    Call flush() to write any changes in the array to the file.
    Raise ValueError if the image data in the file is not memory-mappable.

    Parameters
    ----------
    filename : str
        Name of the TIFF file which stores the array.
    shape : tuple
        Shape of the empty array.
    dtype : numpy.dtype
        Data-type of the empty array.
    page : int
        Index of the page which image data to memory-map.
    series : int
        Index of the page series which image data to memory-map.
    mode : {'r+', 'r', 'c'}
        The file open mode. Default is to open existing file for reading and
        writing ('r+').
    kwargs : dict
        Additional parameters passed to imwrite() or TiffFile().

    """
    if shape is not None and dtype is not None:
        # create a new, empty array
        kwargs.update(data=None, shape=shape, dtype=dtype, returnoffset=True,
                      align=TIFF.ALLOCATIONGRANULARITY)
        result = imwrite(filename, **kwargs)
        if result is None:
            # TODO: fail before creating file or writing data
            raise ValueError('image data are not memory-mappable')
        offset = result[0]
    else:
        # use existing file
        with TiffFile(filename, **kwargs) as tif:
            if page is not None:
                page = tif.pages[page]
                if not page.is_memmappable:
                    raise ValueError('image data are not memory-mappable')
                offset, _ = page.is_contiguous
                shape = page.shape
                dtype = page.dtype
            else:
                series = tif.series[series]
                if series.offset is None:
                    raise ValueError('image data are not memory-mappable')
                shape = series.shape
                dtype = series.dtype
                offset = series.offset
            dtype = tif.byteorder + dtype.char
    return numpy.memmap(filename, dtype, mode, offset, shape, 'C')
python
def memmap(filename, shape=None, dtype=None, page=None, series=0, mode='r+',
           **kwargs):
    """Return memory-mapped numpy array stored in TIFF file.

    Memory-mapping requires data stored in native byte order, without tiling,
    compression, predictors, etc.
    If 'shape' and 'dtype' are provided, existing files will be overwritten or
    appended to depending on the 'append' parameter.
    Otherwise the image data of a specified page or series in an existing
    file will be memory-mapped. By default, the image data of the first page
    series is memory-mapped.
    Call flush() to write any changes in the array to the file.
    Raise ValueError if the image data in the file is not memory-mappable.

    Parameters
    ----------
    filename : str
        Name of the TIFF file which stores the array.
    shape : tuple
        Shape of the empty array.
    dtype : numpy.dtype
        Data-type of the empty array.
    page : int
        Index of the page which image data to memory-map.
    series : int
        Index of the page series which image data to memory-map.
    mode : {'r+', 'r', 'c'}
        The file open mode. Default is to open existing file for reading and
        writing ('r+').
    kwargs : dict
        Additional parameters passed to imwrite() or TiffFile().

    """
    if shape is not None and dtype is not None:
        # create a new, empty array
        kwargs.update(data=None, shape=shape, dtype=dtype, returnoffset=True,
                      align=TIFF.ALLOCATIONGRANULARITY)
        result = imwrite(filename, **kwargs)
        if result is None:
            # TODO: fail before creating file or writing data
            raise ValueError('image data are not memory-mappable')
        offset = result[0]
    else:
        # use existing file
        with TiffFile(filename, **kwargs) as tif:
            if page is not None:
                page = tif.pages[page]
                if not page.is_memmappable:
                    raise ValueError('image data are not memory-mappable')
                offset, _ = page.is_contiguous
                shape = page.shape
                dtype = page.dtype
            else:
                series = tif.series[series]
                if series.offset is None:
                    raise ValueError('image data are not memory-mappable')
                shape = series.shape
                dtype = series.dtype
                offset = series.offset
            dtype = tif.byteorder + dtype.char
    return numpy.memmap(filename, dtype, mode, offset, shape, 'C')
[ "def", "memmap", "(", "filename", ",", "shape", "=", "None", ",", "dtype", "=", "None", ",", "page", "=", "None", ",", "series", "=", "0", ",", "mode", "=", "'r+'", ",", "*", "*", "kwargs", ")", ":", "if", "shape", "is", "not", "None", "and", "dtype", "is", "not", "None", ":", "# create a new, empty array", "kwargs", ".", "update", "(", "data", "=", "None", ",", "shape", "=", "shape", ",", "dtype", "=", "dtype", ",", "returnoffset", "=", "True", ",", "align", "=", "TIFF", ".", "ALLOCATIONGRANULARITY", ")", "result", "=", "imwrite", "(", "filename", ",", "*", "*", "kwargs", ")", "if", "result", "is", "None", ":", "# TODO: fail before creating file or writing data", "raise", "ValueError", "(", "'image data are not memory-mappable'", ")", "offset", "=", "result", "[", "0", "]", "else", ":", "# use existing file", "with", "TiffFile", "(", "filename", ",", "*", "*", "kwargs", ")", "as", "tif", ":", "if", "page", "is", "not", "None", ":", "page", "=", "tif", ".", "pages", "[", "page", "]", "if", "not", "page", ".", "is_memmappable", ":", "raise", "ValueError", "(", "'image data are not memory-mappable'", ")", "offset", ",", "_", "=", "page", ".", "is_contiguous", "shape", "=", "page", ".", "shape", "dtype", "=", "page", ".", "dtype", "else", ":", "series", "=", "tif", ".", "series", "[", "series", "]", "if", "series", ".", "offset", "is", "None", ":", "raise", "ValueError", "(", "'image data are not memory-mappable'", ")", "shape", "=", "series", ".", "shape", "dtype", "=", "series", ".", "dtype", "offset", "=", "series", ".", "offset", "dtype", "=", "tif", ".", "byteorder", "+", "dtype", ".", "char", "return", "numpy", ".", "memmap", "(", "filename", ",", "dtype", ",", "mode", ",", "offset", ",", "shape", ",", "'C'", ")" ]
Return memory-mapped numpy array stored in TIFF file.

Memory-mapping requires data stored in native byte order, without tiling,
compression, predictors, etc.
If 'shape' and 'dtype' are provided, existing files will be overwritten or
appended to depending on the 'append' parameter.
Otherwise the image data of a specified page or series in an existing
file will be memory-mapped. By default, the image data of the first page
series is memory-mapped.
Call flush() to write any changes in the array to the file.
Raise ValueError if the image data in the file is not memory-mappable.

Parameters
----------
filename : str
    Name of the TIFF file which stores the array.
shape : tuple
    Shape of the empty array.
dtype : numpy.dtype
    Data-type of the empty array.
page : int
    Index of the page which image data to memory-map.
series : int
    Index of the page series which image data to memory-map.
mode : {'r+', 'r', 'c'}
    The file open mode. Default is to open existing file for reading and
    writing ('r+').
kwargs : dict
    Additional parameters passed to imwrite() or TiffFile().
[ "Return", "memory", "-", "mapped", "numpy", "array", "stored", "in", "TIFF", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L680-L740
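A sketch of both modes of memmap (create-empty and map-existing), with the same import-path assumption; the file name is a placeholder.

# Create an empty memory-mapped TIFF, write into it, flush to disk,
# then reopen the same file read-only as a second mapping.
import numpy
from nionswift_plugin.TIFF_IO import tifffile

mm = tifffile.memmap("mapped.tif", shape=(512, 512), dtype=numpy.float32)
mm[0, :] = 1.0
mm.flush()
ro = tifffile.memmap("mapped.tif", mode="r")
print(ro[0, 0])  # 1.0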
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_tags
def read_tags(fh, byteorder, offsetsize, tagnames, customtags=None,
              maxifds=None):
    """Read tags from chain of IFDs and return as list of dicts.

    The file handle position must be at a valid IFD header.

    """
    if offsetsize == 4:
        offsetformat = byteorder+'I'
        tagnosize = 2
        tagnoformat = byteorder+'H'
        tagsize = 12
        tagformat1 = byteorder+'HH'
        tagformat2 = byteorder+'I4s'
    elif offsetsize == 8:
        offsetformat = byteorder+'Q'
        tagnosize = 8
        tagnoformat = byteorder+'Q'
        tagsize = 20
        tagformat1 = byteorder+'HH'
        tagformat2 = byteorder+'Q8s'
    else:
        raise ValueError('invalid offset size')

    if customtags is None:
        customtags = {}
    if maxifds is None:
        maxifds = 2**32

    result = []
    unpack = struct.unpack
    offset = fh.tell()
    while len(result) < maxifds:
        # loop over IFDs
        try:
            tagno = unpack(tagnoformat, fh.read(tagnosize))[0]
            if tagno > 4096:
                raise TiffFileError('suspicious number of tags')
        except Exception:
            log.warning('read_tags: corrupted tag list at offset %i', offset)
            break

        tags = {}
        data = fh.read(tagsize * tagno)
        pos = fh.tell()
        index = 0
        for _ in range(tagno):
            code, type_ = unpack(tagformat1, data[index:index+4])
            count, value = unpack(tagformat2, data[index+4:index+tagsize])
            index += tagsize
            name = tagnames.get(code, str(code))
            try:
                dtype = TIFF.DATA_FORMATS[type_]
            except KeyError:
                raise TiffFileError('unknown tag data type %i' % type_)

            fmt = '%s%i%s' % (byteorder, count * int(dtype[0]), dtype[1])
            size = struct.calcsize(fmt)
            if size > offsetsize or code in customtags:
                offset = unpack(offsetformat, value)[0]
                if offset < 8 or offset > fh.size - size:
                    raise TiffFileError('invalid tag value offset %i' % offset)
                fh.seek(offset)
                if code in customtags:
                    readfunc = customtags[code][1]
                    value = readfunc(fh, byteorder, dtype, count, offsetsize)
                elif type_ == 7 or (count > 1 and dtype[-1] == 'B'):
                    value = read_bytes(fh, byteorder, dtype, count, offsetsize)
                elif code in tagnames or dtype[-1] == 's':
                    value = unpack(fmt, fh.read(size))
                else:
                    value = read_numpy(fh, byteorder, dtype, count, offsetsize)
            elif dtype[-1] == 'B' or type_ == 7:
                value = value[:size]
            else:
                value = unpack(fmt, value[:size])

            if code not in customtags and code not in TIFF.TAG_TUPLE:
                if len(value) == 1:
                    value = value[0]
            if type_ != 7 and dtype[-1] == 's' and isinstance(value, bytes):
                # TIFF ASCII fields can contain multiple strings,
                # each terminated with a NUL
                try:
                    value = bytes2str(stripascii(value).strip())
                except UnicodeDecodeError:
                    log.warning(
                        'read_tags: coercing invalid ASCII to bytes (tag %i)',
                        code)
            tags[name] = value

        result.append(tags)
        # read offset to next page
        fh.seek(pos)
        offset = unpack(offsetformat, fh.read(offsetsize))[0]
        if offset == 0:
            break
        if offset >= fh.size:
            log.warning('read_tags: invalid page offset (%i)', offset)
            break
        fh.seek(offset)

    if result and maxifds == 1:
        result = result[0]
    return result
python
def read_tags(fh, byteorder, offsetsize, tagnames, customtags=None,
              maxifds=None):
    """Read tags from chain of IFDs and return as list of dicts.

    The file handle position must be at a valid IFD header.

    """
    if offsetsize == 4:
        offsetformat = byteorder+'I'
        tagnosize = 2
        tagnoformat = byteorder+'H'
        tagsize = 12
        tagformat1 = byteorder+'HH'
        tagformat2 = byteorder+'I4s'
    elif offsetsize == 8:
        offsetformat = byteorder+'Q'
        tagnosize = 8
        tagnoformat = byteorder+'Q'
        tagsize = 20
        tagformat1 = byteorder+'HH'
        tagformat2 = byteorder+'Q8s'
    else:
        raise ValueError('invalid offset size')

    if customtags is None:
        customtags = {}
    if maxifds is None:
        maxifds = 2**32

    result = []
    unpack = struct.unpack
    offset = fh.tell()
    while len(result) < maxifds:
        # loop over IFDs
        try:
            tagno = unpack(tagnoformat, fh.read(tagnosize))[0]
            if tagno > 4096:
                raise TiffFileError('suspicious number of tags')
        except Exception:
            log.warning('read_tags: corrupted tag list at offset %i', offset)
            break

        tags = {}
        data = fh.read(tagsize * tagno)
        pos = fh.tell()
        index = 0
        for _ in range(tagno):
            code, type_ = unpack(tagformat1, data[index:index+4])
            count, value = unpack(tagformat2, data[index+4:index+tagsize])
            index += tagsize
            name = tagnames.get(code, str(code))
            try:
                dtype = TIFF.DATA_FORMATS[type_]
            except KeyError:
                raise TiffFileError('unknown tag data type %i' % type_)

            fmt = '%s%i%s' % (byteorder, count * int(dtype[0]), dtype[1])
            size = struct.calcsize(fmt)
            if size > offsetsize or code in customtags:
                offset = unpack(offsetformat, value)[0]
                if offset < 8 or offset > fh.size - size:
                    raise TiffFileError('invalid tag value offset %i' % offset)
                fh.seek(offset)
                if code in customtags:
                    readfunc = customtags[code][1]
                    value = readfunc(fh, byteorder, dtype, count, offsetsize)
                elif type_ == 7 or (count > 1 and dtype[-1] == 'B'):
                    value = read_bytes(fh, byteorder, dtype, count, offsetsize)
                elif code in tagnames or dtype[-1] == 's':
                    value = unpack(fmt, fh.read(size))
                else:
                    value = read_numpy(fh, byteorder, dtype, count, offsetsize)
            elif dtype[-1] == 'B' or type_ == 7:
                value = value[:size]
            else:
                value = unpack(fmt, value[:size])

            if code not in customtags and code not in TIFF.TAG_TUPLE:
                if len(value) == 1:
                    value = value[0]
            if type_ != 7 and dtype[-1] == 's' and isinstance(value, bytes):
                # TIFF ASCII fields can contain multiple strings,
                # each terminated with a NUL
                try:
                    value = bytes2str(stripascii(value).strip())
                except UnicodeDecodeError:
                    log.warning(
                        'read_tags: coercing invalid ASCII to bytes (tag %i)',
                        code)
            tags[name] = value

        result.append(tags)
        # read offset to next page
        fh.seek(pos)
        offset = unpack(offsetformat, fh.read(offsetsize))[0]
        if offset == 0:
            break
        if offset >= fh.size:
            log.warning('read_tags: invalid page offset (%i)', offset)
            break
        fh.seek(offset)

    if result and maxifds == 1:
        result = result[0]
    return result
[ "def", "read_tags", "(", "fh", ",", "byteorder", ",", "offsetsize", ",", "tagnames", ",", "customtags", "=", "None", ",", "maxifds", "=", "None", ")", ":", "if", "offsetsize", "==", "4", ":", "offsetformat", "=", "byteorder", "+", "'I'", "tagnosize", "=", "2", "tagnoformat", "=", "byteorder", "+", "'H'", "tagsize", "=", "12", "tagformat1", "=", "byteorder", "+", "'HH'", "tagformat2", "=", "byteorder", "+", "'I4s'", "elif", "offsetsize", "==", "8", ":", "offsetformat", "=", "byteorder", "+", "'Q'", "tagnosize", "=", "8", "tagnoformat", "=", "byteorder", "+", "'Q'", "tagsize", "=", "20", "tagformat1", "=", "byteorder", "+", "'HH'", "tagformat2", "=", "byteorder", "+", "'Q8s'", "else", ":", "raise", "ValueError", "(", "'invalid offset size'", ")", "if", "customtags", "is", "None", ":", "customtags", "=", "{", "}", "if", "maxifds", "is", "None", ":", "maxifds", "=", "2", "**", "32", "result", "=", "[", "]", "unpack", "=", "struct", ".", "unpack", "offset", "=", "fh", ".", "tell", "(", ")", "while", "len", "(", "result", ")", "<", "maxifds", ":", "# loop over IFDs", "try", ":", "tagno", "=", "unpack", "(", "tagnoformat", ",", "fh", ".", "read", "(", "tagnosize", ")", ")", "[", "0", "]", "if", "tagno", ">", "4096", ":", "raise", "TiffFileError", "(", "'suspicious number of tags'", ")", "except", "Exception", ":", "log", ".", "warning", "(", "'read_tags: corrupted tag list at offset %i'", ",", "offset", ")", "break", "tags", "=", "{", "}", "data", "=", "fh", ".", "read", "(", "tagsize", "*", "tagno", ")", "pos", "=", "fh", ".", "tell", "(", ")", "index", "=", "0", "for", "_", "in", "range", "(", "tagno", ")", ":", "code", ",", "type_", "=", "unpack", "(", "tagformat1", ",", "data", "[", "index", ":", "index", "+", "4", "]", ")", "count", ",", "value", "=", "unpack", "(", "tagformat2", ",", "data", "[", "index", "+", "4", ":", "index", "+", "tagsize", "]", ")", "index", "+=", "tagsize", "name", "=", "tagnames", ".", "get", "(", "code", ",", "str", "(", "code", ")", ")", "try", ":", "dtype", "=", "TIFF", ".", "DATA_FORMATS", "[", "type_", "]", "except", "KeyError", ":", "raise", "TiffFileError", "(", "'unknown tag data type %i'", "%", "type_", ")", "fmt", "=", "'%s%i%s'", "%", "(", "byteorder", ",", "count", "*", "int", "(", "dtype", "[", "0", "]", ")", ",", "dtype", "[", "1", "]", ")", "size", "=", "struct", ".", "calcsize", "(", "fmt", ")", "if", "size", ">", "offsetsize", "or", "code", "in", "customtags", ":", "offset", "=", "unpack", "(", "offsetformat", ",", "value", ")", "[", "0", "]", "if", "offset", "<", "8", "or", "offset", ">", "fh", ".", "size", "-", "size", ":", "raise", "TiffFileError", "(", "'invalid tag value offset %i'", "%", "offset", ")", "fh", ".", "seek", "(", "offset", ")", "if", "code", "in", "customtags", ":", "readfunc", "=", "customtags", "[", "code", "]", "[", "1", "]", "value", "=", "readfunc", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", "elif", "type_", "==", "7", "or", "(", "count", ">", "1", "and", "dtype", "[", "-", "1", "]", "==", "'B'", ")", ":", "value", "=", "read_bytes", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", "elif", "code", "in", "tagnames", "or", "dtype", "[", "-", "1", "]", "==", "'s'", ":", "value", "=", "unpack", "(", "fmt", ",", "fh", ".", "read", "(", "size", ")", ")", "else", ":", "value", "=", "read_numpy", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", "elif", "dtype", "[", "-", "1", "]", "==", "'B'", "or", "type_", "==", "7", ":", "value", 
"=", "value", "[", ":", "size", "]", "else", ":", "value", "=", "unpack", "(", "fmt", ",", "value", "[", ":", "size", "]", ")", "if", "code", "not", "in", "customtags", "and", "code", "not", "in", "TIFF", ".", "TAG_TUPLE", ":", "if", "len", "(", "value", ")", "==", "1", ":", "value", "=", "value", "[", "0", "]", "if", "type_", "!=", "7", "and", "dtype", "[", "-", "1", "]", "==", "'s'", "and", "isinstance", "(", "value", ",", "bytes", ")", ":", "# TIFF ASCII fields can contain multiple strings,", "# each terminated with a NUL", "try", ":", "value", "=", "bytes2str", "(", "stripascii", "(", "value", ")", ".", "strip", "(", ")", ")", "except", "UnicodeDecodeError", ":", "log", ".", "warning", "(", "'read_tags: coercing invalid ASCII to bytes (tag %i)'", ",", "code", ")", "tags", "[", "name", "]", "=", "value", "result", ".", "append", "(", "tags", ")", "# read offset to next page", "fh", ".", "seek", "(", "pos", ")", "offset", "=", "unpack", "(", "offsetformat", ",", "fh", ".", "read", "(", "offsetsize", ")", ")", "[", "0", "]", "if", "offset", "==", "0", ":", "break", "if", "offset", ">=", "fh", ".", "size", ":", "log", ".", "warning", "(", "'read_tags: invalid page offset (%i)'", ",", "offset", ")", "break", "fh", ".", "seek", "(", "offset", ")", "if", "result", "and", "maxifds", "==", "1", ":", "result", "=", "result", "[", "0", "]", "return", "result" ]
Read tags from chain of IFDs and return as list of dicts.

The file handle position must be at a valid IFD header.
[ "Read", "tags", "from", "chain", "of", "IFDs", "and", "return", "as", "list", "of", "dicts", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L7973-L8078
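A sketch of calling read_tags directly; it needs a handle with a .size attribute, so tifffile's FileHandle is assumed here, as is a classic little-endian TIFF (8-byte header, 4-byte offsets) and the TIFF.TAGS code-to-name table.

# Position the handle at the first IFD and dump its raw tags as a dict
# (maxifds=1 collapses the result list to a single dict).
import struct
from nionswift_plugin.TIFF_IO import tifffile

with tifffile.FileHandle("stack.tif") as fh:
    order, version, ifd_offset = struct.unpack("<2sHI", fh.read(8))
    fh.seek(ifd_offset)
    tags = tifffile.read_tags(fh, "<", 4, tifffile.TIFF.TAGS, maxifds=1)
    print(tags)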
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_exif_ifd
def read_exif_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read EXIF tags from file and return as dict."""
    exif = read_tags(fh, byteorder, offsetsize, TIFF.EXIF_TAGS, maxifds=1)
    for name in ('ExifVersion', 'FlashpixVersion'):
        try:
            exif[name] = bytes2str(exif[name])
        except Exception:
            pass
    if 'UserComment' in exif:
        idcode = exif['UserComment'][:8]
        try:
            if idcode == b'ASCII\x00\x00\x00':
                exif['UserComment'] = bytes2str(exif['UserComment'][8:])
            elif idcode == b'UNICODE\x00':
                exif['UserComment'] = exif['UserComment'][8:].decode('utf-16')
        except Exception:
            pass
    return exif
python
def read_exif_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read EXIF tags from file and return as dict."""
    exif = read_tags(fh, byteorder, offsetsize, TIFF.EXIF_TAGS, maxifds=1)
    for name in ('ExifVersion', 'FlashpixVersion'):
        try:
            exif[name] = bytes2str(exif[name])
        except Exception:
            pass
    if 'UserComment' in exif:
        idcode = exif['UserComment'][:8]
        try:
            if idcode == b'ASCII\x00\x00\x00':
                exif['UserComment'] = bytes2str(exif['UserComment'][8:])
            elif idcode == b'UNICODE\x00':
                exif['UserComment'] = exif['UserComment'][8:].decode('utf-16')
        except Exception:
            pass
    return exif
[ "def", "read_exif_ifd", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "exif", "=", "read_tags", "(", "fh", ",", "byteorder", ",", "offsetsize", ",", "TIFF", ".", "EXIF_TAGS", ",", "maxifds", "=", "1", ")", "for", "name", "in", "(", "'ExifVersion'", ",", "'FlashpixVersion'", ")", ":", "try", ":", "exif", "[", "name", "]", "=", "bytes2str", "(", "exif", "[", "name", "]", ")", "except", "Exception", ":", "pass", "if", "'UserComment'", "in", "exif", ":", "idcode", "=", "exif", "[", "'UserComment'", "]", "[", ":", "8", "]", "try", ":", "if", "idcode", "==", "b'ASCII\\x00\\x00\\x00'", ":", "exif", "[", "'UserComment'", "]", "=", "bytes2str", "(", "exif", "[", "'UserComment'", "]", "[", "8", ":", "]", ")", "elif", "idcode", "==", "b'UNICODE\\x00'", ":", "exif", "[", "'UserComment'", "]", "=", "exif", "[", "'UserComment'", "]", "[", "8", ":", "]", ".", "decode", "(", "'utf-16'", ")", "except", "Exception", ":", "pass", "return", "exif" ]
Read EXIF tags from file and return as dict.
[ "Read", "EXIF", "tags", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8081-L8098
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_gps_ifd
def read_gps_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read GPS tags from file and return as dict."""
    return read_tags(fh, byteorder, offsetsize, TIFF.GPS_TAGS, maxifds=1)
python
def read_gps_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read GPS tags from file and return as dict."""
    return read_tags(fh, byteorder, offsetsize, TIFF.GPS_TAGS, maxifds=1)
[ "def", "read_gps_ifd", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "return", "read_tags", "(", "fh", ",", "byteorder", ",", "offsetsize", ",", "TIFF", ".", "GPS_TAGS", ",", "maxifds", "=", "1", ")" ]
Read GPS tags from file and return as dict.
[ "Read", "GPS", "tags", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8101-L8103
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_interoperability_ifd
def read_interoperability_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read Interoperability tags from file and return as dict."""
    tag_names = {1: 'InteroperabilityIndex'}
    return read_tags(fh, byteorder, offsetsize, tag_names, maxifds=1)
python
def read_interoperability_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read Interoperability tags from file and return as dict."""
    tag_names = {1: 'InteroperabilityIndex'}
    return read_tags(fh, byteorder, offsetsize, tag_names, maxifds=1)
[ "def", "read_interoperability_ifd", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "tag_names", "=", "{", "1", ":", "'InteroperabilityIndex'", "}", "return", "read_tags", "(", "fh", ",", "byteorder", ",", "offsetsize", ",", "tag_names", ",", "maxifds", "=", "1", ")" ]
Read Interoperability tags from file and return as dict.
[ "Read", "Interoperability", "tags", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8106-L8109
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_bytes
def read_bytes(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as byte string."""
    dtype = 'B' if dtype[-1] == 's' else byteorder+dtype[-1]
    count *= numpy.dtype(dtype).itemsize
    data = fh.read(count)
    if len(data) != count:
        log.warning('read_bytes: failed to read all bytes (%i < %i)',
                    len(data), count)
    return data
python
def read_bytes(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as byte string."""
    dtype = 'B' if dtype[-1] == 's' else byteorder+dtype[-1]
    count *= numpy.dtype(dtype).itemsize
    data = fh.read(count)
    if len(data) != count:
        log.warning('read_bytes: failed to read all bytes (%i < %i)',
                    len(data), count)
    return data
[ "def", "read_bytes", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "dtype", "=", "'B'", "if", "dtype", "[", "-", "1", "]", "==", "'s'", "else", "byteorder", "+", "dtype", "[", "-", "1", "]", "count", "*=", "numpy", ".", "dtype", "(", "dtype", ")", ".", "itemsize", "data", "=", "fh", ".", "read", "(", "count", ")", "if", "len", "(", "data", ")", "!=", "count", ":", "log", ".", "warning", "(", "'read_bytes: failed to read all bytes (%i < %i)'", ",", "len", "(", "data", ")", ",", "count", ")", "return", "data" ]
Read tag data from file and return as byte string.
[ "Read", "tag", "data", "from", "file", "and", "return", "as", "byte", "string", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8112-L8120
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_utf8
def read_utf8(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as unicode string."""
    return fh.read(count).decode('utf-8')
python
def read_utf8(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as unicode string."""
    return fh.read(count).decode('utf-8')
[ "def", "read_utf8", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "return", "fh", ".", "read", "(", "count", ")", ".", "decode", "(", "'utf-8'", ")" ]
Read tag data from file and return as unicode string.
[ "Read", "tag", "data", "from", "file", "and", "return", "as", "unicode", "string", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8123-L8125
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_numpy
def read_numpy(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as numpy array."""
    dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
    return fh.read_array(dtype, count)
python
def read_numpy(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as numpy array."""
    dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
    return fh.read_array(dtype, count)
[ "def", "read_numpy", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "dtype", "=", "'b'", "if", "dtype", "[", "-", "1", "]", "==", "'s'", "else", "byteorder", "+", "dtype", "[", "-", "1", "]", "return", "fh", ".", "read_array", "(", "dtype", ",", "count", ")" ]
Read tag data from file and return as numpy array.
[ "Read", "tag", "data", "from", "file", "and", "return", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8128-L8131
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_colormap
def read_colormap(fh, byteorder, dtype, count, offsetsize):
    """Read ColorMap data from file and return as numpy array."""
    cmap = fh.read_array(byteorder+dtype[-1], count)
    cmap.shape = (3, -1)
    return cmap
python
def read_colormap(fh, byteorder, dtype, count, offsetsize):
    """Read ColorMap data from file and return as numpy array."""
    cmap = fh.read_array(byteorder+dtype[-1], count)
    cmap.shape = (3, -1)
    return cmap
[ "def", "read_colormap", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "cmap", "=", "fh", ".", "read_array", "(", "byteorder", "+", "dtype", "[", "-", "1", "]", ",", "count", ")", "cmap", ".", "shape", "=", "(", "3", ",", "-", "1", ")", "return", "cmap" ]
Read ColorMap data from file and return as numpy array.
[ "Read", "ColorMap", "data", "from", "file", "and", "return", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8134-L8138
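The reshape to (3, -1) reflects how a TIFF ColorMap is laid out: all red values first, then all greens, then all blues. A tiny runnable illustration with stand-in data:

import numpy

# a ColorMap is stored channel-planar: all reds, all greens, all blues
flat = numpy.arange(12, dtype='uint16')  # stand-in for a 4-entry palette
cmap = flat.reshape(3, -1)
assert cmap.shape == (3, 4)
assert cmap[0].tolist() == [0, 1, 2, 3]  # red channel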
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_json
def read_json(fh, byteorder, dtype, count, offsetsize):
    """Read JSON tag data from file and return as object."""
    data = fh.read(count)
    try:
        return json.loads(unicode(stripnull(data), 'utf-8'))
    except ValueError:
        log.warning('read_json: invalid JSON')
python
def read_json(fh, byteorder, dtype, count, offsetsize):
    """Read JSON tag data from file and return as object."""
    data = fh.read(count)
    try:
        return json.loads(unicode(stripnull(data), 'utf-8'))
    except ValueError:
        log.warning('read_json: invalid JSON')
[ "def", "read_json", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "data", "=", "fh", ".", "read", "(", "count", ")", "try", ":", "return", "json", ".", "loads", "(", "unicode", "(", "stripnull", "(", "data", ")", ",", "'utf-8'", ")", ")", "except", "ValueError", ":", "log", ".", "warning", "(", "'read_json: invalid JSON'", ")" ]
Read JSON tag data from file and return as object.
[ "Read", "JSON", "tag", "data", "from", "file", "and", "return", "as", "object", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8141-L8147
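The stripnull call matters because tag payloads are often NUL-padded, which json.loads rejects. A self-contained sketch of the same idea, using a plain split on the first NUL as a stand-in for stripnull:

import json

raw = b'{"shape": [2, 3]}\x00\x00\x00'           # NUL-padded tag payload
text = raw.split(b'\x00', 1)[0].decode('utf-8')  # trim at the first NUL
assert json.loads(text) == {'shape': [2, 3]}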
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_mm_header
def read_mm_header(fh, byteorder, dtype, count, offsetsize):
    """Read FluoView mm_header tag from file and return as dict."""
    mmh = fh.read_record(TIFF.MM_HEADER, byteorder=byteorder)
    mmh = recarray2dict(mmh)
    mmh['Dimensions'] = [
        (bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip())
        for d in mmh['Dimensions']]
    d = mmh['GrayChannel']
    mmh['GrayChannel'] = (
        bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip())
    return mmh
python
def read_mm_header(fh, byteorder, dtype, count, offsetsize):
    """Read FluoView mm_header tag from file and return as dict."""
    mmh = fh.read_record(TIFF.MM_HEADER, byteorder=byteorder)
    mmh = recarray2dict(mmh)
    mmh['Dimensions'] = [
        (bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip())
        for d in mmh['Dimensions']]
    d = mmh['GrayChannel']
    mmh['GrayChannel'] = (
        bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip())
    return mmh
[ "def", "read_mm_header", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "mmh", "=", "fh", ".", "read_record", "(", "TIFF", ".", "MM_HEADER", ",", "byteorder", "=", "byteorder", ")", "mmh", "=", "recarray2dict", "(", "mmh", ")", "mmh", "[", "'Dimensions'", "]", "=", "[", "(", "bytes2str", "(", "d", "[", "0", "]", ")", ".", "strip", "(", ")", ",", "d", "[", "1", "]", ",", "d", "[", "2", "]", ",", "d", "[", "3", "]", ",", "bytes2str", "(", "d", "[", "4", "]", ")", ".", "strip", "(", ")", ")", "for", "d", "in", "mmh", "[", "'Dimensions'", "]", "]", "d", "=", "mmh", "[", "'GrayChannel'", "]", "mmh", "[", "'GrayChannel'", "]", "=", "(", "bytes2str", "(", "d", "[", "0", "]", ")", ".", "strip", "(", ")", ",", "d", "[", "1", "]", ",", "d", "[", "2", "]", ",", "d", "[", "3", "]", ",", "bytes2str", "(", "d", "[", "4", "]", ")", ".", "strip", "(", ")", ")", "return", "mmh" ]
Read FluoView mm_header tag from file and return as dict.
[ "Read", "FluoView", "mm_header", "tag", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8150-L8160
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_uic1tag
def read_uic1tag(fh, byteorder, dtype, count, offsetsize, planecount=None):
    """Read MetaMorph STK UIC1Tag from file and return as dict.

    Return empty dictionary if planecount is unknown.

    """
    assert dtype in ('2I', '1I') and byteorder == '<'
    result = {}
    if dtype == '2I':
        # pre MetaMorph 2.5 (not tested)
        values = fh.read_array('<u4', 2*count).reshape(count, 2)
        result = {'ZDistance': values[:, 0] / values[:, 1]}
    elif planecount:
        for _ in range(count):
            tagid = struct.unpack('<I', fh.read(4))[0]
            if tagid in (28, 29, 37, 40, 41):
                # silently skip unexpected tags
                fh.read(4)
                continue
            name, value = read_uic_tag(fh, tagid, planecount, offset=True)
            result[name] = value
    return result
python
def read_uic1tag(fh, byteorder, dtype, count, offsetsize, planecount=None):
    """Read MetaMorph STK UIC1Tag from file and return as dict.

    Return empty dictionary if planecount is unknown.

    """
    assert dtype in ('2I', '1I') and byteorder == '<'
    result = {}
    if dtype == '2I':
        # pre MetaMorph 2.5 (not tested)
        values = fh.read_array('<u4', 2*count).reshape(count, 2)
        result = {'ZDistance': values[:, 0] / values[:, 1]}
    elif planecount:
        for _ in range(count):
            tagid = struct.unpack('<I', fh.read(4))[0]
            if tagid in (28, 29, 37, 40, 41):
                # silently skip unexpected tags
                fh.read(4)
                continue
            name, value = read_uic_tag(fh, tagid, planecount, offset=True)
            result[name] = value
    return result
[ "def", "read_uic1tag", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ",", "planecount", "=", "None", ")", ":", "assert", "dtype", "in", "(", "'2I'", ",", "'1I'", ")", "and", "byteorder", "==", "'<'", "result", "=", "{", "}", "if", "dtype", "==", "'2I'", ":", "# pre MetaMorph 2.5 (not tested)", "values", "=", "fh", ".", "read_array", "(", "'<u4'", ",", "2", "*", "count", ")", ".", "reshape", "(", "count", ",", "2", ")", "result", "=", "{", "'ZDistance'", ":", "values", "[", ":", ",", "0", "]", "/", "values", "[", ":", ",", "1", "]", "}", "elif", "planecount", ":", "for", "_", "in", "range", "(", "count", ")", ":", "tagid", "=", "struct", ".", "unpack", "(", "'<I'", ",", "fh", ".", "read", "(", "4", ")", ")", "[", "0", "]", "if", "tagid", "in", "(", "28", ",", "29", ",", "37", ",", "40", ",", "41", ")", ":", "# silently skip unexpected tags", "fh", ".", "read", "(", "4", ")", "continue", "name", ",", "value", "=", "read_uic_tag", "(", "fh", ",", "tagid", ",", "planecount", ",", "offset", "=", "True", ")", "result", "[", "name", "]", "=", "value", "return", "result" ]
Read MetaMorph STK UIC1Tag from file and return as dict. Return empty dictionary if planecount is unknown.
[ "Read", "MetaMorph", "STK", "UIC1Tag", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8168-L8189
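The loop body above walks fixed-size records of (uint32 tag id, uint32 value-or-offset) and silently skips a few known-bad ids. A runnable sketch on a synthetic buffer (the payload handling is simplified; the real reader dispatches to read_uic_tag):

import io
import struct

# two synthetic records of (uint32 tag id, uint32 payload), little-endian
fh = io.BytesIO(struct.pack('<4I', 28, 0, 3, 42))
kept = {}
for _ in range(2):
    tagid = struct.unpack('<I', fh.read(4))[0]
    if tagid in (28, 29, 37, 40, 41):  # ids the reader silently skips
        fh.read(4)
        continue
    kept[tagid] = struct.unpack('<I', fh.read(4))[0]
assert kept == {3: 42}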
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_uic2tag
def read_uic2tag(fh, byteorder, dtype, planecount, offsetsize):
    """Read MetaMorph STK UIC2Tag from file and return as dict."""
    assert dtype == '2I' and byteorder == '<'
    values = fh.read_array('<u4', 6*planecount).reshape(planecount, 6)
    return {
        'ZDistance': values[:, 0] / values[:, 1],
        'DateCreated': values[:, 2],  # julian days
        'TimeCreated': values[:, 3],  # milliseconds
        'DateModified': values[:, 4],  # julian days
        'TimeModified': values[:, 5]}
python
def read_uic2tag(fh, byteorder, dtype, planecount, offsetsize):
    """Read MetaMorph STK UIC2Tag from file and return as dict."""
    assert dtype == '2I' and byteorder == '<'
    values = fh.read_array('<u4', 6*planecount).reshape(planecount, 6)
    return {
        'ZDistance': values[:, 0] / values[:, 1],
        'DateCreated': values[:, 2],  # julian days
        'TimeCreated': values[:, 3],  # milliseconds
        'DateModified': values[:, 4],  # julian days
        'TimeModified': values[:, 5]}
[ "def", "read_uic2tag", "(", "fh", ",", "byteorder", ",", "dtype", ",", "planecount", ",", "offsetsize", ")", ":", "assert", "dtype", "==", "'2I'", "and", "byteorder", "==", "'<'", "values", "=", "fh", ".", "read_array", "(", "'<u4'", ",", "6", "*", "planecount", ")", ".", "reshape", "(", "planecount", ",", "6", ")", "return", "{", "'ZDistance'", ":", "values", "[", ":", ",", "0", "]", "/", "values", "[", ":", ",", "1", "]", ",", "'DateCreated'", ":", "values", "[", ":", ",", "2", "]", ",", "# julian days", "'TimeCreated'", ":", "values", "[", ":", ",", "3", "]", ",", "# milliseconds", "'DateModified'", ":", "values", "[", ":", ",", "4", "]", ",", "# julian days", "'TimeModified'", ":", "values", "[", ":", ",", "5", "]", "}" ]
Read MetaMorph STK UIC2Tag from file and return as dict.
[ "Read", "MetaMorph", "STK", "UIC2Tag", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8192-L8201
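ZDistance above is stored per plane as a rational: column 0 over column 1 of the reshaped (planecount, 6) array. A small stand-in example:

import numpy

values = numpy.array([[5, 2, 0, 0, 0, 0],
                      [7, 4, 0, 0, 0, 0]], dtype='<u4')  # two planes
zdistance = values[:, 0] / values[:, 1]  # numerator / denominator
assert zdistance.tolist() == [2.5, 1.75]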
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_uic4tag
def read_uic4tag(fh, byteorder, dtype, planecount, offsetsize):
    """Read MetaMorph STK UIC4Tag from file and return as dict."""
    assert dtype == '1I' and byteorder == '<'
    result = {}
    while True:
        tagid = struct.unpack('<H', fh.read(2))[0]
        if tagid == 0:
            break
        name, value = read_uic_tag(fh, tagid, planecount, offset=False)
        result[name] = value
    return result
python
def read_uic4tag(fh, byteorder, dtype, planecount, offsetsize):
    """Read MetaMorph STK UIC4Tag from file and return as dict."""
    assert dtype == '1I' and byteorder == '<'
    result = {}
    while True:
        tagid = struct.unpack('<H', fh.read(2))[0]
        if tagid == 0:
            break
        name, value = read_uic_tag(fh, tagid, planecount, offset=False)
        result[name] = value
    return result
[ "def", "read_uic4tag", "(", "fh", ",", "byteorder", ",", "dtype", ",", "planecount", ",", "offsetsize", ")", ":", "assert", "dtype", "==", "'1I'", "and", "byteorder", "==", "'<'", "result", "=", "{", "}", "while", "True", ":", "tagid", "=", "struct", ".", "unpack", "(", "'<H'", ",", "fh", ".", "read", "(", "2", ")", ")", "[", "0", "]", "if", "tagid", "==", "0", ":", "break", "name", ",", "value", "=", "read_uic_tag", "(", "fh", ",", "tagid", ",", "planecount", ",", "offset", "=", "False", ")", "result", "[", "name", "]", "=", "value", "return", "result" ]
Read MetaMorph STK UIC4Tag from file and return as dict.
[ "Read", "MetaMorph", "STK", "UIC4Tag", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8211-L8221
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_uic_tag
def read_uic_tag(fh, tagid, planecount, offset):
    """Read a single UIC tag value from file and return tag name and value.

    UIC1Tags use an offset.

    """
    def read_int(count=1):
        value = struct.unpack('<%iI' % count, fh.read(4*count))
        return value[0] if count == 1 else value

    try:
        name, dtype = TIFF.UIC_TAGS[tagid]
    except IndexError:
        # unknown tag
        return '_TagId%i' % tagid, read_int()

    Fraction = TIFF.UIC_TAGS[4][1]

    if offset:
        pos = fh.tell()
        if dtype not in (int, None):
            off = read_int()
            if off < 8:
                if dtype is str:
                    return name, ''
                log.warning("read_uic_tag: invalid offset for tag '%s' (%i)",
                            name, off)
                return name, off
            fh.seek(off)

    if dtype is None:
        # skip
        name = '_' + name
        value = read_int()
    elif dtype is int:
        # int
        value = read_int()
    elif dtype is Fraction:
        # fraction
        value = read_int(2)
        value = value[0] / value[1]
    elif dtype is julian_datetime:
        # datetime
        value = julian_datetime(*read_int(2))
    elif dtype is read_uic_image_property:
        # ImagePropertyEx
        value = read_uic_image_property(fh)
    elif dtype is str:
        # pascal string
        size = read_int()
        if 0 <= size < 2**10:
            value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
            value = bytes2str(stripnull(value))
        elif offset:
            value = ''
            log.warning("read_uic_tag: corrupt string in tag '%s'", name)
        else:
            raise ValueError('read_uic_tag: invalid string size %i' % size)
    elif dtype == '%ip':
        # sequence of pascal strings
        value = []
        for _ in range(planecount):
            size = read_int()
            if 0 <= size < 2**10:
                string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
                string = bytes2str(stripnull(string))
                value.append(string)
            elif offset:
                log.warning("read_uic_tag: corrupt string in tag '%s'", name)
            else:
                raise ValueError('read_uic_tag: invalid string size: %i' %
                                 size)
    else:
        # struct or numpy type
        dtype = '<' + dtype
        if '%i' in dtype:
            dtype = dtype % planecount
        if '(' in dtype:
            # numpy type
            value = fh.read_array(dtype, 1)[0]
            if value.shape[-1] == 2:
                # assume fractions
                value = value[..., 0] / value[..., 1]
        else:
            # struct format
            value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
            if len(value) == 1:
                value = value[0]

    if offset:
        fh.seek(pos + 4)

    return name, value
python
def read_uic_tag(fh, tagid, planecount, offset):
    """Read a single UIC tag value from file and return tag name and value.

    UIC1Tags use an offset.

    """
    def read_int(count=1):
        value = struct.unpack('<%iI' % count, fh.read(4*count))
        return value[0] if count == 1 else value

    try:
        name, dtype = TIFF.UIC_TAGS[tagid]
    except IndexError:
        # unknown tag
        return '_TagId%i' % tagid, read_int()

    Fraction = TIFF.UIC_TAGS[4][1]

    if offset:
        pos = fh.tell()
        if dtype not in (int, None):
            off = read_int()
            if off < 8:
                if dtype is str:
                    return name, ''
                log.warning("read_uic_tag: invalid offset for tag '%s' (%i)",
                            name, off)
                return name, off
            fh.seek(off)

    if dtype is None:
        # skip
        name = '_' + name
        value = read_int()
    elif dtype is int:
        # int
        value = read_int()
    elif dtype is Fraction:
        # fraction
        value = read_int(2)
        value = value[0] / value[1]
    elif dtype is julian_datetime:
        # datetime
        value = julian_datetime(*read_int(2))
    elif dtype is read_uic_image_property:
        # ImagePropertyEx
        value = read_uic_image_property(fh)
    elif dtype is str:
        # pascal string
        size = read_int()
        if 0 <= size < 2**10:
            value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
            value = bytes2str(stripnull(value))
        elif offset:
            value = ''
            log.warning("read_uic_tag: corrupt string in tag '%s'", name)
        else:
            raise ValueError('read_uic_tag: invalid string size %i' % size)
    elif dtype == '%ip':
        # sequence of pascal strings
        value = []
        for _ in range(planecount):
            size = read_int()
            if 0 <= size < 2**10:
                string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
                string = bytes2str(stripnull(string))
                value.append(string)
            elif offset:
                log.warning("read_uic_tag: corrupt string in tag '%s'", name)
            else:
                raise ValueError('read_uic_tag: invalid string size: %i' %
                                 size)
    else:
        # struct or numpy type
        dtype = '<' + dtype
        if '%i' in dtype:
            dtype = dtype % planecount
        if '(' in dtype:
            # numpy type
            value = fh.read_array(dtype, 1)[0]
            if value.shape[-1] == 2:
                # assume fractions
                value = value[..., 0] / value[..., 1]
        else:
            # struct format
            value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
            if len(value) == 1:
                value = value[0]

    if offset:
        fh.seek(pos + 4)

    return name, value
[ "def", "read_uic_tag", "(", "fh", ",", "tagid", ",", "planecount", ",", "offset", ")", ":", "def", "read_int", "(", "count", "=", "1", ")", ":", "value", "=", "struct", ".", "unpack", "(", "'<%iI'", "%", "count", ",", "fh", ".", "read", "(", "4", "*", "count", ")", ")", "return", "value", "[", "0", "]", "if", "count", "==", "1", "else", "value", "try", ":", "name", ",", "dtype", "=", "TIFF", ".", "UIC_TAGS", "[", "tagid", "]", "except", "IndexError", ":", "# unknown tag", "return", "'_TagId%i'", "%", "tagid", ",", "read_int", "(", ")", "Fraction", "=", "TIFF", ".", "UIC_TAGS", "[", "4", "]", "[", "1", "]", "if", "offset", ":", "pos", "=", "fh", ".", "tell", "(", ")", "if", "dtype", "not", "in", "(", "int", ",", "None", ")", ":", "off", "=", "read_int", "(", ")", "if", "off", "<", "8", ":", "if", "dtype", "is", "str", ":", "return", "name", ",", "''", "log", ".", "warning", "(", "\"read_uic_tag: invalid offset for tag '%s' (%i)\"", ",", "name", ",", "off", ")", "return", "name", ",", "off", "fh", ".", "seek", "(", "off", ")", "if", "dtype", "is", "None", ":", "# skip", "name", "=", "'_'", "+", "name", "value", "=", "read_int", "(", ")", "elif", "dtype", "is", "int", ":", "# int", "value", "=", "read_int", "(", ")", "elif", "dtype", "is", "Fraction", ":", "# fraction", "value", "=", "read_int", "(", "2", ")", "value", "=", "value", "[", "0", "]", "/", "value", "[", "1", "]", "elif", "dtype", "is", "julian_datetime", ":", "# datetime", "value", "=", "julian_datetime", "(", "*", "read_int", "(", "2", ")", ")", "elif", "dtype", "is", "read_uic_image_property", ":", "# ImagePropertyEx", "value", "=", "read_uic_image_property", "(", "fh", ")", "elif", "dtype", "is", "str", ":", "# pascal string", "size", "=", "read_int", "(", ")", "if", "0", "<=", "size", "<", "2", "**", "10", ":", "value", "=", "struct", ".", "unpack", "(", "'%is'", "%", "size", ",", "fh", ".", "read", "(", "size", ")", ")", "[", "0", "]", "[", ":", "-", "1", "]", "value", "=", "bytes2str", "(", "stripnull", "(", "value", ")", ")", "elif", "offset", ":", "value", "=", "''", "log", ".", "warning", "(", "\"read_uic_tag: corrupt string in tag '%s'\"", ",", "name", ")", "else", ":", "raise", "ValueError", "(", "'read_uic_tag: invalid string size %i'", "%", "size", ")", "elif", "dtype", "==", "'%ip'", ":", "# sequence of pascal strings", "value", "=", "[", "]", "for", "_", "in", "range", "(", "planecount", ")", ":", "size", "=", "read_int", "(", ")", "if", "0", "<=", "size", "<", "2", "**", "10", ":", "string", "=", "struct", ".", "unpack", "(", "'%is'", "%", "size", ",", "fh", ".", "read", "(", "size", ")", ")", "[", "0", "]", "[", ":", "-", "1", "]", "string", "=", "bytes2str", "(", "stripnull", "(", "string", ")", ")", "value", ".", "append", "(", "string", ")", "elif", "offset", ":", "log", ".", "warning", "(", "\"read_uic_tag: corrupt string in tag '%s'\"", ",", "name", ")", "else", ":", "raise", "ValueError", "(", "'read_uic_tag: invalid string size: %i'", "%", "size", ")", "else", ":", "# struct or numpy type", "dtype", "=", "'<'", "+", "dtype", "if", "'%i'", "in", "dtype", ":", "dtype", "=", "dtype", "%", "planecount", "if", "'('", "in", "dtype", ":", "# numpy type", "value", "=", "fh", ".", "read_array", "(", "dtype", ",", "1", ")", "[", "0", "]", "if", "value", ".", "shape", "[", "-", "1", "]", "==", "2", ":", "# assume fractions", "value", "=", "value", "[", "...", ",", "0", "]", "/", "value", "[", "...", ",", "1", "]", "else", ":", "# struct format", "value", "=", "struct", ".", "unpack", "(", "dtype", ",", "fh", ".", 
"read", "(", "struct", ".", "calcsize", "(", "dtype", ")", ")", ")", "if", "len", "(", "value", ")", "==", "1", ":", "value", "=", "value", "[", "0", "]", "if", "offset", ":", "fh", ".", "seek", "(", "pos", "+", "4", ")", "return", "name", ",", "value" ]
Read a single UIC tag value from file and return tag name and value. UIC1Tags use an offset.
[ "Read", "a", "single", "UIC", "tag", "value", "from", "file", "and", "return", "tag", "name", "and", "value", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8224-L8316
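The 'pascal string' branch above is the recurring pattern: a 32-bit length prefix followed by NUL-terminated bytes. A self-contained sketch on a synthetic buffer (rstrip stands in for stripnull/bytes2str):

import io
import struct

fh = io.BytesIO(struct.pack('<I', 6) + b'Probe\x00')  # length, then bytes
size = struct.unpack('<I', fh.read(4))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
assert value.rstrip(b'\x00').decode('ascii') == 'Probe'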
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_uic_image_property
def read_uic_image_property(fh):
    """Read UIC ImagePropertyEx tag from file and return as dict."""
    # TODO: test this
    size = struct.unpack('B', fh.read(1))[0]
    name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
    flags, prop = struct.unpack('<IB', fh.read(5))
    if prop == 1:
        value = struct.unpack('II', fh.read(8))
        value = value[0] / value[1]
    else:
        size = struct.unpack('B', fh.read(1))[0]
        value = struct.unpack('%is' % size, fh.read(size))[0]
    return dict(name=name, flags=flags, value=value)
python
def read_uic_image_property(fh):
    """Read UIC ImagePropertyEx tag from file and return as dict."""
    # TODO: test this
    size = struct.unpack('B', fh.read(1))[0]
    name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
    flags, prop = struct.unpack('<IB', fh.read(5))
    if prop == 1:
        value = struct.unpack('II', fh.read(8))
        value = value[0] / value[1]
    else:
        size = struct.unpack('B', fh.read(1))[0]
        value = struct.unpack('%is' % size, fh.read(size))[0]
    return dict(name=name, flags=flags, value=value)
[ "def", "read_uic_image_property", "(", "fh", ")", ":", "# TODO: test this", "size", "=", "struct", ".", "unpack", "(", "'B'", ",", "fh", ".", "read", "(", "1", ")", ")", "[", "0", "]", "name", "=", "struct", ".", "unpack", "(", "'%is'", "%", "size", ",", "fh", ".", "read", "(", "size", ")", ")", "[", "0", "]", "[", ":", "-", "1", "]", "flags", ",", "prop", "=", "struct", ".", "unpack", "(", "'<IB'", ",", "fh", ".", "read", "(", "5", ")", ")", "if", "prop", "==", "1", ":", "value", "=", "struct", ".", "unpack", "(", "'II'", ",", "fh", ".", "read", "(", "8", ")", ")", "value", "=", "value", "[", "0", "]", "/", "value", "[", "1", "]", "else", ":", "size", "=", "struct", ".", "unpack", "(", "'B'", ",", "fh", ".", "read", "(", "1", ")", ")", "[", "0", "]", "value", "=", "struct", ".", "unpack", "(", "'%is'", "%", "size", ",", "fh", ".", "read", "(", "size", ")", ")", "[", "0", "]", "return", "dict", "(", "name", "=", "name", ",", "flags", "=", "flags", ",", "value", "=", "value", ")" ]
Read UIC ImagePropertyEx tag from file and return as dict.
[ "Read", "UIC", "ImagePropertyEx", "tag", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8319-L8331
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_cz_lsminfo
def read_cz_lsminfo(fh, byteorder, dtype, count, offsetsize):
    """Read CZ_LSMINFO tag from file and return as dict."""
    assert byteorder == '<'
    magic_number, structure_size = struct.unpack('<II', fh.read(8))
    if magic_number not in (50350412, 67127628):
        raise ValueError('invalid CZ_LSMINFO structure')
    fh.seek(-8, 1)

    if structure_size < numpy.dtype(TIFF.CZ_LSMINFO).itemsize:
        # adjust structure according to structure_size
        lsminfo = []
        size = 0
        for name, dtype in TIFF.CZ_LSMINFO:
            size += numpy.dtype(dtype).itemsize
            if size > structure_size:
                break
            lsminfo.append((name, dtype))
    else:
        lsminfo = TIFF.CZ_LSMINFO

    lsminfo = fh.read_record(lsminfo, byteorder=byteorder)
    lsminfo = recarray2dict(lsminfo)

    # read LSM info subrecords at offsets
    for name, reader in TIFF.CZ_LSMINFO_READERS.items():
        if reader is None:
            continue
        offset = lsminfo.get('Offset' + name, 0)
        if offset < 8:
            continue
        fh.seek(offset)
        try:
            lsminfo[name] = reader(fh)
        except ValueError:
            pass
    return lsminfo
python
def read_cz_lsminfo(fh, byteorder, dtype, count, offsetsize):
    """Read CZ_LSMINFO tag from file and return as dict."""
    assert byteorder == '<'
    magic_number, structure_size = struct.unpack('<II', fh.read(8))
    if magic_number not in (50350412, 67127628):
        raise ValueError('invalid CZ_LSMINFO structure')
    fh.seek(-8, 1)

    if structure_size < numpy.dtype(TIFF.CZ_LSMINFO).itemsize:
        # adjust structure according to structure_size
        lsminfo = []
        size = 0
        for name, dtype in TIFF.CZ_LSMINFO:
            size += numpy.dtype(dtype).itemsize
            if size > structure_size:
                break
            lsminfo.append((name, dtype))
    else:
        lsminfo = TIFF.CZ_LSMINFO

    lsminfo = fh.read_record(lsminfo, byteorder=byteorder)
    lsminfo = recarray2dict(lsminfo)

    # read LSM info subrecords at offsets
    for name, reader in TIFF.CZ_LSMINFO_READERS.items():
        if reader is None:
            continue
        offset = lsminfo.get('Offset' + name, 0)
        if offset < 8:
            continue
        fh.seek(offset)
        try:
            lsminfo[name] = reader(fh)
        except ValueError:
            pass
    return lsminfo
[ "def", "read_cz_lsminfo", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "assert", "byteorder", "==", "'<'", "magic_number", ",", "structure_size", "=", "struct", ".", "unpack", "(", "'<II'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "magic_number", "not", "in", "(", "50350412", ",", "67127628", ")", ":", "raise", "ValueError", "(", "'invalid CZ_LSMINFO structure'", ")", "fh", ".", "seek", "(", "-", "8", ",", "1", ")", "if", "structure_size", "<", "numpy", ".", "dtype", "(", "TIFF", ".", "CZ_LSMINFO", ")", ".", "itemsize", ":", "# adjust structure according to structure_size", "lsminfo", "=", "[", "]", "size", "=", "0", "for", "name", ",", "dtype", "in", "TIFF", ".", "CZ_LSMINFO", ":", "size", "+=", "numpy", ".", "dtype", "(", "dtype", ")", ".", "itemsize", "if", "size", ">", "structure_size", ":", "break", "lsminfo", ".", "append", "(", "(", "name", ",", "dtype", ")", ")", "else", ":", "lsminfo", "=", "TIFF", ".", "CZ_LSMINFO", "lsminfo", "=", "fh", ".", "read_record", "(", "lsminfo", ",", "byteorder", "=", "byteorder", ")", "lsminfo", "=", "recarray2dict", "(", "lsminfo", ")", "# read LSM info subrecords at offsets", "for", "name", ",", "reader", "in", "TIFF", ".", "CZ_LSMINFO_READERS", ".", "items", "(", ")", ":", "if", "reader", "is", "None", ":", "continue", "offset", "=", "lsminfo", ".", "get", "(", "'Offset'", "+", "name", ",", "0", ")", "if", "offset", "<", "8", ":", "continue", "fh", ".", "seek", "(", "offset", ")", "try", ":", "lsminfo", "[", "name", "]", "=", "reader", "(", "fh", ")", "except", "ValueError", ":", "pass", "return", "lsminfo" ]
Read CZ_LSMINFO tag from file and return as dict.
[ "Read", "CZ_LSMINFO", "tag", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8334-L8369
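The truncation loop above keeps only the leading fields of the structured dtype that fit within structure_size. A runnable sketch with a made-up field list (CZ_FIELDS is hypothetical, standing in for TIFF.CZ_LSMINFO):

import numpy

CZ_FIELDS = [('MagicNumber', 'u4'), ('StructureSize', 'u4'),
             ('DimensionX', 'u4'), ('DimensionY', 'u4')]
structure_size = 8  # the file only stores the first two fields
fields, size = [], 0
for name, dtype in CZ_FIELDS:
    size += numpy.dtype(dtype).itemsize
    if size > structure_size:
        break
    fields.append((name, dtype))
assert [f[0] for f in fields] == ['MagicNumber', 'StructureSize']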
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_lsm_floatpairs
def read_lsm_floatpairs(fh):
    """Read LSM sequence of float pairs from file and return as list."""
    size = struct.unpack('<i', fh.read(4))[0]
    return fh.read_array('<2f8', count=size)
python
def read_lsm_floatpairs(fh):
    """Read LSM sequence of float pairs from file and return as list."""
    size = struct.unpack('<i', fh.read(4))[0]
    return fh.read_array('<2f8', count=size)
[ "def", "read_lsm_floatpairs", "(", "fh", ")", ":", "size", "=", "struct", ".", "unpack", "(", "'<i'", ",", "fh", ".", "read", "(", "4", ")", ")", "[", "0", "]", "return", "fh", ".", "read_array", "(", "'<2f8'", ",", "count", "=", "size", ")" ]
Read LSM sequence of float pairs from file and return as list.
[ "Read", "LSM", "sequence", "of", "float", "pairs", "from", "file", "and", "return", "as", "list", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8372-L8375
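The pattern here, a 32-bit count followed by that many packed records, recurs across the LSM sub-blocks. A self-contained sketch using numpy.frombuffer in place of the file handle's read_array:

import io
import struct
import numpy

buf = struct.pack('<i', 2) + struct.pack('<4d', 0.0, 1.0, 2.5, 3.5)
fh = io.BytesIO(buf)
size = struct.unpack('<i', fh.read(4))[0]                  # number of pairs
pairs = numpy.frombuffer(fh.read(size * 16), '<f8').reshape(size, 2)
assert pairs[1].tolist() == [2.5, 3.5]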
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_lsm_positions
def read_lsm_positions(fh):
    """Read LSM positions from file and return as list."""
    size = struct.unpack('<I', fh.read(4))[0]
    return fh.read_array('<2f8', count=size)
python
def read_lsm_positions(fh):
    """Read LSM positions from file and return as list."""
    size = struct.unpack('<I', fh.read(4))[0]
    return fh.read_array('<2f8', count=size)
[ "def", "read_lsm_positions", "(", "fh", ")", ":", "size", "=", "struct", ".", "unpack", "(", "'<I'", ",", "fh", ".", "read", "(", "4", ")", ")", "[", "0", "]", "return", "fh", ".", "read_array", "(", "'<2f8'", ",", "count", "=", "size", ")" ]
Read LSM positions from file and return as list.
[ "Read", "LSM", "positions", "from", "file", "and", "return", "as", "list", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8378-L8381
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_lsm_timestamps
def read_lsm_timestamps(fh):
    """Read LSM time stamps from file and return as list."""
    size, count = struct.unpack('<ii', fh.read(8))
    if size != (8 + 8 * count):
        log.warning('read_lsm_timestamps: invalid LSM TimeStamps block')
        return []
    # return struct.unpack('<%dd' % count, fh.read(8*count))
    return fh.read_array('<f8', count=count)
python
def read_lsm_timestamps(fh):
    """Read LSM time stamps from file and return as list."""
    size, count = struct.unpack('<ii', fh.read(8))
    if size != (8 + 8 * count):
        log.warning('read_lsm_timestamps: invalid LSM TimeStamps block')
        return []
    # return struct.unpack('<%dd' % count, fh.read(8*count))
    return fh.read_array('<f8', count=count)
[ "def", "read_lsm_timestamps", "(", "fh", ")", ":", "size", ",", "count", "=", "struct", ".", "unpack", "(", "'<ii'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "size", "!=", "(", "8", "+", "8", "*", "count", ")", ":", "log", ".", "warning", "(", "'read_lsm_timestamps: invalid LSM TimeStamps block'", ")", "return", "[", "]", "# return struct.unpack('<%dd' % count, fh.read(8*count))", "return", "fh", ".", "read_array", "(", "'<f8'", ",", "count", "=", "count", ")" ]
Read LSM time stamps from file and return as list.
[ "Read", "LSM", "time", "stamps", "from", "file", "and", "return", "as", "list", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8384-L8391
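The consistency check above encodes the block layout: an 8-byte header (size, count) followed by count 8-byte doubles, hence size == 8 + 8 * count. Illustrated on a synthetic header:

import struct

header = struct.pack('<ii', 8 + 8 * 3, 3)  # a block holding 3 doubles
size, count = struct.unpack('<ii', header)
assert size == 8 + 8 * count  # header (8 bytes) plus count * 8 bytes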
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_lsm_eventlist
def read_lsm_eventlist(fh):
    """Read LSM events from file and return as list of (time, type, text)."""
    count = struct.unpack('<II', fh.read(8))[1]
    events = []
    while count > 0:
        esize, etime, etype = struct.unpack('<IdI', fh.read(16))
        etext = bytes2str(stripnull(fh.read(esize - 16)))
        events.append((etime, etype, etext))
        count -= 1
    return events
python
def read_lsm_eventlist(fh):
    """Read LSM events from file and return as list of (time, type, text)."""
    count = struct.unpack('<II', fh.read(8))[1]
    events = []
    while count > 0:
        esize, etime, etype = struct.unpack('<IdI', fh.read(16))
        etext = bytes2str(stripnull(fh.read(esize - 16)))
        events.append((etime, etype, etext))
        count -= 1
    return events
[ "def", "read_lsm_eventlist", "(", "fh", ")", ":", "count", "=", "struct", ".", "unpack", "(", "'<II'", ",", "fh", ".", "read", "(", "8", ")", ")", "[", "1", "]", "events", "=", "[", "]", "while", "count", ">", "0", ":", "esize", ",", "etime", ",", "etype", "=", "struct", ".", "unpack", "(", "'<IdI'", ",", "fh", ".", "read", "(", "16", ")", ")", "etext", "=", "bytes2str", "(", "stripnull", "(", "fh", ".", "read", "(", "esize", "-", "16", ")", ")", ")", "events", ".", "append", "(", "(", "etime", ",", "etype", ",", "etext", ")", ")", "count", "-=", "1", "return", "events" ]
Read LSM events from file and return as list of (time, type, text).
[ "Read", "LSM", "events", "from", "file", "and", "return", "as", "list", "of", "(", "time", "type", "text", ")", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8394-L8403
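Each event record is a 16-byte fixed part (uint32 size, float64 time, uint32 type) followed by size - 16 bytes of NUL-terminated text. A runnable single-event sketch (rstrip stands in for stripnull):

import io
import struct

text = b'bleach start\x00'
event = struct.pack('<IdI', 16 + len(text), 1.25, 2) + text
fh = io.BytesIO(struct.pack('<II', 0, 1) + event)  # header: (_, count=1)
count = struct.unpack('<II', fh.read(8))[1]
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = fh.read(esize - 16).rstrip(b'\x00').decode('ascii')
assert (etime, etype, etext) == (1.25, 2, 'bleach start')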
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_lsm_channelcolors
def read_lsm_channelcolors(fh):
    """Read LSM ChannelColors structure from file and return as dict."""
    result = {'Mono': False, 'Colors': [], 'ColorNames': []}
    pos = fh.tell()
    (size, ncolors, nnames,
     coffset, noffset, mono) = struct.unpack('<IIIIII', fh.read(24))
    if ncolors != nnames:
        log.warning(
            'read_lsm_channelcolors: invalid LSM ChannelColors structure')
        return result
    result['Mono'] = bool(mono)
    # Colors
    fh.seek(pos + coffset)
    colors = fh.read_array('uint8', count=ncolors*4).reshape((ncolors, 4))
    result['Colors'] = colors.tolist()
    # ColorNames
    fh.seek(pos + noffset)
    buffer = fh.read(size - noffset)
    names = []
    while len(buffer) > 4:
        size = struct.unpack('<I', buffer[:4])[0]
        names.append(bytes2str(buffer[4:3+size]))
        buffer = buffer[4+size:]
    result['ColorNames'] = names
    return result
python
def read_lsm_channelcolors(fh):
    """Read LSM ChannelColors structure from file and return as dict."""
    result = {'Mono': False, 'Colors': [], 'ColorNames': []}
    pos = fh.tell()
    (size, ncolors, nnames,
     coffset, noffset, mono) = struct.unpack('<IIIIII', fh.read(24))
    if ncolors != nnames:
        log.warning(
            'read_lsm_channelcolors: invalid LSM ChannelColors structure')
        return result
    result['Mono'] = bool(mono)
    # Colors
    fh.seek(pos + coffset)
    colors = fh.read_array('uint8', count=ncolors*4).reshape((ncolors, 4))
    result['Colors'] = colors.tolist()
    # ColorNames
    fh.seek(pos + noffset)
    buffer = fh.read(size - noffset)
    names = []
    while len(buffer) > 4:
        size = struct.unpack('<I', buffer[:4])[0]
        names.append(bytes2str(buffer[4:3+size]))
        buffer = buffer[4+size:]
    result['ColorNames'] = names
    return result
[ "def", "read_lsm_channelcolors", "(", "fh", ")", ":", "result", "=", "{", "'Mono'", ":", "False", ",", "'Colors'", ":", "[", "]", ",", "'ColorNames'", ":", "[", "]", "}", "pos", "=", "fh", ".", "tell", "(", ")", "(", "size", ",", "ncolors", ",", "nnames", ",", "coffset", ",", "noffset", ",", "mono", ")", "=", "struct", ".", "unpack", "(", "'<IIIIII'", ",", "fh", ".", "read", "(", "24", ")", ")", "if", "ncolors", "!=", "nnames", ":", "log", ".", "warning", "(", "'read_lsm_channelcolors: invalid LSM ChannelColors structure'", ")", "return", "result", "result", "[", "'Mono'", "]", "=", "bool", "(", "mono", ")", "# Colors", "fh", ".", "seek", "(", "pos", "+", "coffset", ")", "colors", "=", "fh", ".", "read_array", "(", "'uint8'", ",", "count", "=", "ncolors", "*", "4", ")", ".", "reshape", "(", "(", "ncolors", ",", "4", ")", ")", "result", "[", "'Colors'", "]", "=", "colors", ".", "tolist", "(", ")", "# ColorNames", "fh", ".", "seek", "(", "pos", "+", "noffset", ")", "buffer", "=", "fh", ".", "read", "(", "size", "-", "noffset", ")", "names", "=", "[", "]", "while", "len", "(", "buffer", ")", ">", "4", ":", "size", "=", "struct", ".", "unpack", "(", "'<I'", ",", "buffer", "[", ":", "4", "]", ")", "[", "0", "]", "names", ".", "append", "(", "bytes2str", "(", "buffer", "[", "4", ":", "3", "+", "size", "]", ")", ")", "buffer", "=", "buffer", "[", "4", "+", "size", ":", "]", "result", "[", "'ColorNames'", "]", "=", "names", "return", "result" ]
Read LSM ChannelColors structure from file and return as dict.
[ "Read", "LSM", "ChannelColors", "structure", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8406-L8430
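The ColorNames loop parses a run of length-prefixed strings; note the slice 4:3+size, which drops the trailing NUL byte that the stored length includes. A stand-alone illustration:

import struct

buffer = b''
for name in (b'Ch1\x00', b'Ch2\x00'):  # stored length includes the NUL
    buffer += struct.pack('<I', len(name)) + name
names = []
while len(buffer) > 4:
    size = struct.unpack('<I', buffer[:4])[0]
    names.append(buffer[4:3 + size].decode('ascii'))  # drop trailing NUL
    buffer = buffer[4 + size:]
assert names == ['Ch1', 'Ch2']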
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_lsm_scaninfo
def read_lsm_scaninfo(fh):
    """Read LSM ScanInfo structure from file and return as dict."""
    block = {}
    blocks = [block]
    unpack = struct.unpack
    if struct.unpack('<I', fh.read(4))[0] != 0x10000000:
        # not a Recording sub block
        log.warning('read_lsm_scaninfo: invalid LSM ScanInfo structure')
        return block
    fh.read(8)
    while True:
        entry, dtype, size = unpack('<III', fh.read(12))
        if dtype == 2:
            # ascii
            value = bytes2str(stripnull(fh.read(size)))
        elif dtype == 4:
            # long
            value = unpack('<i', fh.read(4))[0]
        elif dtype == 5:
            # rational
            value = unpack('<d', fh.read(8))[0]
        else:
            value = 0
        if entry in TIFF.CZ_LSMINFO_SCANINFO_ARRAYS:
            blocks.append(block)
            name = TIFF.CZ_LSMINFO_SCANINFO_ARRAYS[entry]
            newobj = []
            block[name] = newobj
            block = newobj
        elif entry in TIFF.CZ_LSMINFO_SCANINFO_STRUCTS:
            blocks.append(block)
            newobj = {}
            block.append(newobj)
            block = newobj
        elif entry in TIFF.CZ_LSMINFO_SCANINFO_ATTRIBUTES:
            name = TIFF.CZ_LSMINFO_SCANINFO_ATTRIBUTES[entry]
            block[name] = value
        elif entry == 0xffffffff:
            # end sub block
            block = blocks.pop()
        else:
            # unknown entry
            block['Entry0x%x' % entry] = value
        if not blocks:
            break
    return block
python
def read_lsm_scaninfo(fh):
    """Read LSM ScanInfo structure from file and return as dict."""
    block = {}
    blocks = [block]
    unpack = struct.unpack
    if struct.unpack('<I', fh.read(4))[0] != 0x10000000:
        # not a Recording sub block
        log.warning('read_lsm_scaninfo: invalid LSM ScanInfo structure')
        return block
    fh.read(8)
    while True:
        entry, dtype, size = unpack('<III', fh.read(12))
        if dtype == 2:
            # ascii
            value = bytes2str(stripnull(fh.read(size)))
        elif dtype == 4:
            # long
            value = unpack('<i', fh.read(4))[0]
        elif dtype == 5:
            # rational
            value = unpack('<d', fh.read(8))[0]
        else:
            value = 0
        if entry in TIFF.CZ_LSMINFO_SCANINFO_ARRAYS:
            blocks.append(block)
            name = TIFF.CZ_LSMINFO_SCANINFO_ARRAYS[entry]
            newobj = []
            block[name] = newobj
            block = newobj
        elif entry in TIFF.CZ_LSMINFO_SCANINFO_STRUCTS:
            blocks.append(block)
            newobj = {}
            block.append(newobj)
            block = newobj
        elif entry in TIFF.CZ_LSMINFO_SCANINFO_ATTRIBUTES:
            name = TIFF.CZ_LSMINFO_SCANINFO_ATTRIBUTES[entry]
            block[name] = value
        elif entry == 0xffffffff:
            # end sub block
            block = blocks.pop()
        else:
            # unknown entry
            block['Entry0x%x' % entry] = value
        if not blocks:
            break
    return block
[ "def", "read_lsm_scaninfo", "(", "fh", ")", ":", "block", "=", "{", "}", "blocks", "=", "[", "block", "]", "unpack", "=", "struct", ".", "unpack", "if", "struct", ".", "unpack", "(", "'<I'", ",", "fh", ".", "read", "(", "4", ")", ")", "[", "0", "]", "!=", "0x10000000", ":", "# not a Recording sub block", "log", ".", "warning", "(", "'read_lsm_scaninfo: invalid LSM ScanInfo structure'", ")", "return", "block", "fh", ".", "read", "(", "8", ")", "while", "True", ":", "entry", ",", "dtype", ",", "size", "=", "unpack", "(", "'<III'", ",", "fh", ".", "read", "(", "12", ")", ")", "if", "dtype", "==", "2", ":", "# ascii", "value", "=", "bytes2str", "(", "stripnull", "(", "fh", ".", "read", "(", "size", ")", ")", ")", "elif", "dtype", "==", "4", ":", "# long", "value", "=", "unpack", "(", "'<i'", ",", "fh", ".", "read", "(", "4", ")", ")", "[", "0", "]", "elif", "dtype", "==", "5", ":", "# rational", "value", "=", "unpack", "(", "'<d'", ",", "fh", ".", "read", "(", "8", ")", ")", "[", "0", "]", "else", ":", "value", "=", "0", "if", "entry", "in", "TIFF", ".", "CZ_LSMINFO_SCANINFO_ARRAYS", ":", "blocks", ".", "append", "(", "block", ")", "name", "=", "TIFF", ".", "CZ_LSMINFO_SCANINFO_ARRAYS", "[", "entry", "]", "newobj", "=", "[", "]", "block", "[", "name", "]", "=", "newobj", "block", "=", "newobj", "elif", "entry", "in", "TIFF", ".", "CZ_LSMINFO_SCANINFO_STRUCTS", ":", "blocks", ".", "append", "(", "block", ")", "newobj", "=", "{", "}", "block", ".", "append", "(", "newobj", ")", "block", "=", "newobj", "elif", "entry", "in", "TIFF", ".", "CZ_LSMINFO_SCANINFO_ATTRIBUTES", ":", "name", "=", "TIFF", ".", "CZ_LSMINFO_SCANINFO_ATTRIBUTES", "[", "entry", "]", "block", "[", "name", "]", "=", "value", "elif", "entry", "==", "0xffffffff", ":", "# end sub block", "block", "=", "blocks", ".", "pop", "(", ")", "else", ":", "# unknown entry", "block", "[", "'Entry0x%x'", "%", "entry", "]", "=", "value", "if", "not", "blocks", ":", "break", "return", "block" ]
Read LSM ScanInfo structure from file and return as dict.
[ "Read", "LSM", "ScanInfo", "structure", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8433-L8478
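The walker above nests containers with an explicit stack: opening a sub block pushes the current container, and the 0xffffffff sentinel pops back out. A toy trace of that control flow with hypothetical block names:

stack = []
root = {}
block = root
stack.append(block)        # entry opens an array sub block ...
block['Tracks'] = []
block = block['Tracks']
stack.append(block)        # ... holding one struct sub block
block.append({})
block = block[-1]
block['Name'] = 'Track1'   # an attribute entry fills the struct
block = stack.pop()        # 0xffffffff sentinel: close struct
block = stack.pop()        # 0xffffffff sentinel: close array
assert root == {'Tracks': [{'Name': 'Track1'}]}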
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_sis
def read_sis(fh, byteorder, dtype, count, offsetsize):
    """Read OlympusSIS structure and return as dict.

    No specification is avaliable. Only few fields are known.

    """
    result = {}

    (magic, _, minute, hour, day, month, year, _, name, tagcount
     ) = struct.unpack('<4s6shhhhh6s32sh', fh.read(60))

    if magic != b'SIS0':
        raise ValueError('invalid OlympusSIS structure')

    result['name'] = bytes2str(stripnull(name))
    try:
        result['datetime'] = datetime.datetime(1900+year, month+1, day,
                                               hour, minute)
    except ValueError:
        pass

    data = fh.read(8 * tagcount)
    for i in range(0, tagcount*8, 8):
        tagtype, count, offset = struct.unpack('<hhI', data[i:i+8])
        fh.seek(offset)
        if tagtype == 1:
            # general data
            (_, lenexp, xcal, ycal, _, mag, _, camname, pictype,
             ) = struct.unpack('<10shdd8sd2s34s32s', fh.read(112))  # 220
            m = math.pow(10, lenexp)
            result['pixelsizex'] = xcal * m
            result['pixelsizey'] = ycal * m
            result['magnification'] = mag
            result['cameraname'] = bytes2str(stripnull(camname))
            result['picturetype'] = bytes2str(stripnull(pictype))
        elif tagtype == 10:
            # channel data
            continue
            # TODO: does not seem to work?
            # (length, _, exptime, emv, _, camname, _, mictype,
            #  ) = struct.unpack('<h22sId4s32s48s32s', fh.read(152))  # 720
            # result['exposuretime'] = exptime
            # result['emvoltage'] = emv
            # result['cameraname2'] = bytes2str(stripnull(camname))
            # result['microscopename'] = bytes2str(stripnull(mictype))
    return result
python
def read_sis(fh, byteorder, dtype, count, offsetsize):
    """Read OlympusSIS structure and return as dict.

    No specification is avaliable. Only few fields are known.

    """
    result = {}

    (magic, _, minute, hour, day, month, year, _, name, tagcount
     ) = struct.unpack('<4s6shhhhh6s32sh', fh.read(60))

    if magic != b'SIS0':
        raise ValueError('invalid OlympusSIS structure')

    result['name'] = bytes2str(stripnull(name))
    try:
        result['datetime'] = datetime.datetime(1900+year, month+1, day,
                                               hour, minute)
    except ValueError:
        pass

    data = fh.read(8 * tagcount)
    for i in range(0, tagcount*8, 8):
        tagtype, count, offset = struct.unpack('<hhI', data[i:i+8])
        fh.seek(offset)
        if tagtype == 1:
            # general data
            (_, lenexp, xcal, ycal, _, mag, _, camname, pictype,
             ) = struct.unpack('<10shdd8sd2s34s32s', fh.read(112))  # 220
            m = math.pow(10, lenexp)
            result['pixelsizex'] = xcal * m
            result['pixelsizey'] = ycal * m
            result['magnification'] = mag
            result['cameraname'] = bytes2str(stripnull(camname))
            result['picturetype'] = bytes2str(stripnull(pictype))
        elif tagtype == 10:
            # channel data
            continue
            # TODO: does not seem to work?
            # (length, _, exptime, emv, _, camname, _, mictype,
            #  ) = struct.unpack('<h22sId4s32s48s32s', fh.read(152))  # 720
            # result['exposuretime'] = exptime
            # result['emvoltage'] = emv
            # result['cameraname2'] = bytes2str(stripnull(camname))
            # result['microscopename'] = bytes2str(stripnull(mictype))
    return result
[ "def", "read_sis", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "result", "=", "{", "}", "(", "magic", ",", "_", ",", "minute", ",", "hour", ",", "day", ",", "month", ",", "year", ",", "_", ",", "name", ",", "tagcount", ")", "=", "struct", ".", "unpack", "(", "'<4s6shhhhh6s32sh'", ",", "fh", ".", "read", "(", "60", ")", ")", "if", "magic", "!=", "b'SIS0'", ":", "raise", "ValueError", "(", "'invalid OlympusSIS structure'", ")", "result", "[", "'name'", "]", "=", "bytes2str", "(", "stripnull", "(", "name", ")", ")", "try", ":", "result", "[", "'datetime'", "]", "=", "datetime", ".", "datetime", "(", "1900", "+", "year", ",", "month", "+", "1", ",", "day", ",", "hour", ",", "minute", ")", "except", "ValueError", ":", "pass", "data", "=", "fh", ".", "read", "(", "8", "*", "tagcount", ")", "for", "i", "in", "range", "(", "0", ",", "tagcount", "*", "8", ",", "8", ")", ":", "tagtype", ",", "count", ",", "offset", "=", "struct", ".", "unpack", "(", "'<hhI'", ",", "data", "[", "i", ":", "i", "+", "8", "]", ")", "fh", ".", "seek", "(", "offset", ")", "if", "tagtype", "==", "1", ":", "# general data", "(", "_", ",", "lenexp", ",", "xcal", ",", "ycal", ",", "_", ",", "mag", ",", "_", ",", "camname", ",", "pictype", ",", ")", "=", "struct", ".", "unpack", "(", "'<10shdd8sd2s34s32s'", ",", "fh", ".", "read", "(", "112", ")", ")", "# 220", "m", "=", "math", ".", "pow", "(", "10", ",", "lenexp", ")", "result", "[", "'pixelsizex'", "]", "=", "xcal", "*", "m", "result", "[", "'pixelsizey'", "]", "=", "ycal", "*", "m", "result", "[", "'magnification'", "]", "=", "mag", "result", "[", "'cameraname'", "]", "=", "bytes2str", "(", "stripnull", "(", "camname", ")", ")", "result", "[", "'picturetype'", "]", "=", "bytes2str", "(", "stripnull", "(", "pictype", ")", ")", "elif", "tagtype", "==", "10", ":", "# channel data", "continue", "# TODO: does not seem to work?", "# (length, _, exptime, emv, _, camname, _, mictype,", "# ) = struct.unpack('<h22sId4s32s48s32s', fh.read(152)) # 720", "# result['exposuretime'] = exptime", "# result['emvoltage'] = emv", "# result['cameraname2'] = bytes2str(stripnull(camname))", "# result['microscopename'] = bytes2str(stripnull(mictype))", "return", "result" ]
Read OlympusSIS structure and return as dict. No specification is avaliable. Only few fields are known.
[ "Read", "OlympusSIS", "structure", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8481-L8527
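The magic numbers 60 and 112 in the reads above follow directly from the struct format strings; checking calcsize is a quick way to confirm the layouts line up:

import struct

# fixed SIS header: 60 bytes, as consumed by fh.read(60) above
assert struct.calcsize('<4s6shhhhh6s32sh') == 60
# tag-type-1 general-data record: 112 bytes, as consumed by fh.read(112)
assert struct.calcsize('<10shdd8sd2s34s32s') == 112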
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_sis_ini
def read_sis_ini(fh, byteorder, dtype, count, offsetsize):
    """Read OlympusSIS INI string and return as dict."""
    inistr = fh.read(count)
    inistr = bytes2str(stripnull(inistr))
    try:
        return olympusini_metadata(inistr)
    except Exception as exc:
        log.warning('olympusini_metadata: %s: %s',
                    exc.__class__.__name__, exc)
        return {}
python
def read_sis_ini(fh, byteorder, dtype, count, offsetsize):
    """Read OlympusSIS INI string and return as dict."""
    inistr = fh.read(count)
    inistr = bytes2str(stripnull(inistr))
    try:
        return olympusini_metadata(inistr)
    except Exception as exc:
        log.warning('olympusini_metadata: %s: %s',
                    exc.__class__.__name__, exc)
        return {}
[ "def", "read_sis_ini", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "inistr", "=", "fh", ".", "read", "(", "count", ")", "inistr", "=", "bytes2str", "(", "stripnull", "(", "inistr", ")", ")", "try", ":", "return", "olympusini_metadata", "(", "inistr", ")", "except", "Exception", "as", "exc", ":", "log", ".", "warning", "(", "'olympusini_metadata: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "return", "{", "}" ]
Read OlympusSIS INI string and return as dict.
[ "Read", "OlympusSIS", "INI", "string", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8530-L8538
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_tvips_header
def read_tvips_header(fh, byteorder, dtype, count, offsetsize):
    """Read TVIPS EM-MENU headers and return as dict."""
    result = {}
    header = fh.read_record(TIFF.TVIPS_HEADER_V1, byteorder=byteorder)
    for name, typestr in TIFF.TVIPS_HEADER_V1:
        result[name] = header[name].tolist()
    if header['Version'] == 2:
        header = fh.read_record(TIFF.TVIPS_HEADER_V2, byteorder=byteorder)
        if header['Magic'] != int(0xaaaaaaaa):
            log.warning('read_tvips_header: invalid TVIPS v2 magic number')
            return {}
        # decode utf16 strings
        for name, typestr in TIFF.TVIPS_HEADER_V2:
            if typestr.startswith('V'):
                s = header[name].tostring().decode('utf16', errors='ignore')
                result[name] = stripnull(s, null='\0')
            else:
                result[name] = header[name].tolist()
        # convert nm to m
        for axis in 'XY':
            header['PhysicalPixelSize' + axis] /= 1e9
            header['PixelSize' + axis] /= 1e9
    elif header.version != 1:
        log.warning('read_tvips_header: unknown TVIPS header version')
        return {}
    return result
python
def read_tvips_header(fh, byteorder, dtype, count, offsetsize):
    """Read TVIPS EM-MENU headers and return as dict."""
    result = {}
    header = fh.read_record(TIFF.TVIPS_HEADER_V1, byteorder=byteorder)
    for name, typestr in TIFF.TVIPS_HEADER_V1:
        result[name] = header[name].tolist()
    if header['Version'] == 2:
        header = fh.read_record(TIFF.TVIPS_HEADER_V2, byteorder=byteorder)
        if header['Magic'] != int(0xaaaaaaaa):
            log.warning('read_tvips_header: invalid TVIPS v2 magic number')
            return {}
        # decode utf16 strings
        for name, typestr in TIFF.TVIPS_HEADER_V2:
            if typestr.startswith('V'):
                s = header[name].tostring().decode('utf16', errors='ignore')
                result[name] = stripnull(s, null='\0')
            else:
                result[name] = header[name].tolist()
        # convert nm to m
        for axis in 'XY':
            header['PhysicalPixelSize' + axis] /= 1e9
            header['PixelSize' + axis] /= 1e9
    elif header.version != 1:
        log.warning('read_tvips_header: unknown TVIPS header version')
        return {}
    return result
[ "def", "read_tvips_header", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "result", "=", "{", "}", "header", "=", "fh", ".", "read_record", "(", "TIFF", ".", "TVIPS_HEADER_V1", ",", "byteorder", "=", "byteorder", ")", "for", "name", ",", "typestr", "in", "TIFF", ".", "TVIPS_HEADER_V1", ":", "result", "[", "name", "]", "=", "header", "[", "name", "]", ".", "tolist", "(", ")", "if", "header", "[", "'Version'", "]", "==", "2", ":", "header", "=", "fh", ".", "read_record", "(", "TIFF", ".", "TVIPS_HEADER_V2", ",", "byteorder", "=", "byteorder", ")", "if", "header", "[", "'Magic'", "]", "!=", "int", "(", "0xaaaaaaaa", ")", ":", "log", ".", "warning", "(", "'read_tvips_header: invalid TVIPS v2 magic number'", ")", "return", "{", "}", "# decode utf16 strings", "for", "name", ",", "typestr", "in", "TIFF", ".", "TVIPS_HEADER_V2", ":", "if", "typestr", ".", "startswith", "(", "'V'", ")", ":", "s", "=", "header", "[", "name", "]", ".", "tostring", "(", ")", ".", "decode", "(", "'utf16'", ",", "errors", "=", "'ignore'", ")", "result", "[", "name", "]", "=", "stripnull", "(", "s", ",", "null", "=", "'\\0'", ")", "else", ":", "result", "[", "name", "]", "=", "header", "[", "name", "]", ".", "tolist", "(", ")", "# convert nm to m", "for", "axis", "in", "'XY'", ":", "header", "[", "'PhysicalPixelSize'", "+", "axis", "]", "/=", "1e9", "header", "[", "'PixelSize'", "+", "axis", "]", "/=", "1e9", "elif", "header", ".", "version", "!=", "1", ":", "log", ".", "warning", "(", "'read_tvips_header: unknown TVIPS header version'", ")", "return", "{", "}", "return", "result" ]
Read TVIPS EM-MENU headers and return as dict.
[ "Read", "TVIPS", "EM", "-", "MENU", "headers", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8541-L8566
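The V-typed fields decoded above hold UTF-16 text in fixed-size uint16 arrays, NUL-padded. A small stand-in for that decode step (using an explicit utf-16-le codec for determinism):

import numpy

field = numpy.zeros(8, dtype='<u2')          # fixed-size UTF-16 field
field[:4] = [ord(c) for c in 'TEM1']
s = field.tobytes().decode('utf-16-le', errors='ignore')
assert s.split('\0', 1)[0] == 'TEM1'         # trim at the first NUL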
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_fei_metadata
def read_fei_metadata(fh, byteorder, dtype, count, offsetsize):
    """Read FEI SFEG/HELIOS headers and return as dict."""
    result = {}
    section = {}
    data = bytes2str(stripnull(fh.read(count)))
    for line in data.splitlines():
        line = line.strip()
        if line.startswith('['):
            section = {}
            result[line[1:-1]] = section
            continue
        try:
            key, value = line.split('=')
        except ValueError:
            continue
        section[key] = astype(value)
    return result
python
def read_fei_metadata(fh, byteorder, dtype, count, offsetsize):
    """Read FEI SFEG/HELIOS headers and return as dict."""
    result = {}
    section = {}
    data = bytes2str(stripnull(fh.read(count)))
    for line in data.splitlines():
        line = line.strip()
        if line.startswith('['):
            section = {}
            result[line[1:-1]] = section
            continue
        try:
            key, value = line.split('=')
        except ValueError:
            continue
        section[key] = astype(value)
    return result
[ "def", "read_fei_metadata", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "result", "=", "{", "}", "section", "=", "{", "}", "data", "=", "bytes2str", "(", "stripnull", "(", "fh", ".", "read", "(", "count", ")", ")", ")", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'['", ")", ":", "section", "=", "{", "}", "result", "[", "line", "[", "1", ":", "-", "1", "]", "]", "=", "section", "continue", "try", ":", "key", ",", "value", "=", "line", ".", "split", "(", "'='", ")", "except", "ValueError", ":", "continue", "section", "[", "key", "]", "=", "astype", "(", "value", ")", "return", "result" ]
Read FEI SFEG/HELIOS headers and return as dict.
[ "Read", "FEI", "SFEG", "/", "HELIOS", "headers", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8569-L8585
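The parser above treats the payload as INI-like text: '[Section]' lines start a new dict and 'key=value' lines fill it (astype coerces values; the sketch below keeps them as strings):

data = '[Beam]\nHV=30000\n[Scan]\nDwell=1e-6\n'
result, section = {}, {}
for line in data.splitlines():
    line = line.strip()
    if line.startswith('['):
        section = {}
        result[line[1:-1]] = section
        continue
    try:
        key, value = line.split('=')
    except ValueError:
        continue
    section[key] = value
assert result == {'Beam': {'HV': '30000'}, 'Scan': {'Dwell': '1e-6'}}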
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_cz_sem
def read_cz_sem(fh, byteorder, dtype, count, offsetsize):
    """Read Zeiss SEM tag and return as dict.

    See https://sourceforge.net/p/gwyddion/mailman/message/29275000/ for
    unnamed values.

    """
    result = {'': ()}
    key = None
    data = bytes2str(stripnull(fh.read(count)))
    for line in data.splitlines():
        if line.isupper():
            key = line.lower()
        elif key:
            try:
                name, value = line.split('=')
            except ValueError:
                try:
                    name, value = line.split(':', 1)
                except Exception:
                    continue
            value = value.strip()
            unit = ''
            try:
                v, u = value.split()
                number = astype(v, (int, float))
                if number != v:
                    value = number
                    unit = u
            except Exception:
                number = astype(value, (int, float))
                if number != value:
                    value = number
                if value in ('No', 'Off'):
                    value = False
                elif value in ('Yes', 'On'):
                    value = True
            result[key] = (name.strip(), value)
            if unit:
                result[key] += (unit,)
            key = None
        else:
            result[''] += (astype(line, (int, float)),)
    return result
python
def read_cz_sem(fh, byteorder, dtype, count, offsetsize):
    """Read Zeiss SEM tag and return as dict.

    See https://sourceforge.net/p/gwyddion/mailman/message/29275000/ for
    unnamed values.

    """
    result = {'': ()}
    key = None
    data = bytes2str(stripnull(fh.read(count)))
    for line in data.splitlines():
        if line.isupper():
            key = line.lower()
        elif key:
            try:
                name, value = line.split('=')
            except ValueError:
                try:
                    name, value = line.split(':', 1)
                except Exception:
                    continue
            value = value.strip()
            unit = ''
            try:
                v, u = value.split()
                number = astype(v, (int, float))
                if number != v:
                    value = number
                    unit = u
            except Exception:
                number = astype(value, (int, float))
                if number != value:
                    value = number
                if value in ('No', 'Off'):
                    value = False
                elif value in ('Yes', 'On'):
                    value = True
            result[key] = (name.strip(), value)
            if unit:
                result[key] += (unit,)
            key = None
        else:
            result[''] += (astype(line, (int, float)),)
    return result
[ "def", "read_cz_sem", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "result", "=", "{", "''", ":", "(", ")", "}", "key", "=", "None", "data", "=", "bytes2str", "(", "stripnull", "(", "fh", ".", "read", "(", "count", ")", ")", ")", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "if", "line", ".", "isupper", "(", ")", ":", "key", "=", "line", ".", "lower", "(", ")", "elif", "key", ":", "try", ":", "name", ",", "value", "=", "line", ".", "split", "(", "'='", ")", "except", "ValueError", ":", "try", ":", "name", ",", "value", "=", "line", ".", "split", "(", "':'", ",", "1", ")", "except", "Exception", ":", "continue", "value", "=", "value", ".", "strip", "(", ")", "unit", "=", "''", "try", ":", "v", ",", "u", "=", "value", ".", "split", "(", ")", "number", "=", "astype", "(", "v", ",", "(", "int", ",", "float", ")", ")", "if", "number", "!=", "v", ":", "value", "=", "number", "unit", "=", "u", "except", "Exception", ":", "number", "=", "astype", "(", "value", ",", "(", "int", ",", "float", ")", ")", "if", "number", "!=", "value", ":", "value", "=", "number", "if", "value", "in", "(", "'No'", ",", "'Off'", ")", ":", "value", "=", "False", "elif", "value", "in", "(", "'Yes'", ",", "'On'", ")", ":", "value", "=", "True", "result", "[", "key", "]", "=", "(", "name", ".", "strip", "(", ")", ",", "value", ")", "if", "unit", ":", "result", "[", "key", "]", "+=", "(", "unit", ",", ")", "key", "=", "None", "else", ":", "result", "[", "''", "]", "+=", "(", "astype", "(", "line", ",", "(", "int", ",", "float", ")", ")", ",", ")", "return", "result" ]
Read Zeiss SEM tag and return as dict.

See https://sourceforge.net/p/gwyddion/mailman/message/29275000/
for unnamed values.
[ "Read", "Zeiss", "SEM", "tag", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8588-L8631
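A hedged usage sketch for this record's function: tifffile applies read_cz_sem when it parses the CZ_SEM tag, so the resulting dict is reachable from a page's tags. The file name is hypothetical, and the import assumes the vendored module path from this record.

from nionswift_plugin.TIFF_IO import tifffile

with tifffile.TiffFile('zeiss_sem.tif') as tif:  # hypothetical Zeiss SEM TIFF
    sem = tif.pages[0].tags['CZ_SEM'].value  # dict built by read_cz_sem
    # named entries are (label, value[, unit]) tuples; unnamed values
    # accumulate as a tuple under the '' key
    print(sem[''])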
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_nih_image_header
def read_nih_image_header(fh, byteorder, dtype, count, offsetsize):
    """Read NIH_IMAGE_HEADER tag from file and return as dict."""
    a = fh.read_record(TIFF.NIH_IMAGE_HEADER, byteorder=byteorder)
    a = a.newbyteorder(byteorder)
    a = recarray2dict(a)
    a['XUnit'] = a['XUnit'][:a['XUnitSize']]
    a['UM'] = a['UM'][:a['UMsize']]
    return a
python
def read_nih_image_header(fh, byteorder, dtype, count, offsetsize):
    """Read NIH_IMAGE_HEADER tag from file and return as dict."""
    a = fh.read_record(TIFF.NIH_IMAGE_HEADER, byteorder=byteorder)
    a = a.newbyteorder(byteorder)
    a = recarray2dict(a)
    a['XUnit'] = a['XUnit'][:a['XUnitSize']]
    a['UM'] = a['UM'][:a['UMsize']]
    return a
[ "def", "read_nih_image_header", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ",", "offsetsize", ")", ":", "a", "=", "fh", ".", "read_record", "(", "TIFF", ".", "NIH_IMAGE_HEADER", ",", "byteorder", "=", "byteorder", ")", "a", "=", "a", ".", "newbyteorder", "(", "byteorder", ")", "a", "=", "recarray2dict", "(", "a", ")", "a", "[", "'XUnit'", "]", "=", "a", "[", "'XUnit'", "]", "[", ":", "a", "[", "'XUnitSize'", "]", "]", "a", "[", "'UM'", "]", "=", "a", "[", "'UM'", "]", "[", ":", "a", "[", "'UMsize'", "]", "]", "return", "a" ]
Read NIH_IMAGE_HEADER tag from file and return as dict.
[ "Read", "NIH_IMAGE_HEADER", "tag", "from", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8634-L8641
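A similarly hedged sketch: tifffile parses this tag automatically when reading an NIH Image file, so the dict is available on the page ('NIHImageHeader' is tifffile's label for tag 43314; the file name is hypothetical).

from nionswift_plugin.TIFF_IO import tifffile

with tifffile.TiffFile('nih_image.tif') as tif:  # hypothetical NIH Image file
    header = tif.pages[0].tags['NIHImageHeader'].value  # from read_nih_image_header
    print(header['XUnit'], header['UM'])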
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_scanimage_metadata
def read_scanimage_metadata(fh):
    """Read ScanImage BigTIFF v3 static and ROI metadata from open file.

    Return non-varying frame data as dict and ROI group data as JSON.

    The settings can be used to read image data and metadata without
    parsing the TIFF file.

    Raise ValueError if file does not contain valid ScanImage v3 metadata.

    """
    fh.seek(0)
    try:
        byteorder, version = struct.unpack('<2sH', fh.read(4))
        if byteorder != b'II' or version != 43:
            raise Exception
        fh.seek(16)
        magic, version, size0, size1 = struct.unpack('<IIII', fh.read(16))
        if magic != 117637889 or version != 3:
            raise Exception
    except Exception:
        raise ValueError('not a ScanImage BigTIFF v3 file')
    frame_data = matlabstr2py(bytes2str(fh.read(size0)[:-1]))
    roi_data = read_json(fh, '<', None, size1, None) if size1 > 1 else {}
    return frame_data, roi_data
python
def read_scanimage_metadata(fh):
    """Read ScanImage BigTIFF v3 static and ROI metadata from open file.

    Return non-varying frame data as dict and ROI group data as JSON.

    The settings can be used to read image data and metadata without
    parsing the TIFF file.

    Raise ValueError if file does not contain valid ScanImage v3 metadata.

    """
    fh.seek(0)
    try:
        byteorder, version = struct.unpack('<2sH', fh.read(4))
        if byteorder != b'II' or version != 43:
            raise Exception
        fh.seek(16)
        magic, version, size0, size1 = struct.unpack('<IIII', fh.read(16))
        if magic != 117637889 or version != 3:
            raise Exception
    except Exception:
        raise ValueError('not a ScanImage BigTIFF v3 file')
    frame_data = matlabstr2py(bytes2str(fh.read(size0)[:-1]))
    roi_data = read_json(fh, '<', None, size1, None) if size1 > 1 else {}
    return frame_data, roi_data
[ "def", "read_scanimage_metadata", "(", "fh", ")", ":", "fh", ".", "seek", "(", "0", ")", "try", ":", "byteorder", ",", "version", "=", "struct", ".", "unpack", "(", "'<2sH'", ",", "fh", ".", "read", "(", "4", ")", ")", "if", "byteorder", "!=", "b'II'", "or", "version", "!=", "43", ":", "raise", "Exception", "fh", ".", "seek", "(", "16", ")", "magic", ",", "version", ",", "size0", ",", "size1", "=", "struct", ".", "unpack", "(", "'<IIII'", ",", "fh", ".", "read", "(", "16", ")", ")", "if", "magic", "!=", "117637889", "or", "version", "!=", "3", ":", "raise", "Exception", "except", "Exception", ":", "raise", "ValueError", "(", "'not a ScanImage BigTIFF v3 file'", ")", "frame_data", "=", "matlabstr2py", "(", "bytes2str", "(", "fh", ".", "read", "(", "size0", ")", "[", ":", "-", "1", "]", ")", ")", "roi_data", "=", "read_json", "(", "fh", ",", "'<'", ",", "None", ",", "size1", ",", "None", ")", "if", "size1", ">", "1", "else", "{", "}", "return", "frame_data", ",", "roi_data" ]
Read ScanImage BigTIFF v3 static and ROI metadata from open file.

Return non-varying frame data as dict and ROI group data as JSON.

The settings can be used to read image data and metadata without
parsing the TIFF file.

Raise ValueError if file does not contain valid ScanImage v3 metadata.
[ "Read", "ScanImage", "BigTIFF", "v3", "static", "and", "ROI", "metadata", "from", "open", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8644-L8669
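A minimal sketch, assuming 'scanimage.tif' is a ScanImage BigTIFF v3 file (hypothetical path); a plain binary file object suffices because the function only seeks and reads.

from nionswift_plugin.TIFF_IO.tifffile import read_scanimage_metadata

with open('scanimage.tif', 'rb') as fh:  # hypothetical input file
    frame_data, roi_data = read_scanimage_metadata(fh)
print(sorted(frame_data))  # top-level keys of the non-varying frame data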
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
read_micromanager_metadata
def read_micromanager_metadata(fh):
    """Read MicroManager non-TIFF settings from open file and return as dict.

    The settings can be used to read image data without parsing the TIFF
    file.

    Raise ValueError if the file does not contain valid MicroManager
    metadata.

    """
    fh.seek(0)
    try:
        byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except IndexError:
        raise ValueError('not a MicroManager TIFF file')

    result = {}
    fh.seek(8)
    (index_header, index_offset, display_header, display_offset,
     comments_header, comments_offset, summary_header, summary_length
     ) = struct.unpack(byteorder + 'IIIIIIII', fh.read(32))

    if summary_header != 2355492:
        raise ValueError('invalid MicroManager summary header')
    result['Summary'] = read_json(fh, byteorder, None, summary_length, None)

    if index_header != 54773648:
        raise ValueError('invalid MicroManager index header')
    fh.seek(index_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 3453623:
        raise ValueError('invalid MicroManager index header')
    data = struct.unpack(byteorder + 'IIIII'*count, fh.read(20*count))
    result['IndexMap'] = {'Channel': data[::5],
                          'Slice': data[1::5],
                          'Frame': data[2::5],
                          'Position': data[3::5],
                          'Offset': data[4::5]}

    if display_header != 483765892:
        raise ValueError('invalid MicroManager display header')
    fh.seek(display_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 347834724:
        raise ValueError('invalid MicroManager display header')
    result['DisplaySettings'] = read_json(fh, byteorder, None, count, None)

    if comments_header != 99384722:
        raise ValueError('invalid MicroManager comments header')
    fh.seek(comments_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 84720485:
        raise ValueError('invalid MicroManager comments header')
    result['Comments'] = read_json(fh, byteorder, None, count, None)
    return result
python
def read_micromanager_metadata(fh):
    """Read MicroManager non-TIFF settings from open file and return as dict.

    The settings can be used to read image data without parsing the TIFF
    file.

    Raise ValueError if the file does not contain valid MicroManager
    metadata.

    """
    fh.seek(0)
    try:
        byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except IndexError:
        raise ValueError('not a MicroManager TIFF file')

    result = {}
    fh.seek(8)
    (index_header, index_offset, display_header, display_offset,
     comments_header, comments_offset, summary_header, summary_length
     ) = struct.unpack(byteorder + 'IIIIIIII', fh.read(32))

    if summary_header != 2355492:
        raise ValueError('invalid MicroManager summary header')
    result['Summary'] = read_json(fh, byteorder, None, summary_length, None)

    if index_header != 54773648:
        raise ValueError('invalid MicroManager index header')
    fh.seek(index_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 3453623:
        raise ValueError('invalid MicroManager index header')
    data = struct.unpack(byteorder + 'IIIII'*count, fh.read(20*count))
    result['IndexMap'] = {'Channel': data[::5],
                          'Slice': data[1::5],
                          'Frame': data[2::5],
                          'Position': data[3::5],
                          'Offset': data[4::5]}

    if display_header != 483765892:
        raise ValueError('invalid MicroManager display header')
    fh.seek(display_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 347834724:
        raise ValueError('invalid MicroManager display header')
    result['DisplaySettings'] = read_json(fh, byteorder, None, count, None)

    if comments_header != 99384722:
        raise ValueError('invalid MicroManager comments header')
    fh.seek(comments_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 84720485:
        raise ValueError('invalid MicroManager comments header')
    result['Comments'] = read_json(fh, byteorder, None, count, None)
    return result
[ "def", "read_micromanager_metadata", "(", "fh", ")", ":", "fh", ".", "seek", "(", "0", ")", "try", ":", "byteorder", "=", "{", "b'II'", ":", "'<'", ",", "b'MM'", ":", "'>'", "}", "[", "fh", ".", "read", "(", "2", ")", "]", "except", "IndexError", ":", "raise", "ValueError", "(", "'not a MicroManager TIFF file'", ")", "result", "=", "{", "}", "fh", ".", "seek", "(", "8", ")", "(", "index_header", ",", "index_offset", ",", "display_header", ",", "display_offset", ",", "comments_header", ",", "comments_offset", ",", "summary_header", ",", "summary_length", ")", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'IIIIIIII'", ",", "fh", ".", "read", "(", "32", ")", ")", "if", "summary_header", "!=", "2355492", ":", "raise", "ValueError", "(", "'invalid MicroManager summary header'", ")", "result", "[", "'Summary'", "]", "=", "read_json", "(", "fh", ",", "byteorder", ",", "None", ",", "summary_length", ",", "None", ")", "if", "index_header", "!=", "54773648", ":", "raise", "ValueError", "(", "'invalid MicroManager index header'", ")", "fh", ".", "seek", "(", "index_offset", ")", "header", ",", "count", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'II'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "header", "!=", "3453623", ":", "raise", "ValueError", "(", "'invalid MicroManager index header'", ")", "data", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'IIIII'", "*", "count", ",", "fh", ".", "read", "(", "20", "*", "count", ")", ")", "result", "[", "'IndexMap'", "]", "=", "{", "'Channel'", ":", "data", "[", ":", ":", "5", "]", ",", "'Slice'", ":", "data", "[", "1", ":", ":", "5", "]", ",", "'Frame'", ":", "data", "[", "2", ":", ":", "5", "]", ",", "'Position'", ":", "data", "[", "3", ":", ":", "5", "]", ",", "'Offset'", ":", "data", "[", "4", ":", ":", "5", "]", "}", "if", "display_header", "!=", "483765892", ":", "raise", "ValueError", "(", "'invalid MicroManager display header'", ")", "fh", ".", "seek", "(", "display_offset", ")", "header", ",", "count", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'II'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "header", "!=", "347834724", ":", "raise", "ValueError", "(", "'invalid MicroManager display header'", ")", "result", "[", "'DisplaySettings'", "]", "=", "read_json", "(", "fh", ",", "byteorder", ",", "None", ",", "count", ",", "None", ")", "if", "comments_header", "!=", "99384722", ":", "raise", "ValueError", "(", "'invalid MicroManager comments header'", ")", "fh", ".", "seek", "(", "comments_offset", ")", "header", ",", "count", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'II'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "header", "!=", "84720485", ":", "raise", "ValueError", "(", "'invalid MicroManager comments header'", ")", "result", "[", "'Comments'", "]", "=", "read_json", "(", "fh", ",", "byteorder", ",", "None", ",", "count", ",", "None", ")", "return", "result" ]
Read MicroManager non-TIFF settings from open file and return as dict.

The settings can be used to read image data without parsing the TIFF file.

Raise ValueError if the file does not contain valid MicroManager metadata.
[ "Read", "MicroManager", "non", "-", "TIFF", "settings", "from", "open", "file", "and", "return", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8672-L8725
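A minimal sketch under the same assumptions (hypothetical file path, vendored import path); the returned dict always carries the four sections the function validates.

from nionswift_plugin.TIFF_IO.tifffile import read_micromanager_metadata

with open('micromanager.ome.tif', 'rb') as fh:  # hypothetical MicroManager file
    meta = read_micromanager_metadata(fh)
print(sorted(meta))  # ['Comments', 'DisplaySettings', 'IndexMap', 'Summary']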
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
imagej_metadata_tag
def imagej_metadata_tag(metadata, byteorder):
    """Return IJMetadata and IJMetadataByteCounts tags from metadata dict.

    The tags can be passed to the TiffWriter.save function as extratags.

    The metadata dict may contain the following keys and values:

    Info : str
        Human-readable information as string.
    Labels : sequence of str
        Human-readable labels for each channel.
    Ranges : sequence of doubles
        Lower and upper values for each channel.
    LUTs : sequence of (3, 256) uint8 ndarrays
        Color palettes for each channel.
    Plot : bytes
        Undocumented ImageJ internal format.
    ROI: bytes
        Undocumented ImageJ internal region of interest format.
    Overlays : bytes
        Undocumented ImageJ internal format.

    """
    header = [{'>': b'IJIJ', '<': b'JIJI'}[byteorder]]
    bytecounts = [0]
    body = []

    def _string(data, byteorder):
        return data.encode('utf-16' + {'>': 'be', '<': 'le'}[byteorder])

    def _doubles(data, byteorder):
        return struct.pack(byteorder+('d' * len(data)), *data)

    def _ndarray(data, byteorder):
        return data.tobytes()

    def _bytes(data, byteorder):
        return data

    metadata_types = (
        ('Info', b'info', 1, _string),
        ('Labels', b'labl', None, _string),
        ('Ranges', b'rang', 1, _doubles),
        ('LUTs', b'luts', None, _ndarray),
        ('Plot', b'plot', 1, _bytes),
        ('ROI', b'roi ', 1, _bytes),
        ('Overlays', b'over', None, _bytes))

    for key, mtype, count, func in metadata_types:
        if key.lower() in metadata:
            key = key.lower()
        elif key not in metadata:
            continue
        if byteorder == '<':
            mtype = mtype[::-1]
        values = metadata[key]
        if count is None:
            count = len(values)
        else:
            values = [values]
        header.append(mtype + struct.pack(byteorder+'I', count))
        for value in values:
            data = func(value, byteorder)
            body.append(data)
            bytecounts.append(len(data))

    if not body:
        return ()
    body = b''.join(body)
    header = b''.join(header)
    data = header + body
    bytecounts[0] = len(header)
    bytecounts = struct.pack(byteorder+('I' * len(bytecounts)), *bytecounts)
    return ((50839, 'B', len(data), data, True),
            (50838, 'I', len(bytecounts)//4, bytecounts, True))
python
def imagej_metadata_tag(metadata, byteorder):
    """Return IJMetadata and IJMetadataByteCounts tags from metadata dict.

    The tags can be passed to the TiffWriter.save function as extratags.

    The metadata dict may contain the following keys and values:

    Info : str
        Human-readable information as string.
    Labels : sequence of str
        Human-readable labels for each channel.
    Ranges : sequence of doubles
        Lower and upper values for each channel.
    LUTs : sequence of (3, 256) uint8 ndarrays
        Color palettes for each channel.
    Plot : bytes
        Undocumented ImageJ internal format.
    ROI: bytes
        Undocumented ImageJ internal region of interest format.
    Overlays : bytes
        Undocumented ImageJ internal format.

    """
    header = [{'>': b'IJIJ', '<': b'JIJI'}[byteorder]]
    bytecounts = [0]
    body = []

    def _string(data, byteorder):
        return data.encode('utf-16' + {'>': 'be', '<': 'le'}[byteorder])

    def _doubles(data, byteorder):
        return struct.pack(byteorder+('d' * len(data)), *data)

    def _ndarray(data, byteorder):
        return data.tobytes()

    def _bytes(data, byteorder):
        return data

    metadata_types = (
        ('Info', b'info', 1, _string),
        ('Labels', b'labl', None, _string),
        ('Ranges', b'rang', 1, _doubles),
        ('LUTs', b'luts', None, _ndarray),
        ('Plot', b'plot', 1, _bytes),
        ('ROI', b'roi ', 1, _bytes),
        ('Overlays', b'over', None, _bytes))

    for key, mtype, count, func in metadata_types:
        if key.lower() in metadata:
            key = key.lower()
        elif key not in metadata:
            continue
        if byteorder == '<':
            mtype = mtype[::-1]
        values = metadata[key]
        if count is None:
            count = len(values)
        else:
            values = [values]
        header.append(mtype + struct.pack(byteorder+'I', count))
        for value in values:
            data = func(value, byteorder)
            body.append(data)
            bytecounts.append(len(data))

    if not body:
        return ()
    body = b''.join(body)
    header = b''.join(header)
    data = header + body
    bytecounts[0] = len(header)
    bytecounts = struct.pack(byteorder+('I' * len(bytecounts)), *bytecounts)
    return ((50839, 'B', len(data), data, True),
            (50838, 'I', len(bytecounts)//4, bytecounts, True))
[ "def", "imagej_metadata_tag", "(", "metadata", ",", "byteorder", ")", ":", "header", "=", "[", "{", "'>'", ":", "b'IJIJ'", ",", "'<'", ":", "b'JIJI'", "}", "[", "byteorder", "]", "]", "bytecounts", "=", "[", "0", "]", "body", "=", "[", "]", "def", "_string", "(", "data", ",", "byteorder", ")", ":", "return", "data", ".", "encode", "(", "'utf-16'", "+", "{", "'>'", ":", "'be'", ",", "'<'", ":", "'le'", "}", "[", "byteorder", "]", ")", "def", "_doubles", "(", "data", ",", "byteorder", ")", ":", "return", "struct", ".", "pack", "(", "byteorder", "+", "(", "'d'", "*", "len", "(", "data", ")", ")", ",", "*", "data", ")", "def", "_ndarray", "(", "data", ",", "byteorder", ")", ":", "return", "data", ".", "tobytes", "(", ")", "def", "_bytes", "(", "data", ",", "byteorder", ")", ":", "return", "data", "metadata_types", "=", "(", "(", "'Info'", ",", "b'info'", ",", "1", ",", "_string", ")", ",", "(", "'Labels'", ",", "b'labl'", ",", "None", ",", "_string", ")", ",", "(", "'Ranges'", ",", "b'rang'", ",", "1", ",", "_doubles", ")", ",", "(", "'LUTs'", ",", "b'luts'", ",", "None", ",", "_ndarray", ")", ",", "(", "'Plot'", ",", "b'plot'", ",", "1", ",", "_bytes", ")", ",", "(", "'ROI'", ",", "b'roi '", ",", "1", ",", "_bytes", ")", ",", "(", "'Overlays'", ",", "b'over'", ",", "None", ",", "_bytes", ")", ")", "for", "key", ",", "mtype", ",", "count", ",", "func", "in", "metadata_types", ":", "if", "key", ".", "lower", "(", ")", "in", "metadata", ":", "key", "=", "key", ".", "lower", "(", ")", "elif", "key", "not", "in", "metadata", ":", "continue", "if", "byteorder", "==", "'<'", ":", "mtype", "=", "mtype", "[", ":", ":", "-", "1", "]", "values", "=", "metadata", "[", "key", "]", "if", "count", "is", "None", ":", "count", "=", "len", "(", "values", ")", "else", ":", "values", "=", "[", "values", "]", "header", ".", "append", "(", "mtype", "+", "struct", ".", "pack", "(", "byteorder", "+", "'I'", ",", "count", ")", ")", "for", "value", "in", "values", ":", "data", "=", "func", "(", "value", ",", "byteorder", ")", "body", ".", "append", "(", "data", ")", "bytecounts", ".", "append", "(", "len", "(", "data", ")", ")", "if", "not", "body", ":", "return", "(", ")", "body", "=", "b''", ".", "join", "(", "body", ")", "header", "=", "b''", ".", "join", "(", "header", ")", "data", "=", "header", "+", "body", "bytecounts", "[", "0", "]", "=", "len", "(", "header", ")", "bytecounts", "=", "struct", ".", "pack", "(", "byteorder", "+", "(", "'I'", "*", "len", "(", "bytecounts", ")", ")", ",", "*", "bytecounts", ")", "return", "(", "(", "50839", ",", "'B'", ",", "len", "(", "data", ")", ",", "data", ",", "True", ")", ",", "(", "50838", ",", "'I'", ",", "len", "(", "bytecounts", ")", "//", "4", ",", "bytecounts", ",", "True", ")", ")" ]
Return IJMetadata and IJMetadataByteCounts tags from metadata dict.

The tags can be passed to the TiffWriter.save function as extratags.

The metadata dict may contain the following keys and values:

Info : str
    Human-readable information as string.
Labels : sequence of str
    Human-readable labels for each channel.
Ranges : sequence of doubles
    Lower and upper values for each channel.
LUTs : sequence of (3, 256) uint8 ndarrays
    Color palettes for each channel.
Plot : bytes
    Undocumented ImageJ internal format.
ROI: bytes
    Undocumented ImageJ internal region of interest format.
Overlays : bytes
    Undocumented ImageJ internal format.
[ "Return", "IJMetadata", "and", "IJMetadataByteCounts", "tags", "from", "metadata", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8738-L8812
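A write-path sketch following the docstring's advice to pass the tags to TiffWriter.save as extratags; the output path and channel labels are illustrative.

import numpy
from nionswift_plugin.TIFF_IO.tifffile import TiffWriter, imagej_metadata_tag

data = numpy.zeros((2, 32, 32), 'uint16')  # two channels of 32x32 pixels
ijtags = imagej_metadata_tag({'Labels': ['GFP', 'DAPI']}, '>')
with TiffWriter('temp_imagej.tif', byteorder='>', imagej=True) as tif:
    tif.save(data, extratags=ijtags)  # embeds IJMetadata(ByteCounts)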
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
imagej_metadata
def imagej_metadata(data, bytecounts, byteorder):
    """Return IJMetadata tag value as dict.

    The 'Info' string can have multiple formats, e.g. OIF or ScanImage,
    that might be parsed into dicts using the matlabstr2py or
    oiffile.SettingsFile functions.

    """
    def _string(data, byteorder):
        return data.decode('utf-16' + {'>': 'be', '<': 'le'}[byteorder])

    def _doubles(data, byteorder):
        return struct.unpack(byteorder+('d' * (len(data) // 8)), data)

    def _lut(data, byteorder):
        return numpy.frombuffer(data, 'uint8').reshape(-1, 256)

    def _bytes(data, byteorder):
        return data

    metadata_types = {  # big-endian
        b'info': ('Info', _string),
        b'labl': ('Labels', _string),
        b'rang': ('Ranges', _doubles),
        b'luts': ('LUTs', _lut),
        b'plot': ('Plots', _bytes),
        b'roi ': ('ROI', _bytes),
        b'over': ('Overlays', _bytes)}
    metadata_types.update(  # little-endian
        dict((k[::-1], v) for k, v in metadata_types.items()))

    if not bytecounts:
        raise ValueError('no ImageJ metadata')

    if not data[:4] in (b'IJIJ', b'JIJI'):
        raise ValueError('invalid ImageJ metadata')

    header_size = bytecounts[0]
    if header_size < 12 or header_size > 804:
        raise ValueError('invalid ImageJ metadata header size')

    ntypes = (header_size - 4) // 8
    header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
    pos = 4 + ntypes * 8
    counter = 0
    result = {}
    for mtype, count in zip(header[::2], header[1::2]):
        values = []
        name, func = metadata_types.get(mtype, (bytes2str(mtype), read_bytes))
        for _ in range(count):
            counter += 1
            pos1 = pos + bytecounts[counter]
            values.append(func(data[pos:pos1], byteorder))
            pos = pos1
        result[name.strip()] = values[0] if count == 1 else values
    return result
python
def imagej_metadata(data, bytecounts, byteorder):
    """Return IJMetadata tag value as dict.

    The 'Info' string can have multiple formats, e.g. OIF or ScanImage,
    that might be parsed into dicts using the matlabstr2py or
    oiffile.SettingsFile functions.

    """
    def _string(data, byteorder):
        return data.decode('utf-16' + {'>': 'be', '<': 'le'}[byteorder])

    def _doubles(data, byteorder):
        return struct.unpack(byteorder+('d' * (len(data) // 8)), data)

    def _lut(data, byteorder):
        return numpy.frombuffer(data, 'uint8').reshape(-1, 256)

    def _bytes(data, byteorder):
        return data

    metadata_types = {  # big-endian
        b'info': ('Info', _string),
        b'labl': ('Labels', _string),
        b'rang': ('Ranges', _doubles),
        b'luts': ('LUTs', _lut),
        b'plot': ('Plots', _bytes),
        b'roi ': ('ROI', _bytes),
        b'over': ('Overlays', _bytes)}
    metadata_types.update(  # little-endian
        dict((k[::-1], v) for k, v in metadata_types.items()))

    if not bytecounts:
        raise ValueError('no ImageJ metadata')

    if not data[:4] in (b'IJIJ', b'JIJI'):
        raise ValueError('invalid ImageJ metadata')

    header_size = bytecounts[0]
    if header_size < 12 or header_size > 804:
        raise ValueError('invalid ImageJ metadata header size')

    ntypes = (header_size - 4) // 8
    header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
    pos = 4 + ntypes * 8
    counter = 0
    result = {}
    for mtype, count in zip(header[::2], header[1::2]):
        values = []
        name, func = metadata_types.get(mtype, (bytes2str(mtype), read_bytes))
        for _ in range(count):
            counter += 1
            pos1 = pos + bytecounts[counter]
            values.append(func(data[pos:pos1], byteorder))
            pos = pos1
        result[name.strip()] = values[0] if count == 1 else values
    return result
[ "def", "imagej_metadata", "(", "data", ",", "bytecounts", ",", "byteorder", ")", ":", "def", "_string", "(", "data", ",", "byteorder", ")", ":", "return", "data", ".", "decode", "(", "'utf-16'", "+", "{", "'>'", ":", "'be'", ",", "'<'", ":", "'le'", "}", "[", "byteorder", "]", ")", "def", "_doubles", "(", "data", ",", "byteorder", ")", ":", "return", "struct", ".", "unpack", "(", "byteorder", "+", "(", "'d'", "*", "(", "len", "(", "data", ")", "//", "8", ")", ")", ",", "data", ")", "def", "_lut", "(", "data", ",", "byteorder", ")", ":", "return", "numpy", ".", "frombuffer", "(", "data", ",", "'uint8'", ")", ".", "reshape", "(", "-", "1", ",", "256", ")", "def", "_bytes", "(", "data", ",", "byteorder", ")", ":", "return", "data", "metadata_types", "=", "{", "# big-endian", "b'info'", ":", "(", "'Info'", ",", "_string", ")", ",", "b'labl'", ":", "(", "'Labels'", ",", "_string", ")", ",", "b'rang'", ":", "(", "'Ranges'", ",", "_doubles", ")", ",", "b'luts'", ":", "(", "'LUTs'", ",", "_lut", ")", ",", "b'plot'", ":", "(", "'Plots'", ",", "_bytes", ")", ",", "b'roi '", ":", "(", "'ROI'", ",", "_bytes", ")", ",", "b'over'", ":", "(", "'Overlays'", ",", "_bytes", ")", "}", "metadata_types", ".", "update", "(", "# little-endian", "dict", "(", "(", "k", "[", ":", ":", "-", "1", "]", ",", "v", ")", "for", "k", ",", "v", "in", "metadata_types", ".", "items", "(", ")", ")", ")", "if", "not", "bytecounts", ":", "raise", "ValueError", "(", "'no ImageJ metadata'", ")", "if", "not", "data", "[", ":", "4", "]", "in", "(", "b'IJIJ'", ",", "b'JIJI'", ")", ":", "raise", "ValueError", "(", "'invalid ImageJ metadata'", ")", "header_size", "=", "bytecounts", "[", "0", "]", "if", "header_size", "<", "12", "or", "header_size", ">", "804", ":", "raise", "ValueError", "(", "'invalid ImageJ metadata header size'", ")", "ntypes", "=", "(", "header_size", "-", "4", ")", "//", "8", "header", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'4sI'", "*", "ntypes", ",", "data", "[", "4", ":", "4", "+", "ntypes", "*", "8", "]", ")", "pos", "=", "4", "+", "ntypes", "*", "8", "counter", "=", "0", "result", "=", "{", "}", "for", "mtype", ",", "count", "in", "zip", "(", "header", "[", ":", ":", "2", "]", ",", "header", "[", "1", ":", ":", "2", "]", ")", ":", "values", "=", "[", "]", "name", ",", "func", "=", "metadata_types", ".", "get", "(", "mtype", ",", "(", "bytes2str", "(", "mtype", ")", ",", "read_bytes", ")", ")", "for", "_", "in", "range", "(", "count", ")", ":", "counter", "+=", "1", "pos1", "=", "pos", "+", "bytecounts", "[", "counter", "]", "values", ".", "append", "(", "func", "(", "data", "[", "pos", ":", "pos1", "]", ",", "byteorder", ")", ")", "pos", "=", "pos1", "result", "[", "name", ".", "strip", "(", ")", "]", "=", "values", "[", "0", "]", "if", "count", "==", "1", "else", "values", "return", "result" ]
Return IJMetadata tag value as dict.

The 'Info' string can have multiple formats, e.g. OIF or ScanImage,
that might be parsed into dicts using the matlabstr2py or
oiffile.SettingsFile functions.
[ "Return", "IJMetadata", "tag", "value", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8815-L8870
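A self-contained round trip pairing this parser with imagej_metadata_tag from the previous record (little-endian throughout; only names from this file are used).

import struct
from nionswift_plugin.TIFF_IO.tifffile import imagej_metadata, imagej_metadata_tag

ijtags = imagej_metadata_tag({'Labels': ['c0', 'c1']}, '<')
data = ijtags[0][3]                          # IJMetadata tag value (bytes)
counts = struct.unpack('<3I', ijtags[1][3])  # IJMetadataByteCounts values
print(imagej_metadata(data, counts, '<'))    # {'Labels': ['c0', 'c1']}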
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
imagej_description_metadata
def imagej_description_metadata(description):
    """Return metatata from ImageJ image description as dict.

    Raise ValueError if not a valid ImageJ description.

    >>> description = 'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n'
    >>> imagej_description_metadata(description)  # doctest: +SKIP
    {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}

    """
    def _bool(val):
        return {'true': True, 'false': False}[val.lower()]

    result = {}
    for line in description.splitlines():
        try:
            key, val = line.split('=')
        except Exception:
            continue
        key = key.strip()
        val = val.strip()
        for dtype in (int, float, _bool):
            try:
                val = dtype(val)
                break
            except Exception:
                pass
        result[key] = val

    if 'ImageJ' not in result:
        raise ValueError('not a ImageJ image description')
    return result
python
def imagej_description_metadata(description):
    """Return metatata from ImageJ image description as dict.

    Raise ValueError if not a valid ImageJ description.

    >>> description = 'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n'
    >>> imagej_description_metadata(description)  # doctest: +SKIP
    {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}

    """
    def _bool(val):
        return {'true': True, 'false': False}[val.lower()]

    result = {}
    for line in description.splitlines():
        try:
            key, val = line.split('=')
        except Exception:
            continue
        key = key.strip()
        val = val.strip()
        for dtype in (int, float, _bool):
            try:
                val = dtype(val)
                break
            except Exception:
                pass
        result[key] = val

    if 'ImageJ' not in result:
        raise ValueError('not a ImageJ image description')
    return result
[ "def", "imagej_description_metadata", "(", "description", ")", ":", "def", "_bool", "(", "val", ")", ":", "return", "{", "'true'", ":", "True", ",", "'false'", ":", "False", "}", "[", "val", ".", "lower", "(", ")", "]", "result", "=", "{", "}", "for", "line", "in", "description", ".", "splitlines", "(", ")", ":", "try", ":", "key", ",", "val", "=", "line", ".", "split", "(", "'='", ")", "except", "Exception", ":", "continue", "key", "=", "key", ".", "strip", "(", ")", "val", "=", "val", ".", "strip", "(", ")", "for", "dtype", "in", "(", "int", ",", "float", ",", "_bool", ")", ":", "try", ":", "val", "=", "dtype", "(", "val", ")", "break", "except", "Exception", ":", "pass", "result", "[", "key", "]", "=", "val", "if", "'ImageJ'", "not", "in", "result", ":", "raise", "ValueError", "(", "'not a ImageJ image description'", ")", "return", "result" ]
Return metatata from ImageJ image description as dict.

Raise ValueError if not a valid ImageJ description.

>>> description = 'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n'
>>> imagej_description_metadata(description)  # doctest: +SKIP
{'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}
[ "Return", "metatata", "from", "ImageJ", "image", "description", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8873-L8904
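The record's doctest spelled out as a runnable sketch (import path assumed from this record):

from nionswift_plugin.TIFF_IO.tifffile import imagej_description_metadata

meta = imagej_description_metadata('ImageJ=1.11a\nimages=510\nhyperstack=true\n')
print(meta)  # {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}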
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
imagej_description
def imagej_description(shape, rgb=None, colormaped=False, version=None,
                       hyperstack=None, mode=None, loop=None, **kwargs):
    """Return ImageJ image description from data shape.

    ImageJ can handle up to 6 dimensions in order TZCYXS.

    >>> imagej_description((51, 5, 2, 196, 171))  # doctest: +SKIP
    ImageJ=1.11a
    images=510
    channels=2
    slices=5
    frames=51
    hyperstack=true
    mode=grayscale
    loop=false

    """
    if colormaped:
        raise NotImplementedError('ImageJ colormapping not supported')
    if version is None:
        version = '1.11a'
    shape = imagej_shape(shape, rgb=rgb)
    rgb = shape[-1] in (3, 4)

    result = ['ImageJ=%s' % version]
    append = []
    result.append('images=%i' % product(shape[:-3]))
    if hyperstack is None:
        hyperstack = True
        append.append('hyperstack=true')
    else:
        append.append('hyperstack=%s' % bool(hyperstack))
    if shape[2] > 1:
        result.append('channels=%i' % shape[2])
    if mode is None and not rgb:
        mode = 'grayscale'
    if hyperstack and mode:
        append.append('mode=%s' % mode)
    if shape[1] > 1:
        result.append('slices=%i' % shape[1])
    if shape[0] > 1:
        result.append('frames=%i' % shape[0])
        if loop is None:
            append.append('loop=false')
    if loop is not None:
        append.append('loop=%s' % bool(loop))
    for key, value in kwargs.items():
        append.append('%s=%s' % (key.lower(), value))
    return '\n'.join(result + append + [''])
python
def imagej_description(shape, rgb=None, colormaped=False, version=None,
                       hyperstack=None, mode=None, loop=None, **kwargs):
    """Return ImageJ image description from data shape.

    ImageJ can handle up to 6 dimensions in order TZCYXS.

    >>> imagej_description((51, 5, 2, 196, 171))  # doctest: +SKIP
    ImageJ=1.11a
    images=510
    channels=2
    slices=5
    frames=51
    hyperstack=true
    mode=grayscale
    loop=false

    """
    if colormaped:
        raise NotImplementedError('ImageJ colormapping not supported')
    if version is None:
        version = '1.11a'
    shape = imagej_shape(shape, rgb=rgb)
    rgb = shape[-1] in (3, 4)

    result = ['ImageJ=%s' % version]
    append = []
    result.append('images=%i' % product(shape[:-3]))
    if hyperstack is None:
        hyperstack = True
        append.append('hyperstack=true')
    else:
        append.append('hyperstack=%s' % bool(hyperstack))
    if shape[2] > 1:
        result.append('channels=%i' % shape[2])
    if mode is None and not rgb:
        mode = 'grayscale'
    if hyperstack and mode:
        append.append('mode=%s' % mode)
    if shape[1] > 1:
        result.append('slices=%i' % shape[1])
    if shape[0] > 1:
        result.append('frames=%i' % shape[0])
        if loop is None:
            append.append('loop=false')
    if loop is not None:
        append.append('loop=%s' % bool(loop))
    for key, value in kwargs.items():
        append.append('%s=%s' % (key.lower(), value))
    return '\n'.join(result + append + [''])
[ "def", "imagej_description", "(", "shape", ",", "rgb", "=", "None", ",", "colormaped", "=", "False", ",", "version", "=", "None", ",", "hyperstack", "=", "None", ",", "mode", "=", "None", ",", "loop", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "colormaped", ":", "raise", "NotImplementedError", "(", "'ImageJ colormapping not supported'", ")", "if", "version", "is", "None", ":", "version", "=", "'1.11a'", "shape", "=", "imagej_shape", "(", "shape", ",", "rgb", "=", "rgb", ")", "rgb", "=", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "result", "=", "[", "'ImageJ=%s'", "%", "version", "]", "append", "=", "[", "]", "result", ".", "append", "(", "'images=%i'", "%", "product", "(", "shape", "[", ":", "-", "3", "]", ")", ")", "if", "hyperstack", "is", "None", ":", "hyperstack", "=", "True", "append", ".", "append", "(", "'hyperstack=true'", ")", "else", ":", "append", ".", "append", "(", "'hyperstack=%s'", "%", "bool", "(", "hyperstack", ")", ")", "if", "shape", "[", "2", "]", ">", "1", ":", "result", ".", "append", "(", "'channels=%i'", "%", "shape", "[", "2", "]", ")", "if", "mode", "is", "None", "and", "not", "rgb", ":", "mode", "=", "'grayscale'", "if", "hyperstack", "and", "mode", ":", "append", ".", "append", "(", "'mode=%s'", "%", "mode", ")", "if", "shape", "[", "1", "]", ">", "1", ":", "result", ".", "append", "(", "'slices=%i'", "%", "shape", "[", "1", "]", ")", "if", "shape", "[", "0", "]", ">", "1", ":", "result", ".", "append", "(", "'frames=%i'", "%", "shape", "[", "0", "]", ")", "if", "loop", "is", "None", ":", "append", ".", "append", "(", "'loop=false'", ")", "if", "loop", "is", "not", "None", ":", "append", ".", "append", "(", "'loop=%s'", "%", "bool", "(", "loop", ")", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "append", ".", "append", "(", "'%s=%s'", "%", "(", "key", ".", "lower", "(", ")", ",", "value", ")", ")", "return", "'\\n'", ".", "join", "(", "result", "+", "append", "+", "[", "''", "]", ")" ]
Return ImageJ image description from data shape.

ImageJ can handle up to 6 dimensions in order TZCYXS.

>>> imagej_description((51, 5, 2, 196, 171))  # doctest: +SKIP
ImageJ=1.11a
images=510
channels=2
slices=5
frames=51
hyperstack=true
mode=grayscale
loop=false
[ "Return", "ImageJ", "image", "description", "from", "data", "shape", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8907-L8956
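The inverse direction of the previous record, again straight from the doctest:

from nionswift_plugin.TIFF_IO.tifffile import imagej_description

print(imagej_description((51, 5, 2, 196, 171)))
# prints the key=value lines listed in the doctest above, one per line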
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
imagej_shape
def imagej_shape(shape, rgb=None):
    """Return shape normalized to 6D ImageJ hyperstack TZCYXS.

    Raise ValueError if not a valid ImageJ hyperstack shape.

    >>> imagej_shape((2, 3, 4, 5, 3), False)
    (2, 3, 4, 5, 3, 1)

    """
    shape = tuple(int(i) for i in shape)
    ndim = len(shape)
    if 1 > ndim > 6:
        raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional')
    if rgb is None:
        rgb = shape[-1] in (3, 4) and ndim > 2
    if rgb and shape[-1] not in (3, 4):
        raise ValueError('invalid ImageJ hyperstack: not a RGB image')
    if not rgb and ndim == 6 and shape[-1] != 1:
        raise ValueError('invalid ImageJ hyperstack: not a non-RGB image')
    if rgb or shape[-1] == 1:
        return (1, ) * (6 - ndim) + shape
    return (1, ) * (5 - ndim) + shape + (1,)
python
def imagej_shape(shape, rgb=None):
    """Return shape normalized to 6D ImageJ hyperstack TZCYXS.

    Raise ValueError if not a valid ImageJ hyperstack shape.

    >>> imagej_shape((2, 3, 4, 5, 3), False)
    (2, 3, 4, 5, 3, 1)

    """
    shape = tuple(int(i) for i in shape)
    ndim = len(shape)
    if 1 > ndim > 6:
        raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional')
    if rgb is None:
        rgb = shape[-1] in (3, 4) and ndim > 2
    if rgb and shape[-1] not in (3, 4):
        raise ValueError('invalid ImageJ hyperstack: not a RGB image')
    if not rgb and ndim == 6 and shape[-1] != 1:
        raise ValueError('invalid ImageJ hyperstack: not a non-RGB image')
    if rgb or shape[-1] == 1:
        return (1, ) * (6 - ndim) + shape
    return (1, ) * (5 - ndim) + shape + (1,)
[ "def", "imagej_shape", "(", "shape", ",", "rgb", "=", "None", ")", ":", "shape", "=", "tuple", "(", "int", "(", "i", ")", "for", "i", "in", "shape", ")", "ndim", "=", "len", "(", "shape", ")", "if", "1", ">", "ndim", ">", "6", ":", "raise", "ValueError", "(", "'invalid ImageJ hyperstack: not 2 to 6 dimensional'", ")", "if", "rgb", "is", "None", ":", "rgb", "=", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "and", "ndim", ">", "2", "if", "rgb", "and", "shape", "[", "-", "1", "]", "not", "in", "(", "3", ",", "4", ")", ":", "raise", "ValueError", "(", "'invalid ImageJ hyperstack: not a RGB image'", ")", "if", "not", "rgb", "and", "ndim", "==", "6", "and", "shape", "[", "-", "1", "]", "!=", "1", ":", "raise", "ValueError", "(", "'invalid ImageJ hyperstack: not a non-RGB image'", ")", "if", "rgb", "or", "shape", "[", "-", "1", "]", "==", "1", ":", "return", "(", "1", ",", ")", "*", "(", "6", "-", "ndim", ")", "+", "shape", "return", "(", "1", ",", ")", "*", "(", "5", "-", "ndim", ")", "+", "shape", "+", "(", "1", ",", ")" ]
Return shape normalized to 6D ImageJ hyperstack TZCYXS.

Raise ValueError if not a valid ImageJ hyperstack shape.

>>> imagej_shape((2, 3, 4, 5, 3), False)
(2, 3, 4, 5, 3, 1)
[ "Return", "shape", "normalized", "to", "6D", "ImageJ", "hyperstack", "TZCYXS", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8959-L8980
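Two quick checks (the second shape is illustrative); note in passing that the upstream guard `1 > ndim > 6` can never be true, so out-of-range dimensionality is only caught indirectly by the later checks.

from nionswift_plugin.TIFF_IO.tifffile import imagej_shape

print(imagej_shape((2, 3, 4, 5, 3), False))  # (2, 3, 4, 5, 3, 1)
print(imagej_shape((196, 171, 3)))           # (1, 1, 1, 196, 171, 3), RGB inferred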
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
json_description
def json_description(shape, **metadata):
    """Return JSON image description from data shape and other metadata.

    Return UTF-8 encoded JSON.

    >>> json_description((256, 256, 3), axes='YXS')  # doctest: +SKIP
    b'{"shape": [256, 256, 3], "axes": "YXS"}'

    """
    metadata.update(shape=shape)
    return json.dumps(metadata)
python
def json_description(shape, **metadata):
    """Return JSON image description from data shape and other metadata.

    Return UTF-8 encoded JSON.

    >>> json_description((256, 256, 3), axes='YXS')  # doctest: +SKIP
    b'{"shape": [256, 256, 3], "axes": "YXS"}'

    """
    metadata.update(shape=shape)
    return json.dumps(metadata)
[ "def", "json_description", "(", "shape", ",", "*", "*", "metadata", ")", ":", "metadata", ".", "update", "(", "shape", "=", "shape", ")", "return", "json", ".", "dumps", "(", "metadata", ")" ]
Return JSON image description from data shape and other metadata.

Return UTF-8 encoded JSON.

>>> json_description((256, 256, 3), axes='YXS')  # doctest: +SKIP
b'{"shape": [256, 256, 3], "axes": "YXS"}'
[ "Return", "JSON", "image", "description", "from", "data", "shape", "and", "other", "metadata", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8983-L8993
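A sketch of the doctest; key order in the output may differ from the docstring's rendering (hence its doctest SKIP), and the function actually returns the str produced by json.dumps.

from nionswift_plugin.TIFF_IO.tifffile import json_description

print(json_description((256, 256, 3), axes='YXS'))
# e.g. {"axes": "YXS", "shape": [256, 256, 3]}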
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
json_description_metadata
def json_description_metadata(description):
    """Return metatata from JSON formated image description as dict.

    Raise ValuError if description is of unknown format.

    >>> description = '{"shape": [256, 256, 3], "axes": "YXS"}'
    >>> json_description_metadata(description)  # doctest: +SKIP
    {'shape': [256, 256, 3], 'axes': 'YXS'}
    >>> json_description_metadata('shape=(256, 256, 3)')
    {'shape': (256, 256, 3)}

    """
    if description[:6] == 'shape=':
        # old-style 'shaped' description; not JSON
        shape = tuple(int(i) for i in description[7:-1].split(','))
        return dict(shape=shape)
    if description[:1] == '{' and description[-1:] == '}':
        # JSON description
        return json.loads(description)
    raise ValueError('invalid JSON image description', description)
python
def json_description_metadata(description):
    """Return metatata from JSON formated image description as dict.

    Raise ValuError if description is of unknown format.

    >>> description = '{"shape": [256, 256, 3], "axes": "YXS"}'
    >>> json_description_metadata(description)  # doctest: +SKIP
    {'shape': [256, 256, 3], 'axes': 'YXS'}
    >>> json_description_metadata('shape=(256, 256, 3)')
    {'shape': (256, 256, 3)}

    """
    if description[:6] == 'shape=':
        # old-style 'shaped' description; not JSON
        shape = tuple(int(i) for i in description[7:-1].split(','))
        return dict(shape=shape)
    if description[:1] == '{' and description[-1:] == '}':
        # JSON description
        return json.loads(description)
    raise ValueError('invalid JSON image description', description)
[ "def", "json_description_metadata", "(", "description", ")", ":", "if", "description", "[", ":", "6", "]", "==", "'shape='", ":", "# old-style 'shaped' description; not JSON", "shape", "=", "tuple", "(", "int", "(", "i", ")", "for", "i", "in", "description", "[", "7", ":", "-", "1", "]", ".", "split", "(", "','", ")", ")", "return", "dict", "(", "shape", "=", "shape", ")", "if", "description", "[", ":", "1", "]", "==", "'{'", "and", "description", "[", "-", "1", ":", "]", "==", "'}'", ":", "# JSON description", "return", "json", ".", "loads", "(", "description", ")", "raise", "ValueError", "(", "'invalid JSON image description'", ",", "description", ")" ]
Return metatata from JSON formated image description as dict.

Raise ValuError if description is of unknown format.

>>> description = '{"shape": [256, 256, 3], "axes": "YXS"}'
>>> json_description_metadata(description)  # doctest: +SKIP
{'shape': [256, 256, 3], 'axes': 'YXS'}
>>> json_description_metadata('shape=(256, 256, 3)')
{'shape': (256, 256, 3)}
[ "Return", "metatata", "from", "JSON", "formated", "image", "description", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8996-L9015
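Both accepted formats, taken from the doctests:

from nionswift_plugin.TIFF_IO.tifffile import json_description_metadata

print(json_description_metadata('shape=(256, 256, 3)'))
# {'shape': (256, 256, 3)}
print(json_description_metadata('{"shape": [256, 256, 3], "axes": "YXS"}'))
# {'shape': [256, 256, 3], 'axes': 'YXS'}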
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
fluoview_description_metadata
def fluoview_description_metadata(description, ignoresections=None):
    """Return metatata from FluoView image description as dict.

    The FluoView image description format is unspecified. Expect failures.

    >>> descr = ('[Intensity Mapping]\\nMap Ch0: Range=00000 to 02047\\n'
    ...          '[Intensity Mapping End]')
    >>> fluoview_description_metadata(descr)
    {'Intensity Mapping': {'Map Ch0: Range': '00000 to 02047'}}

    """
    if not description.startswith('['):
        raise ValueError('invalid FluoView image description')
    if ignoresections is None:
        ignoresections = {'Region Info (Fields)', 'Protocol Description'}

    result = {}
    sections = [result]
    comment = False
    for line in description.splitlines():
        if not comment:
            line = line.strip()
        if not line:
            continue
        if line[0] == '[':
            if line[-5:] == ' End]':
                # close section
                del sections[-1]
                section = sections[-1]
                name = line[1:-5]
                if comment:
                    section[name] = '\n'.join(section[name])
                if name[:4] == 'LUT ':
                    a = numpy.array(section[name], dtype='uint8')
                    a.shape = -1, 3
                    section[name] = a
                continue
            # new section
            comment = False
            name = line[1:-1]
            if name[:4] == 'LUT ':
                section = []
            elif name in ignoresections:
                section = []
                comment = True
            else:
                section = {}
            sections.append(section)
            result[name] = section
            continue
        # add entry
        if comment:
            section.append(line)
            continue
        line = line.split('=', 1)
        if len(line) == 1:
            section[line[0].strip()] = None
            continue
        key, value = line
        if key[:4] == 'RGB ':
            section.extend(int(rgb) for rgb in value.split())
        else:
            section[key.strip()] = astype(value.strip())
    return result
python
def fluoview_description_metadata(description, ignoresections=None):
    """Return metatata from FluoView image description as dict.

    The FluoView image description format is unspecified. Expect failures.

    >>> descr = ('[Intensity Mapping]\\nMap Ch0: Range=00000 to 02047\\n'
    ...          '[Intensity Mapping End]')
    >>> fluoview_description_metadata(descr)
    {'Intensity Mapping': {'Map Ch0: Range': '00000 to 02047'}}

    """
    if not description.startswith('['):
        raise ValueError('invalid FluoView image description')
    if ignoresections is None:
        ignoresections = {'Region Info (Fields)', 'Protocol Description'}

    result = {}
    sections = [result]
    comment = False
    for line in description.splitlines():
        if not comment:
            line = line.strip()
        if not line:
            continue
        if line[0] == '[':
            if line[-5:] == ' End]':
                # close section
                del sections[-1]
                section = sections[-1]
                name = line[1:-5]
                if comment:
                    section[name] = '\n'.join(section[name])
                if name[:4] == 'LUT ':
                    a = numpy.array(section[name], dtype='uint8')
                    a.shape = -1, 3
                    section[name] = a
                continue
            # new section
            comment = False
            name = line[1:-1]
            if name[:4] == 'LUT ':
                section = []
            elif name in ignoresections:
                section = []
                comment = True
            else:
                section = {}
            sections.append(section)
            result[name] = section
            continue
        # add entry
        if comment:
            section.append(line)
            continue
        line = line.split('=', 1)
        if len(line) == 1:
            section[line[0].strip()] = None
            continue
        key, value = line
        if key[:4] == 'RGB ':
            section.extend(int(rgb) for rgb in value.split())
        else:
            section[key.strip()] = astype(value.strip())
    return result
[ "def", "fluoview_description_metadata", "(", "description", ",", "ignoresections", "=", "None", ")", ":", "if", "not", "description", ".", "startswith", "(", "'['", ")", ":", "raise", "ValueError", "(", "'invalid FluoView image description'", ")", "if", "ignoresections", "is", "None", ":", "ignoresections", "=", "{", "'Region Info (Fields)'", ",", "'Protocol Description'", "}", "result", "=", "{", "}", "sections", "=", "[", "result", "]", "comment", "=", "False", "for", "line", "in", "description", ".", "splitlines", "(", ")", ":", "if", "not", "comment", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "continue", "if", "line", "[", "0", "]", "==", "'['", ":", "if", "line", "[", "-", "5", ":", "]", "==", "' End]'", ":", "# close section", "del", "sections", "[", "-", "1", "]", "section", "=", "sections", "[", "-", "1", "]", "name", "=", "line", "[", "1", ":", "-", "5", "]", "if", "comment", ":", "section", "[", "name", "]", "=", "'\\n'", ".", "join", "(", "section", "[", "name", "]", ")", "if", "name", "[", ":", "4", "]", "==", "'LUT '", ":", "a", "=", "numpy", ".", "array", "(", "section", "[", "name", "]", ",", "dtype", "=", "'uint8'", ")", "a", ".", "shape", "=", "-", "1", ",", "3", "section", "[", "name", "]", "=", "a", "continue", "# new section", "comment", "=", "False", "name", "=", "line", "[", "1", ":", "-", "1", "]", "if", "name", "[", ":", "4", "]", "==", "'LUT '", ":", "section", "=", "[", "]", "elif", "name", "in", "ignoresections", ":", "section", "=", "[", "]", "comment", "=", "True", "else", ":", "section", "=", "{", "}", "sections", ".", "append", "(", "section", ")", "result", "[", "name", "]", "=", "section", "continue", "# add entry", "if", "comment", ":", "section", ".", "append", "(", "line", ")", "continue", "line", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "if", "len", "(", "line", ")", "==", "1", ":", "section", "[", "line", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "None", "continue", "key", ",", "value", "=", "line", "if", "key", "[", ":", "4", "]", "==", "'RGB '", ":", "section", ".", "extend", "(", "int", "(", "rgb", ")", "for", "rgb", "in", "value", ".", "split", "(", ")", ")", "else", ":", "section", "[", "key", ".", "strip", "(", ")", "]", "=", "astype", "(", "value", ".", "strip", "(", ")", ")", "return", "result" ]
Return metatata from FluoView image description as dict.

The FluoView image description format is unspecified. Expect failures.

>>> descr = ('[Intensity Mapping]\\nMap Ch0: Range=00000 to 02047\\n'
...          '[Intensity Mapping End]')
>>> fluoview_description_metadata(descr)
{'Intensity Mapping': {'Map Ch0: Range': '00000 to 02047'}}
[ "Return", "metatata", "from", "FluoView", "image", "description", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9018-L9081
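The doctest, made runnable:

from nionswift_plugin.TIFF_IO.tifffile import fluoview_description_metadata

descr = ('[Intensity Mapping]\nMap Ch0: Range=00000 to 02047\n'
         '[Intensity Mapping End]')
print(fluoview_description_metadata(descr))
# {'Intensity Mapping': {'Map Ch0: Range': '00000 to 02047'}}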
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
pilatus_description_metadata
def pilatus_description_metadata(description):
    """Return metatata from Pilatus image description as dict.

    Return metadata from Pilatus pixel array detectors by Dectris, created
    by camserver or TVX software.

    >>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m')
    {'Pixel_size': (0.000172, 0.000172)}

    """
    result = {}
    if not description.startswith('# '):
        return result
    for c in '#:=,()':
        description = description.replace(c, ' ')
    for line in description.split('\n'):
        if line[:2] != '  ':
            continue
        line = line.split()
        name = line[0]
        if line[0] not in TIFF.PILATUS_HEADER:
            try:
                result['DateTime'] = datetime.datetime.strptime(
                    ' '.join(line), '%Y-%m-%dT%H %M %S.%f')
            except Exception:
                result[name] = ' '.join(line[1:])
            continue
        indices, dtype = TIFF.PILATUS_HEADER[line[0]]
        if isinstance(indices[0], slice):
            # assumes one slice
            values = line[indices[0]]
        else:
            values = [line[i] for i in indices]
        if dtype is float and values[0] == 'not':
            values = ['NaN']
        values = tuple(dtype(v) for v in values)
        if dtype == str:
            values = ' '.join(values)
        elif len(values) == 1:
            values = values[0]
        result[name] = values
    return result
python
def pilatus_description_metadata(description):
    """Return metatata from Pilatus image description as dict.

    Return metadata from Pilatus pixel array detectors by Dectris, created
    by camserver or TVX software.

    >>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m')
    {'Pixel_size': (0.000172, 0.000172)}

    """
    result = {}
    if not description.startswith('# '):
        return result
    for c in '#:=,()':
        description = description.replace(c, ' ')
    for line in description.split('\n'):
        if line[:2] != '  ':
            continue
        line = line.split()
        name = line[0]
        if line[0] not in TIFF.PILATUS_HEADER:
            try:
                result['DateTime'] = datetime.datetime.strptime(
                    ' '.join(line), '%Y-%m-%dT%H %M %S.%f')
            except Exception:
                result[name] = ' '.join(line[1:])
            continue
        indices, dtype = TIFF.PILATUS_HEADER[line[0]]
        if isinstance(indices[0], slice):
            # assumes one slice
            values = line[indices[0]]
        else:
            values = [line[i] for i in indices]
        if dtype is float and values[0] == 'not':
            values = ['NaN']
        values = tuple(dtype(v) for v in values)
        if dtype == str:
            values = ' '.join(values)
        elif len(values) == 1:
            values = values[0]
        result[name] = values
    return result
[ "def", "pilatus_description_metadata", "(", "description", ")", ":", "result", "=", "{", "}", "if", "not", "description", ".", "startswith", "(", "'# '", ")", ":", "return", "result", "for", "c", "in", "'#:=,()'", ":", "description", "=", "description", ".", "replace", "(", "c", ",", "' '", ")", "for", "line", "in", "description", ".", "split", "(", "'\\n'", ")", ":", "if", "line", "[", ":", "2", "]", "!=", "' '", ":", "continue", "line", "=", "line", ".", "split", "(", ")", "name", "=", "line", "[", "0", "]", "if", "line", "[", "0", "]", "not", "in", "TIFF", ".", "PILATUS_HEADER", ":", "try", ":", "result", "[", "'DateTime'", "]", "=", "datetime", ".", "datetime", ".", "strptime", "(", "' '", ".", "join", "(", "line", ")", ",", "'%Y-%m-%dT%H %M %S.%f'", ")", "except", "Exception", ":", "result", "[", "name", "]", "=", "' '", ".", "join", "(", "line", "[", "1", ":", "]", ")", "continue", "indices", ",", "dtype", "=", "TIFF", ".", "PILATUS_HEADER", "[", "line", "[", "0", "]", "]", "if", "isinstance", "(", "indices", "[", "0", "]", ",", "slice", ")", ":", "# assumes one slice", "values", "=", "line", "[", "indices", "[", "0", "]", "]", "else", ":", "values", "=", "[", "line", "[", "i", "]", "for", "i", "in", "indices", "]", "if", "dtype", "is", "float", "and", "values", "[", "0", "]", "==", "'not'", ":", "values", "=", "[", "'NaN'", "]", "values", "=", "tuple", "(", "dtype", "(", "v", ")", "for", "v", "in", "values", ")", "if", "dtype", "==", "str", ":", "values", "=", "' '", ".", "join", "(", "values", ")", "elif", "len", "(", "values", ")", "==", "1", ":", "values", "=", "values", "[", "0", "]", "result", "[", "name", "]", "=", "values", "return", "result" ]
Return metatata from Pilatus image description as dict.

Return metadata from Pilatus pixel array detectors by Dectris, created
by camserver or TVX software.

>>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m')
{'Pixel_size': (0.000172, 0.000172)}
[ "Return", "metatata", "from", "Pilatus", "image", "description", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9084-L9125
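Again directly from the doctest:

from nionswift_plugin.TIFF_IO.tifffile import pilatus_description_metadata

print(pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m'))
# {'Pixel_size': (0.000172, 0.000172)}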
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
svs_description_metadata
def svs_description_metadata(description):
    """Return metatata from Aperio image description as dict.

    The Aperio image description format is unspecified. Expect failures.

    >>> svs_description_metadata('Aperio Image Library v1.0')
    {'Aperio Image Library': 'v1.0'}

    """
    if not description.startswith('Aperio Image Library '):
        raise ValueError('invalid Aperio image description')
    result = {}
    lines = description.split('\n')
    key, value = lines[0].strip().rsplit(None, 1)  # 'Aperio Image Library'
    result[key.strip()] = value.strip()
    if len(lines) == 1:
        return result
    items = lines[1].split('|')
    result[''] = items[0].strip()  # TODO: parse this?
    for item in items[1:]:
        key, value = item.split(' = ')
        result[key.strip()] = astype(value.strip())
    return result
python
def svs_description_metadata(description):
    """Return metatata from Aperio image description as dict.

    The Aperio image description format is unspecified. Expect failures.

    >>> svs_description_metadata('Aperio Image Library v1.0')
    {'Aperio Image Library': 'v1.0'}

    """
    if not description.startswith('Aperio Image Library '):
        raise ValueError('invalid Aperio image description')
    result = {}
    lines = description.split('\n')
    key, value = lines[0].strip().rsplit(None, 1)  # 'Aperio Image Library'
    result[key.strip()] = value.strip()
    if len(lines) == 1:
        return result
    items = lines[1].split('|')
    result[''] = items[0].strip()  # TODO: parse this?
    for item in items[1:]:
        key, value = item.split(' = ')
        result[key.strip()] = astype(value.strip())
    return result
[ "def", "svs_description_metadata", "(", "description", ")", ":", "if", "not", "description", ".", "startswith", "(", "'Aperio Image Library '", ")", ":", "raise", "ValueError", "(", "'invalid Aperio image description'", ")", "result", "=", "{", "}", "lines", "=", "description", ".", "split", "(", "'\\n'", ")", "key", ",", "value", "=", "lines", "[", "0", "]", ".", "strip", "(", ")", ".", "rsplit", "(", "None", ",", "1", ")", "# 'Aperio Image Library'", "result", "[", "key", ".", "strip", "(", ")", "]", "=", "value", ".", "strip", "(", ")", "if", "len", "(", "lines", ")", "==", "1", ":", "return", "result", "items", "=", "lines", "[", "1", "]", ".", "split", "(", "'|'", ")", "result", "[", "''", "]", "=", "items", "[", "0", "]", ".", "strip", "(", ")", "# TODO: parse this?", "for", "item", "in", "items", "[", "1", ":", "]", ":", "key", ",", "value", "=", "item", ".", "split", "(", "' = '", ")", "result", "[", "key", ".", "strip", "(", ")", "]", "=", "astype", "(", "value", ".", "strip", "(", ")", ")", "return", "result" ]
Return metatata from Aperio image description as dict.

The Aperio image description format is unspecified. Expect failures.

>>> svs_description_metadata('Aperio Image Library v1.0')
{'Aperio Image Library': 'v1.0'}
[ "Return", "metatata", "from", "Aperio", "image", "description", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9128-L9150
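Likewise from the doctest:

from nionswift_plugin.TIFF_IO.tifffile import svs_description_metadata

print(svs_description_metadata('Aperio Image Library v1.0'))
# {'Aperio Image Library': 'v1.0'}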
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
stk_description_metadata
def stk_description_metadata(description):
    """Return metadata from MetaMorph image description as list of dict.

    The MetaMorph image description format is unspecified. Expect failures.

    """
    description = description.strip()
    if not description:
        return []
    try:
        description = bytes2str(description)
    except UnicodeDecodeError as exc:
        log.warning('stk_description_metadata: %s: %s',
                    exc.__class__.__name__, exc)
        return []
    result = []
    for plane in description.split('\x00'):
        d = {}
        for line in plane.split('\r\n'):
            line = line.split(':', 1)
            if len(line) > 1:
                name, value = line
                d[name.strip()] = astype(value.strip())
            else:
                value = line[0].strip()
                if value:
                    if '' in d:
                        d[''].append(value)
                    else:
                        d[''] = [value]
        result.append(d)
    return result
python
def stk_description_metadata(description):
    """Return metadata from MetaMorph image description as list of dict.

    The MetaMorph image description format is unspecified. Expect failures.

    """
    description = description.strip()
    if not description:
        return []
    try:
        description = bytes2str(description)
    except UnicodeDecodeError as exc:
        log.warning('stk_description_metadata: %s: %s',
                    exc.__class__.__name__, exc)
        return []
    result = []
    for plane in description.split('\x00'):
        d = {}
        for line in plane.split('\r\n'):
            line = line.split(':', 1)
            if len(line) > 1:
                name, value = line
                d[name.strip()] = astype(value.strip())
            else:
                value = line[0].strip()
                if value:
                    if '' in d:
                        d[''].append(value)
                    else:
                        d[''] = [value]
        result.append(d)
    return result
[ "def", "stk_description_metadata", "(", "description", ")", ":", "description", "=", "description", ".", "strip", "(", ")", "if", "not", "description", ":", "return", "[", "]", "try", ":", "description", "=", "bytes2str", "(", "description", ")", "except", "UnicodeDecodeError", "as", "exc", ":", "log", ".", "warning", "(", "'stk_description_metadata: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "return", "[", "]", "result", "=", "[", "]", "for", "plane", "in", "description", ".", "split", "(", "'\\x00'", ")", ":", "d", "=", "{", "}", "for", "line", "in", "plane", ".", "split", "(", "'\\r\\n'", ")", ":", "line", "=", "line", ".", "split", "(", "':'", ",", "1", ")", "if", "len", "(", "line", ")", ">", "1", ":", "name", ",", "value", "=", "line", "d", "[", "name", ".", "strip", "(", ")", "]", "=", "astype", "(", "value", ".", "strip", "(", ")", ")", "else", ":", "value", "=", "line", "[", "0", "]", ".", "strip", "(", ")", "if", "value", ":", "if", "''", "in", "d", ":", "d", "[", "''", "]", ".", "append", "(", "value", ")", "else", ":", "d", "[", "''", "]", "=", "[", "value", "]", "result", ".", "append", "(", "d", ")", "return", "result" ]
Return metadata from MetaMorph image description as list of dict. The MetaMorph image description format is unspecified. Expect failures.
[ "Return", "metadata", "from", "MetaMorph", "image", "description", "as", "list", "of", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9153-L9184
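A hedged example for stk_description_metadata; the MetaMorph description bytes below are invented, and astype() is assumed to leave non-numeric values as strings:

desc = b'Exposure: 100 ms\r\nBinning: 2 x 2\r\nAcquired from camera'
planes = stk_description_metadata(desc)
# one dict per '\x00'-separated plane; here a single plane:
# planes[0]['Exposure'] == '100 ms'
# lines without a colon collect under the '' key:
# planes[0][''] == ['Acquired from camera']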
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
metaseries_description_metadata
def metaseries_description_metadata(description): """Return metadata from MetaSeries image description as dict.""" if not description.startswith('<MetaData>'): raise ValueError('invalid MetaSeries image description') from xml.etree import cElementTree as etree # delayed import root = etree.fromstring(description) types = {'float': float, 'int': int, 'bool': lambda x: asbool(x, 'on', 'off')} def parse(root, result): # recursive for child in root: attrib = child.attrib if not attrib: result[child.tag] = parse(child, {}) continue if 'id' in attrib: i = attrib['id'] t = attrib['type'] v = attrib['value'] if t in types: result[i] = types[t](v) else: result[i] = v return result adict = parse(root, {}) if 'Description' in adict: adict['Description'] = adict['Description'].replace('&#13;&#10;', '\n') return adict
python
def metaseries_description_metadata(description): """Return metadata from MetaSeries image description as dict.""" if not description.startswith('<MetaData>'): raise ValueError('invalid MetaSeries image description') from xml.etree import cElementTree as etree # delayed import root = etree.fromstring(description) types = {'float': float, 'int': int, 'bool': lambda x: asbool(x, 'on', 'off')} def parse(root, result): # recursive for child in root: attrib = child.attrib if not attrib: result[child.tag] = parse(child, {}) continue if 'id' in attrib: i = attrib['id'] t = attrib['type'] v = attrib['value'] if t in types: result[i] = types[t](v) else: result[i] = v return result adict = parse(root, {}) if 'Description' in adict: adict['Description'] = adict['Description'].replace('&#13;&#10;', '\n') return adict
[ "def", "metaseries_description_metadata", "(", "description", ")", ":", "if", "not", "description", ".", "startswith", "(", "'<MetaData>'", ")", ":", "raise", "ValueError", "(", "'invalid MetaSeries image description'", ")", "from", "xml", ".", "etree", "import", "cElementTree", "as", "etree", "# delayed import", "root", "=", "etree", ".", "fromstring", "(", "description", ")", "types", "=", "{", "'float'", ":", "float", ",", "'int'", ":", "int", ",", "'bool'", ":", "lambda", "x", ":", "asbool", "(", "x", ",", "'on'", ",", "'off'", ")", "}", "def", "parse", "(", "root", ",", "result", ")", ":", "# recursive", "for", "child", "in", "root", ":", "attrib", "=", "child", ".", "attrib", "if", "not", "attrib", ":", "result", "[", "child", ".", "tag", "]", "=", "parse", "(", "child", ",", "{", "}", ")", "continue", "if", "'id'", "in", "attrib", ":", "i", "=", "attrib", "[", "'id'", "]", "t", "=", "attrib", "[", "'type'", "]", "v", "=", "attrib", "[", "'value'", "]", "if", "t", "in", "types", ":", "result", "[", "i", "]", "=", "types", "[", "t", "]", "(", "v", ")", "else", ":", "result", "[", "i", "]", "=", "v", "return", "result", "adict", "=", "parse", "(", "root", ",", "{", "}", ")", "if", "'Description'", "in", "adict", ":", "adict", "[", "'Description'", "]", "=", "adict", "[", "'Description'", "]", ".", "replace", "(", "'&#13;&#10;'", ",", "'\\n'", ")", "return", "adict" ]
Return metadata from MetaSeries image description as dict.
[ "Return", "metadata", "from", "MetaSeries", "image", "description", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9187-L9217
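A sketch of metaseries_description_metadata on an invented XML fragment; the 'type' attribute selects the converter from the types table:

xml = ('<MetaData><PlaneInfo>'
       '<prop id="pixel-size-x" type="int" value="1392"/>'
       '<prop id="spatial-calibration-x" type="float" value="0.645"/>'
       '</PlaneInfo></MetaData>')
meta = metaseries_description_metadata(xml)
# meta['PlaneInfo'] == {'pixel-size-x': 1392, 'spatial-calibration-x': 0.645}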
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
scanimage_artist_metadata
def scanimage_artist_metadata(artist): """Return metadata from ScanImage artist tag as dict.""" try: return json.loads(artist) except ValueError as exc: log.warning('scanimage_artist_metadata: %s: %s', exc.__class__.__name__, exc)
python
def scanimage_artist_metadata(artist): """Return metadata from ScanImage artist tag as dict.""" try: return json.loads(artist) except ValueError as exc: log.warning('scanimage_artist_metadata: %s: %s', exc.__class__.__name__, exc)
[ "def", "scanimage_artist_metadata", "(", "artist", ")", ":", "try", ":", "return", "json", ".", "loads", "(", "artist", ")", "except", "ValueError", "as", "exc", ":", "log", ".", "warning", "(", "'scanimage_artist_metadata: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")" ]
Return metadata from ScanImage artist tag as dict.
[ "Return", "metadata", "from", "ScanImage", "artist", "tag", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9225-L9231
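scanimage_artist_metadata simply parses the Artist tag as JSON, so a sketch is direct (the tag content below is invented):

artist = '{"RoiGroups": {"imagingRoiGroup": null}}'
meta = scanimage_artist_metadata(artist)
# meta == {'RoiGroups': {'imagingRoiGroup': None}}
# malformed JSON is logged as a warning and None is returned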
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
olympusini_metadata
def olympusini_metadata(inistr): """Return OlympusSIS metadata from INI string. No documentation is available. """ def keyindex(key): # split key into name and index index = 0 i = len(key.rstrip('0123456789')) if i < len(key): index = int(key[i:]) - 1 key = key[:i] return key, index result = {} bands = [] zpos = None tpos = None for line in inistr.splitlines(): line = line.strip() if line == '' or line[0] == ';': continue if line[0] == '[' and line[-1] == ']': section_name = line[1:-1] result[section_name] = section = {} if section_name == 'Dimension': result['axes'] = axes = [] result['shape'] = shape = [] elif section_name == 'ASD': result[section_name] = [] elif section_name == 'Z': if 'Dimension' in result: result[section_name]['ZPos'] = zpos = [] elif section_name == 'Time': if 'Dimension' in result: result[section_name]['TimePos'] = tpos = [] elif section_name == 'Band': nbands = result['Dimension']['Band'] bands = [{'LUT': []} for i in range(nbands)] result[section_name] = bands iband = 0 else: key, value = line.split('=') if value.strip() == '': value = None elif ',' in value: value = tuple(astype(v) for v in value.split(',')) else: value = astype(value) if section_name == 'Dimension': section[key] = value axes.append(key) shape.append(value) elif section_name == 'ASD': if key == 'Count': result['ASD'] = [{}] * value else: key, index = keyindex(key) result['ASD'][index][key] = value elif section_name == 'Band': if key[:3] == 'LUT': lut = bands[iband]['LUT'] value = struct.pack('<I', value) lut.append( [ord(value[0:1]), ord(value[1:2]), ord(value[2:3])]) else: key, iband = keyindex(key) bands[iband][key] = value elif key[:4] == 'ZPos' and zpos is not None: zpos.append(value) elif key[:7] == 'TimePos' and tpos is not None: tpos.append(value) else: section[key] = value if 'axes' in result: sisaxes = {'Band': 'C'} axes = [] shape = [] for i, x in zip(result['shape'], result['axes']): if i > 1: axes.append(sisaxes.get(x, x[0].upper())) shape.append(i) result['axes'] = ''.join(axes) result['shape'] = tuple(shape) try: result['Z']['ZPos'] = numpy.array( result['Z']['ZPos'][:result['Dimension']['Z']], 'float64') except Exception: pass try: result['Time']['TimePos'] = numpy.array( result['Time']['TimePos'][:result['Dimension']['Time']], 'int32') except Exception: pass for band in bands: band['LUT'] = numpy.array(band['LUT'], 'uint8') return result
python
def olympusini_metadata(inistr): """Return OlympusSIS metadata from INI string. No documentation is available. """ def keyindex(key): # split key into name and index index = 0 i = len(key.rstrip('0123456789')) if i < len(key): index = int(key[i:]) - 1 key = key[:i] return key, index result = {} bands = [] zpos = None tpos = None for line in inistr.splitlines(): line = line.strip() if line == '' or line[0] == ';': continue if line[0] == '[' and line[-1] == ']': section_name = line[1:-1] result[section_name] = section = {} if section_name == 'Dimension': result['axes'] = axes = [] result['shape'] = shape = [] elif section_name == 'ASD': result[section_name] = [] elif section_name == 'Z': if 'Dimension' in result: result[section_name]['ZPos'] = zpos = [] elif section_name == 'Time': if 'Dimension' in result: result[section_name]['TimePos'] = tpos = [] elif section_name == 'Band': nbands = result['Dimension']['Band'] bands = [{'LUT': []} for i in range(nbands)] result[section_name] = bands iband = 0 else: key, value = line.split('=') if value.strip() == '': value = None elif ',' in value: value = tuple(astype(v) for v in value.split(',')) else: value = astype(value) if section_name == 'Dimension': section[key] = value axes.append(key) shape.append(value) elif section_name == 'ASD': if key == 'Count': result['ASD'] = [{}] * value else: key, index = keyindex(key) result['ASD'][index][key] = value elif section_name == 'Band': if key[:3] == 'LUT': lut = bands[iband]['LUT'] value = struct.pack('<I', value) lut.append( [ord(value[0:1]), ord(value[1:2]), ord(value[2:3])]) else: key, iband = keyindex(key) bands[iband][key] = value elif key[:4] == 'ZPos' and zpos is not None: zpos.append(value) elif key[:7] == 'TimePos' and tpos is not None: tpos.append(value) else: section[key] = value if 'axes' in result: sisaxes = {'Band': 'C'} axes = [] shape = [] for i, x in zip(result['shape'], result['axes']): if i > 1: axes.append(sisaxes.get(x, x[0].upper())) shape.append(i) result['axes'] = ''.join(axes) result['shape'] = tuple(shape) try: result['Z']['ZPos'] = numpy.array( result['Z']['ZPos'][:result['Dimension']['Z']], 'float64') except Exception: pass try: result['Time']['TimePos'] = numpy.array( result['Time']['TimePos'][:result['Dimension']['Time']], 'int32') except Exception: pass for band in bands: band['LUT'] = numpy.array(band['LUT'], 'uint8') return result
[ "def", "olympusini_metadata", "(", "inistr", ")", ":", "def", "keyindex", "(", "key", ")", ":", "# split key into name and index", "index", "=", "0", "i", "=", "len", "(", "key", ".", "rstrip", "(", "'0123456789'", ")", ")", "if", "i", "<", "len", "(", "key", ")", ":", "index", "=", "int", "(", "key", "[", "i", ":", "]", ")", "-", "1", "key", "=", "key", "[", ":", "i", "]", "return", "key", ",", "index", "result", "=", "{", "}", "bands", "=", "[", "]", "zpos", "=", "None", "tpos", "=", "None", "for", "line", "in", "inistr", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "==", "''", "or", "line", "[", "0", "]", "==", "';'", ":", "continue", "if", "line", "[", "0", "]", "==", "'['", "and", "line", "[", "-", "1", "]", "==", "']'", ":", "section_name", "=", "line", "[", "1", ":", "-", "1", "]", "result", "[", "section_name", "]", "=", "section", "=", "{", "}", "if", "section_name", "==", "'Dimension'", ":", "result", "[", "'axes'", "]", "=", "axes", "=", "[", "]", "result", "[", "'shape'", "]", "=", "shape", "=", "[", "]", "elif", "section_name", "==", "'ASD'", ":", "result", "[", "section_name", "]", "=", "[", "]", "elif", "section_name", "==", "'Z'", ":", "if", "'Dimension'", "in", "result", ":", "result", "[", "section_name", "]", "[", "'ZPos'", "]", "=", "zpos", "=", "[", "]", "elif", "section_name", "==", "'Time'", ":", "if", "'Dimension'", "in", "result", ":", "result", "[", "section_name", "]", "[", "'TimePos'", "]", "=", "tpos", "=", "[", "]", "elif", "section_name", "==", "'Band'", ":", "nbands", "=", "result", "[", "'Dimension'", "]", "[", "'Band'", "]", "bands", "=", "[", "{", "'LUT'", ":", "[", "]", "}", "for", "i", "in", "range", "(", "nbands", ")", "]", "result", "[", "section_name", "]", "=", "bands", "iband", "=", "0", "else", ":", "key", ",", "value", "=", "line", ".", "split", "(", "'='", ")", "if", "value", ".", "strip", "(", ")", "==", "''", ":", "value", "=", "None", "elif", "','", "in", "value", ":", "value", "=", "tuple", "(", "astype", "(", "v", ")", "for", "v", "in", "value", ".", "split", "(", "','", ")", ")", "else", ":", "value", "=", "astype", "(", "value", ")", "if", "section_name", "==", "'Dimension'", ":", "section", "[", "key", "]", "=", "value", "axes", ".", "append", "(", "key", ")", "shape", ".", "append", "(", "value", ")", "elif", "section_name", "==", "'ASD'", ":", "if", "key", "==", "'Count'", ":", "result", "[", "'ASD'", "]", "=", "[", "{", "}", "]", "*", "value", "else", ":", "key", ",", "index", "=", "keyindex", "(", "key", ")", "result", "[", "'ASD'", "]", "[", "index", "]", "[", "key", "]", "=", "value", "elif", "section_name", "==", "'Band'", ":", "if", "key", "[", ":", "3", "]", "==", "'LUT'", ":", "lut", "=", "bands", "[", "iband", "]", "[", "'LUT'", "]", "value", "=", "struct", ".", "pack", "(", "'<I'", ",", "value", ")", "lut", ".", "append", "(", "[", "ord", "(", "value", "[", "0", ":", "1", "]", ")", ",", "ord", "(", "value", "[", "1", ":", "2", "]", ")", ",", "ord", "(", "value", "[", "2", ":", "3", "]", ")", "]", ")", "else", ":", "key", ",", "iband", "=", "keyindex", "(", "key", ")", "bands", "[", "iband", "]", "[", "key", "]", "=", "value", "elif", "key", "[", ":", "4", "]", "==", "'ZPos'", "and", "zpos", "is", "not", "None", ":", "zpos", ".", "append", "(", "value", ")", "elif", "key", "[", ":", "7", "]", "==", "'TimePos'", "and", "tpos", "is", "not", "None", ":", "tpos", ".", "append", "(", "value", ")", "else", ":", "section", "[", "key", "]", "=", "value", "if", "'axes'", "in", "result", ":", "sisaxes", "=", "{", "'Band'", ":", "'C'", "}", "axes", "=", "[", "]", "shape", "=", "[", "]", "for", "i", ",", "x", "in", "zip", "(", "result", "[", "'shape'", "]", ",", "result", "[", "'axes'", "]", ")", ":", "if", "i", ">", "1", ":", "axes", ".", "append", "(", "sisaxes", ".", "get", "(", "x", ",", "x", "[", "0", "]", ".", "upper", "(", ")", ")", ")", "shape", ".", "append", "(", "i", ")", "result", "[", "'axes'", "]", "=", "''", ".", "join", "(", "axes", ")", "result", "[", "'shape'", "]", "=", "tuple", "(", "shape", ")", "try", ":", "result", "[", "'Z'", "]", "[", "'ZPos'", "]", "=", "numpy", ".", "array", "(", "result", "[", "'Z'", "]", "[", "'ZPos'", "]", "[", ":", "result", "[", "'Dimension'", "]", "[", "'Z'", "]", "]", ",", "'float64'", ")", "except", "Exception", ":", "pass", "try", ":", "result", "[", "'Time'", "]", "[", "'TimePos'", "]", "=", "numpy", ".", "array", "(", "result", "[", "'Time'", "]", "[", "'TimePos'", "]", "[", ":", "result", "[", "'Dimension'", "]", "[", "'Time'", "]", "]", ",", "'int32'", ")", "except", "Exception", ":", "pass", "for", "band", "in", "bands", ":", "band", "[", "'LUT'", "]", "=", "numpy", ".", "array", "(", "band", "[", "'LUT'", "]", ",", "'uint8'", ")", "return", "result" ]
Return OlympusSIS metadata from INI string. No documentation is available.
[ "Return", "OlympusSIS", "metadata", "from", "INI", "string", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9234-L9334
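A hedged sketch for olympusini_metadata on a minimal, invented INI string; astype() is assumed to convert numeric strings, and only axes with a size above one survive in the final 'axes'/'shape':

ini = '\n'.join((
    '[Dimension]', 'Band=1', 'Z=2',
    '[Z]', 'ZPos1=0.0', 'ZPos2=1.5',
    '[Band]', 'BandName1=CH1'))
meta = olympusini_metadata(ini)
# meta['axes'] == 'Z' and meta['shape'] == (2,)  (Band has size 1, so it is dropped)
# meta['Z']['ZPos'] becomes a float64 array([0.0, 1.5])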
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
tile_decode
def tile_decode(tile, tileindex, tileshape, tiledshape, lsb2msb, decompress, unpack, unpredict, out): """Decode tile segment bytes into 5D output array.""" _, imagedepth, imagelength, imagewidth, _ = out.shape tileddepth, tiledlength, tiledwidth = tiledshape tiledepth, tilelength, tilewidth, samples = tileshape tilesize = tiledepth * tilelength * tilewidth * samples pl = tileindex // (tiledwidth * tiledlength * tileddepth) td = (tileindex // (tiledwidth * tiledlength)) % tileddepth * tiledepth tl = (tileindex // tiledwidth) % tiledlength * tilelength tw = tileindex % tiledwidth * tilewidth if tile: if lsb2msb: tile = bitorder_decode(tile, out=tile) tile = decompress(tile) tile = unpack(tile) # decompression / unpacking might return too many bytes tile = tile[:tilesize] try: # complete tile according to TIFF specification tile.shape = tileshape except ValueError: # tile fills remaining space; found in some JPEG compressed slides s = (min(imagedepth - td, tiledepth), min(imagelength - tl, tilelength), min(imagewidth - tw, tilewidth), samples) try: tile.shape = s except ValueError: # incomplete tile; see gdal issue #1179 log.warning('tile_decode: incomplete tile %s %s', tile.shape, tileshape) t = numpy.zeros(tilesize, tile.dtype) s = min(tile.size, tilesize) t[:s] = tile[:s] tile = t.reshape(tileshape) tile = unpredict(tile, axis=-2, out=tile) out[pl, td:td+tiledepth, tl:tl+tilelength, tw:tw+tilewidth] = ( tile[:imagedepth-td, :imagelength-tl, :imagewidth-tw]) else: out[pl, td:td+tiledepth, tl:tl+tilelength, tw:tw+tilewidth] = 0
python
def tile_decode(tile, tileindex, tileshape, tiledshape, lsb2msb, decompress, unpack, unpredict, out): """Decode tile segment bytes into 5D output array.""" _, imagedepth, imagelength, imagewidth, _ = out.shape tileddepth, tiledlength, tiledwidth = tiledshape tiledepth, tilelength, tilewidth, samples = tileshape tilesize = tiledepth * tilelength * tilewidth * samples pl = tileindex // (tiledwidth * tiledlength * tileddepth) td = (tileindex // (tiledwidth * tiledlength)) % tileddepth * tiledepth tl = (tileindex // tiledwidth) % tiledlength * tilelength tw = tileindex % tiledwidth * tilewidth if tile: if lsb2msb: tile = bitorder_decode(tile, out=tile) tile = decompress(tile) tile = unpack(tile) # decompression / unpacking might return too many bytes tile = tile[:tilesize] try: # complete tile according to TIFF specification tile.shape = tileshape except ValueError: # tile fills remaining space; found in some JPEG compressed slides s = (min(imagedepth - td, tiledepth), min(imagelength - tl, tilelength), min(imagewidth - tw, tilewidth), samples) try: tile.shape = s except ValueError: # incomplete tile; see gdal issue #1179 log.warning('tile_decode: incomplete tile %s %s', tile.shape, tileshape) t = numpy.zeros(tilesize, tile.dtype) s = min(tile.size, tilesize) t[:s] = tile[:s] tile = t.reshape(tileshape) tile = unpredict(tile, axis=-2, out=tile) out[pl, td:td+tiledepth, tl:tl+tilelength, tw:tw+tilewidth] = ( tile[:imagedepth-td, :imagelength-tl, :imagewidth-tw]) else: out[pl, td:td+tiledepth, tl:tl+tilelength, tw:tw+tilewidth] = 0
[ "def", "tile_decode", "(", "tile", ",", "tileindex", ",", "tileshape", ",", "tiledshape", ",", "lsb2msb", ",", "decompress", ",", "unpack", ",", "unpredict", ",", "out", ")", ":", "_", ",", "imagedepth", ",", "imagelength", ",", "imagewidth", ",", "_", "=", "out", ".", "shape", "tileddepth", ",", "tiledlength", ",", "tiledwidth", "=", "tiledshape", "tiledepth", ",", "tilelength", ",", "tilewidth", ",", "samples", "=", "tileshape", "tilesize", "=", "tiledepth", "*", "tilelength", "*", "tilewidth", "*", "samples", "pl", "=", "tileindex", "//", "(", "tiledwidth", "*", "tiledlength", "*", "tileddepth", ")", "td", "=", "(", "tileindex", "//", "(", "tiledwidth", "*", "tiledlength", ")", ")", "%", "tileddepth", "*", "tiledepth", "tl", "=", "(", "tileindex", "//", "tiledwidth", ")", "%", "tiledlength", "*", "tilelength", "tw", "=", "tileindex", "%", "tiledwidth", "*", "tilewidth", "if", "tile", ":", "if", "lsb2msb", ":", "tile", "=", "bitorder_decode", "(", "tile", ",", "out", "=", "tile", ")", "tile", "=", "decompress", "(", "tile", ")", "tile", "=", "unpack", "(", "tile", ")", "# decompression / unpacking might return too many bytes", "tile", "=", "tile", "[", ":", "tilesize", "]", "try", ":", "# complete tile according to TIFF specification", "tile", ".", "shape", "=", "tileshape", "except", "ValueError", ":", "# tile fills remaining space; found in some JPEG compressed slides", "s", "=", "(", "min", "(", "imagedepth", "-", "td", ",", "tiledepth", ")", ",", "min", "(", "imagelength", "-", "tl", ",", "tilelength", ")", ",", "min", "(", "imagewidth", "-", "tw", ",", "tilewidth", ")", ",", "samples", ")", "try", ":", "tile", ".", "shape", "=", "s", "except", "ValueError", ":", "# incomplete tile; see gdal issue #1179", "log", ".", "warning", "(", "'tile_decode: incomplete tile %s %s'", ",", "tile", ".", "shape", ",", "tileshape", ")", "t", "=", "numpy", ".", "zeros", "(", "tilesize", ",", "tile", ".", "dtype", ")", "s", "=", "min", "(", "tile", ".", "size", ",", "tilesize", ")", "t", "[", ":", "s", "]", "=", "tile", "[", ":", "s", "]", "tile", "=", "t", ".", "reshape", "(", "tileshape", ")", "tile", "=", "unpredict", "(", "tile", ",", "axis", "=", "-", "2", ",", "out", "=", "tile", ")", "out", "[", "pl", ",", "td", ":", "td", "+", "tiledepth", ",", "tl", ":", "tl", "+", "tilelength", ",", "tw", ":", "tw", "+", "tilewidth", "]", "=", "(", "tile", "[", ":", "imagedepth", "-", "td", ",", ":", "imagelength", "-", "tl", ",", ":", "imagewidth", "-", "tw", "]", ")", "else", ":", "out", "[", "pl", ",", "td", ":", "td", "+", "tiledepth", ",", "tl", ":", "tl", "+", "tilelength", ",", "tw", ":", "tw", "+", "tilewidth", "]", "=", "0" ]
Decode tile segment bytes into 5D output array.
[ "Decode", "tile", "segment", "bytes", "into", "5D", "output", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9337-L9381
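A sketch of tile_decode placing one uncompressed 2x2 tile into a 4x4 output plane; the decompress/unpack/unpredict callables here are stand-ins for the codec functions tifffile would normally supply:

import numpy

out = numpy.zeros((1, 1, 4, 4, 1), 'uint8')   # planes, depth, length, width, samples
tileshape = (1, 2, 2, 1)                       # tiledepth, tilelength, tilewidth, samples
tiledshape = (1, 2, 2)                         # tileddepth, tiledlength, tiledwidth
tile = bytes((9, 8, 7, 6))                     # raw bytes of one tile
identity = lambda x: x                         # no compression
unpack = lambda x: numpy.frombuffer(x, 'uint8').copy()
nopredict = lambda x, axis=-2, out=None: x     # no predictor
tile_decode(tile, 0, tileshape, tiledshape, False,
            identity, unpack, nopredict, out)
# out[0, 0, :2, :2, 0] now holds [[9, 8], [7, 6]]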
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
unpack_rgb
def unpack_rgb(data, dtype=None, bitspersample=None, rescale=True): """Return array from byte string containing packed samples. Use to unpack RGB565 or RGB555 to RGB888 format. Parameters ---------- data : byte str The data to be decoded. Samples in each pixel are stored consecutively. Pixels are aligned to 8, 16, or 32 bit boundaries. dtype : numpy.dtype The sample data type. The byteorder applies also to the data stream. bitspersample : tuple Number of bits for each sample in a pixel. rescale : bool Upscale samples to the number of bits in dtype. Returns ------- numpy.ndarray Flattened array of unpacked samples of native dtype. Examples -------- >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff) >>> print(unpack_rgb(data, '<B', (5, 6, 5), False)) [ 1 1 1 31 63 31] >>> print(unpack_rgb(data, '<B', (5, 6, 5))) [ 8 4 8 255 255 255] >>> print(unpack_rgb(data, '<B', (5, 5, 5))) [ 16 8 8 255 255 255] """ if bitspersample is None: bitspersample = (5, 6, 5) if dtype is None: dtype = '<B' dtype = numpy.dtype(dtype) bits = int(numpy.sum(bitspersample)) if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)): raise ValueError('sample size not supported: %s' % str(bitspersample)) dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits) data = numpy.frombuffer(data, dtype.byteorder+dt) result = numpy.empty((data.size, len(bitspersample)), dtype.char) for i, bps in enumerate(bitspersample): t = data >> int(numpy.sum(bitspersample[i+1:])) t &= int('0b'+'1'*bps, 2) if rescale: o = ((dtype.itemsize * 8) // bps + 1) * bps if o > data.dtype.itemsize * 8: t = t.astype('I') t *= (2**o - 1) // (2**bps - 1) t //= 2**(o - (dtype.itemsize * 8)) result[:, i] = t return result.reshape(-1)
python
def unpack_rgb(data, dtype=None, bitspersample=None, rescale=True): """Return array from byte string containing packed samples. Use to unpack RGB565 or RGB555 to RGB888 format. Parameters ---------- data : byte str The data to be decoded. Samples in each pixel are stored consecutively. Pixels are aligned to 8, 16, or 32 bit boundaries. dtype : numpy.dtype The sample data type. The byteorder applies also to the data stream. bitspersample : tuple Number of bits for each sample in a pixel. rescale : bool Upscale samples to the number of bits in dtype. Returns ------- numpy.ndarray Flattened array of unpacked samples of native dtype. Examples -------- >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff) >>> print(unpack_rgb(data, '<B', (5, 6, 5), False)) [ 1 1 1 31 63 31] >>> print(unpack_rgb(data, '<B', (5, 6, 5))) [ 8 4 8 255 255 255] >>> print(unpack_rgb(data, '<B', (5, 5, 5))) [ 16 8 8 255 255 255] """ if bitspersample is None: bitspersample = (5, 6, 5) if dtype is None: dtype = '<B' dtype = numpy.dtype(dtype) bits = int(numpy.sum(bitspersample)) if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)): raise ValueError('sample size not supported: %s' % str(bitspersample)) dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits) data = numpy.frombuffer(data, dtype.byteorder+dt) result = numpy.empty((data.size, len(bitspersample)), dtype.char) for i, bps in enumerate(bitspersample): t = data >> int(numpy.sum(bitspersample[i+1:])) t &= int('0b'+'1'*bps, 2) if rescale: o = ((dtype.itemsize * 8) // bps + 1) * bps if o > data.dtype.itemsize * 8: t = t.astype('I') t *= (2**o - 1) // (2**bps - 1) t //= 2**(o - (dtype.itemsize * 8)) result[:, i] = t return result.reshape(-1)
[ "def", "unpack_rgb", "(", "data", ",", "dtype", "=", "None", ",", "bitspersample", "=", "None", ",", "rescale", "=", "True", ")", ":", "if", "bitspersample", "is", "None", ":", "bitspersample", "=", "(", "5", ",", "6", ",", "5", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "'<B'", "dtype", "=", "numpy", ".", "dtype", "(", "dtype", ")", "bits", "=", "int", "(", "numpy", ".", "sum", "(", "bitspersample", ")", ")", "if", "not", "(", "bits", "<=", "32", "and", "all", "(", "i", "<=", "dtype", ".", "itemsize", "*", "8", "for", "i", "in", "bitspersample", ")", ")", ":", "raise", "ValueError", "(", "'sample size not supported: %s'", "%", "str", "(", "bitspersample", ")", ")", "dt", "=", "next", "(", "i", "for", "i", "in", "'BHI'", "if", "numpy", ".", "dtype", "(", "i", ")", ".", "itemsize", "*", "8", ">=", "bits", ")", "data", "=", "numpy", ".", "frombuffer", "(", "data", ",", "dtype", ".", "byteorder", "+", "dt", ")", "result", "=", "numpy", ".", "empty", "(", "(", "data", ".", "size", ",", "len", "(", "bitspersample", ")", ")", ",", "dtype", ".", "char", ")", "for", "i", ",", "bps", "in", "enumerate", "(", "bitspersample", ")", ":", "t", "=", "data", ">>", "int", "(", "numpy", ".", "sum", "(", "bitspersample", "[", "i", "+", "1", ":", "]", ")", ")", "t", "&=", "int", "(", "'0b'", "+", "'1'", "*", "bps", ",", "2", ")", "if", "rescale", ":", "o", "=", "(", "(", "dtype", ".", "itemsize", "*", "8", ")", "//", "bps", "+", "1", ")", "*", "bps", "if", "o", ">", "data", ".", "dtype", ".", "itemsize", "*", "8", ":", "t", "=", "t", ".", "astype", "(", "'I'", ")", "t", "*=", "(", "2", "**", "o", "-", "1", ")", "//", "(", "2", "**", "bps", "-", "1", ")", "t", "//=", "2", "**", "(", "o", "-", "(", "dtype", ".", "itemsize", "*", "8", ")", ")", "result", "[", ":", ",", "i", "]", "=", "t", "return", "result", ".", "reshape", "(", "-", "1", ")" ]
Return array from byte string containing packed samples. Use to unpack RGB565 or RGB555 to RGB888 format. Parameters ---------- data : byte str The data to be decoded. Samples in each pixel are stored consecutively. Pixels are aligned to 8, 16, or 32 bit boundaries. dtype : numpy.dtype The sample data type. The byteorder applies also to the data stream. bitspersample : tuple Number of bits for each sample in a pixel. rescale : bool Upscale samples to the number of bits in dtype. Returns ------- numpy.ndarray Flattened array of unpacked samples of native dtype. Examples -------- >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff) >>> print(unpack_rgb(data, '<B', (5, 6, 5), False)) [ 1 1 1 31 63 31] >>> print(unpack_rgb(data, '<B', (5, 6, 5))) [ 8 4 8 255 255 255] >>> print(unpack_rgb(data, '<B', (5, 5, 5))) [ 16 8 8 255 255 255]
[ "Return", "array", "from", "byte", "string", "containing", "packed", "samples", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9384-L9438
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
delta_encode
def delta_encode(data, axis=-1, out=None): """Encode Delta.""" if isinstance(data, (bytes, bytearray)): data = numpy.frombuffer(data, dtype='u1') diff = numpy.diff(data, axis=0) return numpy.insert(diff, 0, data[0]).tobytes() dtype = data.dtype if dtype.kind == 'f': data = data.view('u%i' % dtype.itemsize) diff = numpy.diff(data, axis=axis) key = [slice(None)] * data.ndim key[axis] = 0 diff = numpy.insert(diff, 0, data[tuple(key)], axis=axis) if dtype.kind == 'f': return diff.view(dtype) return diff
python
def delta_encode(data, axis=-1, out=None): """Encode Delta.""" if isinstance(data, (bytes, bytearray)): data = numpy.frombuffer(data, dtype='u1') diff = numpy.diff(data, axis=0) return numpy.insert(diff, 0, data[0]).tobytes() dtype = data.dtype if dtype.kind == 'f': data = data.view('u%i' % dtype.itemsize) diff = numpy.diff(data, axis=axis) key = [slice(None)] * data.ndim key[axis] = 0 diff = numpy.insert(diff, 0, data[tuple(key)], axis=axis) if dtype.kind == 'f': return diff.view(dtype) return diff
[ "def", "delta_encode", "(", "data", ",", "axis", "=", "-", "1", ",", "out", "=", "None", ")", ":", "if", "isinstance", "(", "data", ",", "(", "bytes", ",", "bytearray", ")", ")", ":", "data", "=", "numpy", ".", "frombuffer", "(", "data", ",", "dtype", "=", "'u1'", ")", "diff", "=", "numpy", ".", "diff", "(", "data", ",", "axis", "=", "0", ")", "return", "numpy", ".", "insert", "(", "diff", ",", "0", ",", "data", "[", "0", "]", ")", ".", "tobytes", "(", ")", "dtype", "=", "data", ".", "dtype", "if", "dtype", ".", "kind", "==", "'f'", ":", "data", "=", "data", ".", "view", "(", "'u%i'", "%", "dtype", ".", "itemsize", ")", "diff", "=", "numpy", ".", "diff", "(", "data", ",", "axis", "=", "axis", ")", "key", "=", "[", "slice", "(", "None", ")", "]", "*", "data", ".", "ndim", "key", "[", "axis", "]", "=", "0", "diff", "=", "numpy", ".", "insert", "(", "diff", ",", "0", ",", "data", "[", "tuple", "(", "key", ")", "]", ",", "axis", "=", "axis", ")", "if", "dtype", ".", "kind", "==", "'f'", ":", "return", "diff", ".", "view", "(", "dtype", ")", "return", "diff" ]
Encode Delta.
[ "Encode", "Delta", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9441-L9459
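A quick check of delta_encode on a small uint8 array; differences wrap modulo 256 in the unsigned dtype:

import numpy

data = numpy.array([2, 5, 4, 10], 'uint8')
delta_encode(data)
# array([2, 3, 255, 6], dtype=uint8): first value kept, then wrapping diffs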
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
delta_decode
def delta_decode(data, axis=-1, out=None): """Decode Delta.""" if out is not None and not out.flags.writeable: out = None if isinstance(data, (bytes, bytearray)): data = numpy.frombuffer(data, dtype='u1') return numpy.cumsum(data, axis=0, dtype='u1', out=out).tobytes() if data.dtype.kind == 'f': view = data.view('u%i' % data.dtype.itemsize) view = numpy.cumsum(view, axis=axis, dtype=view.dtype) return view.view(data.dtype) return numpy.cumsum(data, axis=axis, dtype=data.dtype, out=out)
python
def delta_decode(data, axis=-1, out=None): """Decode Delta.""" if out is not None and not out.flags.writeable: out = None if isinstance(data, (bytes, bytearray)): data = numpy.frombuffer(data, dtype='u1') return numpy.cumsum(data, axis=0, dtype='u1', out=out).tobytes() if data.dtype.kind == 'f': view = data.view('u%i' % data.dtype.itemsize) view = numpy.cumsum(view, axis=axis, dtype=view.dtype) return view.view(data.dtype) return numpy.cumsum(data, axis=axis, dtype=data.dtype, out=out)
[ "def", "delta_decode", "(", "data", ",", "axis", "=", "-", "1", ",", "out", "=", "None", ")", ":", "if", "out", "is", "not", "None", "and", "not", "out", ".", "flags", ".", "writeable", ":", "out", "=", "None", "if", "isinstance", "(", "data", ",", "(", "bytes", ",", "bytearray", ")", ")", ":", "data", "=", "numpy", ".", "frombuffer", "(", "data", ",", "dtype", "=", "'u1'", ")", "return", "numpy", ".", "cumsum", "(", "data", ",", "axis", "=", "0", ",", "dtype", "=", "'u1'", ",", "out", "=", "out", ")", ".", "tobytes", "(", ")", "if", "data", ".", "dtype", ".", "kind", "==", "'f'", ":", "view", "=", "data", ".", "view", "(", "'u%i'", "%", "data", ".", "dtype", ".", "itemsize", ")", "view", "=", "numpy", ".", "cumsum", "(", "view", ",", "axis", "=", "axis", ",", "dtype", "=", "view", ".", "dtype", ")", "return", "view", ".", "view", "(", "data", ".", "dtype", ")", "return", "numpy", ".", "cumsum", "(", "data", ",", "axis", "=", "axis", ",", "dtype", "=", "data", ".", "dtype", ",", "out", "=", "out", ")" ]
Decode Delta.
[ "Decode", "Delta", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9462-L9473
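delta_decode inverts delta_encode, so a roundtrip sketch:

import numpy

data = numpy.array([2, 5, 4, 10], 'uint8')
numpy.array_equal(delta_decode(delta_encode(data)), data)
# True; the uint8 cumsum wraps back exactly where the diff wrapped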
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
bitorder_decode
def bitorder_decode(data, out=None, _bitorder=[]): """Reverse bits in each byte of byte string or numpy array. Decode data where pixels with lower column values are stored in the lower-order bits of the bytes (TIFF FillOrder is LSB2MSB). Parameters ---------- data : byte string or ndarray The data to be bit reversed. If byte string, a new bit-reversed byte string is returned. Numpy arrays are bit-reversed in-place. Examples -------- >>> bitorder_decode(b'\\x01\\x64') b'\\x80&' >>> data = numpy.array([1, 666], dtype='uint16') >>> bitorder_decode(data) >>> data array([ 128, 16473], dtype=uint16) """ if not _bitorder: _bitorder.append( b'\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8(' b'\xa8h\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14' b'\x94T\xd44\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|' b'\xfc\x02\x82B\xc2"\xa2b\xe2\x12\x92R\xd22\xb2r\xf2\n\x8aJ\xca*' b'\xaaj\xea\x1a\x9aZ\xda:\xbaz\xfa\x06\x86F\xc6&\xa6f\xe6\x16' b'\x96V\xd66\xb6v\xf6\x0e\x8eN\xce.\xaen\xee\x1e\x9e^\xde>\xbe~' b'\xfe\x01\x81A\xc1!\xa1a\xe1\x11\x91Q\xd11\xb1q\xf1\t\x89I\xc9)' b'\xa9i\xe9\x19\x99Y\xd99\xb9y\xf9\x05\x85E\xc5%\xa5e\xe5\x15' b'\x95U\xd55\xb5u\xf5\r\x8dM\xcd-\xadm\xed\x1d\x9d]\xdd=\xbd}' b'\xfd\x03\x83C\xc3#\xa3c\xe3\x13\x93S\xd33\xb3s\xf3\x0b\x8bK' b'\xcb+\xabk\xeb\x1b\x9b[\xdb;\xbb{\xfb\x07\x87G\xc7\'\xa7g\xe7' b'\x17\x97W\xd77\xb7w\xf7\x0f\x8fO\xcf/\xafo\xef\x1f\x9f_' b'\xdf?\xbf\x7f\xff') _bitorder.append(numpy.frombuffer(_bitorder[0], dtype='uint8')) try: view = data.view('uint8') numpy.take(_bitorder[1], view, out=view) return data except AttributeError: return data.translate(_bitorder[0]) except ValueError: raise NotImplementedError('slices of arrays not supported') return None
python
def bitorder_decode(data, out=None, _bitorder=[]): """Reverse bits in each byte of byte string or numpy array. Decode data where pixels with lower column values are stored in the lower-order bits of the bytes (TIFF FillOrder is LSB2MSB). Parameters ---------- data : byte string or ndarray The data to be bit reversed. If byte string, a new bit-reversed byte string is returned. Numpy arrays are bit-reversed in-place. Examples -------- >>> bitorder_decode(b'\\x01\\x64') b'\\x80&' >>> data = numpy.array([1, 666], dtype='uint16') >>> bitorder_decode(data) >>> data array([ 128, 16473], dtype=uint16) """ if not _bitorder: _bitorder.append( b'\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8(' b'\xa8h\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14' b'\x94T\xd44\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|' b'\xfc\x02\x82B\xc2"\xa2b\xe2\x12\x92R\xd22\xb2r\xf2\n\x8aJ\xca*' b'\xaaj\xea\x1a\x9aZ\xda:\xbaz\xfa\x06\x86F\xc6&\xa6f\xe6\x16' b'\x96V\xd66\xb6v\xf6\x0e\x8eN\xce.\xaen\xee\x1e\x9e^\xde>\xbe~' b'\xfe\x01\x81A\xc1!\xa1a\xe1\x11\x91Q\xd11\xb1q\xf1\t\x89I\xc9)' b'\xa9i\xe9\x19\x99Y\xd99\xb9y\xf9\x05\x85E\xc5%\xa5e\xe5\x15' b'\x95U\xd55\xb5u\xf5\r\x8dM\xcd-\xadm\xed\x1d\x9d]\xdd=\xbd}' b'\xfd\x03\x83C\xc3#\xa3c\xe3\x13\x93S\xd33\xb3s\xf3\x0b\x8bK' b'\xcb+\xabk\xeb\x1b\x9b[\xdb;\xbb{\xfb\x07\x87G\xc7\'\xa7g\xe7' b'\x17\x97W\xd77\xb7w\xf7\x0f\x8fO\xcf/\xafo\xef\x1f\x9f_' b'\xdf?\xbf\x7f\xff') _bitorder.append(numpy.frombuffer(_bitorder[0], dtype='uint8')) try: view = data.view('uint8') numpy.take(_bitorder[1], view, out=view) return data except AttributeError: return data.translate(_bitorder[0]) except ValueError: raise NotImplementedError('slices of arrays not supported') return None
[ "def", "bitorder_decode", "(", "data", ",", "out", "=", "None", ",", "_bitorder", "=", "[", "]", ")", ":", "if", "not", "_bitorder", ":", "_bitorder", ".", "append", "(", "b'\\x00\\x80@\\xc0 \\xa0`\\xe0\\x10\\x90P\\xd00\\xb0p\\xf0\\x08\\x88H\\xc8('", "b'\\xa8h\\xe8\\x18\\x98X\\xd88\\xb8x\\xf8\\x04\\x84D\\xc4$\\xa4d\\xe4\\x14'", "b'\\x94T\\xd44\\xb4t\\xf4\\x0c\\x8cL\\xcc,\\xacl\\xec\\x1c\\x9c\\\\\\xdc<\\xbc|'", "b'\\xfc\\x02\\x82B\\xc2\"\\xa2b\\xe2\\x12\\x92R\\xd22\\xb2r\\xf2\\n\\x8aJ\\xca*'", "b'\\xaaj\\xea\\x1a\\x9aZ\\xda:\\xbaz\\xfa\\x06\\x86F\\xc6&\\xa6f\\xe6\\x16'", "b'\\x96V\\xd66\\xb6v\\xf6\\x0e\\x8eN\\xce.\\xaen\\xee\\x1e\\x9e^\\xde>\\xbe~'", "b'\\xfe\\x01\\x81A\\xc1!\\xa1a\\xe1\\x11\\x91Q\\xd11\\xb1q\\xf1\\t\\x89I\\xc9)'", "b'\\xa9i\\xe9\\x19\\x99Y\\xd99\\xb9y\\xf9\\x05\\x85E\\xc5%\\xa5e\\xe5\\x15'", "b'\\x95U\\xd55\\xb5u\\xf5\\r\\x8dM\\xcd-\\xadm\\xed\\x1d\\x9d]\\xdd=\\xbd}'", "b'\\xfd\\x03\\x83C\\xc3#\\xa3c\\xe3\\x13\\x93S\\xd33\\xb3s\\xf3\\x0b\\x8bK'", "b'\\xcb+\\xabk\\xeb\\x1b\\x9b[\\xdb;\\xbb{\\xfb\\x07\\x87G\\xc7\\'\\xa7g\\xe7'", "b'\\x17\\x97W\\xd77\\xb7w\\xf7\\x0f\\x8fO\\xcf/\\xafo\\xef\\x1f\\x9f_'", "b'\\xdf?\\xbf\\x7f\\xff'", ")", "_bitorder", ".", "append", "(", "numpy", ".", "frombuffer", "(", "_bitorder", "[", "0", "]", ",", "dtype", "=", "'uint8'", ")", ")", "try", ":", "view", "=", "data", ".", "view", "(", "'uint8'", ")", "numpy", ".", "take", "(", "_bitorder", "[", "1", "]", ",", "view", ",", "out", "=", "view", ")", "return", "data", "except", "AttributeError", ":", "return", "data", ".", "translate", "(", "_bitorder", "[", "0", "]", ")", "except", "ValueError", ":", "raise", "NotImplementedError", "(", "'slices of arrays not supported'", ")", "return", "None" ]
Reverse bits in each byte of byte string or numpy array. Decode data where pixels with lower column values are stored in the lower-order bits of the bytes (TIFF FillOrder is LSB2MSB). Parameters ---------- data : byte string or ndarray The data to be bit reversed. If byte string, a new bit-reversed byte string is returned. Numpy arrays are bit-reversed in-place. Examples -------- >>> bitorder_decode(b'\\x01\\x64') b'\\x80&' >>> data = numpy.array([1, 666], dtype='uint16') >>> bitorder_decode(data) >>> data array([ 128, 16473], dtype=uint16)
[ "Reverse", "bits", "in", "each", "byte", "of", "byte", "string", "or", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9476-L9522
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
packints_decode
def packints_decode(data, dtype, numbits, runlen=0, out=None): """Decompress byte string to array of integers. This implementation only handles itemsizes 1, 8, 16, 32, and 64 bits. Install the imagecodecs package for decoding other integer sizes. Parameters ---------- data : byte str Data to decompress. dtype : numpy.dtype or str A numpy boolean or integer type. numbits : int Number of bits per integer. runlen : int Number of consecutive integers, after which to start at next byte. Examples -------- >>> packints_decode(b'a', 'B', 1) array([0, 1, 1, 0, 0, 0, 0, 1], dtype=uint8) """ if numbits == 1: # bitarray data = numpy.frombuffer(data, '|B') data = numpy.unpackbits(data) if runlen % 8: data = data.reshape(-1, runlen + (8 - runlen % 8)) data = data[:, :runlen].reshape(-1) return data.astype(dtype) if numbits in (8, 16, 32, 64): return numpy.frombuffer(data, dtype) raise NotImplementedError('unpacking %s-bit integers to %s not supported' % (numbits, numpy.dtype(dtype)))
python
def packints_decode(data, dtype, numbits, runlen=0, out=None): """Decompress byte string to array of integers. This implementation only handles itemsizes 1, 8, 16, 32, and 64 bits. Install the imagecodecs package for decoding other integer sizes. Parameters ---------- data : byte str Data to decompress. dtype : numpy.dtype or str A numpy boolean or integer type. numbits : int Number of bits per integer. runlen : int Number of consecutive integers, after which to start at next byte. Examples -------- >>> packints_decode(b'a', 'B', 1) array([0, 1, 1, 0, 0, 0, 0, 1], dtype=uint8) """ if numbits == 1: # bitarray data = numpy.frombuffer(data, '|B') data = numpy.unpackbits(data) if runlen % 8: data = data.reshape(-1, runlen + (8 - runlen % 8)) data = data[:, :runlen].reshape(-1) return data.astype(dtype) if numbits in (8, 16, 32, 64): return numpy.frombuffer(data, dtype) raise NotImplementedError('unpacking %s-bit integers to %s not supported' % (numbits, numpy.dtype(dtype)))
[ "def", "packints_decode", "(", "data", ",", "dtype", ",", "numbits", ",", "runlen", "=", "0", ",", "out", "=", "None", ")", ":", "if", "numbits", "==", "1", ":", "# bitarray", "data", "=", "numpy", ".", "frombuffer", "(", "data", ",", "'|B'", ")", "data", "=", "numpy", ".", "unpackbits", "(", "data", ")", "if", "runlen", "%", "8", ":", "data", "=", "data", ".", "reshape", "(", "-", "1", ",", "runlen", "+", "(", "8", "-", "runlen", "%", "8", ")", ")", "data", "=", "data", "[", ":", ",", ":", "runlen", "]", ".", "reshape", "(", "-", "1", ")", "return", "data", ".", "astype", "(", "dtype", ")", "if", "numbits", "in", "(", "8", ",", "16", ",", "32", ",", "64", ")", ":", "return", "numpy", ".", "frombuffer", "(", "data", ",", "dtype", ")", "raise", "NotImplementedError", "(", "'unpacking %s-bit integers to %s not supported'", "%", "(", "numbits", ",", "numpy", ".", "dtype", "(", "dtype", ")", ")", ")" ]
Decompress byte string to array of integers. This implementation only handles itemsizes 1, 8, 16, 32, and 64 bits. Install the imagecodecs package for decoding other integer sizes. Parameters ---------- data : byte str Data to decompress. dtype : numpy.dtype or str A numpy boolean or integer type. numbits : int Number of bits per integer. runlen : int Number of consecutive integers, after which to start at next byte. Examples -------- >>> packints_decode(b'a', 'B', 1) array([0, 1, 1, 0, 0, 0, 0, 1], dtype=uint8)
[ "Decompress", "byte", "string", "to", "array", "of", "integers", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9525-L9558
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
apply_colormap
def apply_colormap(image, colormap, contig=True): """Return palette-colored image. The image values are used to index the colormap on axis 1. The returned image is of shape image.shape+colormap.shape[0] and dtype colormap.dtype. Parameters ---------- image : numpy.ndarray Indexes into the colormap. colormap : numpy.ndarray RGB lookup table aka palette of shape (3, 2**bits_per_sample). contig : bool If True, return a contiguous array. Examples -------- >>> image = numpy.arange(256, dtype='uint8') >>> colormap = numpy.vstack([image, image, image]).astype('uint16') * 256 >>> apply_colormap(image, colormap)[-1] array([65280, 65280, 65280], dtype=uint16) """ image = numpy.take(colormap, image, axis=1) image = numpy.rollaxis(image, 0, image.ndim) if contig: image = numpy.ascontiguousarray(image) return image
python
def apply_colormap(image, colormap, contig=True): """Return palette-colored image. The image values are used to index the colormap on axis 1. The returned image is of shape image.shape+colormap.shape[0] and dtype colormap.dtype. Parameters ---------- image : numpy.ndarray Indexes into the colormap. colormap : numpy.ndarray RGB lookup table aka palette of shape (3, 2**bits_per_sample). contig : bool If True, return a contiguous array. Examples -------- >>> image = numpy.arange(256, dtype='uint8') >>> colormap = numpy.vstack([image, image, image]).astype('uint16') * 256 >>> apply_colormap(image, colormap)[-1] array([65280, 65280, 65280], dtype=uint16) """ image = numpy.take(colormap, image, axis=1) image = numpy.rollaxis(image, 0, image.ndim) if contig: image = numpy.ascontiguousarray(image) return image
[ "def", "apply_colormap", "(", "image", ",", "colormap", ",", "contig", "=", "True", ")", ":", "image", "=", "numpy", ".", "take", "(", "colormap", ",", "image", ",", "axis", "=", "1", ")", "image", "=", "numpy", ".", "rollaxis", "(", "image", ",", "0", ",", "image", ".", "ndim", ")", "if", "contig", ":", "image", "=", "numpy", ".", "ascontiguousarray", "(", "image", ")", "return", "image" ]
Return palette-colored image. The image values are used to index the colormap on axis 1. The returned image is of shape image.shape+colormap.shape[0] and dtype colormap.dtype. Parameters ---------- image : numpy.ndarray Indexes into the colormap. colormap : numpy.ndarray RGB lookup table aka palette of shape (3, 2**bits_per_sample). contig : bool If True, return a contiguous array. Examples -------- >>> image = numpy.arange(256, dtype='uint8') >>> colormap = numpy.vstack([image, image, image]).astype('uint16') * 256 >>> apply_colormap(image, colormap)[-1] array([65280, 65280, 65280], dtype=uint16)
[ "Return", "palette", "-", "colored", "image", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9574-L9601
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
reorient
def reorient(image, orientation): """Return reoriented view of image array. Parameters ---------- image : numpy.ndarray Non-squeezed output of asarray() functions. Axes -3 and -2 must be image length and width respectively. orientation : int or str One of TIFF.ORIENTATION names or values. """ orient = TIFF.ORIENTATION orientation = enumarg(orient, orientation) if orientation == orient.TOPLEFT: return image if orientation == orient.TOPRIGHT: return image[..., ::-1, :] if orientation == orient.BOTLEFT: return image[..., ::-1, :, :] if orientation == orient.BOTRIGHT: return image[..., ::-1, ::-1, :] if orientation == orient.LEFTTOP: return numpy.swapaxes(image, -3, -2) if orientation == orient.RIGHTTOP: return numpy.swapaxes(image, -3, -2)[..., ::-1, :] if orientation == orient.RIGHTBOT: return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] if orientation == orient.LEFTBOT: return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] return image
python
def reorient(image, orientation): """Return reoriented view of image array. Parameters ---------- image : numpy.ndarray Non-squeezed output of asarray() functions. Axes -3 and -2 must be image length and width respectively. orientation : int or str One of TIFF.ORIENTATION names or values. """ orient = TIFF.ORIENTATION orientation = enumarg(orient, orientation) if orientation == orient.TOPLEFT: return image if orientation == orient.TOPRIGHT: return image[..., ::-1, :] if orientation == orient.BOTLEFT: return image[..., ::-1, :, :] if orientation == orient.BOTRIGHT: return image[..., ::-1, ::-1, :] if orientation == orient.LEFTTOP: return numpy.swapaxes(image, -3, -2) if orientation == orient.RIGHTTOP: return numpy.swapaxes(image, -3, -2)[..., ::-1, :] if orientation == orient.RIGHTBOT: return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] if orientation == orient.LEFTBOT: return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] return image
[ "def", "reorient", "(", "image", ",", "orientation", ")", ":", "orient", "=", "TIFF", ".", "ORIENTATION", "orientation", "=", "enumarg", "(", "orient", ",", "orientation", ")", "if", "orientation", "==", "orient", ".", "TOPLEFT", ":", "return", "image", "if", "orientation", "==", "orient", ".", "TOPRIGHT", ":", "return", "image", "[", "...", ",", ":", ":", "-", "1", ",", ":", "]", "if", "orientation", "==", "orient", ".", "BOTLEFT", ":", "return", "image", "[", "...", ",", ":", ":", "-", "1", ",", ":", ",", ":", "]", "if", "orientation", "==", "orient", ".", "BOTRIGHT", ":", "return", "image", "[", "...", ",", ":", ":", "-", "1", ",", ":", ":", "-", "1", ",", ":", "]", "if", "orientation", "==", "orient", ".", "LEFTTOP", ":", "return", "numpy", ".", "swapaxes", "(", "image", ",", "-", "3", ",", "-", "2", ")", "if", "orientation", "==", "orient", ".", "RIGHTTOP", ":", "return", "numpy", ".", "swapaxes", "(", "image", ",", "-", "3", ",", "-", "2", ")", "[", "...", ",", ":", ":", "-", "1", ",", ":", "]", "if", "orientation", "==", "orient", ".", "RIGHTBOT", ":", "return", "numpy", ".", "swapaxes", "(", "image", ",", "-", "3", ",", "-", "2", ")", "[", "...", ",", ":", ":", "-", "1", ",", ":", ",", ":", "]", "if", "orientation", "==", "orient", ".", "LEFTBOT", ":", "return", "numpy", ".", "swapaxes", "(", "image", ",", "-", "3", ",", "-", "2", ")", "[", "...", ",", ":", ":", "-", "1", ",", ":", ":", "-", "1", ",", ":", "]", "return", "image" ]
Return reoriented view of image array. Parameters ---------- image : numpy.ndarray Non-squeezed output of asarray() functions. Axes -3 and -2 must be image length and width respectively. orientation : int or str One of TIFF.ORIENTATION names or values.
[ "Return", "reoriented", "view", "of", "image", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9604-L9635
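A sketch of reorient; TOPRIGHT mirrors the width axis (-2), assuming TIFF.ORIENTATION defines that name:

import numpy

image = numpy.arange(6).reshape(1, 2, 3, 1)   # ..., length, width, samples
reorient(image, 'TOPRIGHT')[0, :, :, 0]
# array([[2, 1, 0],
#        [5, 4, 3]])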
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
repeat_nd
def repeat_nd(a, repeats): """Return read-only view into input array with elements repeated. Zoom nD image by integer factors using nearest neighbor interpolation (box filter). Parameters ---------- a : array_like Input array. repeats : sequence of int The number of repetitions to apply along each dimension of input array. Examples -------- >>> repeat_nd([[1, 2], [3, 4]], (2, 2)) array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]) """ a = numpy.asarray(a) reshape = [] shape = [] strides = [] for i, j, k in zip(a.strides, a.shape, repeats): shape.extend((j, k)) strides.extend((i, 0)) reshape.append(j * k) return numpy.lib.stride_tricks.as_strided( a, shape, strides, writeable=False).reshape(reshape)
python
def repeat_nd(a, repeats): """Return read-only view into input array with elements repeated. Zoom nD image by integer factors using nearest neighbor interpolation (box filter). Parameters ---------- a : array_like Input array. repeats : sequence of int The number of repetitions to apply along each dimension of input array. Examples -------- >>> repeat_nd([[1, 2], [3, 4]], (2, 2)) array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]) """ a = numpy.asarray(a) reshape = [] shape = [] strides = [] for i, j, k in zip(a.strides, a.shape, repeats): shape.extend((j, k)) strides.extend((i, 0)) reshape.append(j * k) return numpy.lib.stride_tricks.as_strided( a, shape, strides, writeable=False).reshape(reshape)
[ "def", "repeat_nd", "(", "a", ",", "repeats", ")", ":", "a", "=", "numpy", ".", "asarray", "(", "a", ")", "reshape", "=", "[", "]", "shape", "=", "[", "]", "strides", "=", "[", "]", "for", "i", ",", "j", ",", "k", "in", "zip", "(", "a", ".", "strides", ",", "a", ".", "shape", ",", "repeats", ")", ":", "shape", ".", "extend", "(", "(", "j", ",", "k", ")", ")", "strides", ".", "extend", "(", "(", "i", ",", "0", ")", ")", "reshape", ".", "append", "(", "j", "*", "k", ")", "return", "numpy", ".", "lib", ".", "stride_tricks", ".", "as_strided", "(", "a", ",", "shape", ",", "strides", ",", "writeable", "=", "False", ")", ".", "reshape", "(", "reshape", ")" ]
Return read-only view into input array with elements repeated. Zoom nD image by integer factors using nearest neighbor interpolation (box filter). Parameters ---------- a : array_like Input array. repeats : sequence of int The number of repetitions to apply along each dimension of input array. Examples -------- >>> repeat_nd([[1, 2], [3, 4]], (2, 2)) array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]])
[ "Return", "read", "-", "only", "view", "into", "input", "array", "with", "elements", "repeated", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9638-L9669
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
reshape_nd
def reshape_nd(data_or_shape, ndim): """Return image array or shape with at least ndim dimensions. Prepend 1s to image shape as necessary. >>> reshape_nd(numpy.empty(0), 1).shape (0,) >>> reshape_nd(numpy.empty(1), 2).shape (1, 1) >>> reshape_nd(numpy.empty((2, 3)), 3).shape (1, 2, 3) >>> reshape_nd(numpy.empty((3, 4, 5)), 3).shape (3, 4, 5) >>> reshape_nd((2, 3), 3) (1, 2, 3) """ is_shape = isinstance(data_or_shape, tuple) shape = data_or_shape if is_shape else data_or_shape.shape if len(shape) >= ndim: return data_or_shape shape = (1,) * (ndim - len(shape)) + shape return shape if is_shape else data_or_shape.reshape(shape)
python
def reshape_nd(data_or_shape, ndim): """Return image array or shape with at least ndim dimensions. Prepend 1s to image shape as necessary. >>> reshape_nd(numpy.empty(0), 1).shape (0,) >>> reshape_nd(numpy.empty(1), 2).shape (1, 1) >>> reshape_nd(numpy.empty((2, 3)), 3).shape (1, 2, 3) >>> reshape_nd(numpy.empty((3, 4, 5)), 3).shape (3, 4, 5) >>> reshape_nd((2, 3), 3) (1, 2, 3) """ is_shape = isinstance(data_or_shape, tuple) shape = data_or_shape if is_shape else data_or_shape.shape if len(shape) >= ndim: return data_or_shape shape = (1,) * (ndim - len(shape)) + shape return shape if is_shape else data_or_shape.reshape(shape)
[ "def", "reshape_nd", "(", "data_or_shape", ",", "ndim", ")", ":", "is_shape", "=", "isinstance", "(", "data_or_shape", ",", "tuple", ")", "shape", "=", "data_or_shape", "if", "is_shape", "else", "data_or_shape", ".", "shape", "if", "len", "(", "shape", ")", ">=", "ndim", ":", "return", "data_or_shape", "shape", "=", "(", "1", ",", ")", "*", "(", "ndim", "-", "len", "(", "shape", ")", ")", "+", "shape", "return", "shape", "if", "is_shape", "else", "data_or_shape", ".", "reshape", "(", "shape", ")" ]
Return image array or shape with at least ndim dimensions. Prepend 1s to image shape as necessary. >>> reshape_nd(numpy.empty(0), 1).shape (0,) >>> reshape_nd(numpy.empty(1), 2).shape (1, 1) >>> reshape_nd(numpy.empty((2, 3)), 3).shape (1, 2, 3) >>> reshape_nd(numpy.empty((3, 4, 5)), 3).shape (3, 4, 5) >>> reshape_nd((2, 3), 3) (1, 2, 3)
[ "Return", "image", "array", "or", "shape", "with", "at", "least", "ndim", "dimensions", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9672-L9694
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
squeeze_axes
def squeeze_axes(shape, axes, skip=None): """Return shape and axes with single-dimensional entries removed. Remove unused dimensions unless their axes are listed in 'skip'. >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') ((5, 2, 1), 'TYX') """ if len(shape) != len(axes): raise ValueError('dimensions of axes and shape do not match') if skip is None: skip = 'XY' shape, axes = zip(*(i for i in zip(shape, axes) if i[0] > 1 or i[1] in skip)) return tuple(shape), ''.join(axes)
python
def squeeze_axes(shape, axes, skip=None): """Return shape and axes with single-dimensional entries removed. Remove unused dimensions unless their axes are listed in 'skip'. >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') ((5, 2, 1), 'TYX') """ if len(shape) != len(axes): raise ValueError('dimensions of axes and shape do not match') if skip is None: skip = 'XY' shape, axes = zip(*(i for i in zip(shape, axes) if i[0] > 1 or i[1] in skip)) return tuple(shape), ''.join(axes)
[ "def", "squeeze_axes", "(", "shape", ",", "axes", ",", "skip", "=", "None", ")", ":", "if", "len", "(", "shape", ")", "!=", "len", "(", "axes", ")", ":", "raise", "ValueError", "(", "'dimensions of axes and shape do not match'", ")", "if", "skip", "is", "None", ":", "skip", "=", "'XY'", "shape", ",", "axes", "=", "zip", "(", "*", "(", "i", "for", "i", "in", "zip", "(", "shape", ",", "axes", ")", "if", "i", "[", "0", "]", ">", "1", "or", "i", "[", "1", "]", "in", "skip", ")", ")", "return", "tuple", "(", "shape", ")", ",", "''", ".", "join", "(", "axes", ")" ]
Return shape and axes with single-dimensional entries removed. Remove unused dimensions unless their axes are listed in 'skip'. >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') ((5, 2, 1), 'TYX')
[ "Return", "shape", "and", "axes", "with", "single", "-", "dimensional", "entries", "removed", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9697-L9712