Dataset columns:
    language: string (6 values)
    original_string: string (25 to 887k characters)
    text: string (25 to 887k characters)
Python
def main():
    """ Example graph for stochastic gradient ascent """
    root_path = os.path.abspath(os.curdir)
    file_path = "data/test_set.txt"
    data_matrix, label_matrix = utils.load_dataset(file_path)
    weights = regression.modified_stochastic_gradient_ascent(array(data_matrix),
                                                             label_matrix)
    graphs.plot_best_fit(weights, data_matrix, label_matrix)
Python
def select_j_rand(i, m):
    """
    takes 2 values: the index of our first alpha, and the total number of
    alphas. A value is randomly chosen, as long as it isn't i
    """
    j = i
    while j == i:
        j = int(random.uniform(0, m))
    return j
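A minimal usage sketch, assuming the select_j_rand above is in scope along with Python's random module: it draws a second alpha index that is guaranteed to differ from the first.

    import random

    i, m = 3, 10                 # first alpha index, total number of alphas
    j = select_j_rand(i, m)      # random index in [0, m) that is never equal to i
    assert j != i and 0 <= j < m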
Python
def clip_alpha(aj, H, L):
    """ clips alpha values that are greater than H or less than L """
    if aj > H:
        aj = H
    if L > aj:
        aj = L
    return aj
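A short illustration of the clipping behaviour (the values are made up):

    assert clip_alpha(1.50, H=1.0, L=0.0) == 1.0   # clipped down to H
    assert clip_alpha(-0.2, H=1.0, L=0.0) == 0.0   # clipped up to L
    assert clip_alpha(0.42, H=1.0, L=0.0) == 0.42  # already inside [L, H]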
Python
def calculate_ek(os, k):
    """ calculates an E value for a given alpha """
    f_x_k = float(multiply(os.alphas, os.label_matrix).T * os.K[:, k] + os.b)
    ek = f_x_k - float(os.label_matrix[k])
    return ek
Python
def select_j(i, os, ei):
    """
    takes the error value associated with the first choice alpha and the
    index i; find the nonzero members and choose the one that gives you
    maximum change
    """
    max_k = -1
    max_delta_e = 0
    ej = 0
    os.e_cache[i] = [1, ei]
    valid_e_cache_list = nonzero(os.e_cache[:, 0].A)[0]
    if len(valid_e_cache_list) > 1:
        for k in valid_e_cache_list:
            if k == i:
                continue
            ek = calculate_ek(os, k)
            delta_e = abs(ei - ek)
            # choose j for maximum step size
            if delta_e > max_delta_e:
                max_k = k
                max_delta_e = delta_e
                ej = ek
        return max_k, ej
    else:
        j = select_j_rand(i, os.m)
        ej = calculate_ek(os, j)
        return j, ej
Python
def update_ek(os, k):
    """ calculate the error and put it in the cache """
    ek = calculate_ek(os, k)
    os.e_cache[k] = [1, ek]
Python
def calculate_ws(alphas, data_array, class_labels):
    """
    get the hyperplane from the alphas by computing the w value.
    note that if the alphas are zero, they don't "matter"
    """
    X = mat(data_array)
    label_matrix = mat(class_labels).transpose()
    m, n = shape(X)
    w = zeros((n, 1))
    for i in range(m):
        w += multiply(alphas[i] * label_matrix[i], X[i, :].T)
    return w
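A hedged sketch of recovering the weight vector after SMO training; the toy alphas and labels below are made up, and the snippet assumes the module star-imports numpy (mat, multiply, shape, zeros) as calculate_ws relies on.

    import numpy as np

    data = [[1.0, 2.0], [2.0, 1.0], [0.0, 0.5]]
    labels = [1.0, -1.0, 1.0]
    alphas = np.mat([[0.5], [0.5], [0.0]])   # hypothetical SMO output; zero alphas drop out
    w = calculate_ws(alphas, data, labels)   # w = sum_i alpha_i * y_i * x_i
    print(w)                                 # a (2, 1) column vector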
Python
def stump_classify(data_matrix, dimension, threshold, threshold_ineq):
    """ performs threshold comparisons to classify data. """
    classification = np.ones((np.shape(data_matrix)[0], 1))
    if threshold_ineq == 'lt':
        classification[data_matrix[:, dimension] <= threshold] = -1.0
    else:
        classification[data_matrix[:, dimension] > threshold] = -1.0
    return classification
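A small illustrative call, thresholding a single feature column:

    import numpy as np

    X = np.mat([[1.0], [2.0], [3.0]])
    # with a 'lt' inequality, values <= 1.5 are labelled -1, the rest +1
    print(stump_classify(X, dimension=0, threshold=1.5, threshold_ineq='lt'))
    # expected: [[-1.], [1.], [1.]]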
Python
def build_stump(data_array, class_labels, D):
    """
    Iterate over all the possible inputs to stump_classify and find the
    best decision stump for the dataset. 'Best' is decided by the weight
    vector D.
    """
    data_matrix = np.mat(data_array)
    label_matrix = np.mat(class_labels).T
    m, n = np.shape(data_matrix)
    number_of_steps = 10.0
    best_stump = {}
    best_class_estimate = np.mat(np.zeros((m, 1)))
    min_error = np.inf
    logging.info("n is {n}".format(n=n))
    for i in range(n):
        range_min = data_matrix[:, i].min()
        range_max = data_matrix[:, i].max()
        # how large should your step size be?
        step_size = (range_max - range_min) / number_of_steps
        for j in range(-1, int(number_of_steps) + 1):
            for inequal in ['lt', 'gt']:
                threshold = (range_min + float(j) * step_size)
                predicted_values = stump_classify(data_matrix, i, threshold, inequal)
                _errors = np.mat(np.ones((m, 1)))
                # _errors is 1 for any value in predicted_values
                # that isn't equal to the label
                _errors[predicted_values == label_matrix] = 0
                weighted_error = D.T * _errors
                message = ', '.join(['split: dim {}', 'thresh {:03.2f}',
                                     'inequal: {}', 'weighted_error: {}'])
                logging.info(message.format(i, threshold, inequal, weighted_error))
                if weighted_error < min_error:
                    min_error = weighted_error
                    best_class_estimate = predicted_values.copy()
                    best_stump['dim'] = i
                    best_stump['threshold'] = threshold
                    best_stump['inequal'] = inequal
    return best_stump, min_error, best_class_estimate
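A hedged end-to-end sketch on a tiny illustrative dataset, with the uniform example weights D that a boosting loop would supply on its first round:

    import numpy as np

    data = [[1.0, 2.1], [2.0, 1.1], [1.3, 1.0], [1.0, 1.0], [2.0, 1.0]]
    labels = [1.0, 1.0, -1.0, -1.0, 1.0]
    D = np.mat(np.ones((5, 1)) / 5.0)          # uniform weights on round one
    stump, error, estimates = build_stump(data, labels, D)
    print(stump)    # e.g. {'dim': ..., 'threshold': ..., 'inequal': ...}
    print(error)    # weighted classification error of the best stump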
Python
def classify(data_to_classify, classifiers):
    """ classifies with a train of weak classifiers """
    data_matrix = np.mat(data_to_classify)
    m = np.shape(data_matrix)[0]
    # initialize to zeros
    aggregated_class_estimate = np.mat(np.zeros((m, 1)))
    for i in range(len(classifiers)):
        class_estimate = stump_classify(data_matrix,
                                        classifiers[i]['dim'],
                                        classifiers[i]['threshold'],
                                        classifiers[i]['inequal'])
        aggregated_class_estimate += classifiers[i]['alpha'] * class_estimate
    return np.sign(aggregated_class_estimate)
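A hedged sketch of feeding a one-stump "ensemble" through classify; the dict format mirrors build_stump's output, and the alpha value is made up for illustration.

    import numpy as np

    # a single hypothetical weak classifier, in the dict format classify expects
    classifiers = [{'dim': 0, 'threshold': 1.5, 'inequal': 'lt', 'alpha': 0.7}]
    print(classify([[1.0, 2.0], [3.0, 4.0]], classifiers))
    # expected: [[-1.], [1.]] -- the sign of the alpha-weighted stump votes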
Python
def plot_roc(prediction_strengths, class_labels):
    """
    ROC curve: receiver operating characteristic
    the X-axis is the number of false positives, the Y-axis is the number
    of true positives
    AUC: area under the curve
    """
    import matplotlib.pyplot as plt
    cur = (1.0, 1.0)
    y_sum = 0.0
    number_positive_classes = sum(np.array(class_labels) == 1.0)
    y_step = 1 / float(number_positive_classes)
    x_step = 1 / float(len(class_labels) - number_positive_classes)
    sorted_indices = prediction_strengths.argsort()
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    for index in sorted_indices.tolist()[0]:
        if class_labels[index] == 1.0:
            del_x = 0
            del_y = y_step
        else:
            del_x = x_step
            del_y = 0
            y_sum += cur[1]
        ax.plot([cur[0], cur[0] - del_x], [cur[1], cur[1] - del_y], c='b')
        cur = (cur[0] - del_x, cur[1] - del_y)
    ax.plot([0, 1], [0, 1], 'b--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC curve for AdaBoost Horse Colic Detection System')
    ax.axis([0, 1, 0, 1])
    plt.show()
    logging.info("the area under the curve (AUC) is {}".format(y_sum * x_step))
Python
def binary_split_dataset(data_set, feature, value):
    """
    use array filtering to partition data on the given feature and value
    """
    logging.info("dataset looks like {d}".format(d=data_set))
    logging.info("feature is {f} and value is {v}".format(f=feature, v=value))
    # index the matching rows directly; a trailing [0] here would keep
    # only the first matching row of each partition
    matrix_0 = data_set[np.nonzero(data_set[:, feature] > value)[0], :]
    matrix_1 = data_set[np.nonzero(data_set[:, feature] <= value)[0], :]
    return matrix_0, matrix_1
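A hedged example of the split on a small matrix (logging configuration omitted):

    import numpy as np

    data = np.mat([[1.0, 10.0],
                   [2.0, 20.0],
                   [3.0, 30.0]])
    left, right = binary_split_dataset(data, feature=0, value=1.5)
    print(left)    # rows with feature 0 >  1.5 -> [[2., 20.], [3., 30.]]
    print(right)   # rows with feature 0 <= 1.5 -> [[1., 10.]]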
Python
def create_tree(data_set, leaf_type=generate_leaf_model,
                error_type=get_squared_error, ops=(1, 4)):
    """
    First attempts to split the dataset into 2 parts, as determined by
    choose_best_split. If choose_best_split hits a stopping condition, it
    will return None and the value for a model type. In the case of
    regression trees, the model is a constant value; in the case of model
    trees, this is a linear equation. If the stopping condition isn't hit,
    you create a new dict and split the dataset into 2 portions, calling
    create_tree recursively on those portions.
    """
    feature, value = choose_best_split(data_set, leaf_type, error_type, ops)
    if feature is None:
        return value
    return_tree = {}
    return_tree['spInd'] = feature
    return_tree['spVal'] = value
    left_set, right_set = binary_split_dataset(data_set, feature, value)
    return_tree['left'] = create_tree(left_set, leaf_type, error_type, ops)
    return_tree['right'] = create_tree(right_set, leaf_type, error_type, ops)
    return return_tree
Python
def prune(tree, test_data):
    """ Collapse the tree if there is no test data """
    if np.shape(test_data)[0] == 0:
        return get_mean(tree)
    if is_tree(tree['right']) or is_tree(tree['left']):
        l_set, r_set = binary_split_dataset(test_data, tree['spInd'], tree['spVal'])
    if is_tree(tree['left']):
        tree['left'] = prune(tree['left'], l_set)
    if is_tree(tree['right']):
        tree['right'] = prune(tree['right'], r_set)
    # neither one are trees; now you can merge
    if not is_tree(tree['left']) and not is_tree(tree['right']):
        l_set, r_set = binary_split_dataset(test_data, tree['spInd'], tree['spVal'])
        left_merge_err = sum(np.power(l_set[:, -1] - tree['left'], 2))
        right_merge_err = sum(np.power(r_set[:, -1] - tree['right'], 2))
        error_no_merge = left_merge_err + right_merge_err
        tree_mean = (tree['left'] + tree['right']) / 2.0
        error_merge = sum(np.power(test_data[:, -1] - tree_mean, 2))
        if error_merge < error_no_merge:
            message = "merging and returning tree_mean: {0}"
            logging.info(message.format(tree_mean))
            return tree_mean
        else:
            return tree
    else:
        return tree
Python
def tree_forecast(tree, input_data, model_evaluator=tree_evaluation):
    """ Gives one forecast for one data point, for a given tree. """
    if not is_tree(tree):
        return model_evaluator(tree, input_data)
    if input_data[tree['spInd']] > tree['spVal']:
        if is_tree(tree['left']):
            return tree_forecast(tree['left'], input_data, model_evaluator)
        else:
            return model_evaluator(tree['left'], input_data)
    else:
        if is_tree(tree['right']):
            return tree_forecast(tree['right'], input_data, model_evaluator)
        else:
            return model_evaluator(tree['right'], input_data)
Python
def forecasting_models_example():
    """
    Build 3 models and evaluate the performance of model trees, regression
    trees and standard linear regression.
    """
    train_file = 'data/bikeSpeedVsIq_train.txt'
    test_file = 'data/bikeSpeedVsIq_test.txt'
    training_matrix = np.mat(utils.load_tsv_into_array(train_file))
    test_matrix = np.mat(utils.load_tsv_into_array(test_file))
    # regression tree
    tree = regression_trees.create_tree(training_matrix, ops=(1, 20))
    y_hat = regression_trees.create_forecast(tree, test_matrix[:, 0])
    accuracy = np.corrcoef(y_hat, test_matrix[:, 1], rowvar=0)[0, 1]
    logging.info("training accuracy = {0}".format(accuracy))
    # model tree
    tree = regression_trees.create_tree(training_matrix,
                                        regression_trees.model_leaf,
                                        regression_trees.model_error,
                                        (1, 20))
    y_hat = regression_trees.create_forecast(tree, test_matrix[:, 0],
                                             regression_trees.model_tree_evaluation)
    accuracy = np.corrcoef(y_hat, test_matrix[:, 1], rowvar=0)[0, 1]
    logging.info("model tree accuracy = {0}".format(accuracy))
    # standard linear regression
    weights, x, y = regression_trees.linearly_solve(training_matrix)
    for i in range(np.shape(test_matrix)[0]):
        y_hat[i] = test_matrix[i, 0] * weights[1, 0] + weights[0, 0]
    accuracy = np.corrcoef(y_hat, test_matrix[:, 1], rowvar=0)[0, 1]
    logging.info("regression accuracy = {0}".format(accuracy))
Python
def generate_full_tree(pth):
    '''
    brief: create the full evolution relationship.
        equivalent to using top-<max species num> in visualizing evolution
    '''
    from html_visual import vis_tree
    data_path = os.path.join(pth, 'species_data')
    rank_info_list = []
    for fn in glob.glob(data_path + '/*'):
        if not fn.endswith('_rank_info.npy'):
            continue
        rank_info_list.append(fn)
    rank_info_list = sorted(rank_info_list,
                            key=lambda x: int(x.split('/')[-1].split('_')[0]))
    last_gen = np.load(rank_info_list[-1]).item()
    max_species_num = len(last_gen['PrtID'])
    all_species_evolution = vis_tree.evolution_graph(data_path + '/',
                                                     k=max_species_num)
    return all_species_evolution
Python
def parent_child_performance(pth):
    ''' plotting parent-child and their performance difference '''
    data_path = os.path.join(pth, 'species_data')
    rank_info_list = []
    for fn in glob.glob(data_path + '/*'):
        if not fn.endswith('_rank_info.npy'):
            continue
        rank_info_list.append(fn)
    rank_info_list = sorted(rank_info_list,
                            key=lambda x: int(x.split('/')[-1].split('_')[0]))
    pc_performance_list = []
    for i, fn in enumerate(rank_info_list):
        if i == 0:
            continue
        curr_data = np.load(fn).item()
        prev_data = np.load(rank_info_list[i - 1]).item()
        # getting all the parent's id
        p_id_list = curr_data['PrtID']
        prev_gen_spc = prev_data['SpcID']
        species_needed = [(curr_data['SpcID'][j], x)
                          for j, x in enumerate(p_id_list)
                          if x in prev_gen_spc]
        # create the reward
        p_r, c_r = [], []
        for pc_pair in species_needed:
            child, parent = pc_pair
            p_r.append(prev_data['AvgRwd'][prev_data['SpcID'].index(parent)])
            c_r.append(curr_data['AvgRwd'][curr_data['SpcID'].index(child)])
        pc_performance_list.append((sum(p_r) / len(p_r), sum(c_r) / len(c_r)))
    # use visdom to plot the line
    win1 = None
    win2 = None
    for i, val in enumerate(pc_performance_list):
        v1, v2 = val
        win1 = visdom_util.viz_line(i, [v1, v2], viz_win=win1,
                                    title='Avg Parent-child performance comparison',
                                    xlabel='Generation',
                                    ylabel='Average Reward',
                                    legend=['Parent', 'Child'])
        win2 = visdom_util.viz_line(i, [(v2 - v1) / v1], viz_win=win2,
                                    title='Drop comparing to parent in percentile',
                                    xlabel='Generation',
                                    ylabel='Drop in percentile',
                                    legend=['Drop'])
    return None
Python
def filter_spc(evolution_graph, spc_id):
    '''
    get all the related nodes wrt spc_id
    includes:
        1. direct parent
        2. all their children
    '''
    return evolution_graph
Python
def init_path():
    ''' function to be called in the beginning of the file '''
    _this_dir = osp.dirname(__file__)
    _base_dir = osp.join(_this_dir, '..')
    add_path(_base_dir)
    return None
Python
def make_symmetric(self, task='fish', discrete=True):
    '''
    make the node that is symmetric w.r.t. the self node
    and return the new node
    '''
    new_attr = model_gen.gen_test_node_attr(
        task=task, node_num=2, discrete_rv=discrete
    )[-1]
    same_attr = ['a_size', 'b_size', 'c_size', 'geom_type', 'joint_range']
    for item in same_attr:
        new_attr[item] = self.attr[item]
    # set certain attributes
    new_attr['u'] = np.pi - self.attr['u']
    while new_attr['u'] < 0:
        new_attr['u'] += 2 * np.pi
    new_attr['v'] = self.attr['v']
    new_attr['axis_x'] = -self.attr['axis_x']
    new_attr['axis_y'] = self.attr['axis_y']
    # create the new node
    new_node = Node(-1, new_attr)
    return new_node
Python
def node_count(node, counter_start=0):
    ''' count the number of nodes in a tree '''
    total_num = counter_start
    parent_list = [node]
    while len(parent_list) != 0:
        node = parent_list.pop(0)
        child_list = node.get_child_list()
        for child in child_list:
            parent_list.append(child)
        total_num = node.set_id(total_num)
    return total_num
Python
def sample_leaf(self, p=0.5):
    '''
    traverse the tree and flip coins on the leaf
    returns a list of leaf nodes that are sampled
    '''
    def find_leaf(node, total_res):
        ''' recursively collect the leaf nodes of the subtree rooted at node '''
        child_list = node.get_child_list()
        if len(child_list) == 0:
            total_res.append(node)
            return total_res
        else:
            for child in child_list:
                total_res = find_leaf(child, total_res)
            return total_res

    all_leaf = find_leaf(self.root, [])
    sampled_leaf = []
    for node in all_leaf:
        if np.random.binomial(1, p) == 1:
            sampled_leaf.append(node)
    return sampled_leaf
Python
def add_sub_tree(self, parent, child):
    '''
    add the child (a node containing subtree) to the parent's child_list
    return: list of id values that is assigned to child
    '''
    # traverse child's subtree and update the id
    prev_node_num = self.total_num
    self.total_num = node_count(child, self.total_num)
    parent.add_child(child)
    return list(range(prev_node_num, self.total_num))
Python
def generate_one_node(self, only_one=False):
    ''' return a list of new nodes being generated '''
    # different policy for adding new hierarchical structure
    if self.allow_hierarchy:
        choice = random.choice(NEW_STRUCT_OPTS)
    else:
        choice = NEW_STRUCT_OPTS[0]
    if self.args.force_symmetric:
        choice = 'l2-symmetry'
    if only_one:
        choice = 'l1-basic'
    if self.args.walker_force_no_sym:
        choice = 'l1-basic'

    new_node_list = []
    if choice == 'l1-basic':
        # adding one basic node
        new_node = Node(-1, model_gen.gen_test_node_attr(task=self.args.task,
                                                         node_num=2,
                                                         discrete_rv=self.discrete)[-1])
        new_node_list.append(new_node)
    elif choice == 'l2-symmetry':
        # adding 2 symmetric nodes
        attr1, attr2 = [
            model_gen.gen_test_node_attr(task=self.args.task,
                                         node_num=2,
                                         discrete_rv=self.discrete)[-1]
            for i in range(2)
        ]
        same_attr = ['a_size', 'b_size', 'c_size', 'geom_type', 'joint_range']
        for item in same_attr:
            attr2[item] = attr1[item]
        # set certain attributes
        attr2['u'] = np.pi - attr1['u']
        while attr2['u'] < 0:
            attr2['u'] += 2 * np.pi
        attr2['v'] = attr1['v']
        attr2['axis_x'] = attr1['axis_x']
        attr2['axis_y'] = -attr1['axis_y']
        # create the new node with corresponding attributes
        node1 = Node(-1, attr1)
        node2 = Node(-1, attr2)
        new_node_list.append(node1)
        new_node_list.append(node2)
    else:
        raise NotImplementedError

    if self.args.force_grow_at_ends:
        for node in new_node_list:
            node.attr['axis_x'] = 1 if node.attr['axis_x'] >= 0 else -1

    return new_node_list
Python
def parse_rank_info(local_dir, topk=5):
    '''
    parse the rank information and select the top-performing species
    return a list of species that we are interested in for generating images
    '''
    data_dir = os.path.join(local_dir, 'species_data')
    generation_num = 0
    selected_species = []
    for filename in glob.glob(data_dir + '/*'):
        if 'rank_info' not in filename:
            continue
        generation_num += 1
        rank_info = np.load(filename).item()
        for i in range(topk):
            selected_species.append(
                (rank_info['SpcID'][i], rank_info['AvgRwd'][i])
            )
    print('Generation examined %d' % generation_num)
    selected_species = sorted(selected_species, key=lambda x: x[1])
    selected_species = set([x[0] for x in selected_species])
    return list(selected_species)
Python
def verify_graph(adj_matrix):
    '''
    check whether the graph is a valid tree for generating the xml model
    '''
    N, _ = adj_matrix.shape
    # node cannot connect to itself
    for i in range(N):
        if adj_matrix[i, i] > 0:
            pdb.set_trace()
            return False
    # the matrix must be symmetric about the diagonal
    for i in range(N):
        for j in range(i, N):
            if adj_matrix[i, j] != adj_matrix[j, i]:
                pdb.set_trace()
                return False
    # each node must have exactly one parent (except for root)
    for i in range(1, N):
        col_i = adj_matrix[:i, i]  # only consider upper triangle of the matrix
        parent_connection = np.where(col_i > 0)[0]
        if len(parent_connection) > 1:
            pdb.set_trace()
            return False
    return True
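A hedged check with a 3-node chain (edges 0-1 and 1-2) encoded as a symmetric adjacency matrix:

    import numpy as np

    adj = np.array([[0, 1, 0],
                    [1, 0, 1],
                    [0, 1, 0]])
    print(verify_graph(adj))   # True: no self loops, symmetric, one parent per node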
Python
def perturb_local(node_attr, perturb_geom=False, discrete=True):
    ''' perform local perturbation according to current attributes '''
    new_attr = copy.deepcopy(node_attr)
    # u, v for specifying position relative to parents
    new_attr['u'] = node_attr['u'] + \
        model_gen_util.gaussian_noise(0, np.pi / 6, discrete, np.pi / 2.0)
    new_attr['v'] = node_attr['v'] + \
        model_gen_util.gaussian_noise(0, np.pi / 6, discrete, np.pi / 2.0)
    new_attr['u'] = float(np.clip(new_attr['u'], 1e-8, 2 * np.pi - 1e-8))
    new_attr['v'] = float(np.clip(new_attr['v'], 1e-8, np.pi - 1e-8))
    # x, y coordinate for child's relative frame
    new_attr['axis_x'] = node_attr['axis_x'] + \
        model_gen_util.gaussian_noise(0, 0.1, discrete, 0.2)
    new_attr['axis_y'] = node_attr['axis_y'] + \
        model_gen_util.gaussian_noise(0, 0.1, discrete, 0.2)
    new_attr['axis_x'] = float(np.clip(new_attr['axis_x'], 0.2, 1.0 - 1e-8))
    new_attr['axis_y'] = float(np.clip(new_attr['axis_y'], 0.2, 1.0 - 1e-8))
    # a, b, c child size
    new_attr['a_size'] = node_attr['a_size'] + \
        model_gen_util.gaussian_noise(0, 0.002, discrete, 0.002)
    new_attr['b_size'] = node_attr['b_size'] + \
        model_gen_util.gaussian_noise(0, 0.002, discrete, 0.002)
    new_attr['c_size'] = node_attr['c_size'] + \
        model_gen_util.gaussian_noise(0, 0.002, discrete, 0.002)
    new_attr['a_size'] = float(np.clip(new_attr['a_size'], 0.002, 0.03))
    new_attr['b_size'] = float(np.clip(new_attr['b_size'], 0.002, 0.03))
    new_attr['c_size'] = float(np.clip(new_attr['c_size'], 0.002, 0.03))
    # joint range
    new_attr['joint_range'] = new_attr['joint_range'] + \
        model_gen_util.gaussian_noise(0, 10, discrete, 10)
    new_attr['joint_range'] = int(np.clip(new_attr['joint_range'], 30, 90))
    return new_attr
Python
def perturb_attr(node_attr, perturb_geom=False, perturb_discrete=True):
    '''
    brief: perturb the 7D parameters
    NOTE: some basic stuff
        1. consider whether the perturbation physically makes sense
           (the perturbation will always make sense if we have reasonable
           upper and lower bounds)
    '''
    N = len(node_attr)
    # sample the number of nodes to perturb attribute
    num_perturb = random.randint(1, N - 1)
    node_list = list(
        np.random.choice(list(range(1, N)), num_perturb, replace=False)
    )
    for random_node in node_list:
        # independently sample a new set of attributes
        new_attr = perturb_local(node_attr[random_node],
                                 discrete=perturb_discrete)
        # replace the current attributes with the latest
        node_attr[random_node] = new_attr
    return node_attr
Python
def perturb_one_local(node_attr, task='fish', perturb_geom=False, discrete=True):
    ''' perform local perturbation according to current attributes '''
    new_attr = copy.deepcopy(node_attr)
    if 'fish' in task:
        constraint = species_info.CREATURE_HARD_CONSTRAINT['fish']
    elif 'walker' in task:
        constraint = species_info.CREATURE_HARD_CONSTRAINT['walker']
    elif 'cheetah' in task:
        constraint = species_info.CREATURE_HARD_CONSTRAINT['cheetah']
    elif 'hopper' in task:
        constraint = species_info.CREATURE_HARD_CONSTRAINT['hopper']
    attr_list = ['u', 'v', 'axis_x', 'axis_y',
                 'a_size', 'b_size', 'c_size', 'joint_range']
    for attr in attr_list:
        low, high = constraint[attr]
        # ideally this should be a hyperparameter to tune
        step_size = (high - low) / 6
        new_attr[attr] = node_attr[attr] + \
            model_gen_util.gaussian_noise(0, step_size / 2, discrete, step_size)
        new_attr[attr] = float(np.clip(new_attr[attr], low, high))
        if attr == 'joint_range':
            new_attr[attr] = int(new_attr[attr])
    return new_attr
Python
def perturb_one_attr(node_attr, task='fish', perturb_geom=False,
                     perturb_discrete=True):
    '''
    brief: perturb the 7D parameters
    NOTE: some basic stuff
        1. consider whether the perturbation physically makes sense
           (the perturbation will always make sense if we have reasonable
           upper and lower bounds)
    '''
    new_attr = perturb_one_local(node_attr, task=task,
                                 discrete=perturb_discrete)
    return new_attr
Python
def render(pixel):
    ''' the function used to render the simulation '''
    return
Python
def add_mujoco_options(root, options):
    '''
    add default and options
    QUESTION: a lot of the options of the model are environment specific;
        should we create a separate function for loading different
        environments based on the options?
    '''
    try:
        density = str(options['density'])
    except:
        density = '5000'
    try:
        timestep = str(options['timestep'])
    except:
        timestep = '0.004'
    option = etree.Element('option', timestep=timestep, density=density)
    flag = etree.Element('flag', gravity='disable', constraint='disable')
    option.append(flag)
    root.append(option)

    # some defaults for fish
    default_sec = etree.Element('default')
    general = etree.Element('general', ctrllimited='true')
    default_sec.append(general)
    # joints and geom defaults for fish
    fish_default = etree.fromstring('<default class="fish"></default>')
    fish_default_joint = etree.Element('joint', type='hinge', limited='false',
                                       range='-60 60', damping='2e-5',
                                       solreflimit='.1 1',
                                       solimplimit='0 .8 .1')
    fish_default_geom = etree.Element('geom', material='self')
    fish_default.append(fish_default_joint)
    fish_default.append(fish_default_geom)
    default_sec.append(fish_default)
    root.append(default_sec)
    return root
Python
def rotation_matrix(axis, angle):
    ''' rotation matrix generated by angle around axis '''
    if not (angle >= 0 and angle <= np.pi):
        pdb.set_trace()
    cos = np.cos(angle)
    sin = np.sin(angle)
    if axis == 'x':
        R = np.array([[1,   0,    0],
                      [0, cos, -sin],
                      [0, sin,  cos]])
    elif axis == 'y':
        R = np.array([[ cos, 0, sin],
                      [   0, 1,   0],
                      [-sin, 0, cos]])
    elif axis == 'z':
        R = np.array([[cos, -sin, 0],
                      [sin,  cos, 0],
                      [  0,    0, 1]])
    else:
        raise RuntimeError('given axis not available')
    return R
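A hedged sanity check: rotating the x unit vector by 90 degrees about z should give the y unit vector.

    import numpy as np

    R = rotation_matrix('z', np.pi / 2)
    print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))   # [0. 1. 0.]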
Python
def euler_rotation(angle1, angle2, angle3):
    '''
    return the rotation matrix as described by euler angle
        angle1: rotate around z axis
        angle2: rotate around y' axis
        angle3: rotate around z'' axis
    '''
    R = np.zeros((3, 3))
    R_z = rotation_matrix('z', angle1)
    R_y = rotation_matrix('y', angle2)
    R = np.matmul(R_z, R_y)
    R_z = rotation_matrix('z', angle3)
    R = np.matmul(R, R_z)
    return R
Python
def ellipsoid_line_intersection(a, b, c, dx, dy, dz):
    '''
    input:
        1. a, b, c: params to specify an ellipsoid (centered at origin)
        2. dx, dy, dz: direction vector to denote a line at origin
    output:
        1. the point coordinate where the line intersects the ellipsoid
           that is in the direction of the line vector
    NOTE:
        1. there is guarantee that there will be intersection between the
           line and the ellipsoid
        2. it seems that the solver will always return the neg result first
           [negative result, positive result]
           Thus we need to further determine which one is closer to the
           direction vector and use that as the joint pivot point
    '''
    t = Symbol('t')
    # plug the line equation p(t) = (dx * t, dy * t, dz * t)
    # into the ellipsoid function
    ellipsoid = (dx * t)**2 / a**2 + (dy * t)**2 / b**2 + (dz * t)**2 / c**2 - 1
    res = solve(ellipsoid, t)
    one_res = res[1].evalf()
    return float(one_res) * np.array([dx, dy, dz])
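A hedged check with the unit sphere (a = b = c = 1) and the x direction; the snippet above also relies on sympy's Symbol and solve being imported in its module.

    import numpy as np

    print(ellipsoid_line_intersection(1.0, 1.0, 1.0, 1.0, 0.0, 0.0))
    # expected: approximately [1. 0. 0.] -- the positive root of t**2 - 1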
Python
def homogeneous_inverse(H):
    ''' given a homogeneous transformation, find its inverse '''
    R = H[0:3, 0:3]
    d = H[0:3, 3]
    invH = np.zeros((4, 4))
    invH[0:3, 0:3] = R.T
    invH[0:3, 3] = -np.matmul(R.T, d)
    invH[3, 3] = 1
    return invH
Python
def homogeneous_transform(R, d):
    '''
    return the homogeneous transformation given the rotation matrix
    and translation vector
    '''
    H = np.zeros((4, 4))
    H[0:3, 0:3] = R
    H[0:3, 3] = d
    H[3, 3] = 1
    return H
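A hedged round-trip check, assuming the rotation_matrix, homogeneous_transform and homogeneous_inverse helpers above are all in scope: the product of a transform and its inverse should be the 4x4 identity.

    import numpy as np

    R = rotation_matrix('z', np.pi / 4)        # any valid rotation
    d = np.array([1.0, 2.0, 3.0])              # any translation
    H = homogeneous_transform(R, d)
    print(np.allclose(np.matmul(H, homogeneous_inverse(H)), np.eye(4)))   # True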
Python
def homogeneous_representation(p):
    ''' return the homogeneous representation of point p of dim 3 '''
    P = np.hstack((p, np.array(1)))
    return P
Python
def ellipsoid_normal_vec(a, b, c, p):
    '''
    return the unit normal vector of an ellipsoid at origin with
    param [a, b, c] at point p
    '''
    # verify point is on the ellipsoid
    x, y, z = p.tolist()
    assert np.isclose(1, x**2 / a**2 + y**2 / b**2 + z**2 / c**2, atol=1e-6)
    # equivalent to the partial derivative of the ellipsoid function
    vec_x = 2 * x / a**2
    vec_y = 2 * y / b**2
    vec_z = 2 * z / c**2
    vec = np.array([vec_x, vec_y, vec_z])
    return vec / np.linalg.norm(vec)
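A hedged example on the unit sphere, where the surface normal at a point is the point itself:

    import numpy as np

    p = np.array([0.0, 0.0, 1.0])
    print(ellipsoid_normal_vec(1.0, 1.0, 1.0, p))   # [0. 0. 1.]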
Python
def angle_between(vec1, vec2):
    ''' return the value in radians and in degrees '''
    theta = np.arccos(
        np.matmul(vec1.T, vec2) /
        (1e-8 + np.linalg.norm(vec1) * np.linalg.norm(vec2))
    )
    return (theta, theta * RAD_TO_DEG)
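A hedged check with perpendicular vectors; RAD_TO_DEG is assumed to be a module-level constant equal to 180 / pi.

    import numpy as np

    RAD_TO_DEG = 180.0 / np.pi   # assumption about the constant used above
    rad, deg = angle_between(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
    print(round(deg))            # 90 (up to the small 1e-8 regularizer)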
Python
def ellipsoid_boundary_pt(a, b, c):
    '''
    given [a, b, c] which specifies an ellipsoid at origin
    return the 6 boundary points' coordinate
    '''
    x1 = np.array([a, 0, 0])
    x2 = np.array([-a, 0, 0])
    y1 = np.array([0, b, 0])
    y2 = np.array([0, -b, 0])
    z1 = np.array([0, 0, c])
    z2 = np.array([0, 0, -c])
    return [x1, x2, y1, y2, z1, z2]
Python
def add_mujoco_actuator(root, adj_matrix):
    ''' essentially, all the joints are actuators. '''
    dfs_order = model_gen_util.dfs_order(adj_matrix)
    actuator = etree.Element('actuator')
    for edge in dfs_order:
        edge_x = '%s_x' % edge
        edge_y = '%s_y' % edge
        edge_z = '%s_z' % edge
        edges = [edge_x, edge_y, edge_z]
        positions = [etree.Element('position', name=item, joint=item,
                                   ctrlrange='-1 1', kp='5e-4')
                     for item in edges]
        for item in positions:
            actuator.append(item)
    root.append(actuator)
    return root
Python
def add_worldbody(root, adj_matrix, node_attr_list):
    ''' parse the adj_matrix to get the tree structure '''

    def worldbody_preliminary(worldbody):
        ''' setting up cameras '''
        geom1 = etree.Element('geom', name='floor', type='plane',
                              conaffinity='1', pos='248 0 0',
                              size='250 .8 .2', material='grid',
                              zaxis='0 0 1')
        worldbody.append(geom1)
        return worldbody

    def torso_preliminary(torso):
        ''' copying from the dm_control fish default settings '''
        light = etree.Element('light', name="light", mode='trackcom')
        torso.append(light)
        camera1 = etree.Element('camera', name='side', pos='0 -2 0.7',
                                euler='60 0 0', mode='trackcom')
        camera2 = etree.Element('camera', name='back', pos='-5 0 0.5',
                                xyaxes='0 -1 0 1 0 3', mode='trackcom')
        torso.append(camera1)
        torso.append(camera2)
        torso_size = '0.07 0.3'
        torso_geom = etree.Element('geom', name="torso", size=torso_size)
        torso.append(torso_geom)
        # free_joint = etree.fromstring('<joint name="rooty" type="free" stiffness="0" limited="false" armature="0" damping="0"/>')
        torso_z = etree.fromstring('<joint name="rootz" type="slide" axis="0 0 1" limited="false" armature="0" damping="0"/>')
        torso_x = etree.fromstring('<joint name="rootx" type="slide" axis="1 0 0" limited="false" armature="0" damping="0"/>')
        torso_y = etree.fromstring('<joint name="rooty" type="hinge" axis="0 1 0" limited="false" armature="0" damping="0"/>')
        # NOTE: this order is mandatory for this particular task.
        # the bug may be caused by dm_control suite
        torso.append(torso_z)
        torso.append(torso_x)
        torso.append(torso_y)
        return torso

    ################ START PARSING THE WORLDBODY ################
    worldbody = etree.Element('worldbody')
    worldbody = worldbody_preliminary(worldbody)

    # parse the matrix
    N, _ = adj_matrix.shape
    body_dict = {}
    info_dict = {}  # log the needed information given a node

    # the root of the model is always fixed
    body_root = etree.Element('body', name='torso', pos='0 0 1.3',
                              childclass='walker')
    body_root = torso_preliminary(body_root)
    root_info = {}
    root_info['a_size'] = node_attr_list[0]['a_size']
    root_info['b_size'] = node_attr_list[0]['b_size']
    root_info['c_size'] = node_attr_list[0]['c_size']
    # for determining the center of the capsule relative to the body joint
    root_info['center_rel_pos'] = 0
    info_dict[0] = root_info
    body_dict[0] = body_root

    # initialize the parent list to go through the entire matrix
    parent_list = [0]
    while len(parent_list) != 0:
        parent_node = parent_list.pop(0)
        parent_row = np.copy(adj_matrix[parent_node])
        for i in range(parent_node + 1):
            parent_row[i] = 0
        child_list = np.where(parent_row)[0].tolist()

        while True:
            try:
                child_node = child_list.pop(0)
            except:
                break
            # parent-child relationship
            # print('P-C relationship:', parent_node, child_node)
            node_attr = node_attr_list[child_node]
            node_name = 'node-%d' % (child_node)

            # this is parent's ellipsoid information
            parent_info = info_dict[parent_node]
            a_parent = parent_info['a_size']
            b_parent = parent_info['b_size']
            c_parent = parent_info['c_size']
            center_rel_pos = parent_info['center_rel_pos']

            # getting node attributes from the list
            u = node_attr['u']            # using these 2 values for determining
            v = node_attr['v']            # the range of the joint
            axis_x = node_attr['axis_x']  # used to determine the relative position
            axis_y = node_attr['axis_y']  # w.r.t the parent capsule
            a_child = node_attr['a_size']  # using this as the capsule radius
            b_child = node_attr['b_size']  # using this as the capsule h
            c_child = node_attr['c_size']

            # compute the translational and rotational matrix
            child_info = {}  # store attributes that define the child's geom
            child_info['a_size'] = a_child
            child_info['b_size'] = b_child
            child_info['c_size'] = c_child

            # set the stitching point relative to parent
            a = min([node_attr['axis_x'], node_attr['axis_y']])
            b = max([node_attr['axis_x'], node_attr['axis_y']])
            stitch_ratio = node_attr['axis_x'] / 1
            if not (stitch_ratio <= 1.01 and stitch_ratio >= -1.01):
                import pdb; pdb.set_trace()
            stitch_pt = b_parent * stitch_ratio + center_rel_pos
            body_pos = '0 0 %f' % (stitch_pt)  # body translation
            if node_attr['axis_x'] * node_attr['axis_y'] >= 0:
                geom_pos = '0 0 -%f' % (b_child)
                child_info['center_rel_pos'] = -b_child
            else:
                geom_pos = '0 0 %f' % (b_child)
                child_info['center_rel_pos'] = b_child
            joint_pos = '0 0 0'

            # now create the body
            body_child = etree.Element('body', name=node_name, pos=body_pos)

            # add geom
            geom_type = 2  # for all planar creatures, we use capsule
            capsule_size = '%f %f' % (a_child, b_child)
            geom = etree.Element('geom', name=node_name, pos=geom_pos,
                                 size=capsule_size)
            body_child.append(geom)

            # add joints
            joint_type = adj_matrix[parent_node, child_node]
            joint_axis = model_gen_util.get_encoding(joint_type)
            joint_range = node_attr['joint_range']
            range1 = node_attr['u'] / (2.0 * np.pi) * -90
            range2 = node_attr['v'] / (1.0 * np.pi) * 60 + 1
            range2 = range1 + 90 + 1
            if joint_axis[0] == 1:
                x_joint = etree.fromstring(
                    "<joint name='%d-%d_x' axis='0 -1 0' pos='%s' range='%d %d'/>" %
                    (parent_node, child_node, joint_pos, range1, range2))
                body_child.append(x_joint)
            if joint_axis[1] == 1:
                y_joint = etree.fromstring(
                    "<joint name='%d-%d_y' axis='0 -1 0' pos='%s' range='%d %d'/>" %
                    (parent_node, child_node, joint_pos, range1, range2))
                body_child.append(y_joint)
            if joint_axis[2] == 1:
                z_joint = etree.fromstring(
                    "<joint name='%d-%d_z' axis='0 -1 0' pos='%s' range='%d %d'/>" %
                    (parent_node, child_node, joint_pos, range1, range2))
                body_child.append(z_joint)

            # need to add 2 sites as sensory inputs
            site1_pos = '0 0 0'
            site2_pos = '0 0 %f' % (2 * child_info['center_rel_pos'])
            site1 = etree.Element('site', name='touch1-%s' % (node_name),
                                  pos=site1_pos)
            site2 = etree.Element('site', name='touch2-%s' % (node_name),
                                  pos=site2_pos)
            body_child.append(site1)
            body_child.append(site2)

            # logging the information
            body_dict[parent_node].append(body_child)
            body_dict[child_node] = body_child
            # register child's body struct in case it has child
            info_dict[child_node] = child_info
            # child becomes the parent for further examination
            parent_list.append(child_node)

    worldbody.append(body_dict[0])
    root.append(worldbody)
    return root
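A minimal driver sketch for the generator above. It only assumes the attribute keys read inside the function and an importable `model_gen_util.get_encoding`; the joint-type value and all numbers are illustrative placeholders, not values from the original project.

# Hypothetical usage of add_worldbody; all values below are made up for illustration.
from lxml import etree
import numpy as np

adj_matrix = np.zeros((2, 2), dtype=int)
adj_matrix[0, 1] = adj_matrix[1, 0] = 1   # placeholder joint type connecting node 0 and 1
node_attr_list = [
    {'a_size': 0.07, 'b_size': 0.3, 'c_size': 0.1, 'u': 0.5, 'v': 0.5,
     'axis_x': 0.5, 'axis_y': 0.5, 'joint_range': 60},
    {'a_size': 0.05, 'b_size': 0.2, 'c_size': 0.1, 'u': 0.5, 'v': 0.5,
     'axis_x': 0.5, 'axis_y': 0.5, 'joint_range': 60},
]
root = etree.Element('mujoco', model='evolved-walker')  # hypothetical model name
root = add_worldbody(root, adj_matrix, node_attr_list)
print(etree.tostring(root, pretty_print=True).decode())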
Python
def add_mujoco_actuator(root, adj_matrix, node_attr):
    ''' essentially, all the joints are actuators. '''
    dfs_order = model_gen_util.dfs_order(adj_matrix)
    actuator = etree.Element('actuator')
    for edge in dfs_order:
        p, c = [int(x) for x in edge.split('-')]
        joint_type = adj_matrix[p, c]
        joint_axis = model_gen_util.get_encoding(joint_type)
        edges = []
        if joint_axis[0] == 1:
            edge_x = '%s_x' % edge
            edges.append(edge_x)
        elif joint_axis[1] == 1:
            edge_y = '%s_y' % edge
            edges.append(edge_y)
        elif joint_axis[2] == 1:
            edge_z = '%s_z' % edge
            edges.append(edge_z)
        c_info = node_attr[c]['c_size']
        gear_value = c_info
        positions = [
            etree.Element('motor', name=item, joint=item, gear='%d' % (gear_value))
            for item in edges
        ]
        for item in positions:
            actuator.append(item)
    root.append(actuator)
    return root
Python
def log_genealogy(self, parent_spc, child_spc):
    ''' log a parent-child relationship in the overall gene tree
        updates the self.gene_tree structure (a tree stored as a dict of child lists)
    '''
    if parent_spc not in self.gene_tree:
        self.gene_tree[parent_spc] = []
    self.gene_tree[parent_spc].append(child_spc)
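A small illustration of the gene-tree bookkeeping. The `Genealogy` holder class here is a hypothetical stand-in for whatever object actually owns `gene_tree` in the original code base.

class Genealogy(object):
    ''' hypothetical owner of the gene_tree dict used by log_genealogy '''
    def __init__(self):
        self.gene_tree = {}
    log_genealogy = log_genealogy  # reuse the function above as a method

tree = Genealogy()
tree.log_genealogy(0, 1)
tree.log_genealogy(0, 2)
tree.log_genealogy(1, 3)
print(tree.gene_tree)  # {0: [1, 2], 1: [3]}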
Python
def genealogy_with_style(genealogy):
    ''' add more styles such as coloring the connection link in the visualization '''

    def process_node(node):
        ''' '''
        if len(node['children']) == 0:
            return node
        child_r_list = [c['best_r'] for c in node['children']]
        sorted_r = sorted(child_r_list)
        sorted_color = list(Color('black').range_to(Color('black'), len(sorted_r)))
        max_c = max(child_r_list)
        min_c = min(child_r_list)
        for i, child in enumerate(node['children']):
            try:
                color_idx = sorted_r.index(child['best_r'])
                red, green, blue = (x * 255 for x in sorted_color[color_idx].rgb)
                child['level'] = 'rgb(%f %f %f)' % (red, green, blue)
            except:
                child['level'] = 'rgb(128, 128, 128)'
            child = process_node(child)
        return node

    genealogy = process_node(genealogy)
    with open('genealogy.json', 'w') as fh:
        json.dump(genealogy, fh, indent=2)
    return genealogy
Python
def evolution_graph(pth, k=5):
    ''' create a graph for visualizing how evolution takes place
        input:
            1. pth should be .../evolution_data/<some training session>/species_data
               where all the <gen num>_rank_info.npy files are stored
        output:
            1. a dictionary that can serve as a json describing the evolution progress
    '''
    def build_node(spc_id, reward, info=None):
        ''' '''
        node = {}
        node['name'] = spc_id
        node['reward'] = reward
        node['children'] = []
        node['info'] = info
        return node

    def build_tree(parent):
        return

    evolution = {}
    # get all the generation ranking info
    gen_list = []
    for filename in glob.glob(pth + '/*'):
        if not filename.endswith('rank_info.npy'):
            continue
        gen = int(filename.split('/')[-1].split('_')[0])
        data = np.load(filename).item()
        gen_list.append((gen, data))
    gen_list = sorted(gen_list, key=lambda x: x[0])

    # create the actual graph
    evolution = build_node(-1, -233, info='other')
    evolution['rank'] = 0
    prev_gen = [evolution]
    gen_pointer = 0

    # for gen_data in gen_list:
    while gen_pointer < len(gen_list):
        gen_data = gen_list[gen_pointer]
        gen_pointer += 1
        i_gen, gen_data = gen_data
        other_node = build_node(-2, -233, info='other@gen:%d' % i_gen)
        prev_gen[0]['children'].append(other_node)
        cur_gen = [other_node]

        for i in range(k):
            if i >= len(gen_data['SpcID']):
                continue
            spc_id = gen_data['SpcID'][i]
            reward = gen_data['AvgRwd'][i]
            p_spc = gen_data['PrtID'][i]
            node = build_node(spc_id, reward)
            cur_gen.append(node)
            try:
                prev_gen_spc = [x['name'] for x in prev_gen]
                p_pos = prev_gen_spc.index(spc_id)
                # node['level'] = 'yellow'
            except:
                try:
                    prev_gen_spc = [x['name'] for x in prev_gen]
                    p_pos = prev_gen_spc.index(p_spc)
                except:
                    p_pos = 0  # the 'other' node
            # import pdb; pdb.set_trace()
            prev_gen[p_pos]['children'].append(node)

        # add more styles to the current generation
        sorted_r = sorted(x['reward'] for x in cur_gen[1:])
        sorted_color = list(Color('black').range_to(Color("black"), len(sorted_r)))
        max_r = max([x['reward'] for x in cur_gen[1:]])
        min_r = min([x['reward'] for x in cur_gen[1:]])
        for i, x in enumerate(cur_gen):
            if x['info'] is None:
                x['info'] = 'Spc:%d-R:%.2f' % (x['name'], x['reward'])
            x['rank'] = i  # rank for plotting the tree, no actual meaning
            if 'level' in x:
                continue
            try:
                color_idx = sorted_r.index(x['reward'])
                red, green, blue = (x * 255 for x in sorted_color[color_idx].rgb)
                x['level'] = 'rgb(%f %f %f)' % (red, green, blue)
            except:
                x['level'] = 'rgb(128, 128, 128)'

        # representing all the other nodes
        # other_node = build_node(-2, -233, info='other')
        # prev_gen[-1]['children'].append(other_node)
        # cur_gen.append(other_node)
        # import pdb; pdb.set_trace()
        prev_gen = cur_gen
        pass

    with open('evolution.json', 'w') as fh:
        json.dump(evolution, fh, indent=2)
    return evolution
Python
def evolution_add_image(pth, evolution):
    ''' add images to the evolution tree
        input:
            1. pth contains all the images
            2. evolution tree without images
    '''
    def find_image(pth, name):
        ''' '''
        # search in the pth for the corresponding image
        found = False
        for filename in glob.glob(pth + '/*'):
            abs_pth = filename
            if not filename.endswith('.png'):
                continue
            if 'species_data' in filename:
                gen, spc = filename.split('/')[-1].split('.')[0].split('_')
            elif 'species_topology' in filename:
                spc = filename.split('/')[-1].split('.')[0]
            if int(spc) != name:
                continue
            found = True
            return abs_pth
        if not found:
            return None
        else:
            assert 0
        return None

    if pth is None:
        print('Not adding images for evolution')
        return evolution

    cur_ptr = evolution
    parent_list = [evolution]
    while len(parent_list) > 0:
        item = parent_list.pop()
        for child in item['children']:
            if child['name'] == item['name']:
                child['icon'] = None
        if 'icon' not in item:
            image_path = find_image(pth, item['name'])
            item['icon'] = image_path
        parent_list += item['children']

    with open('evolution.json', 'w') as fh:
        json.dump(evolution, fh, indent=4)
    return evolution
Python
def trans_PCA(orig_data, num_components=1):
    ''' convert original data to a PCA representation
        input:
            1. orig_data - (num_samples, num_features) numpy array
            2. num_components - number of components kept in the output
        output:
            1. pca_data - (num_samples, num_components)
    '''
    pca = PCA(n_components=num_components)
    pca.fit(orig_data)
    pca_data = pca.transform(orig_data)
    return pca_data, pca
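A quick sanity check of the PCA wrapper; it only assumes numpy and the `sklearn.decomposition.PCA` class the function already relies on.

import numpy as np
from sklearn.decomposition import PCA  # same dependency the function uses

rng = np.random.RandomState(0)
orig_data = rng.randn(100, 5)               # 100 samples, 5 features
pca_data, pca = trans_PCA(orig_data, num_components=2)
print(pca_data.shape)                       # (100, 2)
print(pca.explained_variance_ratio_)        # variance captured by each component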
Python
def gen_gnn_param(task_name, adj_mat, node_attr_list,
                  gnn_node_option='nG,yB',
                  root_connection_option='nN, Rn, sE',
                  gnn_output_option='shared',
                  gnn_embedding_option='parameter'):
    ''' this function resembles the functionality of parse_mujoco_graph
        in mujoco_parser.py
        WARNING: this is subject to the evolution fish3d environment only,
        which has the following features
            1. the mujoco model is deterministically generated from adj_mat
               and node_attr_list
            2. the structure of the model follows
                a. body-body connection is through 3 joints
                b. the root body has 3 joints
    '''
    def actuator_order(adj_mat):
        ''' given the connection information, generate the order of the actuators
            NOTE: this should be consistent with model_gen.py -> add_mujoco_actuator()
        '''
        N, _ = adj_mat.shape
        connection_list = []
        for i in range(N):
            for j in range(i, N):
                node_edge = '%d-%d' % (i, j)
                connection_list.append(node_edge)
        return connection_list

    # step 1: directly adopt the relation_matrix from the current adj_matrix
    connection_list = model_gen_util.dfs_order(adj_mat)
    relation_matrix = np.zeros(adj_mat.shape)
    relation_matrix[adj_mat > 0] = 2
    # step 2: tree structure parsed from the adj_mat
    tree, node_type_dict = _get_tree_struct(adj_mat, node_attr_list)
    # step 3: map the input_list
    input_dict, ob_size = _get_input_dict(tree, adj_mat, task_name)
    # step 4: map the action_list
    output_list, output_type_dict, action_size = _get_output_dict(
        tree, adj_mat, gnn_output_option, task_name
    )
    # step 5: get the node parameters
    node_param, param_size_dict = _append_node_attr(tree, adj_mat, node_attr_list)
    debug_info = {'ob_size': ob_size, 'action_size': action_size}

    # post_process, if using noninput_embedding
    if gnn_embedding_option == 'noninput_separate':
        node_param['root'] = np.reshape(np.array(0, dtype=np.int), [1, 1])
        node_param['joint'] = np.reshape(
            np.array(range(1, 1 + len(node_type_dict['joint'])), dtype=np.int),
            [-1, 1]
        )
        param_size_dict = {'joint': 1, 'root': 1}
        # from util import fpdb; fpdb.fpdb().set_trace()
    elif gnn_embedding_option == 'noninput_shared':
        assert False
        node_param['root'] = np.reshape(np.array(0, dtype=np.int), [1, 1])
        node_param['joint'] = np.reshape(
            np.ones(len(node_type_dict['joint']), dtype=np.int), [-1, 1]
        )
        param_size_dict = {'joint': 1, 'root': 1}
    else:
        assert gnn_embedding_option == 'parameter'
    # from util import fpdb; fpdb = fpdb.fpdb(); fpdb.set_trace()

    return dict(tree=tree,
                relation_matrix=relation_matrix,
                node_type_dict=node_type_dict,
                output_type_dict=output_type_dict,
                input_dict=input_dict,
                output_list=output_list,
                debug_info=debug_info,
                node_parameters=node_param,
                para_size_dict=param_size_dict,
                num_nodes=len(tree))
Python
def actuator_order(adj_mat):
    ''' given the connection information, generate the order of the actuators
        NOTE: this should be consistent with model_gen.py -> add_mujoco_actuator()
    '''
    N, _ = adj_mat.shape
    connection_list = []
    for i in range(N):
        for j in range(i, N):
            node_edge = '%d-%d' % (i, j)
            connection_list.append(node_edge)
    return connection_list
Python
def evo_ob_size_dict(node_info):
    ''' for each node type, collect the ob size
        for the evolution fish environment, everything should be fixed
    '''
    raise NotImplementedError
    node_info['ob_size_dict'] = {}
    for node_type in node_info['node_type_dict']:
        node_id_list = node_info['node_type_dict'][node_type]
        node_ob_size = [
            len(node_info['input_dict'][node_id]) for node_id in node_id_list
        ]
        # assumes all the nodes of the same type have the same ob size
        assert node_ob_size.count(node_ob_size[0]) == len(node_ob_size), \
            logger.error('Nodes (type {}) have wrong ob size: {}!'.format(
                node_type, node_ob_size
            ))
        node_info['ob_size_dict'][node_type] = \
            len(node_info['input_dict'][node_id_list[0]])
    return node_info
Python
def vectorize_ellipsoid(a, b, c, u, v):
    ''' ellipsoid parameterization
        use 2 angles to determine a point on the ellipsoid
        return the (x, y, z) coordinate of the point
    '''
    x = float(a * np.cos(u) * np.sin(v))
    y = float(b * np.sin(u) * np.sin(v))
    z = float(c * np.cos(v))
    return (x, y, z)
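Two spot checks of the parameterization: v = pi/2 places the point on the equator, and u sweeps around the z axis.

import numpy as np

print(vectorize_ellipsoid(1.0, 2.0, 3.0, u=0.0, v=np.pi / 2))        # (1.0, 0.0, ~0.0)
print(vectorize_ellipsoid(1.0, 2.0, 3.0, u=np.pi / 2, v=np.pi / 2))  # (~0.0, 2.0, ~0.0)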
Python
def dfs_order(adj_mat):
    ''' return the order of parent-child relationships in the tree structure
        described by the adj_matrix
        input:
            1. adj_mat, an N x N matrix in which index 0 is the root
            2. the matrix is symmetric
        output:
            1. a list of '%d-%d' strings describing the order of parent-child
               relationships, generated using dfs
    '''
    def dfs_order_helper(adj_mat, node_id, cur_order):
        ''' '''
        # get child_list
        node_row = np.copy(adj_mat[node_id])
        for i in range(node_id + 1):
            node_row[i] = 0
        child_list = np.where(node_row)[0].tolist()
        for child_node in child_list:
            edge = '%d-%d' % (node_id, child_node)
            cur_order.append(edge)
            dfs_order_helper(adj_mat, child_node, cur_order)
        return cur_order

    # using recursion to solve the problem
    dfs_order = []
    dfs_order = dfs_order_helper(adj_mat, 0, dfs_order)
    return dfs_order
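A minimal example of the traversal on a 4-node tree (root 0 with children 1 and 3, and node 2 hanging off node 1).

import numpy as np

adj_mat = np.zeros((4, 4), dtype=int)
for i, j in [(0, 1), (0, 3), (1, 2)]:
    adj_mat[i, j] = adj_mat[j, i] = 1   # stored symmetrically
print(dfs_order(adj_mat))               # ['0-1', '1-2', '0-3']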
Python
def mirror_mat(mat):
    ''' input:
            1. numpy array matrix of N x N size
        output:
            1. matrix of N x N size
        copy the upper triangle onto the lower triangle so that the matrix
        is symmetric along the diagonal
    '''
    N, _ = mat.shape
    assert N == _, 'Not a square matrix, it cannot be symmetric!'
    for i in range(N):
        for j in range(i, N):
            mat[j, i] = mat[i, j]
    return mat
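A small example; note the function mutates its argument in place and also returns it.

import numpy as np

upper = np.array([[0, 1, 0],
                  [0, 0, 2],
                  [0, 0, 0]])
print(mirror_mat(upper))
# [[0 1 0]
#  [1 0 2]
#  [0 2 0]]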
Python
def evolution_viz_config(parser):
    ''' configs used for drawing generation visualization '''
    # the default server is the new server (with a bigger memory)
    # parser.add_argument('--vis_server', type=str, default='http://18.188.157.25')
    parser.add_argument('--vis_server', type=str, default='http://13.58.242.101')
    parser.add_argument('--vis_port', type=int, default=4214)
    parser.add_argument('--note', type=str, default='')
    parser.add_argument('--mute_info', action='store_true')
    # pca coefficient for the
    parser.add_argument('--pca_size_dist', action='store_true')
    parser.add_argument('--pca_pc_dist', action='store_true')
    parser.add_argument('--pca_range_dist', action='store_true')
    # species-level stats
    parser.add_argument('--body_num_line', action='store_true')
    parser.add_argument('--body_size_line', action='store_true')
    # plotting pca
    parser.add_argument('--pca_size_eigenvalue', action='store_true')
    parser.add_argument('--pca_pc_eigenvalue', action='store_true')
    parser.add_argument('--size_v_dist', action='store_true')
    parser.add_argument('--size_v_line', action='store_true')
    return parser
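Usage is the standard argparse pattern: build a parser, let the helper register the flags, then parse. The flag values passed below are only for illustration.

import argparse

parser = argparse.ArgumentParser(description='evolution visualization')
parser = evolution_viz_config(parser)
args = parser.parse_args(['--pca_size_dist', '--vis_port', '4215'])
print(args.vis_server, args.vis_port, args.pca_size_dist)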
Python
def k_th_evolution(k, rank_dir, vid_dir, vid_fn='test'):
    ''' read the rank_info files in rank_dir and stitch together the videos
        of the k-th best performing species across generations
    '''
    target_vid_list = []
    for fn in glob.glob(rank_dir + '/*'):
        if not fn.endswith('rank_info.npy'):
            continue
        gen = fn.split('/')[-1].split('_')[0]
        data = np.load(fn).item()
        info = {}
        info['gen'] = gen
        info['SpcID'] = data['SpcID'][k]
        info['AvgRwd'] = data['AvgRwd'][k]
        target_vid_list.append((gen, info['SpcID'], info))
    target_vid_list = sorted(target_vid_list, key=lambda x: int(x[0]))

    # video handler (width, height and MAX_LEN are expected to be module-level constants)
    videoh = cv2.VideoWriter(
        str(vid_fn) + '.mp4', cv2.VideoWriter_fourcc(*'mp4v'),
        40, (width * 2, height)
    )

    # according to the order in target_vid_list, find the corresponding video
    print('Examine the video files available')
    for candidate in tqdm(target_vid_list):
        gen, spcid, info = candidate
        vid_text = 'Gen:%s-Spc:%d-R:%.2f' % (gen, spcid, info['AvgRwd'])
        video_exist = False
        for vid_file in glob.glob(vid_dir + '/*'):
            if not vid_file.endswith('5.mp4'):
                continue
            file = vid_file.split('/')[-1]
            v_gen = file.split('_')[0]
            v_spc = file.split('_')[1]
            if int(gen) == int(v_gen) and int(v_spc) == int(spcid):
                video_exist = True
                break
            else:
                continue
        if not video_exist:
            continue

        cap = cv2.VideoCapture(vid_file)
        i = 0
        while cap.isOpened() and i < MAX_LEN:
            ret, frame = cap.read()
            try:
                frame = cv2.putText(frame.copy(), vid_text, (30, 50),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (255, 255, 255), 3, cv2.LINE_AA)
            except:
                if frame is None:
                    break
            i += 1
            videoh.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
    videoh.release()
    return
Python
def table_to_dataframe(table, column_levels=1, index_levels=0,
                       collapse_empty_index_levels=True):
    """
    Given a behave table, convert it to a pandas data frame using the
    following rules:

    - valid dtypes must be specified in the table heading
    - 0 or more rows can be used to label columns (a multi index will be
      created if column_levels > 1)
    - 0 or more columns can be used to label rows (a multi index will be
      created if index_levels > 1)
    - For tables with multi indexed columns, row index level names will be
      flattened to a string by default instead of a tuple if needed, unless
      `collapse_empty_index_levels` is set to False.

    :param table: behave.Table
    :param column_levels: int
    :param index_levels: int
    :param collapse_empty_index_levels: bool
    :return: pd.DataFrame
    """
    if (not isinstance(column_levels, int)) or not (0 <= column_levels <= len(table.rows)):
        raise ValueError('Invalid number of column levels requested. '
                         'Max valid number for this table: {}'.format(len(table.rows)))
    if not isinstance(index_levels, int) or not (0 <= index_levels <= len(table.headings)):
        raise ValueError('Invalid number of index levels requested. '
                         'Max valid number for this table: {}'.format(len(table.headings)))

    dtypes = _get_dtypes(table.headings)
    columns = _get_column_index(table.rows[:column_levels], len(table.headings))
    data = [_convert_row_to_correct_type(row, dtypes) for row in table.rows[column_levels:]]
    bycol = list(zip(*data))
    if len(bycol) == 0:
        bycol = [None for col in columns]
    series = [
        pd.Series(col_data, dtype=dtype, name=col_name)
        for (col_name, col_data, dtype) in zip(columns, bycol, dtypes)
    ]
    df = pd.concat(series, axis=1)
    if index_levels > 0:
        index_cols = columns[:index_levels]
        df.set_index(index_cols, inplace=True)
        df.index.names = _flatten_index_names_if_needed(
            collapse_empty_index_levels, column_levels, index_cols)
    return df
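A hedged sketch of how this would typically be called from a behave step: `context.table` is the table attached to the step in the feature file, and the heading/dtype convention expected by `_get_dtypes` is assumed rather than shown here.

from behave import given

@given('the following measurements')
def step_collect_measurements(context):
    # context.table is the behave.Table attached to the step;
    # headings are assumed to carry the dtypes expected by _get_dtypes.
    df = table_to_dataframe(context.table, column_levels=1, index_levels=1)
    context.measurements = df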
Python
def connection_made(self, transport: Any) -> None:
    """
    Called when a connection is made.
    """
    self._transport = transport
Python
def retry(times=5):
    """A decorator which retries tasks after a SystemExit exception has been
    thrown. This combats the fail-fast nature of Fabric in hopes of recovering
    a remote operation."""
    def real_retry(func):
        func.attempts = 0
        func.attempts_max = times

        @wraps(func)
        def wrapped(*args, **kwargs):
            while func.attempts < func.attempts_max:
                try:
                    return func(*args, **kwargs)
                except SystemExit:
                    func.attempts += 1
            # If we've reached this point, blanket raise whatever exception
            # brought us here.
            raise
        return wrapped
    return real_retry
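A minimal usage sketch, assuming the Fabric 1.x API this decorator was written against; the task body and service name are placeholders.

from fabric.api import task, sudo  # assumed Fabric 1.x style imports

@task
@retry(times=3)
def restart_service():
    # run()/sudo() may abort with SystemExit on a flaky connection;
    # the decorator retries the task up to 3 times before giving up.
    sudo('service myapp restart')  # 'myapp' is a placeholder service name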
Python
def make_request(self, url, payload, method):
    """
    Invoke url and return a python request object.
    :param url:
    :param payload:
    :param method:
    :return response (obj):
    """
    if self.timeout:
        return requests.request(method, url, headers=self.headers,
                                json=payload, timeout=self.timeout)
    else:
        return requests.request(method, url, headers=self.headers, json=payload)
Python
def deactivate_subscription(self, data):
    """
    Deactivate Subscription: This method deactivates a subscription for the
    subscriber. A status notification is sent to the subscriber.
    :param data:
    :return response:
    """
    expected_keys = ["address", "productId"]
    payload = process_data(expected_keys, data)
    url = URL[self.env][self.version]["deactivate_subscription"]
    r = self.make_request(url, payload, "POST")
    if r.status_code != 200:
        logger.error("Deactivate Subscription has not been completed")
    response = r.json()
    return response
Python
def extract(text, restrict_xpaths=None, base_url=None):
    """
    Returns hyperlink.URL objects extracted from the given text.
    """
    if isinstance(text, six.binary_type):
        _io = six.BytesIO(text)
    elif isinstance(text, (six.string_types, six.text_type)):
        _io = six.StringIO(text)
    else:
        raise RuntimeError('Unsupported text type %s' % type(text))
    doc = html.parse(_io, base_url=base_url)
    return extract_from_doc(doc, restrict_xpaths)
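A small usage sketch; `extract_from_doc` is not shown above, so the exact return shape is an assumption based on the docstring (an iterable of hyperlink.URL objects for the anchors found).

html_text = b'<html><body><a href="/docs">docs</a> <a href="http://example.com/">home</a></body></html>'
links = extract(html_text, base_url='http://example.com')
for link in links:
    print(link)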
Python
def update(data):
    """Update the data in place to remove deprecated properties.

    Args:
        data (dict): dictionary to be updated

    Returns:
        True if data was changed, False otherwise
    """
    if 'include' in data:
        msg = ("included configuration files should be updated manually"
               " [files={0}]")
        warnings.warn(msg.format(', '.join(data['include'])))
    if 'packages' in data:
        return spack.schema.packages.update(data['packages'])
    return False
Python
def post_install(self):
    """Run after install to fix the install name of dynamic libraries on
    Darwin to have the full path, and to install the LICENSE file."""
    spec = self.spec
    prefix = self.spec.prefix
    if sys.platform == 'darwin':
        fix_darwin_install_name(prefix.lib)
    if spec.satisfies('@:3.0.0'):
        install('LICENSE', prefix)
Python
def filter_compilers(self):
    """Run after install to tell the example program Makefiles to use the
    compilers that Spack built the package with.

    If this isn't done, they'll have CC, CPP, and F77 set to Spack's generic
    cc and f77. We want them to be bound to whatever compiler they were
    built with."""
    spec = self.spec
    kwargs = {'ignore_absent': True, 'backup': False, 'string': True}
    dirname = os.path.join(self.prefix, 'examples')

    cc_files = [
        'arkode/C_openmp/Makefile', 'arkode/C_parallel/Makefile',
        'arkode/C_parhyp/Makefile', 'arkode/C_petsc/Makefile',
        'arkode/C_serial/Makefile', 'cvode/C_openmp/Makefile',
        'cvode/parallel/Makefile', 'cvode/parhyp/Makefile',
        'cvode/petsc/Makefile', 'cvode/serial/Makefile',
        'cvodes/C_openmp/Makefile', 'cvodes/parallel/Makefile',
        'cvodes/serial/Makefile', 'ida/C_openmp/Makefile',
        'ida/parallel/Makefile', 'ida/petsc/Makefile',
        'ida/serial/Makefile', 'idas/C_openmp/Makefile',
        'idas/parallel/Makefile', 'idas/serial/Makefile',
        'kinsol/C_openmp/Makefile', 'kinsol/parallel/Makefile',
        'kinsol/serial/Makefile', 'nvector/C_openmp/Makefile',
        'nvector/parallel/Makefile', 'nvector/parhyp/Makefile',
        'nvector/petsc/Makefile', 'nvector/pthreads/Makefile',
        'nvector/serial/Makefile', 'sunlinsol/band/Makefile',
        'sunlinsol/dense/Makefile', 'sunlinsol/klu/Makefile',
        'sunlinsol/lapackband/Makefile', 'sunlinsol/lapackdense/Makefile',
        'sunlinsol/pcg/parallel/Makefile', 'sunlinsol/pcg/serial/Makefile',
        'sunlinsol/spbcgs/parallel/Makefile', 'sunlinsol/spbcgs/serial/Makefile',
        'sunlinsol/spfgmr/parallel/Makefile', 'sunlinsol/spfgmr/serial/Makefile',
        'sunlinsol/spgmr/parallel/Makefile', 'sunlinsol/spgmr/serial/Makefile',
        'sunlinsol/sptfqmr/parallel/Makefile', 'sunlinsol/sptfqmr/serial/Makefile',
        'sunlinsol/superlumt/Makefile', 'sunlinsol/superludist/Makefile',
        'sunmatrix/band/Makefile', 'sunmatrix/dense/Makefile',
        'sunmatrix/sparse/Makefile'
    ]

    cxx_files = [
        'arkode/CXX_parallel/Makefile', 'arkode/CXX_serial/Makefile',
        'cvode/cuda/Makefile', 'cvode/raja/Makefile',
        'nvector/cuda/Makefile', 'nvector/raja/Makefile'
    ]

    f77_files = [
        'arkode/F77_parallel/Makefile', 'arkode/F77_serial/Makefile',
        'cvode/fcmix_parallel/Makefile', 'cvode/fcmix_serial/Makefile',
        'ida/fcmix_openmp/Makefile', 'ida/fcmix_parallel/Makefile',
        'ida/fcmix_pthreads/Makefile', 'ida/fcmix_serial/Makefile',
        'kinsol/fcmix_parallel/Makefile', 'kinsol/fcmix_serial/Makefile'
    ]

    f90_files = [
        'arkode/F90_parallel/Makefile', 'arkode/F90_serial/Makefile'
    ]

    f2003_files = [
        'arkode/F2003_serial/Makefile', 'cvode/F2003_serial/Makefile',
        'cvodes/F2003_serial/Makefile', 'ida/F2003_serial/Makefile',
        'idas/F2003_serial/Makefile', 'kinsol/F2003_serial/Makefile'
    ]

    for filename in cc_files:
        filter_file(os.environ['CC'], self.compiler.cc,
                    os.path.join(dirname, filename), **kwargs)

    for filename in cc_files:
        filter_file(r'^CPP\s*=.*', self.compiler.cc,
                    os.path.join(dirname, filename), **kwargs)

    for filename in cxx_files:
        filter_file(os.environ['CXX'], self.compiler.cxx,
                    os.path.join(dirname, filename), **kwargs)

    for filename in cxx_files:
        filter_file(r'^CPP\s*=.*', self.compiler.cc,
                    os.path.join(dirname, filename), **kwargs)

    if ('+fcmix' in spec) and ('+examples' in spec):
        for filename in f77_files:
            filter_file(os.environ['F77'], self.compiler.f77,
                        os.path.join(dirname, filename), **kwargs)

    if ('+fcmix' in spec) and ('+examples' in spec):
        for filename in f90_files:
            filter_file(os.environ['FC'], self.compiler.fc,
                        os.path.join(dirname, filename), **kwargs)

    if ('+f2003' in spec) and ('+examples' in spec):
        for filename in f2003_files:
            filter_file(os.environ['FC'], self.compiler.fc,
                        os.path.join(dirname, filename), **kwargs)
Python
def headers(self):
    """Export the headers and defines of SUNDIALS.
    Sample usage: spec['sundials'].headers.cpp_flags
    """
    # SUNDIALS headers are inside subdirectories, so we use a fake header
    # in the include directory.
    hdr = find(self.prefix.include.nvector, 'nvector_serial.h',
               recursive=False)
    return HeaderList(join_path(self.spec.prefix.include, 'fake.h')) \
        if hdr else None
Python
def libs(self):
    """Export the libraries of SUNDIALS.
    Sample usage: spec['sundials'].libs.ld_flags
                  spec['sundials:arkode,cvode'].libs.ld_flags
    """
    query_parameters = self.spec.last_query.extra_parameters
    if not query_parameters:
        sun_libs = 'libsundials_*[!0-9]'
        # Q: should the result be ordered by dependency?
    else:
        sun_libs = ['libsundials_' + p for p in query_parameters]
    is_shared = '+shared' in self.spec
    libs = find_libraries(sun_libs, root=self.prefix, shared=is_shared,
                          recursive=True)
    return libs or None
Python
def lookup(self, name_or_url):
    """Looks up and returns a Mirror.

    If this MirrorCollection contains a named Mirror under the name
    [name_or_url], then that mirror is returned.

    Otherwise, [name_or_url] is assumed to be a mirror URL, and an
    anonymous mirror with the given URL is returned.
    """
    result = self.get(name_or_url)
    if result is None:
        result = Mirror(fetch_url=name_or_url)
    return result
Python
def create(path, specs, skip_unstable_versions=False):
    """Create a directory to be used as a spack mirror, and fill it with
    package archives.

    Arguments:
        path: Path to create a mirror directory hierarchy in.
        specs: Any package versions matching these specs will be added
            to the mirror.
        skip_unstable_versions: if true, this skips adding resources when
            they do not have a stable archive checksum (as determined by
            ``fetch_strategy.stable_target``)

    Return Value:
        Returns a tuple of lists: (present, mirrored, error)

        * present: Package specs that were already present.
        * mirrored: Package specs that were successfully mirrored.
        * error: Package specs that failed to mirror due to some error.

    This routine iterates through all known package versions, and it creates
    specs for those versions. If the version satisfies any spec in the specs
    list, it is downloaded and added to the mirror.
    """
    parsed = url_util.parse(path)
    mirror_root = url_util.local_file_path(parsed)
    if not mirror_root:
        raise spack.error.SpackError(
            'MirrorCaches only work with file:// URLs')

    # automatically spec-ify anything in the specs array.
    specs = [
        s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s)
        for s in specs]

    # Get the absolute path of the root before we start jumping around.
    if not os.path.isdir(mirror_root):
        try:
            mkdirp(mirror_root)
        except OSError as e:
            raise MirrorError(
                "Cannot create directory '%s':" % mirror_root, str(e))

    mirror_cache = spack.caches.MirrorCache(
        mirror_root, skip_unstable_versions=skip_unstable_versions)
    mirror_stats = MirrorStats()

    # Iterate through packages and download all safe tarballs for each
    for spec in specs:
        mirror_stats.next_spec(spec)
        _add_single_spec(spec, mirror_cache, mirror_stats)

    return mirror_stats.stats()
Python
def add(name, url, scope, args={}):
    """Add a named mirror in the given scope"""
    mirrors = spack.config.get('mirrors', scope=scope)
    if not mirrors:
        mirrors = syaml_dict()

    if name in mirrors:
        tty.die("Mirror with name %s already exists." % name)

    items = [(n, u) for n, u in mirrors.items()]
    mirror_data = url
    key_values = ["s3_access_key_id", "s3_access_token", "s3_profile"]
    # On creation, assume connection data is set for both fetch and push
    if any(value for value in key_values if value in args):
        url_dict = {"url": url,
                    "access_pair": (args.s3_access_key_id, args.s3_access_key_secret),
                    "access_token": args.s3_access_token,
                    "profile": args.s3_profile,
                    "endpoint_url": args.s3_endpoint_url}
        mirror_data = {"fetch": url_dict, "push": url_dict}

    items.insert(0, (name, mirror_data))
    mirrors = syaml_dict(items)
    spack.config.set('mirrors', mirrors, scope=scope)
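A hedged sketch of registering a mirror under a name with add() above, assuming it lives in spack.mirror; with no args supplied the S3 branch is skipped, so the mirror is stored as a plain URL. The mirror name and scope are illustrative.

import spack.mirror

# Stores {'local': 'file:///tmp/demo-mirror'} under the 'mirrors' config section
# in the user scope.
spack.mirror.add('local', 'file:///tmp/demo-mirror', scope='user')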
Python
def remove(name, scope): """Remove the named mirror in the given scope""" mirrors = spack.config.get('mirrors', scope=scope) if not mirrors: mirrors = syaml_dict() if name not in mirrors: tty.die("No mirror with name %s" % name) old_value = mirrors.pop(name) spack.config.set('mirrors', mirrors, scope=scope) debug_msg_url = "url %s" debug_msg = ["Removed mirror %s with"] values = [name] try: fetch_value = old_value['fetch'] push_value = old_value['push'] debug_msg.extend(("fetch", debug_msg_url, "and push", debug_msg_url)) values.extend((fetch_value, push_value)) except TypeError: debug_msg.append(debug_msg_url) values.append(old_value) tty.debug(" ".join(debug_msg) % tuple(values)) tty.msg("Removed mirror %s." % name)
Python
def push_url_from_directory(output_directory): """Given a directory in the local filesystem, return the URL on which to push binary packages. """ scheme = url_util.parse(output_directory, scheme='<missing>').scheme if scheme != '<missing>': raise ValueError('expected a local path, but got a URL instead') mirror_url = 'file://' + output_directory mirror = spack.mirror.MirrorCollection().lookup(mirror_url) return url_util.format(mirror.push_url)
Python
def push_url_from_mirror_name(mirror_name): """Given a mirror name, return the URL on which to push binary packages.""" mirror = spack.mirror.MirrorCollection().lookup(mirror_name) if mirror.name == "<unnamed>": raise ValueError('no mirror named "{0}"'.format(mirror_name)) return url_util.format(mirror.push_url)
Python
def push_url_from_mirror_url(mirror_url): """Given a mirror URL, return the URL on which to push binary packages.""" scheme = url_util.parse(mirror_url, scheme='<missing>').scheme if scheme == '<missing>': raise ValueError('"{0}" is not a valid URL'.format(mirror_url)) mirror = spack.mirror.MirrorCollection().lookup(mirror_url) return url_util.format(mirror.push_url)
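The three helpers above each resolve a push URL from a different kind of argument: a local directory, a mirror name, or a mirror URL. A small illustrative dispatcher, not part of Spack itself, that routes between them based on the input it is given:

import os


def resolve_push_url(destination):
    # Hypothetical helper: pick the appropriate push_url_from_* function
    # defined above depending on what the caller passed in.
    if os.path.isdir(destination):
        return push_url_from_directory(destination)
    if '://' in destination:
        return push_url_from_mirror_url(destination)
    return push_url_from_mirror_name(destination)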
Python
def installed_specs():
    """
    Returns the specs of packages installed in the active environment, or all
    installed specs if no environment is active.
    """
    env = spack.environment.active_environment()
    hashes = env.all_hashes() if env else None
    return spack.store.db.query(hashes=hashes)
Python
def activate(env, use_env_repo=False): """Activate an environment. To activate an environment, we add its configuration scope to the existing Spack configuration, and we set active to the current environment. Arguments: env (Environment): the environment to activate use_env_repo (bool): use the packages exactly as they appear in the environment's repository """ global _active_environment # Fail early to avoid ending in an invalid state if not isinstance(env, Environment): raise TypeError("`env` should be of type {0}".format(Environment.__name__)) # Check if we need to reinitialize the store due to pushing the configuration # below. store_before_pushing = spack.config.get('config:install_tree') prepare_config_scope(env) store_after_pushing = spack.config.get('config:install_tree') if store_before_pushing != store_after_pushing: # Hack to store the state of the store before activation env.store_token = spack.store.reinitialize() if use_env_repo: spack.repo.path.put_first(env.repo) tty.debug("Using environment '%s'" % env.name) # Do this last, because setting up the config must succeed first. _active_environment = env
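A sketch of activating an environment programmatically, assuming the functions above are importable from spack.environment and that an environment already exists at the given (illustrative) path; the Environment constructor signature is inferred from create() further below.

import spack.environment as ev

# Construct an Environment rooted at an existing directory and activate it,
# so its configuration scope is pushed onto Spack's configuration.
env = ev.Environment('/path/to/existing/env')
ev.activate(env, use_env_repo=False)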
Python
def create(name, init_file=None, with_view=None, keep_relative=False): """Create a named environment in Spack.""" validate_env_name(name) if exists(name): raise SpackEnvironmentError("'%s': environment already exists" % name) return Environment(root(name), init_file, with_view, keep_relative)
Python
def _read_yaml(str_or_file): """Read YAML from a file for round-trip parsing.""" data = syaml.load_config(str_or_file) filename = getattr(str_or_file, 'name', None) default_data = spack.config.validate( data, spack.schema.env.schema, filename) return (data, default_data)
Python
def _write_yaml(data, str_or_file): """Write YAML to a file preserving comments and dict order.""" filename = getattr(str_or_file, 'name', None) spack.config.validate(data, spack.schema.env.schema, filename) syaml.dump_config(data, str_or_file, default_flow_style=False)
Python
def _eval_conditional(string): """Evaluate conditional definitions using restricted variable scope.""" valid_variables = spack.util.environment.get_host_environment() valid_variables.update({ 're': re, 'env': os.environ, }) return eval(string, valid_variables)
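A self-contained sketch of the restricted-eval idea used by _eval_conditional above, substituting a hand-built variable dict for get_host_environment(); the 'platform' and 'target' values are stand-ins, while 're' and 'env' are injected exactly as in the function.

import os
import re


def eval_conditional_sketch(expression):
    # Evaluate a 'when:' expression against a small, explicit variable scope
    # rather than arbitrary globals.
    scope = {
        'platform': 'linux',      # stand-ins for host environment values
        'target': 'x86_64',
        're': re,
        'env': os.environ,
    }
    return eval(expression, scope)


print(eval_conditional_sketch("platform == 'linux'"))
print(eval_conditional_sketch("re.match('x86', target) is not None"))
print(eval_conditional_sketch("env.get('CI', '') != ''"))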
Python
def _is_dev_spec_and_has_changed(spec): """Check if the passed spec is a dev build and whether it has changed since the last installation""" # First check if this is a dev build and in the process already try to get # the dev_path dev_path_var = spec.variants.get('dev_path', None) if not dev_path_var: return False # Now we can check whether the code changed since the last installation if not spec.package.installed: # Not installed -> nothing to compare against return False _, record = spack.store.db.query_by_spec_hash(spec.dag_hash()) mtime = fs.last_modification_time_recursive(dev_path_var.value) return mtime > record.installation_time
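A stdlib-only sketch of the timestamp comparison behind _is_dev_spec_and_has_changed: a dev spec counts as changed when the newest file under its dev_path is younger than the recorded installation time. The helper below only approximates fs.last_modification_time_recursive, and the installation time is a made-up value.

import os
import time


def newest_mtime(path):
    # Rough stand-in for fs.last_modification_time_recursive: the most recent
    # modification time of anything under path.
    newest = os.path.getmtime(path)
    for root, _, files in os.walk(path):
        for name in files:
            newest = max(newest, os.path.getmtime(os.path.join(root, name)))
    return newest


installation_time = time.time() - 3600      # illustrative install record
print('changed since install:', newest_mtime('.') > installation_time)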
Python
def _spec_needs_overwrite(spec, changed_dev_specs):
    """Check whether the current spec needs to be overwritten because either
    it has changed itself or one of its dependencies has changed"""
    # if it's not installed, we don't need to overwrite it
    if not spec.package.installed:
        return False

    # If the spec itself has changed this is a trivial decision
    if spec in changed_dev_specs:
        return True

    # if spec and all deps aren't dev builds, we don't need to overwrite it
    if not any(spec.satisfies(c) for c in ('dev_path=*', '^dev_path=*')):
        return False

    # If any dep needs overwrite, or any dep is missing and is a dev build then
    # overwrite this package
    if any(
        ((not dep.package.installed) and dep.satisfies('dev_path=*')) or
        _spec_needs_overwrite(dep, changed_dev_specs)
        for dep in spec.traverse(root=False)
    ):
        return True
Python
def view(self, new=None): """ Generate the FilesystemView object for this ViewDescriptor By default, this method returns a FilesystemView object rooted at the current underlying root of this ViewDescriptor (self._current_root) Raise if new is None and there is no current view Arguments: new (str or None): If a string, create a FilesystemView rooted at that path. Default None. This should only be used to regenerate the view, and cannot be used to access specs. """ root = self._current_root if new: root = new if not root: # This can only be hit if we write a future bug msg = ("Attempting to get nonexistent view from environment. " "View root is at %s" % self.root) raise SpackEnvironmentViewError(msg) return YamlFilesystemView(root, spack.store.layout, ignore_conflicts=True, projections=self.projections, link=self.link_type)
Python
def _rewrite_relative_paths_on_relocation(self, init_file_dir): """When initializing the environment from a manifest file and we plan to store the environment in a different directory, we have to rewrite relative paths to absolute ones.""" if init_file_dir == self.path: return for name, entry in self.dev_specs.items(): dev_path = entry['path'] expanded_path = os.path.normpath(os.path.join( init_file_dir, entry['path'])) # Skip if the expanded path is the same (e.g. when absolute) if dev_path == expanded_path: continue tty.debug("Expanding develop path for {0} to {1}".format( name, expanded_path)) self.dev_specs[name]['path'] = expanded_path
Python
def _re_read(self): """Reinitialize the environment object if it has been written (this may not be true if the environment was just created in this running instance of Spack).""" if not os.path.exists(self.manifest_path): return self.clear(re_read=True) self._read()
Python
def _read_manifest(self, f, raw_yaml=None): """Read manifest file and set up user specs.""" if raw_yaml: _, self.yaml = _read_yaml(f) self.raw_yaml, _ = _read_yaml(raw_yaml) else: self.raw_yaml, self.yaml = _read_yaml(f) self.spec_lists = collections.OrderedDict() for item in config_dict(self.yaml).get('definitions', []): entry = copy.deepcopy(item) when = _eval_conditional(entry.pop('when', 'True')) assert len(entry) == 1 if when: name, spec_list = next(iter(entry.items())) user_specs = SpecList(name, spec_list, self.spec_lists.copy()) if name in self.spec_lists: self.spec_lists[name].extend(user_specs) else: self.spec_lists[name] = user_specs spec_list = config_dict(self.yaml).get(user_speclist_name, []) user_specs = SpecList(user_speclist_name, [s for s in spec_list if s], self.spec_lists.copy()) self.spec_lists[user_speclist_name] = user_specs enable_view = config_dict(self.yaml).get('view') # enable_view can be boolean, string, or None if enable_view is True or enable_view is None: self.views = { default_view_name: ViewDescriptor(self.path, self.view_path_default)} elif isinstance(enable_view, six.string_types): self.views = {default_view_name: ViewDescriptor(self.path, enable_view)} elif enable_view: path = self.path self.views = dict((name, ViewDescriptor.from_dict(path, values)) for name, values in enable_view.items()) else: self.views = {} # Retrieve the current concretization strategy configuration = config_dict(self.yaml) # default concretization to separately self.concretization = configuration.get('concretization', 'separately') # Retrieve dev-build packages: self.dev_specs = configuration.get('develop', {}) for name, entry in self.dev_specs.items(): # spec must include a concrete version assert Spec(entry['spec']).version.concrete # default path is the spec name if 'path' not in entry: self.dev_specs[name]['path'] = name
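For reference, a made-up manifest exercising the fields _read_manifest parses (a definitions entry with a when: clause, the specs list, view, concretization, and develop), shown as a plain YAML string so the nesting is visible. Parsing it with PyYAML below only previews the layout and assumes PyYAML is installed; Spack itself goes through its own syaml layer and schema validation.

import yaml  # assumption: PyYAML is available; used only to preview the layout

MANIFEST_EXAMPLE = """\
spack:
  definitions:
  - compilers: ['%gcc@9.3.0']
    when: platform == 'linux'
  specs:
  - zlib
  - hdf5~mpi
  view: true
  concretization: separately
  develop:
    zlib:
      spec: zlib@1.2.11
      path: zlib          # defaults to the spec name when omitted
"""

data = yaml.safe_load(MANIFEST_EXAMPLE)
print(data['spack']['specs'])           # -> ['zlib', 'hdf5~mpi']
print(list(data['spack']['develop']))   # -> ['zlib']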
Python
def clear(self, re_read=False): """Clear the contents of the environment Arguments: re_read (bool): If True, do not clear ``new_specs`` nor ``new_installs`` values. These values cannot be read from yaml, and need to be maintained when re-reading an existing environment. """ self.spec_lists = {user_speclist_name: SpecList()} # specs from yaml self.dev_specs = {} # dev-build specs from yaml self.concretized_user_specs = [] # user specs from last concretize self.concretized_order = [] # roots of last concretize, in order self.specs_by_hash = {} # concretized specs by hash self._repo = None # RepoPath for this env (memoized) self._previous_active = None # previously active environment if not re_read: # things that cannot be recreated from file self.new_specs = [] # write packages for these on write() self.new_installs = [] # write modules for these on write()
Python
def _transaction_lock_path(self): """The location of the lock file used to synchronize multiple processes updating the same environment. """ return os.path.join(self.env_subdir_path, 'transaction_lock')
Python
def included_config_scopes(self): """List of included configuration scopes from the environment. Scopes are listed in the YAML file in order from highest to lowest precedence, so configuration from earlier scope will take precedence over later ones. This routine returns them in the order they should be pushed onto the internal scope stack (so, in reverse, from lowest to highest). """ scopes = [] # load config scopes added via 'include:', in reverse so that # highest-precedence scopes are last. includes = config_dict(self.yaml).get('include', []) missing = [] for i, config_path in enumerate(reversed(includes)): # allow paths to contain spack config/environment variables, etc. config_path = substitute_path_variables(config_path) # treat relative paths as relative to the environment if not os.path.isabs(config_path): config_path = os.path.join(self.path, config_path) config_path = os.path.normpath(os.path.realpath(config_path)) if os.path.isdir(config_path): # directories are treated as regular ConfigScopes config_name = 'env:%s:%s' % ( self.name, os.path.basename(config_path)) scope = spack.config.ConfigScope(config_name, config_path) elif os.path.exists(config_path): # files are assumed to be SingleFileScopes config_name = 'env:%s:%s' % (self.name, config_path) scope = spack.config.SingleFileScope( config_name, config_path, spack.schema.merged.schema) else: missing.append(config_path) continue scopes.append(scope) if missing: msg = 'Detected {0} missing include path(s):'.format(len(missing)) msg += '\n {0}'.format('\n '.join(missing)) tty.die('{0}\nPlease correct and try again.'.format(msg)) return scopes
Python
def env_file_config_scope(self): """Get the configuration scope for the environment's manifest file.""" config_name = self.env_file_config_scope_name() return spack.config.SingleFileScope( config_name, self.manifest_path, spack.schema.env.schema, [spack.config.first_existing(self.raw_yaml, spack.schema.env.keys)])
Python
def add(self, user_spec, list_name=user_speclist_name): """Add a single user_spec (non-concretized) to the Environment Returns: (bool): True if the spec was added, False if it was already present and did not need to be added """ spec = Spec(user_spec) if list_name not in self.spec_lists: raise SpackEnvironmentError( 'No list %s exists in environment %s' % (list_name, self.name) ) if list_name == user_speclist_name: if not spec.name: raise SpackEnvironmentError( 'cannot add anonymous specs to an environment!') elif not spack.repo.path.exists(spec.name): virtuals = spack.repo.path.provider_index.providers.keys() if spec.name not in virtuals: msg = 'no such package: %s' % spec.name raise SpackEnvironmentError(msg) list_to_change = self.spec_lists[list_name] existing = str(spec) in list_to_change.yaml_list if not existing: list_to_change.add(str(spec)) self.update_stale_references(list_name) return bool(not existing)
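A brief sketch tying the module-level create() and the add() method together; it assumes both are exposed by spack.environment and that write() (not shown in this excerpt) persists the manifest. The environment name and spec are illustrative.

import spack.environment as ev

env = ev.create('demo')                 # raises if 'demo' already exists
if env.add('zlib@1.2.11'):              # returns False when already present
    print('zlib added to the default spec list')
env.write()                             # assumption: persists spack.yaml to disk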
Python
def remove(self, query_spec, list_name=user_speclist_name, force=False): """Remove specs from an environment that match a query_spec""" query_spec = Spec(query_spec) list_to_change = self.spec_lists[list_name] matches = [] if not query_spec.concrete: matches = [s for s in list_to_change if s.satisfies(query_spec)] if not matches: # concrete specs match against concrete specs in the env # by *dag hash*, not build hash. dag_hashes_in_order = [ self.specs_by_hash[build_hash].dag_hash() for build_hash in self.concretized_order ] specs_hashes = zip( self.concretized_user_specs, dag_hashes_in_order ) matches = [ s for s, h in specs_hashes if query_spec.dag_hash() == h ] if not matches: raise SpackEnvironmentError( "Not found: {0}".format(query_spec)) old_specs = set(self.user_specs) new_specs = set() for spec in matches: if spec in list_to_change: try: list_to_change.remove(spec) self.update_stale_references(list_name) new_specs = set(self.user_specs) except spack.spec_list.SpecListError: # define new specs list new_specs = set(self.user_specs) msg = "Spec '%s' is part of a spec matrix and " % spec msg += "cannot be removed from list '%s'." % list_to_change if force: msg += " It will be removed from the concrete specs." # Mock new specs so we can remove this spec from # concrete spec lists new_specs.remove(spec) tty.warn(msg) # If force, update stale concretized specs for spec in old_specs - new_specs: if force and spec in self.concretized_user_specs: i = self.concretized_user_specs.index(spec) del self.concretized_user_specs[i] dag_hash = self.concretized_order[i] del self.concretized_order[i] del self.specs_by_hash[dag_hash]
Python
def develop(self, spec, path, clone=False): """Add dev-build info for package Args: spec (spack.spec.Spec): Set constraints on development specs. Must include a concrete version. path (str): Path to find code for developer builds. Relative paths will be resolved relative to the environment. clone (bool): Clone the package code to the path. If clone is False Spack will assume the code is already present at ``path``. Return: (bool): True iff the environment was changed. """ spec = spec.copy() # defensive copy since we access cached attributes if not spec.versions.concrete: raise SpackEnvironmentError( 'Cannot develop spec %s without a concrete version' % spec) for name, entry in self.dev_specs.items(): if name == spec.name: e_spec = Spec(entry['spec']) e_path = entry['path'] if e_spec == spec: if path == e_path: tty.msg("Spec %s already configured for development" % spec) return False else: tty.msg("Updating development path for spec %s" % spec) break else: msg = "Updating development spec for package " msg += "%s with path %s" % (spec.name, path) tty.msg(msg) break else: tty.msg("Configuring spec %s for development at path %s" % (spec, path)) if clone: # "steal" the source code via staging API abspath = os.path.normpath(os.path.join(self.path, path)) stage = spec.package.stage stage.steal_source(abspath) # If it wasn't already in the list, append it self.dev_specs[spec.name] = {'path': path, 'spec': str(spec)} return True
Python
def undevelop(self, spec): """Remove develop info for abstract spec ``spec``. returns True on success, False if no entry existed.""" spec = Spec(spec) # In case it's a spec object if spec.name in self.dev_specs: del self.dev_specs[spec.name] return True return False
Python
def concretize(self, force=False, tests=False, reuse=False): """Concretize user_specs in this environment. Only concretizes specs that haven't been concretized yet unless force is ``True``. This only modifies the environment in memory. ``write()`` will write out a lockfile containing concretized specs. Arguments: force (bool): re-concretize ALL specs, even those that were already concretized tests (bool or list or set): False to run no tests, True to test all packages, or a list of package names to run tests for some reuse (bool): if True try to maximize reuse of already installed specs, if False don't account for installation status. Returns: List of specs that have been concretized. Each entry is a tuple of the user spec and the corresponding concretized spec. """ if force: # Clear previously concretized specs self.concretized_user_specs = [] self.concretized_order = [] self.specs_by_hash = {} # Pick the right concretization strategy if self.concretization == 'together': return self._concretize_together(tests=tests, reuse=reuse) if self.concretization == 'separately': return self._concretize_separately(tests=tests, reuse=reuse) msg = 'concretization strategy not implemented [{0}]' raise SpackEnvironmentError(msg.format(self.concretization))
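A sketch of the usual add-then-concretize flow; concretize() returns (abstract, concrete) pairs per its docstring, and dag_hash() is used here only because it appears elsewhere in this module. The path and spec are illustrative, and write() is assumed to persist the lockfile.

import spack.environment as ev

env = ev.Environment('/path/to/existing/env')
env.add('hdf5~mpi')
for abstract, concrete in env.concretize(tests=False, reuse=False):
    print('{0} -> {1}'.format(abstract.name, concrete.dag_hash()))
env.write()                             # assumption: writes the lockfile to disk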
Python
def _concretize_together(self, tests=False, reuse=False): """Concretization strategy that concretizes all the specs in the same DAG. """ # Exit early if the set of concretized specs is the set of user specs user_specs_did_not_change = not bool( set(self.user_specs) - set(self.concretized_user_specs) ) if user_specs_did_not_change: return [] # Check that user specs don't have duplicate packages counter = collections.defaultdict(int) for user_spec in self.user_specs: counter[user_spec.name] += 1 duplicates = [] for name, count in counter.items(): if count > 1: duplicates.append(name) if duplicates: msg = ('environment that are configured to concretize specs' ' together cannot contain more than one spec for each' ' package [{0}]'.format(', '.join(duplicates))) raise SpackEnvironmentError(msg) # Proceed with concretization self.concretized_user_specs = [] self.concretized_order = [] self.specs_by_hash = {} concrete_specs = spack.concretize.concretize_specs_together( *self.user_specs, tests=tests, reuse=reuse ) concretized_specs = [x for x in zip(self.user_specs, concrete_specs)] for abstract, concrete in concretized_specs: self._add_concrete_spec(abstract, concrete) return concretized_specs
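A stdlib-only restatement of the duplicate check _concretize_together performs before solving: with 'together' concretization, two root specs for the same package are rejected up front. The package names below are placeholders.

import collections


def duplicate_packages(root_spec_names):
    # Count root specs per package name and report any that appear twice.
    counter = collections.defaultdict(int)
    for name in root_spec_names:
        counter[name] += 1
    return [name for name, count in counter.items() if count > 1]


print(duplicate_packages(['zlib', 'hdf5', 'zlib']))   # -> ['zlib']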
Python
def _concretize_separately(self, tests=False, reuse=False): """Concretization strategy that concretizes separately one user spec after the other. """ # keep any concretized specs whose user specs are still in the manifest old_concretized_user_specs = self.concretized_user_specs old_concretized_order = self.concretized_order old_specs_by_hash = self.specs_by_hash self.concretized_user_specs = [] self.concretized_order = [] self.specs_by_hash = {} for s, h in zip(old_concretized_user_specs, old_concretized_order): if s in self.user_specs: concrete = old_specs_by_hash[h] self._add_concrete_spec(s, concrete, new=False) # Concretize any new user specs that we haven't concretized yet arguments, root_specs = [], [] for uspec, uspec_constraints in zip( self.user_specs, self.user_specs.specs_as_constraints ): if uspec not in old_concretized_user_specs: root_specs.append(uspec) arguments.append((uspec_constraints, tests, reuse)) # Ensure we don't try to bootstrap clingo in parallel if spack.config.get('config:concretizer') == 'clingo': with spack.bootstrap.ensure_bootstrap_configuration(): spack.bootstrap.ensure_clingo_importable_or_raise() # Ensure all the indexes have been built or updated, since # otherwise the processes in the pool may timeout on waiting # for a write lock. We do this indirectly by retrieving the # provider index, which should in turn trigger the update of # all the indexes if there's any need for that. _ = spack.repo.path.provider_index # Ensure we have compilers in compilers.yaml to avoid that # processes try to write the config file in parallel _ = spack.compilers.get_compiler_config() # Early return if there is nothing to do if len(arguments) == 0: return [] # Solve the environment in parallel on Linux start = time.time() max_processes = min( len(arguments), # Number of specs 16 # Cap on 16 cores ) # TODO: revisit this print as soon as darwin is parallel too msg = 'Starting concretization' if sys.platform != 'darwin': pool_size = spack.util.parallel.num_processes(max_processes=max_processes) if pool_size > 1: msg = msg + ' pool with {0} processes'.format(pool_size) tty.msg(msg) concretized_root_specs = spack.util.parallel.parallel_map( _concretize_task, arguments, max_processes=max_processes, debug=tty.is_debug() ) finish = time.time() tty.msg('Environment concretized in %.2f seconds.' % (finish - start)) results = [] for abstract, concrete in zip(root_specs, concretized_root_specs): self._add_concrete_spec(abstract, concrete) results.append((abstract, concrete)) return results
Python
def concretize_and_add(self, user_spec, concrete_spec=None, tests=False): """Concretize and add a single spec to the environment. Concretize the provided ``user_spec`` and add it along with the concretized result to the environment. If the given ``user_spec`` was already present in the environment, this does not add a duplicate. The concretized spec will be added unless the ``user_spec`` was already present and an associated concrete spec was already present. Args: concrete_spec: if provided, then it is assumed that it is the result of concretizing the provided ``user_spec`` """ if self.concretization == 'together': msg = 'cannot install a single spec in an environment that is ' \ 'configured to be concretized together. Run instead:\n\n' \ ' $ spack add <spec>\n' \ ' $ spack install\n' raise SpackEnvironmentError(msg) spec = Spec(user_spec) if self.add(spec): concrete = concrete_spec or spec.concretized(tests=tests) self._add_concrete_spec(spec, concrete) else: # spec might be in the user_specs, but not installed. # TODO: Redo name-based comparison for old style envs spec = next( s for s in self.user_specs if s.satisfies(user_spec) ) concrete = self.specs_by_hash.get(spec.build_hash()) if not concrete: concrete = spec.concretized(tests=tests) self._add_concrete_spec(spec, concrete) return concrete