import numpy as np
import tensorflow as tf  # TF1-style API (tf.contrib, tf.get_variable)


def conv1d(input_, output_dim, initializer='xavier', k_w=5, d_w=1, stddev=0.02,
           padding='VALID', reuse=False, name='conv1d'):
    with tf.variable_scope(name, reuse=reuse):
        if initializer == 'xavier':
            init_type = tf.contrib.layers.xavier_initializer()
        elif initializer == 'normal':
            init_type = tf.truncated_normal_initializer(stddev=stddev)
        else:
            raise Exception('Weight initializer unknown')
        w = tf.get_variable('w', [k_w, input_.get_shape()[-1], output_dim],
                            initializer=init_type)
        conv = tf.nn.conv1d(input_, w, stride=d_w, padding=padding)
        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.nn.bias_add(conv, biases)
        return conv

def conv2d(input_, output_dim, initializer='xavier', k_h=5, k_w=5, d_h=1, d_w=1,
           stddev=0.02, padding='VALID', reuse=False, name='conv2d'):
    with tf.variable_scope(name, reuse=reuse):
        if initializer == 'xavier':
            init_type = tf.contrib.layers.xavier_initializer()
        elif initializer == 'normal':
            init_type = tf.truncated_normal_initializer(stddev=stddev)
        else:
            raise Exception('Weight initializer unknown')
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=init_type)
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.nn.bias_add(conv, biases)
        return conv

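# Quick sanity check for the 'VALID' padding used above (a minimal standalone
# sketch, not part of the library): the spatial output size of a VALID
# convolution is floor((in - k) / stride) + 1. The helper name is hypothetical.
def valid_out_size(in_size, k, stride):
    return (in_size - k) // stride + 1

assert valid_out_size(28, 5, 1) == 24  # e.g. a 28-wide input, k=5, stride 1
assert valid_out_size(28, 5, 2) == 12
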
def conv2da(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
            name='conv2d', reuse=False, padding='SAME'):
    with tf.variable_scope(name, reuse=reuse):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.contrib.layers.xavier_initializer())
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv

def conv2dp(input_, output_dim, params, d_h=1, d_w=1):
    # Convolution with externally supplied weight/bias values
    w = tf.Variable(params[0])
    conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
    biases = tf.Variable(params[1])
    conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
    return conv

def conv3d(input_, output_dim, initializer='xavier', k_h=5, k_w=5, d_h=2, d_w=2,
           stddev=0.02, name='conv3d'):
    with tf.variable_scope(name):
        if initializer == 'xavier':
            init_type = tf.contrib.layers.xavier_initializer()
        elif initializer == 'normal':
            init_type = tf.truncated_normal_initializer(stddev=stddev)
        else:
            raise Exception('Weight initializer unknown')
        w = tf.get_variable('w', [input_.get_shape()[-2], k_h, k_w,
                                  input_.get_shape()[-1], output_dim],
                            initializer=init_type)
        conv = tf.nn.conv3d(input_, w, strides=[1, d_h, d_w, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv

def deconv2d(input_, output_shape, initializer='xavier', k_h=5, k_w=5, d_h=2,
             d_w=2, stddev=0.02, name='deconv2d', with_w=False):
    with tf.variable_scope(name):
        if initializer == 'xavier':
            init_type = tf.contrib.layers.xavier_initializer()
        elif initializer == 'normal':
            init_type = tf.truncated_normal_initializer(stddev=stddev)
        else:
            raise Exception('Weight initializer unknown')
        # Filter shape is [k_h, k_w, out_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1],
                                  input_.get_shape()[-1]],
                            initializer=init_type)
        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                            strides=[1, d_h, d_w, 1])
        except AttributeError:
            # Older TF versions expose this op as tf.nn.deconv2d
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                    strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]],
                                 initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.bias_add(deconv, biases)
        if with_w:
            return deconv, w, biases
        else:
            return deconv

def lrelu(x, leak=0.2, name='lrelu'):
    with tf.variable_scope(name):
        f1 = 0.5 * (1 + leak)
        f2 = 0.5 * (1 - leak)
        return f1 * x + f2 * abs(x)

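# The two-coefficient form above is the standard branch-free identity for
# leaky ReLU: 0.5*(1+leak)*x + 0.5*(1-leak)*|x| == max(x, leak*x).
# A minimal NumPy check of that identity (illustrative only):
import numpy as np
_x = np.linspace(-3, 3, 7)
_leak = 0.2
assert np.allclose(0.5 * (1 + _leak) * _x + 0.5 * (1 - _leak) * np.abs(_x),
                   np.maximum(_x, _leak * _x))
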
def relu(x):
    return tf.nn.relu(x)


def tanh(x):
    return tf.nn.tanh(x)

def shape2d(a):
    """a: an int or a tuple/list of length 2"""
    if type(a) == int:
        return [a, a]
    if isinstance(a, (list, tuple)):
        assert len(a) == 2
        return list(a)
    raise RuntimeError('Illegal shape: {}'.format(a))

def shape4d(a):
    return [1] + shape2d(a) + [1]

def UnPooling2x2ZeroFilled(x):
    # Interleave zeros along the channel axis, then the width axis, and
    # reshape so each input value lands at the top-left of a 2x2 output block
    out = tf.concat(3, [x, tf.zeros_like(x)])
    out = tf.concat(2, [out, tf.zeros_like(out)])
    sh = x.get_shape().as_list()
    if None not in sh[1:]:
        out_size = [-1, sh[1] * 2, sh[2] * 2, sh[3]]
        return tf.reshape(out, out_size)
    else:
        sh = tf.shape(x)
        return tf.reshape(out, [-1, sh[1] * 2, sh[2] * 2, sh[3]])

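# A minimal NumPy emulation of the concat/reshape trick above (illustrative
# only), confirming each value ends up top-left in its 2x2 block:
import numpy as np
_x = np.array([[[[1.], [2.]], [[3.], [4.]]]])            # NHWC, shape (1, 2, 2, 1)
_out = np.concatenate([_x, np.zeros_like(_x)], axis=3)   # double the channels
_out = np.concatenate([_out, np.zeros_like(_out)], axis=2)  # double the width
_out = _out.reshape(-1, 4, 4, 1)
# _out[0, :, :, 0] == [[1, 0, 2, 0],
#                      [0, 0, 0, 0],
#                      [3, 0, 4, 0],
#                      [0, 0, 0, 0]]
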
def MaxPooling(x, shape, stride=None, padding='VALID'):
    """MaxPooling on images.

    :param x: NHWC tensor.
    :param shape: int or [h, w]
    :param stride: int or [h, w]. default to be shape.
    :param padding: 'VALID' or 'SAME'. default to 'VALID'.
    :returns: NHWC tensor.
    """
    padding = padding.upper()
    shape = shape4d(shape)
    if stride is None:
        stride = shape
    else:
        stride = shape4d(stride)
    return tf.nn.max_pool(x, ksize=shape, strides=stride, padding=padding)

def FixedUnPooling(x, shape, unpool_mat=None):
    """Unpool the input with a fixed mat to perform kronecker product with.

    :param x: NHWC tensor
    :param shape: int or [h, w]
    :param unpool_mat: a tf/np matrix with size=shape. If None, will use a mat
        with 1 at top-left corner.
    :returns: NHWC tensor
    """
    shape = shape2d(shape)
    if shape[0] == 2 and shape[1] == 2 and unpool_mat is None:
        # Fast path for the common 2x2 zero-filled case
        return UnPooling2x2ZeroFilled(x)
    input_shape = tf.shape(x)
    if unpool_mat is None:
        mat = np.zeros(shape, dtype='float32')
        mat[0][0] = 1
        unpool_mat = tf.Variable(mat, trainable=False, name='unpool_mat')
    elif isinstance(unpool_mat, np.ndarray):
        unpool_mat = tf.Variable(unpool_mat, trainable=False, name='unpool_mat')
    assert unpool_mat.get_shape().as_list() == list(shape)
    # Kronecker product of each channel's feature map with unpool_mat
    fx = flatten(tf.transpose(x, [0, 3, 1, 2]))
    fx = tf.expand_dims(fx, -1)
    mat = tf.expand_dims(flatten(unpool_mat), 0)
    prod = tf.matmul(fx, mat)
    prod = tf.reshape(prod, tf.pack([-1, input_shape[3], input_shape[1],
                                     input_shape[2], shape[0], shape[1]]))
    prod = tf.transpose(prod, [0, 2, 4, 3, 5, 1])
    prod = tf.reshape(prod, tf.pack([-1, input_shape[1] * shape[0],
                                     input_shape[2] * shape[1], input_shape[3]]))
    return prod

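# Per the docstring, FixedUnPooling is (per channel) a Kronecker product of
# the feature map with unpool_mat; the default mat reproduces the zero-filled
# 2x2 case. Equivalent NumPy statement (illustrative only):
import numpy as np
_x = np.array([[1., 2.], [3., 4.]])
_mat = np.zeros((2, 2)); _mat[0][0] = 1
assert np.array_equal(np.kron(_x, _mat),
                      np.array([[1, 0, 2, 0],
                                [0, 0, 0, 0],
                                [3, 0, 4, 0],
                                [0, 0, 0, 0]]))
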
def linear(input_, output_size, name, stddev=0.02, bias_start=0.0, reuse=False,
           with_w=False):
    shape = input_.get_shape().as_list()
    with tf.variable_scope(name, reuse=reuse):
        matrix = tf.get_variable('Matrix', [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable('bias', [output_size],
                               initializer=tf.constant_initializer(bias_start))
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias

# Relies on the CoCaBO codebase imports: math, numpy as np, pandas as pd,
# GPy, tqdm, and the project classes GP, GPWithSomeFixedDimsAtStart,
# CategoryOverlapKernel, MixtureViaSumAndProduct, BatchBOHeuristic,
# JobExecutorInSeriesBlocking, add_hallucinations_to_x_and_y.
class BatchCoCaBO(CoCaBO_Base):

    def __init__(self, objfn, initN, bounds, acq_type, C, **kwargs):
        super(BatchCoCaBO, self).__init__(objfn, initN, bounds, acq_type, C,
                                          **kwargs)
        self.best_val_list = []
        self.C_list = self.C
        self.name = 'BCoCaBO'

    def runOptim(self, budget, seed, initData=None, initResult=None):
        if initData and initResult:
            self.data = initData[:]
            self.result = initResult[:]
        else:
            self.data, self.result = self.initialise(seed)

        # Exploration parameter per categorical variable (Exp3-style)
        bestUpperBoundEstimate = 2 * budget / 3
        gamma_list = [np.sqrt(C * math.log(C / self.batch_size) /
                              ((math.e - 1) * self.batch_size *
                               bestUpperBoundEstimate))
                      for C in self.C_list]
        gamma_list = [g if not np.isnan(g) else 1 for g in gamma_list]

        Wc_list_init = [np.ones(C) for C in self.C_list]
        Wc_list = Wc_list_init
        nDim = len(self.bounds)
        result_list = []
        starting_best = np.max(-1 * self.result[0])
        result_list.append([-1, None, None, starting_best, None])
        continuous_dims = list(range(len(self.C_list), nDim))
        categorical_dims = list(range(len(self.C_list)))

        for t in tqdm(range(budget)):
            self.iteration = t
            ht_batch_list, probabilityDistribution_list, S0 = \
                self.compute_prob_dist_and_draw_hts(Wc_list, gamma_list,
                                                    self.batch_size)
            ht_batch_list = ht_batch_list.astype(int)
            Gt_ht_list = self.RewardperCategoryviaBO(self.f, ht_batch_list,
                                                     categorical_dims,
                                                     continuous_dims)
            Wc_list = self.update_weights_for_all_cat_var(
                Gt_ht_list, ht_batch_list, Wc_list, gamma_list,
                probabilityDistribution_list, self.batch_size, S0=S0)
            besty, li, vi = self.getBestVal2(self.result)
            result_list.append([t, ht_batch_list, Gt_ht_list, besty,
                                self.mix_used, self.model_hp])
            self.ht_recommedations.append(ht_batch_list)

        df = pd.DataFrame(result_list,
                          columns=['iter', 'ht', 'Reward', 'best_value',
                                   'mix_val', 'model_hp'])
        bestx = self.data[li][vi]
        self.best_val_list.append([self.batch_size, self.trial_num, li,
                                   besty, bestx])
        return df

    def RewardperCategoryviaBO(self, objfn, ht_next_batch_list,
                               categorical_dims, continuous_dims):
        Zt = self.data[0]
        yt = self.result[0]
        my_kernel, hp_bounds = self.get_kernel(categorical_dims, continuous_dims)
        gp_opt_params = {'method': 'multigrad', 'num_restarts': 5,
                         'restart_bounds': hp_bounds, 'hp_bounds': hp_bounds,
                         'verbose': False}
        gp_kwargs = {'y_norm': 'meanstd', 'opt_params': gp_opt_params}
        gp_args = (Zt, yt, my_kernel)
        gp = GP(*gp_args, **gp_kwargs)
        opt_flag, gp = self.set_model_params_and_opt_flag(gp)
        if opt_flag:
            gp.optimize()
        self.model_hp = gp.param_array

        acq_dict = {'type': 'subspace'}
        acq_opt_params = {'method': 'samplegrad', 'num_local': 5,
                          'num_samples': 5000, 'num_chunks': 10,
                          'verbose': False}
        ymin_opt_params = {'method': 'standard'}

        h_unique, h_counts = np.unique(ht_next_batch_list, return_counts=True,
                                       axis=0)
        z_batch_list = []
        for idx, curr_h in enumerate(h_unique):
            gp_for_bo = GPWithSomeFixedDimsAtStart(*gp_args,
                                                   fixed_dim_vals=curr_h,
                                                   **gp_kwargs)
            gp_for_bo.param_array = gp.param_array
            curr_batch_size = h_counts[idx]
            interface = JobExecutorInSeriesBlocking(curr_batch_size)
            if len(z_batch_list) > 0:
                # Hallucinate outcomes at already-selected batch points
                # (kriging believer) before optimising for the next ones
                self.surrogate = gp_for_bo
                self.async_infill_strategy = 'kriging_believer'
                surrogate_x_with_fake, surrogate_y_with_fake = \
                    add_hallucinations_to_x_and_y(self, gp_for_bo.X,
                                                  gp_for_bo.Y_raw,
                                                  np.vstack(z_batch_list))
                gp_for_bo.set_XY(X=surrogate_x_with_fake,
                                 Y=surrogate_y_with_fake)
            bo = BatchBOHeuristic(objfn, gp_for_bo, self.x_bounds,
                                  async_infill_strategy='kriging_believer',
                                  offset_acq=True, async_interface=interface,
                                  batch_size=curr_batch_size,
                                  acq_dict=acq_dict,
                                  y_min_opt_params=ymin_opt_params,
                                  acq_opt_params=acq_opt_params,
                                  optimise_surrogate_model=False)
            x_batch_for_curr_h, _ = bo.get_next()
            z_batch_for_curr_h = np.hstack(
                (np.vstack([curr_h] * curr_batch_size),
                 np.vstack(x_batch_for_curr_h)))
            z_batch_list.append(z_batch_for_curr_h)
        z_batch_next = np.vstack(z_batch_list)

        y_batch_next = np.zeros((self.batch_size, 1))
        for b in range(self.batch_size):
            x_next = z_batch_next[b, continuous_dims]
            ht_next_list = z_batch_next[b, categorical_dims]
            try:
                y_next = objfn(ht_next_list, x_next)
            except:
                print('stop')
            y_batch_next[b] = y_next

        self.mix_used = gp.kern.mix[0]
        self.data[0] = np.row_stack((self.data[0], z_batch_next))
        self.result[0] = np.row_stack((self.result[0], y_batch_next))
        ht_batch_list_rewards = self.compute_reward_for_all_cat_variable(
            ht_next_batch_list, self.batch_size)
        bestval_ht = np.max(self.result[0] * -1)
        print(f'arm pulled={ht_next_batch_list[:]} ; y_best = {bestval_ht}; '
              f'mix={self.mix_used}')
        return ht_batch_list_rewards

    def get_kernel(self, categorical_dims, continuous_dims):
        if self.ARD:
            hp_bounds = np.array([*([[0.0001, 3]] * len(continuous_dims)),
                                  [1e-06, 1]])
        else:
            hp_bounds = np.array([[0.0001, 3], [1e-06, 1]])
        fix_mix_in_this_iter, mix_value, hp_bounds = self.get_mix(hp_bounds)
        k_cat = CategoryOverlapKernel(len(categorical_dims),
                                      active_dims=categorical_dims)
        k_cont = GPy.kern.Matern52(len(continuous_dims),
                                   lengthscale=self.default_cont_lengthscale,
                                   active_dims=continuous_dims, ARD=self.ARD)
        my_kernel = MixtureViaSumAndProduct(
            len(categorical_dims) + len(continuous_dims), k_cat, k_cont,
            mix=mix_value, fix_inner_variances=True,
            fix_mix=fix_mix_in_this_iter)
        return my_kernel, hp_bounds

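# The exploration parameter above appears to follow the Exp3 recipe
#   gamma = sqrt(C * ln(C / B) / ((e - 1) * B * G_hat)),
# with C arms, batch size B, and G_hat = 2/3 * budget as the upper-bound
# estimate of the best cumulative reward. Plugging in small numbers
# (illustrative sketch only; the concrete values are made up):
import math
_C, _B, _budget = 5, 2, 60
_G_hat = 2 * _budget / 3
_gamma = math.sqrt(_C * math.log(_C / _B) / ((math.e - 1) * _B * _G_hat))
print(round(_gamma, 4))  # ~0.1826
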
class CoCaBO(CoCaBO_Base):

    def __init__(self, objfn, initN, bounds, acq_type, C, **kwargs):
        super(CoCaBO, self).__init__(objfn, initN, bounds, acq_type, C, **kwargs)
        self.best_val_list = []
        self.C_list = self.C
        self.name = 'CoCaBO'

    def runOptim(self, budget, seed, batch_size=1, initData=None,
                 initResult=None):
        if initData and initResult:
            self.data = initData[:]
            self.result = initResult[:]
        else:
            self.data, self.result = self.initialise(seed)
        b = batch_size
        # Exploration parameter per categorical variable (Exp3-style)
        bestUpperBoundEstimate = 2 * budget / 3
        gamma_list = [math.sqrt(C * math.log(C) /
                                ((math.e - 1) * bestUpperBoundEstimate))
                      for C in self.C_list]
        Wc_list_init = [np.ones(C) for C in self.C_list]
        Wc_list = Wc_list_init
        nDim = len(self.bounds)
        result_list = []
        starting_best = np.max(-1 * self.result[0])
        result_list.append([-1, None, None, starting_best, None])
        continuous_dims = list(range(len(self.C_list), nDim))
        categorical_dims = list(range(len(self.C_list)))

        for t in tqdm(range(budget)):
            self.iteration = t
            ht_list, probabilityDistribution_list = \
                self.compute_prob_dist_and_draw_hts(Wc_list, gamma_list,
                                                    batch_size)
            Gt_ht_list = self.RewardperCategoryviaBO(self.f, ht_list,
                                                     categorical_dims,
                                                     continuous_dims,
                                                     self.bounds,
                                                     self.acq_type, b)
            Wc_list = self.update_weights_for_all_cat_var(
                Gt_ht_list, ht_list, Wc_list, gamma_list,
                probabilityDistribution_list, batch_size)
            besty, li, vi = self.getBestVal2(self.result)
            result_list.append([t, ht_list, Gt_ht_list, besty,
                                self.mix_used, self.model_hp])
            self.ht_recommedations.append(ht_list)

        df = pd.DataFrame(result_list,
                          columns=['iter', 'ht', 'Reward', 'best_value',
                                   'mix_val', 'model_hp'])
        bestx = self.data[li][vi]
        self.best_val_list.append([batch_size, self.trial_num, li, besty, bestx])
        return df

    def RewardperCategoryviaBO(self, objfn, ht_next_list, categorical_dims,
                               continuous_dims, bounds, acq_type, b):
        Zt = self.data[0]
        yt = self.result[0]
        my_kernel, hp_bounds = self.get_kernel(categorical_dims, continuous_dims)
        gp_opt_params = {'method': 'multigrad', 'num_restarts': 5,
                         'restart_bounds': hp_bounds, 'hp_bounds': hp_bounds,
                         'verbose': False}
        gp = GP(Zt, yt, my_kernel, y_norm='meanstd', opt_params=gp_opt_params)
        opt_flag, gp = self.set_model_params_and_opt_flag(gp)
        if opt_flag:
            gp.optimize()
        self.model_hp = gp.param_array
        self.mix_used = gp.kern.mix[0]

        x_bounds = np.array([d['domain'] for d in bounds
                             if d['type'] == 'continuous'])
        if acq_type == 'EI':
            acq = EI(gp, np.min(gp.Y_raw))
        elif acq_type == 'LCB':
            acq = UCB(gp, 2.0)
        # Restrict the acquisition to the continuous subspace, holding the
        # drawn categorical values fixed
        acq_sub = AcquisitionOnSubspace(acq, my_kernel.k2.active_dims,
                                        ht_next_list)

        def optimiser_func(x):
            return -acq_sub.evaluate(np.atleast_2d(x))

        res = sample_then_minimize(optimiser_func, x_bounds, num_samples=5000,
                                   num_chunks=10, num_local=3,
                                   minimize_options=None,
                                   evaluate_sequentially=False)
        x_next = res.x
        z_next = np.hstack((ht_next_list, x_next))
        y_next = objfn(ht_next_list, x_next)
        self.data[0] = np.row_stack((self.data[0], z_next))
        self.result[0] = np.row_stack((self.result[0], y_next))
        ht_next_list_array = np.atleast_2d(ht_next_list)
        ht_list_rewards = self.compute_reward_for_all_cat_variable(
            ht_next_list_array, b)
        ht_list_rewards = list(ht_list_rewards.flatten())
        bestval_ht = np.max(self.result[0] * -1)
        print(f'arm pulled={ht_next_list[:]}; y_best = {bestval_ht}; '
              f'mix={self.mix_used}')
        return ht_list_rewards

    def get_kernel(self, categorical_dims, continuous_dims):
        if self.ARD:
            hp_bounds = np.array([*([[0.0001, 3]] * len(continuous_dims)),
                                  [1e-06, 1]])
        else:
            hp_bounds = np.array([[0.0001, 3], [1e-06, 1]])
        fix_mix_in_this_iter, mix_value, hp_bounds = self.get_mix(hp_bounds)
        k_cat = CategoryOverlapKernel(len(categorical_dims),
                                      active_dims=categorical_dims)
        k_cont = GPy.kern.Matern52(len(continuous_dims),
                                   lengthscale=self.default_cont_lengthscale,
                                   active_dims=continuous_dims, ARD=self.ARD)
        my_kernel = MixtureViaSumAndProduct(
            len(categorical_dims) + len(continuous_dims), k_cat, k_cont,
            mix=mix_value, fix_inner_variances=True,
            fix_mix=fix_mix_in_this_iter)
        return my_kernel, hp_bounds

def CoCaBO_Exps(obj_func, budget, initN=24, trials=40, kernel_mix=0.5, batch=None):
    saving_path = f'data/syntheticFns/{obj_func}/'
    if not os.path.exists(saving_path):
        os.makedirs(saving_path)
    if obj_func == 'func2C':
        f = testFunctions.syntheticFunctions.func2C
        categories = [3, 5]
        bounds = [
            {'name': 'h1', 'type': 'categorical', 'domain': (0, 1, 2)},
            {'name': 'h2', 'type': 'categorical', 'domain': (0, 1, 2, 3, 4)},
            {'name': 'x1', 'type': 'continuous', 'domain': (-1, 1)},
            {'name': 'x2', 'type': 'continuous', 'domain': (-1, 1)},
        ]
    elif obj_func == 'func3C':
        f = testFunctions.syntheticFunctions.func3C
        categories = [3, 5, 4]
        bounds = [
            {'name': 'h1', 'type': 'categorical', 'domain': (0, 1, 2)},
            {'name': 'h2', 'type': 'categorical', 'domain': (0, 1, 2, 3, 4)},
            {'name': 'h3', 'type': 'categorical', 'domain': (0, 1, 2, 3)},
            {'name': 'x1', 'type': 'continuous', 'domain': (-1, 1)},
            {'name': 'x2', 'type': 'continuous', 'domain': (-1, 1)},
        ]
    else:
        raise NotImplementedError
    if batch == 1:
        mabbo = CoCaBO(objfn=f, initN=initN, bounds=bounds, acq_type='LCB',
                       C=categories, kernel_mix=kernel_mix)
    else:
        mabbo = BatchCoCaBO(objfn=f, initN=initN, bounds=bounds, acq_type='LCB',
                            C=categories, kernel_mix=kernel_mix,
                            batch_size=batch)
    mabbo.runTrials(trials, budget, saving_path)

import numpy as np


def DepRound(weights_p, k=1, isWeights=True):
    """[[Algorithms for adversarial bandit problems with multiple plays, by
    T. Uchiya, A. Nakamura and M. Kudo, 2010](http://hdl.handle.net/2115/47057)]
    Figure 5 (page 15) is a very clean presentation of the algorithm.

    - Inputs: :math:`k < K` and weights_p :math:`= (p_1, \\dots, p_K)` such that
      :math:`\\sum_{i=1}^{K} p_i = k` (or :math:`= 1`).
    - Output: A subset of :math:`\\{1,\\dots,K\\}` with exactly :math:`k`
      elements. Each action :math:`i` is selected with probability exactly
      :math:`p_i`.

    Example:

    >>> import numpy as np; import random
    >>> np.random.seed(0); random.seed(0)  # for reproducibility!
    >>> K = 5
    >>> k = 2

    >>> weights_p = [2, 2, 2, 2, 2]  # all equal weights
    >>> DepRound(weights_p, k)
    [3, 4]
    >>> DepRound(weights_p, k)
    [3, 4]
    >>> DepRound(weights_p, k)
    [0, 1]

    >>> weights_p = [10, 8, 6, 4, 2]  # decreasing weights
    >>> DepRound(weights_p, k)
    [0, 4]
    >>> DepRound(weights_p, k)
    [1, 2]
    >>> DepRound(weights_p, k)
    [3, 4]

    >>> weights_p = [3, 3, 0, 0, 3]  # sparse weights
    >>> DepRound(weights_p, k)
    [0, 4]
    >>> DepRound(weights_p, k)
    [0, 4]
    >>> DepRound(weights_p, k)
    [0, 4]
    >>> DepRound(weights_p, k)
    [0, 1]

    - See [[Gandhi et al, 2006](http://dl.acm.org/citation.cfm?id=1147956)]
      for the details.
    """
    p = np.array(weights_p)
    K = len(p)
    assert k < K, 'Error: k = {} should be < K = {}.'.format(k, K)
    if not np.isclose(np.sum(p), 1):
        p = p / np.sum(p)
    assert np.all(0 <= p) and np.all(p <= 1), \
        'Error: the weights (p_1, ..., p_K) = {} should all be 0 <= p_i <= 1.'.format(p)
    assert np.isclose(np.sum(p), 1), \
        'Error: the sum of weights p_1 + ... + p_K should be = 1 (= {}).'.format(np.sum(p))

    possible_ij = [a for a in range(K) if 0 < p[a] < 1]
    while possible_ij:
        # Pick two indices whose probabilities are still strictly inside (0, 1)
        if len(possible_ij) == 1:
            i = np.random.choice(possible_ij, size=1)
            j = i
        else:
            i, j = np.random.choice(possible_ij, size=2, replace=False)
        pi, pj = p[i], p[j]
        assert 0 < pi < 1, \
            'Error: pi = {} (with i = {}) is not 0 < pi < 1.'.format(pi, i)
        assert 0 < pj < 1, \
            'Error: pj = {} (with j = {}) is not 0 < pj < 1.'.format(pj, j)
        assert i != j, \
            'Error: i = {} should be different from j = {}.'.format(i, j)
        # Move probability mass between p_i and p_j, keeping their sum fixed,
        # until at least one of them hits 0 or 1
        alpha, beta = min(1 - pi, pj), min(pi, 1 - pj)
        proba = alpha / (alpha + beta)
        if with_proba(proba):
            pi, pj = pi + alpha, pj - alpha
        else:
            pi, pj = pi - beta, pj + beta
        p[i], p[j] = pi, pj
        possible_ij = [a for a in range(K) if 0 < p[a] < 1]
        if len([a for a in range(K) if np.isclose(p[a], 0)]) == K - k:
            break
    subset = [a for a in range(K) if np.isclose(p[a], 1)]
    if len(subset) < k:
        subset = [a for a in range(K) if not np.isclose(p[a], 0)]
    assert len(subset) == k, \
        'Error: DepRound({}, {}) is supposed to return a set of size {}, but {} has size {}...'.format(weights_p, k, k, subset, len(subset))
    return subset

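# Quick empirical look at DepRound's behaviour (illustrative sketch only):
# count how often each arm shows up in the returned k-subset across repeated
# draws; heavier arms should be included more often.
import numpy as np
np.random.seed(0)
_weights = [10, 8, 6, 4, 2]
_hits = np.zeros(len(_weights))
for _ in range(10000):
    for _a in DepRound(_weights, k=2):
        _hits[_a] += 1
print(_hits / 10000)
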
class AcquisitionFunction(object):
    """Base class for acquisition functions. Used to define the interface."""

    def __init__(self, surrogate=None, verbose=False):
        self.surrogate = surrogate
        self.verbose = verbose

    def evaluate(self, x: np.ndarray, **kwargs) -> np.ndarray:
        raise NotImplementedError

class AcquisitionOnSubspace:

    def __init__(self, acq, free_idx, fixed_vals):
        self.acq = acq
        self.free_idx = free_idx
        self.fixed_vals = fixed_vals

    def evaluate(self, x: np.ndarray, **kwargs):
        x_fixed = [self.fixed_vals] * len(x)
        x_complete = np.hstack((np.vstack(x_fixed), x))
        return self.acq.evaluate(x_complete)

class EI(AcquisitionFunction):
    """Expected improvement acquisition function for a Gaussian model.

    Model should return (mu, var).
    """

    def __init__(self, surrogate: GP, best: np.ndarray, verbose=False):
        self.best = best
        super().__init__(surrogate, verbose)

    def __str__(self) -> str:
        return 'EI'

    def evaluate(self, x: np.ndarray, **kwargs) -> np.ndarray:
        """Evaluates the EI acquisition function.

        Parameters
        ----------
        x
            Input to evaluate the acquisition function at
        """
        if self.verbose:
            print('Evaluating EI at', x)
        mu, var = self.surrogate.predict(np.atleast_2d(x))
        var = np.clip(var, 1e-08, np.inf)
        s = np.sqrt(var)
        gamma = (self.best - mu) / s
        return (s * gamma * norm.cdf(gamma) + s * norm.pdf(gamma)).flatten()

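# The closed form above is EI(x) = s * (gamma * Phi(gamma) + phi(gamma)) with
# gamma = (best - mu) / s, i.e. E[max(best - Y, 0)] for Y ~ N(mu, s^2) in a
# minimisation setting. A standalone Monte Carlo cross-check of that formula
# (illustrative sketch; the numbers are made up):
import numpy as np
from scipy.stats import norm
_mu, _s, _best = 1.0, 0.5, 0.8
_gamma = (_best - _mu) / _s
_closed_form = _s * (_gamma * norm.cdf(_gamma) + norm.pdf(_gamma))
_samples = np.random.default_rng(0).normal(_mu, _s, 1_000_000)
_monte_carlo = np.maximum(_best - _samples, 0).mean()
print(_closed_form, _monte_carlo)  # the two should agree to ~3 decimals
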
class PI(AcquisitionFunction):
    """Probability of improvement acquisition function for a Gaussian model.

    Model should return (mu, var).
    """

    def __init__(self, surrogate: GP, best: np.ndarray, tradeoff: float,
                 verbose=False):
        self.best = best
        self.tradeoff = tradeoff
        super().__init__(surrogate, verbose)

    def __str__(self) -> str:
        return f'PI-{self.tradeoff}'

    def evaluate(self, x, **kwargs) -> np.ndarray:
        """Evaluates the PI acquisition function.

        Parameters
        ----------
        x
            Input to evaluate the acquisition function at
        """
        if self.verbose:
            print('Evaluating PI at', x)
        mu, var = self.surrogate.predict(x)
        var = np.clip(var, 1e-08, np.inf)
        s = np.sqrt(var)
        gamma = (self.best - mu - self.tradeoff) / s
        return norm.cdf(gamma).flatten()

class UCB(AcquisitionFunction):
    """Upper confidence bound acquisition function for a Gaussian model.

    Model should return (mu, var).
    """

    def __init__(self, surrogate: GP, tradeoff: float, verbose=False):
        self.tradeoff = tradeoff
        super().__init__(surrogate, verbose)

    def __str__(self) -> str:
        return f'UCB-{self.tradeoff}'

    def evaluate(self, x, **kwargs) -> np.ndarray:
        """Evaluates the UCB acquisition function.

        Parameters
        ----------
        x
            Input to evaluate the acquisition function at
        """
        if self.verbose:
            print('Evaluating UCB at', x)
        mu, var = self.surrogate.predict(x)
        var = np.clip(var, 1e-08, np.inf)
        s = np.sqrt(var)
        # Negated lower confidence bound (mu - tradeoff * s): maximising this
        # acquisition favours low posterior mean and high uncertainty
        return -(mu - self.tradeoff * s).flatten()

class AsyncBayesianOptimization(BayesianOptimisation):
    """Async Bayesian optimization class.

    Performs Bayesian optimization with a set number of busy and free workers.

    Parameters
    ----------
    sampler : Callable
        function handle returning sample from expensive function being
        optimized

    surrogate : basic_gp.GP
        (GP) model that models the surface of 'objective'

    bounds : ndarray
        bounds of each dimension of x as a Dx2 vector (default [0, 1])

    async_interface : ExecutorBase
        Interface that deals with exchange of information between
        async workers and the BO loop

    batch_size : int
        How many tasks to suggest in one go. This will wait for the
        required number of workers to become free before evaluating the batch

    acq_dict : acquisition.AcquisitionFunction
        Defaults to EI

    starting_jobs : list(dicts)
        list of dicts in the form {'x': np.ndarray, 'f': callable, 't': float}

    optimise_surrogate_model : bool
        Whether to optimise the surrogate model after each BayesOpt iteration

    track_cond_k : bool
        Whether to keep track of cond(K) of the surrogate model across
        BayesOpt iterations

    y_min_opt_params : dict
        opt_params dict with the following fields:

        - method = 'standard', 'multigrad', 'direct'
        - n_direct_evals = for direct
        - num_restarts = for multigrad

    acq_opt_params : dict
        opt_params dict with the following fields:

        - method = 'multigrad', 'direct'
        - n_direct_evals = for direct
        - num_restarts = for multigrad

    n_bo_steps : int
        Number of BayesOpt steps

    min_acq : float
        cut-off threshold for acquisition function
    """

    def __init__(self, sampler: Callable, surrogate: GP, bounds: np.ndarray,
                 async_interface: ExecutorBase = None,
                 starting_jobs: Optional[list] = None, **kwargs):
        self.starting_jobs = starting_jobs
        self.interface = async_interface
        super().__init__(sampler, surrogate, bounds, **kwargs)

    def _initialise_bo_df(self):
        """Initialise the DataFrame for keeping track of the BO run."""
        self.df = pd.DataFrame(columns=[
            'ii', 't', 'y_min', 'x_min', 'n_busy', 'x_busy', 'n_data',
            'model_x', 'model_y', 'model_param_array', 'acq_at_sample',
            'requested_x_sample', 'x_sample', 'y_sample',
            'time_taken_opt_surrogate', 'time_taken_find_y_min',
            'time_taken_get_next', 'time_taken_bo_step', 'var_at_y_min',
            'cond_k'])
        self.x_min, self.y_min, self.var_at_y_min = self._get_y_min()
        if self.starting_jobs is not None:
            x_busy = np.vstack([job['x'] for job in self.starting_jobs])
        else:
            x_busy = None
        starting_record = {
            'ii': -1,
            'iteration': 0,
            't': self.interface.status['t'],
            'y_min': self.y_min,
            'x_min': self.x_min,
            'n_busy': self.interface.n_busy_workers,
            'x_busy': x_busy,
            'n_free': self.interface.n_free_workers,
            'n_data': len(self.surrogate.X),
            'model_x': self.surrogate.X,
            'model_y': self.surrogate.Y,
            'model_param_array': self.surrogate.param_array,
            'acq_at_sample': np.nan,
            'requested_x_sample': np.nan,
            'y_sample': np.nan,
            'x_sample': np.nan,
            'time_taken_opt_surrogate': np.nan,
            'time_taken_find_y_min': np.nan,
            'time_taken_get_next': np.nan,
            'time_taken_bo_step': np.nan,
            'var_at_y_min': self.var_at_y_min,
            'cond_k': np.nan,
        }
        self.df = self.df.append([starting_record], sort=True)

    def _update_bo_df(self, x_batch, acq_at_x_best, new_sample_x, new_sample_y,
                      time_dict):
        """Updates the local dataframe with the current iteration's data.

        Parameters
        ----------
        x_batch
            Best location to sample at
        acq_at_x_best
            Acquisition function value at x_best
        new_sample_x
            actual sample received
        new_sample_y
            actual sample received
        time_dict
            time taken for different parts of the algo in seconds
        """
        current_record = {
            'ii': self.curr_bo_step,
            't': self.interface.status['t'],
            'iteration': self.curr_bo_step + 1,
            'y_min': self.y_min,
            'x_min': self.x_min,
            'n_busy': self.interface.n_busy_workers,
            'x_busy': self.interface.get_array_of_running_jobs(),
            'n_free': self.interface.n_free_workers,
            'n_data': len(self.surrogate.X),
            'model_x': self.surrogate.X,
            'model_y': self.surrogate.Y,
            'model_param_array': self.surrogate.param_array,
            'acq_at_sample': acq_at_x_best,
            'requested_x_sample': x_batch,
            'y_sample': new_sample_y,
            'x_sample': new_sample_x,
            'time_taken_opt_surrogate': time_dict['time_taken_opt_surrogate'],
            'time_taken_find_y_min': time_dict['time_taken_find_y_min'],
            'time_taken_get_next': time_dict['time_taken_get_next'],
            'time_taken_bo_step': time_dict['time_taken_bo_step'],
            'var_at_y_min': self.var_at_y_min,
            'cond_k': (self.cond_k_hist[self.curr_bo_step]
                       if self.track_cond_k else None),
        }
        self.df = self.df.append([current_record], sort=True)

    def run(self):
        """Run the Async BayesOpt loop."""
        t_starting_run = time.time()
        if self.verbose:
            print('Started BayesOpt.run()')
        self._initialise_bo_df()
        if self.starting_jobs is not None:
            for job in self.starting_jobs:
                self.interface.add_job_to_queue(job)
        for self.curr_bo_step in range(0, self.n_bo_steps):
            new_sample_x, new_sample_y = None, None
            t_beginning_of_bo_step = time.time()
            if self.verbose:
                print('**--** Starting BayesOpt iteration {}/{} **--**'.format(
                    self.curr_bo_step + 1, self.n_bo_steps))
            # Wait until enough workers are free, then fold any finished
            # evaluations back into the surrogate
            self.interface.run_until_n_free(self.batch_size)
            n_free_workers = self.interface.status['n_free_workers']
            completed_jobs = self.interface.get_completed_jobs()
            if len(completed_jobs) > 0:
                new_sample_x, new_sample_y = \
                    self._add_completed_jobs_to_surrogate(completed_jobs)
            assert n_free_workers >= self.batch_size

            t_before_opt_surrogate = time.time()
            self.optimize_surrogate_if_needed()
            t_after_opt_surrogate = time.time()

            t_before_find_y_min = time.time()
            self.x_min, self.y_min, self.var_at_y_min = self._get_y_min()
            t_after_find_y_min = t_before_get_next = time.time()

            if self.verbose:
                print('Selecting next point(s)...')
            x_batch, acq_at_x_batch = self.get_next()
            t_after_get_next = t_end_of_bo_step = time.time()

            time_taken_dict = {
                'time_taken_opt_surrogate':
                    t_after_opt_surrogate - t_before_opt_surrogate,
                'time_taken_find_y_min':
                    t_after_find_y_min - t_before_find_y_min,
                'time_taken_get_next':
                    t_after_get_next - t_before_get_next,
                'time_taken_bo_step':
                    t_end_of_bo_step - t_beginning_of_bo_step,
            }

            if self.create_plots:
                self.plot_step(x_batch=x_batch)

            jobs = []
            for ii in range(len(x_batch)):
                job = {'x': x_batch[ii], 'f': self.sampler}
                jobs.append(job)
            self.interface.add_job_to_queue(jobs)

            self.save_history(None)
            if self.curr_bo_step == self.n_bo_steps - 1:
                if self.verbose > 1:
                    print('Used up budget.')
                    print('Minimum at',
                          self.surrogate.X[np.argmin(self.surrogate.Y)])
            self._update_bo_df(x_batch, acq_at_x_batch, new_sample_x,
                               new_sample_y, time_taken_dict)
            sys.stdout.flush()
        if self.verbose:
            print(f'Completed BO exp in {round(time.time() - t_starting_run, 2)}s')

    def get_next(self):
        """Finds the next point(s) to sample at.

        Returns
        -------
        x_best : np.ndarray
            Location to sample at
        acq_at_x_best : float
            Value of the acquisition function at the sampling locations
        """
        raise NotImplementedError

    def _add_completed_jobs_to_surrogate(self, completed_jobs):
        x = []
        y = []
        for job in completed_jobs:
            x.append(job['x'])
            y.append(job['y'])
        x = np.vstack(x)
        y = np.vstack(y)
        self._update_surrogate_with_new_data(x, y)
        return x, y

    def plot_step(self, x_batch=None, save_plots=None, **kwargs):
        if save_plots is None:
            save_plots = self.save_plots
        if isinstance(x_batch, list):
            x_batch = np.vstack(x_batch)
        fig, axes = super().plot_step(x_batch, external_call=True)
        acq = self._create_acq_function()
        if len(self.bounds) == 1:
            x_busy = self.interface.get_array_of_running_jobs()
            if x_busy is not None:
                axes[0].plot(x_busy, self.surrogate.predict(x_busy)[0],
                             'g*', label='Busy', markersize=16)
                axes[1].plot(x_busy, acq.evaluate(x_busy),
                             'g*', label='Busy', markersize=16)
            axes[0].legend(numpoints=1)
            axes[1].legend(numpoints=1)
        if save_plots:
            self.save_plots_to_disk(fig)
        else:
            fig.show()
        return fig, axes

class AsyncBOHeuristicQEI(AsyncBayesianOptimization):
    """Async BO with approximate q-EI.

    q-EI is approximated by sequentially finding the best location and
    setting its y-value using one of Ginsbourger's heuristics until the
    batch is full.
    """

    def __init__(self, sampler, surrogate, bounds,
                 async_infill_strategy='kriging_believer', **kwargs):
        from utils.ml_utils.models.additive_gp import GPWithSomeFixedDimsAtStart
        if async_infill_strategy is None:
            self.async_infill_strategy = 'constant_liar_min'
        else:
            self.async_infill_strategy = async_infill_strategy
        if isinstance(surrogate, GPWithSomeFixedDimsAtStart):
            self.mabbo = True
        elif isinstance(surrogate, GP):
            self.mabbo = False
        else:
            raise NotImplementedError
        super().__init__(sampler, surrogate, bounds, **kwargs)

    def get_next(self):
        """Finds the next point(s) to sample at.

        This function interacts with the async interface to get info about
        completed and running jobs and computes the next point(s) to add
        to the queue based on the batch size.

        Returns
        -------
        x_best : np.ndarray
            Location to sample at
        acq_at_x_best : float
            Value of the acquisition function at the sampling locations
        """
        old_surrogate_x = self.surrogate.X
        old_surrogate_y = self.surrogate.Y_raw
        x_busy = self.interface.get_array_of_running_jobs()
        if self.mabbo:
            fixed_dim_vals = self.surrogate.fixed_dim_vals
        else:
            fixed_dim_vals = None
        # Hallucinate y-values at the busy locations so the acquisition
        # accounts for pending evaluations
        surrogate_x_with_fake, surrogate_y_with_fake = \
            add_hallucinations_to_x_and_y(self, old_surrogate_x,
                                          old_surrogate_y, x_busy,
                                          fixed_dim_vals=fixed_dim_vals)
        self.surrogate.set_XY(X=surrogate_x_with_fake, Y=surrogate_y_with_fake)
        acq = self._create_acq_function()
        x_best, acq_at_x_best = self._optimise_acq_func(acq)
        x_batch = [x_best]
        acq_at_each_x_batch = [acq_at_x_best]
        if self.batch_size > 1:
            for ii in range(self.batch_size - 1):
                current_surrogate_x = self.surrogate.X
                current_surrogate_y = self.surrogate.Y_raw
                surrogate_x_with_fake, surrogate_y_with_fake = \
                    add_hallucinations_to_x_and_y(self, current_surrogate_x,
                                                  current_surrogate_y, x_batch,
                                                  fixed_dim_vals=fixed_dim_vals)
                self.surrogate.set_XY(X=surrogate_x_with_fake,
                                      Y=surrogate_y_with_fake)
                acq = self._create_acq_function()
                x_best, acq_at_x_best = self._optimise_acq_func(acq)
                x_batch.append(x_best)
                acq_at_each_x_batch.append(acq_at_x_best)
        # Restore the surrogate's real data
        self.surrogate.set_XY(X=old_surrogate_x, Y=old_surrogate_y)
        assert len(x_batch) == self.batch_size
        return x_batch, acq_at_each_x_batch

class BatchBOHeuristic(AsyncBOHeuristicQEI):
    pass

class ExecutorBase:
    """Base interface for interaction with multiple parallel workers.

    The simulator and real async interfaces will subclass this class so that
    their interfaces are the same.

    Main way to interact with this object is to queue jobs using
    add_job_to_queue(), to wait until the desired number of jobs have
    completed using run_until_n_free() and to get the results via
    get_completed_jobs().

    Parameters
    ----------
    n_workers : int
        Number of workers allowed

    verbose
        Verbosity

    Attributes
    ----------
    n_workers : int
        Total number of workers

    n_free_workers : int
        Number of workers without allocated jobs

    n_busy_workers : int
        Number of workers currently executing a job
    """

    def __init__(self, n_workers: int, verbose: bool = False):
        self.verbose = verbose
        self.n_workers = n_workers
        self.n_free_workers = n_workers
        self.n_busy_workers = 0
        self._queue = []
        self._running_tasks = []
        self._completed_tasks = []

    @property
    def age(self) -> float:
        raise NotImplementedError

    @property
    def is_running(self) -> bool:
        all_tasks_todo = len(self._queue) + len(self._running_tasks)
        return all_tasks_todo > 0

    @property
    def status(self) -> Dict:
        """Get current state (counts) of async workers.

        Returns
        -------
        Dict
            Fields are 'n_free_workers', 'n_busy_workers', 'n_completed_tasks',
            'n_queue', 't', 'is_running'.
        """
        status = {'n_free_workers': self.n_free_workers,
                  'n_busy_workers': self.n_busy_workers,
                  'n_completed_tasks': len(self._completed_tasks),
                  'n_queue': len(self._queue),
                  't': self.age,
                  'is_running': self.is_running}
        if self.verbose:
            print(f'{self.__class__.__name__}.status: {status}')
        return status

    def _validate_job(self, job: dict) -> None:
        assert 'x' in job.keys()
        assert 'f' in job.keys()
        assert callable(job['f'])

    def run_until_n_free(self, n_desired_free_workers) -> None:
        """Run the simulator until a desired number of workers are free.

        Parameters
        ----------
        n_desired_free_workers: int
        """
        raise NotImplementedError

    def run_until_empty(self) -> None:
        """Run the simulator until all jobs are completed."""
        raise NotImplementedError

    def add_job_to_queue(self, job: Union[Dict, List]) -> None:
        """Add a job (or a list of jobs) to the queue.

        Parameters
        ----------
        job : dict
            Dictionary with a job definition that is passed to a worker.

            Structure:

            {
                'x': location of sample,
                'f': function executing the sample,
            }
        """
        if self.verbose:
            print(f'{self.__class__.__name__}.queue_job: queuing job: {job}')
        if isinstance(job, list):
            for j in job:
                self._queue.append(j)
        else:
            self._queue.append(job)
        self._update_internal_state()

    def _update_internal_state(self) -> None:
        """Main function that takes care of moving jobs to the correct places
        and setting statuses and counts."""
        raise NotImplementedError

    def get_completed_jobs(self) -> List:
        """Get the completed tasks and clear the internal list.

        Returns
        -------
        list
            List with dicts of the completed tasks
        """
        if self.verbose:
            print(f'{self.__class__.__name__}.get_completed_jobs: '
                  f'Getting completed jobs')
        out = self._completed_tasks
        self._completed_tasks = []
        return out

    def get_array_of_running_jobs(self) -> np.ndarray:
        """Get a numpy array with each busy location in a row.

        Returns
        -------
        numpy array of the busy locations stacked vertically
        """
        list_of_jobs = self.get_list_of_running_jobs()
        if len(list_of_jobs) > 0:
            x_busy = np.vstack([job['x'] for job in list_of_jobs])
        else:
            x_busy = None
        return x_busy

    def get_list_of_running_jobs(self) -> List:
        """Get the currently-running tasks.

        Returns
        -------
        List with dicts of the currently-running tasks
        """
        if self.verbose:
            print(f'{self.__class__.__name__}.get_running_jobs')
        return self._running_tasks

class JobExecutor(ExecutorBase):
    """Async controller that interacts with external async function calls.

    Will be used to run ML algorithms in parallel for synch and async BO.

    Functions that run must take in a job dict and return the same
    job dict with the result ['y'] and runtime ['t'].
    """

    def __init__(self, n_workers: int, polling_frequency=0.5, verbose=False):
        super().__init__(n_workers, verbose=verbose)
        self._creation_time = time.time()
        self._polling_delay = polling_frequency
        self._executor = futures.ProcessPoolExecutor(n_workers)
        self._futures = []

    @property
    def age(self) -> float:
        return time.time() - self._creation_time

    def run_until_n_free(self, n_desired_free_workers) -> None:
        """Wait until a desired number of workers are free.

        Parameters
        ----------
        n_desired_free_workers: int
        """
        if self.verbose:
            print(f'{self.__class__.__name__}.run_until_free({n_desired_free_workers})')
        while self.n_free_workers < n_desired_free_workers:
            time.sleep(self._polling_delay)
            self._update_internal_state()

    def run_until_empty(self) -> None:
        """Run the simulator until all jobs are completed."""
        if self.verbose:
            print(f'{self.__class__.__name__}.run_until_empty()')
        while self.n_free_workers < self.n_workers:
            time.sleep(self._polling_delay)
            self._update_internal_state()

    def _update_internal_state(self) -> None:
        """Setting internal counts."""
        self._clean_up_completed_processes()
        self._begin_jobs_if_workers_free()
        self.n_free_workers = self.n_workers - len(self._running_tasks)
        self.n_busy_workers = len(self._running_tasks)

    def _clean_up_completed_processes(self) -> None:
        """Remove completed jobs from the current processes and save results."""
        if len(self._futures) > 0:
            idx_complete = np.where([not f.running() for f in self._futures])[0]
            # Pop from the back so the earlier indices stay valid
            for ii in np.sort(idx_complete)[::-1]:
                f_complete = self._futures.pop(ii)
                complete_job_dict = self._running_tasks.pop(ii)
                complete_job_dict['y'] = f_complete.result()
                self._completed_tasks.append(complete_job_dict)

    def _begin_jobs_if_workers_free(self) -> None:
        """If workers are free, start a job from the queue."""
        while len(self._futures) < self.n_workers and len(self._queue) > 0:
            self._futures.append(self._submit_job_to_executor(0))

    def _submit_job_to_executor(self, index) -> futures.Future:
        """Submits the chosen job from the queue to the executor.

        Parameters
        ----------
        index
            Index in the queue of the job to be executed

        Returns
        -------
        Future object of the submitted job
        """
        job = self._queue.pop(index)
        self._validate_job(job)
        self._running_tasks.append(job)
        future = self._executor.submit(job['f'], job['x'])
        return future

class JobExecutorInSeries(JobExecutor):
    """Interface that runs the jobs in series but acts like a batch-running
    interface to the outside.

    self._futures is not a list of futures any more. This is a placeholder
    for the jobs that have yet to run to complete the batch.
    """

    def __init__(self, n_workers: int, polling_frequency=0.5, verbose=False):
        super().__init__(n_workers, polling_frequency=polling_frequency,
                         verbose=verbose)
        self._executor = futures.ProcessPoolExecutor(1)

    def _clean_up_completed_processes(self) -> None:
        """Remove completed jobs from the current processes and save results."""
        if len(self._futures) > 0:
            # future.done() is True once the submitted job has finished
            is_complete = self._futures[0].done()
            if is_complete:
                f_complete = self._futures.pop(0)
                complete_job_dict = self._running_tasks.pop(0)
                complete_job_dict['y'] = f_complete.result()
                self._completed_tasks.append(complete_job_dict)

    def _begin_jobs_if_workers_free(self) -> None:
        """If workers are free, start a job from the queue."""
        if len(self._futures) == 0:
            if len(self._running_tasks) > 0:
                job = self._running_tasks[0]
                self._validate_job(job)
                self._futures.append(self._executor.submit(job['f'], job['x']))
            else:
                while (len(self._queue) > 0 and
                       len(self._running_tasks) < self.n_workers):
                    self._running_tasks.append(self._queue.pop(0))

class JobExecutorInSeriesBlocking(ExecutorBase):
    """Interface that runs the jobs in series and blocks execution of code
    until it's done.
    """

    def __init__(self, n_workers: int, verbose=False):
        super().__init__(n_workers, verbose=verbose)
        self._creation_time = time.time()

    def run_until_n_free(self, n_desired_free_workers) -> None:
        """Run the simulator until a desired number of workers are free.

        Parameters
        ----------
        n_desired_free_workers: int
        """
        while self.n_free_workers < n_desired_free_workers:
            self.run_next()

    def run_until_empty(self) -> None:
        """Run the simulator until all jobs are completed."""
        while self.n_free_workers < self.n_workers:
            self.run_next()

    def _update_internal_state(self):
        while (len(self._running_tasks) < self.n_workers and
               len(self._queue) > 0):
            self._running_tasks.append(self._queue.pop(0))
        self.n_busy_workers = len(self._running_tasks)
        self.n_free_workers = self.n_workers - self.n_busy_workers

    def run_next(self):
        self._move_tasks_from_queue_to_running()
        if len(self._running_tasks) > 0:
            job = self._running_tasks.pop(0)
            self._validate_job(job)
            result = job['f'](job['x'])
            job['y'] = result
            self._completed_tasks.append(job)
        self._update_internal_state()

    @property
    def age(self):
        return time.time() - self._creation_time

    def _move_tasks_from_queue_to_running(self):
        while (len(self._running_tasks) < self.n_workers and
               len(self._queue) > 0):
            self._running_tasks.append(self._queue.pop(0))

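# Minimal usage sketch of the blocking executor (the job functions here are
# made-up lambdas; they work because this executor calls 'f' in-process rather
# than submitting it to a process pool):
_interface = JobExecutorInSeriesBlocking(n_workers=2)
_interface.add_job_to_queue([{'x': 1.0, 'f': lambda x: x ** 2},
                             {'x': 3.0, 'f': lambda x: x ** 2}])
_interface.run_until_empty()
for _job in _interface.get_completed_jobs():
    print(_job['x'], _job['y'])  # -> 1.0 1.0  /  3.0 9.0
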
def add_hallucinations_to_x_and_y(bo, old_x, old_y, x_new,
                                  fixed_dim_vals=None) -> Tuple[np.ndarray, np.ndarray]:
    """Add hallucinations to the data arrays.

    Parameters
    ----------
    bo
        Instance of BayesianOptimisation providing the surrogate and the
        async infill strategy
    old_x
        Current x values
    old_y
        Current y values
    x_new
        Locations at which to use the async infill procedure. If x_new
        is None, then nothing happens and the x and y arrays are returned

    Returns
    -------
    augmented_x (np.ndarray), augmented_y (list or np.ndarray)
    """
    if x_new is None:
        x_out = old_x
        y_out = old_y
    else:
        if isinstance(x_new, list):
            x_new = np.vstack(x_new)
        if fixed_dim_vals is not None:
            if fixed_dim_vals.ndim == 1:
                # Repeat the fixed (categorical) values for each new row
                fixed_dim_vals = np.vstack([fixed_dim_vals] * len(x_new))
            assert len(fixed_dim_vals) == len(x_new)
            x_new = np.hstack((fixed_dim_vals, x_new))
        x_out = np.vstack((old_x, x_new))
        fake_y = make_hallucinated_data(bo, x_new, bo.async_infill_strategy)
        y_out = np.vstack((old_y, fake_y))
    return x_out, y_out

def make_hallucinated_data(bo, x: np.ndarray, strat: str) -> np.ndarray:
    """Returns fake y-values based on the chosen heuristic.

    Parameters
    ----------
    bo
        Instance of BayesianOptimization

    x
        Used to get the value for the kriging believer. Otherwise, this
        sets the number of values returned

    strat
        string describing the type of hallucinated data. Choices are:
        'constant_liar_min', 'constant_liar_median', 'kriging_believer',
        'posterior_simple', 'posterior_full'

    Returns
    -------
    y : np.ndarray
        Values for the desired heuristic
    """
    if strat == 'constant_liar_min':
        if x is None:
            y = np.atleast_2d(bo.y_min)
        else:
            y = np.array([bo.y_min] * len(x)).reshape(-1, 1)
    elif strat == 'constant_liar_median':
        # Note: this branch currently mirrors 'constant_liar_min' and also
        # lies with bo.y_min rather than a median
        if x is None:
            y = np.atleast_2d(bo.y_min)
        else:
            y = np.array([bo.y_min] * len(x)).reshape(-1, 1)
    elif strat == 'kriging_believer':
        # Lie with the surrogate's posterior mean at x
        y = bo.surrogate.predict(x)[0]
    elif strat == 'posterior_simple':
        mu, var = bo.surrogate.predict(x)
        y = np.random.multivariate_normal(
            mu.flatten(), np.diag(var.flatten())).reshape(-1, 1)
    elif strat == 'posterior_full':
        mu, var = bo.surrogate.predict(x, full_cov=True)
        y = np.random.multivariate_normal(mu.flatten(), var).reshape(-1, 1)
    else:
        raise NotImplementedError
    return y

import random


def draw(weights):
    # Sample an index proportionally to the (unnormalised) weights
    choice = random.uniform(0, sum(weights))
    choiceIndex = 0
    for weight in weights:
        choice -= weight
        if choice <= 0:
            return choiceIndex
        choiceIndex += 1

def distr(weights, gamma=0.0):
    # Exp3 arm distribution: mix the normalised weights with the uniform
    # distribution, controlled by gamma
    theSum = float(sum(weights))
    return tuple((1.0 - gamma) * (w / theSum) + gamma / len(weights)
                 for w in weights)

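# distr() computes p_i = (1 - gamma) * w_i / sum(w) + gamma / K. Quick check
# (illustrative only) that it stays a probability distribution and that
# gamma=1 gives the uniform distribution:
_probs = distr([10, 8, 6, 4, 2], gamma=0.1)
assert abs(sum(_probs) - 1.0) < 1e-12
print(distr([10, 8, 6, 4, 2], gamma=1.0))  # (0.2, 0.2, 0.2, 0.2, 0.2)
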
def mean(aList):
    theSum = 0
    count = 0
    for x in aList:
        theSum += x
        count += 1
    return 0 if count == 0 else theSum / count

from random import random


def with_proba(epsilon):
    r"""Bernoulli test: with probability :math:`\varepsilon`, return `True`,
    and with probability :math:`1 - \varepsilon`, return `False`.

    Example:

    >>> from random import seed; seed(0)  # reproducible
    >>> with_proba(0.5)
    False
    >>> with_proba(0.9)
    True
    >>> with_proba(0.1)
    False
    >>> if with_proba(0.2):
    ...     print("This happens 20% of the time.")
    """
    assert 0 <= epsilon <= 1, (
        "Error: for 'with_proba(epsilon)', epsilon = {:.3g} has to be between "
        '0 and 1 to be a valid probability.').format(epsilon)
    return random() < epsilon

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True)
    parser.add_argument('--config-args')
    parser.add_argument('--section', required=True)
    parser.add_argument('--inferred', required=True)
    parser.add_argument('--output')
    parser.add_argument('--logdir')
    parser.add_argument('--evaluate-beams-individually', action='store_true')
    args = parser.parse_args()

    real_logdir, metrics = evaluation.compute_metrics(
        args.config, args.config_args, args.section, args.inferred, args.logdir,
        evaluate_beams_individually=args.evaluate_beams_individually)

    if args.output:
        if real_logdir:
            output_path = args.output.replace('__LOGDIR__', real_logdir)
        else:
            output_path = args.output
        with open(output_path, 'w') as f:
            json.dump(metrics, f)
        print('Wrote eval results to {}'.format(output_path))
    else:
        print(metrics)

def main():
    all_commands = []
    all_eval_commands = []
    for filt, st, nt in itertools.product(('none', 'contains-hole'),
                                          ('cov-xent', 'cov-examples'),
                                          (10, 20, 40, 80)):
        steps = list(range(1100, 40000, 1000)) + [40000]
        logdir = ('logdirs/20190425-django-allmatches-anysplit-multimean/'
                  'filt-{filt}_st-{st}_nt-{nt}').format(filt=filt, st=st, nt=nt)
        for step in steps:
            infer_command = (
                'python infer.py '
                '--config configs/django-idioms/nl2code-0425-allmatches-anysplit-multimean.jsonnet '
                '--logdir {logdir} '
                '--config-args "{{filt: \'{filt}\', st: \'{st}\', nt: {nt}}}" '
                '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl '
                '--step {step} --section val --beam-size 1').format(
                logdir=logdir, step=step, filt=filt, st=st, nt=nt)
            print(infer_command)

def main():
    for filt, st, nt in itertools.product(('none', 'contains-hole'),
                                          ('cov-xent', 'cov-examples'),
                                          (10, 20, 40, 80)):
        steps = list(range(100, 2600, 100))
        args = "{{filt: '{filt}', st: '{st}', nt: {nt}}}".format(
            filt=filt, st=st, nt=nt)
        logdir = os.path.join(
            'logdirs/20190201-hs-allmatches-anysplit-multimean',
            'filt-{filt}_st-{st}_nt-{nt}'.format(filt=filt, st=st, nt=nt))
        for step in steps:
            if not os.path.exists(os.path.join(
                    logdir, 'model_checkpoint-{:08d}'.format(step))):
                continue
            if os.path.exists(os.path.join(
                    logdir, 'infer-val-20191031-step{:05d}-bs1.jsonl'.format(step))):
                continue
            infer_command = (
                'python infer.py --config configs/hearthstone-idioms/nl2code-0201-allmatches-anysplit-multimean.jsonnet '
                '--logdir {logdir} --config-args "{args}" '
                '--output __LOGDIR__/infer-val-20191031-step{step:05d}-bs1.jsonl '
                '--step {step} --section val --beam-size 1').format(
                logdir=logdir, args=args, step=step)
            print(infer_command)

def main():
    for filt, st, nt in itertools.product(('none', 'contains-hole'),
                                          ('cov-xent', 'cov-examples'),
                                          (10, 20, 40, 80)):
        steps = list(range(100, 2600, 100))
        args = "{{filt: '{filt}', st: '{st}', nt: {nt}}}".format(
            filt=filt, st=st, nt=nt)
        logdir = os.path.join(
            'logdirs/20190201-hs-allmatches-anysplit-multimean',
            'filt-{filt}_st-{st}_nt-{nt}'.format(filt=filt, st=st, nt=nt))
        for step in steps:
            if not os.path.exists(os.path.join(
                    logdir, 'model_checkpoint-{:08d}'.format(step))):
                continue
            if os.path.exists(os.path.join(
                    logdir, 'infer-val-step{:05d}-bs1.jsonl'.format(step))):
                continue
            infer_command = (
                'python infer.py --config configs/hearthstone-idioms/nl2code-0201-allmatches-anysplit-multimean.jsonnet '
                '--logdir {logdir} --config-args "{args}" '
                '--output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl '
                '--step {step} --section val --beam-size 1').format(
                logdir=logdir, args=args, step=step)
            print(infer_command)

def main():
    for att in (0, 1):
        steps = list(range(100, 2600, 100))
        logdir = os.path.join('logdirs/20181231-nl2code-hearthstone-fef2c5b',
                              'att{}'.format(att))
        for step in steps:
            if not os.path.exists(os.path.join(
                    logdir, 'model_checkpoint-{:08d}'.format(step))):
                continue
            if os.path.exists(os.path.join(
                    logdir, 'infer-val-step{:05d}-bs1.jsonl'.format(step))):
                continue
            infer_command = (
                'python infer.py --config configs/hearthstone/nl2code.jsonnet '
                '--logdir {logdir} '
                '--output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl '
                '--step {step} --section val --beam-size 1').format(
                logdir=logdir, step=step)
            print(infer_command)

def main():
    for output_from, upd_steps in itertools.product(('true', 'false'), (0, 1, 2)):
        job_name = 'output_from={},upd_steps={}'.format(output_from, upd_steps)
        commands = []
        for qenc, ctenc, tinc in itertools.product(('e', 'eb'),
                                                   ('e', 'eb', 'ebs'),
                                                   ('true', 'false')):
            commands.append(
                'python train.py --config configs/spider-20190205/nl2code-0214.jsonnet '
                '--config-args "{{output_from: {}, qenc: \'{}\', ctenc: \'{}\', '
                'tinc: {}, upd_steps: {}}}" --logdir logdirs/20190214'.format(
                    output_from, qenc, ctenc, tinc, upd_steps))
        f = open('experiments/PBS_20190214_{}.sh'.format(job_name), 'w')
        f.write(TEMPLATE.format(job_name=job_name, env_name='seq2s',
                                num_jobs=len(commands),
                                base_dir=os.path.realpath(os.getcwd())))
        for i, cmd in enumerate(commands):
            f.write('if [[ $PBS_ARRAY_INDEX == {i} ]]; then {cmd}; fi\n'.format(
                i=i + 1, cmd=cmd))
        f.close()

def main():
    job_name = 'spider_att3'
    commands = []
    for output_from, upd_steps in itertools.product(('true', 'false'), (0, 1, 2)):
        for qenc, ctenc, tinc in itertools.product(('e', 'eb'),
                                                   ('e', 'eb', 'ebs'),
                                                   ('true', 'false')):
            if (output_from, qenc, ctenc, tinc, upd_steps) not in (
                    ('false', 'eb', 'eb', 'true', 2),
                    ('true', 'eb', 'ebs', 'false', 0),
                    ('true', 'eb', 'ebs', 'false', 2),
                    ('true', 'eb', 'ebs', 'true', 2),
                    ('true', 'e', 'e', 'true', 1)):
                continue
            commands.append(
                'python train.py --config configs/spider-20190205/nl2code-0214.jsonnet '
                '--config-args "{{output_from: {}, qenc: \'{}\', ctenc: \'{}\', '
                'tinc: {}, upd_steps: {}}}" --logdir logdirs/20190214'.format(
                    output_from, qenc, ctenc, tinc, upd_steps))
    f = open('experiments/PBS_20190214_{}.sh'.format(job_name), 'w')
    f.write(TEMPLATE.format(job_name=job_name, env_name='seq2s',
                            num_jobs=len(commands),
                            base_dir=os.path.realpath(os.getcwd())))
    for i, cmd in enumerate(commands):
        f.write('if [[ $PBS_ARRAY_INDEX == {i} ]]; then {cmd}; fi\n'.format(
            i=i + 1, cmd=cmd))
    f.close()

def main():
    for output_from, upd_steps in itertools.product(('true', 'false'), (0, 1, 2)):
        job_name = 'output_from={},upd_steps={}'.format(output_from, upd_steps)
        commands = []
        for qenc, ctenc, tinc, step in itertools.product(
                ('e', 'eb'), ('e', 'eb', 'ebs'), ('true', 'false'),
                list(range(2100, 40000, 2000)) + [40000]):
            model_commands = []
            logdir = ('logdirs/20190214/output_from={output_from},qenc={qenc},'
                      'ctenc={ctenc},tinc={tinc},upd_steps={upd_steps}').format(
                output_from=output_from, qenc=qenc, ctenc=ctenc, tinc=tinc,
                upd_steps=upd_steps)
            model_commands.append(
                ('python infer.py --config configs/spider-20190205/nl2code-0214.jsonnet '
                 '--config-args "{{output_from: {output_from}, '
                 "qenc: '{qenc}', ctenc: '{ctenc}', "
                 'tinc: {tinc}, upd_steps: {upd_steps}}}" '
                 '--logdir logdirs/20190214 '
                 '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl '
                 '--step {step} --section val --beam-size 1').format(
                    step=step, output_from=output_from, qenc=qenc, ctenc=ctenc,
                    tinc=tinc, upd_steps=upd_steps, logdir=logdir))
            model_commands.append(
                ('python eval.py --config configs/spider-20190205/nl2code-0214.jsonnet '
                 '--config-args "{{output_from: {output_from}, '
                 "qenc: '{qenc}', ctenc: '{ctenc}', "
                 'tinc: {tinc}, upd_steps: {upd_steps}}}" '
                 '--inferred {logdir}/infer-val-step{step:05d}-bs1.jsonl '
                 '--output {logdir}/eval-val-step{step:05d}-bs1.jsonl '
                 '--section val').format(
                    step=step, output_from=output_from, qenc=qenc, ctenc=ctenc,
                    tinc=tinc, upd_steps=upd_steps, logdir=logdir))
            commands.append(' && '.join(model_commands))
        f = open('experiments/PBS_20190214_eval_{}.sh'.format(job_name), 'w')
        f.write(TEMPLATE.format(job_name=job_name, env_name='seq2s',
                                num_jobs=len(commands),
                                base_dir=os.path.realpath(os.getcwd())))
        for i, cmd in enumerate(commands):
            f.write('if [[ $PBS_ARRAY_INDEX == {i} ]]; then {cmd}; fi\n'.format(
                i=i + 1, cmd=cmd))
        f.close()

def main():
    for output_from, upd_steps in itertools.product(('true', 'false'),
                                                    (3, 4, 5, 6)):
        job_name = 'output_from={},upd_steps={}'.format(output_from, upd_steps)
        commands = []
        for (qenc, ctenc), max_steps, batch_size in itertools.product(
                (('e', 'e'), ('eb', 'ebs')), ('40000', '80000'), ('10', '20')):
            commands.append(
                'python train.py --config configs/spider-20190205/nl2code-0220.jsonnet '
                '--config-args "{{output_from: {}, qenc: \'{}\', ctenc: \'{}\', '
                'upd_steps: {}, max_steps: {}, batch_size: {}}}" '
                '--logdir logdirs/20190214'.format(
                    output_from, qenc, ctenc, upd_steps, max_steps, batch_size))
        f = open('experiments/PBS_20190220_{}.sh'.format(job_name), 'w')
        f.write(TEMPLATE.format(job_name=job_name, env_name='seq2s',
                                num_jobs=len(commands),
                                base_dir=os.path.realpath(os.getcwd())))
        for i, cmd in enumerate(commands):
            f.write('if [[ $PBS_ARRAY_INDEX == {i} ]]; then {cmd}; fi\n'.format(
                i=i + 1, cmd=cmd))
        f.close()

def main():
    for output_from, upd_steps in itertools.product(('true', 'false'),
                                                    (2, 3, 4, 5, 6)):
        job_name = 'output_from={},upd_steps={}'.format(output_from, upd_steps)
        commands = []
        for (qenc, ctenc), max_steps, batch_size in itertools.product(
                (('e', 'e'), ('eb', 'ebs')), ('40000', '80000'), ('10', '20')):
            commands.append(
                'python train.py --config configs/spider-20190205/nl2code-0220.jsonnet '
                '--config-args "{{output_from: {}, qenc: \'{}\', ctenc: \'{}\', '
                'upd_steps: {}, max_steps: {}, batch_size: {}}}" '
                '--logdir logdirs/20190223'.format(
                    output_from, qenc, ctenc, upd_steps, max_steps, batch_size))
        f = open('experiments/PBS_20190223_{}.sh'.format(job_name), 'w')
        f.write(TEMPLATE.format(job_name=job_name, env_name='seq2s',
                                num_jobs=len(commands),
                                base_dir=os.path.realpath(os.getcwd())))
        for i, cmd in enumerate(commands):
            f.write('if [[ $PBS_ARRAY_INDEX == {i} ]]; then {cmd}; fi\n'.format(
                i=i + 1, cmd=cmd))
        f.close()

def main():
    for output_from, upd_steps in itertools.product(('true', 'false'),
                                                    (2, 3, 4, 5, 6)):
        job_name = 'output_from={},upd_steps={}'.format(output_from, upd_steps)
        commands = []
        for (qenc, ctenc), max_steps, batch_size in itertools.product(
                (('e', 'e'), ('eb', 'ebs')), ('40000', '80000'), ('10', '20')):
            commands.append(
                'python train.py --config configs/spider-20190205/nl2code-0220.jsonnet '
                '--config-args "{{output_from: {}, qenc: \'{}\', ctenc: \'{}\', '
                'upd_steps: {}, max_steps: {}, batch_size: {}}}" '
                '--logdir logdirs/20190223'.format(
                    output_from, qenc, ctenc, upd_steps, max_steps, batch_size))
        f = open('experiments/PBS_20190223_nobatch_retry_{}.sh'.format(job_name), 'w')
        f.write(TEMPLATE.format(job_name=job_name, env_name='seq2s',
                                num_jobs=len(commands),
                                base_dir=os.path.realpath(os.getcwd())))
        for i, cmd in enumerate(commands):
            f.write('if [[ $PBS_ARRAY_INDEX == {i} ]]; then {cmd}; fi\n'.format(
                i=i + 1, cmd=cmd))
        f.close()

def main():
    all_commands = []
    all_eval_commands = []
    for output_from, upd_steps in itertools.product(('true', 'false'),
                                                    (2, 3, 4, 5, 6)):
        job_name = 'e0223,{},{}'.format(output_from, upd_steps)
        commands = []
        total = 2 * 2 * (20 + 40)
        i = 0
        for (qenc, ctenc), max_steps, batch_size in itertools.product(
                (('e', 'e'), ('eb', 'ebs')), ('40000', '80000'), ('10', '20')):
            if max_steps == '40000':
                steps = list(range(2100, 40000, 2000)) + [40000]
            elif max_steps == '80000':
                steps = list(range(2100, 80000, 2000)) + [80000]
            for step in steps:
                logdir = ('logdirs/20190223/output_from={output_from},'
                          'qenc={qenc},ctenc={ctenc},upd_steps={upd_steps},'
                          'max_steps={max_steps},batch_size={batch_size}').format(
                    output_from=output_from, qenc=qenc, ctenc=ctenc,
                    upd_steps=upd_steps, max_steps=max_steps,
                    batch_size=batch_size)
                model_commands = []
                model_commands.append(
                    ('python infer.py --config configs/spider-20190205/nl2code-0220.jsonnet '
                     '--config-args "{{output_from: {output_from}, '
                     "qenc: '{qenc}', ctenc: '{ctenc}', "
                     'upd_steps: {upd_steps}, max_steps: {max_steps}, '
                     'batch_size: {batch_size}}}" '
                     '--logdir logdirs/20190223 '
                     '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl '
                     '--step {step} --section val --beam-size 1').format(
                        step=step, output_from=output_from, qenc=qenc,
                        ctenc=ctenc, upd_steps=upd_steps, max_steps=max_steps,
                        batch_size=batch_size, logdir=logdir))
                eval_command = (
                    'python eval.py --config configs/spider-20190205/nl2code-0220.jsonnet '
                    '--config-args "{{output_from: {output_from}, '
                    "qenc: '{qenc}', ctenc: '{ctenc}', "
                    'upd_steps: {upd_steps}, max_steps: {max_steps}, '
                    'batch_size: {batch_size}}}" '
                    '--inferred {logdir}/infer-val-step{step:05d}-bs1.jsonl '
                    '--output {logdir}/eval-val-step{step:05d}-bs1.jsonl '
                    '--section val').format(
                    step=step, output_from=output_from, qenc=qenc, ctenc=ctenc,
                    upd_steps=upd_steps, max_steps=max_steps,
                    batch_size=batch_size, logdir=logdir)
                model_commands.append(eval_command)
                all_eval_commands.append(eval_command)
                commands.append(' && '.join(model_commands))
                commands.append('echo Finished {}/{}'.format(i, total))
                all_commands.append(' && '.join(model_commands))
                i += 1
        f = open('experiments/PBS_eval_20190223_nobatch_retry_{}.sh'.format(job_name), 'w')
        f.write(TEMPLATE.format(job_name=job_name, env_name='seq2s',
                                base_dir=os.path.realpath(os.getcwd())))
        for i, cmd in enumerate(commands):
            f.write('{cmd}\n'.format(cmd=cmd))
        f.close()
    with open('experiments/eval_20190223_nobatch_retry_all_eval_commands.txt', 'w') as f:
        f.write('\n'.join(all_eval_commands))
    with open('experiments/eval_20190223_nobatch_retry_all_commands.txt', 'w') as f:
        f.write('\n'.join(all_commands) + '\n')

def main(): all_commands = [] all_eval_commands = [] for (output_from, max_steps, batch_size) in itertools.product(('true', 'false'), (40000, 80000), (10, 20)): if (max_steps == 40000): steps = (list(range(2100, 40000, 2000)) + [40000]) elif (max_steps == 80000): steps = (list(range(2100, 80000, 2000)) + [80000]) else: raise ValueError(max_steps) for (upd_tied, upd_type) in (('false', 'no_subtypes'), ('false', 'merge_types'), ('true', 'full')): for step in steps: logdir = ('logdirs/20190302/output_from=%(output_from)s,upd_steps=%(upd_steps)d,upd_type=%(upd_type)s,upd_tied=%(upd_tied)s,max_steps=%(max_steps)d,batch_size=%(batch_size)d' % dict(output_from=output_from, upd_steps=4, upd_type=upd_type, upd_tied=upd_tied, max_steps=max_steps, batch_size=batch_size)) infer_command = (((((('python infer.py --config configs/spider-20190205/nl2code-0302.jsonnet ' + '--config-args "{{output_from: {output_from}, ') + "upd_steps: 4, upd_type: '{upd_type}', upd_tied: {upd_tied}, max_steps: {max_steps}, ") + 'batch_size: {batch_size}}}" ') + '--logdir logdirs/20190302 ') + '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--step {step} --section val --beam-size 1').format(step=step, output_from=output_from, upd_type=upd_type, upd_tied=upd_tied, max_steps=max_steps, batch_size=batch_size, logdir=logdir) eval_command = (((((('python eval.py --config configs/spider-20190205/nl2code-0302.jsonnet ' + '--config-args "{{output_from: {output_from}, ') + "upd_steps: 4, upd_type: '{upd_type}', upd_tied: {upd_tied}, max_steps: {max_steps}, ") + 'batch_size: {batch_size}}}" ') + '--inferred {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--output {logdir}/eval-val-step{step:05d}-bs1.jsonl ') + '--section val').format(step=step, output_from=output_from, upd_type=upd_type, upd_tied=upd_tied, max_steps=max_steps, batch_size=batch_size, logdir=logdir) print('{} && {}'.format(infer_command, eval_command))
def main(): all_commands = [] all_eval_commands = [] for (max_steps, batch_size) in itertools.product((40000, 80000), (10, 20)): if (max_steps == 40000): steps = (list(range(2100, 40000, 2000)) + [40000]) elif (max_steps == 80000): steps = (list(range(2100, 80000, 2000)) + [80000]) else: raise ValueError(max_steps) for upd_steps in (0, 4): for step in steps: logdir = ('logdirs/20190315-sketch/upd_steps=%(upd_steps)d,max_steps=%(max_steps)d,batch_size=%(batch_size)d' % dict(upd_steps=upd_steps, max_steps=max_steps, batch_size=batch_size)) infer_command = (((((('python infer.py --config configs/spider-20190205/nl2code-0315-sketch.jsonnet ' + '--config-args "{{upd_steps: {upd_steps}, ') + 'max_steps: {max_steps}, ') + 'batch_size: {batch_size}}}" ') + '--logdir logdirs/20190315-sketch ') + '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--step {step} --section val --beam-size 1').format(step=step, upd_steps=upd_steps, max_steps=max_steps, batch_size=batch_size, logdir=logdir) eval_command = (((((('python eval.py --config configs/spider-20190205/nl2code-0315-sketch.jsonnet ' + '--config-args "{{upd_steps: {upd_steps}, ') + 'max_steps: {max_steps}, ') + 'batch_size: {batch_size}}}" ') + '--inferred {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--output {logdir}/eval-val-step{step:05d}-bs1.jsonl ') + '--section val').format(step=step, upd_steps=upd_steps, max_steps=max_steps, batch_size=batch_size, logdir=logdir) print(eval_command)
def main(): all_commands = [] all_eval_commands = [] for (output_from, att) in itertools.product(('false', 'true'), (0, 1, 2, 3)): steps = (list(range(1100, 40000, 1000)) + [40000]) for step in steps: logdir = ('logdirs/20190327/rerun,output_from=%(output_from)s,att=%(att)d' % dict(output_from=output_from, att=att)) infer_command = ((((('python infer.py --config configs/spider-20190205/nl2code-0327.jsonnet ' + '--config-args "{{output_from: {output_from}, ') + 'att: {att}}}" ') + '--logdir logdirs/20190327 ') + '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--step {step} --section val --beam-size 1').format(step=step, output_from=output_from, att=att, logdir=logdir) eval_command = ((((('python eval.py --config configs/spider-20190205/nl2code-0327.jsonnet ' + '--config-args "{{output_from: {output_from}, ') + 'att: {att}}}" ') + '--inferred {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--output {logdir}/eval-val-step{step:05d}-bs1.jsonl ') + '--section val').format(step=step, output_from=output_from, att=att, logdir=logdir) print('{} && {}'.format(infer_command, eval_command))
def main(): all_commands = [] all_eval_commands = [] for (output_from, emb, min_freq) in itertools.product(('false', 'true'), ('glove-42B', 'bpemb-10k', 'bpemb-100k'), (3, 50)): if ((min_freq == 50) and (emb != 'glove-42B')): continue steps = (list(range(1100, 40000, 1000)) + [40000]) for step in steps: logdir = ('logdirs/20190401/output_from=%(output_from)s,emb=%(emb)s,min_freq=%(min_freq)d,att=0' % dict(output_from=output_from, emb=emb, min_freq=min_freq)) infer_command = ((((('python infer.py --config configs/spider-20190205/nl2code-0401.jsonnet ' + '--config-args "{{output_from: {output_from}, ') + 'emb: \'{emb}\', min_freq: {min_freq}, att: 0}}" ') + '--logdir logdirs/20190401 ') + '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--step {step} --section val --beam-size 1').format(step=step, output_from=output_from, emb=emb, min_freq=min_freq, logdir=logdir) eval_command = ((((('python eval.py --config configs/spider-20190205/nl2code-0401.jsonnet ' + '--config-args "{{output_from: {output_from}, ') + 'emb: \'{emb}\', min_freq: {min_freq}, att: 0}}" ') + '--inferred {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--output {logdir}/eval-val-step{step:05d}-bs1.jsonl ') + '--section val').format(step=step, output_from=output_from, emb=emb, min_freq=min_freq, logdir=logdir) print('{} && {}'.format(infer_command, eval_command))
def main(): all_commands = [] all_eval_commands = [] for (output_from, emb, min_freq) in itertools.product(('false', 'true'), ('glove-42B', 'bpemb-10k', 'bpemb-100k'), (3, 50)): if ((min_freq == 50) and (emb != 'glove-42B')): continue steps = (list(range(1100, 40000, 1000)) + [40000]) for step in steps: logdir = ('logdirs/20190402/output_from=%(output_from)s,emb=%(emb)s,min_freq=%(min_freq)d,att=0' % dict(output_from=output_from, emb=emb, min_freq=min_freq)) infer_command = ((((('python infer.py --config configs/spider-20190205/nl2code-0402.jsonnet ' + '--config-args "{{output_from: {output_from}, ') + 'emb: \'{emb}\', min_freq: {min_freq}, att: 0}}" ') + '--logdir logdirs/20190402 ') + '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--step {step} --section val --beam-size 1').format(step=step, output_from=output_from, emb=emb, min_freq=min_freq, logdir=logdir) eval_command = ((((('python eval.py --config configs/spider-20190205/nl2code-0402.jsonnet ' + '--config-args "{{output_from: {output_from}, ') + 'emb: \'{emb}\', min_freq: {min_freq}, att: 0}}" ') + '--inferred {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--output {logdir}/eval-val-step{step:05d}-bs1.jsonl ') + '--section val').format(step=step, output_from=output_from, emb=emb, min_freq=min_freq, logdir=logdir) print('{} && {}'.format(infer_command, eval_command))
def main(): all_commands = [] all_eval_commands = [] for (att, enc_size, dec_size) in itertools.product((0, 1), (256, 512), (256, 512)): steps = (list(range(1100, 40000, 1000)) + [40000]) for step in steps: logdir = ('logdirs/20190420/output_from=false,enc_size=%(enc_size)d,dec_size=%(dec_size)d,att=%(att)d' % dict(att=att, enc_size=enc_size, dec_size=dec_size)) infer_command = ((((('python infer.py --config configs/spider-20190205/nl2code-0420.jsonnet ' + '--config-args "{{output_from: false, ') + 'enc_size: {enc_size}, dec_size: {dec_size}, att: {att}}}" ') + '--logdir logdirs/20190420 ') + '--output {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--step {step} --section val --beam-size 1').format(step=step, att=att, enc_size=enc_size, dec_size=dec_size, logdir=logdir) eval_command = ((((('python eval.py --config configs/spider-20190205/nl2code-0420.jsonnet ' + '--config-args "{{output_from: false, ') + 'enc_size: {enc_size}, dec_size: {dec_size}, att: {att}}}" ') + '--inferred {logdir}/infer-val-step{step:05d}-bs1.jsonl ') + '--output {logdir}/eval-val-step{step:05d}-bs1.jsonl ') + '--section val').format(step=step, att=att, enc_size=enc_size, dec_size=dec_size, logdir=logdir) print('{} && {}'.format(infer_command, eval_command))
def main(): all_commands = [] all_eval_commands = [] for (att, fixed) in itertools.product((0, 1, 2, 3), (['init'], ['data', 'model'])): steps = (list(range(1100, 40000, 1000)) + [40000]) for step in steps: infer_command = (((('python infer.py --config configs/spider-20190205/nl2code-0428-random.jsonnet ' + '--logdir logdirs/20190428-random ') + '--config-args "{{fixed: {fixed}, att: {att}}}" ') + '--output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl ') + '--step {step} --section val --beam-size 1').format(step=step, fixed=fixed, att=att) eval_command = ((((('python eval.py --config configs/spider-20190205/nl2code-0428-random.jsonnet ' + '--logdir logdirs/20190428-random ') + '--config-args "{{fixed: {fixed}, att: {att}}}" ') + '--inferred __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl ') + '--output __LOGDIR__/eval-val-step{step:05d}-bs1.jsonl ') + '--section val').format(step=step, fixed=fixed, att=att) print('{} && {}'.format(infer_command, eval_command))
def main(): parser = argparse.ArgumentParser() parser.add_argument('--beam-size', type=int, default=1) script_args = parser.parse_args() for ((bs, lr, end_lr), att) in itertools.product(((50, 0.001, 0),), (0, 1, 2)): steps = (list(range(1100, 40000, 1000)) + [40000]) args = '{{bs: {bs}, lr: {lr}, end_lr: {end_lr}, att: {att}}}'.format(bs=bs, lr=lr, end_lr=end_lr, att=att) config = json.loads(_jsonnet.evaluate_file('configs/spider-20190205/nl2code-0428-stability.jsonnet', tla_codes={'args': args})) logdir = os.path.join('logdirs/20190428-stability', config['model_name']) for step in steps: if (not os.path.exists(os.path.join(logdir, 'model_checkpoint-{:08d}'.format(step)))): continue if os.path.exists(os.path.join(logdir, 'eval-val-step{:05d}-bs{}.jsonl'.format(step, script_args.beam_size))): continue infer_command = ('python infer.py --config configs/spider-20190205/nl2code-0428-stability.jsonnet --logdir logdirs/20190428-stability --config-args "{args}" --output __LOGDIR__/infer-val-step{step:05d}-bs{beam_size}.jsonl ' + '--step {step} --section val --beam-size {beam_size}').format(step=step, args=args, beam_size=script_args.beam_size) eval_command = (('python eval.py --config configs/spider-20190205/nl2code-0428-stability.jsonnet --logdir logdirs/20190428-stability --config-args "{args}" --inferred __LOGDIR__/infer-val-step{step:05d}-bs{beam_size}.jsonl ' + '--output __LOGDIR__/eval-val-step{step:05d}-bs{beam_size}.jsonl ') + '--section val').format(step=step, args=args, beam_size=script_args.beam_size) print('{} && {}'.format(infer_command, eval_command))
def main(): all_commands = [] all_eval_commands = [] for (lr, wd, decay, att) in itertools.product((0.001, 0.0001), (0, 0.01), ('cosine', 'linear'), (0, 1)): steps = (list(range(1100, 20000, 500)) + [20000]) for step in steps: infer_command = ((('python infer.py --config configs/spider-20190205/nl2code-0518-opt.jsonnet ' + '--logdir logdirs/20190518-opt ') + '--config-args "{{lr: {lr}, wd: {wd}, decay: \'{decay}\', att: {att}}}" ') + '--output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl --step {step} --section val --beam-size 1').format(step=step, lr=lr, wd=wd, decay=decay, att=att) eval_command = 'python eval.py --config configs/spider-20190205/nl2code-0518-opt.jsonnet --logdir logdirs/20190518-opt --config-args "{{lr: {lr}, wd: {wd}, decay: \'{decay}\', att: {att}}}" --inferred __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl --output __LOGDIR__/eval-val-step{step:05d}-bs1.jsonl --section val'.format(step=step, lr=lr, wd=wd, decay=decay, att=att) print('{} && {}'.format(infer_command, eval_command))
def main(): parser = argparse.ArgumentParser() parser.add_argument('--beam-size', type=int, default=1) script_args = parser.parse_args() for ((glove, upd_type, num_layers), att) in itertools.product(((False, 'full', 4), (True, 'no_subtypes', 4), (True, 'merge_types', 4), (True, 'full', 2), (True, 'full', 0)), (0, 1, 2)): steps = (list(range(1100, 40000, 1000)) + [40000]) args = "{{glove: {glove}, upd_type: '{upd_type}', num_layers: {num_layers}, att: {att}}}".format(glove=('true' if glove else 'false'), upd_type=upd_type, num_layers=num_layers, att=att) config = json.loads(_jsonnet.evaluate_file('configs/spider-20190205/nl2code-0521-ablations.jsonnet', tla_codes={'args': args})) logdir = os.path.join('logdirs/20190521-ablations', config['model_name']) for step in steps: if (not os.path.exists(os.path.join(logdir, 'model_checkpoint-{:08d}'.format(step)))): continue if os.path.exists(os.path.join(logdir, 'eval-val-step{:05d}-bs{}.jsonl'.format(step, script_args.beam_size))): continue infer_command = (('python infer.py --config configs/spider-20190205/nl2code-0521-ablations.jsonnet --logdir logdirs/20190521-ablations --config-args "{args}" ' + '--output __LOGDIR__/infer-val-step{step:05d}-bs{beam_size}.jsonl ') + '--step {step} --section val --beam-size {beam_size}').format(args=args, step=step, beam_size=script_args.beam_size) eval_command = ((((('python eval.py --config configs/spider-20190205/nl2code-0521-ablations.jsonnet ' + '--logdir logdirs/20190521-ablations ') + '--config-args "{args}" ') + '--inferred __LOGDIR__/infer-val-step{step:05d}-bs{beam_size}.jsonl ') + '--output __LOGDIR__/eval-val-step{step:05d}-bs{beam_size}.jsonl ') + '--section val').format(args=args, step=step, beam_size=script_args.beam_size) print('{} && {}'.format(infer_command, eval_command))
def main(): all_commands = [] all_eval_commands = [] for (st, nt, att) in itertools.product(('cov-xent', 'cov-examples'), (10, 20, 40, 80), (0,)): steps = (list(range(1100, 40000, 1000)) + [40000]) for step in steps: infer_command = 'python infer.py --config configs/spider-idioms/nl2code-0513.jsonnet --logdir logdirs/spider-idioms/nl2code-0513 --config-args "{{st: \'{st}\', nt: {nt}, att: {att}}}" --output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl --step {step} --section val --beam-size 1'.format(step=step, st=st, nt=nt, att=att) eval_command = 'python eval.py --config configs/spider-idioms/nl2code-0513.jsonnet --logdir logdirs/spider-idioms/nl2code-0513 --config-args "{{st: \'{st}\', nt: {nt}, att: {att}}}" --inferred __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl --output __LOGDIR__/eval-val-step{step:05d}-bs1.jsonl --section val'.format(step=step, st=st, nt=nt, att=att) print('{} && {}'.format(infer_command, eval_command))
def main(): for (st, nt, att) in itertools.product(('cov-xent', 'cov-examples'), (40, 80), (0,)): steps = (list(range(1100, 40000, 1000)) + [40000]) args = "{{st: '{st}', nt: {nt}, att: {att}}}".format(st=st, nt=nt, att=att) config = json.loads(_jsonnet.evaluate_file('configs/spider-idioms/nl2code-0518.jsonnet', tla_codes={'args': args})) logdir = os.path.join('logdirs/spider-idioms/nl2code-0518', config['model_name']) for step in steps: if (not os.path.exists(os.path.join(logdir, 'model_checkpoint-{:08d}'.format(step)))): continue if os.path.exists(os.path.join(logdir, 'eval-val-step{:05d}-bs1.jsonl'.format(step))): continue infer_command = 'python infer.py --config configs/spider-idioms/nl2code-0518.jsonnet --logdir logdirs/spider-idioms/nl2code-0518 --config-args "{args}" --output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl --step {step} --section val --beam-size 1'.format(args=args, step=step) eval_command = 'python eval.py --config configs/spider-idioms/nl2code-0518.jsonnet --logdir logdirs/spider-idioms/nl2code-0518 --config-args "{args}" --inferred __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl --output __LOGDIR__/eval-val-step{step:05d}-bs1.jsonl --section val'.format(args=args, step=step) print('{} && {}'.format(infer_command, eval_command))
def maybe_slice(iterable, start, end): if ((start is not None) or (end is not None)): iterable = itertools.islice(iterable, start, end) return iterable
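# A minimal sketch of maybe_slice behaviour (toy inputs, not part of the
# original pipeline): when both bounds are None the iterable is returned
# untouched; otherwise it is lazily sliced with itertools.islice.
assert list(maybe_slice(range(10), None, None)) == list(range(10))
assert list(maybe_slice(range(10), 2, 5)) == [2, 3, 4]
assert list(maybe_slice(range(10), None, 3)) == [0, 1, 2]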
class Inferer:
    def __init__(self, config):
        self.config = config
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
            torch.set_num_threads(1)
        self.model_preproc = registry.instantiate(
            registry.lookup('model', config['model']).Preproc, config['model'])
        self.model_preproc.load()

    def load_model(self, logdir, step):
        'Load a model (identified by the config used for construction) and return it'
        model = registry.construct(
            'model', self.config['model'], preproc=self.model_preproc, device=self.device)
        model.to(self.device)
        model.eval()
        model.visualize_flag = False
        optimizer = registry.construct(
            'optimizer', self.config['optimizer'], params=model.parameters())
        saver = saver_mod.Saver(model, optimizer)
        last_step = saver.restore(logdir, step=step, map_location=self.device)
        if not last_step:
            raise Exception('Attempting to infer on untrained model')
        return model

    def infer(self, model, output_path, args):
        output = open(output_path, 'w')
        orig_data = registry.construct('dataset', self.config['data'][args.section])
        sliced_orig_data = maybe_slice(orig_data, args.start_offset, args.limit)
        preproc_data = self.model_preproc.dataset(args.section)
        sliced_preproc_data = maybe_slice(preproc_data, args.start_offset, args.limit)
        with torch.no_grad():
            if args.mode == 'infer':
                assert len(orig_data) == len(preproc_data)
                self._inner_infer(model, args.beam_size, args.output_history,
                                  sliced_orig_data, sliced_preproc_data, output, args.nproc)
            elif args.mode == 'debug':
                self._debug(model, sliced_orig_data, output)
            elif args.mode == 'visualize_attention':
                model.visualize_flag = True
                model.decoder.visualize_flag = True
                self._visualize_attention(model, args.beam_size, args.output_history,
                                          sliced_orig_data, args.res1, args.res2, args.res3,
                                          output)

    def _inner_infer(self, model, beam_size, output_history,
                     sliced_orig_data, sliced_preproc_data, output, nproc):
        list_items = [(idx, oi, pi)
                      for idx, (oi, pi) in enumerate(zip(sliced_orig_data, sliced_preproc_data))]
        cp = parallelizer.CPUParallelizer(nproc)
        params = [(beam_size, output_history, index, orig_item, preproc_item)
                  for index, orig_item, preproc_item in list_items]
        write_all(output,
                  cp.parallel_map([(functools.partial(self._infer_single, model), params)]))

    def _infer_single(self, model, param):
        beam_size, output_history, index, orig_item, preproc_item = param
        try:
            beams = beam_search.beam_search(
                model, orig_item, preproc_item, beam_size=beam_size, max_steps=1000)
            decoded = []
            for beam in beams:
                model_output, inferred_code = beam.inference_state.finalize()
                decoded.append({
                    'model_output': model_output,
                    'inferred_code': inferred_code,
                    'score': beam.score,
                    **({'choice_history': beam.choice_history,
                        'score_history': beam.score_history} if output_history else {})})
            result = {'index': index, 'beams': decoded}
        except Exception as e:
            result = {'index': index, 'error': str(e)}
        return json.dumps(result) + '\n'

    def _debug(self, model, sliced_data, output):
        for i, item in enumerate(tqdm.tqdm(sliced_data)):
            (_, history), = model.compute_loss([item], debug=True)
            output.write(json.dumps({'index': i, 'history': history}) + '\n')
            output.flush()

    def _visualize_attention(self, model, beam_size, output_history, sliced_data,
                             res1file, res2file, res3file, output):
        res1 = json.load(open(res1file, 'r'))['per_item']
        res2 = json.load(open(res2file, 'r'))['per_item']
        res3 = json.load(open(res3file, 'r'))['per_item']
        interest_cnt = 0
        cnt = 0
        # Counts 'extra' hardness items where at least one of the three runs got
        # an exact match. The per-item visualization was already disabled in the
        # original (left as a bare string literal) and is preserved as comments:
        for i, item in enumerate(tqdm.tqdm(sliced_data)):
            if res1[i]['hardness'] != 'extra':
                continue
            cnt += 1
            if res1[i]['exact'] == 0 and res2[i]['exact'] == 0 and res3[i]['exact'] == 0:
                continue
            interest_cnt += 1
            # print('sample index: ', i)
            # beams = beam_search.beam_search(model, item, beam_size=beam_size,
            #                                 max_steps=1000, visualize_flag=True)
            # entry = item.orig
            # print('ground truth SQL:', entry['query_toks'])
            # print('prediction:', res2[i])
            # ... finalize each beam and write the decoded JSON to `output` ...
        print(interest_cnt * 1.0 / cnt)
def write_all(output, genexp): for item in genexp: output.write(item) output.flush()
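# Hedged example of write_all with an in-memory buffer (io.StringIO used only
# for illustration): every yielded item is flushed immediately, so a long
# parallel inference run can be tailed while it is still going.
import io

buf = io.StringIO()
write_all(buf, ('line {}\n'.format(i) for i in range(3)))
assert buf.getvalue() == 'line 0\nline 1\nline 2\n'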
def main(): parser = argparse.ArgumentParser() parser.add_argument('--logdir', required=True) parser.add_argument('--config', required=True) parser.add_argument('--config-args') parser.add_argument('--step', type=int) parser.add_argument('--section', required=True) parser.add_argument('--output', required=True) parser.add_argument('--beam-size', required=True, type=int) parser.add_argument('--output-history', action='store_true') parser.add_argument('--start-offset', type=int) parser.add_argument('--limit', type=int) parser.add_argument('--mode', default='infer', choices=['infer', 'debug', 'visualize_attention']) parser.add_argument('--res1', default='outputs/glove-sup-att-1h-0/outputs.json') parser.add_argument('--res2', default='outputs/glove-sup-att-1h-1/outputs.json') parser.add_argument('--res3', default='outputs/glove-sup-att-1h-2/outputs.json') parser.add_argument('--nproc', type=int, default=1) args = parser.parse_args() if args.config_args: config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args})) else: config = json.loads(_jsonnet.evaluate_file(args.config)) if ('model_name' in config): args.logdir = os.path.join(args.logdir, config['model_name']) output_path = args.output.replace('__LOGDIR__', args.logdir) if os.path.exists(output_path): print('Output file {} already exists'.format(output_path)) sys.exit(1) inferer = Inferer(config) model = inferer.load_model(args.logdir, args.step) inferer.infer(model, output_path, args)
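# Example invocation, mirroring the commands emitted by the experiment
# generators above (paths and step number are illustrative):
#   python infer.py --config configs/spider-20190205/nl2code-0401.jsonnet \
#       --config-args "{output_from: true, emb: 'glove-42B', min_freq: 3, att: 0}" \
#       --logdir logdirs/20190401 \
#       --output __LOGDIR__/infer-val-step40000-bs1.jsonl \
#       --step 40000 --section val --beam-size 1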
def find_idiom_checkpoints(): accuracy_per_run = collections.defaultdict(dict) all_metrics = [] metric_types = set() rows = [] for d in sorted(glob.glob('logdirs/20190201-hs-allmatches-anysplit-multimean/*')): exp_name = os.path.basename(d) exp_vars = re.match('filt-([^_]+)_st-([^_]+)_nt-([^_]+)', exp_name).groups() infer_paths = glob.glob(os.path.join(d, 'infer-val-step*-bs1.jsonl')) all_scores = [] for infer_path in infer_paths: step = int(re.search('step(\\d+)', infer_path).group(1)) (_, metrics) = evaluation.compute_metrics('configs/hearthstone/nl2code.jsonnet', '', 'val', infer_path) all_scores.append((step, metrics['exact match'])) all_metrics.append((exp_name, step, metrics)) metric_types.update(metrics.keys()) all_scores.sort(key=operator.itemgetter(0)) sorted_scores = sorted(all_scores, reverse=True, key=operator.itemgetter(1)) rows.append(((exp_vars + (len(all_scores),)) + (sorted_scores[0] if sorted_scores else ((- 1), (- 1))))) accuracy_per_run[exp_name] = {'x': [s[0] for s in all_scores], 'all': [s[1] for s in all_scores]} print(d) metric_types = tuple(sorted(metric_types)) df = pd.DataFrame(rows, columns=('filt', 'cov', 'nt', 'num steps eval', 'step', 'exact match')) flat_df = pd.DataFrame([((exp_name, step) + tuple((metrics.get(t) for t in metric_types))) for (exp_name, step, metrics) in all_metrics], columns=(('exp_name', 'step') + metric_types)) return flat_df
def find_baseline_checkpoints(): accuracy_per_run = collections.defaultdict(dict) all_metrics = [] metric_types = set() rows = [] for d in sorted(glob.glob('logdirs/20181231-nl2code-hearthstone-fef2c5b//*')): exp_name = os.path.basename(d) exp_vars = re.match('att([^_]+)', exp_name).groups() infer_paths = glob.glob(os.path.join(d, 'infer-val-step*-bs1.jsonl')) all_scores = [] for infer_path in infer_paths: step = int(re.search('step(\\d+)', infer_path).group(1)) (_, metrics) = evaluation.compute_metrics('configs/hearthstone/nl2code.jsonnet', '', 'val', infer_path) all_scores.append((step, metrics['exact match'])) all_metrics.append((exp_name, step, metrics)) metric_types.update(metrics.keys()) all_scores.sort(key=operator.itemgetter(0)) sorted_scores = sorted(all_scores, reverse=True, key=operator.itemgetter(1)) rows.append(((exp_vars + (len(all_scores),)) + (sorted_scores[0] if sorted_scores else ((- 1), (- 1))))) accuracy_per_run[exp_name] = {'x': [s[0] for s in all_scores], 'all': [s[1] for s in all_scores]} print(d) metric_types = tuple(sorted(metric_types)) df = pd.DataFrame(rows, columns=('att', 'num steps eval', 'step', 'exact match')) flat_df = pd.DataFrame([((exp_name, step) + tuple((metrics.get(t) for t in metric_types))) for (exp_name, step, metrics) in all_metrics], columns=(('exp_name', 'step') + metric_types)) return flat_df
def count_nodes(tree): queue = collections.deque([tree]) count = 0 while queue: node = queue.pop() count += 1 if isinstance(node, dict): for (k, v) in node.items(): if (k == '_type'): continue if isinstance(v, (list, tuple)): queue.extend(v) else: queue.append(v) return count
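# Sketch of count_nodes on a toy ASDL-style tree mirroring the {'_type': ...}
# dicts used elsewhere in this repo (the exact shape here is made up): the
# '_type' tag is metadata, so the dict itself, its field values, and list
# elements are what get counted -- including leaf values.
toy_tree = {'_type': 'Call',
            'func': {'_type': 'Name', 'id': 'f'},
            'args': [{'_type': 'Num', 'n': 1}]}
assert count_nodes(toy_tree) == 5  # Call, Name, 'f', Num, 1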
def analyze_idiom_usage(infer_history_path, normalized_gold_code): inferred_all = [json.loads(line) for line in open(infer_history_path)] exact_match_all = [((normalized_gold_code[i] == example['beams'][0]['inferred_code']) if example['beams'] else False) for (i, example) in enumerate(inferred_all)] decoding_history_all = {} for example in inferred_all: decoding_history = [] decoding_history_all[example['index']] = decoding_history if (not example['beams']): continue for choice in example['beams'][0]['choice_history']: if isinstance(choice, int): decoding_history.append(all_rules[choice]) else: decoding_history.append(choice) num_idioms_used_all = {} for (i, history) in decoding_history_all.items(): counter = collections.Counter() for choice in history: if (isinstance(choice, list) and isinstance(choice[1], str) and re.match('Template\\d+', choice[1])): counter[tuple(choice)] += 1 num_idioms_used_all[i] = counter num_characters_all = {example['index']: (len(example['beams'][0]['inferred_code']) if example['beams'] else 0) for example in inferred_all} num_lines_all = {example['index']: (example['beams'][0]['inferred_code'].count('\n') if example['beams'] else 0) for example in inferred_all} num_nodes_all = {example['index']: (count_nodes(example['beams'][0]['model_output']) if example['beams'] else 0) for example in inferred_all} idiom_usage_df = pd.DataFrame(collections.OrderedDict((('Number of idioms used', {k: sum(v.values()) for (k, v) in num_idioms_used_all.items()}), ('Number of characters', num_characters_all), ('Number of lines', num_lines_all), ('Number of AST nodes', num_nodes_all), ('Exact match', dict(enumerate(exact_match_all)))))) exact_match_idiom_usage_df = idiom_usage_df[idiom_usage_df['Exact match']] (fig, ax) = plt.subplots() sns.distplot(idiom_usage_df['Number of idioms used'], kde=False, ax=ax, label='All') (fig, ax) = plt.subplots() sns.distplot(exact_match_idiom_usage_df['Number of idioms used'], kde=False, ax=ax, bins=5, label='Exact match') for (x, y) in (('Number of characters', 'Number of idioms used'), ('Number of AST nodes', 'Number of idioms used')): jg = sns.jointplot(x=x, y=y, data=idiom_usage_df) jg.fig.suptitle('All') jg = sns.jointplot(x=x, y=y, data=exact_match_idiom_usage_df) jg.fig.suptitle('Exactly matched')
def analyze_teacher_forced_pr(report, templates, accuracy_ks=(1, 2), precision_ks=(1, 2), recall_ks=(1, 2)): template_match_counts = collections.defaultdict(int) template_choice_ranks = {'all': collections.defaultdict(list), 'templates only': collections.defaultdict(list)} template_valid_choice_ranks = {'all': collections.defaultdict(list), 'templates only': collections.defaultdict(list)} min_valid_ranks = [] for item in report: for entry in item['history']: if (not isinstance(entry['choices'][0], str)): continue all_ranks = {} template_only_ranks = {} template_only_i = 0 for (i, choice) in enumerate(entry['choices']): all_ranks[choice] = i template_only_ranks[choice] = template_only_i if (not re.match('Template(\\d+).*', choice)): template_only_i += 1 min_valid_rank = min((all_ranks[choice] for choice in entry['valid_choices'])) min_valid_ranks.append(min_valid_rank) for choice in entry['choices']: m = re.match('Template(\\d+).*', choice) if (not m): continue template_id = int(m.group(1)) template_choice_ranks['all'][template_id].append(all_ranks[choice]) template_choice_ranks['templates only'][template_id].append(template_only_ranks[choice]) for choice in entry['valid_choices']: m = re.match('Template(\\d+).*', choice) if (not m): continue template_id = int(m.group(1)) template_match_counts[template_id] += 1 template_valid_choice_ranks['all'][template_id].append(all_ranks[choice]) template_valid_choice_ranks['templates only'][template_id].append(template_only_ranks[choice]) min_valid_ranks = np.array(min_valid_ranks) top_k_accuracy = {k: (np.sum((min_valid_ranks < k)) / len(min_valid_ranks)) for k in accuracy_ks} top_k_precision = {type_name: {k: {i: (np.sum((np.array(template_valid_choice_ranks[type_name][i]) < k)) / np.sum((np.array(template_choice_ranks[type_name][i]) < k))) for i in template_match_counts.keys()} for k in precision_ks} for type_name in template_valid_choice_ranks} top_k_recall = {type_name: {k: {i: (np.sum((np.array(ranks) < k)) / len(ranks)) for (i, ranks) in ranks_of_type.items()} for k in recall_ks} for (type_name, ranks_of_type) in template_valid_choice_ranks.items()} accuracy_df = pd.DataFrame({'Accuracy @ {}'.format(k): [top_k_accuracy[k]] for k in accuracy_ks}) pr_df = pd.DataFrame({'Head': {t['id']: t['idiom'][0] for t in templates}, 'Matches': template_match_counts, **{'Precision @ {} {}'.format(k, type_name): top_k_precision[type_name][k] for type_name in top_k_precision.keys() for k in precision_ks}, **{'Recall @ {} {}'.format(k, type_name): top_k_recall[type_name][k] for type_name in top_k_recall.keys() for k in recall_ks}}) return (accuracy_df, pr_df)
def analyze_anysplit_one(name, section): report = [json.loads(line) for line in open('../logdirs/20190201-hs-allmatches-anysplit/{}/debug-{}-step2600.jsonl'.format(name, section))] templates = json.load(open('../data/hearthstone-idioms-20190201/all-matches-trees-anysplit/{}/templates.json'.format(name))) return analyze(report, templates)
def load_inferred(infer_history_path, normalized_gold_code): inferred_all = [json.loads(line) for line in open(infer_history_path)] exact_match_all = [((normalized_gold_code[i] == example['beams'][0]['inferred_code']) if example['beams'] else False) for (i, example) in enumerate(inferred_all)] return (inferred_all, exact_match_all)
def analyze(report, templates, accuracy_ks=(1, 2), precision_ks=(1, 2), recall_ks=(1, 2)): template_match_counts = collections.defaultdict(int) template_choice_ranks = {'all': collections.defaultdict(list), 'templates only': collections.defaultdict(list)} template_valid_choice_ranks = {'all': collections.defaultdict(list), 'templates only': collections.defaultdict(list)} min_valid_ranks = [] for item in report: for entry in item['history']: if (not isinstance(entry['choices'][0], str)): continue all_ranks = {} template_only_ranks = {} template_only_i = 0 for (i, choice) in enumerate(entry['choices']): all_ranks[choice] = i template_only_ranks[choice] = template_only_i if (not re.match('Template(\\d+).*', choice)): template_only_i += 1 min_valid_rank = min((all_ranks[choice] for choice in entry['valid_choices'])) min_valid_ranks.append(min_valid_rank) for choice in entry['choices']: m = re.match('Template(\\d+).*', choice) if (not m): continue template_id = int(m.group(1)) template_choice_ranks['all'][template_id].append(all_ranks[choice]) template_choice_ranks['templates only'][template_id].append(template_only_ranks[choice]) for choice in entry['valid_choices']: m = re.match('Template(\\d+).*', choice) if (not m): continue template_id = int(m.group(1)) template_match_counts[template_id] += 1 template_valid_choice_ranks['all'][template_id].append(all_ranks[choice]) template_valid_choice_ranks['templates only'][template_id].append(template_only_ranks[choice]) min_valid_ranks = np.array(min_valid_ranks) top_k_accuracy = {k: (np.sum((min_valid_ranks < k)) / len(min_valid_ranks)) for k in accuracy_ks} top_k_precision = {type_name: {k: {i: (np.sum((np.array(template_valid_choice_ranks[type_name][i]) < k)) / np.sum((np.array(template_choice_ranks[type_name][i]) < k))) for i in template_match_counts.keys()} for k in precision_ks} for type_name in template_valid_choice_ranks} top_k_recall = {type_name: {k: {i: (np.sum((np.array(ranks) < k)) / len(ranks)) for (i, ranks) in ranks_of_type.items()} for k in recall_ks} for (type_name, ranks_of_type) in template_valid_choice_ranks.items()} accuracy_df = pd.DataFrame({'Accuracy @ {}'.format(k): [top_k_accuracy[k]] for k in accuracy_ks}) pr_df = pd.DataFrame({'Head': {t['id']: t['idiom'][0] for t in templates}, 'Matches': template_match_counts, **{'Precision @ {} {}'.format(k, type_name): top_k_precision[type_name][k] for type_name in top_k_precision.keys() for k in precision_ks}, **{'Recall @ {} {}'.format(k, type_name): top_k_recall[type_name][k] for type_name in top_k_recall.keys() for k in recall_ks}}) return (accuracy_df, pr_df)
def analyze_anysplit(section): for (filt, st, nt) in itertools.product(('none', 'contains-hole'), ('cov-xent', 'cov-examples'), ('10', '20', '40', '80')): name = 'filt-{}_st-{}_nt-{}'.format(filt, st, nt) (acc_df, pr_df) = analyze_anysplit_one(name, section) print(name) print('Templates containing matches: {}'.format(sum((pr_df['Matches'] > 0)))) print('Templates with non-zero rank 1 freq: {}'.format(sum((pr_df['Recall @ 1 templates only'] > 0)))) print()
def analyze_anysplit_multimean_one(name, section): report = [json.loads(line) for line in open('../logdirs/20190201-hs-allmatches-anysplit-multimean/{}/debug-{}-step2600.jsonl'.format(name, section))] templates = json.load(open('../data/hearthstone-idioms-20190201/all-matches-trees-anysplit/{}/templates.json'.format(name))) return analyze(report, templates)
def analyze_anysplit_multimean(section): for (filt, st, nt) in itertools.product(('none', 'contains-hole'), ('cov-xent', 'cov-examples'), ('10', '20', '40', '80')): name = 'filt-{}_st-{}_nt-{}'.format(filt, st, nt) (acc_df, pr_df) = analyze_anysplit_multimean_one(name, section) print(name) print('Templates containing matches: {}'.format(sum((pr_df['Matches'] > 0)))) print('Templates with non-zero rank 1 freq: {}'.format(sum((pr_df['Recall @ 1 templates only'] > 0)))) print()
def compute_accuracy(rows): levels = ['easy', 'medium', 'hard', 'extra', 'all'] total = collections.defaultdict(int) exact = collections.defaultdict(int) for row in rows: exact[row['hardness']] += row['exact'] exact['all'] += row['exact'] total[row['hardness']] += 1 total['all'] += 1 result = {hardness: (exact[hardness] / total[hardness]) for hardness in levels} result = {**result, **{'{} count'.format(hardness): total[hardness] for hardness in levels}} return result
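# Hedged usage sketch with made-up eval rows (real rows come from the
# eval-val-*.jsonl files). Note that every hardness level must occur at least
# once, since the comprehension divides by each bucket's count.
rows = [{'hardness': h, 'exact': e}
        for h, e in [('easy', 1), ('easy', 0), ('medium', 1), ('hard', 0), ('extra', 0)]]
acc = compute_accuracy(rows)
assert acc['easy'] == 0.5 and acc['all'] == 0.4 and acc['easy count'] == 2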
def display_item(item): question_toks = left_preproc[item['i']]['question'] IPython.display.display(IPython.display.HTML('\n <ul>\n <li>Database: <tt>{db_id}</tt></li>\n <li>Question: {question_tok}</li>\n <li>Gold: <tt>{gold}</tt></li>\n <li>Left: <tt>{left}</tt></li>\n <li>Right: <tt>{right}</tt></li>\n </ul>\n '.format(question_tok=' '.join(((tok if (tok in vocab) else '<s>{}</s>'.format(tok)) for tok in question_toks)), **item)))
def compare(data, left, right): both_exact = [] left_exact = [] right_exact = [] neither_exact = [] for (i, (data_item, left_item, right_item)) in enumerate(zip(data, left['per_item'], right['per_item'])): result = {'i': i, 'db_id': data_item['db_id'], 'question': data_item['question'], 'gold': data_item['query'], 'left': left_item['predicted'], 'right': right_item['predicted']} if (left_item['exact'] and right_item['exact']): both_exact.append(result) elif left_item['exact']: left_exact.append(result) elif right_item['exact']: right_exact.append(result) else: neither_exact.append(result) return (both_exact, left_exact, right_exact, neither_exact)
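# Toy end-to-end sketch for compare (hypothetical one-example reports shaped
# like eval.py's 'per_item' output): the four returned lists partition the
# examples by which side achieved an exact match.
dev = [{'db_id': 'concert_singer', 'question': 'How many singers do we have?',
        'query': 'SELECT count(*) FROM singer'}]
left_report = {'per_item': [{'exact': 1, 'predicted': 'SELECT count(*) FROM singer'}]}
right_report = {'per_item': [{'exact': 0, 'predicted': 'SELECT * FROM singer'}]}
both, left_only, right_only, neither = compare(dev, left_report, right_report)
assert len(left_only) == 1 and not (both or right_only or neither)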
def find_columns(query, include_from=False): result = set() queue = collections.deque([query]) while queue: node = queue.popleft() type_info = grammar.ast_wrapper.singular_types[node['_type']] for field in type_info.fields: if ((not include_from) and (field.name == 'from')): continue if (field.name not in node): continue v = node[field.name] if isinstance(v, dict): queue.append(v) elif isinstance(v, list): queue.extend(v) if (field.type == 'column'): result.add(v) return result
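# Usage sketch (not self-contained: 'grammar' is the shared notebook-level
# Spider grammar object). find_columns walks the ASDL tree breadth-first and
# collects the value of every 'column'-typed field, optionally skipping the
# FROM clause so that columns mentioned only there are not counted:
#   tree = grammar.parse(example.code, 'train')
#   used = find_columns(tree, include_from=False)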
def group_by_schema(data): result = collections.defaultdict(list) for example in data: result[example.schema.db_id].append(example) return result
def analyze(data):
    grouped = collections.defaultdict(list)
    grouped_column_usage = collections.defaultdict(list)
    for example in data:
        grouped[example.schema.db_id].append(example)
        grouped_column_usage[example.schema.db_id].append(
            find_columns(grammar.parse(example.code, 'train')))
    for db_id, column_usages in sorted(grouped_column_usage.items()):
        examples = grouped[db_id]
        schema = data.schemas[db_id]
        num_columns = len(schema.columns)
        all_columns = set(range(num_columns))
        used_columns = set.union(*column_usages)
        assert used_columns <= all_columns
        unused_columns = all_columns - used_columns
        grouped_by_query = collections.defaultdict(list)
        for i, example in enumerate(examples):
            grouped_by_query[example.orig['query']].append(i)
        all_example_indices = set(range(len(examples)))
        groups_with_misses = {}
        for query, query_group_indices in grouped_by_query.items():
            other_indices = all_example_indices - set(query_group_indices)
            # Bug fix: the original unioned column_usages[:i] + column_usages[i + 1:]
            # using a stale `i` left over from the loop above; the intent (per
            # `other_indices`, which was computed but never used) is the set of
            # columns used by examples outside this query group.
            other_used_columns = (set.union(*(column_usages[j] for j in other_indices))
                                  if other_indices else set())
            missing_columns = column_usages[query_group_indices[0]] - other_used_columns
            if missing_columns:
                groups_with_misses[query] = missing_columns
        print('{}: {} examples, {} columns, {} unused by any query; {} query groups with missing columns'.format(
            db_id, len(column_usages), num_columns, len(unused_columns), len(groups_with_misses)))
        if groups_with_misses:
            print('======================')
            for query, missing_columns in groups_with_misses.items():
                print('- Query: {}'.format(query))
                print('- Questions:')
                for i in grouped_by_query[query]:
                    print('  - {}'.format(' '.join(examples[i].text)))
                print('- Missing columns: {}'.format(', '.join(
                    '{}.{}'.format(schema.columns[c].table.name, schema.columns[c].name)
                    for c in sorted(missing_columns))))
class Preprocessor(): def __init__(self, config): self.config = config self.model_preproc = registry.instantiate(registry.lookup('model', config['model']).Preproc, config['model']) def preprocess(self): self.model_preproc.clear_items() for section in self.config['data']: data = registry.construct('dataset', self.config['data'][section]) for item in tqdm.tqdm(data, desc=section, dynamic_ncols=True): (to_add, validation_info) = self.model_preproc.validate_item(item, section) if to_add: self.model_preproc.add_item(item, section, validation_info) self.model_preproc.save()
def main(): parser = argparse.ArgumentParser() parser.add_argument('--config', required=True) parser.add_argument('--config-args') args = parser.parse_args() if args.config_args: config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args})) else: config = json.loads(_jsonnet.evaluate_file(args.config)) preprocessor = Preprocessor(config) preprocessor.preprocess()
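# Example invocation (the script name preprocess.py is assumed; the config
# path and config-args are reused from the experiment scripts above):
#   python preprocess.py --config configs/spider-20190205/nl2code-0401.jsonnet \
#       --config-args "{output_from: true, emb: 'glove-42B', min_freq: 3, att: 0}"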
def main(): parser = argparse.ArgumentParser() parser.add_argument('--infer', nargs='*', default=()) parser.add_argument('--sql', nargs='*', default=()) parser.add_argument('--names', nargs='*', default=()) parser.add_argument('--out', required=True) args = parser.parse_args() assert (len(args.names) == (len(args.infer) + len(args.sql))) SPIDER_ROOT = 'data/spider-20190205' foreign_key_maps = {db['db_id']: evaluation.build_foreign_key_map(db) for db in json.load(open(os.path.join(SPIDER_ROOT, 'tables.json')))} evaluator = evaluation.Evaluator(os.path.join(SPIDER_ROOT, 'database'), foreign_key_maps, 'match') dev = json.load(open(os.path.join(SPIDER_ROOT, 'dev.json'))) difficulty = {} inferred_per_file = [] correct_per_file = [] for infer_path in args.infer: inferred = ([None] * len(dev)) correct = ([None] * len(dev)) inferred_per_file.append(inferred) correct_per_file.append(correct) for line in open(infer_path): item = json.loads(line) item_inferred = item['beams'][0]['inferred_code'] i = item['index'] eval_result = evaluator.evaluate_one(db_name=dev[i]['db_id'], gold=dev[i]['query'], predicted=item_inferred) difficulty[i] = eval_result['hardness'] inferred[i] = item_inferred correct[i] = (1 if eval_result['exact'] else 0) for sql_path in args.sql: inferred = ([None] * len(dev)) correct = ([None] * len(dev)) inferred_per_file.append(inferred) correct_per_file.append(correct) for (i, line) in enumerate(open(sql_path)): eval_result = evaluator.evaluate_one(db_name=dev[i]['db_id'], gold=dev[i]['query'], predicted=line.strip()) difficulty[i] = eval_result['hardness'] inferred[i] = line.strip() correct[i] = (1 if eval_result['exact'] else 0) with open(args.out, 'w') as f: writer = csv.writer(f) writer.writerow(((['DB', 'Difficulty', 'Question', 'Gold'] + ['{} correct'.format(c) for c in args.names]) + ['{} output'.format(c) for c in args.names])) for (i, dev_item) in enumerate(dev): writer.writerow((([dev_item['db_id'], difficulty[i], dev_item['question'], dev_item['query']] + [x[i] for x in correct_per_file]) + [x[i] for x in inferred_per_file]))
def main(): parser = argparse.ArgumentParser() parser.add_argument('inputs', nargs='+') args = parser.parse_args() for path in args.inputs: for existing in glob.glob(os.path.join(path, 'events.out.tfevents*')): os.unlink(existing) writer = tf.summary.FileWriter(path) for line in open(os.path.join(path, 'log.txt')): m = re.search(log_re, line) if (m is None): continue (timestamp, step, section, loss) = m.groups() step = int(step) loss = float(loss) timestamp = dateutil.parser.parse(timestamp).timestamp() writer.add_event(tf.Event(wall_time=timestamp, step=step, summary=tf.Summary(value=[tf.Summary.Value(tag='loss/{}'.format(section), simple_value=loss)]))) writer.close() print(path)
def check_close(a, b): assert ((a - b).abs().max() < 1e-05)
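# Quick self-test for check_close (torch assumed available, as in the rest of
# this script): tensors within the 1e-05 elementwise tolerance pass, anything
# larger trips the assert.
import torch

check_close(torch.tensor([1.0, 2.0]), torch.tensor([1.0, 2.0 + 1e-06]))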
def test_enc_equal(input0, inputb, sequential): input1 = input0 inputc = inputb inputb0 = [inputb[0]] input0_history = [input0] inputb_history = [inputb] inputb0_history = [inputb0] for m in sequential: input0 = m.forward_unbatched(input0) input1 = m.forward_unbatched(input1) inputb0 = m.forward(inputb0) inputb = m.forward(inputb) inputc = m.forward(inputc) input0_history.append(input0) inputb_history.append(inputb) inputb0_history.append(inputb0) (input0_enc, input0_bounds) = input0 (input1_enc, input1_bounds) = input1 (inputb0_enc, inputb0_bounds) = inputb0 (inputb_enc, inputb_bounds) = inputb (inputc_enc, inputc_bounds) = inputc check_close(input0_enc.squeeze(1), inputb0_enc.select(0)) check_close(input0_enc.squeeze(1), inputb_enc.select(0)) check_close(input0_enc.squeeze(1), input1_enc.squeeze(1)) check_close(inputb_enc.select(0), inputc_enc.select(0)) assert np.array_equal(input0_bounds, inputb_bounds[0]) return (input0, inputb)
def main(): parser = argparse.ArgumentParser() parser.add_argument('--config', required=True) parser.add_argument('--config-args') args = parser.parse_args() if torch.cuda.is_available(): device = torch.device('cuda') else: device = torch.device('cpu') if args.config_args: config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args})) else: config = json.loads(_jsonnet.evaluate_file(args.config)) model_preproc = registry.instantiate(registry.lookup('model', config['model']).Preproc, config['model'], unused_keys=('name',)) model_preproc.load() model = registry.construct('model', config['model'], unused_keys=('encoder_preproc', 'decoder_preproc'), preproc=model_preproc, device=device) model.to(device) model.eval() train_data = model_preproc.dataset('train') train_eval_data_loader = torch.utils.data.DataLoader(train_data, batch_size=10, collate_fn=(lambda x: x)) batch = next(iter(train_eval_data_loader)) descs = [x for (x, y) in batch] (q0, qb) = test_enc_equal([descs[0]['question']], [[desc['question']] for desc in descs], model.encoder.question_encoder) (c0, cb) = test_enc_equal(descs[0]['columns'], [desc['columns'] for desc in descs], model.encoder.column_encoder) (t0, tb) = test_enc_equal(descs[0]['tables'], [desc['tables'] for desc in descs], model.encoder.table_encoder) (q0_enc, c0_enc, t0_enc) = model.encoder.encs_update.forward_unbatched(descs[0], q0[0], c0[0], c0[1], t0[0], t0[1]) (qb_enc, cb_enc, tb_enc) = model.encoder.encs_update.forward(descs, qb[0], cb[0], cb[1], tb[0], tb[1]) check_close(q0_enc.squeeze(1), qb_enc.select(0)) check_close(c0_enc.squeeze(1), cb_enc.select(0)) check_close(t0_enc.squeeze(1), tb_enc.select(0))
def main(): parser = argparse.ArgumentParser() parser.add_argument('--config', required=True) parser.add_argument('--config-args') parser.add_argument('--section', required=True) parser.add_argument('--inferred', required=True) parser.add_argument('--output', required=True) args = parser.parse_args() if args.config_args: config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args})) else: config = json.loads(_jsonnet.evaluate_file(args.config)) os.makedirs(args.output, exist_ok=True) gold = open(os.path.join(args.output, 'gold.txt'), 'w') predicted = open(os.path.join(args.output, 'predicted.txt'), 'w') inferred = open(args.inferred) data = registry.construct('dataset', config['data'][args.section]) for line in inferred: infer_results = json.loads(line) if infer_results['beams']: inferred_code = infer_results['beams'][0]['inferred_code'] else: inferred_code = 'SELECT a FROM b' item = data[infer_results['index']] gold.write('{}\t{}\n'.format(item.orig['query'].replace('\t', ' '), item.schema.db_id)) predicted.write('{}\n'.format(inferred_code))
def count_glove(data, embedder): present = collections.Counter() missing = collections.Counter() counted_db_ids = set() for item in tqdm.tqdm(data): question_tokens = embedder.tokenize(item.orig['question']) for token in question_tokens: if (embedder.lookup(token) is None): missing[token] += 1 else: present[token] += 1 if (item.orig['db_id'] in counted_db_ids): continue column_names = [embedder.tokenize(column.unsplit_name) for column in item.schema.columns] table_names = [embedder.tokenize(table.unsplit_name) for table in item.schema.tables] for token in itertools.chain(*column_names, *table_names): if (embedder.lookup(token) is None): missing[token] += 1 else: present[token] += 1 counted_db_ids.add(item.orig['db_id']) return (present, missing)
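# Usage sketch (hypothetical names: 'val_data' is a Spider dataset and
# 'glove_embedder' a wrapper exposing tokenize()/lookup()). Schemas are
# counted once per db_id so large databases do not dominate the tallies.
#   present, missing = count_glove(val_data, glove_embedder)
#   coverage = sum(present.values()) / (sum(present.values()) + sum(missing.values()))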