class Machine:
    def __init__(self, max_features=20000, maxlen=80):
        self.data = Data(max_features, maxlen)
        self.model = RNN_LSTM(max_features, maxlen)

    def run(self, epochs=3, batch_size=32):
        data = self.data
        model = self.model
        print('Training stage')
        print('==============')
        model.fit(data.x_train, data.y_train,
                  batch_size=batch_size, epochs=epochs,
                  validation_data=(data.x_test, data.y_test))
        score, acc = model.evaluate(data.x_test, data.y_test,
                                    batch_size=batch_size)
        print('Test performance: accuracy={0}, loss={1}'.format(acc, score))
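# The Data and RNN_LSTM classes used above are defined elsewhere in the source.
# A minimal sketch consistent with the constructor signatures might look like
# the following; IMDB data plus a single LSTM layer is an assumption, not the
# original code, as are the layer sizes.
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.preprocessing import sequence

class Data:
    def __init__(self, max_features=20000, maxlen=80):
        (x_train, y_train), (x_test, y_test) = \
            datasets.imdb.load_data(num_words=max_features)
        # pad/truncate every review to a fixed length
        self.x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
        self.x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
        self.y_train, self.y_test = y_train, y_test

class RNN_LSTM(models.Model):
    def __init__(self, max_features, maxlen):
        x = layers.Input((maxlen,))
        h = layers.Embedding(max_features, 128)(x)
        h = layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2)(h)
        y = layers.Dense(1, activation='sigmoid')(h)
        super().__init__(x, y)
        self.compile(loss='binary_crossentropy', optimizer='adam',
                     metrics=['accuracy'])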
def main():
    m = Machine()
    m.run()
class AE(models.Model):
    def __init__(self, x_nodes=784, z_dim=36):
        x_shape = (x_nodes,)
        x = layers.Input(shape=x_shape)
        z = layers.Dense(z_dim, activation='relu')(x)
        y = layers.Dense(x_nodes, activation='sigmoid')(z)
        super().__init__(x, y)
        self.x = x
        self.z = z
        self.z_dim = z_dim
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])

    def Encoder(self):
        return models.Model(self.x, self.z)

    def Decoder(self):
        z_shape = (self.z_dim,)
        z = layers.Input(shape=z_shape)
        y_layer = self.layers[-1]
        y = y_layer(z)
        return models.Model(z, y)
def show_ae(autoencoder):
    encoder = autoencoder.Encoder()
    decoder = autoencoder.Decoder()
    encoded_imgs = encoder.predict(x_test)
    decoded_imgs = decoder.predict(encoded_imgs)
    n = 10
    plt.figure(figsize=(20, 6))
    for i in range(n):
        # row 1: original image
        ax = plt.subplot(3, n, i + 1)
        plt.imshow(x_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # row 2: latent code as a stem plot
        ax = plt.subplot(3, n, i + 1 + n)
        plt.stem(encoded_imgs[i].reshape(-1))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # row 3: reconstruction
        ax = plt.subplot(3, n, i + 1 + n + n)
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def main():
    x_nodes = 784
    z_dim = 36
    autoencoder = AE(x_nodes, z_dim)
    history = autoencoder.fit(x_train, x_train, epochs=10, batch_size=256,
                              shuffle=True, validation_data=(x_test, x_test))
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
    show_ae(autoencoder)
    plt.show()
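# show_ae() and main() read x_train/x_test as module-level globals. A minimal
# MNIST preparation consistent with x_nodes=784 might be the following; the
# original loading code is not shown, so treat this as an assumption:
from tensorflow.keras import datasets

(x_train, _), (x_test, _) = datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0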
def Conv2D(filters, kernel_size, padding='same', activation='relu'):
    return layers.Conv2D(filters, kernel_size, padding=padding,
                         activation=activation)
class AE(models.Model):
    def __init__(self, org_shape=(1, 28, 28)):
        original = layers.Input(shape=org_shape)
        # encoder: 28x28 -> 14x14 -> 7x7
        x = Conv2D(4, (3, 3))(original)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(8, (3, 3))(x)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        z = Conv2D(1, (7, 7))(x)
        # decoder: 7x7 -> 14x14 -> 28x28
        y = Conv2D(16, (3, 3))(z)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(8, (3, 3))(y)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(4, (3, 3))(y)
        decoded = Conv2D(1, (3, 3), activation='sigmoid')(y)
        super().__init__(original, decoded)
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])
def show_ae(autoencoder, data):
    x_test = data.x_test
    decoded_imgs = autoencoder.predict(x_test)
    print(decoded_imgs.shape, data.x_test.shape)
    if backend.image_data_format() == 'channels_first':
        N, n_ch, n_i, n_j = x_test.shape
    else:
        N, n_i, n_j, n_ch = x_test.shape
    x_test = x_test.reshape(N, n_i, n_j)
    decoded_imgs = decoded_imgs.reshape(decoded_imgs.shape[0], n_i, n_j)
    n = 10
    plt.figure(figsize=(20, 4))
    for i in range(n):
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i], cmap='YlGnBu')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i], cmap='YlGnBu')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def main(epochs=20, batch_size=128):
    data = DATA()
    autoencoder = AE(data.input_shape)
    history = autoencoder.fit(data.x_train, data.x_train,
                              epochs=epochs, batch_size=batch_size,
                              shuffle=True, validation_split=0.2)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
    show_ae(autoencoder, data)
    plt.show()
def add_decorate(x):
    """Concatenate each sample with its squared deviation from the mean.

    axis = -1 --> last dimension in an array
    """
    m = K.mean(x, axis=-1, keepdims=True)
    d = K.square(x - m)
    return K.concatenate([x, d], axis=-1)
def add_decorate_shape(input_shape):
    shape = list(input_shape)
    assert len(shape) == 2
    shape[1] *= 2
    return tuple(shape)
def model_compile(model):
    return model.compile(loss='binary_crossentropy', optimizer=adam,
                         metrics=['accuracy'])
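# model_compile() refers to a module-level `adam` optimizer defined elsewhere
# in the source; something like the following is assumed (the learning rate is
# a guess, not taken from the original):
from tensorflow.keras import optimizers

adam = optimizers.Adam(learning_rate=2e-4)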
class GAN:
    def __init__(self, ni_D, nh_D, nh_G):
        self.ni_D = ni_D
        self.nh_D = nh_D
        self.nh_G = nh_G
        self.D = self.gen_D()
        self.G = self.gen_G()
        self.GD = self.make_GD()

    def gen_D(self):
        ni_D = self.ni_D
        nh_D = self.nh_D
        D = models.Sequential()
        # decorate the input with second-moment features before discriminating
        D.add(Lambda(add_decorate, output_shape=add_decorate_shape,
                     input_shape=(ni_D,)))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        return D

    def gen_G(self):
        ni_D = self.ni_D
        nh_G = self.nh_G
        G = models.Sequential()
        G.add(Reshape((ni_D, 1), input_shape=(ni_D,)))
        G.add(Conv1D(nh_G, 1, activation='relu'))
        G.add(Conv1D(nh_G, 1, activation='sigmoid'))
        G.add(Conv1D(1, 1))
        G.add(Flatten())
        model_compile(G)
        return G

    def make_GD(self):
        G, D = self.G, self.D
        GD = models.Sequential()
        GD.add(G)
        GD.add(D)
        # freeze D while the stacked model trains G
        D.trainable = False
        model_compile(GD)
        D.trainable = True
        return GD

    def D_train_on_batch(self, Real, Gen):
        D = self.D
        X = np.concatenate([Real, Gen], axis=0)
        y = np.array([1] * Real.shape[0] + [0] * Gen.shape[0])
        D.train_on_batch(X, y)

    def GD_train_on_batch(self, Z):
        GD = self.GD
        y = np.array([1] * Z.shape[0])
        GD.train_on_batch(Z, y)
class Data:
    def __init__(self, mu, sigma, ni_D):
        # real samples come from N(mu, sigma); generator input is uniform noise
        self.real_sample = lambda n_batch: np.random.normal(mu, sigma,
                                                            (n_batch, ni_D))
        self.in_sample = lambda n_batch: np.random.rand(n_batch, ni_D)
class Machine:
    def __init__(self, n_batch=10, ni_D=100):
        data_mean = 4
        data_stddev = 1.25
        self.n_iter_D = 1
        self.n_iter_G = 5
        self.data = Data(data_mean, data_stddev, ni_D)
        self.gan = GAN(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch

    def train_D(self):
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Real = data.real_sample(n_batch)
        Z = data.in_sample(n_batch)
        Gen = gan.G.predict(Z)
        gan.D.trainable = True
        gan.D_train_on_batch(Real, Gen)

    def train_GD(self):
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Z = data.in_sample(n_batch)
        gan.D.trainable = False
        gan.GD_train_on_batch(Z)

    def train_each(self):
        for it in range(self.n_iter_D):
            self.train_D()
        for it in range(self.n_iter_G):
            self.train_GD()

    def train(self, epochs):
        for epoch in range(epochs):
            self.train_each()

    def test(self, n_test):
        """Generate new samples from the trained generator."""
        gan = self.gan
        data = self.data
        Z = data.in_sample(n_test)
        Gen = gan.G.predict(Z)
        return Gen, Z

    def show_hist(self, Real, Gen, Z):
        plt.hist(Real.reshape(-1), histtype='step', label='Real')
        plt.hist(Gen.reshape(-1), histtype='step', label='Generated')
        plt.hist(Z.reshape(-1), histtype='step', label='Input')
        plt.legend(loc=0)

    def test_and_show(self, n_test):
        data = self.data
        Gen, Z = self.test(n_test)
        Real = data.real_sample(n_test)
        self.show_hist(Real, Gen, Z)
        Machine.print_stat(Real, Gen)

    def run_epochs(self, epochs, n_test):
        """Train the GAN and show the results.

        For showing, the original and the artificial results are compared.
        """
        self.train(epochs)
        self.test_and_show(n_test)

    def run(self, n_repeat=200, n_show=200, n_test=100):
        for ii in range(n_repeat):
            print('Stage', ii, '(Epoch: {})'.format(ii * n_show))
            self.run_epochs(n_show, n_test)
            plt.show()

    @staticmethod
    def print_stat(Real, Gen):
        def stat(d):
            return (np.mean(d), np.std(d))
        print('Mean and Std of Real:', stat(Real))
        print('Mean and Std of Gen:', stat(Gen))
class GAN_Pure(GAN):
    def __init__(self, ni_D, nh_D, nh_G):
        """A plain GAN: the discriminator input is not decorated."""
        super().__init__(ni_D, nh_D, nh_G)

    def gen_D(self):
        ni_D = self.ni_D
        nh_D = self.nh_D
        D = models.Sequential()
        D.add(Dense(nh_D, activation='relu', input_shape=(ni_D,)))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        return D
class Machine_Pure(Machine):
    def __init__(self, n_batch=10, ni_D=100):
        data_mean = 4
        data_stddev = 1.25
        # train_each() expects these attributes; mirror the parent's defaults
        self.n_iter_D = 1
        self.n_iter_G = 5
        self.data = Data(data_mean, data_stddev, ni_D)
        self.gan = GAN_Pure(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch
def main():
    machine = Machine(n_batch=1, ni_D=100)
    machine.run(n_repeat=200, n_show=200, n_test=100)
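# To compare against the undecorated discriminator, the same driver can be run
# with the pure variant; this helper is a usage sketch, not in the original:
def main_pure():
    machine = Machine_Pure(n_batch=1, ni_D=100)
    machine.run(n_repeat=200, n_show=200, n_test=100)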
class AE(models.Model):
    def __init__(self, org_shape=(1, 28, 28)):
        original = layers.Input(shape=org_shape)
        x = Conv2D(4, (3, 3))(original)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(8, (3, 3))(x)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        z = Conv2D(1, (7, 7))(x)
        y = Conv2D(16, (3, 3))(z)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(8, (3, 3))(y)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(4, (3, 3))(y)
        decoded = Conv2D(1, (3, 3), activation='sigmoid')(y)
        super().__init__(original, decoded)
        self.compile(optimizer='adadelta', loss='binary_crossentropy')
class DATA:
    def __init__(self):
        num_classes = 10
        (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
        img_rows, img_cols = x_train.shape[1:]
        if backend.image_data_format() == 'channels_first':
            x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
            x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
            input_shape = (1, img_rows, img_cols)
        else:
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
            input_shape = (img_rows, img_cols, 1)
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        y_train = utils.to_categorical(y_train, num_classes)
        y_test = utils.to_categorical(y_test, num_classes)
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.x_train, self.y_train = x_train, y_train
        self.x_test, self.y_test = x_test, y_test
def plot_loss(history):
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
class ANN(models.Model):
    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = output(h)
        super().__init__(x, y)
        self.compile(loss='mse', optimizer='sgd')
def Data_func():
    (X_train, y_train), (X_test, y_test) = datasets.boston_housing.load_data()
    scaler = preprocessing.MinMaxScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    return (X_train, y_train), (X_test, y_test)
def main():
    Nin = 13
    Nh = 5
    Nout = 1
    model = ANN(Nin, Nh, Nout)
    (X_train, y_train), (X_test, y_test) = Data_func()
    history = model.fit(X_train, y_train, epochs=100, batch_size=100,
                        validation_split=0.2, verbose=2)
    performance_test = model.evaluate(X_test, y_test, batch_size=100)
    print('\nTest Loss -> {:.2f}'.format(performance_test))
    plot_loss(history)
    plt.show()
def ANN_models_func(Nin, Nh, Nout):
    x = layers.Input(shape=(Nin,))
    h = layers.Activation('relu')(layers.Dense(Nh)(x))
    y = layers.Activation('softmax')(layers.Dense(Nout)(h))
    model = models.Model(x, y)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def ANN_seq_func(Nin, Nh, Nout):
    model = models.Sequential()
    model.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
    model.add(layers.Dense(Nout, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
class ANN_models_class(models.Model):
    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = softmax(output(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
class ANN_seq_class(models.Sequential):
    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
def Data_func():
    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    L, W, H = X_train.shape
    X_train = X_train.reshape(-1, W * H)
    X_test = X_test.reshape(-1, W * H)
    X_train = X_train / 255.0
    X_test = X_test / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def plot_loss(history):
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc=0)
def plot_acc(history):
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc=0)
def main():
    Nin = 784
    Nh = 100
    number_of_class = 10
    Nout = number_of_class
    model = ANN_seq_class(Nin, Nh, Nout)
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    history = model.fit(X_train, Y_train, epochs=15, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
def plot_acc(history, title=None):
    if not isinstance(history, dict):
        history = history.history
    plt.plot(history['accuracy'])
    plt.plot(history['val_accuracy'])
    if title is not None:
        plt.title(title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc=0)
def plot_loss(history, title=None):
    if not isinstance(history, dict):
        history = history.history
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    if title is not None:
        plt.title(title)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc=0)
class History:
    def __init__(self):
        self.history = {'accuracy': [], 'loss': [],
                        'val_accuracy': [], 'val_loss': []}
class Metrics_Mean:
    def __init__(self):
        self.reset_states()

    def __call__(self, loss):
        self.buff.append(loss.data)

    def reset_states(self):
        self.buff = []

    def result(self):
        return np.mean(self.buff)
class Metrics_CategoricalAccuracy:
    def __init__(self):
        self.reset_states()

    def __call__(self, labels, predictions):
        decisions = predictions.data.max(1)[1]
        self.correct += decisions.eq(labels.data).cpu().sum()
        self.L += len(labels.data)

    def reset_states(self):
        self.correct, self.L = 0, 0

    def result(self):
        return float(self.correct) / self.L
class ANN_models_class(nn.Module):
    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.hidden = nn.Linear(Nin, Nh)
        self.last = nn.Linear(Nh, Nout)
        self.Nin = Nin

    def forward(self, x):
        x = x.view(-1, self.Nin)
        h = F.relu(self.hidden(x))
        y = F.softmax(self.last(h), dim=1)
        return y
def Data_func():
    train_dataset = datasets.MNIST('~/pytorch_data', train=True, download=True,
                                   transform=transforms.ToTensor())
    test_dataset = datasets.MNIST('~/pytorch_data', train=False,
                                  transform=transforms.ToTensor())
    # batch_size is a module-level global in the source
    train_ds = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size, shuffle=True)
    test_ds = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size, shuffle=False)
    return train_ds, test_ds
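# The surrounding training script (not shown here) wires the torch model, the
# metric helpers above, and these loaders together. A minimal sketch under
# assumed names is below; batch_size, the learning rate, and NLL-on-log-probs
# are guesses, and the module-level imports (numpy as np, etc.) of the source
# file are assumed to be in scope.
batch_size = 100
model = ANN_models_class(Nin=784, Nh=100, Nout=10)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
train_loss = Metrics_Mean()
train_accuracy = Metrics_CategoricalAccuracy()

train_ds, test_ds = Data_func()
for epoch in range(5):
    train_loss.reset_states()
    train_accuracy.reset_states()
    for x, labels in train_ds:
        optimizer.zero_grad()
        y = model(x)
        # the model outputs softmax probabilities, so take log before NLL
        loss = F.nll_loss(torch.log(y + 1e-12), labels)
        loss.backward()
        optimizer.step()
        train_loss(loss)
        train_accuracy(labels, y)
    print(f'epoch {epoch}: loss {train_loss.result():.4f} '
          f'acc {train_accuracy.result():.4f}')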
def Data_func():
    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    Y_train = utils.to_categorical(y_train)
    Y_test = utils.to_categorical(y_test)
    L, W, H = X_train.shape
    X_train = X_train.reshape(-1, W * H)
    X_test = X_test.reshape(-1, W * H)
    X_train = X_train / 255.0
    X_test = X_test / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def plot_acc(history, title=None):
    if not isinstance(history, dict):
        history = history.history
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    if title is not None:
        plt.title(title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc=0)
def main():
    Nin = 784
    Nh = 100
    number_of_class = 10
    Nout = number_of_class
    model = ANN_seq_class(Nin, Nh, Nout)
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    history = model.fit(X_train, Y_train, epochs=15, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100, verbose=0)
    print('Test Loss and Accuracy ->', performance_test)
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
class ANN_models_class(models.Model):
    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.hidden = layers.Dense(Nh)
        self.last = layers.Dense(Nout)

    def call(self, x):
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        h = relu(self.hidden(x))
        y = softmax(self.last(h))
        return y
class _ANN_models_class(models.Model):
    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = softmax(output(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
@tf2.function
def ep_train(xx, yy):
    with tf2.GradientTape() as tape:
        yp = model(xx)
        loss = Loss_object(yy, yp)
    gradients = tape.gradient(loss, model.trainable_variables)
    Optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(yy, yp)
@tf2.function
def ep_test(xx, yy):
    yp = model(xx)
    t_loss = Loss_object(yy, yp)
    test_loss(t_loss)
    test_accuracy(yy, yp)
class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(32, 3, activation='relu')
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)
@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
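# train_step/test_step are driven by an outer epoch loop that is not shown in
# this section; a minimal sketch under the usual tf.data setup (train_ds,
# test_ds, and the four Keras metric objects are assumed to exist with the
# names used above) could be:
EPOCHS = 5
for epoch in range(EPOCHS):
    for images, labels in train_ds:
        train_step(images, labels)
    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)
    print(f'Epoch {epoch + 1}, '
          f'Loss: {train_loss.result()}, '
          f'Accuracy: {train_accuracy.result() * 100}, '
          f'Test Loss: {test_loss.result()}, '
          f'Test Accuracy: {test_accuracy.result() * 100}')
    # the metric objects accumulate across calls, so reset them between epochs
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()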
class MultiHeadAttn(nn.Module):
    def __init__(self, dim_q, dim_k, dim_v, dim_out, num_heads=8):
        super().__init__()
        self.num_heads = num_heads
        self.dim_out = dim_out
        self.fc_q = nn.Linear(dim_q, dim_out, bias=False)
        self.fc_k = nn.Linear(dim_k, dim_out, bias=False)
        self.fc_v = nn.Linear(dim_v, dim_out, bias=False)
        self.fc_out = nn.Linear(dim_out, dim_out)
        self.ln1 = nn.LayerNorm(dim_out)
        self.ln2 = nn.LayerNorm(dim_out)

    def scatter(self, x):
        # split the feature dim into heads, stacked along a batch-like dim
        return torch.cat(x.chunk(self.num_heads, -1), -3)

    def gather(self, x):
        # inverse of scatter: merge the heads back into the feature dim
        return torch.cat(x.chunk(self.num_heads, -3), -1)

    def attend(self, q, k, v, mask=None):
        q_, k_, v_ = [self.scatter(x) for x in [q, k, v]]
        A_logits = (q_ @ k_.transpose(-2, -1)) / math.sqrt(self.dim_out)
        if mask is not None:
            mask = mask.bool().to(q.device)
            mask = torch.stack([mask] * q.shape[-2], -2)
            mask = torch.cat([mask] * self.num_heads, -3)
            A = torch.softmax(A_logits.masked_fill(mask, -float('inf')), -1)
            # rows that were fully masked produce NaNs; zero them out
            A = A.masked_fill(torch.isnan(A), 0.0)
        else:
            A = torch.softmax(A_logits, -1)
        return self.gather(A @ v_)

    def forward(self, q, k, v, mask=None):
        q, k, v = self.fc_q(q), self.fc_k(k), self.fc_v(v)
        # residual attention block followed by a residual feed-forward block
        out = self.ln1(q + self.attend(q, k, v, mask=mask))
        out = self.ln2(out + F.relu(self.fc_out(out)))
        return out
class SelfAttn(MultiHeadAttn):
    def __init__(self, dim_in, dim_out, num_heads=8):
        super().__init__(dim_in, dim_in, dim_in, dim_out, num_heads)

    def forward(self, x, mask=None):
        return super().forward(x, x, x, mask=mask)
def build_mlp(dim_in, dim_hid, dim_out, depth):
    modules = [nn.Linear(dim_in, dim_hid), nn.ReLU(True)]
    for _ in range(depth - 2):
        modules.append(nn.Linear(dim_hid, dim_hid))
        modules.append(nn.ReLU(True))
    modules.append(nn.Linear(dim_hid, dim_out))
    return nn.Sequential(*modules)
class PoolingEncoder(nn.Module):
    def __init__(self, dim_x=1, dim_y=1, dim_hid=128, dim_lat=None,
                 self_attn=False, pre_depth=4, post_depth=2):
        super().__init__()
        self.use_lat = dim_lat is not None
        self.net_pre = (
            build_mlp(dim_x + dim_y, dim_hid, dim_hid, pre_depth)
            if not self_attn else
            nn.Sequential(
                build_mlp(dim_x + dim_y, dim_hid, dim_hid, pre_depth - 2),
                nn.ReLU(True),
                SelfAttn(dim_hid, dim_hid)))
        self.net_post = build_mlp(dim_hid, dim_hid,
                                  2 * dim_lat if self.use_lat else dim_hid,
                                  post_depth)

    def forward(self, xc, yc, mask=None):
        out = self.net_pre(torch.cat([xc, yc], -1))
        if mask is None:
            out = out.mean(-2)
        else:
            mask = mask.to(xc.device)
            out = (out * mask.unsqueeze(-1)).sum(-2) / \
                  (mask.sum(-1, keepdim=True).detach() + 1e-5)
        if self.use_lat:
            mu, sigma = self.net_post(out).chunk(2, -1)
            sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
            return Normal(mu, sigma)
        else:
            return self.net_post(out)
class CrossAttnEncoder(nn.Module):
    def __init__(self, dim_x=1, dim_y=1, dim_hid=128, dim_lat=None,
                 self_attn=True, v_depth=4, qk_depth=2):
        super().__init__()
        self.use_lat = dim_lat is not None
        if not self_attn:
            self.net_v = build_mlp(dim_x + dim_y, dim_hid, dim_hid, v_depth)
        else:
            self.net_v = build_mlp(dim_x + dim_y, dim_hid, dim_hid, v_depth - 2)
            self.self_attn = SelfAttn(dim_hid, dim_hid)
        self.net_qk = build_mlp(dim_x, dim_hid, dim_hid, qk_depth)
        self.attn = MultiHeadAttn(dim_hid, dim_hid, dim_hid,
                                  2 * dim_lat if self.use_lat else dim_hid)

    def forward(self, xc, yc, xt, mask=None):
        q, k = self.net_qk(xt), self.net_qk(xc)
        v = self.net_v(torch.cat([xc, yc], -1))
        if hasattr(self, 'self_attn'):
            v = self.self_attn(v, mask=mask)
        out = self.attn(q, k, v, mask=mask)
        if self.use_lat:
            mu, sigma = out.chunk(2, -1)
            sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
            return Normal(mu, sigma)
        else:
            return out
class Decoder(nn.Module):
    def __init__(self, dim_x=1, dim_y=1, dim_enc=128, dim_hid=128, depth=3):
        super().__init__()
        self.fc = nn.Linear(dim_x + dim_enc, dim_hid)
        self.dim_hid = dim_hid
        modules = [nn.ReLU(True)]
        for _ in range(depth - 2):
            modules.append(nn.Linear(dim_hid, dim_hid))
            modules.append(nn.ReLU(True))
        modules.append(nn.Linear(dim_hid, 2 * dim_y))
        self.mlp = nn.Sequential(*modules)

    def add_ctx(self, dim_ctx):
        self.dim_ctx = dim_ctx
        self.fc_ctx = nn.Linear(dim_ctx, self.dim_hid, bias=False)

    def forward(self, encoded, x, ctx=None):
        packed = torch.cat([encoded, x], -1)
        hid = self.fc(packed)
        if ctx is not None:
            hid = hid + self.fc_ctx(ctx)
        out = self.mlp(hid)
        mu, sigma = out.chunk(2, -1)
        sigma = 0.1 + 0.9 * F.softplus(sigma)
        return Normal(mu, sigma)
def get_logger(filename, mode='a'):
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    logger = logging.getLogger()
    logger.addHandler(logging.FileHandler(filename, mode=mode))
    return logger
class RunningAverage(object):
    def __init__(self, *keys):
        self.sum = OrderedDict()
        self.cnt = OrderedDict()
        self.clock = time.time()
        for key in keys:
            self.sum[key] = 0
            self.cnt[key] = 0

    def update(self, key, val):
        if isinstance(val, torch.Tensor):
            val = val.item()
        if self.sum.get(key, None) is None:
            self.sum[key] = val
            self.cnt[key] = 1
        else:
            self.sum[key] = self.sum[key] + val
            self.cnt[key] += 1

    def reset(self):
        for key in self.sum.keys():
            self.sum[key] = 0
            self.cnt[key] = 0
        self.clock = time.time()

    def clear(self):
        self.sum = OrderedDict()
        self.cnt = OrderedDict()
        self.clock = time.time()

    def keys(self):
        return self.sum.keys()

    def get(self, key):
        assert self.sum.get(key, None) is not None
        return self.sum[key] / self.cnt[key]

    def info(self, show_et=True):
        line = ''
        for key in self.sum.keys():
            val = self.sum[key] / self.cnt[key]
            if type(val) == float:
                line += f'{key} {val:.4f} '
            else:
                line += f'{key} {val} '
        if show_et:
            line += f'({time.time() - self.clock:.3f} secs)'
        return line
def gen_load_func(parser, func):
    def load(args, cmdline):
        sub_args, cmdline = parser.parse_known_args(cmdline)
        for k, v in sub_args.__dict__.items():
            args.__dict__[k] = v
        return func(**sub_args.__dict__), cmdline
    return load
def load_module(filename):
    module_name = os.path.splitext(os.path.basename(filename))[0]
    return SourceFileLoader(module_name, filename).load_module()
def logmeanexp(x, dim=0):
    # numerically stable log of the mean of exp(x) along dim:
    # log(mean(exp(x))) = logsumexp(x) - log(N)
    return x.logsumexp(dim) - math.log(x.shape[dim])
def stack(x, num_samples=None, dim=0):
    return x if num_samples is None else torch.stack([x] * num_samples, dim=dim)
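# A rough sketch of how the encoder/decoder modules above compose into a
# CNP-style model. The actual model classes live in models/*.py in this
# repository; the wiring below is an assumption for illustration only.
import torch
import torch.nn as nn

class TinyCNP(nn.Module):
    def __init__(self, dim_x=1, dim_y=1, dim_hid=128):
        super().__init__()
        self.enc = PoolingEncoder(dim_x=dim_x, dim_y=dim_y, dim_hid=dim_hid)
        self.dec = Decoder(dim_x=dim_x, dim_y=dim_y,
                           dim_enc=dim_hid, dim_hid=dim_hid)

    def predict(self, xc, yc, xt):
        # encode the context set, broadcast the code over the targets, decode
        encoded = self.enc(xc, yc)                  # [B, dim_hid]
        encoded = stack(encoded, xt.shape[-2], -2)  # [B, Nt, dim_hid]
        return self.dec(encoded, xt)                # Normal over yt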
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['train', 'eval', 'plot', 'ensemble'],
                        default='train')
    parser.add_argument('--expid', type=str, default='trial')
    parser.add_argument('--resume', action='store_true', default=False)
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--max_num_points', type=int, default=200)
    parser.add_argument('--model', type=str, default='cnp')
    parser.add_argument('--train_batch_size', type=int, default=100)
    parser.add_argument('--train_num_samples', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.0005)
    parser.add_argument('--num_epochs', type=int, default=200)
    parser.add_argument('--eval_freq', type=int, default=10)
    parser.add_argument('--save_freq', type=int, default=10)
    parser.add_argument('--eval_seed', type=int, default=42)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--eval_num_samples', type=int, default=50)
    parser.add_argument('--eval_logfile', type=str, default=None)
    parser.add_argument('--plot_seed', type=int, default=None)
    parser.add_argument('--plot_batch_size', type=int, default=16)
    parser.add_argument('--plot_num_samples', type=int, default=30)
    parser.add_argument('--plot_num_ctx', type=int, default=100)
    parser.add_argument('--t_noise', type=float, default=None)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    model_cls = getattr(load_module(f'models/{args.model}.py'),
                        args.model.upper())
    with open(f'configs/celeba/{args.model}.yaml', 'r') as f:
        config = yaml.safe_load(f)
    model = model_cls(**config).cuda()
    args.root = osp.join(results_path, 'celeba', args.model, args.expid)

    if args.mode == 'train':
        train(args, model)
    elif args.mode == 'eval':
        eval(args, model)
    elif args.mode == 'plot':
        plot(args, model)
    elif args.mode == 'ensemble':
        ensemble(args, model)
def train(args, model):
    if not osp.isdir(args.root):
        os.makedirs(args.root)
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)

    train_ds = CelebA(train=True)
    eval_ds = CelebA(train=False)
    train_loader = torch.utils.data.DataLoader(
        train_ds, batch_size=args.train_batch_size,
        shuffle=True, num_workers=4)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=len(train_loader) * args.num_epochs)

    if args.resume:
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_epoch = ckpt.epoch
    else:
        logfilename = osp.join(
            args.root, 'train_{}.log'.format(time.strftime('%Y%m%d-%H%M')))
        start_epoch = 1

    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if not args.resume:
        logger.info('Total number of parameters: {}\n'.format(
            sum(p.numel() for p in model.parameters())))

    for epoch in range(start_epoch, args.num_epochs + 1):
        model.train()
        for x, _ in tqdm(train_loader):
            batch = img_to_task(x, max_num_points=args.max_num_points,
                                device='cuda')
            optimizer.zero_grad()
            outs = model(batch, num_samples=args.train_num_samples)
            outs.loss.backward()
            optimizer.step()
            scheduler.step()
            for key, val in outs.items():
                ravg.update(key, val)

        line = f'{args.model}:{args.expid} epoch {epoch} '
        line += f"lr {optimizer.param_groups[0]['lr']:.3e} "
        line += ravg.info()
        logger.info(line)

        if epoch % args.eval_freq == 0:
            logger.info(eval(args, model) + '\n')
        ravg.reset()

        if epoch % args.save_freq == 0 or epoch == args.num_epochs:
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            ckpt.epoch = epoch + 1
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))

    args.mode = 'eval'
    eval(args, model)
def gen_evalset(args):
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    eval_ds = CelebA(train=False)
    eval_loader = torch.utils.data.DataLoader(
        eval_ds, batch_size=args.eval_batch_size,
        shuffle=False, num_workers=4)
    batches = []
    for x, _ in tqdm(eval_loader):
        batches.append(img_to_task(x, t_noise=args.t_noise,
                                   max_num_points=args.max_num_points))
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())

    path = osp.join(evalsets_path, 'celeba')
    if not osp.isdir(path):
        os.makedirs(path)
    filename = 'no_noise.tar' if args.t_noise is None else f'{args.t_noise}.tar'
    torch.save(batches, osp.join(path, filename))
def eval(args, model):
    if args.mode == 'eval':
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        if args.eval_logfile is None:
            eval_logfile = 'eval'
            if args.t_noise is not None:
                eval_logfile += f'_{args.t_noise}'
            eval_logfile += '.log'
        else:
            eval_logfile = args.eval_logfile
        filename = osp.join(args.root, eval_logfile)
        logger = get_logger(filename, mode='w')
    else:
        logger = None

    path = osp.join(evalsets_path, 'celeba')
    if not osp.isdir(path):
        os.makedirs(path)
    filename = 'no_noise.tar' if args.t_noise is None else f'{args.t_noise}.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))

    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    ravg = RunningAverage()
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            outs = model(batch, num_samples=args.eval_num_samples)
            for key, val in outs.items():
                ravg.update(key, val)
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())

    line = f'{args.model}:{args.expid} '
    if args.t_noise is not None:
        line += f'tn {args.t_noise} '
    line += ravg.info()
    if logger is not None:
        logger.info(line)
    return line
def ensemble(args, model):
    num_runs = 5
    models = []
    for i in range(num_runs):
        model_ = deepcopy(model)
        ckpt = torch.load(osp.join(results_path, 'celeba', args.model,
                                   f'run{i + 1}', 'ckpt.tar'))
        model_.load_state_dict(ckpt['model'])
        model_.cuda()
        model_.eval()
        models.append(model_)

    path = osp.join(evalsets_path, 'celeba')
    if not osp.isdir(path):
        os.makedirs(path)
    filename = 'no_noise.tar' if args.t_noise is None else f'{args.t_noise}.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))

    ravg = RunningAverage()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            ctx_ll = []
            tar_ll = []
            for model in models:
                outs = model(batch, num_samples=args.eval_num_samples,
                             reduce_ll=False)
                ctx_ll.append(outs.ctx_ll)
                tar_ll.append(outs.tar_ll)
            if ctx_ll[0].dim() == 2:
                ctx_ll = torch.stack(ctx_ll)
                tar_ll = torch.stack(tar_ll)
            else:
                ctx_ll = torch.cat(ctx_ll)
                tar_ll = torch.cat(tar_ll)
            # ensemble likelihood: log of the mean of per-model likelihoods
            ctx_ll = logmeanexp(ctx_ll).mean()
            tar_ll = logmeanexp(tar_ll).mean()
            ravg.update('ctx_ll', ctx_ll)
            ravg.update('tar_ll', tar_ll)
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())

    filename = 'ensemble'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.log'
    logger = get_logger(osp.join(results_path, 'celeba', args.model, filename),
                        mode='w')
    logger.info(ravg.info())
class CelebA(object):
    def __init__(self, train=True):
        self.data, self.targets = torch.load(
            osp.join(datasets_path, 'celeba',
                     'train.pt' if train else 'eval.pt'))
        self.data = self.data.float() / 255.0

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index], self.targets[index]
class EMNIST(tvds.EMNIST):
    def __init__(self, train=True, class_range=[0, 47], device='cpu',
                 download=True):
        super().__init__(datasets_path, train=train, split='balanced',
                         download=download)
        # raw EMNIST images are transposed; fix orientation, scale to [0, 1]
        self.data = self.data.unsqueeze(1).float().div(255) \
            .transpose(-1, -2).to(device)
        self.targets = self.targets.to(device)
        # keep only the classes in [class_range[0], class_range[1])
        idxs = []
        for c in range(class_range[0], class_range[1]):
            idxs.append(torch.where(self.targets == c)[0])
        idxs = torch.cat(idxs)
        self.data = self.data[idxs]
        self.targets = self.targets[idxs]

    def __getitem__(self, idx):
        return self.data[idx], self.targets[idx]
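# img_to_task() is imported from the repository's data utilities and is not
# shown in this section. Roughly, it turns a batch of images into a 2-D
# regression task: pixel coordinates become x, intensities become y, and a
# random subset of points becomes the context. A simplified sketch follows;
# the field names match their usage above, but the sampling details and
# normalization are assumptions.
import torch
from attrdict import AttrDict

def img_to_task_sketch(img, max_num_points=None, device='cpu'):
    B, C, H, W = img.shape
    num_pixels = H * W
    img = img.view(B, C, -1)
    max_num_points = max_num_points or num_pixels
    num_ctx = torch.randint(low=3, high=max_num_points - 3, size=[1]).item()
    num_tar = torch.randint(low=3, high=max_num_points - num_ctx,
                            size=[1]).item()
    num_points = num_ctx + num_tar
    # pick num_points distinct pixels per image
    idxs = torch.rand(B, num_pixels).argsort(-1)[..., :num_points]
    x1, x2 = idxs // W, idxs % W
    batch = AttrDict()
    # normalize coordinates to [-1, 1]; gather and center the intensities
    batch.x = torch.stack([2 * x1.float() / (H - 1) - 1,
                           2 * x2.float() / (W - 1) - 1], -1).to(device)
    batch.y = (torch.gather(img, -1, idxs.unsqueeze(-2).repeat(1, C, 1))
               .transpose(-2, -1) - 0.5).to(device)
    batch.xc, batch.xt = batch.x[:, :num_ctx], batch.x[:, num_ctx:]
    batch.yc, batch.yt = batch.y[:, :num_ctx], batch.y[:, num_ctx:]
    return batch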
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['train', 'eval', 'plot', 'ensemble'],
                        default='train')
    parser.add_argument('--expid', type=str, default='trial')
    parser.add_argument('--resume', action='store_true', default=False)
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--max_num_points', type=int, default=200)
    parser.add_argument('--class_range', type=int, nargs='*', default=[0, 10])
    parser.add_argument('--model', type=str, default='cnp')
    parser.add_argument('--train_batch_size', type=int, default=100)
    parser.add_argument('--train_num_samples', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.0005)
    parser.add_argument('--num_epochs', type=int, default=200)
    parser.add_argument('--eval_freq', type=int, default=10)
    parser.add_argument('--save_freq', type=int, default=10)
    parser.add_argument('--eval_seed', type=int, default=42)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--eval_num_samples', type=int, default=50)
    parser.add_argument('--eval_logfile', type=str, default=None)
    parser.add_argument('--plot_seed', type=int, default=None)
    parser.add_argument('--plot_batch_size', type=int, default=16)
    parser.add_argument('--plot_num_samples', type=int, default=30)
    parser.add_argument('--plot_num_ctx', type=int, default=100)
    parser.add_argument('--t_noise', type=float, default=None)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    model_cls = getattr(load_module(f'models/{args.model}.py'),
                        args.model.upper())
    with open(f'configs/emnist/{args.model}.yaml', 'r') as f:
        config = yaml.safe_load(f)
    model = model_cls(**config).cuda()
    args.root = osp.join(results_path, 'emnist', args.model, args.expid)

    if args.mode == 'train':
        train(args, model)
    elif args.mode == 'eval':
        eval(args, model)
    elif args.mode == 'plot':
        plot(args, model)
    elif args.mode == 'ensemble':
        ensemble(args, model)
def train(args, model):
    if not osp.isdir(args.root):
        os.makedirs(args.root)
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)

    train_ds = EMNIST(train=True, class_range=args.class_range)
    eval_ds = EMNIST(train=False, class_range=args.class_range)
    train_loader = torch.utils.data.DataLoader(
        train_ds, batch_size=args.train_batch_size,
        shuffle=True, num_workers=4)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=len(train_loader) * args.num_epochs)

    if args.resume:
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_epoch = ckpt.epoch
    else:
        logfilename = osp.join(
            args.root, 'train_{}.log'.format(time.strftime('%Y%m%d-%H%M')))
        start_epoch = 1

    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if not args.resume:
        logger.info('Total number of parameters: {}\n'.format(
            sum(p.numel() for p in model.parameters())))

    for epoch in range(start_epoch, args.num_epochs + 1):
        model.train()
        for x, _ in tqdm(train_loader):
            batch = img_to_task(x, max_num_points=args.max_num_points,
                                device='cuda')
            optimizer.zero_grad()
            outs = model(batch, num_samples=args.train_num_samples)
            outs.loss.backward()
            optimizer.step()
            scheduler.step()
            for key, val in outs.items():
                ravg.update(key, val)

        line = f'{args.model}:{args.expid} epoch {epoch} '
        line += f"lr {optimizer.param_groups[0]['lr']:.3e} "
        line += ravg.info()
        logger.info(line)

        if epoch % args.eval_freq == 0:
            logger.info(eval(args, model) + '\n')
        ravg.reset()

        if epoch % args.save_freq == 0 or epoch == args.num_epochs:
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            ckpt.epoch = epoch + 1
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))

    args.mode = 'eval'
    eval(args, model)
def gen_evalset(args):
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    eval_ds = EMNIST(train=False, class_range=args.class_range)
    eval_loader = torch.utils.data.DataLoader(
        eval_ds, batch_size=args.eval_batch_size,
        shuffle=False, num_workers=4)
    batches = []
    for x, _ in tqdm(eval_loader):
        batches.append(img_to_task(x, t_noise=args.t_noise,
                                   max_num_points=args.max_num_points))
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())

    path = osp.join(evalsets_path, 'emnist')
    if not osp.isdir(path):
        os.makedirs(path)
    c1, c2 = args.class_range
    filename = f'{c1}-{c2}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    torch.save(batches, osp.join(path, filename))
def eval(args, model):
    if args.mode == 'eval':
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        if args.eval_logfile is None:
            c1, c2 = args.class_range
            eval_logfile = f'eval_{c1}-{c2}'
            if args.t_noise is not None:
                eval_logfile += f'_{args.t_noise}'
            eval_logfile += '.log'
        else:
            eval_logfile = args.eval_logfile
        filename = osp.join(args.root, eval_logfile)
        logger = get_logger(filename, mode='w')
    else:
        logger = None

    path = osp.join(evalsets_path, 'emnist')
    c1, c2 = args.class_range
    filename = f'{c1}-{c2}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))

    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    ravg = RunningAverage()
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            outs = model(batch, num_samples=args.eval_num_samples)
            for key, val in outs.items():
                ravg.update(key, val)
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())

    c1, c2 = args.class_range
    line = f'{args.model}:{args.expid} {c1}-{c2} '
    if args.t_noise is not None:
        line += f'tn {args.t_noise} '
    line += ravg.info()
    if logger is not None:
        logger.info(line)
    return line
def ensemble(args, model):
    num_runs = 5
    models = []
    for i in range(num_runs):
        model_ = deepcopy(model)
        ckpt = torch.load(osp.join(results_path, 'emnist', args.model,
                                   f'run{i + 1}', 'ckpt.tar'))
        model_.load_state_dict(ckpt['model'])
        model_.cuda()
        model_.eval()
        models.append(model_)

    path = osp.join(evalsets_path, 'emnist')
    c1, c2 = args.class_range
    filename = f'{c1}-{c2}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))

    ravg = RunningAverage()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            ctx_ll = []
            tar_ll = []
            for model in models:
                outs = model(batch, num_samples=args.eval_num_samples,
                             reduce_ll=False)
                ctx_ll.append(outs.ctx_ll)
                tar_ll.append(outs.tar_ll)
            if ctx_ll[0].dim() == 2:
                ctx_ll = torch.stack(ctx_ll)
                tar_ll = torch.stack(tar_ll)
            else:
                ctx_ll = torch.cat(ctx_ll)
                tar_ll = torch.cat(tar_ll)
            ctx_ll = logmeanexp(ctx_ll).mean()
            tar_ll = logmeanexp(tar_ll).mean()
            ravg.update('ctx_ll', ctx_ll)
            ravg.update('tar_ll', tar_ll)
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())

    filename = f'ensemble_{c1}-{c2}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.log'
    logger = get_logger(osp.join(results_path, 'emnist', args.model, filename),
                        mode='w')
    logger.info(ravg.info())