kevinwang676 committed on
Commit 34a163e • 1 Parent(s): 22cf660

Update test.py

Files changed (1)
test.py +20 -23
test.py CHANGED
@@ -1,55 +1,53 @@
 import tensorflow as tf
 from tensorflow.keras.models import Model
-from tensorflow.keras.layers import Input, Dense, Dropout, BatchNormalization
+from tensorflow.keras.layers import Input, Dense, Dropout, BatchNormalization, Activation
 from tensorflow.keras import regularizers
 from tensorflow.keras.optimizers import Adam
-from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
+from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
 
 with tf.device('/gpu:0'):
-    # this is the size of our encoded representations
     encoding_dim1 = 500
     encoding_dim2 = 200
 
     lambda_act = 0.0001
     lambda_weight = 0.001
 
-    # this is our input placeholder
     input_data = Input(shape=(num_in_neurons,))
 
-    # first encoded representation of the input
-    encoded = Dense(encoding_dim1, activation='relu', activity_regularizer=regularizers.l1(lambda_act), kernel_regularizer=regularizers.l2(lambda_weight), name='encoder1')(input_data)
+    # Encoder
+    encoded = Dense(encoding_dim1, activity_regularizer=regularizers.l1(lambda_act), kernel_regularizer=regularizers.l2(lambda_weight), name='encoder1')(input_data)
     encoded = BatchNormalization()(encoded)
+    encoded = Activation('relu')(encoded)
     encoded = Dropout(0.5)(encoded)
 
-    # second encoded representation of the input
-
-    encoded = Dense(encoding_dim2, activation='relu', activity_regularizer=regularizers.l1(lambda_act), kernel_regularizer=regularizers.l2(lambda_weight), name='encoder2')(encoded)
+    encoded = Dense(encoding_dim2, activity_regularizer=regularizers.l1(lambda_act), kernel_regularizer=regularizers.l2(lambda_weight), name='encoder2')(encoded)
     encoded = BatchNormalization()(encoded)
+    encoded = Activation('relu')(encoded)
     encoded = Dropout(0.5)(encoded)
 
-    # first lossy reconstruction of the input
-
-    decoded = Dense(encoding_dim1, activation='relu', name='decoder1')(encoded)
+    # Decoder
+    decoded = Dense(encoding_dim1, name='decoder1')(encoded)
     decoded = BatchNormalization()(decoded)
+    decoded = Activation('relu')(decoded)
 
-    # the final lossy reconstruction of the input
-    decoded = Dense(num_in_neurons, activation='sigmoid', name='decoder2')(decoded)
+    decoded = Dense(num_in_neurons, name='decoder2')(decoded)
+    decoded = Activation('sigmoid')(decoded)
 
-    # this model maps an input to its reconstruction
     autoencoder = Model(inputs=input_data, outputs=decoded)
-    autoencoder.compile(optimizer=Adam(), loss='mse')
+    autoencoder.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
 
-    # setup callbacks
+    # Callbacks
     callbacks = [
-        EarlyStopping(monitor='val_loss', patience=5, verbose=1),
-        ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True, verbose=1)
+        EarlyStopping(monitor='val_loss', patience=10, verbose=1),
+        ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True, verbose=1),
+        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1)
     ]
 
-    # training
+    # Training
    print('Training the autoencoder')
     autoencoder.fit(x_train_noisy, x_train,
                     epochs=50,
-                    batch_size=8,
+                    batch_size=16,  # Adjusted batch size
                     shuffle=True,
                     validation_data=(x_test_noisy, x_test),
                     callbacks=callbacks)
@@ -57,6 +55,5 @@ with tf.device('/gpu:0'):
     # Load best model
     autoencoder.load_weights('best_model.h5')
 
-    # Freeze the weights
+    # Freeze the weights for inference
     autoencoder.trainable = False
-
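
Note that the hunks above use `num_in_neurons`, `x_train`, `x_train_noisy`, `x_test`, and `x_test_noisy` without defining them, so the file as shown is not runnable on its own. Below is a minimal sketch of the missing setup, assuming an MNIST-style denoising task with flattened inputs scaled to [0, 1] and additive Gaussian noise; the dataset choice, seed, and `noise_factor` are illustrative assumptions, not something this commit specifies.

import numpy as np
import tensorflow as tf

# Hypothetical data preparation; test.py may define these names differently.
(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()

# Flatten to vectors and scale to [0, 1] so the sigmoid output layer can
# reconstruct the targets.
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0
num_in_neurons = x_train.shape[1]  # 784 for 28x28 MNIST images

# Corrupt the inputs; the autoencoder learns to map noisy -> clean.
noise_factor = 0.3  # illustrative assumption
rng = np.random.default_rng(seed=42)
x_train_noisy = np.clip(x_train + noise_factor * rng.standard_normal(x_train.shape), 0.0, 1.0).astype('float32')
x_test_noisy = np.clip(x_test + noise_factor * rng.standard_normal(x_test.shape), 0.0, 1.0).astype('float32')

On the model change itself: the commit pulls the nonlinearity out of each Dense layer into a separate Activation placed after BatchNormalization, so each stage now runs Dense → BatchNormalization → Activation instead of normalizing an already-activated output, which matches the placement used in the original batch normalization paper (Ioffe & Szegedy, 2015).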