Innokentiy committed on
Commit
623c9b6
1 Parent(s): 2028705

Upload 4 files

Files changed (4)
  1. Screenshot_5.png +0 -0
  2. Screenshot_6.png +0 -0
  3. main.py +129 -0
  4. model (1).png +0 -0
Screenshot_5.png ADDED
Screenshot_6.png ADDED
main.py ADDED
@@ -0,0 +1,129 @@
+ import tensorflow as tf
+ import tensorflow_datasets as tfds
+ import os
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from tensorflow.keras import regularizers
+
+ # Require a Colab TPU runtime and build its gRPC address.
+ assert 'COLAB_TPU_ADDR' in os.environ, 'Missing TPU?'
+ if 'COLAB_TPU_ADDR' in os.environ:
+     TF_MASTER = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])
+ else:
+     TF_MASTER = ''
+ tpu_address = TF_MASTER
+
+ # Connect to the TPU cluster and initialize it.
+ resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu_address)
+ tf.config.experimental_connect_to_cluster(resolver)
+ tf.tpu.experimental.initialize_tpu_system(resolver)
+
+ # Distribute training across the TPU cores.
+ strategy = tf.distribute.TPUStrategy(resolver)
+
+
+ def create_model():
+     # CNN classifier for 28x28x1 MNIST digits: two convolutional blocks
+     # followed by a regularized dense head.
+     return tf.keras.Sequential([
+         tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
+         tf.keras.layers.BatchNormalization(),
+         tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
+         tf.keras.layers.BatchNormalization(),
+         tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
+         tf.keras.layers.Dropout(0.25),
+
+         tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
+         tf.keras.layers.BatchNormalization(),
+         tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.001)),
+         tf.keras.layers.BatchNormalization(),
+         tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
+         tf.keras.layers.Dropout(0.25),
+
+         tf.keras.layers.Flatten(),
+         tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.001)),
+         tf.keras.layers.BatchNormalization(),
+         tf.keras.layers.Dropout(0.5),
+         tf.keras.layers.Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.001)),
+         tf.keras.layers.BatchNormalization(),
+         tf.keras.layers.Dropout(0.5),
+         tf.keras.layers.Dense(10, activation='softmax')
+     ])
+
+
+ def get_dataset(batch_size, is_training=True):
+     # Load MNIST from TFDS and scale pixel values to [0, 1].
+     split = 'train' if is_training else 'test'
+     dataset, info = tfds.load(name='mnist', split=split, with_info=True,
+                               as_supervised=True, try_gcs=True)
+
+     def scale(image, label):
+         image = tf.cast(image, tf.float32)
+         image /= 255.0
+         return image, label
+
+     dataset = dataset.map(scale)
+     # Shuffle and repeat only the training split so validation sees each example once.
+     if is_training:
+         dataset = dataset.shuffle(10000)
+         dataset = dataset.repeat()
+     dataset = dataset.batch(batch_size)
+     return dataset
+
+
+ with strategy.scope():
+     model = create_model()
+     model.compile(optimizer='adam',
+                   loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
+                   metrics=['sparse_categorical_accuracy'])
+     model.summary()
+
+
+ batch_size = 512
+ train_dataset = get_dataset(batch_size, True)
+ validation_dataset = get_dataset(batch_size, False)
+
+ # Recreate the model for training; steps_per_execution runs 50 batches per
+ # TPU call to reduce host-device communication overhead.
+ with strategy.scope():
+     model = create_model()
+     model.compile(optimizer='adam',
+                   steps_per_execution=50,
+                   loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
+                   metrics=['sparse_categorical_accuracy'])
+
+ epochs = 80
+ steps_per_epoch = 60000 // batch_size
+ validation_steps = 10000 // batch_size
+ history = model.fit(train_dataset,
+                     epochs=epochs,
+                     steps_per_epoch=steps_per_epoch,
+                     validation_data=validation_dataset,
+                     validation_steps=validation_steps)
+
+
+ acc = history.history['sparse_categorical_accuracy']
+ val_acc = history.history['val_sparse_categorical_accuracy']
+ loss = history.history['loss']
+ val_loss = history.history['val_loss']
+ epochs_range = range(epochs)
+
+ plt.figure(figsize=(15, 15))
+ plt.subplot(2, 2, 1)
+ plt.plot(epochs_range, acc, label='Training Accuracy')
+ plt.plot(epochs_range, val_acc, label='Validation Accuracy')
+ plt.legend(loc='lower right')
+ plt.title('Training and Validation Accuracy')
+
+ plt.subplot(2, 2, 2)
+ plt.plot(epochs_range, loss, label='Training Loss')
+ plt.plot(epochs_range, val_loss, label='Validation Loss')
+ plt.legend(loc='upper right')
+ plt.title('Training and Validation Loss')
+ plt.show()
+
+
+ # Take a batch of validation images and visualize the first 10 predictions.
+ final_dataset = validation_dataset.take(10)
+ test_images, test_labels = next(iter(final_dataset))
+ class_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
+
+ # Get the network's predictions for the test images
+ predictions = model.predict(test_images)
+
+ fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(15, 6),
+                          subplot_kw={'xticks': [], 'yticks': []})
+ for i, ax in enumerate(axes.flat):
+     # Display the image (drop the trailing channel axis for imshow)
+     ax.imshow(tf.squeeze(test_images[i]), cmap='gray')
+     # Display the true label and the prediction
+     true_label = class_names[int(test_labels[i])]
+     pred_label = class_names[np.argmax(predictions[i])]
+     if true_label == pred_label:
+         ax.set_title("True: {}, AI: {}".format(true_label, pred_label), color='green')
+     else:
+         ax.set_title("True: {}, AI: {}".format(true_label, pred_label), color='red')
+
+ plt.tight_layout()
+ plt.show()
model (1).png ADDED