roshnn24 committed on
Commit
04ff542
1 Parent(s): 1bae2ff

Upload 5 files


MLathon.py: this file contains the core architecture and training code of the model. Run it for an appropriate number of epochs to fit the model.
improve.py: this file loads a saved model and fine-tunes it further. Use effective optimizers to improve the learning process.
test.py: use this file to test the model on a single image and show the prediction in a matplotlib figure.
eval.py: use this file to run the model over the test dataset and save its predictions to a CSV file. A minimal usage sketch of the overall workflow follows below.
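A minimal usage sketch, not part of the uploaded files: train with MLathon.py, then load the saved model and classify one image. The model filename ("model_3.h5") and the 224x224 input size are taken from MLathon.py; the example image path is hypothetical.

import numpy as np
import tensorflow as tf

# Load the model that MLathon.py saves (assumed to be in the working directory).
model = tf.keras.models.load_model("model_3.h5")

# Preprocess one image the same way test.py does: decode, resize to 224x224, scale to [0, 1].
img = tf.io.read_file("/path/to/some_bird.jpg")  # hypothetical image path
img = tf.image.decode_image(img, channels=3)
img = tf.image.resize(img, [224, 224]) / 255.0

# Predict and report the most likely class index (0-9 for the ten bird classes).
pred = model.predict(tf.expand_dims(img, axis=0))
print("Predicted class index:", int(np.argmax(pred)))

To keep training instead, improve.py recompiles the loaded model with Adam and categorical cross-entropy and fits it for a few more epochs before saving it again.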

Files changed (5)
  1. MLathon.py +94 -0
  2. count.py +34 -0
  3. eval.py +38 -0
  4. improve.py +107 -0
  5. test.py +69 -0
MLathon.py ADDED
@@ -0,0 +1,94 @@
+ import tensorflow as tf
+ import os
+
+ import pathlib
+ import numpy as np
+ data_dir = pathlib.Path("/Users/rosh/Downloads/Train_data")
+ class_names = np.array(sorted([item.name for item in data_dir.glob("*")]))
+ class_names = list(class_names)
+ import matplotlib.pyplot as plt
+ import matplotlib.image as mpimg
+ import random
+ def view_random_image(target_dir, target_class):
+
+     target_folder = target_dir + "/" + target_class
+
+     random_image = random.sample(os.listdir(target_folder), 1)
+
+     img = mpimg.imread(target_folder + "/" + random_image[0])
+     plt.imshow(img)
+     plt.title(target_class)
+     plt.axis("off")
+
+     print(f"Image shape: {img.shape}")
+     plt.show()
+     return img
+
+
+ #img = view_random_image(target_dir="/Users/rosh/Downloads/Train_data",target_class)
+
+ from tensorflow.keras.preprocessing.image import ImageDataGenerator
+
+ tf.random.set_seed(42)
+
+
+ # Define data augmentation parameters
+ train_datagen = ImageDataGenerator(
+     rotation_range=20,       # Random rotation in the range [-20, 20] degrees
+     width_shift_range=0.1,   # Random horizontal shift by up to 10% of the width
+     height_shift_range=0.1,  # Random vertical shift by up to 10% of the height
+     shear_range=0.2,         # Shear intensity (shear angle in radians)
+     zoom_range=0.2,          # Random zoom in the range [0.8, 1.2]
+     horizontal_flip=True,    # Random horizontal flipping
+     vertical_flip=True,      # Random vertical flipping
+     fill_mode='nearest',     # Fill mode for points outside the input boundaries
+     rescale=1./255           # Rescaling factor
+ )
+
+ valid_datagen = ImageDataGenerator(
+     rescale=1./255           # Rescaling factor
+ )
+
+
+ train_dir = "/Users/rosh/Downloads/Train_data"
+ valid_dir = "/Users/rosh/Downloads/Validation_data"
+
+ train_data = train_datagen.flow_from_directory(directory=train_dir,
+                                                batch_size=32,
+                                                target_size=(224, 224),
+                                                class_mode="categorical",
+                                                seed=42)
+ valid_data = valid_datagen.flow_from_directory(directory=valid_dir,
+                                                batch_size=32,
+                                                target_size=(224, 224),
+                                                class_mode="categorical",
+                                                seed=42)
+
+
+ model_1 = tf.keras.Sequential([
+     tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation="relu", input_shape=(224, 224, 3)),
+     tf.keras.layers.MaxPool2D(pool_size=2, padding="valid"),
+     tf.keras.layers.Conv2D(64, 3, activation="relu"),
+     tf.keras.layers.MaxPool2D(2),
+     tf.keras.layers.Conv2D(64, 3, activation="relu"),
+     tf.keras.layers.MaxPool2D(2),
+     tf.keras.layers.Flatten(),
+     tf.keras.layers.Dense(64, activation="relu"),
+     tf.keras.layers.Dense(10, activation="softmax")
+ ])
+
+
+ model_1.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
+                 optimizer=tf.keras.optimizers.Adam(),
+                 metrics=["accuracy"])
+
+
+ history = model_1.fit(train_data,
+                       epochs=100,
+                       steps_per_epoch=len(train_data),
+                       validation_data=valid_data,
+                       validation_steps=len(valid_data),
+                       verbose=1)
+
+ model_1.save("model_3.h5")
+ #
count.py ADDED
@@ -0,0 +1,34 @@
+ # from tensorflow.keras.preprocessing.image import ImageDataGenerator
+ # import numpy as np
+ # import tensorflow as tf
+ # valid_datagen = ImageDataGenerator(
+ #     rescale=1./255  # Rescaling factor
+ # )
+ # valid_dir = "/Users/rosh/Downloads/Validation_data"
+ # valid_data = valid_datagen.flow_from_directory(directory=valid_dir,
+ #                                                batch_size=32,
+ #                                                target_size=(224, 224),
+ #                                                class_mode="categorical",
+ #                                                seed=42)
+ # loaded_model = tf.keras.models.load_model('improved_model_4.h5')
+ # true_labels = []
+ # for i in range(len(valid_data)):
+ #     _, labels = valid_data[i]
+ #     true_labels.extend(np.argmax(labels, axis=1))
+ #
+ # # Print true labels
+ # print("True labels:", true_labels)
+ # pred_prob = loaded_model.predict(valid_data)
+ # preds = pred_prob.argmax(axis=1)
+ # print("Predicted: ")
+ # count = 0
+ # for i in range(len(preds)):
+ #     if true_labels[i] == preds[i]:
+ #         count += 1
+ # print(count)
+ #print(tf.keras.models.load_model('model_4_improved_1.h5').summary())
+ import keras
+ import tensorflow as tf
+
+ print("Keras version:", keras.__version__)
+ print("TensorFlow version:", tf.__version__)
eval.py ADDED
@@ -0,0 +1,38 @@
+ import os
+ import csv
+ import numpy as np
+ from tensorflow.keras.preprocessing import image
+ import tensorflow as tf
+ # Define the path to the directory containing test images
+ predict_dir = "/Users/rosh/Downloads/Eval_data"  # Change this to the actual path
+ model = tf.keras.models.load_model("model_4_improved_8.h5")
+ # Define class labels
+ class_labels = ['Crane', 'Crow', 'Egret', 'Kingfisher', 'Myna', 'Peacock', 'Pitta', 'Rosefinch', 'Tailorbird', 'Wagtail']
+ # Open a CSV file to write the results
+ with open('pred.csv', mode='w', newline='') as csvfile:
+     writer = csv.writer(csvfile)
+     writer.writerow(['Name', 'Target_name', 'Target_num'])
+     qq = 0
+     # Loop through each image file and make predictions
+     for img_file in os.listdir(predict_dir):
+         print(qq)
+         # Load and preprocess the image
+         img_path = '/Users/rosh/Downloads/Eval_data' + '/' + img_file
+         img = image.load_img(img_path, target_size=(224, 224))  # Ensure the target_size matches the input size of your model
+
+         # Preprocess the image
+         img_array = image.img_to_array(img)
+         img_array = np.expand_dims(img_array, axis=0)
+
+         # Make prediction
+         prediction = model.predict(img_array, verbose=0)
+         predicted_class = np.argmax(prediction, axis=1)[0]
+         # Assuming train_images.class_indices is a dictionary mapping class names to indices
+         class_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+         class_names = ['Crane', 'Crow', 'Egret', 'Kingfisher', 'Myna', 'Peacock', 'Pitta', 'Rosefinch', 'Tailorbird', 'Wagtail']
+         predicted_class_name = class_names[predicted_class]
+
+         writer.writerow([img_file[:img_file.index('.jpg')], predicted_class_name, predicted_class])
+         qq += 1
+
+ print("Predictions saved to pred.csv")
improve.py ADDED
@@ -0,0 +1,107 @@
+ import tensorflow as tf
+ import os
+
+ import pathlib
+ import numpy as np
+ data_dir = pathlib.Path("/Users/rosh/Downloads/Train_data")
+ class_names = np.array(sorted([item.name for item in data_dir.glob("*")]))
+ class_names = list(class_names)
+ import matplotlib.pyplot as plt
+ import matplotlib.image as mpimg
+ import random
+ # def view_random_image(target_dir, target_class):
+ #
+ #     target_folder = target_dir + "/" + target_class
+ #
+ #     random_image = random.sample(os.listdir(target_folder), 1)
+ #
+ #     img = mpimg.imread(target_folder + "/" + random_image[0])
+ #     plt.imshow(img)
+ #     plt.title(target_class)
+ #     plt.axis("off")
+ #
+ #     print(f"Image shape: {img.shape}")
+ #     plt.show()
+ #     return img
+ #
+ #
+ # #img = view_random_image(target_dir="/Users/rosh/Downloads/Train_data",target_class)
+ #
+ from tensorflow.keras.preprocessing.image import ImageDataGenerator
+ #
+ # tf.random.set_seed(42)
+ #
+ #
+ # Define data augmentation parameters
+ train_datagen = ImageDataGenerator(
+     rotation_range=20,       # Random rotation in the range [-20, 20] degrees
+     width_shift_range=0.1,   # Random horizontal shift by up to 10% of the width
+     height_shift_range=0.1,  # Random vertical shift by up to 10% of the height
+     shear_range=0.2,         # Shear intensity (shear angle in radians)
+     zoom_range=0.2,          # Random zoom in the range [0.8, 1.2]
+     horizontal_flip=True,    # Random horizontal flipping
+     vertical_flip=True,      # Random vertical flipping
+     fill_mode='nearest',     # Fill mode for points outside the input boundaries
+     rescale=1./255           # Rescaling factor
+ )
+
+ valid_datagen = ImageDataGenerator(
+     rescale=1./255           # Rescaling factor
+ )
+ #
+ #
+ train_dir = "/Users/rosh/Downloads/Train_data"
+ #valid_dir = "/Users/rosh/Downloads/Birds"
+ #
+ train_data = train_datagen.flow_from_directory(directory=train_dir,
+                                                batch_size=32,
+                                                target_size=(224, 224),
+                                                class_mode="categorical",
+                                                seed=42)
+ # valid_data = valid_datagen.flow_from_directory(directory=valid_dir,
+ #                                                batch_size=32,
+ #                                                target_size=(224, 224),
+ #                                                class_mode="categorical",
+ #                                                seed=42)
+
+
+ # model_1 = tf.keras.Sequential([
+ #     tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation="relu", input_shape=(224, 224, 3)),
+ #     tf.keras.layers.MaxPool2D(pool_size=2, padding="valid"),
+ #     tf.keras.layers.Conv2D(64, 3, activation="relu"),
+ #     tf.keras.layers.MaxPool2D(2),
+ #     tf.keras.layers.Conv2D(128, 3, activation="relu"),  # Increased filters
+ #     tf.keras.layers.MaxPool2D(2),
+ #     tf.keras.layers.Conv2D(128, 3, activation="relu"),  # Increased filters
+ #     tf.keras.layers.MaxPool2D(2),
+ #     tf.keras.layers.Flatten(),
+ #     tf.keras.layers.Dense(256, activation="relu"),  # Increased units
+ #     tf.keras.layers.Dropout(0.5),
+ #     tf.keras.layers.Dense(10, activation="softmax")
+ # ])
+ #
+ #
+ # model_1.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
+ #                 optimizer=tf.keras.optimizers.Adam(),
+ #                 metrics=["accuracy"])
+ #
+ #
+ # history = model_1.fit(train_data,
+ #                       epochs=40,
+ #                       steps_per_epoch=len(train_data),
+ #                       validation_data=valid_data,
+ #                       validation_steps=len(valid_data),
+ #                       verbose=1)
+ #
+ # model_1.save("model_4.h5")
+ l_model = tf.keras.models.load_model('model_4_improved_8.h5')
+
+
+ l_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
+                 optimizer=tf.keras.optimizers.Adam(),
+                 metrics=["accuracy"])
+ l_model.fit(train_data,
+             epochs=5,
+             verbose=1)
+
+ l_model.save("model_4_improved_8.h5")
test.py ADDED
@@ -0,0 +1,69 @@
+ # Create a function to import an image and resize it to be able to be used with our model
+ import tensorflow as tf
+ import matplotlib.pyplot as plt
+ import os
+ import pathlib
+ import numpy as np
+ data_dir = pathlib.Path("/Users/rosh/Downloads/Train_data")
+ class_names = np.array(sorted([item.name for item in data_dir.glob("*")]))
+ class_names = list(class_names)
+ class_names.pop(0)
+ loaded_model = tf.keras.models.load_model('model_4_improved_8.h5')
+ def load_and_prep_image(filename, img_shape=224):
+     """
+     Reads an image from filename, turns it into a tensor
+     and reshapes it to (img_shape, img_shape, colour_channel).
+     """
+     # Read in target file (an image)
+     img = tf.io.read_file(filename)
+
+     # Decode the read file into a tensor & ensure 3 colour channels
+     # (our model is trained on images with 3 colour channels and sometimes images have 4 colour channels)
+     img = tf.image.decode_image(img, channels=3)
+
+     # Resize the image (to the same size our model was trained on)
+     img = tf.image.resize(img, size=[img_shape, img_shape])
+
+     # Rescale the image (get all values between 0 and 1)
+     img = img/255.
+     return img
+
+ # Adjust function to work with multi-class
+ def pred_and_plot(model, filename, class_names):
+     """
+     Imports an image located at filename, makes a prediction on it with
+     a trained model and plots the image with the predicted class as the title.
+     """
+     # Import the target image and preprocess it
+     img = load_and_prep_image(filename)
+
+     # Make a prediction
+     pred = model.predict(tf.expand_dims(img, axis=0))
+
+     # Get the predicted class
+
+     pred_class = class_names[pred.argmax()]  # if more than one output, take the max
+
+     # Plot the image and predicted class
+     plt.imshow(img)
+     plt.title(f"Prediction: {pred_class}")
+     plt.axis(False)
+     plt.show()
+
+ pred_and_plot(loaded_model, "/Users/rosh/Downloads/egret.jpg", class_names)
+ # # loaded_model.compile(loss='categorical_crossentropy',
+ # #                      optimizer='adam',
+ # #                      metrics=['accuracy'])
+ # # Get true labels
+ # valid_datagen = ImageDataGenerator(
+ #     rescale=1./255  # Rescaling factor
+ # )
+ # valid_dir = "/Users/rosh/Downloads/Validation_data"
+ # valid_data = valid_datagen.flow_from_directory(directory=valid_dir,
+ #                                                batch_size=32,
+ #                                                target_size=(224, 224),
+ #                                                class_mode="categorical",
+ #                                                seed=42)
+ # pred = loaded_model.predict(valid_data)
+ # preds = pred.argmax(axis=1)
+ # print(preds)