#!/usr/bin/env python
# coding: utf-8

# In[10]:


# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os

# Walk the dataset root and print the first file found in each directory,
# just to confirm the data is laid out as one folder per class.
for dirname, _, filenames in os.walk('E:\\AI_math\\homework\\flower_photos'):
    if filenames:
        print(os.path.join(dirname, filenames[0]))


# In[11]:


import torch

# Prefer the GPU when one is available; fall back to the CPU otherwise.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")


# ## Loading data with batchsize 32

# In[12]:


import torch
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms

# Preprocessing: resize to the network's input size, convert to tensors,
# and scale each channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# Load every image under the class-per-folder dataset root.
dataset = datasets.ImageFolder(root='E:\\AI_math\\homework\\flower_photos', transform=transform)

# 80% of the images go to training, the remainder to testing.
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = random_split(dataset, [train_size, test_size])

# Batched loaders; only the training split is shuffled.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=2)

print("Training set size:", len(train_dataset))
print("Testing set size:", len(test_dataset))


# In[13]:


import matplotlib.pyplot as plt

# Human-readable class names, in ImageFolder's alphabetical folder order.
classes = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']


# In[14]:


# Cache up to 20 samples from one training batch as (H, W, C) numpy arrays
# paired with their class names, for the visualization cells below.
imageList = []
first_images, first_labels = next(iter(train_loader))
for idx, img_tensor in enumerate(first_images[:20]):
    imageList.append((img_tensor.permute(1, 2, 0).numpy(),
                      classes[first_labels[idx].item()]))


# ## Visualize flowers

# In[15]:


# Show the cached batch in a 4x5 grid, each tile titled with its true class.
fig, axes = plt.subplots(4, 5, figsize=(15, 6))

for ax, (img, label) in zip(axes.flatten(), imageList):
    ax.imshow(img, label=label)
    ax.set_title(label)
    ax.axis('off')  # hide tick marks and frame

plt.show()


# ## Model 1

# In[16]:


from torch import nn


# In[ ]:





# In[17]:


# Maps model name -> best test accuracy seen so far; consulted by
# save_best_model/get_best_model to decide when a checkpoint is stale.
best_models = {}


# In[18]:


def get_best_model(name):
    """Load the checkpointed best model for `name`.

    Returns None when no best accuracy was ever recorded for `name`, or when
    loading the checkpoint file fails (the error is printed, not raised).
    """
    if name not in best_models:
        return None
    try:
        return torch.load(f"{name}_best.pt")
    except Exception as e:
        print(e)
        return None


# In[19]:


def save_best_model(name, model, accuracy):
    """Persist `model` to {name}_best.pt when `accuracy` beats the recorded best.

    Does nothing when a best accuracy is already recorded and `accuracy`
    does not improve on it; otherwise updates `best_models` and overwrites
    the checkpoint on disk.
    """
    previous = best_models.get(name)
    if previous is not None and accuracy <= previous:
        return  # not an improvement; keep the existing checkpoint

    old = previous or 0
    best_models[name] = accuracy
    torch.save(model, f"{name}_best.pt")

    print(f"\n🤩🤩\nBest Model {name} updated Accuracy {accuracy*100:.2f} | old : {old*100:.2f}", end="\n\n")


# In[20]:


import torch
import torch.nn as nn
# Fix the RNG so weight initialization is reproducible across runs.
torch.manual_seed(10)
torch.cuda.manual_seed(10)
# Baseline CNN for 224x224 RGB inputs: five conv->ReLU->maxpool stages that
# halve the spatial size each time, followed by a 3-layer MLP classifier.
model_1 = nn.Sequential(
    # First Convolutional Layer
    nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),  # Output: [16, 224, 224]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(16),  # Output: [16, 112, 112]

    # Second Convolutional Layer
    nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1), # Output: [32, 112, 112]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),                 # Output: [32, 56, 56]

    # Third Convolutional Layer
    nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1), # Output: [64, 56, 56]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),                 # Output: [64, 28, 28]

    # Fourth Convolutional Layer
    nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), # Output: [128, 28, 28]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),                  # Output: [128, 14, 14]

    # Fifth Convolutional Layer
    nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), # Output: [256, 14, 14]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(0.2),  # Output: [256, 7, 7]

    # Flatten the tensor for the fully connected layers
    nn.Flatten(),                                          # Output: [256 * 7 * 7]

    # Fully Connected Layers
    nn.Linear(256 * 7 * 7, 512),                           # Output: [512]
    nn.ReLU(),
    nn.Linear(512, 256),                                   # Output: [256]
    nn.ReLU(),
    nn.Linear(256, 5)                                      # Output: [5] (one logit per flower class)
).to(device)




# In[21]:


# CrossEntropyLoss expects raw logits plus integer class labels.
loss_fn = nn.CrossEntropyLoss()

import torch.optim as optim

# Adam optimizer with the default-ish learning rate for the baseline model.
optimizer = optim.Adam(model_1.parameters(), lr=0.001)


# In[22]:


def truePositive(logits, labels):
    """Count samples whose argmax prediction matches the ground-truth label.

    `logits` is a [batch, num_classes] tensor; `labels` holds the integer
    class indices. Both may live on any device.
    """
    preds = logits.detach().cpu().numpy().argmax(axis=1)
    truth = labels.long().detach().cpu().numpy()
    return np.sum(preds == truth)
    

def accuracy_score(logits, labels):
    """Return the fraction of samples whose predicted class matches the label."""
    return truePositive(logits, labels) / len(labels)



def get_accuracy(model):
    """Evaluate `model` on the global test_loader.

    Returns (accuracy, correct_count, total_count). Runs in eval mode with
    gradients disabled.
    """
    model.eval()
    correct, total = 0, 0
    with torch.inference_mode():
        for images, labels in test_loader:
            outputs = model(images.to(device))
            correct += truePositive(outputs, labels)
            total += len(labels)
    return correct / total, correct, total
        
        

def print_accuracy(model):
    """Print the model's test-set accuracy as 'correct/total = pct%'."""
    acc, correct, total = get_accuracy(model)
    print(f'Accuracy= {correct}/{total} = {acc * 100:.2f}%')


# In[23]:


# Sanity-check one batch's tensor shape: expect [32, 3, 224, 224].
images, labels = next(iter(train_loader))
print(images.shape)


# In[18]:


# Train model_1 for 30 epochs, tracking per-epoch loss/accuracy history
# for the plots below and checkpointing the best test accuracy.
epochs=30
epoch_count=[]
loss_values = []
test_lost_values = []
train_acc=[]
test_acc=[]

for epoch in range(epochs):
    model_1.train()

    # Running totals for this epoch's training loss and accuracy.
    train_loss=0
    train_loss_count=0
    total_labels=0
    trueVal=0


    for images, labels in train_loader:
        logits = model_1(images.to(device))

        # Accumulate accuracy stats before the backward pass.
        trueVal+=truePositive(logits,labels)
        total_labels+=len(labels)
        loss = loss_fn(logits,labels.to(device))

        train_loss+=loss.item()
        train_loss_count+=1

        # Standard step: clear grads, backprop, update weights.
        optimizer.zero_grad()

        loss.backward()

        optimizer.step()




    # Evaluate on the held-out split with gradients disabled.
    model_1.eval()
    with torch.inference_mode():
        test_loss =0
        test_loss_count=0
        test_labels=0
        test_true=0

        for images,labels in test_loader:
            logits = model_1(images.to(device))
            test_true+=truePositive(logits,labels)
            test_loss+= loss_fn(logits,labels.to(device)).item()
            test_loss_count+=1
            test_labels+=len(labels)




    # Average the per-batch losses; the `else 1` guards an empty loader.
    test_loss =  test_loss/ test_loss_count if test_loss_count!=0 else 1
    train_loss =  train_loss/ train_loss_count if train_loss_count!=0 else 1
    train_accuracy = trueVal/total_labels
    acc2 = test_true/test_labels

    loss_values.append(train_loss)
    test_lost_values.append(test_loss)
    epoch_count.append(epoch)
    train_acc.append(train_accuracy)
    test_acc.append(acc2)



    print(f"Epoch: {epoch} | loss:{train_loss:.2f} | Test loss: {test_loss:.2f} | Accuracy = {trueVal}/{total_labels} = {train_accuracy*100:.2f}% | Test accuracy = {test_true}/{test_labels} = {acc2*100:.2f}%")
    # Checkpoint whenever test accuracy improves on the best seen so far.
    save_best_model("model_1",model_1,acc2)


# In[19]:


# Accuracy of the final-epoch weights.
print_accuracy(model_1)

# Accuracy of the best checkpoint saved during training.
best_model_1 = get_best_model("model_1").to(device)
print_accuracy(best_model_1)

# Loss curves over the training run.
plt.figure()
plt.plot(epoch_count, loss_values, c='b', label="Train Loss")
plt.plot(epoch_count, test_lost_values, c='r', label="Validation Loss")
plt.title("Training And Validation Loss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

# Accuracy curves over the training run.
plt.figure()
plt.plot(epoch_count, train_acc, c='b', label="Train Accuracy")
plt.plot(epoch_count, test_acc, c='r', label="Validation Accuracy")
plt.title("Training And Validation Accuracy")
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()


# We got 71% accuracy from model 1 alone, and there is still plenty of room for improvement 🤩

# ## Model 2
# - Adding Dropouts
# - Batch Normalization
# - reducing learning rate
# - more linear layers

# In[35]:


import torch
import torch.nn as nn

# Same seed as model_1 so weight-init differences don't confound the comparison.
torch.manual_seed(10)
torch.cuda.manual_seed(10)
# model_2 = model_1 plus extra BatchNorm, conv dropout, and a much deeper MLP head.
model_2 = nn.Sequential(
    # First Convolutional Layer
    nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),  # Output: [16, 224, 224]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(16),  # Output: [16, 112, 112]

    # Second Convolutional Layer
    nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1), # Output: [32, 112, 112]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(32),  # Output: [32, 56, 56]

    # Third Convolutional Layer
    nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1), # Output: [64, 56, 56]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),                 # Output: [64, 28, 28]

    # Fourth Convolutional Layer
    nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), # Output: [128, 28, 28]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(0.2),  # Output: [128, 14, 14]

    # Fifth Convolutional Layer
    nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), # Output: [256, 14, 14]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(0.3),  # Output: [256, 7, 7]

    # Flatten the tensor for the fully connected layers
    nn.Flatten(),                                          # Output: [256 * 7 * 7]

    # Fully Connected Layers: wide funnel down to the 5 class logits.
    nn.Linear(256*7*7, 1024),                              # Output: [1024]
    nn.ReLU(),
    nn.Linear(1024, 512),                                  # Output: [512]
    nn.ReLU(),
    nn.Linear(512, 256),                                   # Output: [256]
    nn.ReLU(),
    nn.Linear(256, 128),                                   # Output: [128]
    nn.ReLU(),
    nn.Linear(128, 64),                                    # Output: [64]
    nn.ReLU(),
    nn.Linear(64, 32),                                     # Output: [32]
    nn.ReLU(),
    nn.Linear(32, 16),                                     # Output: [16]
    nn.ReLU(),
    nn.Linear(16, 5)                                       # Output: [5] (one logit per flower class)
).to(device)
model_2


# In[36]:


# Fresh loss and optimizer bound to model_2; slightly lower LR than model_1.
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model_2.parameters(), lr=0.0007)


# In[25]:


# Train model_2 for 50 epochs; same loop structure as model_1's training.
epochs=50
epoch_count=[]
loss_values = []
test_lost_values = []
train_acc=[]
test_acc=[]

for epoch in range(epochs):
    model_2.train()

    # Running totals for this epoch's training loss and accuracy.
    train_loss=0
    train_loss_count=0
    total_labels=0
    trueVal=0


    for images, labels in train_loader:
        logits = model_2(images.to(device))

        # Accumulate accuracy stats before the backward pass.
        trueVal+=truePositive(logits,labels)
        total_labels+=len(labels)
        loss = loss_fn(logits,labels.to(device))

        train_loss+=loss.item()
        train_loss_count+=1

        # Standard step: clear grads, backprop, update weights.
        optimizer.zero_grad()

        loss.backward()

        optimizer.step()




    # Evaluate on the held-out split with gradients disabled.
    model_2.eval()
    with torch.inference_mode():
        test_loss =0
        test_loss_count=0
        test_labels=0
        test_true=0

        for images,labels in test_loader:
            logits = model_2(images.to(device))
            test_true+=truePositive(logits,labels)
            test_loss+= loss_fn(logits,labels.to(device)).item()
            test_loss_count+=1
            test_labels+=len(labels)




    # Average the per-batch losses; the `else 1` guards an empty loader.
    test_loss =  test_loss/ test_loss_count if test_loss_count!=0 else 1
    train_loss =  train_loss/ train_loss_count if train_loss_count!=0 else 1
    train_accuracy = trueVal/total_labels
    acc2 = test_true/test_labels

    loss_values.append(train_loss)
    test_lost_values.append(test_loss)
    epoch_count.append(epoch)
    train_acc.append(train_accuracy)
    test_acc.append(acc2)



    print(f"Epoch: {epoch} | loss:{train_loss:.2f} | Test loss: {test_loss:.2f} | Accuracy = {trueVal}/{total_labels} = {train_accuracy*100:.2f}% | Test accuracy = {test_true}/{test_labels} = {acc2*100:.2f}%")
    # Checkpoint whenever test accuracy improves on the best seen so far.
    save_best_model("model_2",model_2,acc2)


# In[26]:


# Accuracy of the final-epoch weights.
print_accuracy(model_2)

# Accuracy of the best checkpoint saved during training.
best_model_2 = get_best_model("model_2").to(device)
print_accuracy(best_model_2)

# Best validation accuracy reached over all epochs.
max(test_acc)

# Loss curves over the training run.
plt.figure()
plt.plot(epoch_count, loss_values, c='b', label="Train Loss")
plt.plot(epoch_count, test_lost_values, c='r', label="Validation Loss")
plt.title("Training And Validation Loss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

# Accuracy curves over the training run.
plt.figure()
plt.plot(epoch_count, train_acc, c='b', label="Train Accuracy")
plt.plot(epoch_count, test_acc, c='r', label="Validation Accuracy")
plt.title("Training And Validation Accuracy")
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()


# Model 2 didn't perform that well, so I'm trying another variant of model_2 as model_3, with a few tweaks to the architecture.

# ## Model 3

# In[37]:


import torch
import torch.nn as nn

# Same seed as the other models so weight-init differences don't confound results.
torch.manual_seed(10)
torch.cuda.manual_seed(10)

# model_3 = model_2 plus a third BatchNorm, a sixth conv stage, and an even
# wider MLP head; dropout raised to 0.3 on the later conv stages.
model_3 = nn.Sequential(
    # First Convolutional Layer
    nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),  # Output: [16, 224, 224]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(16),  # Output: [16, 112, 112]

    # Second Convolutional Layer
    nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1), # Output: [32, 112, 112]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(32),  # Output: [32, 56, 56]

    # Third Convolutional Layer
    nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1), # Output: [64, 56, 56]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(64),  # Output: [64, 28, 28]

    # Fourth Convolutional Layer
    nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), # Output: [128, 28, 28]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(0.3),  # Output: [128, 14, 14]

    # Fifth Convolutional Layer
    nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), # Output: [256, 14, 14]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(0.3),  # Output: [256, 7, 7]

    # Sixth Convolutional Layer (no pooling, so spatial size stays 7x7)
    nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1), # Output: [512, 7, 7]
    nn.ReLU(),
    nn.Dropout(0.3),

    # Flatten the tensor for the fully connected layers
    nn.Flatten(),                                          # Output: [512 * 7 * 7]
    nn.Linear(512*7*7, 2048),                              # Output: [2048]
    nn.ReLU(),
    # Fully Connected Layers: funnel down to the 5 class logits.
    nn.Linear(2048, 1024),                                 # Output: [1024]
    nn.ReLU(),
    nn.Linear(1024, 512),                                  # Output: [512]
    nn.ReLU(),
    nn.Linear(512, 256),                                   # Output: [256]
    nn.ReLU(),
    nn.Linear(256, 128),                                   # Output: [128]
    nn.ReLU(),
    nn.Linear(128, 64),                                    # Output: [64]
    nn.ReLU(),
    nn.Linear(64, 32),                                     # Output: [32]
    nn.ReLU(),
    nn.Linear(32, 16),                                     # Output: [16]
    nn.ReLU(),
    nn.Linear(16, 5)                                       # Output: [5] (one logit per flower class)
).to(device)



# In[38]:


# CrossEntropyLoss for classification problems
loss_fn = nn.CrossEntropyLoss()

import torch.optim as optim

# Adam optimizer with a lower LR, suiting the deeper model_3.
optimizer = optim.Adam(model_3.parameters(), lr=0.0001)


# In[33]:


# Train model_3 for 80 epochs; same loop structure as the earlier models.
epochs=80
epoch_count=[]
loss_values = []
test_lost_values = []
train_acc=[]
test_acc=[]

for epoch in range(epochs):
    model_3.train()

    # Running totals for this epoch's training loss and accuracy.
    train_loss=0
    train_loss_count=0
    total_labels=0
    trueVal=0


    for images, labels in train_loader:
        logits = model_3(images.to(device))

        # Accumulate accuracy stats before the backward pass.
        trueVal+=truePositive(logits,labels)
        total_labels+=len(labels)
        loss = loss_fn(logits,labels.to(device))

        train_loss+=loss.item()
        train_loss_count+=1

        # Standard step: clear grads, backprop, update weights.
        optimizer.zero_grad()

        loss.backward()

        optimizer.step()




    # Evaluate on the held-out split with gradients disabled.
    model_3.eval()
    with torch.inference_mode():
        test_loss =0
        test_loss_count=0
        test_labels=0
        test_true=0

        for images,labels in test_loader:
            logits = model_3(images.to(device))
            test_true+=truePositive(logits,labels)
            test_loss+= loss_fn(logits,labels.to(device)).item()
            test_loss_count+=1
            test_labels+=len(labels)




    # Average the per-batch losses; the `else 1` guards an empty loader.
    test_loss =  test_loss/ test_loss_count if test_loss_count!=0 else 1
    train_loss =  train_loss/ train_loss_count if train_loss_count!=0 else 1
    train_accuracy = trueVal/total_labels
    acc2 = test_true/test_labels

    loss_values.append(train_loss)
    test_lost_values.append(test_loss)
    epoch_count.append(epoch)
    train_acc.append(train_accuracy)
    test_acc.append(acc2)



    print(f"Epoch: {epoch} | loss:{train_loss:.2f} | Test loss: {test_loss:.2f} | Accuracy = {trueVal}/{total_labels} = {train_accuracy*100:.2f}% | Test accuracy = {test_true}/{test_labels} = {acc2*100:.2f}%")
    # Checkpoint whenever test accuracy improves on the best seen so far.
    save_best_model("model_3",model_3,acc2)


# In[34]:


# Accuracy of the final-epoch weights.
print_accuracy(model_3)

# Accuracy of the best checkpoint saved during training.
best_model_3 = get_best_model("model_3").to(device)
print_accuracy(best_model_3)

# Loss curves over the training run.
plt.figure()
plt.plot(epoch_count, loss_values, c='b', label="Train Loss")
plt.plot(epoch_count, test_lost_values, c='r', label="Validation Loss")
plt.title("Training And Validation Loss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

# Accuracy curves over the training run.
plt.figure()
plt.plot(epoch_count, train_acc, c='b', label="Train Accuracy")
plt.plot(epoch_count, test_acc, c='r', label="Validation Accuracy")
plt.title("Training And Validation Accuracy")
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()


# Model 3 outperformed model 1, which suggests the extra dropout and batch normalization helped. To improve further, I'll try data augmentation to reduce overfitting.

# ## Model 4
# Let's try data augmentation:
# we apply random transformations to each batch to generate additional images, doubling the effective batch size.

# In[24]:


# Randomness applied on top of already-preprocessed tensors: flips and a
# large rotation, then an ImageNet-style per-channel re-normalization.
augmentation_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomVerticalFlip(p=0.5),
    transforms.RandomRotation(degrees=70),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])


# ### Normal Images

# In[25]:


# Re-plot the cached, un-augmented batch for side-by-side comparison.
fig, axes = plt.subplots(4, 5, figsize=(15, 6))

for ax, (img, label) in zip(axes.flatten(), imageList):
    ax.imshow(img, label=label)
    ax.set_title(label)
    ax.axis('off')  # hide tick marks and frame

plt.show()


# ### Augmented images Generation

# In[26]:


# Run each cached (H, W, C) numpy image through the augmentation pipeline:
# convert to a CHW tensor, augment, then convert back for plotting.
augmented_images = [
    (augmentation_transform(torch.Tensor(arr).permute(2, 0, 1)).permute(1, 2, 0).numpy(), name)
    for arr, name in imageList
]



# ### Augmented Images 

# In[27]:


# Show the augmented versions of the same batch in the same grid layout.
fig, axes = plt.subplots(4, 5, figsize=(15, 6))

for ax, (img, label) in zip(axes.flatten(), augmented_images):
    ax.imshow(img, label=label)
    ax.set_title(label)
    ax.axis('off')  # hide tick marks and frame

plt.show()


# In[39]:


import torch
import torch.nn as nn
# Same seed as the other models so weight-init differences don't confound results.
torch.manual_seed(10)
torch.cuda.manual_seed(10)


# model_4: model_3's conv trunk (minus the third BatchNorm) with a slimmer
# MLP head; trained below on augmented + original batches.
model_4 = nn.Sequential(
    # First Convolutional Layer
    nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),  # Output: [16, 224, 224]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(16),  # Output: [16, 112, 112]

    # Second Convolutional Layer
    nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1), # Output: [32, 112, 112]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.BatchNorm2d(32),  # Output: [32, 56, 56]

    # Third Convolutional Layer
    nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1), # Output: [64, 56, 56]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),                 # Output: [64, 28, 28]

    # Fourth Convolutional Layer
    nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), # Output: [128, 28, 28]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(0.3),  # Output: [128, 14, 14]

    # Fifth Convolutional Layer
    nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), # Output: [256, 14, 14]
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(0.3),  # Output: [256, 7, 7]

    # Sixth Convolutional Layer (no pooling, so spatial size stays 7x7)
    nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1), # Output: [512, 7, 7]
    nn.ReLU(),
    nn.Dropout(0.3),

    # Flatten the tensor for the fully connected layers
    nn.Flatten(),                                          # Output: [512 * 7 * 7]
    nn.Linear(512*7*7, 512),                               # Output: [512]
    nn.ReLU(),

    nn.Linear(512, 128),                                   # Output: [128]
    nn.ReLU(),

    nn.Linear(128, 32),                                    # Output: [32]
    nn.ReLU(),

    nn.Linear(32, 16),                                     # Output: [16]
    nn.ReLU(),
    nn.Linear(16, 5)                                       # Output: [5] (one logit per flower class)
).to(device)

model_4


# In[40]:


# CrossEntropyLoss for classification problems
loss_fn = nn.CrossEntropyLoss()

import torch.optim as optim

# Adam optimizer; LR chosen between model_2's and model_3's settings.
optimizer = optim.Adam(model_4.parameters(), lr=0.0002)


# ### Let's train model 4🚀

# In[46]:


# Train model_4 for 100 epochs. Each training step doubles the batch by
# concatenating an augmented copy of the images with the originals.
epochs=100
epoch_count=[]
loss_values = []
test_lost_values = []
train_acc=[]
test_acc=[]

for epoch in range(epochs):
    model_4.train()

    # Running totals for this epoch's training loss.
    train_loss=0
    train_loss_count=0



    train_true_pos=0
    train_label_count=0

    for images, labels in train_loader:

        # Augmented copy of the whole batch (random flips/rotation + renorm).
        aug1 = augmentation_transform(images)


        # Labels are duplicated to match the doubled image batch below.
        all_labels = torch.cat((labels,labels))

        all_images = torch.cat((aug1,images))

        logits = model_4(all_images.to(device))


        # Accuracy stats over the augmented+original pairs.
        train_true_pos+=truePositive(logits,all_labels)
        train_label_count+= len(all_labels)

        loss = loss_fn(logits,all_labels.to(device))
        train_loss+=loss.item()
        train_loss_count+=1

        # Standard step: clear grads, backprop, update weights.
        optimizer.zero_grad()

        loss.backward()

        optimizer.step()






    # Evaluate on the (un-augmented) held-out split with gradients disabled.
    model_4.eval()
    with torch.inference_mode():
        test_loss =0
        test_loss_count=0

        truePos=0
        label_count=0

        for images,labels in test_loader:
            logits = model_4(images.to(device))
            truePos+=truePositive(logits,labels)
            test_loss+= loss_fn(logits,labels.to(device)).item()
            test_loss_count+=1
            label_count+= len(labels)

        acc2 = truePos/label_count




    # Average the per-batch losses; the `else 1` guards an empty loader.
    test_loss =  test_loss/ test_loss_count if test_loss_count!=0 else 1
    train_loss =  train_loss/ train_loss_count if train_loss_count!=0 else 1
    train_accuracy = train_true_pos/train_label_count

    loss_values.append(train_loss)
    test_lost_values.append(test_loss)
    epoch_count.append(epoch)
    train_acc.append(train_accuracy)
    test_acc.append(acc2)


    # print(f"Epoch:{epoch} | tr_loss: {train_loss} | te_loss: {test_loss} | tr_accuracy: {acc} | te_accuracy:{acc2} | truepos :{truePos}")
    print(f"Epoch: {epoch} | loss:{train_loss:.2f} | Test loss: {test_loss:.2f} | Accuracy = {train_true_pos}/{train_label_count} = {train_accuracy*100:.2f}% | Test accuracy = {truePos}/{label_count} = {acc2*100:.2f}%")
    # Checkpoint whenever test accuracy improves on the best seen so far.
    save_best_model("model_4",model_4,acc2)


# In[47]:


# Accuracy of the final-epoch weights.
print_accuracy(model_4)

# Accuracy of the best checkpoint. Move it onto the active device for
# consistency with models 1-3 (this cell originally omitted .to(device),
# which breaks evaluation if the checkpoint deserializes onto the CPU).
best_model_4 = get_best_model("model_4").to(device)
print_accuracy(best_model_4)

# Loss curves over the training run.
plt.figure()
plt.plot(epoch_count, loss_values, c='b', label="Train Loss")
plt.plot(epoch_count, test_lost_values, c='r', label="Validation Loss")
plt.title("Training And Validation Loss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

# Accuracy curves over the training run.
plt.figure()
plt.plot(epoch_count, train_acc, c='b', label="Train Accuracy")
plt.plot(epoch_count, test_acc, c='r', label="Validation Accuracy")
plt.title("Training And Validation Accuracy")
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()


# It seems model 4 outperformed all the models🤩🤩🤩

# ### Saving the latest models
# 

# In[41]:


# Persist the final-epoch weights of every model (full-module pickle format,
# so loading them back requires a compatible torch/module environment).
torch.save(model_1,"model_1.pt")
torch.save(model_2,"model_2.pt")
torch.save(model_3,"model_3.pt")
torch.save(model_4,"model_4.pt")


# ## Best Model

# In[42]:


# Final-epoch models followed by their best checkpoints, and the matching
# display names in the same order for reporting below.
models = [
    model_1,
    model_2,
    model_3,
    model_4,
    get_best_model("model_1").to(device),
    get_best_model("model_2").to(device),
    get_best_model("model_3").to(device),
    get_best_model("model_4").to(device),
]

model_names = [
    "model_1", "model_2", "model_3", "model_4",
    "best_model_1", "best_model_2", "best_model_3", "best_model_4",
]


# In[30]:


def get_label_from_logits(logits):
    """Map a [1, num_classes] logits tensor to its predicted class name."""
    predicted = int(np.argmax(logits.detach().cpu().numpy(), axis=1)[0])
    return classes[predicted]
    


# In[31]:


# Evaluate every candidate on the test split, keeping only the accuracy
# component of each (accuracy, correct, total) tuple.
scores = [get_accuracy(m) for m in models]
scores = [accuracy for accuracy, _, _ in scores]

# Pick the candidate with the highest test accuracy.
max_score = max(scores)
max_score_index = scores.index(max_score)
best_model_name = model_names[max_score_index]
best_model_name, max_score

best_model = models[max_score_index]
best_model


# In[59]:


# Persist the overall winner for standalone reuse.
torch.save(best_model,"best.pt")


# ## It's time to predict some flowers with our best model

# In[60]:


# Grab one test batch and expose per-sample iterators for the demo cells below.
images, labels = next(iter(test_loader))
images_iter = iter(images)
labels_iter = iter(labels)


# In[61]:


def _show_prediction(image, label_tensor):
    """Plot one test image titled with its true label and the best model's prediction.

    `image` is a preprocessed [3, 224, 224] tensor from the test batch;
    `label_tensor` is the matching scalar class-index tensor.
    """
    label = classes[label_tensor.item()]
    # unsqueeze(0) adds the batch dimension directly, instead of the
    # original cells' slow numpy round-trip via torch.Tensor([image.numpy()]).
    logits = best_model(image.unsqueeze(0).to(device))
    predicted_label = get_label_from_logits(logits)

    plt.imshow(image.permute(1, 2, 0).numpy())
    plt.title(f"Label={label} Prediction={predicted_label}")


# The notebook repeated this cell seven times verbatim; one helper plus a
# loop shows the same seven consecutive samples without the duplication.
for _ in range(7):
    _show_prediction(next(images_iter), next(labels_iter))


# ### Test from the internet

# In[7]:


from PIL import Image
import requests

def predict_from_url(image_url, label):
    """Download an image, classify it with `best_model`, and plot the result.

    Args:
        image_url: direct URL of an image file.
        label: ground-truth class name, shown in the plot title.

    Bails out (with a message) when the download fails instead of
    predicting on a stale or missing file, which the original version did.
    """
    response = requests.get(image_url)
    if response.status_code != 200:
        print(f"Failed to download image. Status code: {response.status_code}")
        return

    # Save the image as 'test.jpg'
    with open("test.jpg", "wb") as file:
        file.write(response.content)
    print("Image saved as test.jpg")

    # Read back the file we just wrote. The original opened a hard-coded
    # absolute path ("E:\\AI_math\\homework\\test.jpg"), which only worked
    # when the working directory happened to match that folder.
    image = Image.open("test.jpg").convert("RGB")
    image_tensor = transform(image).unsqueeze(0)

    logits = best_model(image_tensor.to(device))
    predicted_label = get_label_from_logits(logits)

    plt.imshow(image_tensor.squeeze().permute(1, 2, 0).numpy())
    plt.title(f"Label={label} Prediction={predicted_label}")


# In[33]:


# Spot-check the best model on images fetched from the web.
image_url = "https://images-cdn.ubuy.co.in/65186466b64a6331f9320fa7-red-roses-50-next-day-flowers.jpg"
label = "rose"
predict_from_url(image_url, label)

image_url = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRTVJQhUtKoapg5tBzfBYei37jdgyqtsFM2rw&s"
label = "Dandelion"
predict_from_url(image_url, label)

image_url = "https://i.ytimg.com/vi/363cUhH3dSg/hqdefault.jpg"
label = "sunflowers"
predict_from_url(image_url, label)


# ## FINAL ACCURACY = 76%
# ## Thanks for reading:)

# In[ ]:




