from PIL import Image
from torchfusion_utils.fp16 import convertToFP16
from torchfusion_utils.initializers import *
from torchfusion_utils.metrics import Accuracy
from torchfusion_utils.models import load_model, save_model

import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
from tqdm import tqdm

class FireSmoketrain:
    """Train and run a 3-class fire/smoke image classifier (ResNet-50).

    Workflow: `load_data` builds ImageFolder loaders and records the
    class-name <-> label-id mappings, `train` fits a ResNet-50 and
    checkpoints on best validation accuracy, `model_inference_results`
    classifies a single image, `image_display` shows one.
    """

    # Shared preprocessing: resize, centre-crop to the 224x224 input that
    # ResNet expects, then scale pixel values to [-1, 1].
    _PREPROCESS = transforms.Compose([
        transforms.Resize(225),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ]) if 'transforms' in globals() else None

    def __init__(self):
        # Class-name <-> integer-label mappings; populated by load_data().
        self.CLS2ID = {}
        self.ID2CLS = {}

    def load_data(self, train_data_dir, test_data_dir, batch_size=64):
        """Build shuffled train/test DataLoaders from class-per-folder dirs.

        Args:
            train_data_dir: root directory with one sub-folder per class.
            test_data_dir: root directory with one sub-folder per class.
            batch_size: mini-batch size for both loaders.

        Returns:
            (train_data_loader, test_data_loader)
        """
        # Both splits use the same deterministic preprocessing (the original
        # duplicated two identical Compose pipelines).
        preprocessing = transforms.Compose([
            transforms.Resize(225),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])

        train_data = datasets.ImageFolder(root=train_data_dir, transform=preprocessing)
        test_data = datasets.ImageFolder(root=test_data_dir, transform=preprocessing)

        self.CLS2ID = train_data.class_to_idx
        self.ID2CLS = {v: k for k, v in self.CLS2ID.items()}

        train_data_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
        test_data_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True)
        return train_data_loader, test_data_loader

    def train(self, train_data_loader, test_data_loader, n_epochs, save_path=None):
        """Train a ResNet-50 and optionally checkpoint the best model.

        Args:
            train_data_loader: training loader (e.g. from load_data()).
            test_data_loader: validation loader.
            n_epochs: number of training epochs.
            save_path: if not None, the full model is saved here every time
                validation accuracy improves.

        Returns:
            (model, best_validation_accuracy)
        """
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        model = models.resnet50(num_classes=3)
        model = model.to(device)
        criteria = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

        # Decay the learning rate by 10x at epochs 100 and 150.
        milestones = [100, 150]
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1)
        train_acc = Accuracy(topK=1)
        validation_acc = Accuracy(topK=1)
        training_loss_array = []
        validation_loss_array = []

        saving_criteria_of_model = 0

        for epoch in range(n_epochs):
            total_test_data = 0
            training_loss = 0
            validation_loss = 0

            train_acc.reset()
            # BUG FIX: restore train mode each epoch (validation below
            # switches the model to eval mode).
            model.train()

            for data, target in tqdm(train_data_loader, desc="Training"):
                data, target = data.to(device), target.to(device)
                optimizer.zero_grad()
                predictions = model(data)
                loss = criteria(predictions, target)
                loss.backward()
                optimizer.step()

                # Accumulate the SUM of per-sample losses; averaged after the loop.
                training_loss += loss.item() * data.size(0)
                train_acc.update(predictions, target)

            scheduler.step()

            # BUG FIX: validation previously ran with the model in train mode,
            # so Dropout was active and BatchNorm kept updating its statistics.
            model.eval()
            with torch.no_grad():
                validation_acc.reset()
                for data, target in test_data_loader:
                    data, target = data.to(device), target.to(device)
                    predictions = model(data)
                    loss = criteria(predictions, target)
                    validation_acc.update(predictions, target)
                    total_test_data += target.size(0)
                    validation_loss += loss.item() * data.size(0)

            # BUG FIX: the original OVERWROTE the accumulated losses with the
            # dataset sizes (training_loss = len(dataset)), so the reported
            # "losses" were constants. Convert the sums to per-sample means.
            training_loss = training_loss / max(len(train_data_loader.dataset), 1)
            validation_loss = validation_loss / max(total_test_data, 1)
            training_loss_array.append(training_loss)
            validation_loss_array.append(validation_loss)
            print(f'{epoch + 1} / {n_epochs} Training loss: {training_loss}, Train Accuracy: {train_acc.getValue()}, Validation loss: {validation_loss}, Validation Accuracy: {validation_acc.getValue()}')
            if save_path is not None:
                # Checkpoint the full model whenever validation accuracy improves.
                if saving_criteria_of_model < validation_acc.getValue():
                    torch.save(model, save_path)
                    saving_criteria_of_model = validation_acc.getValue()

        plt.figure(figsize=(10, 6))
        x_axis = range(n_epochs)
        # BUG FIX: give BOTH curves the explicit epoch x-axis (the original
        # omitted it for the validation series, plotting it against indices).
        plt.plot(x_axis, training_loss_array, 'r', x_axis, validation_loss_array, 'b')
        plt.title('A graph of training loss vs validation loss')
        plt.legend(['train loss', 'validation loss'])
        plt.xlabel('Number of Epochs')
        plt.ylabel('Loss')
        plt.show()
        return model, saving_criteria_of_model

    def image_display(self, img_path, title, fonsize):
        """Display the image at img_path with a title (fonsize = title font size)."""
        plt.figure(figsize=(10, 6))
        plt.imshow(Image.open(img_path))
        plt.title(title, fontsize=fonsize)
        plt.yticks([])
        plt.xticks([])
        plt.show()

    def model_inference_results(self, img_path, load_saved_model):
        """Classify a single image with a trained model.

        Args:
            img_path: path to the image file.
            load_saved_model: a trained classifier (e.g. as saved by train()).

        Returns:
            (confidence, clas): top-1 softmax probability rounded to 4
            decimals, and the predicted class index (map via self.ID2CLS).
        """
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        transformer = transforms.Compose([
            transforms.Resize(225),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        # Force 3 channels so grayscale/RGBA files don't break the
        # 3-channel Normalize above.
        img = Image.open(img_path).convert('RGB')
        # Deprecated torch.autograd.Variable removed; tensors are Variables
        # in modern PyTorch, and no_grad() skips the useless autograd graph.
        img_var = transformer(img).unsqueeze(0).to(device)
        load_saved_model.eval()
        with torch.no_grad():
            logp = load_saved_model(img_var)
        expp = torch.softmax(logp, dim=1)
        confidence, clas = expp.topk(1, dim=1)
        confidence = round(confidence.item(), 4)
        clas = clas.item()
        return confidence, clas
