import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
# from tqdm import tqdm
import torchvision.transforms as transforms
import numpy as np
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    classification_report,
)
import copy
from datetime import datetime
from util import Parameter,create_symlink
import PetDataset

class Trainer:
    """Fine-tune a classifier, track the best validation accuracy, and
    periodically checkpoint the best model seen so far.

    The best-performing model (by validation accuracy) is kept in
    ``self.model``; every ``Parameter.N_SAVE_INTERVAL`` epochs it is written
    to disk if it has improved since the last save.
    """

    def __init__(self, model: nn.Module):
        self.model = model
        # Unfreeze everything: the loaded model may carry frozen layers
        # from a previous preparation stage.
        for param in self.model.parameters():
            param.requires_grad = True

        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.best_acc = 0.0  # best validation accuracy observed so far
        self.saved = True    # True once the current best model is on disk

    def test_and_print(self,
                       prompt: str,
                       model: nn.Module,
                       test_dataset: PetDataset.PetDataset,
                       batch_size: int = Parameter.BATCH_SIZE):
        """Evaluate ``model`` on ``test_dataset``, print a classification
        report plus summary metrics, and return the same tuple as ``test``.

        Returns:
            (accuracy, precision, recall, f1, all_labels, all_predictions)
        """
        (
            accuracy,
            precision,
            recall,
            f1,
            all_labels,
            all_predictions,
        ) = self.test(model, test_dataset, batch_size)

        print(prompt)
        print(classification_report(all_labels, all_predictions))
        print(
            f"accuracy: {accuracy}\nprecision: {precision}\nrecall: {recall}\nf1: {f1}"
        )

        return (
            accuracy,
            precision,
            recall,
            f1,
            all_labels,
            all_predictions,
        )

    def test(self,
             model: nn.Module,
             test_dataset: PetDataset.PetDataset,
             batch_size: int = Parameter.BATCH_SIZE):
        """Run inference over ``test_dataset`` and compute weighted metrics.

        Returns:
            (accuracy, precision, recall, f1, all_labels, all_predictions)
            where the last two are 1-D numpy arrays of class indices.
        """
        all_labels = []
        all_predictions = []

        test_dataloader = torch.utils.data.DataLoader(
            dataset=test_dataset, batch_size=batch_size, shuffle=False
        )

        model.eval()
        with torch.no_grad():
            for inputs, labels in test_dataloader:
                inputs = inputs.to(self.device)
                labels: torch.Tensor = labels.to(self.device)

                logits: torch.Tensor = model(inputs)
                prediction = logits.argmax(dim=1)

                all_predictions.append(prediction.cpu().numpy())
                all_labels.append(labels.cpu().numpy())

        all_labels = np.concatenate(all_labels)
        all_predictions = np.concatenate(all_predictions)

        accuracy = accuracy_score(all_labels, all_predictions)
        precision = precision_score(all_labels, all_predictions, average="weighted")
        recall = recall_score(all_labels, all_predictions, average="weighted")
        f1 = f1_score(all_labels, all_predictions, average="weighted")

        return accuracy, precision, recall, f1, all_labels, all_predictions

    def train(
        self,
        train_dataset: PetDataset.PetDataset,
        test_train_dataset: PetDataset.PetDataset,
        validate_dataset: PetDataset.PetDataset,
        checkpoint_dir: str,
        batch_size: int = Parameter.BATCH_SIZE,
        num_epochs: int = Parameter.NUM_EPOCHS,
    ):
        """Train ``self.model``, evaluating on the validation set each epoch
        and checkpointing the best model every ``Parameter.N_SAVE_INTERVAL``
        epochs.

        Args:
            train_dataset: dataset used for gradient updates.
            test_train_dataset: train-split dataset used only for evaluation.
            validate_dataset: dataset used to select the best model.
            checkpoint_dir: directory for ``*.pt`` checkpoints and ``best.pt``.
        """
        model: nn.Module = self.model.to(self.device)

        train_dataloader = torch.utils.data.DataLoader(
            dataset=train_dataset, batch_size=batch_size, shuffle=True
        )

        # NLLLoss expects log-probabilities as input; assumes the loaded
        # model's head ends in LogSoftmax -- TODO confirm (the argmax in
        # test() gives the same prediction either way).
        criterion = nn.NLLLoss()
        optimizer = optim.SGD(
            model.parameters(), lr=Parameter.LEARNING_RATE, momentum=Parameter.MOMENTUM
        )

        ####################### train #######################

        for epoch in range(num_epochs):
            print(f"\nepoch: {epoch}")

            model.train()
            for inputs, labels in train_dataloader:
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)

                optimizer.zero_grad()

                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

            # test on validate data (only accuracy drives model selection)
            (
                validate_accuracy,
                validate_precision,
                validate_recall,
                validate_f1,
                validate_all_labels,
                validate_all_predictions,
            ) = self.test_and_print("\ntest on validate data\n", model, validate_dataset, batch_size)
            # test on train data
            self.test_and_print("\ntest on train data:\n", model, test_train_dataset, batch_size)

            ####################### checkpoint #######################
            if validate_accuracy >= self.best_acc:
                self.best_acc = validate_accuracy
                # Snapshot now: `model` keeps training and may get worse.
                self.model = copy.deepcopy(model)
                self.saved = False

            if (epoch + 1) % Parameter.N_SAVE_INTERVAL == 0 and not self.saved:
                # Use '-' (not ':') in the time component: colons are
                # illegal in Windows filenames.
                file_path = f"{checkpoint_dir}/{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.pt"
                torch.save(
                    self.model,
                    file_path,
                )
                create_symlink(file_path, f"{checkpoint_dir}/best.pt")
                # Bug fix: mark the best model as persisted so the next
                # interval does not re-save an identical checkpoint.
                self.saved = True

def main():
    """Entry point: fine-tune the prepared embedding model on the pet data."""
    # NOTE(review): weights_only=False deserializes arbitrary pickle data --
    # acceptable only because this checkpoint is produced locally.
    base_model = torch.load("model/prepared/embedding_resnet50.pt", weights_only=False)
    trainer = Trainer(base_model)

    # Same CSV twice: one instance for training, one (with the eval
    # transform) for measuring accuracy on the training split.
    fit_dataset = PetDataset.PetDataset("train.csv")
    eval_train_dataset = PetDataset.PetDataset("train.csv", PetDataset.transform)
    validation_dataset = PetDataset.PetDataset("reidentification.csv")

    trainer.train(fit_dataset, eval_train_dataset, validation_dataset, "model/trained")

# def test():
#     test_dataset=PetDataset.PetDataset("test.csv")
#     model=torch.load("model/trained/best.pt",weights_only=False)
#     trainer=Trainer(model)
#     trainer.test_and_print("",model,test_dataset)


# Script entry point: run training; swap in test() above to evaluate instead.
if __name__=="__main__":
    main()
    # test()