import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from tqdm import tqdm
import torchvision.transforms as transforms
from util import Parameter,create_symlink
import VegetableDataset
import numpy as np
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    classification_report,
)
import copy,os,time
from datetime import datetime

LOG_FILE="log/train.log"

class Trainer:
    """Trains and evaluates a classification model.

    Tracks the best validation accuracy seen so far, keeps a deep copy of the
    best-performing model, and periodically persists it to a checkpoint
    directory (with a ``best.pt`` symlink pointing at the latest best file).
    """

    def __init__(self, model: nn.Module):
        self.model = model
        # Unfreeze everything by default; train(fc_only=True) re-freezes later.
        for parameters in self.model.parameters():
            parameters.requires_grad = True

        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.best_acc = 0.0  # best validation accuracy observed so far
        self.saved = True    # True once the current best model has been written to disk

    def test_and_print(self,
                       prompt: str,
                       model: nn.Module,
                       test_dataset: VegetableDataset.VegetableDataset,
                       batch_size: int = Parameter.BATch_SIZE if False else Parameter.BATCH_SIZE):
        """Evaluate ``model`` on ``test_dataset`` and append a report to LOG_FILE.

        ``prompt`` is written verbatim at the top of the log entry.
        Returns the same 6-tuple as :meth:`test`:
        (accuracy, precision, recall, f1, labels, predictions).
        """
        (
            accuracy,
            precision,
            recall,
            f1,
            all_labels,
            all_predictions,
        ) = self.test(model, test_dataset, batch_size)

        words = f"{prompt}\n{classification_report(all_labels, all_predictions)}\naccuracy: {accuracy}\nprecision: {precision}\nrecall: {recall}\nf1: {f1}\n"
        with open(LOG_FILE, "a") as fp:
            fp.write(words)

        return (
            accuracy,
            precision,
            recall,
            f1,
            all_labels,
            all_predictions,
        )

    def test(self,
             model: nn.Module,
             test_dataset: VegetableDataset.VegetableDataset,
             batch_size: int = Parameter.BATCH_SIZE):
        """Run inference over ``test_dataset`` and compute weighted metrics.

        Returns (accuracy, precision, recall, f1, all_labels, all_predictions)
        where the last two are 1-D numpy arrays of class indices.
        """
        all_labels = []
        all_predictions = []

        test_dataloader = torch.utils.data.DataLoader(
            dataset=test_dataset, batch_size=batch_size, shuffle=False
        )
        start = time.perf_counter()
        model.eval()
        with torch.no_grad():
            for inputs, labels in tqdm(test_dataloader):
                inputs = inputs.to(self.device)
                labels: torch.Tensor = labels.to(self.device)

                logits: torch.Tensor = model(inputs)
                prediction = logits.argmax(dim=1)

                all_predictions.append(prediction.cpu().numpy())
                all_labels.append(labels.cpu().numpy())

        all_labels = np.concatenate(all_labels)
        all_predictions = np.concatenate(all_predictions)
        print(time.perf_counter() - start)  # wall-clock inference time (seconds)
        accuracy = accuracy_score(all_labels, all_predictions)
        precision = precision_score(all_labels, all_predictions, average="weighted")
        recall = recall_score(all_labels, all_predictions, average="weighted")
        f1 = f1_score(all_labels, all_predictions, average="weighted")

        return accuracy, precision, recall, f1, all_labels, all_predictions

    def train(
        self,
        train_dataset: VegetableDataset.VegetableDataset,
        test_train_dataset: VegetableDataset.VegetableDataset,
        validate_dataset: VegetableDataset.VegetableDataset,
        checkpoint_dir: str,
        batch_size: int = Parameter.BATCH_SIZE,
        num_epochs: int = Parameter.NUM_EPOCHS,
        fc_only: bool = False
    ):
        """Train the model, validating each epoch and checkpointing the best one.

        train_dataset       -- the (augmented) training split
        test_train_dataset  -- the training split with eval transforms, for metrics
        validate_dataset    -- the validation split used for best-model selection
        checkpoint_dir      -- directory to write timestamped ``.pt`` files into
        fc_only             -- if True, freeze the backbone and train only model.fc
                               (assumes the model exposes a ``fc`` head, e.g. ResNet)
        """
        model: nn.Module = self.model.to(self.device)

        train_dataloader = torch.utils.data.DataLoader(
            dataset=train_dataset, batch_size=batch_size, shuffle=True
        )

        criterion = nn.CrossEntropyLoss()

        if fc_only:
            for param in model.parameters():
                param.requires_grad = False
            for param in model.fc.parameters():
                param.requires_grad = True
            optimizer = optim.SGD(
                filter(lambda p: p.requires_grad, model.parameters()), lr=Parameter.LEARNING_RATE, momentum=Parameter.MOMENTUM
            )
        else:
            for param in model.parameters():
                param.requires_grad = True
            optimizer = optim.SGD(
                model.parameters(), lr=Parameter.LEARNING_RATE, momentum=Parameter.MOMENTUM
            )

        ####################### train #######################

        for epoch in range(num_epochs):
            words = f"\nepoch: {epoch}"
            with open(LOG_FILE, "a") as fp:
                fp.write(words)

            train_loader = tqdm(train_dataloader)
            model.train()
            for inputs, labels in train_loader:
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)

                optimizer.zero_grad()

                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

            # Validation accuracy drives best-model selection; the remaining
            # metrics are logged by test_and_print and not needed here.
            validate_accuracy = self.test_and_print(
                "\ntest on validate data\n", model, validate_dataset, batch_size
            )[0]
            # Also log metrics on the (non-augmented) training split.
            self.test_and_print("\ntest on train data:\n", model, test_train_dataset, batch_size)

            ####################### checkpoint #######################
            if validate_accuracy >= self.best_acc:
                self.best_acc = validate_accuracy
                self.model = copy.deepcopy(model)
                self.saved = False  # new best model is pending a save

            if (epoch + 1) % Parameter.N_SAVE_INTERVAL == 0:
                if not self.saved:
                    # NOTE: ':' in filenames is invalid on Windows — confirm this
                    # script only targets POSIX filesystems.
                    file_path = f"{checkpoint_dir}/{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.pt"
                    torch.save(
                        self.model,
                        file_path,
                    )
                    create_symlink(file_path, f"{checkpoint_dir}/best.pt")
                    # Fix: mark the best model as persisted so the same model is
                    # not redundantly re-saved at every subsequent interval.
                    self.saved = True

def train(from_model_path: str = "model/prepared/prepared_resnet50.pt"):
    """Fine-tune the prepared model and write checkpoints to model/trained."""
    # NOTE(review): torch.load with weights_only=False unpickles arbitrary
    # objects — only load checkpoint files from trusted sources.
    model = torch.load(from_model_path, weights_only=False)
    training_set = VegetableDataset.VegetableDataset("train.csv")
    eval_training_set = VegetableDataset.VegetableDataset("train.csv", VegetableDataset.transform)
    validation_set = VegetableDataset.VegetableDataset("validate.csv", VegetableDataset.transform)
    Trainer(model).train(training_set, eval_training_set, validation_set, "model/trained")

def test(model_path: str = "model/trained/best.pt"):
    """Evaluate the checkpoint at ``model_path`` on the held-out test split."""
    loaded_model = torch.load(model_path, weights_only=False)
    dataset = VegetableDataset.VegetableDataset("test.csv", VegetableDataset.transform)
    Trainer(loaded_model).test_and_print("", loaded_model, dataset)

def validate(model_path: str):
    """Return the validation-set accuracy of the checkpoint at ``model_path``."""
    loaded_model = torch.load(model_path, weights_only=False)
    dataset = VegetableDataset.VegetableDataset("validate.csv", VegetableDataset.transform)
    metrics = Trainer(loaded_model).test_and_print("", loaded_model, dataset)
    return metrics[0]  # accuracy is the first element of the 6-tuple

if __name__ == "__main__":
    # Entry point: evaluate the current best checkpoint on the test split.
    test()