import copy
from collections import Counter
from datetime import datetime

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    classification_report,
)
from tqdm import tqdm

from AudioDataset import AudioDataset
from HyperParm import HyperParm


class Trainer:
    """Fine-tune, evaluate, and knowledge-distill an audio classification model.

    The model is loaded as a fully pickled ``nn.Module`` and every parameter
    is unfrozen, so the whole network is trained end-to-end.
    """

    def __init__(self, model_path: str, data_enhanced: bool = True):
        """Load the model from ``model_path`` and prepare training state.

        Args:
            model_path: path to a pickled ``nn.Module`` checkpoint.
            data_enhanced: forwarded to ``AudioDataset`` for the training set
                (presumably toggles data augmentation — TODO confirm).
        """
        # NOTE(review): weights_only=False unpickles arbitrary objects; only
        # load checkpoints from trusted sources.
        self.model: nn.Module = torch.load(model_path, weights_only=False)
        for parameters in self.model.parameters():
            parameters.requires_grad = True

        self.data_enhanced = data_enhanced

        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.best_acc = 0.0
        # True while the current best model has already been persisted to disk.
        self.saved = True

    def train(
        self,
        train_data_root_dir: str,
        validate_data_root_dir: str,
        checkpoint_dir: str,
        batch_size: int = HyperParm.BATCH_SIZE,
    ):
        """Train ``self.model`` with SGD + cross-entropy.

        Validates every epoch; every ``N_SAVE_INTERVAL`` epochs also evaluates
        on the training set and checkpoints the best-validation-accuracy model.
        """
        train_dataset = AudioDataset(train_data_root_dir, self.data_enhanced)

        train_dataloader = torch.utils.data.DataLoader(
            dataset=train_dataset, batch_size=batch_size, shuffle=True
        )

        model = self.model.to(self.device)

        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(
            model.parameters(), lr=HyperParm.LEARNING_RATE, momentum=HyperParm.MOMENTUM
        )

        ####################### train #######################

        for epoch in range(HyperParm.N_EPOCH):
            print(f"epoch: {epoch}")

            train_loader = tqdm(train_dataloader)
            model.train()
            for inputs, labels in train_loader:
                inputs = inputs.reshape(-1, *HyperParm.DATA_SIZE)
                inputs = inputs.to(HyperParm.MODEL_WEIGHT_DATA_TYPE).to(self.device)

                labels = labels.to(self.device)

                optimizer.zero_grad()

                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

            ####################### test on validate data #######################
            (
                validate_accuracy,
                validate_precision,
                validate_recall,
                validate_f1,
                validate_all_labels,
                validate_all_predictions,
            ) = self.test(model, validate_data_root_dir, batch_size)

            # FIX: removed a dead `if True:` left over from a commented-out
            # interval check; the report is printed every epoch either way.
            print("\ntest on validate data:\n")
            print(
                classification_report(validate_all_labels, validate_all_predictions)
            )
            print(
                f"metric: {validate_accuracy} {validate_precision} {validate_recall} {validate_f1}"
            )

            if (epoch + 1) % HyperParm.N_SAVE_INTERVAL == 0:
                ####################### test on train data #######################
                (
                    train_accuracy,
                    train_precision,
                    train_recall,
                    train_f1,
                    train_all_labels,
                    train_all_predictions,
                ) = self.test(model, train_data_root_dir, batch_size)

                print("\ntest on train data:\n")
                print(classification_report(train_all_labels, train_all_predictions))
                print(
                    f"metric: {train_accuracy} {train_precision} {train_recall} {train_f1}"
                )

            ####################### checkpoint #######################
            if validate_accuracy > self.best_acc:
                self.best_acc = validate_accuracy
                # BUG FIX: snapshot the weights instead of aliasing the live
                # model; otherwise later (worse) epochs overwrite the "best"
                # model before it is saved.
                self.model = copy.deepcopy(model)
                self.saved = False

            if (epoch + 1) % HyperParm.N_SAVE_INTERVAL == 0 and not self.saved:
                torch.save(
                    self.model,
                    # NOTE(review): ':' in the timestamp makes the filename
                    # invalid on Windows — fine on POSIX.
                    f"{checkpoint_dir}/{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.pt",
                )
                # BUG FIX: mark the best model as persisted so the same
                # checkpoint is not re-written at every following interval.
                self.saved = True

    def test(
        self,
        model: nn.Module,
        data_root_dir: str,
        batch_size: int = HyperParm.BATCH_SIZE,
    ):
        """Evaluate ``model`` on the dataset rooted at ``data_root_dir``.

        Returns:
            ``(accuracy, precision, recall, f1, all_labels, all_predictions)``
            where the four metrics are weighted-average scalars and the last
            two are 1-D numpy arrays covering the whole dataset.
        """
        all_labels = []
        all_predictions = []

        test_dataset = AudioDataset(data_root_dir)

        test_dataloader = torch.utils.data.DataLoader(
            dataset=test_dataset, batch_size=batch_size, shuffle=False
        )

        model.eval()
        with torch.no_grad():
            test_loader = tqdm(test_dataloader)
            for inputs, labels in test_loader:
                inputs = inputs.reshape(-1, *HyperParm.DATA_SIZE)
                inputs = inputs.to(HyperParm.MODEL_WEIGHT_DATA_TYPE).to(self.device)

                labels: torch.Tensor = labels.to(self.device)

                logits: torch.Tensor = model(inputs)
                prediction = logits.argmax(dim=1)

                all_predictions.append(prediction.cpu().numpy())
                all_labels.append(labels.cpu().numpy())

        all_labels = np.concatenate(all_labels)
        all_predictions = np.concatenate(all_predictions)

        accuracy = accuracy_score(all_labels, all_predictions)
        precision = precision_score(all_labels, all_predictions, average="weighted")
        recall = recall_score(all_labels, all_predictions, average="weighted")
        f1 = f1_score(all_labels, all_predictions, average="weighted")

        return accuracy, precision, recall, f1, all_labels, all_predictions

    def knowledge_distillate(
        self,
        student_path: str,
        train_data_root_dir: str,
        validate_data_root_dir: str,
        checkpoint_dir: str,
        batch_size: int = HyperParm.BATCH_SIZE,
    ):
        """Distill ``self.model`` (teacher) into the student at ``student_path``.

        Uses Hinton-style distillation: KL divergence between temperature-
        softened teacher/student distributions, blended with the hard-label
        cross-entropy via ``HyperParm.ALPHA``.
        """
        best_acc = 0
        # True while the current best student has already been saved to disk.
        model_saved = True

        train_dataset = AudioDataset(train_data_root_dir, self.data_enhanced)

        train_dataloader = torch.utils.data.DataLoader(
            dataset=train_dataset, batch_size=batch_size, shuffle=True
        )

        best_student: nn.Module = torch.load(student_path, weights_only=False)
        student_model: nn.Module = best_student.to(self.device)

        for parameters in student_model.parameters():
            parameters.requires_grad = True

        teacher_model = self.model.to(self.device)
        teacher_model.eval()

        soft_loss_criterion = nn.KLDivLoss(reduction="batchmean")
        hard_loss_criterion = nn.CrossEntropyLoss()

        optimizer = optim.SGD(
            student_model.parameters(),
            lr=HyperParm.LEARNING_RATE,
            momentum=HyperParm.MOMENTUM,
        )

        ####################### train #######################

        for epoch in range(HyperParm.N_EPOCH):
            print(f"Knowledge Distillation epoch: {epoch}")

            train_loader = tqdm(train_dataloader)
            student_model.train()
            for inputs, labels in train_loader:
                inputs = inputs.reshape(-1, *HyperParm.DATA_SIZE)
                inputs = inputs.to(HyperParm.MODEL_WEIGHT_DATA_TYPE).to(self.device)

                labels = labels.to(self.device)

                optimizer.zero_grad()

                student_outputs = student_model(inputs)
                # BUG FIX: the teacher is not optimized here, so do not build
                # its autograd graph — the original tracked gradients through
                # the teacher, wasting memory and accumulating grads in its
                # parameters.
                with torch.no_grad():
                    teacher_outputs = teacher_model(inputs)

                # BUG FIX: nn.KLDivLoss expects *log*-probabilities as its
                # first argument; the original passed plain softmax, which
                # computes the wrong divergence. The T^2 factor restores the
                # soft-target gradient scale (Hinton et al., 2015).
                soft_loss = soft_loss_criterion(
                    nn.functional.log_softmax(
                        student_outputs / HyperParm.TEMPERATURE, dim=1
                    ),
                    nn.functional.softmax(
                        teacher_outputs / HyperParm.TEMPERATURE, dim=1
                    ),
                ) * (HyperParm.TEMPERATURE**2)
                hard_loss = hard_loss_criterion(student_outputs, labels)

                total_loss = (
                    HyperParm.ALPHA * soft_loss + (1 - HyperParm.ALPHA) * hard_loss
                )

                total_loss.backward()
                optimizer.step()

            ####################### test on validate data #######################
            (
                validate_accuracy,
                validate_precision,
                validate_recall,
                validate_f1,
                validate_all_labels,
                validate_all_predictions,
            ) = self.test(student_model, validate_data_root_dir, batch_size)

            print("test on validate data:")
            print(classification_report(validate_all_labels, validate_all_predictions))
            print(validate_accuracy, validate_precision, validate_recall, validate_f1)

            ####################### test on train data #######################
            (
                train_accuracy,
                train_precision,
                train_recall,
                train_f1,
                train_all_labels,
                train_all_predictions,
            ) = self.test(student_model, train_data_root_dir, batch_size)

            # FIX: message said "train on train data"; this is an evaluation.
            print("test on train data:")
            print(classification_report(train_all_labels, train_all_predictions))
            print(train_accuracy, train_precision, train_recall, train_f1)

            ####################### checkpoint #######################
            if validate_accuracy > best_acc:
                best_acc = validate_accuracy
                # BUG FIX: snapshot rather than alias, so the "best" student
                # is not silently replaced by later training.
                best_student = copy.deepcopy(student_model)
                model_saved = False

            if (epoch + 1) % HyperParm.N_SAVE_INTERVAL == 0 and not model_saved:
                # BUG FIX: save the best student (consistent with train()),
                # not whatever the current epoch happened to produce, and mark
                # it persisted so it is not re-saved every interval.
                torch.save(
                    best_student,
                    f"{checkpoint_dir}/{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.pt",
                )
                model_saved = True


################################################################################################


def vote_result(
    original_labels: np.ndarray,
    original_predictions: np.ndarray,
    n_split: int = HyperParm.N_SPLIT,
):
    """Majority-vote predictions over consecutive groups of ``n_split`` items.

    Each audio clip is assumed to have been split into ``n_split`` consecutive
    segments; the segment predictions are collapsed to one prediction per clip
    by majority vote (ties broken by first occurrence, per ``Counter``).

    Args:
        original_labels: per-segment labels; every group of ``n_split``
            consecutive entries is assumed to share one label — TODO confirm.
        original_predictions: per-segment predictions, same ordering.
        n_split: number of segments per clip.

    Returns:
        ``(labels, voted_predictions)`` — one entry per clip.
    """
    n_groups = len(original_predictions) // n_split
    voted_prediction = np.empty(n_groups)
    for i in range(n_groups):
        # BUG FIX: the group start was hard-coded as `3 * i`, which selects
        # overlapping/wrong windows whenever n_split != 3. Use n_split * i so
        # the groups are disjoint and aligned with the label stride below.
        group = original_predictions[n_split * i : n_split * (i + 1)]
        voted_prediction[i], _ = Counter(group).most_common(1)[0]

    return original_labels[::n_split].copy(), voted_prediction


def test_data_enhanced(model_path: str):
    """Evaluate the checkpoint at ``model_path`` on the held-out test set."""
    evaluator = Trainer(model_path)
    results = evaluator.test(
        evaluator.model,
        "/home/tuchunxu/workspace/pr-project/pr-project/data/test",
        # "/home/tuchunxu/workspace/pr-project/pr-project/data/validate",
    )
    accuracy, precision, recall, f1, all_labels, all_predictions = results
    print(classification_report(all_labels, all_predictions))
    print(precision, recall, f1, accuracy)


def test_no_enhanced():
    """Evaluate the non-augmented checkpoint on the test set."""
    checkpoint = (
        # "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/no_enhanced/2025-02-11_04:49:04.pt"
        "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/no_enhanced/2025-02-11_09:23:23.pt"
    )
    evaluator = Trainer(checkpoint)
    results = evaluator.test(
        evaluator.model, "/home/tuchunxu/workspace/pr-project/pr-project/data/test"
    )
    accuracy, precision, recall, f1, all_labels, all_predictions = results

    # all_labels, all_predictions = vote_result(all_labels, all_predictions)

    print(classification_report(all_labels, all_predictions))
    print(precision, recall, f1, accuracy)


def train_data_enhanced():
    """Fine-tune the 3-channel ResNet-101 on the augmented training data."""
    Trainer(
        model_path="/home/tuchunxu/workspace/pr-project/pr-project/model/prepared/resnet101_3c.pt",
        data_enhanced=True,
    ).train(
        train_data_root_dir="/home/tuchunxu/workspace/pr-project/pr-project/data/train",
        validate_data_root_dir="/home/tuchunxu/workspace/pr-project/pr-project/data/validate",
        checkpoint_dir="/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c",
    )


def train_no_enhanced():
    """Fine-tune ResNet-101 on the training data without augmentation."""
    Trainer(
        model_path="/home/tuchunxu/workspace/pr-project/pr-project/model/prepared/resnet101.pt",
        data_enhanced=False,
    ).train(
        train_data_root_dir="/home/tuchunxu/workspace/pr-project/pr-project/data/train",
        validate_data_root_dir="/home/tuchunxu/workspace/pr-project/pr-project/data/validate",
        checkpoint_dir="/home/tuchunxu/workspace/pr-project/pr-project/model/trained/5_class_no_enhanced",
    )


def kd():
    """Distill the trained ResNet-101 teacher into a ResNet-50 student."""
    teacher_trainer = Trainer(
        model_path="/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_15:22:10.pt",
        data_enhanced=False,
    )
    teacher_trainer.knowledge_distillate(
        student_path="/home/tuchunxu/workspace/pr-project/pr-project/model/prepared/resnet50_3c.pt",
        train_data_root_dir="/home/tuchunxu/workspace/pr-project/pr-project/data/train",
        validate_data_root_dir="/home/tuchunxu/workspace/pr-project/pr-project/data/validate",
        checkpoint_dir="/home/tuchunxu/workspace/pr-project/pr-project/model/trained/kd_enhanced",
    )


# Checkpoints from the 2025-02-13 "3_class_data_enhanced_3c" training run,
# kept so the commented-out sweep under __main__ can evaluate each one.
all_model = [
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_09:54:38.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_10:06:16.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_10:18:18.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_10:30:17.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_10:42:39.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_10:55:22.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_11:08:19.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_11:20:48.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_11:33:11.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_11:46:18.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_11:58:15.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_12:10:32.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_12:23:14.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_12:35:09.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_12:46:43.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_12:58:28.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_13:10:33.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_13:22:27.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_13:34:26.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_13:46:26.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_13:58:25.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_14:10:18.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_14:22:15.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_14:34:19.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_14:46:23.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_14:58:22.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_15:10:18.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_15:22:10.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_15:33:50.pt",
    "/home/tuchunxu/workspace/pr-project/pr-project/model/trained/3_class_data_enhanced_3c/2025-02-13_15:45:38.pt",
]

if __name__ == "__main__":
    # Entry-point switchboard: uncomment exactly one of the calls below.
    # Currently configured to run knowledge distillation.
    # train_data_enhanced()
    # train_no_enhanced()
    kd()
    # test_data_enhanced()
    # for i, model in enumerate(all_model):
    #    print(i)
    #    test_data_enhanced(model)