import torch
from torch.optim import AdamW
from tqdm import tqdm

import torch.optim as optim
import math

import matplotlib.pyplot as pl
from sklearn import metrics
import numpy as np

import matplotlib
import os
import pandas as pd

#####################################
import os
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, cohen_kappa_score, roc_auc_score

def evaluate_multilabel_classification(predictions, labels, threshold=0.8, class_names=None):
    """
    Evaluate multi-label classification model performance.

    :param predictions: Model output probabilities of shape (batch_size, num_classes).
        numpy arrays, lists, or CPU torch tensors are accepted (converted via np.asarray).
    :param labels: Ground truth binary labels of shape (batch_size, num_classes)
    :param threshold: Threshold to convert probabilities into binary outputs
    :param class_names: Optional per-class display names. Defaults to the original
        8 labels ["N","D","G","C","A","H","M","O"] when num_classes == 8; otherwise
        generic "Class i" names are generated (the previous hard-coded list made
        pandas raise ValueError for any other class count).
    :return: Dictionary containing evaluation metrics
    """
    # Normalize inputs to numpy: the training loop hands over a raw torch tensor
    # when the loader yields a single batch, and tensors have no .astype().
    predictions = np.asarray(predictions)
    labels = np.asarray(labels)

    # Convert probabilities to binary outputs based on the threshold
    predicted_classes = (predictions >= threshold).astype(int)

    # Per-class accuracy: fraction of samples whose binarized prediction matches
    accuracy_per_class = np.mean(predicted_classes == labels, axis=0)

    # Precision, Recall, F1 per class. zero_division=0 keeps sklearn's default
    # return value of 0 for classes with no predicted/true positives, without
    # the UndefinedMetricWarning spam.
    precision, recall, f1_score, _ = precision_recall_fscore_support(
        labels, predicted_classes, average=None, zero_division=0)

    # Kappa over the flattened matrix: every (sample, class) cell is treated as
    # one binary decision.
    kappa_score = cohen_kappa_score(labels.flatten(), predicted_classes.flatten())

    # Per-class AUC and the macro average.
    # NOTE(review): roc_auc_score raises ValueError when a class has only one
    # label value in this batch — assumed not to happen here; confirm upstream.
    auc = roc_auc_score(labels, predictions, average=None)
    average_auc = np.mean(auc)

    # Round all metrics to 4 decimal places for display / serialization
    accuracy_per_class = np.round(accuracy_per_class, 4)
    precision = np.round(precision, 4)
    recall = np.round(recall, 4)
    f1_score = np.round(f1_score, 4)
    kappa_score = np.round(kappa_score, 4)
    auc = np.round(auc, 4)
    average_auc = np.round(average_auc, 4)

    # Compile all the scores into a dictionary
    scores = {
        "Accuracy Rate per class": accuracy_per_class,
        "Precision per class": precision,
        "Recall per class": recall,
        "F1-score per class": f1_score,
        "Kappa score": kappa_score,
        "AUC per class": auc,
        "Average AUC": average_auc
    }

    # Resolve display names; fall back to generic names when the class count
    # does not match the default 8-label set.
    num_classes = labels.shape[1]
    if class_names is None:
        default_names = ["N", "D", "G", "C", "A", "H", "M", "O"]
        if num_classes == len(default_names):
            class_names = default_names
        else:
            class_names = [f"Class {i}" for i in range(num_classes)]

    # Create a DataFrame to display the per-class metrics
    metrics_df = pd.DataFrame({
        "Class": class_names,
        "Accuracy Rate": accuracy_per_class,
        "Precision": precision,
        "Recall": recall,
        "F1-score": f1_score,
        "AUC": auc
    })

    # Print the DataFrame
    print("\n------------------Per Class Metrics--------------------")
    print(metrics_df)
    print(f"\nKappa Score: {kappa_score}")
    print(f"Average AUC: {average_auc}")
    print("-------------------------------------------------------")
    return scores
#####################################

class Trainer():
    """Training / validation / prediction driver for the multimodal (text + image) model.

    Builds an AdamW optimizer with separate learning rates for the BERT text
    encoder, the Mamba image encoder, and all remaining parameters, and
    optionally streams per-batch progress and final metrics to a Socket.IO
    frontend on the '/model' namespace.
    """

    def __init__(self, config, processor, model, device=torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')):
        """
        :param config: object exposing bert_learning_rate, resnet_learning_rate,
            learning_rate, weight_decay and — when a socket client is attached —
            threshold
        :param processor: data processor, stored for external use
        :param model: multimodal model exposing .text_model.bert and
            .img_model.mamba_model submodules
        :param device: target device; defaults to CUDA when available
        """
        self.config = config
        self.processor = processor
        self.model = model.to(device)
        self.device = device

        # Partition parameters into three groups: BERT and Mamba encoders get
        # their own learning rates, everything else uses the base rate.
        bert_params = set(self.model.text_model.bert.parameters())
        mamba_params = set(self.model.img_model.mamba_model.parameters())
        other_params = list(set(self.model.parameters()) - bert_params - mamba_params)
        # Biases and LayerNorm weights are conventionally excluded from weight decay.
        no_decay = ['bias', 'LayerNorm.weight']
        params = [
            {'params': [p for n, p in self.model.text_model.bert.named_parameters() if not any(nd in n for nd in no_decay)],
                'lr': self.config.bert_learning_rate, 'weight_decay': self.config.weight_decay},
            {'params': [p for n, p in self.model.text_model.bert.named_parameters() if any(nd in n for nd in no_decay)],
                'lr': self.config.bert_learning_rate, 'weight_decay': 0.0},
            {'params': [p for n, p in self.model.img_model.mamba_model.named_parameters() if not any(nd in n for nd in no_decay)],
                'lr': self.config.resnet_learning_rate, 'weight_decay': self.config.weight_decay},
            {'params': [p for n, p in self.model.img_model.mamba_model.named_parameters() if any(nd in n for nd in no_decay)],
                'lr': self.config.resnet_learning_rate, 'weight_decay': 0.0},
            {'params': other_params,
                'lr': self.config.learning_rate, 'weight_decay': self.config.weight_decay},
        ]
        self.optimizer = AdamW(params, lr=config.learning_rate)

    @staticmethod
    def _print_mean_metrics(scores):
        """Print and return the macro-averaged (acc, f1, precision, recall) of a scores dict."""
        acc = np.round(scores['Accuracy Rate per class'].mean(), 4)
        f1 = np.round(scores["F1-score per class"].mean(), 4)
        recall = np.round(scores["Recall per class"].mean(), 4)
        precision = np.round(scores["Precision per class"].mean(), 4)
        print("acc=", acc, 'f1=', f1, 'precision=', precision, 'recall=', recall)
        return acc, f1, precision, recall

    def train(self, train_loader, socketio=None):
        """Run one training epoch.

        :param train_loader: yields (texts, texts_mask, imgs, labels) batches
        :param socketio: optional Socket.IO server; when given, per-batch
            progress and final metrics are emitted, and the decision threshold
            is read from config instead of the 0.8 default
        :return: mean training loss over the epoch, rounded to 4 decimals
        """
        self.model.train()

        loss_list = []
        pred_chunks = []   # per-batch prediction arrays, concatenated after the loop
        true_chunks = []   # per-batch ground-truth arrays
        threshold = 0.8    # overridden from config when a socket client is attached

        for batch_index, batch in enumerate(tqdm(train_loader, desc='----- [Training] '), start=1):
            texts, texts_mask, imgs, labels = batch
            texts = texts.to(self.device)
            texts_mask = texts_mask.to(self.device)
            imgs = imgs.to(self.device)
            labels = labels.to(self.device)

            pred_vec, loss = self.model(texts, texts_mask, imgs, labels=labels)

            loss_list.append(loss.item())
            # Convert to numpy immediately so the epoch-level arrays are always
            # numpy (the old code kept a torch tensor for single-batch loaders).
            pred_chunks.append(pred_vec.detach().cpu().numpy())
            true_chunks.append(labels.detach().cpu().numpy())

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            if socketio is not None:
                threshold = float(self.config.threshold)
                socketio.emit('model_progress', {"data": batch_index / len(train_loader), "mode": 'Train'}, namespace='/model')

        pred_labels = np.concatenate(pred_chunks, 0)
        true_labels = np.concatenate(true_chunks, 0)

        train_loss = np.round(sum(loss_list) / len(loss_list), 4)
        scores = evaluate_multilabel_classification(pred_labels, true_labels, threshold)
        self._print_mean_metrics(scores)

        # If socketio is provided, push the results to the frontend.
        if socketio is not None:
            self.formatSocketIOResult(socketio, scores)

        return train_loss

    def valid(self, val_loader, socketio=None):
        """Run one validation pass (no gradient tracking).

        :param val_loader: yields (texts, texts_mask, imgs, labels) batches
        :param socketio: optional Socket.IO server for progress/metric events
        :return: (mean validation loss, macro-averaged per-class accuracy)
        """
        self.model.eval()

        val_loss = 0
        pred_chunks = []
        true_chunks = []
        threshold = 0.8

        with torch.no_grad():  # evaluation must not build autograd graphs
            for batch_index, batch in enumerate(tqdm(val_loader, desc='\t ----- [Validing] '), start=1):
                texts, texts_mask, imgs, labels = batch
                texts = texts.to(self.device)
                texts_mask = texts_mask.to(self.device)
                imgs = imgs.to(self.device)
                labels = labels.to(self.device)

                pred_vec, loss = self.model(texts, texts_mask, imgs, labels=labels)

                val_loss += loss.item()
                pred_chunks.append(pred_vec.detach().cpu().numpy())
                true_chunks.append(labels.detach().cpu().numpy())

                if socketio is not None:
                    threshold = float(self.config.threshold)
                    socketio.emit('model_progress', {"data": batch_index / len(val_loader), "mode": "Valid"}, namespace='/model')

        pred_labels = np.concatenate(pred_chunks, 0)
        true_labels = np.concatenate(true_chunks, 0)

        scores = evaluate_multilabel_classification(pred_labels, true_labels, threshold)
        acc, f1, precision, recall = self._print_mean_metrics(scores)

        # If socketio is provided, push the results to the frontend.
        if socketio is not None:
            self.formatSocketIOResult(socketio, scores)

        return val_loss / len(val_loader), acc

    def predict(self, test_loader, socketio=None):
        """Run inference over test_loader, evaluate against the bundled labels,
        and return the concatenated prediction probabilities.

        :param test_loader: yields (texts, texts_mask, imgs, labels) batches
        :param socketio: optional Socket.IO server for progress/metric events
        :return: numpy array of sigmoid probabilities, shape (num_samples, num_classes)
        """
        self.model.eval()

        pred_chunks = []
        true_chunks = []
        threshold = 0.8

        with torch.no_grad():  # inference only; skip autograd bookkeeping
            for batch_index, batch in enumerate(tqdm(test_loader, desc='----- [Predicting] '), start=1):
                texts, texts_mask, imgs, labels = batch
                texts = texts.to(self.device)
                texts_mask = texts_mask.to(self.device)
                imgs = imgs.to(self.device)

                # Called without labels, the model returns raw logits; map them
                # to probabilities with a sigmoid.
                pred_vec = torch.sigmoid(self.model(texts, texts_mask, imgs))

                pred_chunks.append(pred_vec.detach().cpu().numpy())
                true_chunks.append(labels.detach().cpu().numpy())

                if socketio is not None:
                    threshold = float(self.config.threshold)
                    socketio.emit('model_progress', {"data": batch_index / len(test_loader), "mode": "Predict"}, namespace='/model')

        pred_labels = np.concatenate(pred_chunks, 0)
        true_labels = np.concatenate(true_chunks, 0)

        scores = evaluate_multilabel_classification(pred_labels, true_labels, threshold)
        self._print_mean_metrics(scores)

        # If socketio is provided, push the results to the frontend.
        if socketio is not None:
            self.formatSocketIOResult(socketio, scores)

        return pred_labels

    def formatSocketIOResult(self, socketio, scores):
        """Emit the metrics dict to the frontend in a JSON-serializable form.

        Builds a new dict with every numpy value converted via tolist(), so the
        caller's scores dict is no longer mutated in place (the previous code
        aliased it and overwrote the caller's arrays with plain lists).
        """
        acc = np.round(scores['Accuracy Rate per class'].mean(), 4).tolist()
        f1 = np.round(scores["F1-score per class"].mean(), 4).tolist()
        recall = np.round(scores["Recall per class"].mean(), 4).tolist()
        precision = np.round(scores["Precision per class"].mean(), 4).tolist()
        # np.asarray handles both per-class arrays and 0-d scalars (Kappa, avg AUC).
        serializable = {key: np.asarray(value).tolist() for key, value in scores.items()}
        result = {
            'scores': serializable,
            'acc': acc,
            'f1': f1,
            'recall': recall,
            'precision': precision
        }
        socketio.emit('model_response', {"data": result}, namespace='/model')

