import configs
import data

print("Welcome to K-Fold Cross Validation")

# Load the full dataset once at module level; `ds` is the shared dataset used
# by kfold_cross_validation() below. Each sample appears to be a 4-tuple of
# (input_ids, attention_mask, stats, one-hot label) — confirm against data.load().
ds = data.load()

import numpy as np
import torch
from torch.utils.data import DataLoader, Subset
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, f1_score
import time
from transformers import BertModel
from ClassifyModel import ClassifyModel


def kfold_cross_validation():
    """Run stratified K-fold cross validation over the module-level dataset `ds`.

    For each of ``configs.K`` folds a fresh BERT-based ``ClassifyModel`` is
    trained for ``configs.fold_max_epoch`` epochs and evaluated on the held-out
    split after every epoch.

    Returns:
        dict with keys ``mean_accuracy``, ``std_accuracy``, ``avg_f1`` and
        ``fold_results`` (per-fold validation accuracy, F1 and training time).
    """
    # Class index per sample; labels in `ds` are one-hot tensors.
    all_labels = np.array([torch.argmax(label).item() for _, _, _, label in ds])

    # Stratified splitting keeps the class ratio stable across folds.
    kfold = StratifiedKFold(n_splits=configs.K, shuffle=True, random_state=configs.v_random_seed)

    fold_results = {
        'val_acc': [],
        'val_f1': [],
        'train_time': []
    }

    # StratifiedKFold only needs the labels to split; the features argument
    # is a dummy array of the right length.
    for fold, (train_ids, val_ids) in enumerate(kfold.split(np.zeros(len(all_labels)), all_labels)):
        print(f"Fold [{fold + 1}/{configs.K}]")

        train_loader = DataLoader(
            Subset(ds, train_ids),
            batch_size=configs.v_batch_size,
            shuffle=True
        )
        val_loader = DataLoader(
            Subset(ds, val_ids),
            batch_size=configs.v_batch_size,
            shuffle=False
        )

        # Fresh model and optimizer per fold so folds cannot leak into each other.
        bert = BertModel.from_pretrained(configs.PRETRAINED_BERT_PATH)
        model = ClassifyModel(
            bert,
            lstm_hidden_size=configs.lstm_hidden_size,
            lstm_n_layers=configs.lstm_n_layers,
            stat_n_features=configs.stat_n_features,
            fusion_n_out=configs.fusion_n_out,
            classifier_dropout=configs.classifier_dropout,
            out_n_classes=configs.out_n_classes
        )

        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=configs.v_lr)

        # Timer for this fold's training (includes per-epoch validation).
        fold_start_time = time.time()

        # Defined up-front so the bookkeeping below cannot hit a NameError
        # if configs.fold_max_epoch is 0.
        val_acc, val_f1 = 0.0, 0.0

        for epoch in range(configs.fold_max_epoch):
            # evaluate_model() switches the model to eval mode at the end of
            # every epoch, so training mode must be re-enabled here each epoch
            # — otherwise dropout/batch-norm stay frozen from epoch 2 onwards.
            model.train()
            epoch_loss = 0.0

            for batch in train_loader:
                input_ids, attention_mask, stats, labels = batch[0], batch[1], batch[2], batch[3]

                optimizer.zero_grad()

                outputs = model(input_ids, attention_mask, stats)
                loss = criterion(outputs, labels)

                loss.backward()
                optimizer.step()

                # Weight by batch size so the epoch average is per-sample.
                epoch_loss += loss.item() * input_ids.size(0)

            epoch_loss_avg = epoch_loss / len(train_loader.dataset)

            # Validation metrics after this epoch (puts model into eval mode).
            val_acc, val_f1 = evaluate_model(model, val_loader)

            print(f"[Fold {fold + 1}, Epoch {epoch + 1}/{configs.fold_max_epoch}]")
            print(f"Train Loss: {epoch_loss_avg}")
            print(f"Val Acc: {val_acc} | Val F1: {val_f1}")

        # Record the last epoch's validation metrics for this fold.
        fold_time = time.time() - fold_start_time
        fold_results['val_acc'].append(val_acc)
        fold_results['val_f1'].append(val_f1)
        fold_results['train_time'].append(fold_time)

        print(f"Fold {fold + 1} Ends")
        print(f"Val Acc: {val_acc}")

    avg_acc = np.mean(fold_results['val_acc'])
    std_acc = np.std(fold_results['val_acc'])
    avg_f1 = np.mean(fold_results['val_f1'])

    print(f"avg ACC: {avg_acc} ± {std_acc}")
    print(f"avg F1: {avg_f1}")

    return {
        'mean_accuracy': avg_acc,
        'std_accuracy': std_acc,
        'avg_f1': avg_f1,
        'fold_results': fold_results
    }


def evaluate_model(model, dataloader):
    """Evaluate a binary classifier on `dataloader` and return (accuracy, f1).

    Assumes exactly two classes, with class 1 as the positive class, and
    one-hot label tensors. Puts `model` into eval mode as a side effect
    (the caller is responsible for restoring training mode).

    Args:
        model: callable as ``model(input_ids, attention_mask, stats)`` and
            returning per-class logits of shape (batch, 2).
        dataloader: iterable of (input_ids, attention_mask, stats, labels)
            batches.

    Returns:
        (accuracy, f1) as Python floats; both are 0.0 for an empty dataloader.
    """
    model.eval()

    total_tp, total_fp, total_tn, total_fn = 0, 0, 0, 0

    with torch.no_grad():
        for batch in dataloader:

            input_ids, attention_mask, stats, labels = batch[0], batch[1], batch[2], batch[3]

            outputs = model(input_ids, attention_mask, stats)

            _, preds = torch.max(outputs, 1)
            _, target_labels = torch.max(labels, 1)  # one-hot -> class index

            # Confusion-matrix counts (binary: class 1 is "positive").
            total_tp += ((preds == 1) & (target_labels == 1)).sum().item()
            total_fp += ((preds == 1) & (target_labels == 0)).sum().item()
            total_tn += ((preds == 0) & (target_labels == 0)).sum().item()
            total_fn += ((preds == 0) & (target_labels == 1)).sum().item()

    # Scoring. Guard against an empty dataloader (no samples at all).
    total_samples = total_tp + total_fp + total_tn + total_fn
    acc = (total_tp + total_tn) / total_samples if total_samples else 0.0
    precision = total_tp / (total_tp + total_fp + 1e-10)
    recall = total_tp / (total_tp + total_fn + 1e-10)
    # When total_tp == 0 both precision and recall are exactly 0, and the
    # original 2*p*r/(p+r) raised ZeroDivisionError; define F1 as 0.0 then.
    denom = precision + recall
    f1 = 2 * precision * recall / denom if denom > 0 else 0.0

    return acc, f1


if __name__ == "__main__":
    # Run the full cross-validation, echo the summary, and persist it.
    summary = kfold_cross_validation()

    print(summary)

    import json

    with open('kfold_results.json', 'w') as f:
        json.dump(summary, f)
