import os
import uuid

import numpy as np
import pandas as pd
import torch
from sklearn.metrics import f1_score
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import AutoTokenizer
from transformers.models.llama4.image_processing_llama4_fast import get_best_fit

import Config
from Config import Paths, device, Dconfig
from Data import Dataset
from Model import Model
from PlotConfusionMatrix import plot_confusion_matrix
from Utils import LOGGER, sigmoid, get_best_F1, get_ROC_AUC, get_F1, get_precision_recall
from Process import analyze_df

def combined_inference(config, test_df, tokenizer, is_ensemble=False, is_fold=False, n_folds=5):
    """Run test-set inference with a single checkpoint or a fold ensemble.

    Args:
        config: experiment config object (reads BATCH_SIZE_TEST, NUM_WORKERS,
            plus whatever ``Dataset``/``Model`` consume).
        test_df: DataFrame of test samples handed to ``Dataset``.
        tokenizer: tokenizer handed to ``Dataset``.
        is_ensemble: if True, average raw predictions of all ``n_folds``
            fold checkpoints found under ``Paths.OUTPUT_DIR``.
        is_fold: when not ensembling, load ``Paths.BEST_FOLD_MODEL_PATH``
            instead of ``Paths.BEST_Full_MODEL_PATH``.
        n_folds: number of fold checkpoints to look for when ensembling.

    Returns:
        dict with ``"predictions"`` (raw, pre-sigmoid scores) and ``"ids"``,
        both numpy arrays aligned in dataset order.

    Raises:
        RuntimeError: ensembling was requested but no fold checkpoint exists.
        ValueError: the number of ids and predictions disagree.
    """
    test_dataset = Dataset(config, test_df, tokenizer)

    test_loader = DataLoader(
        test_dataset,
        batch_size=config.BATCH_SIZE_TEST,
        shuffle=False,  # order must stay stable so ids align with predictions
        num_workers=config.NUM_WORKERS,
        pin_memory=True,
        drop_last=False
    )

    # Collect sample ids once; shuffle=False guarantees the prediction
    # passes below iterate in the same order.
    all_ids = []
    with torch.no_grad():
        for batch in test_loader:
            all_ids.extend(batch["ids"])
    all_ids = np.array(all_ids)  # convert to a numpy array

    def _load_model(path):
        # One-line purpose: build a Model and load a checkpoint onto `device`.
        # map_location prevents CPU-only machines from crashing on
        # checkpoints that were saved from GPU tensors.
        model = Model(config, config_path=Paths.CONFIG_PATH, pretrained=False,
                      is_training=False)
        model.load_state_dict(torch.load(path, map_location=device))
        model.to(device)
        model.eval()
        return model

    def _predict(model):
        # One-line purpose: run one full pass over test_loader, return a
        # flat numpy array of raw model outputs.
        preds = []
        with torch.no_grad(), tqdm(test_loader, desc="Inference") as pbar:
            for batch in pbar:
                inputs = {k: v.to(device) for k, v in batch["inputs"].items()}
                preds.append(model(inputs).cpu().numpy().flatten())
        return np.concatenate(preds)

    if is_ensemble:
        all_preds = []
        for fold in range(n_folds):
            model_path = os.path.join(Paths.OUTPUT_DIR, f"fold_{fold}_best.pth")

            if not os.path.exists(model_path):
                LOGGER.warning(f"Model not found: {model_path}")
                continue

            all_preds.append(_predict(_load_model(model_path)))

        # Previously a missing-everything run crashed with IndexError on
        # all_preds[0]; fail with a clear message instead.
        if not all_preds:
            raise RuntimeError(
                "Ensemble inference requested but no fold checkpoints were found."
            )

        predictions = np.mean(all_preds, axis=0)  # average fold scores
    else:
        BEST_MODEL_PATH = Paths.BEST_FOLD_MODEL_PATH if is_fold else Paths.BEST_Full_MODEL_PATH
        predictions = _predict(_load_model(BEST_MODEL_PATH))

    # Explicit exception rather than `assert`: asserts are stripped under -O,
    # and the old check only compared against fold 0.
    if len(all_ids) != len(predictions):
        raise ValueError("Number of ids does not match number of predictions.")

    return {
        "predictions": predictions,
        "ids": all_ids
    }

def main():
    """End-to-end test-set inference: load data, predict, log metrics, save CSV.

    Mode is taken from ``Dconfig.is_ensemble`` / ``Dconfig.is_fold``; only the
    log header and the output CSV path differ between modes, so the metric
    logging is shared instead of being repeated per branch.
    """
    print("Starting DeBERTa Classification Inference")

    test_df = pd.read_csv(Paths.TEST_DATA, sep=',')
    analyze_df(test_df)

    # Tokenizer is the one saved alongside the trained checkpoints.
    tokenizer = AutoTokenizer.from_pretrained(os.path.join(Paths.OUTPUT_DIR, "tokenizer"))

    is_ensemble = Dconfig.is_ensemble
    is_fold = Dconfig.is_fold

    predictions = combined_inference(Dconfig, test_df, tokenizer,
                                     is_ensemble=is_ensemble, is_fold=is_fold, n_folds=5)

    # NOTE(review): "generated" labels are joined to predictions by row
    # position, which assumes combined_inference preserves test_df order —
    # it does (shuffle=False), but verify if Dataset ever reorders rows.
    results = pd.DataFrame({
        "id": predictions["ids"],
        "generated": test_df["generated"],
        "predictions": sigmoid(predictions["predictions"])
    })

    labels = test_df["generated"].values
    probs = results["predictions"].values
    ROC_AUC = get_ROC_AUC(labels, probs)
    HalfF1 = get_F1(labels, probs, 0.5)
    best_threshold, best_F1 = get_best_F1(labels, probs)
    precision, recall = get_precision_recall(labels, probs)

    # Only the header and output path depend on the mode.
    if is_ensemble:
        header = f"—————————————— Ensemble-Path:{Paths.TEST_DATA} result ——————————————"
        out_path = f"./output/{Paths.TEST_NUM}_test_multiple_results.csv"
    elif is_fold:
        header = f"—————————————— ONE-Path:{Paths.TEST_DATA}{Paths.BEST_FOLD_MODEL_PATH} result ——————————————"
        out_path = f"./output/{Paths.TEST_NUM}_test_one_{Paths.BEST_FOLD}_results.csv"
    else:
        header = f"—————————————— Full-Path:{Paths.TEST_DATA}{Paths.BEST_Full_MODEL_PATH} result ——————————————"
        out_path = f"./output/{Paths.TEST_NUM}_test_full_results.csv"

    LOGGER.info(header)
    LOGGER.info(f'ROC_AUC: {ROC_AUC:<.4f}')
    LOGGER.info(f'HalfF1: {HalfF1:<.4f}')
    LOGGER.info(f'Best_F1: {best_F1:<.4f}')
    LOGGER.info(f'Best_Threshold: {best_threshold:<.4f}')
    LOGGER.info(f'Precision: {precision:<.4f}')
    LOGGER.info(f'recall: {recall:<.4f}')
    results.to_csv(out_path, index=False)



# Script entry point: run inference only when executed directly, not on import.
if __name__ == "__main__":
    main()