import pandas as pd
import numpy as np
import torch
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from transformers import AutoTokenizer, AutoModelForSequenceClassification


def predict_sentiment_batch(model, data, tokenizer, device, batch_size=16):
    """Run batched sentiment inference and return predicted class ids.

    Args:
        model: HuggingFace sequence-classification model.
        data: list of raw text strings to classify.
        tokenizer: tokenizer matching ``model``; called with padding/truncation
            to at most 512 tokens per text.
        device: ``torch.device`` to run inference on.
        batch_size: number of texts per forward pass.

    Returns:
        list[int]: predicted class index for each input text, in input order.
    """
    model.eval()
    model.to(device)

    predicted_class = []

    for i in tqdm(range(0, len(data), batch_size), desc="批量预测", unit="batch"):
        batch_texts = data[i:i + batch_size]

        inputs = tokenizer(
            batch_texts,
            return_tensors="pt",
            truncation=True,
            padding=True,
            max_length=512,
            add_special_tokens=True
        )
        # Move every tensor in the encoding onto the inference device.
        inputs = {key: value.to(device) for key, value in inputs.items()}

        with torch.no_grad():
            outputs = model(**inputs)

        # argmax over logits equals argmax over softmax(logits) — softmax is
        # monotonic per row — so the previous softmax pass was redundant work.
        batch_preds = torch.argmax(outputs.logits, dim=-1).cpu().numpy()
        predicted_class.extend(batch_preds.tolist())

    return predicted_class


def calculate_detailed_metrics(y_true, y_pred):
    """Compute binary-classification metrics plus confusion counts.

    The positive class is label 1 (labels are assumed to be 0/1, as produced
    by the caller's ``astype(int)`` on a binary sentiment column).
    Precision/recall/F1 follow sklearn's ``zero_division=0`` convention: a
    zero denominator yields 0.0 instead of raising or returning NaN.

    Args:
        y_true: ground-truth labels (0/1), any array-like.
        y_pred: predicted labels (0/1), same length as ``y_true``.

    Returns:
        dict with float 'accuracy', 'precision', 'recall', 'f1_score' and a
        'sample_info' dict of plain-int confusion counts.
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)

    # Confusion counts as plain Python ints — np.sum returns numpy scalars,
    # which are not JSON-serializable and print as e.g. ``np.int64(3)``.
    tp = int(np.sum((y_true == 1) & (y_pred == 1)))
    tn = int(np.sum((y_true == 0) & (y_pred == 0)))
    fp = int(np.sum((y_true == 0) & (y_pred == 1)))
    fn = int(np.sum((y_true == 1) & (y_pred == 0)))
    total = len(y_true)

    # Derive the metrics from the counts in one pass instead of re-scanning
    # the arrays four more times via sklearn; for 0/1 labels the results
    # match accuracy_score / precision_score / recall_score / f1_score with
    # average='binary', zero_division=0. Empty input yields all-zero metrics
    # instead of a division error.
    accuracy = (tp + tn) / total if total else 0.0
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0

    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1_score': f1,
        'sample_info': {
            'total_samples': total,
            'true_positive': tp,
            'true_negative': tn,
            'false_positive': fp,
            'false_negative': fn,
        }
    }


# Usage example
def main_optimized():
    """Evaluate several sentiment-classification checkpoints on a dev CSV.

    Loads './data/dev.csv' (columns 'SentimentText', 'label'), then for each
    candidate model path: loads model + tokenizer, runs batched prediction,
    and prints accuracy/precision/recall/F1 plus confusion counts. A failure
    for one model is reported and the loop continues with the next.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Load evaluation data; rows with any missing value are dropped.
    test_data_path = './data/dev.csv'
    df = pd.read_csv(test_data_path)
    df.dropna(inplace=True)
    data = df['SentimentText'].tolist()
    target = df['label'].astype(int).tolist()

    print(f"数据加载完成，共 {len(data)} 个样本")

    model_paths = ['bert-base-chinese', './review_model', './review_classifier_model']

    for model_path in model_paths:
        print(f"\n{'=' * 60}")
        print(f"评估模型: {model_path}")
        print(f"{'=' * 60}")

        model = None
        try:
            model = AutoModelForSequenceClassification.from_pretrained(model_path)
            tokenizer = AutoTokenizer.from_pretrained(model_path)

            # Larger batches on GPU; small batches keep CPU inference responsive.
            batch_size = 32 if device.type == "cuda" else 8
            print(f"使用batch大小: {batch_size}")

            predictions = predict_sentiment_batch(model, data, tokenizer, device, batch_size)
            metrics = calculate_detailed_metrics(target, predictions)

            print(f"\n评估结果:")
            print(f"   准确率 (Accuracy):  {metrics['accuracy']:.4f}")
            print(f"   精确率 (Precision): {metrics['precision']:.4f}")
            print(f"   召回率 (Recall):    {metrics['recall']:.4f}")
            print(f"   F1分数:            {metrics['f1_score']:.4f}")

            print(f"\n样本统计:")
            info = metrics['sample_info']
            print(f"   总样本数: {info['total_samples']}")
            print(f"   真正例 (TP): {info['true_positive']}")
            print(f"   真反例 (TN): {info['true_negative']}")
            print(f"   假正例 (FP): {info['false_positive']}")
            print(f"   假反例 (FN): {info['false_negative']}")

        except Exception as e:
            # Boundary handler: report and move on to the next checkpoint.
            print(f"处理模型 {model_path} 时出错: {e}")
            continue
        finally:
            # Release the current model's memory before loading the next
            # checkpoint; previously empty_cache ran only once after the
            # whole loop, so every model stayed resident simultaneously.
            if model is not None:
                del model
            if device.type == "cuda":
                torch.cuda.empty_cache()


# Script entry point: run the evaluation only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main_optimized()
