# -*- coding: utf-8 -*-
"""
综合性中文文本三分类训练脚本 (带采样功能)

本脚本为实际应用优化，并根据要求加入了采样技术以控制训练集规模。
它整合了以下关键技术：
1.  **基础模型**: 使用 'hfl/chinese-roberta-wwm-ext'，一个针对中文优化的BERT模型。
2.  **微调技术**: 应用LoRA（低秩适配）进行参数高效微调。
3.  **数据处理**: 对训练集进行随机欠采样，将训练样本控制在配置的目标规模（默认 40000 个，见 TrainingConfig.TRAIN_SET_SIZE），同时保持验证/测试集的原始分布。
4.  **不平衡处理**: 在采样后的训练集上，继续使用加权交叉熵损失函数，进一步强调少数类。

请在使用前确保已安装所有必要的库:
!pip install --upgrade transformers accelerate peft datasets scikit-learn torch
"""
import os
os.environ['HF_ENDPOINT']='https://hf-mirror.com'
import sqlite3
import pandas as pd
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, balanced_accuracy_score
from datasets import Dataset, DatasetDict
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer
)
from peft import get_peft_model, LoraConfig, TaskType


# --- 1. Configuration ---
class TrainingConfig:
    """Stores all configuration values and hyperparameters for the training run."""
    # Files and paths
    DB_PATH = "sentiment_analysis.db"   # SQLite database with the labelled sentences
    TABLE_NAME = "sentiments"           # table read by load_and_split_data
    OUTPUT_DIR = "./results_sampled"    # Trainer checkpoints / final model
    LOGGING_DIR = "./logs_sampled"      # Trainer log directory

    # Model and tokenizer
    MODEL_NAME = "hfl/chinese-roberta-wwm-ext"
    MAX_LENGTH = 256  # max token length; longer sentences are truncated

    # LoRA configuration
    LORA_R = 16             # low-rank adapter dimension
    LORA_ALPHA = 32         # LoRA scaling factor
    LORA_DROPOUT = 0.1
    TARGET_MODULES = ["query", "value"]  # attention projections to adapt

    # Training and sampling parameters
    NUM_EPOCHS = 20
    BATCH_SIZE = 128
    LEARNING_RATE = 2e-5
    WEIGHT_DECAY = 0.01
    RANDOM_STATE = 42  # seed shared by splitting and sampling for reproducibility
    TRAIN_SET_SIZE = 40000  # *** New: target training-set size after undersampling ***


def load_and_split_data(config):
    """Load labelled data from SQLite and build a stratified train/val/test split.

    Reads (sentence, result) rows from ``config.TABLE_NAME``, renames ``result``
    to ``labels``, then performs two stratified 90/10 splits so each split keeps
    the original class distribution: 81% train, 9% validation, 10% test.

    Args:
        config: object exposing DB_PATH, TABLE_NAME and RANDOM_STATE.

    Returns:
        DatasetDict with 'train', 'validation' and 'test' splits.
    """
    conn = sqlite3.connect(config.DB_PATH)
    try:
        # Table names cannot be bound as SQL parameters; this interpolation is
        # safe only because TABLE_NAME is a trusted config constant, never user input.
        df = pd.read_sql_query(f"SELECT sentence, result FROM {config.TABLE_NAME}", conn)
    finally:
        # Close the connection even if the query raises (previously it leaked on error).
        conn.close()

    df = df.rename(columns={"result": "labels"})

    train_val_df, test_df = train_test_split(
        df, test_size=0.1, stratify=df['labels'], random_state=config.RANDOM_STATE
    )
    train_df, val_df = train_test_split(
        train_val_df, test_size=0.1, stratify=train_val_df['labels'], random_state=config.RANDOM_STATE
    )

    # drop=True discards the original row index so it is not carried into the Dataset.
    train_dataset = Dataset.from_pandas(train_df.reset_index(drop=True))
    val_dataset = Dataset.from_pandas(val_df.reset_index(drop=True))
    test_dataset = Dataset.from_pandas(test_df.reset_index(drop=True))

    return DatasetDict({
        'train': train_dataset, 'validation': val_dataset, 'test': test_dataset
    })


def sample_training_set(train_dataset, target_size, random_state):
    """Undersample the majority class (label 2) to hit a target training-set size.

    All samples of the minority classes (labels 0 and 1) are always kept; only
    as many majority-class rows as needed to reach ``target_size`` are drawn.
    The result is shuffled with ``random_state`` for reproducibility.

    Returns:
        (Dataset, DataFrame): the sampled training set in both representations.
    """
    frame = train_dataset.to_pandas()
    print(f"原始训练集大小: {len(frame)}")
    print("原始训练集类别分布:\n", frame['labels'].value_counts())

    by_label = {lbl: frame[frame['labels'] == lbl] for lbl in (0, 1, 2)}

    # Every minority-class row is retained unconditionally.
    minority_total = len(by_label[0]) + len(by_label[1])

    # Remaining budget to be filled from the majority class.
    majority_quota = target_size - minority_total
    majority_pool = by_label[2]

    if majority_quota < 0:
        print("警告：少数类样本总数已超过目标训练集大小。将仅使用少数类样本。")
        combined = pd.concat([by_label[0], by_label[1]])
    elif majority_quota > len(majority_pool):
        print(f"警告：需要的多数类样本({majority_quota})多于可用数量({len(majority_pool)})。将使用所有可用样本。")
        combined = frame
    else:
        drawn = majority_pool.sample(n=majority_quota, random_state=random_state)
        combined = pd.concat([by_label[0], by_label[1], drawn])

    # Shuffle the final training set so class blocks are not contiguous.
    combined = combined.sample(frac=1, random_state=random_state).reset_index(drop=True)

    print(f"\n采样后训练集大小: {len(combined)}")
    print("采样后训练集类别分布:\n", combined['labels'].value_counts())

    return Dataset.from_pandas(combined), combined


def tokenize_data(raw_datasets, tokenizer, config):
    """Tokenize every split of a DatasetDict and prepare it for PyTorch training.

    Pads/truncates to config.MAX_LENGTH, drops the raw 'sentence' column and
    switches the dataset format to torch tensors.
    """

    def _encode(batch):
        return tokenizer(
            batch["sentence"],
            padding="max_length",
            truncation=True,
            max_length=config.MAX_LENGTH,
        )

    encoded = raw_datasets.map(_encode, batched=True).remove_columns(["sentence"])
    encoded.set_format("torch")
    return encoded


# --- 3. Core components (unchanged from the previous version) ---
def compute_class_weights(df):
    """Compute balanced inverse-frequency class weights from a labelled DataFrame.

    weight_i = total_samples / (num_classes * count_i), so rarer classes get
    proportionally larger weights; returned in ascending label order as a
    float tensor suitable for torch.nn.CrossEntropyLoss(weight=...).
    """
    counts = df['labels'].value_counts().sort_index()
    n_samples = counts.sum()
    n_classes = len(counts)
    weight_values = [n_samples / (n_classes * counts[label]) for label in counts.index]
    class_weights_tensor = torch.tensor(weight_values, dtype=torch.float)
    print(f"\n计算出的类别权重: {class_weights_tensor}")
    return class_weights_tensor


def compute_metrics(eval_pred):
    """Compute accuracy plus imbalance-aware metrics from Trainer eval output.

    macro_f1 and balanced_accuracy weight every class equally, which is the
    meaningful signal on this skewed 3-class problem; zero_division=0 keeps
    F1 defined when a class is never predicted.
    """
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        'accuracy': accuracy_score(labels, preds),
        'macro_f1': f1_score(labels, preds, average='macro', zero_division=0),
        'weighted_f1': f1_score(labels, preds, average='weighted', zero_division=0),
        'balanced_accuracy': balanced_accuracy_score(labels, preds),
    }


class WeightedLossTrainer(Trainer):
    """Trainer subclass applying class-weighted cross-entropy to counter label imbalance."""

    def __init__(self, *args, class_weights=None, **kwargs):
        # class_weights: optional 1-D float tensor, one weight per label id;
        # moved to the training device resolved by TrainingArguments.
        # None falls back to unweighted cross-entropy.
        super().__init__(*args, **kwargs)
        self.class_weights = class_weights.to(self.args.device) if class_weights is not None else None

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        """Override of Trainer.compute_loss using weighted CrossEntropyLoss.

        **kwargs absorbs extra arguments newer transformers versions pass
        (e.g. num_items_in_batch), keeping the override version-compatible.
        """
        # Pop labels first so the model's forward pass does not compute
        # its own (unweighted) loss internally.
        labels = inputs.pop("labels")
        outputs = model(**inputs)
        logits = outputs.get("logits")
        loss_fct = torch.nn.CrossEntropyLoss(weight=self.class_weights)
        # Flatten to (batch, num_labels) vs (batch,) as CrossEntropyLoss expects.
        loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1))
        return (loss, outputs) if return_outputs else loss


# --- 4. Main execution flow ---
def main():
    """End-to-end pipeline: load data, undersample, fine-tune with LoRA, evaluate, save."""
    config = TrainingConfig()

    # 1. Load and perform the initial stratified split over the full data.
    raw_datasets = load_and_split_data(config)
    print("初始数据集信息:")
    print(raw_datasets)

    # 2. *** Undersample the training split only; val/test keep the original distribution. ***
    sampled_train_dataset, sampled_train_df = sample_training_set(
        raw_datasets['train'], config.TRAIN_SET_SIZE, config.RANDOM_STATE
    )
    # Replace the original training split with the sampled one.
    raw_datasets['train'] = sampled_train_dataset

    # 3. Tokenize all splits.
    tokenizer = AutoTokenizer.from_pretrained(config.MODEL_NAME)
    tokenized_datasets = tokenize_data(raw_datasets, tokenizer, config)

    # 4. Compute class weights on the SAMPLED training set, so the weighted
    #    loss matches the distribution the model actually trains on.
    class_weights = compute_class_weights(sampled_train_df)

    # 5. Initialise the base model and LoRA adapters.
    #    ignore_mismatched_sizes lets the pretrained checkpoint's head be
    #    replaced by a fresh 3-class classification head.
    model = AutoModelForSequenceClassification.from_pretrained(
        config.MODEL_NAME, num_labels=3, ignore_mismatched_sizes=True
    )
    id2label = {0: "Class 0", 1: "Class 1", 2: "Class 2"}
    label2id = {"Class 0": 0, "Class 1": 1, "Class 2": 2}
    model.config.id2label = id2label
    model.config.label2id = label2id

    lora_config = LoraConfig(
        task_type=TaskType.SEQ_CLS, r=config.LORA_R, lora_alpha=config.LORA_ALPHA,
        lora_dropout=config.LORA_DROPOUT, target_modules=config.TARGET_MODULES, bias="none"
    )
    lora_model = get_peft_model(model, lora_config)
    print("\nLoRA模型可训练参数:")
    lora_model.print_trainable_parameters()

    # 6. Training arguments: select the best checkpoint by macro_f1, the
    #    imbalance-aware metric, rather than plain accuracy.
    training_args = TrainingArguments(
        output_dir=config.OUTPUT_DIR, logging_dir=config.LOGGING_DIR,
        num_train_epochs=config.NUM_EPOCHS, per_device_train_batch_size=config.BATCH_SIZE,
        per_device_eval_batch_size=config.BATCH_SIZE, learning_rate=config.LEARNING_RATE,
        weight_decay=config.WEIGHT_DECAY, eval_strategy="epoch", save_strategy="epoch",
        load_best_model_at_end=True, metric_for_best_model="macro_f1", greater_is_better=True,
        push_to_hub=False, report_to="none"
    )

    # 7. Instantiate the custom weighted-loss Trainer and train.
    trainer = WeightedLossTrainer(
        model=lora_model, args=training_args, train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"], tokenizer=tokenizer,
        compute_metrics=compute_metrics, class_weights=class_weights
    )

    print("\n--- 开始训练 ---")
    trainer.train()
    print("--- 训练完成 ---")

    # Final evaluation on the held-out, non-resampled test split.
    print("\n--- 在测试集上评估最终模型 ---")
    test_results = trainer.evaluate(eval_dataset=tokenized_datasets["test"])
    print("测试集评估结果:")
    print(test_results)

    # save_model on a PEFT model stores only the LoRA adapter weights.
    final_model_path = f"{config.OUTPUT_DIR}/final_model"
    trainer.save_model(final_model_path)
    tokenizer.save_pretrained(final_model_path)
    print(f"\n最终的LoRA适配器和分词器已保存至: {final_model_path}")


# Standard entry guard: run the pipeline only when executed as a script.
if __name__ == "__main__":
    main()
