import os
os.environ['HF_ENDPOINT']='https://hf-mirror.com'
import pandas as pd
import torch
import numpy as np
import sqlite3
from datasets import Dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer,
    EvalPrediction
)
from peft import get_peft_model, LoraConfig, TaskType
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, balanced_accuracy_score
from torch.nn import CrossEntropyLoss

# --- 1. Configuration ---
# Path to the SQLite database holding labelled sentences
DB_PATH = './sentiment_analysis.db'
# Base pretrained model (Chinese RoBERTa-wwm-ext)
BASE_MODEL_NAME = "hfl/chinese-roberta-wwm-ext"
# Output directory for the new binary classifier
OUTPUT_MODEL_DIR = "./binary_climate_model"

# --- 2. Load and prepare data ---
print("正在加载数据...")
try:
    con = sqlite3.connect(DB_PATH)
    # Keep only rows labelled 0 or 1 (the binary task)
    query = "SELECT sentence, result FROM sentiments WHERE result IN (0, 1)"
    df = pd.read_sql_query(query, con)
    con.close()

    # HF Trainer/datasets expect the target column to be named 'label'
    df.rename(columns={'result': 'label'}, inplace=True)
    print(f"成功加载数据。类别0数量: {len(df[df.label == 0])}, 类别1数量: {len(df[df.label == 1])}")

except Exception as e:  # NOTE(review): broad catch, but the script cannot proceed without data
    print(f"数据库加载失败: {e}")
    exit()

# 80/20 stratified split so both sets keep the (imbalanced) label ratio
train_df, eval_df = train_test_split(df, test_size=0.2, random_state=42, stratify=df['label'])

train_dataset = Dataset.from_pandas(train_df)
eval_dataset = Dataset.from_pandas(eval_df)

# --- 3. Tokenization ---
print("正在加载分词器...")
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_NAME)


def tokenize_function(examples):
    """Tokenize the 'sentence' field into fixed-length (256) padded inputs."""
    sentences = examples['sentence']
    return tokenizer(
        sentences,
        truncation=True,
        padding="max_length",
        max_length=256,
    )


print("正在对数据集进行分词...")
# batched=True lets the tokenizer process examples in chunks for speed
tokenized_train_dataset = train_dataset.map(tokenize_function, batched=True)
tokenized_eval_dataset = eval_dataset.map(tokenize_function, batched=True)

# --- 4. Model configuration and training ---
print("正在配置模型...")
# LoRA adapter configuration for sequence classification
peft_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    inference_mode=False,
    r=8,  # LoRA rank
    lora_alpha=16,  # LoRA scaling factor
    lora_dropout=0.1,
    target_modules=['query', 'key', 'value']  # attention projections in BERT-style models
)

# Load the base model with a fresh 2-way classification head
model = AutoModelForSequenceClassification.from_pretrained(
    BASE_MODEL_NAME,
    num_labels=2,  # binary classification
    id2label={0: "气候风险暴露", 1: "气候风险防范"},
    label2id={"气候风险暴露": 0, "气候风险防范": 1}
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()


class WeightedLossTrainer(Trainer):
    """Trainer using class-weighted cross-entropy to counter label imbalance.

    Weights are inverse-frequency (total / count_per_class), derived from the
    label distribution of the module-level ``df`` DataFrame.
    """

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        """Compute weighted CE loss; signature matches Trainer.compute_loss.

        Returns the scalar loss, or ``(loss, outputs)`` if ``return_outputs``.
        """
        labels = inputs.get("labels")
        outputs = model(**inputs)
        logits = outputs.get("logits")

        # Compute the inverse-frequency class weights once and cache them:
        # the original recomputed np.bincount and built a fresh tensor on
        # every single training step.
        if not hasattr(self, "_class_weights"):
            class_counts = np.bincount(df['label'])
            total_counts = len(df)
            self._class_weights = torch.tensor(
                [total_counts / class_counts[i] for i in range(len(class_counts))],
                dtype=torch.float,
            )

        loss_fct = CrossEntropyLoss(weight=self._class_weights.to(model.device))
        loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1))
        return (loss, outputs) if return_outputs else loss


def compute_metrics(p: EvalPrediction):
    """Return accuracy, macro-F1 and balanced accuracy for an eval run."""
    y_true = p.label_ids
    y_pred = np.argmax(p.predictions, axis=1)
    return {
        "accuracy": accuracy_score(y_true, y_pred),
        "macro_f1": f1_score(y_true, y_pred, average='macro'),
        "balanced_accuracy": balanced_accuracy_score(y_true, y_pred),
    }


# Training arguments: evaluate/save every epoch and restore the checkpoint
# with the best macro-F1 when training finishes.
training_args = TrainingArguments(
    output_dir=f"{OUTPUT_MODEL_DIR}/checkpoints",
    num_train_epochs=50,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    warmup_steps=50,
    weight_decay=0.01,
    logging_dir='./logs',
    logging_steps=50,
    eval_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="macro_f1",
    greater_is_better=True,
    report_to="none"  # disable external experiment trackers (W&B etc.)
)

trainer = WeightedLossTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_train_dataset,
    eval_dataset=tokenized_eval_dataset,
    compute_metrics=compute_metrics,
)

print("--- 开始训练 ---")
trainer.train()
print("--- 训练完成 ---")

# --- 5. Save the final (best) model and tokenizer together ---
print(f"正在将最佳模型保存到 {OUTPUT_MODEL_DIR}/final_model ...")
trainer.save_model(f"{OUTPUT_MODEL_DIR}/final_model")
tokenizer.save_pretrained(f"{OUTPUT_MODEL_DIR}/final_model")
print("模型保存完毕。")

