import os
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
from datasets import Dataset, concatenate_datasets
from transformers import (
    AutoTokenizer,
    BertForSequenceClassification,
    TrainingArguments,
    Trainer,
    DataCollatorWithPadding, AutoModelForSequenceClassification
)
from sklearn.metrics import accuracy_score, f1_score
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split


def compute_metrics(eval_pred):
    """Compute evaluation metrics for a transformers Trainer step.

    Args:
        eval_pred: a (logits, labels) pair as supplied by ``Trainer.evaluate``.

    Returns:
        dict with ``'accuracy'`` and weighted-average ``'f1'`` scores.
    """
    logits, labels = eval_pred
    predicted_classes = np.argmax(logits, axis=-1)
    return {
        'accuracy': accuracy_score(labels, predicted_classes),
        'f1': f1_score(labels, predicted_classes, average='weighted'),
    }

def tune_model(df, model_name, num_labels, model_save_path, content_column, label_column):
    """Fine-tune a pretrained sequence-classification model on a DataFrame.

    Args:
        df: pandas DataFrame holding the raw labelled samples.
        model_name: HF hub id or local path of the base checkpoint.
        num_labels: number of target classes.
        model_save_path: directory where the tuned model and tokenizer are saved.
        content_column: name of the text column in ``df``.
        label_column: name of the label column in ``df``.
    """
    # Drop rows containing missing values, then hold out 20% for evaluation.
    cleaned = df.dropna(axis=0)
    train_frame, eval_frame = train_test_split(cleaned, test_size=0.2, random_state=42)

    # Wrap both pandas splits as datasets.Dataset objects.
    splits = {
        "train": Dataset.from_pandas(train_frame),
        "test": Dataset.from_pandas(eval_frame),
    }

    # Load the tokenizer and a model with a classification head of num_labels.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=num_labels)

    def tokenize_function(examples):
        # Tokenize the text column and attach the labels field Trainer expects.
        encoded = tokenizer(examples[content_column], truncation=True, padding="max_length", max_length=128)
        encoded["labels"] = examples[label_column]
        return encoded

    # Apply tokenization to both splits.
    tokenized = {name: ds.map(tokenize_function, batched=True) for name, ds in splits.items()}

    # Batch collator (inputs are already padded to max_length above).
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    # Training configuration: evaluate every 50 steps, keep the best checkpoint.
    training_args = TrainingArguments(
        output_dir='../results',
        num_train_epochs=3,
        per_device_train_batch_size=16,
        per_device_eval_batch_size=64,
        warmup_steps=500,
        weight_decay=0.01,
        logging_dir='./logs',
        logging_steps=10,
        eval_strategy="steps",
        eval_steps=50,
        save_strategy="steps",
        save_steps=500,
        load_best_model_at_end=True,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized["train"],
        eval_dataset=tokenized["test"],
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Run fine-tuning.
    trainer.train()

    # Report held-out metrics.
    results = trainer.evaluate()
    print("Evaluation Results:", results)

    # Persist model weights and tokenizer files side by side.
    trainer.save_model(model_save_path)
    tokenizer.save_pretrained(model_save_path)

def tune_model_classify(df, df_old, model_name, num_labels, model_save_path, content_column, label_column):
    """Fine-tune a pretrained classifier with a reinitialised output head.

    Optionally merges ``df_old`` into ``df`` (data replay) before training,
    and uses discriminative learning rates: a large LR for the fresh
    classification head, a small LR for the pretrained encoder body.

    Args:
        df: new labelled training data (pandas DataFrame).
        df_old: optional earlier data to merge in, or None to skip merging.
        model_name: HF hub id or local path of the base checkpoint.
        num_labels: number of target classes for the new head.
        model_save_path: directory where the tuned model and tokenizer are saved.
        content_column: name of the text column.
        label_column: name of the label column.
    """
    # ignore_mismatched_sizes allows replacing the checkpoint's head with a
    # freshly initialised one when num_labels differs from the original.
    model = AutoModelForSequenceClassification.from_pretrained(
        model_name,
        num_labels=num_labels,
        ignore_mismatched_sizes=True,
    )

    # Replay: mix old data with the new samples when provided.
    if df_old is not None:
        df = pd.concat([df, df_old], ignore_index=True)
    # Drop rows with missing values, then hold out 20% for evaluation.
    df = df.dropna(axis=0)
    train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)

    split_dataset = {
        "train": Dataset.from_pandas(train_df),
        "test": Dataset.from_pandas(test_df),
    }

    tokenizer = AutoTokenizer.from_pretrained(model_name)

    def tokenize_function(examples):
        # Tokenize the text column and attach the labels field Trainer expects.
        tokenized_inputs = tokenizer(examples[content_column], truncation=True, padding="max_length", max_length=128)
        tokenized_inputs["labels"] = examples[label_column]
        return tokenized_inputs

    tokenized_datasets = {
        "train": split_dataset["train"].map(tokenize_function, batched=True),
        "test": split_dataset["test"].map(tokenize_function, batched=True),
    }

    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    # Training configuration: more epochs than the base tuner to let the new
    # head adapt; evaluate/save every 200 steps and keep the best checkpoint.
    training_args = TrainingArguments(
        output_dir='./results_triple',
        num_train_epochs=5,
        per_device_train_batch_size=16,
        per_device_eval_batch_size=32,
        learning_rate=2e-5,
        warmup_ratio=0.1,
        weight_decay=0.01,
        logging_dir='./logs_triple',
        eval_strategy="steps",
        eval_steps=200,
        save_strategy="steps",
        save_steps=200,
        load_best_model_at_end=True,
    )

    # Discriminative learning rates: 1e-3 for the classifier head,
    # 2e-5 for everything else.
    from torch.optim import AdamW
    head_params = [p for n, p in model.named_parameters() if "classifier" in n]
    body_params = [p for n, p in model.named_parameters() if "classifier" not in n]
    optimizer = AdamW([
        {"params": head_params, "lr": 1e-3},
        {"params": body_params, "lr": 2e-5},
    ])

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["test"],
        data_collator=data_collator,
        compute_metrics=compute_metrics,
        optimizers=(optimizer, None),  # None -> Trainer builds its default LR scheduler
    )

    # Run fine-tuning.
    trainer.train()

    # Report held-out metrics.
    results = trainer.evaluate()
    print("Evaluation Results:", results)

    # Persist model weights and tokenizer files side by side.
    trainer.save_model(model_save_path)
    tokenizer.save_pretrained(model_save_path)