from datasets import load_dataset,Dataset
from transformers import (
  BertTokenizer,
  BertForSequenceClassification,
  TrainingArguments,
  Trainer,
  DataCollatorWithPadding
)
from sklearn.metrics import accuracy_score, f1_score
import numpy as np
import torch
import pandas as pd
from sklearn.model_selection import train_test_split



def compute_metrics(eval_pred):
  """Compute evaluation metrics for a transformers.Trainer eval step.

  Args:
    eval_pred: an (logits, labels) pair of numpy arrays as supplied
      by the Trainer during evaluation.

  Returns:
    dict with 'accuracy' and weighted-average 'f1' scores.
  """
  logits, labels = eval_pred
  # Predicted class = index of the largest logit per example.
  preds = np.argmax(logits, axis=-1)
  return {
    'accuracy': accuracy_score(labels, preds),
    'f1': f1_score(labels, preds, average='weighted'),
  }


# Entry point: fine-tune a Chinese BERT sentiment classifier.
def main():
  """Fine-tune bert-base-chinese on the ChnSentiCorp hotel-review dataset.

  Pipeline: read 'ChnSentiCorp_htl_all.csv' (expects a 'review' text column
  and a label column), drop rows with missing values, split 80/20, tokenize,
  train with the HF Trainer, evaluate, and save model + tokenizer to
  'model/bert-tune-trainer'.
  """
  # Load the CSV with pandas; some reviews in this dataset are NaN, so
  # drop incomplete rows up front instead of special-casing them later.
  df = pd.read_csv('ChnSentiCorp_htl_all.csv')
  df = df.dropna(axis=0)

  # 80/20 train/test split with a fixed seed for reproducibility.
  train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)

  # Convert to datasets.Dataset. preserve_index=False prevents from_pandas
  # from adding a junk '__index_level_0__' column to the dataset.
  split_dataset = {
    "train": Dataset.from_pandas(train_df, preserve_index=False),
    "test": Dataset.from_pandas(test_df, preserve_index=False),
  }

  # Load the tokenizer and a 2-class sequence-classification head.
  tokenizer = BertTokenizer.from_pretrained('model/bert-base-chinese')
  model = BertForSequenceClassification.from_pretrained('model/bert-base-chinese', num_labels=2)

  # Use Apple-Silicon MPS when available. (The Trainer would also place
  # the model automatically; this makes the choice explicit.)
  if torch.backends.mps.is_available():
    model = model.to(torch.device("mps"))

  # Tokenize the 'review' column. Only truncate here: padding is deferred
  # to DataCollatorWithPadding, which pads each batch to its longest
  # sequence (dynamic padding) instead of padding everything to
  # max_length. Do NOT pass return_tensors="pt" inside Dataset.map —
  # map stores plain python lists, so per-example tensors would just be
  # converted straight back.
  def tokenize_function(examples):
    return tokenizer(examples["review"], truncation=True, max_length=128)

  tokenized_datasets = {
    split: ds.map(tokenize_function, batched=True)
    for split, ds in split_dataset.items()
  }

  # Pads each batch to the length of its longest sequence at collate time.
  data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

  # Training configuration: evaluate and checkpoint once per epoch and
  # restore the best checkpoint (by eval loss) at the end of training.
  training_args = TrainingArguments(
      output_dir="./bert_chn_sentiment",
      eval_strategy="epoch",
      learning_rate=2e-5,
      per_device_train_batch_size=16,
      per_device_eval_batch_size=16,
      num_train_epochs=3,
      weight_decay=0.01,
      save_strategy="epoch",
      load_best_model_at_end=True
  )

  trainer = Trainer(
      model=model,
      args=training_args,
      train_dataset=tokenized_datasets["train"],
      eval_dataset=tokenized_datasets["test"],
      data_collator=data_collator,
      compute_metrics=compute_metrics
  )

  # Train, then report final metrics on the held-out split.
  trainer.train()
  results = trainer.evaluate()
  print("Evaluation Results:", results)

  # Save model weights and tokenizer together so the pair can be
  # reloaded from a single directory.
  model_save_path = 'model/bert-tune-trainer'
  trainer.save_model(model_save_path)
  tokenizer.save_pretrained(model_save_path)


# Run the fine-tuning pipeline only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
  main()