from paddlenlp.data import DataCollatorWithPadding
from paddlenlp.transformers import AutoTokenizer, AutoModelForSequenceClassification
from paddlenlp.trainer import TrainingArguments, Trainer, IntervalStrategy
from datasets import Dataset, load_from_disk, load_dataset
from sklearn.metrics import accuracy_score, f1_score
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split


def compute_metrics(eval_pred):
  """Compute evaluation metrics from a (logits, labels) pair.

  Args:
    eval_pred: tuple of (logits, labels) as provided by the Trainer.

  Returns:
    dict with 'accuracy' and weighted 'f1' scores.
  """
  scores, gold = eval_pred
  # Predicted class is the argmax over the last (class) axis.
  preds = np.argmax(scores, axis=-1)
  return {
    'accuracy': accuracy_score(gold, preds),
    'f1': f1_score(gold, preds, average='weighted'),
  }

def load_data():
  """Load the ChnSentiCorp train/validation splits from local Arrow files.

  Returns:
    A DatasetDict with 'train' and 'valid' splits.
  """
  data_files = {
    "train": "../datasets/chn_senti_corp-train.arrow",
    "valid": "../datasets/chn_senti_corp-validation.arrow",
  }
  return load_dataset("arrow", data_files=data_files)

def main():
  """Fine-tune ERNIE 3.0 on ChnSentiCorp sentiment data and save the model.

  Pipeline: load Arrow splits, tokenize to fixed length, train with the
  PaddleNLP Trainer, evaluate on the validation split, and persist the
  final (best) model.
  """
  dataset = load_data()
  print(dataset["train"][0])

  model_name = "ernie-3.0-base-zh"
  tokenizer = AutoTokenizer.from_pretrained(model_name)

  # Sequence-classification head for binary sentiment (num_labels=2).
  model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

  def preprocess_function(examples):
    # Tokenize to a fixed length of 128. padding="max_length" makes every
    # sample full-length already, so the collator's dynamic padding is a no-op.
    return tokenizer(
        examples["text"],
        max_length=128,
        padding="max_length",
        truncation=True,
        return_attention_mask=True
    )

  encoded_dataset = dataset.map(preprocess_function, batched=True, remove_columns=['text'])
  # Trainer expects the label column to be named "labels".
  encoded_dataset = encoded_dataset.rename_column("label", "labels")
  print("处理后的样本类型:", encoded_dataset["train"].features)

  # Batches the pre-tokenized features (padding is already fixed-length above).
  data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

  training_args = TrainingArguments(
      output_dir="../model_train/ernie-sentiment",
      learning_rate=2e-5,
      per_device_train_batch_size=32,
      per_device_eval_batch_size=64,
      warmup_steps=500,
      weight_decay=0.01,
      num_train_epochs=3,
      logging_steps=50,
      evaluation_strategy=IntervalStrategy.EPOCH,
      save_strategy=IntervalStrategy.EPOCH,
      # NOTE(review): eval_steps/save_steps are ignored while both strategies
      # are EPOCH; kept only to ease switching back to STEPS-based scheduling.
      eval_steps=50,
      save_steps=500,
      # Requires eval and save strategies to match (both EPOCH here).
      load_best_model_at_end=True
  )

  trainer = Trainer(
      model=model,
      args=training_args,
      train_dataset=encoded_dataset["train"],
      eval_dataset=encoded_dataset["valid"],
      tokenizer=tokenizer,
      data_collator=data_collator,
      compute_metrics=compute_metrics
  )

  trainer.train()
  results = trainer.evaluate()
  print("Evaluation Results:", results)
  # Save the final model.
  # NOTE(review): this path differs from output_dir above ("./" vs
  # "../model_train/") — confirm which location downstream consumers expect.
  trainer.save_model("./ernie-sentiment-final")


# Standard script entry guard: run training only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
  main()