import os

from sklearn.model_selection import train_test_split

os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
from transformers import (
  AutoTokenizer,
  TrainingArguments,
  Trainer,
  DataCollatorWithPadding, AutoModelForSequenceClassification
)
from datasets import load_dataset, interleave_datasets, Features, Value, \
  ClassLabel, Dataset, DatasetDict
from sklearn.metrics import accuracy_score, f1_score
import numpy as np
import pandas as pd


def compute_metrics(eval_pred):
  """Compute evaluation metrics for a transformers.Trainer run.

  Args:
    eval_pred: (logits, labels) pair as supplied by Trainer during evaluation.

  Returns:
    dict with 'accuracy' and weighted-average 'f1'.
  """
  logits, labels = eval_pred
  preds = np.argmax(logits, axis=-1)
  return {
    'accuracy': accuracy_score(labels, preds),
    'f1': f1_score(labels, preds, average='weighted'),
  }

def load_data():
  """Load the local ChnSentiCorp train/validation splits from Arrow files.

  Returns:
    DatasetDict with 'train' and 'valid' splits.
  """
  data_files = {
    "train": "../datasets/chn_senti_corp-train.arrow",
    "valid": "../datasets/chn_senti_corp-validation.arrow",
  }
  return load_dataset("arrow", data_files=data_files)

def merge_data():
  """Interleave the ChnSentiCorp train split with the summary CSV data.

  Reads '../0208_0313_merge_2.csv', drops its 'content' column, renames
  'summary' to 'text', and casts 'label' to a two-class ClassLabel so the
  schema matches the Arrow dataset before interleaving.

  Returns:
    an interleaved datasets.Dataset combining both sources.
  """
  chn = load_dataset(
    "arrow", data_files="../datasets/chn_senti_corp-train.arrow", split='train')

  summary_df = pd.read_csv('../0208_0313_merge_2.csv').drop('content', axis=1)
  summary = Dataset.from_pandas(summary_df).rename_column("summary", "text")

  # Cast so 'label' carries explicit class names instead of raw integers.
  schema = Features({
    "text": Value("string"),
    "label": ClassLabel(names=["negative", "positive"]),
  })
  summary = summary.cast(schema)

  return interleave_datasets([chn, summary])

def merge_data_pd():
  """Build a DatasetDict mixing ChnSentiCorp and the summary CSV via pandas.

  The CSV is split 80/20 with a fixed seed (42); each part is concatenated
  with the corresponding ChnSentiCorp split, then converted back to
  datasets.Dataset objects.

  Returns:
    DatasetDict with 'train' and 'valid' splits.
  """
  chn_train = load_dataset(
    "arrow", data_files="../datasets/chn_senti_corp-train.arrow",
    split='train').to_pandas()
  chn_valid = load_dataset(
    "arrow", data_files="../datasets/chn_senti_corp-validation.arrow",
    split='train').to_pandas()

  # NOTE(review): mapping 'content' -> 'label' looks odd ('content' usually
  # holds text, not a class id); confirm the CSV schema before relying on it.
  summary_df = pd.read_csv('../0208_0313_merge.csv').rename(
    columns={'summary': 'text', 'content': 'label'})
  summary_train, summary_valid = train_test_split(
    summary_df, test_size=0.2, random_state=42)

  train_df = pd.concat([summary_train, chn_train], ignore_index=True)
  valid_df = pd.concat([summary_valid, chn_valid], ignore_index=True)

  return DatasetDict({
    'train': Dataset.from_pandas(train_df),
    'valid': Dataset.from_pandas(valid_df),
  })

def train(model_name, output_file):
  """Fine-tune a sequence-classification model on ChnSentiCorp (2 labels).

  Args:
    model_name: path or hub id of the pretrained model to start from.
    output_file: directory where the final model and tokenizer are saved.
  """
  dataset = load_data()
  print(dataset["train"][0])

  tokenizer = AutoTokenizer.from_pretrained(model_name)

  # Binary sentiment classification head.
  model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

  def preprocess_function(examples):
    # Fixed-length padding makes every example 128 tokens; the padding
    # collator below is then effectively a no-op but harmless.
    return tokenizer(
        examples["text"],
        max_length=128,
        padding="max_length",
        truncation=True,
        return_attention_mask=True
    )

  encoded_dataset = dataset.map(preprocess_function, batched=True)
  # Trainer expects the label column to be named 'labels'.
  encoded_dataset = encoded_dataset.rename_column("label", "labels")
  print("处理后的样本类型:", encoded_dataset["train"].features)

  # Dynamic-padding collator (inputs are already max_length-padded above).
  data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

  training_args = TrainingArguments(
      # NOTE(review): hard-coded and shared with tune_2nd — checkpoints from
      # the two stages land in the same directory; consider deriving from
      # output_file.
      output_dir="../model_train/ernie-sentiment",
      learning_rate=2e-5,
      per_device_train_batch_size=32,
      per_device_eval_batch_size=64,
      warmup_steps=500,
      weight_decay=0.01,
      num_train_epochs=3,
      logging_steps=50,
      # Per-epoch eval/save; the previous eval_steps=50 / save_steps=500 were
      # dead config (ignored when the strategies are 'epoch') and are removed.
      eval_strategy='epoch',
      save_strategy='epoch',
      load_best_model_at_end=True
  )

  trainer = Trainer(
      model=model,
      args=training_args,
      train_dataset=encoded_dataset["train"],
      eval_dataset=encoded_dataset["valid"],
      data_collator=data_collator,
      compute_metrics=compute_metrics
  )

  trainer.train()
  results = trainer.evaluate()
  print("Evaluation Results:", results)
  # Persist final (best) model and tokenizer together.
  trainer.save_model(output_file)
  tokenizer.save_pretrained(output_file)

def tune_2nd(model_name, output_file, num_labels):
  """Second-stage fine-tuning on the merged (ChnSentiCorp + summary) data.

  Args:
    model_name: path of the stage-1 fine-tuned model to continue from.
    output_file: directory where the final model and tokenizer are saved.
    num_labels: size of the new classification head.
  """
  dataset = merge_data_pd()
  tokenizer = AutoTokenizer.from_pretrained(model_name)

  # ignore_mismatched_sizes lets us replace the stage-1 head (2 labels)
  # with a freshly initialized num_labels-wide one.
  model = AutoModelForSequenceClassification.from_pretrained(
      model_name, num_labels=num_labels, ignore_mismatched_sizes=True)

  def preprocess_function(examples):
    # Fixed-length padding makes every example 128 tokens; the padding
    # collator below is then effectively a no-op but harmless.
    return tokenizer(
        examples["text"],
        max_length=128,
        padding="max_length",
        truncation=True,
        return_attention_mask=True
    )

  encoded_dataset = dataset.map(preprocess_function, batched=True)
  # Trainer expects the label column to be named 'labels'.
  encoded_dataset = encoded_dataset.rename_column("label", "labels")
  print("处理后的样本类型:", encoded_dataset["train"].features)

  # Dynamic-padding collator (inputs are already max_length-padded above).
  data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

  training_args = TrainingArguments(
      # NOTE(review): hard-coded and shared with train() — checkpoints from
      # the two stages land in the same directory; consider deriving from
      # output_file.
      output_dir="../model_train/ernie-sentiment",
      learning_rate=2e-5,
      per_device_train_batch_size=32,
      per_device_eval_batch_size=64,
      warmup_steps=500,
      weight_decay=0.01,
      num_train_epochs=3,
      logging_steps=50,
      # Per-epoch eval/save; the previous eval_steps=50 / save_steps=500 were
      # dead config (ignored when the strategies are 'epoch') and are removed.
      eval_strategy='epoch',
      save_strategy='epoch',
      load_best_model_at_end=True
  )

  trainer = Trainer(
      model=model,
      args=training_args,
      train_dataset=encoded_dataset['train'],
      eval_dataset=encoded_dataset["valid"],
      data_collator=data_collator,
      compute_metrics=compute_metrics
  )

  trainer.train()
  results = trainer.evaluate()
  print("Evaluation Results:", results)
  # Persist final (best) model and tokenizer together.
  trainer.save_model(output_file)
  tokenizer.save_pretrained(output_file)




if __name__ == "__main__":
  # Stage 1 (run once, then comment out): fine-tune the base ERNIE model on
  # ChnSentiCorp with 2 labels.
  # train(model_name='../model/ernie-3-base-zh', output_file='../model/ernie-tune-chn-fix')

  # Stage 2: continue from the stage-1 model, replacing the head with 3 labels.
  # (The previously unused model_name/output_file locals are removed.)
  tune_2nd(
      model_name='../model/ernie-tune-chn-fix',
      output_file='../model/ernie-tune-chn-summary-3num-fix',
      num_labels=3,
  )