import csv

import pandas as pd
import torch
from torch.utils.data import Dataset
from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments


def load_snli_data(file_path):
  """Load one SNLI TSV split and return its (sentence1, sentence2, gold_label) rows.

  Rows whose gold_label is not one of the three NLI classes (e.g. '-' for
  annotator disagreement) are dropped, as are rows with a missing sentence;
  both sentence columns are coerced to str so the tokenizer never sees NaN.
  """
  # SNLI sentences contain literal double-quote characters; with pandas'
  # default quoting they are interpreted as field quoting and rows get
  # merged/corrupted. QUOTE_NONE keeps the file a plain tab-split TSV.
  data = pd.read_csv(file_path, sep='\t', quoting=csv.QUOTE_NONE)
  data = data[['sentence1', 'sentence2', 'gold_label']]
  data = data[data['gold_label'].isin(['entailment', 'contradiction', 'neutral'])]

  # Guard against NaN / non-string cells before tokenization
  data = data.dropna(subset=['sentence1', 'sentence2'])
  data['sentence1'] = data['sentence1'].astype(str)
  data['sentence2'] = data['sentence2'].astype(str)

  return data


# Paths to the SNLI dataset files; make sure the dataset has been downloaded
# to these locations first (e.g. snli_1.0_train.txt).
train_file_path = './data/snli_1.0_train.txt'
dev_file_path = './data/snli_1.0_dev.txt'

train_data = load_snli_data(train_file_path)
dev_data = load_snli_data(dev_file_path)

# Print the head of each split to sanity-check the parsed format
print("Train Data Head:\n", train_data.head())
print("Dev Data Head:\n", dev_data.head())

# Use the 'bert-base-uncased' checkpoint; num_labels=3 matches the three
# NLI classes (entailment / contradiction / neutral)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3)


# 对句子对进行编码
def encode_data(data, tokenizer, max_length=128):
  sentences1 = data['sentence1'].tolist()
  sentences2 = data['sentence2'].tolist()

  # 打印前几个句子以确认格式
  print("Sample sentences1:", sentences1[:5])
  print("Sample sentences2:", sentences2[:5])

  return tokenizer(
    sentences1,
    sentences2,
    truncation=True,
    padding=True,
    max_length=max_length,
    return_tensors='pt'
  )


train_encodings = encode_data(train_data, tokenizer)
dev_encodings = encode_data(dev_data, tokenizer)

# Map string labels to the integer class ids the 3-way classification head expects
label_map = {'entailment': 0, 'contradiction': 1, 'neutral': 2}
train_labels = [label_map[label] for label in train_data['gold_label']]
dev_labels = [label_map[label] for label in dev_data['gold_label']]

train_labels = torch.tensor(train_labels)
dev_labels = torch.tensor(dev_labels)


class SNLIDataset(Dataset):
  """Pairs tokenizer batch encodings with label tensors for the Trainer."""

  def __init__(self, encodings, labels):
    self.encodings = encodings
    self.labels = labels

  def __getitem__(self, idx):
    # Assemble one example: slice every encoding field at idx, then
    # attach the matching label under the key the Trainer expects.
    sample = {}
    for field, values in self.encodings.items():
      sample[field] = values[idx]
    sample['labels'] = self.labels[idx]
    return sample

  def __len__(self):
    # One example per label
    return len(self.labels)

# Pick the training device: CUDA when available, otherwise fall back to CPU
if torch.cuda.is_available():
    device = torch.device("cuda")
    print("Using GPU:", torch.cuda.get_device_name(0))
else:
    device = torch.device("cpu")
    print("Using CPU")

# NOTE(review): the HF Trainer places the model on the device itself, so this
# explicit move is presumably redundant — harmless, but confirm before relying on it.
model.to(device)

train_dataset = SNLIDataset(train_encodings, train_labels)
dev_dataset = SNLIDataset(dev_encodings, dev_labels)

training_args = TrainingArguments(
  output_dir='./results',          # checkpoints and trainer state
  num_train_epochs=3,
  per_device_train_batch_size=16,
  per_device_eval_batch_size=16,
  warmup_steps=500,                # linear LR warmup steps
  weight_decay=0.01,
  logging_dir='./logs',
  logging_steps=10,
  evaluation_strategy="epoch",     # evaluate on the dev split once per epoch
  # fp16 mixed precision requires CUDA; enabling it unconditionally crashes
  # the CPU fallback path that the device-selection code above supports.
  fp16=torch.cuda.is_available()
)

trainer = Trainer(
  model=model,
  args=training_args,
  train_dataset=train_dataset,
  eval_dataset=dev_dataset
)

trainer.train()

# Save the fine-tuned model and its tokenizer so they can be reloaded
# together via from_pretrained('./model')
model.save_pretrained('./model')
tokenizer.save_pretrained('./model')