import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
import pandas as pd
import os


# 自定义数据集类
# Torch dataset wrapping raw texts/labels; tokenizes lazily on access.
class SentimentDataset(Dataset):
  def __init__(self, texts, labels, tokenizer, max_length):
    """Store raw samples plus the tokenizer and fixed padding length."""
    self.texts = texts
    self.labels = labels
    self.tokenizer = tokenizer
    self.max_length = max_length

  def __len__(self):
    """Number of samples available."""
    return len(self.texts)

  def __getitem__(self, idx):
    """Tokenize sample *idx* and return model-ready 1-D tensors."""
    sample = str(self.texts[idx])
    target = self.labels[idx]
    encoded = self.tokenizer.encode_plus(
        sample,
        add_special_tokens=True,
        max_length=self.max_length,
        padding='max_length',
        truncation=True,
        return_tensors='pt'
    )
    # encode_plus returns (1, max_length) tensors; flatten drops the batch dim.
    item = {
      'input_ids': encoded['input_ids'].flatten(),
      'attention_mask': encoded['attention_mask'].flatten(),
    }
    item['labels'] = torch.tensor(target, dtype=torch.long)
    return item


# 加载数据集
# Load the labeled reviews and split into train/test sets.
def load_dataset(csv_path='ChnSentiCorp_htl_all.csv', test_size=0.2, random_state=42):
  """Load the sentiment CSV and return (train_texts, test_texts, train_labels, test_labels).

  Args:
    csv_path: path to a CSV with 'review' and 'label' columns
      (defaults preserve the original hard-coded behavior).
    test_size: fraction of samples held out for the test split.
    random_state: seed for a reproducible split.
  """
  df = pd.read_csv(csv_path)
  # The ChnSentiCorp hotel-review CSV contains rows whose 'review' is NaN;
  # without this, SentimentDataset.__getitem__ would tokenize the literal
  # string 'nan' as a sample. Drop them before splitting.
  df = df.dropna(subset=['review'])
  texts = df['review'].tolist()
  labels = df['label'].tolist()
  train_texts, test_texts, train_labels, test_labels = train_test_split(
      texts, labels, test_size=test_size, random_state=random_state)
  return train_texts, test_texts, train_labels, test_labels


# 训练模型
def train_model(model, train_dataloader, optimizer, device, epochs):
  model.train()
  for epoch in range(epochs):
    total_loss = 0
    for batch in train_dataloader:
      input_ids = batch['input_ids'].to(device)
      attention_mask = batch['attention_mask'].to(device)
      labels = batch['labels'].to(device)

      optimizer.zero_grad()
      outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
      loss = outputs.loss
      total_loss += loss.item()
      loss.backward()
      optimizer.step()
    print(f'Epoch {epoch + 1}, Loss: {total_loss / len(train_dataloader)}')


# 评估模型
# Evaluate on the held-out split and report accuracy + per-class metrics.
def evaluate_model(model, test_dataloader, device):
  """Run inference over *test_dataloader*, print and return evaluation metrics.

  Returns:
    (accuracy, report): overall accuracy (float) and the sklearn
    classification-report string. The original only printed these, so
    callers could not log or compare metrics programmatically; existing
    callers that ignore the return value are unaffected.
  """
  model.eval()
  all_preds = []
  all_labels = []
  with torch.no_grad():  # inference only — no gradient bookkeeping needed
    for batch in test_dataloader:
      input_ids = batch['input_ids'].to(device)
      attention_mask = batch['attention_mask'].to(device)
      labels = batch['labels'].to(device)

      outputs = model(input_ids, attention_mask=attention_mask)
      preds = torch.argmax(outputs.logits, dim=1)

      all_preds.extend(preds.cpu().tolist())
      all_labels.extend(labels.cpu().tolist())

  accuracy = accuracy_score(all_labels, all_preds)
  report = classification_report(all_labels, all_preds)
  print(f"Accuracy: {accuracy}")
  print(report)
  return accuracy, report

# 主函数
# Entry point: fine-tune bert-base-chinese on hotel-review sentiment data.
def main():
  """Train, save, and evaluate a binary sentiment classifier."""
  # Pick the best available accelerator, falling back to CPU. The previous
  # code used 'mps' unconditionally whenever CUDA was absent, which raises
  # at runtime on machines without Apple-silicon MPS support.
  if torch.cuda.is_available():
    device = torch.device('cuda')
  elif torch.backends.mps.is_available():
    device = torch.device('mps')
  else:
    device = torch.device('cpu')
  tokenizer = BertTokenizer.from_pretrained('model/bert-base-chinese')
  model = BertForSequenceClassification.from_pretrained('model/bert-base-chinese', num_labels=2)
  model.to(device)

  train_texts, test_texts, train_labels, test_labels = load_dataset()
  max_length = 128

  train_dataset = SentimentDataset(train_texts, train_labels, tokenizer, max_length)
  test_dataset = SentimentDataset(test_texts, test_labels, tokenizer, max_length)

  train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)
  test_dataloader = DataLoader(test_dataset, batch_size=16, shuffle=False)

  optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
  epochs = 3

  train_model(model, train_dataloader, optimizer, device, epochs)

  # Save the fine-tuned weights and tokenizer files together so the
  # directory can be reloaded with from_pretrained later.
  model_save_path = 'model/bert-tune'
  os.makedirs(model_save_path, exist_ok=True)
  model.save_pretrained(model_save_path)
  tokenizer.save_pretrained(model_save_path)

  # Evaluate after saving so a crash during evaluation does not lose the model.
  evaluate_model(model, test_dataloader, device)


# Script entry point: run the full train / save / evaluate pipeline.
if __name__ == "__main__":
  main()