import torch
from transformers import BertTokenizer, BertForSequenceClassification
from torch.utils.data import DataLoader, Dataset
import pandas as pd
import argparse
from sklearn.metrics import roc_auc_score
from tqdm import tqdm


# Dataset wrapping sentence pairs for BERT sequence-pair classification.
class TextDataset(Dataset):
    """Torch dataset that tokenizes (q1, q2) sentence pairs on the fly.

    Each item is a dict of flattened encoder tensors (``input_ids``,
    ``attention_mask``, ``token_type_ids``) plus the integer ``labels``.
    """

    def __init__(self, pairs, labels, tokenizer, max_len=64):
        self.pairs = pairs          # sequence of (q1, q2) sentence pairs
        self.labels = labels        # parallel sequence of integer labels
        self.tokenizer = tokenizer  # HuggingFace-style tokenizer callable
        self.max_len = max_len      # pad/truncate length for every example

    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, item):
        first, second = self.pairs[item]
        encoded = self.tokenizer(
            first,
            second,
            padding='max_length',
            truncation=True,
            max_length=self.max_len,
            return_tensors="pt",
        )
        # Tokenizer returns (1, max_len) tensors; flatten to (max_len,)
        # so the DataLoader's default collate stacks them into a batch.
        sample = {
            key: encoded[key].flatten()
            for key in ('input_ids', 'attention_mask', 'token_type_ids')
        }
        sample['labels'] = torch.tensor(self.labels[item], dtype=torch.long)
        return sample


# Train the classifier.
def train(model, train_data, epochs=3, batch_size=32, lr=2e-5, device='cuda'):
    """Fine-tune ``model`` on ``train_data`` with AdamW + cross-entropy.

    Args:
        model: sequence-classification model whose forward(...) returns an
            object exposing a ``.logits`` attribute; assumed to already
            live on ``device``.
        train_data: torch Dataset yielding dicts with ``input_ids``,
            ``attention_mask``, ``token_type_ids`` and ``labels`` tensors.
        epochs: number of passes over the data.
        batch_size: mini-batch size.
        lr: AdamW learning rate.
        device: device batches are moved to. Defaults to ``'cuda'`` to
            preserve the original hard-coded behavior; pass ``'cpu'`` to
            train without a GPU.
    """
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss()

    # Build the loader once; shuffle=True still reshuffles every epoch.
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

    model.train()
    for epoch in range(epochs):
        total_loss = 0
        for batch in tqdm(train_loader, desc=f"Training Epoch {epoch + 1}"):
            optimizer.zero_grad()
            outputs = model(
                input_ids=batch['input_ids'].to(device),
                attention_mask=batch['attention_mask'].to(device),
                token_type_ids=batch['token_type_ids'].to(device)
            )
            loss = loss_fn(outputs.logits, batch['labels'].to(device))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(f"Epoch {epoch + 1} Loss: {total_loss / len(train_loader)}")


# Evaluate on a validation set and report ROC-AUC.
def evaluate(model, val_data, device='cuda'):
    """Compute ROC-AUC of ``model`` over ``val_data``.

    The softmax probability of class 1 is used as the ranking score for
    each pair. Prints and returns the AUC as a float.

    Args:
        model: sequence-classification model (already on ``device``) whose
            forward(...) returns an object exposing ``.logits``.
        val_data: torch Dataset yielding the same dict items as training.
        device: device batches are moved to. Defaults to ``'cuda'`` to
            preserve the original hard-coded behavior.
    """
    model.eval()
    val_loader = DataLoader(val_data, batch_size=32)
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for batch in tqdm(val_loader, desc="Evaluating"):
            outputs = model(
                input_ids=batch['input_ids'].to(device),
                attention_mask=batch['attention_mask'].to(device),
                token_type_ids=batch['token_type_ids'].to(device)
            )
            # Positive-class (index 1) probability is the score for AUC.
            preds = torch.softmax(outputs.logits, dim=-1)[:, 1].cpu().numpy()
            all_preds.extend(preds)
            all_labels.extend(batch['labels'].cpu().numpy())

    auc = roc_auc_score(all_labels, all_preds)
    print(f"AUC: {auc}")
    return auc


def load_data(path):
    """Read a headerless tab-separated file of (q1, q2, label) rows.

    Returns a ``(pairs, labels)`` tuple where ``pairs`` is a list of
    ``[q1, q2]`` lists and ``labels`` is a list with one entry per row.
    """
    frame = pd.read_csv(path, sep='\t', header=None)
    question_pairs = frame.iloc[:, [0, 1]].values.tolist()
    targets = frame.iloc[:, 2].tolist()
    return question_pairs, targets


def main():
    """Entry point: fine-tune MacBERT on the question-pair data, save weights."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--train_path', default='../../tcdata/oppo_breeno_round1_data/train.tsv')
    arg_parser.add_argument('--model_path', default='../../user_data/model_data/model.bin')
    args = arg_parser.parse_args()

    pretrained = 'hfl/chinese-macbert-base'
    tokenizer = BertTokenizer.from_pretrained(pretrained)
    model = BertForSequenceClassification.from_pretrained(pretrained, num_labels=2).cuda()

    pairs, labels = load_data(args.train_path)
    train_data = TextDataset(pairs, labels, tokenizer)

    # Train, then persist only the state dict (not the full module).
    train(model, train_data)
    torch.save(model.state_dict(), args.model_path)


# Run training only when executed as a script (not on import).
if __name__ == "__main__":
    main()
