import pandas as pd
import numpy as np
import torch
from transformers import BertTokenizer, BertForSequenceClassification, AdamW
from transformers import get_linear_schedule_with_warmup
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.nn.utils.rnn import pad_sequence
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import KFold
import re
from sklearn.metrics import confusion_matrix

def insert_special_token(text, keywords):
    """Wrap every standalone occurrence of each keyword in [SPE] markers.

    Matching is case-insensitive and bounded by word boundaries, and the
    replacement preserves the casing actually found in the text.

    Args:
        text: Input string to annotate.
        keywords: Iterable of keyword strings (treated as literal text).

    Returns:
        The annotated text.
    """
    for keyword in keywords:
        # re.escape: keywords are data, not regex patterns.
        pattern = re.compile(r'\b' + re.escape(keyword) + r'\b', re.IGNORECASE)
        # FIX: the original matched with re.IGNORECASE but then used
        # str.replace, which is case-sensitive and ignores word boundaries —
        # capitalized variants were silently skipped and substrings inside
        # longer words were wrongly tagged. \g<0> keeps the matched casing.
        text = pattern.sub(r'[SPE]\g<0>[SPE]', text)
    return text

def encode_texts(texts, max_len=128, keywords_path='/data/sfq/oco/keywords.txt'):
    """Tokenize texts for BERT after tagging domain keywords with [SPE].

    Relies on the module-level `tokenizer` (which must already contain the
    [SPE] additional special token).

    Args:
        texts: Iterable of raw strings.
        max_len: Fixed sequence length; shorter inputs are padded, longer
            ones truncated.
        keywords_path: File with one "keyword,weight" pair per line; only
            the keyword column is used here.

    Returns:
        Tuple (input_ids, attention_masks), two tensors of shape
        (len(texts), max_len).
    """
    # Load the keyword list once, before the per-text loop.
    keywords = []
    with open(keywords_path, 'r', encoding='utf-8') as f:
        for line in f:
            keyword, _weight = line.strip().split(',')
            keywords.append(keyword)

    input_ids, attention_masks = [], []
    for text in texts:
        text = insert_special_token(text, keywords)
        encoded_dict = tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=max_len,
            padding='max_length',   # FIX: pad_to_max_length= is deprecated in transformers
            truncation=True,        # make truncation explicit instead of relying on a warning path
            return_attention_mask=True,
            return_tensors='pt',
        )
        input_ids.append(encoded_dict['input_ids'])
        attention_masks.append(encoded_dict['attention_mask'])
    return torch.cat(input_ids, dim=0), torch.cat(attention_masks, dim=0)

# Data preprocessing
def text_predeal(temp):
    """Replace every character that is not a CJK ideograph, ASCII letter,
    digit, or one of a few full-width punctuation marks with a space, then
    strip leading/trailing whitespace.

    Args:
        temp: Raw comment string.

    Returns:
        The cleaned string (internal runs of spaces are NOT collapsed).
    """
    # Keep: CJK unified ideographs, A-Z, a-z, 0-9, and ，。？：！；“”.
    # FIX: removed a stray literal 'a' that sat between \u9fa5 and A-Z in the
    # original class — redundant (a-z already covers it), behavior unchanged.
    temp = re.sub('[^\u4e00-\u9fa5A-Za-z0-9，。？：！；“”]', ' ', temp)
    temp = temp.strip()
    return temp

def init_model():
    """Build the binary classifier, grow its embedding table to cover the
    added [SPE] token, and give [SPE] a fresh random embedding row.

    Relies on module-level globals: `local_path`, `device`, `tokenizer`
    (the tokenizer must already include [SPE] as an additional special token).

    Returns:
        The BertForSequenceClassification model, already moved to `device`.
    """
    model = BertForSequenceClassification.from_pretrained(local_path, num_labels=2,
                                                      output_attentions=True, 
                                                      attn_implementation="eager",
                                                      output_hidden_states=False)
    model.to(device)
    # Resize so the embedding matrix has a row for the [SPE] token.
    model.resize_token_embeddings(len(tokenizer)) 
    
    new_embedding = torch.randn(model.config.hidden_size)  # fresh random embedding for [SPE]
    # detach() shares storage with the live weight tensor, so the in-place
    # row assignment below writes straight into the embedding matrix.
    embedding_weight = model.bert.embeddings.word_embeddings.weight.detach()
    embedding_weight[tokenizer.convert_tokens_to_ids('[SPE]')] = new_embedding
    # Re-wrap as a trainable Parameter and hand it back to the module so the
    # whole table (including the new row) participates in training.
    embedding_weight = torch.nn.Parameter(embedding_weight)
    embedding_weight.requires_grad = True
    model.bert.embeddings.word_embeddings.weight = embedding_weight
    return model

gpu_id = 0
device = torch.device(f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu')
# When True, training is skipped and only the prediction stage below runs.
is_test = False

# Load data — note the differing separators: train is tab-separated,
# test is comma-separated.
train_data = pd.read_csv('/data/sfq/oco/train.csv', sep='\t')
test_data = pd.read_csv('/data/sfq/oco/test_new.csv', sep=',')

# Normalize raw comment text (drop characters outside the allowed set).
train_data['comment'] = train_data['comment'].apply(text_predeal)
test_data['comment'] = test_data['comment'].apply(text_predeal)


# Local paths: pretrained Chinese BERT weights and the best-checkpoint file.
local_path = '/data/pretrain_models/bert_cn/'
save_path = '/data/sfq/oco/model.pt'

# Tokenizer, extended with the [SPE] marker used by insert_special_token().
tokenizer = BertTokenizer.from_pretrained(local_path)
tokenizer.add_special_tokens({'additional_special_tokens':['[SPE]']})

train_texts, train_labels = train_data['comment'].values, train_data['label'].values
test_texts = test_data['comment'].values

train_input_ids, train_attention_masks = encode_texts(train_texts)
test_input_ids, test_attention_masks = encode_texts(test_texts)

# Test DataLoader — sequential sampler so predictions stay aligned with
# test_data's row order when written out at the end.
batch_size = 512
test_dataset = TensorDataset(test_input_ids, test_attention_masks)
test_dataloader = DataLoader(test_dataset, sampler=SequentialSampler(test_dataset), batch_size=batch_size)
# Lowest validation loss seen so far; drives checkpointing during training.
min_loss = np.inf

# Initialize the BERT model.
# (Kept for reference: earlier experiment that froze most parameters of the
# first two encoder layers, except their LayerNorms.)
# for name, p in model.named_parameters():
#     # print(name)
#     if 'bert.encoder.' in name:
#         if '.layer.0.' in name or '.layer.1.' in name:
#             if '.LayerNorm.' in name:
#                 p.requires_grad = True
#             else:
#                 p.requires_grad = False
#         else:
#             p.requires_grad = True
#     else:
#         p.requires_grad = True

# (Kept for reference: class-weighted loss experiment.)
# weight = torch.tensor([0.5, 1], dtype=torch.float32, device=device)
# loss_fn = torch.nn.CrossEntropyLoss(weight=weight)

if not is_test:
    # 5-fold cross-validation. The checkpoint with the lowest validation
    # loss across ALL folds/epochs is kept on disk (tracked via min_loss).
    kf = KFold(n_splits=5, shuffle=True, random_state=42)
    epochs = 10

    for fold, (train_idx, val_idx) in enumerate(kf.split(train_input_ids)):
        print(f'Fold {fold + 1}')
        # Slice out this fold's training / validation tensors.
        train_inputs, val_inputs = train_input_ids[train_idx], train_input_ids[val_idx]
        train_masks, val_masks = train_attention_masks[train_idx], train_attention_masks[val_idx]
        train_labels_fold, val_labels_fold = train_labels[train_idx], train_labels[val_idx]

        train_dataset = TensorDataset(train_inputs, train_masks, torch.tensor(train_labels_fold))
        val_dataset = TensorDataset(val_inputs, val_masks, torch.tensor(val_labels_fold))
        train_dataloader = DataLoader(train_dataset, sampler=RandomSampler(train_dataset), batch_size=batch_size)
        val_dataloader = DataLoader(val_dataset, sampler=SequentialSampler(val_dataset), batch_size=batch_size)

        # FIX: re-initialize model/optimizer/scheduler for every fold. The
        # original built them once before the loop, so folds 2-5 kept
        # training a model that had already been trained on (part of) their
        # own validation split — leaking data and inflating the CV metrics.
        model = init_model()
        optimizer = AdamW(model.parameters(), lr=5e-5, no_deprecation_warning=True)  # 5e-5
        # Exact number of optimizer steps for this fold (the original used the
        # rough approximation len(train_input_ids)//batch_size * epochs * 5).
        total_steps = len(train_dataloader) * epochs
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)

        # Train and evaluate for this fold.
        for epoch in range(epochs):
            # ---- training ----
            model.train()
            total_loss = 0
            for step, batch in enumerate(train_dataloader):
                b_input_ids, b_input_mask, b_labels = tuple(t.to(device) for t in batch)
                model.zero_grad()
                outputs = model(b_input_ids, attention_mask=b_input_mask, labels=b_labels)
                loss = outputs.loss
                total_loss += loss.item()
                loss.backward()
                optimizer.step()
                scheduler.step()

            avg_train_loss = total_loss / len(train_dataloader)
            print(f'Epoch {epoch + 1}, Loss: {avg_train_loss}')

            # ---- validation ----
            model.eval()
            val_preds, val_true = [], []
            valid_loss = 0
            for batch in val_dataloader:
                b_input_ids, b_input_mask, b_labels = tuple(t.to(device) for t in batch)
                with torch.no_grad():
                    outputs = model(b_input_ids, attention_mask=b_input_mask, labels=b_labels)
                logits = outputs.logits
                valid_loss += outputs.loss.item()
                val_preds.append(logits.argmax(dim=1).cpu().numpy())
                val_true.append(b_labels.cpu().numpy())

            val_preds = np.concatenate(val_preds)
            val_true = np.concatenate(val_true)

            precision = precision_score(val_true, val_preds)
            recall = recall_score(val_true, val_preds)
            f1 = f1_score(val_true, val_preds)
            cm = confusion_matrix(val_true, val_preds)

            print(f'Epoch {epoch + 1}, valid_loss: {valid_loss:.4f}, Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}')
            print(f'confusion matrix: {cm}')

            # Checkpoint on the lowest summed validation loss; KFold produces
            # (near-)equal-sized folds, so the comparison is fair across folds.
            if valid_loss < min_loss:
                min_loss = valid_loss
                torch.save(model.state_dict(), save_path)
                print('is stored')

# Prediction: load the best checkpoint and classify the test set.
if is_test:
    # FIX: in test-only mode the training branch is skipped, so `model` was
    # never created and the original crashed here with a NameError.
    model = init_model()
# map_location keeps the load working when the checkpoint was saved on GPU
# but this run is on CPU (or a different GPU index).
model.load_state_dict(torch.load(save_path, map_location=device))
model.eval()
predictions = []
for batch in test_dataloader:
    b_input_ids, b_input_mask = tuple(t.to(device) for t in batch)
    with torch.no_grad():
        outputs = model(b_input_ids, attention_mask=b_input_mask)
    logits = outputs.logits
    predictions.append(logits.argmax(dim=1).cpu().numpy())
predictions = np.concatenate(predictions)

# Write the submission file (columns: id, label).
test_data['label'] = predictions
test_data[['id', 'label']].to_csv(f'final_result3.csv', index=False)
print('success')