import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import (
    BertTokenizer,
    BertPreTrainedModel,
    BertModel,
    AdamW
)
from typing import List, Tuple, Dict
from TrainingSample import SAMPLE
# Configuration
MODEL_NAME = 'bert-large-uncased'
# BIO tag set for NER: O plus B-/I- for person, organization, location
ENTITY_LABELS = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
RELATION_LABELS = ["unknown", "belong", "contain", "attribute"]
# Prompt templates for MLM-style relation extraction; [E1]/[E2] are replaced
# with entity surface forms and [MASK] is predicted by the MLM head.
PROMPT_TEMPLATES = [
    "[E1] is the [MASK] of [E2].",
    "[E1]'s relation to [E2] is [MASK].",
    "The relationship between [E1] and [E2] is [MASK]."
]
# Maps each relation label to the single word the model is trained to
# predict at the [MASK] position (the verbalizer).
LABEL_TO_WORD = {
    "unknown": "unknown",
    "belong": "belong",
    "contain": "contain",
    "attribute": "attribute",
}

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 16
EPOCHS = 3
LEARNING_RATE = 2e-5
TASK_WEIGHTS = {"ner": 0.5, "relation": 0.5}  # per-task loss weights

sample_data = SAMPLE
        
# Multi-task model definition
class MultiTaskBert(BertPreTrainedModel):
    """BERT with two heads sharing one encoder:

    * a token-classification head producing per-token logits over
      ``ENTITY_LABELS`` (NER), and
    * an MLM-style vocabulary head used to fill the [MASK] slot of
      relation prompts (relation extraction).
    """

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)  # shared encoder for both tasks

        # NER head: per-token logits over the BIO tag set
        self.ner_classifier = nn.Linear(config.hidden_size, len(ENTITY_LABELS))

        # Relation head (MLM-style): predicts a vocabulary token per position
        self.cls = nn.Linear(config.hidden_size, config.vocab_size)

        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        ner_labels=None,
        relation_input_ids=None,
        relation_attention_mask=None,
        relation_mask_positions=None,
        relation_labels=None
    ):
        # ---- NER branch (shared encoder) ----
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        sequence_output = outputs.last_hidden_state
        ner_logits = self.ner_classifier(sequence_output)

        ner_loss = None
        if ner_labels is not None:
            # Default ignore_index=-100 means label positions padded with -100
            # are excluded from the loss.
            loss_fct = nn.CrossEntropyLoss()
            ner_loss = loss_fct(
                ner_logits.view(-1, len(ENTITY_LABELS)),
                ner_labels.view(-1)
            )

        # ---- relation branch ----
        relation_loss = None
        prediction_scores = None
        if relation_input_ids is not None:
            # Flatten (batch, num_prompts, seq_len) -> (batch*num_prompts, seq_len)
            batch_size, num_prompts, seq_len = relation_input_ids.size()
            flat_input_ids = relation_input_ids.view(-1, seq_len)
            flat_attention_mask = relation_attention_mask.view(-1, seq_len)

            relation_outputs = self.bert(
                input_ids=flat_input_ids,
                attention_mask=flat_attention_mask
            )
            prediction_scores = self.cls(relation_outputs.last_hidden_state)
            # (batch, num_prompts, seq_len, vocab)
            prediction_scores = prediction_scores.view(batch_size, num_prompts, seq_len, -1)

            # BUG FIX: only compute the loss when labels are supplied, so the
            # relation branch no longer crashes at inference time (the original
            # subscripted relation_labels unconditionally).
            if relation_labels is not None:
                flat_scores = prediction_scores.view(batch_size * num_prompts, seq_len, -1)
                positions = relation_mask_positions.view(-1)
                row_idx = torch.arange(positions.size(0), device=flat_scores.device)
                # Gather the logits at each prompt's [MASK] position in one
                # vectorized step (replaces the original per-sample Python
                # double loop; the mean CE over all (sample, prompt) pairs is
                # numerically identical).
                mask_logits = flat_scores[row_idx, positions]  # (batch*num_prompts, vocab)
                relation_loss = nn.CrossEntropyLoss()(mask_logits, relation_labels.view(-1))

        # ---- combine losses ----
        # BUG FIX: fall back to the single available loss instead of returning
        # None when a batch carries labels for only one of the two tasks.
        total_loss = None
        if ner_loss is not None and relation_loss is not None:
            total_loss = TASK_WEIGHTS["ner"] * ner_loss + TASK_WEIGHTS["relation"] * relation_loss
        elif ner_loss is not None:
            total_loss = ner_loss
        elif relation_loss is not None:
            total_loss = relation_loss

        return {
            "loss": total_loss,
            "ner_logits": ner_logits,
            # Empty tensor kept as the default so callers that index into this
            # unconditionally do not hit None.
            "relation_logits": prediction_scores if prediction_scores is not None else torch.tensor([])
        }

# Multi-task dataset
class MultiTaskDataset(Dataset):
    """Turns raw samples into tensors for joint NER + relation training.

    Each item in ``data`` must provide:
        text:        the input sentence,
        ner_labels:  list of int tag ids (assumed aligned to the tokenizer's
                     wordpiece sequence incl. special tokens -- TODO confirm
                     against the data producer),
        entity_pair: (e1, e2) entity surface strings,
        relation:    one of ``RELATION_LABELS``.
    """

    MAX_LEN = 128  # shared max sequence length for both tasks

    def __init__(self, data: "List[Dict]", tokenizer: "BertTokenizer"):
        self.data = data
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        max_len = self.MAX_LEN

        # ---- NER inputs ----
        ner_encoding = self.tokenizer(
            item["text"],
            padding="max_length",
            truncation=True,
            max_length=max_len,
            return_tensors="pt",
            return_token_type_ids=False,
        )
        input_ids = ner_encoding["input_ids"].squeeze(0)        # (max_len,)
        attention_mask = ner_encoding["attention_mask"].squeeze(0)

        # BUG FIX: truncate labels longer than max_len (the original produced
        # a tensor whose length no longer matched input_ids), and pad with
        # -100 instead of 0 so CrossEntropyLoss (default ignore_index=-100)
        # skips padding positions rather than training them toward the "O" tag.
        ner_labels = list(item["ner_labels"])[:max_len]
        ner_labels += [-100] * (max_len - len(ner_labels))

        # ---- relation inputs ----
        e1, e2 = item["entity_pair"]
        target_word = LABEL_TO_WORD[item["relation"]]

        # One filled-in prompt per template
        prompts = [
            template.replace("[E1]", e1).replace("[E2]", e2)
            for template in PROMPT_TEMPLATES
        ]
        relation_encodings = self.tokenizer(
            prompts,
            padding="max_length",
            truncation=True,
            max_length=max_len,
            return_tensors="pt",
            return_token_type_ids=False,
        )

        # Locate the [MASK] in each prompt (each template contains exactly one;
        # .item() will raise loudly if that invariant is violated).
        mask_token_id = self.tokenizer.mask_token_id
        mask_positions = [
            (prompt_input_ids == mask_token_id).nonzero(as_tuple=True)[0].item()
            for prompt_input_ids in relation_encodings["input_ids"]
        ]

        # First wordpiece of the verbalized label is the MLM target token.
        target_id = self.tokenizer.encode(target_word, add_special_tokens=False)[0]

        return {
            # NER tensors
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "ner_labels": torch.tensor(ner_labels),

            # Relation tensors
            "relation_input_ids": relation_encodings["input_ids"],            # (num_prompts, seq_len)
            "relation_attention_mask": relation_encodings["attention_mask"],  # (num_prompts, seq_len)
            "relation_mask_positions": torch.tensor(mask_positions),          # (num_prompts,)
            "relation_labels": torch.tensor([target_id] * len(PROMPT_TEMPLATES))  # (num_prompts,)
        }
# Training loop
def train():
    """Fine-tune the multi-task model on ``sample_data``.

    Returns the trained (model, tokenizer) pair.
    """
    tokenizer = BertTokenizer.from_pretrained(MODEL_NAME)
    model = MultiTaskBert.from_pretrained(MODEL_NAME).to(DEVICE)
    optimizer = AdamW(model.parameters(), lr=LEARNING_RATE)

    dataloader = DataLoader(
        MultiTaskDataset(sample_data, tokenizer),
        batch_size=BATCH_SIZE,
        shuffle=True,
    )

    model.train()
    for epoch in range(EPOCHS):
        total_loss = 0.0
        for batch in dataloader:
            # Every collated field is a tensor destined for the model, so one
            # comprehension moves the whole batch (NER + relation inputs) to
            # the target device.
            inputs = {name: tensor.to(DEVICE) for name, tensor in batch.items()}

            # Forward pass -> combined weighted loss
            loss = model(**inputs)["loss"]

            # Backward pass and parameter update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        print(f"Epoch {epoch+1} - Loss: {total_loss/len(dataloader):.4f}")

    return model, tokenizer

# Prediction
def predict(model, tokenizer, text: str):
    """Predict named entities in ``text`` and relations between adjacent ones.

    Returns the list of (entity_string, entity_type) tuples; relations are
    printed as a side effect (interface kept from the original).
    """
    model.eval()

    # 1. Named-entity recognition
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=128,
        return_token_type_ids=False
    )
    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)
    ner_logits = outputs["ner_logits"]
    predicted_labels = torch.argmax(ner_logits, dim=2).squeeze().tolist()

    # Decode BIO tags into (entity, type) spans.
    # NOTE(review): special tokens ([CLS]/[SEP]) and wordpiece "##" prefixes
    # are decoded verbatim here -- consider filtering/merging; TODO confirm.
    entities = []
    current_entity = ""
    current_label = ""
    for token_pos, label_id in enumerate(predicted_labels):
        label = ENTITY_LABELS[label_id]
        if label.startswith("B-"):
            if current_entity:
                entities.append((current_entity, current_label))
            current_entity = tokenizer.decode(inputs["input_ids"][0][token_pos])
            current_label = label[2:]
        elif label.startswith("I-") and current_label == label[2:]:
            current_entity += tokenizer.decode(inputs["input_ids"][0][token_pos])
        else:
            if current_entity:
                entities.append((current_entity, current_label))
            current_entity = ""
            current_label = ""
    # BUG FIX: flush the final span -- the original dropped an entity that
    # ran to the end of the sequence.
    if current_entity:
        entities.append((current_entity, current_label))

    # 2. Relation extraction between adjacent entity pairs
    if len(entities) >= 2:
        for i in range(len(entities) - 1):
            e1, type1 = entities[i]
            e2, type2 = entities[i + 1]

            predictions = []
            for template in PROMPT_TEMPLATES:
                prompt = template.replace("[E1]", e1).replace("[E2]", e2)
                prompt_inputs = tokenizer(
                    prompt,
                    return_tensors="pt",
                    padding=True,
                    truncation=True,
                    max_length=128,
                    return_token_type_ids=False
                )
                prompt_inputs = {k: v.to(DEVICE) for k, v in prompt_inputs.items()}

                # Locate the [MASK] slot in the filled-in prompt
                mask_token_id = tokenizer.mask_token_id
                mask_position = (prompt_inputs["input_ids"] == mask_token_id).nonzero(as_tuple=True)[1].item()

                # BUG FIX: the original called model(**inputs) without any
                # relation_* inputs, so "relation_logits" was always the empty
                # fallback tensor and no relation was ever predicted. Run the
                # shared encoder and the MLM head directly instead.
                with torch.no_grad():
                    hidden = model.bert(**prompt_inputs).last_hidden_state
                    logits = model.cls(hidden)  # (1, seq_len, vocab)

                mask_logits = logits[0, mask_position]
                predicted_token_id = torch.argmax(mask_logits).item()
                predicted_word = tokenizer.decode(predicted_token_id)

                # Map the predicted verbalizer word back to a relation label
                for relation, word in LABEL_TO_WORD.items():
                    if word == predicted_word:
                        predictions.append(relation)
                        break

            # Majority vote across prompt templates
            if predictions:
                relation = max(set(predictions), key=predictions.count)
                print(f"Relation between {e1} ({type1}) and {e2} ({type2}): {relation}")

    return entities

# Entry point
if __name__ == "__main__":
    # Train the multi-task model on the bundled sample data
    model, tokenizer = train()
    
    # Smoke-test prediction on a single sentence
    test_text = "Bob is an employee of the company."
    entities = predict(model, tokenizer, test_text)
    print("Extracted entities:", entities)