import torch
from torch.utils.data import Dataset, DataLoader
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification, DistilBertTokenizerFast, \
    BertTokenizerFast
from torch.optim import AdamW
from sklearn.preprocessing import LabelEncoder
import json
import numpy as np  # 添加numpy导入

# ----------------------
# 第一步：数据预处理
# ----------------------

# 加载数据
# with open('../datasets/emergency_calls_1000.json', 'r', encoding='utf-8') as f:
#     data = json.load(f)
#
# # 提取所有可能的标签
# all_intents = set()
# all_ner_tags = set()
# for item in data:
#     all_intents.add(item['intent'])
#     for entity in item['entities']:
#         all_ner_tags.add(entity['label'])
#
# # 编码标签
# intent_encoder = LabelEncoder()
# intent_encoder.fit(list(all_intents))
#
# ner_tag_encoder = LabelEncoder()
# ner_tag_encoder.fit(list(all_ner_tags))

# Revised data-preprocessing code.
# NOTE(review): numpy and LabelEncoder are already imported at the top of the
# file — these re-imports are redundant (though harmless).
import numpy as np
from sklearn.preprocessing import LabelEncoder

# Hugging Face model identifier, shared by MultiTaskDistilBERT, train() and test().
model_path = "distilbert-base-multilingual-cased"
# Make sure every record carries usable labels.
def clean_labels(data):
    """Fill in missing or empty labels in place and return the same list.

    Records with a falsy or absent ``intent`` get the fallback class
    ``'Unknown'``; entities with a falsy or absent ``label`` get ``'O'``
    (the non-entity tag). Generalized over the original: records missing
    the ``intent``/``entities``/``label`` keys no longer raise ``KeyError``.

    Args:
        data: list of dicts with optional 'intent' and 'entities' keys.

    Returns:
        The same list object, mutated in place.
    """
    for item in data:
        if not item.get('intent'):
            item['intent'] = 'Unknown'  # fallback class for empty/missing intents
        for entity in item.get('entities', []):
            if not entity.get('label'):
                entity['label'] = 'O'  # 'O' marks a non-entity token
    return data

# Load and clean the dataset.
# NOTE(review): this runs at import time and requires the JSON file to exist —
# consider moving it under the __main__ guard.
with open('../datasets/emergency_calls_1000.json', 'r', encoding='utf-8') as f:
    raw_data = json.load(f)
data = clean_labels(raw_data)

# Collect every label that occurs in the data.
all_intents = set()
all_ner_tags = {'O'}  # always include the non-entity tag 'O'
for item in data:
    all_intents.add(item['intent'])
    for entity in item['entities']:
        all_ner_tags.add(entity['label'])

# Fit the label encoders (sklearn LabelEncoder has no unknown-class handling,
# so the fallback class 'Unknown' is included explicitly).
intent_encoder = LabelEncoder()
intent_encoder.fit(list(all_intents) + ['Unknown'])  # explicitly include the fallback class

# NOTE(review): train() and test() rebuild their own encoders from the raw
# (uncleaned) data and never use these module-level ones — confirm which set
# is authoritative.
ner_tag_encoder = LabelEncoder()
ner_tag_encoder.fit(list(all_ner_tags))


# ----------------------
# 第二步：自定义数据集类
# ----------------------

class EmergencyCallDataset(Dataset):
    """Dataset pairing each emergency-call text with an integer intent label
    and a fixed-length sequence of integer BIO NER labels."""

    def __init__(self, data, tokenizer, intent_encoder, ner_tag_encoder, max_length=128):
        # data: list of dicts with 'text', 'intent' and 'entities' keys.
        # tokenizer: a HuggingFace tokenizer (used for both encoding and tokenize()).
        # intent_encoder / ner_tag_encoder: fitted sklearn LabelEncoders.
        self.data = data
        self.tokenizer = tokenizer
        self.intent_encoder = intent_encoder
        self.ner_tag_encoder = ner_tag_encoder
        self.max_length = max_length  # fixed sequence length after padding/truncation

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        text = item['text']

        # Tokenize with fixed-length padding so every sample has the same shape.
        encoding = self.tokenizer(
            text,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        # Intent: a single class index.
        intent = self.intent_encoder.transform([item['intent']])[0]

        # Build BIO tags over the word-piece tokens; reserve 2 slots for [CLS]/[SEP].
        tokens = self.tokenizer.tokenize(text)[:self.max_length - 2]
        ner_labels = ['O'] * len(tokens)

        for entity in item['entities']:
            entity_tokens = self.tokenizer.tokenize(entity['word'])
            # Label only the FIRST occurrence of the entity's token sequence.
            # NOTE(review): substring matching on token lists can mis-align when
            # the same word-piece sequence appears earlier in the text; a fast
            # tokenizer's offset mapping would be more robust — confirm this
            # approximation is acceptable.
            for i in range(len(tokens) - len(entity_tokens) + 1):
                if tokens[i:i + len(entity_tokens)] == entity_tokens:
                    full_label = entity['label']
                    ner_labels[i] = f"B-{full_label}"
                    for j in range(1, len(entity_tokens)):
                        ner_labels[i + j] = f"I-{full_label}"
                    break

        # Special tokens ([CLS]/[SEP]) get the 'O' tag.
        # (Fix: removed two encoded-label locals that were computed but never used.)
        ner_labels_with_special_tokens = ['O'] + ner_labels + ['O']

        # Map any tag missing from the encoder's vocabulary to 'O' so that
        # LabelEncoder.transform cannot raise ValueError on unseen labels.
        valid_labels = [
            label if label in self.ner_tag_encoder.classes_ else 'O'
            for label in ner_labels_with_special_tokens
        ]

        ner_labels_encoded = self.ner_tag_encoder.transform(valid_labels)

        # Pad (or truncate) the label sequence to max_length with the 'O' id so
        # it lines up element-wise with the padded input_ids. Done on numpy
        # arrays to avoid broadcasting pitfalls with mixed list/array concat.
        padding_length = self.max_length - len(ner_labels_encoded)
        if padding_length > 0:
            o_label = self.ner_tag_encoder.transform(['O'])[0]
            padding = np.full(padding_length, o_label)
            ner_labels_final = np.concatenate([ner_labels_encoded, padding])
        else:
            ner_labels_final = ner_labels_encoded[:self.max_length]

        ner_labels = torch.tensor(ner_labels_final, dtype=torch.long)

        return {
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'intent_label': torch.tensor(intent, dtype=torch.long),
            'ner_labels': ner_labels
        }


# ----------------------
# 第三步：定义多任务模型
# ----------------------

class MultiTaskDistilBERT(torch.nn.Module):
    """DistilBERT with two heads: sequence-level intent classification and
    token-level NER tagging."""

    def __init__(self, num_intents, num_ner_tags):
        super().__init__()
        # Reuse the pretrained sequence-classification wrapper so its intent
        # head (pre_classifier + classifier) comes pre-sized to num_intents.
        self.distilbert = DistilBertForSequenceClassification.from_pretrained(
            model_path,
            num_labels=num_intents
        )
        hidden_size = self.distilbert.config.hidden_size
        # Extra linear head mapping each token's hidden state to NER tag logits.
        self.ner_classifier = torch.nn.Linear(hidden_size, num_ner_tags)

    def forward(self, input_ids, attention_mask, intent_label=None, ner_labels=None):
        """Run both heads.

        Returns a dict with 'intent_logits', 'ner_logits' and 'loss' (the sum
        of both cross-entropy losses when both label tensors are given, else
        None).
        """
        # Call the bare encoder, bypassing the wrapper's own forward().
        outputs = self.distilbert.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask
        )

        # Intent head on the [CLS] token.
        # Fix: apply the ReLU + dropout that DistilBertForSequenceClassification
        # uses between pre_classifier and classifier — the original skipped
        # them, so the reused pretrained head behaved differently here than in
        # its source model.
        pooled_output = outputs.last_hidden_state[:, 0, :]
        pooled_output = self.distilbert.pre_classifier(pooled_output)
        pooled_output = torch.nn.functional.relu(pooled_output)
        pooled_output = self.distilbert.dropout(pooled_output)
        intent_logits = self.distilbert.classifier(pooled_output)

        # NER head on every token's hidden state.
        sequence_output = outputs.last_hidden_state
        ner_logits = self.ner_classifier(sequence_output)

        loss = None
        if intent_label is not None and ner_labels is not None:
            loss_fct = torch.nn.CrossEntropyLoss()
            intent_loss = loss_fct(intent_logits, intent_label)
            # Flatten (batch, seq, tags) -> (batch*seq, tags) for token-level CE.
            ner_loss = loss_fct(ner_logits.view(-1, ner_logits.size(-1)), ner_labels.view(-1))
            loss = intent_loss + ner_loss

        return {
            'loss': loss,
            'intent_logits': intent_logits,
            'ner_logits': ner_logits
        }


def _pad_1d(tensor, target_len):
    """Right-pad a 1-D long tensor with zeros up to target_len (no-op if already long enough)."""
    pad = target_len - len(tensor)
    if pad <= 0:
        return tensor
    return torch.cat([tensor, torch.zeros(pad, dtype=torch.long)])


def collate_fn(batch):
    """Collate sample dicts into a batch, dynamically padding the sequence
    fields to the longest sample in the batch.

    Args:
        batch: list of dicts with 1-D 'input_ids', 'attention_mask',
            'ner_labels' tensors and a scalar 'intent_label' tensor.

    Returns:
        A dict of stacked, right-zero-padded tensors with the same keys.
    """
    max_len = max(len(item['input_ids']) for item in batch)

    # The three sequence fields share identical padding logic (factored into
    # _pad_1d); intent_label is a scalar and only needs stacking.
    return {
        'input_ids': torch.stack([_pad_1d(item['input_ids'], max_len) for item in batch]),
        'attention_mask': torch.stack([_pad_1d(item['attention_mask'], max_len) for item in batch]),
        'intent_label': torch.stack([item['intent_label'] for item in batch]),
        'ner_labels': torch.stack([_pad_1d(item['ner_labels'], max_len) for item in batch]),
    }

# ----------------------
# 第四步：训练循环
# ----------------------

def train():
    """Train the multi-task (intent + NER) model and save its weights to
    ../models/emergency_distilbert_v3.pt."""
    # 1. Device and tokenizer.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer = DistilBertTokenizerFast.from_pretrained(model_path)

    # 2. Load the data and clean it, consistent with the module-level
    #    preprocessing (the original skipped clean_labels here).
    print("Loading data...")
    with open('../datasets/emergency_calls_1000.json', 'r', encoding='utf-8') as f:
        data = json.load(f)
    data = clean_labels(data)

    # 3. Build label encoders. NER tags include the plain label plus the
    #    B-/I- variants that EmergencyCallDataset generates, and 'O'.
    all_intents = set(item['intent'] for item in data)
    all_ner_tags = {'O'}
    for item in data:
        for entity in item['entities']:
            label = entity['label']
            all_ner_tags.add(label)
            all_ner_tags.add(f'B-{label}')
            all_ner_tags.add(f'I-{label}')

    intent_encoder = LabelEncoder()
    intent_encoder.fit(list(all_intents))

    ner_tag_encoder = LabelEncoder()
    ner_tag_encoder.fit(list(all_ner_tags))

    # 4. Dataset.
    dataset = EmergencyCallDataset(
        data=data,
        tokenizer=tokenizer,
        intent_encoder=intent_encoder,
        ner_tag_encoder=ner_tag_encoder,
        max_length=128  # fixed sequence length
    )

    # 5. DataLoader with dynamic-padding collation.
    dataloader = DataLoader(
        dataset,
        batch_size=32,
        shuffle=True,
        collate_fn=collate_fn,
        num_workers=4
    )

    # 6. Model sized to the encoders' class counts.
    print("初始化模型")
    model = MultiTaskDistilBERT(
        num_intents=len(intent_encoder.classes_),
        num_ner_tags=len(ner_tag_encoder.classes_)
    ).to(device)

    # 7. Training loop.
    print("开始训练")
    optimizer = AdamW(model.parameters(), lr=5e-5)

    for epoch in range(10):
        print(f"Epoch {epoch + 1} 开始")  # fixed typo: was "Eponch"
        model.train()
        total_loss = 0.0
        for batch in dataloader:
            batch = {k: v.to(device) for k, v in batch.items()}

            outputs = model(
                input_ids=batch['input_ids'],
                attention_mask=batch['attention_mask'],
                intent_label=batch['intent_label'],
                ner_labels=batch['ner_labels']
            )

            loss = outputs['loss']
            total_loss += loss.item()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        print(f"Epoch {epoch + 1}, Loss: {total_loss / len(dataloader)}")

    # 8. Save weights; create the target directory first so torch.save
    #    doesn't fail with FileNotFoundError on a fresh checkout.
    import os
    os.makedirs('../models', exist_ok=True)
    torch.save(model.state_dict(), '../models/emergency_distilbert_v3.pt')

def test():
    """Load the saved model and run a single sample prediction."""
    # Device and tokenizer (must match the training setup).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # model_path = "bert-base-chinese"
    tokenizer = DistilBertTokenizerFast.from_pretrained(model_path)
    
    # Reload the dataset so the label encoders can be rebuilt.
    with open('../datasets/emergency_calls_1000.json', 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Rebuild encoders exactly as train() does so class indices line up.
    all_intents = set(item['intent'] for item in data)
    all_ner_tags = {'O'}
    for item in data:
        for entity in item['entities']:
            label = entity['label']
            all_ner_tags.add(label)
            all_ner_tags.add(f'B-{label}')
            all_ner_tags.add(f'I-{label}')

    intent_encoder = LabelEncoder()
    intent_encoder.fit(list(all_intents))

    ner_tag_encoder = LabelEncoder()
    ner_tag_encoder.fit(list(all_ner_tags))
    
    # The model must be built with the same label counts as during training,
    # otherwise load_state_dict raises a size-mismatch RuntimeError.
    model = MultiTaskDistilBERT(
        num_intents=len(intent_encoder.classes_),
        num_ner_tags=len(ner_tag_encoder.classes_)
    ).to(device)
    
    # Load trained weights; fail gracefully when absent or incompatible.
    try:
        model.load_state_dict(torch.load('../models/emergency_distilbert_v3.pt', map_location=device))
    except RuntimeError as e:
        print(f"模型加载失败: {e}")
        print("请确保使用相同的训练数据重新训练模型")
        return
    except FileNotFoundError:
        print("模型文件未找到，请先运行训练过程")
        return
    
    model.eval()

    # Run one sample prediction.
    text = "我爷爷在公园突然晕倒了，叫不醒！"
    inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True)

    with torch.no_grad():
        outputs = model(
            input_ids=inputs['input_ids'].to(device),
            attention_mask=inputs['attention_mask'].to(device)
        )

    # Decode predictions back to label names.
    intent_pred = intent_encoder.inverse_transform([torch.argmax(outputs['intent_logits']).item()])[0]
    ner_preds = torch.argmax(outputs['ner_logits'], dim=-1).squeeze().tolist()
    if not isinstance(ner_preds, list):
        ner_preds = [ner_preds]  # squeeze().tolist() yields a bare int for a length-1 sequence
    ner_tags = ner_tag_encoder.inverse_transform(ner_preds)

    print(f"意图: {intent_pred}")
    print("实体识别结果:")
    tokens = tokenizer.tokenize(text)
    for token, tag in zip(tokens, ner_tags[1:len(tokens)+1]):  # skip the [CLS] position
        print(f"{token}: {tag}")

# def check_alignment(sample):
#     tokens = tokenizer.convert_ids_to_tokens(sample['input_ids'])
#     print("Tokens:", tokens)
#     print("NER labels:", sample['ner_labels'])
#     print("Length check:", len(tokens) == len(sample['ner_labels']))
#
# # 测试一个样本
# sample = dataset[0]
# check_alignment(sample)

if __name__ == '__main__':
    # Entry point: run training; on any failure, report the error message
    # and a full traceback instead of crashing silently.
    try:
        train()
        # test()
    except Exception as e:
        import traceback
        print(f"训练失败: {str(e)}")
        traceback.print_exc()
