from transformers import AutoTokenizer, DebertaV2Tokenizer
import logging
import os
from pathlib import Path
import numpy as np
import onnxruntime as ort
import json

# Configure logging for this module.
# NOTE(review): calling basicConfig at import time configures the process-wide
# root logger — confirm this is intended if this file is used as a library.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class NERService:
    """
    Medical named-entity recognition (NER) service.

    Runs a local ONNX token-classification model over medical text and
    decodes the BIO-tagged output into entity spans, with optional merging
    of biological structures with adjacent symptoms/diseases.
    """

    def __init__(self, local_model_path=None):
        """
        Load the ONNX session, the tokenizer and the id->label mapping.

        Args:
            local_model_path: Directory containing the model artifacts
                (onnx/model.onnx, tokenizer files, config.json).  When None,
                falls back to the NER_MODEL_PATH environment variable, then
                to a built-in default path.

        Raises:
            RuntimeError: If the model or tokenizer cannot be loaded.
        """
        if local_model_path is None:
            local_model_path = os.environ.get("NER_MODEL_PATH", "/appslog/NER/Medical-NER-ONNX")

        # Normalise Path objects etc. to a plain string for os.path / HF APIs.
        local_model_path = str(local_model_path)
        logger.info(f"加载本地NER模型: {local_model_path}")

        try:
            self.onnx_model_path = os.path.join(local_model_path, "onnx", "model.onnx")
            logger.info(f"加载ONNX模型: {self.onnx_model_path}")

            if not os.path.exists(self.onnx_model_path):
                error_msg = f"ONNX模型文件不存在：{self.onnx_model_path}"
                logger.error(error_msg)
                raise FileNotFoundError(error_msg)

            # ONNX Runtime inference session over the exported model.
            self.ort_session = ort.InferenceSession(str(self.onnx_model_path))

            logger.info(f"加载分词器: {local_model_path}")
            self.tokenizer = self._load_tokenizer(local_model_path)

            # id -> label mapping; keys are strings, matching config.json.
            self.id2label = self._load_label_map(local_model_path)

            logger.info("ONNX模型和分词器加载成功")
        except Exception as e:
            error_msg = f"加载ONNX模型失败: {str(e)}"
            logger.error(error_msg)
            # BUGFIX: chain the original exception so the root cause and its
            # traceback are preserved for callers.
            raise RuntimeError(error_msg) from e

    def _load_tokenizer(self, local_model_path):
        """Load the tokenizer: DebertaV2Tokenizer first, AutoTokenizer fallback.

        Raises:
            RuntimeError: If neither tokenizer can be loaded.
        """
        try:
            tokenizer = DebertaV2Tokenizer.from_pretrained(
                local_model_path,
                local_files_only=True
            )
            logger.info("使用DebertaV2Tokenizer加载分词器成功")
            return tokenizer
        except Exception as e:
            logger.warning(f"DebertaV2Tokenizer加载失败: {str(e)}，尝试使用AutoTokenizer")

        try:
            # Log whether the SentencePiece model file is present, for debugging.
            spm_model_path = os.path.join(local_model_path, "spm.model")
            if os.path.exists(spm_model_path):
                logger.info(f"找到spm.model文件: {spm_model_path}")
            else:
                logger.warning(f"未找到spm.model文件: {spm_model_path}")

            tokenizer = AutoTokenizer.from_pretrained(
                local_model_path,
                local_files_only=True,
                use_fast=False  # slow tokenizer avoids tokenizers-lib compat issues
            )
            logger.info("使用AutoTokenizer加载分词器成功")
            return tokenizer
        except Exception as e2:
            logger.error(f"AutoTokenizer加载失败: {str(e2)}")
            # Dump the directory listing to help diagnose missing files.
            try:
                logger.info(f"目录内容: {os.listdir(local_model_path)}")
            except Exception:
                logger.error(f"无法列出目录内容: {local_model_path}")

            error_msg = f"所有分词器加载方式均失败: {str(e2)}"
            logger.error(error_msg)
            # BUGFIX: chain the underlying tokenizer failure.
            raise RuntimeError(error_msg) from e2

    def _load_label_map(self, local_model_path):
        """Load the id->label mapping from config.json, falling back to defaults."""
        config_path = os.path.join(local_model_path, "config.json")
        if not os.path.exists(config_path):
            logger.warning("未找到config.json，使用默认标签映射")
            return self._get_default_labels()

        try:
            with open(config_path, "r", encoding="utf-8") as f:
                config = json.load(f)
        except Exception as e:
            logger.warning(f"读取config.json失败: {str(e)}，使用默认标签映射")
            return self._get_default_labels()

        if "id2label" in config:
            id2label = config["id2label"]
            logger.info(f"从config.json加载标签映射: {id2label}")
            return id2label

        logger.warning("config.json中未找到id2label字段，使用默认标签映射")
        return self._get_default_labels()

    def process(self, text, options, term_types):
        """
        Identify medical entities in `text`.

        Args:
            text: Input text.
            options: Processing flags, e.g. `combineBioStructure` to merge
                biological structures with adjacent symptoms/diseases.
            term_types: Flags selecting which entity types to return
                (`allMedicalTerms`, `symptom`, `disease`,
                `therapeuticProcedure`).

        Returns:
            Dict with the original `text` and the filtered `entities` list;
            on failure, an empty entity list plus an `error` message.
        """
        try:
            # Raw model entities -> optional merging -> de-overlap -> filter.
            result = self._onnx_inference(text)
            combined_result = self._combine_entities(result, text, options)
            non_overlapping_result = self._remove_overlapping_entities(combined_result)
            filtered_result = self._filter_entities(non_overlapping_result, term_types)

            return {
                "text": text,
                "entities": filtered_result
            }
        except Exception as e:
            logger.error(f"NER处理失败: {str(e)}")
            return {
                "text": text,
                "entities": [],
                "error": str(e)
            }

    def _onnx_inference(self, text):
        """
        Run the ONNX model on `text` and decode BIO tags into entities.

        Args:
            text: Input text.

        Returns:
            List of entity dicts with keys entity_group, word, start, end,
            score.  `start`/`end` are -1 when the decoded entity string
            cannot be located back in the original input.
        """
        inputs = self.tokenizer(text, return_tensors="np")

        ort_inputs = {
            "input_ids": inputs["input_ids"].astype(np.int64),
            "attention_mask": inputs["attention_mask"].astype(np.int64)
        }

        # The exported model always expects token_type_ids.
        if "token_type_ids" in inputs:
            ort_inputs["token_type_ids"] = inputs["token_type_ids"].astype(np.int64)
        else:
            # Tokenizer produced no token_type_ids: feed an all-zero array.
            ort_inputs["token_type_ids"] = np.zeros_like(inputs["input_ids"]).astype(np.int64)
            logger.info(f"手动创建token_type_ids，形状: {ort_inputs['token_type_ids'].shape}")

        try:
            # Only feed the inputs the model actually declares.
            model_inputs = self.ort_session.get_inputs()
            input_names = [model_input.name for model_input in model_inputs]
            logger.info(f"模型期望的输入: {input_names}")
            logger.info(f"提供的输入: {list(ort_inputs.keys())}")

            final_inputs = {}
            for name in input_names:
                if name in ort_inputs:
                    final_inputs[name] = ort_inputs[name]
                else:
                    logger.warning(f"模型需要输入 '{name}'，但未提供。")

            ort_outputs = self.ort_session.run(None, final_inputs)
        except Exception as e:
            logger.error(f"ONNX推理失败: {str(e)}")
            logger.error(f"输入形状 - input_ids: {inputs['input_ids'].shape}, attention_mask: {inputs['attention_mask'].shape}")
            # BUGFIX: bare raise preserves the original traceback.
            raise

        # logits shape is (batch=1, seq_len, num_labels) per the indexing below.
        logits = ort_outputs[0]
        predictions = np.argmax(logits, axis=-1)

        # Numerically stable softmax for per-token confidence scores.
        shifted = logits - logits.max(axis=-1, keepdims=True)
        exp_logits = np.exp(shifted)
        probs = exp_logits / exp_logits.sum(axis=-1, keepdims=True)

        entities = []
        tokens = self.tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
        special_tokens = {self.tokenizer.cls_token, self.tokenizer.sep_token, self.tokenizer.pad_token}

        search_pos = 0  # cursor into `text` so repeated entities map to successive occurrences
        i = 0
        while i < len(tokens):
            if tokens[i] in special_tokens:
                i += 1
                continue

            label = self.id2label.get(str(predictions[0][i]))
            if label is None or not label.startswith("B-"):
                i += 1
                continue

            # "B-XXX" marks an entity start; consume the following "I-XXX" tokens.
            entity_type = label[2:]
            j = i + 1
            while (j < len(tokens)
                   and self.id2label.get(str(predictions[0][j])) == f"I-{entity_type}"):
                j += 1

            entity_text = self.tokenizer.convert_tokens_to_string(
                tokens[i:j]).replace(" ##", "").replace("##", "").strip()

            # BUGFIX: the old code searched for the token inside the decoded
            # text of the *preceding* tokens, which almost always returned -1.
            # Locate the decoded entity in the original input instead.
            start = text.find(entity_text, search_pos)
            if start == -1:
                # The tokenizer may normalise whitespace; retry from the top.
                start = text.find(entity_text)
            if start == -1:
                end = -1  # span could not be mapped back onto `text`
            else:
                end = start + len(entity_text)
                search_pos = max(search_pos, end)

            entities.append({
                "entity_group": entity_type,
                "word": entity_text,
                "start": start,
                "end": end,
                # BUGFIX: mean max-class softmax probability over the entity's
                # tokens (previously hardcoded to 1.0, which defeated the
                # score-based overlap resolution downstream).
                "score": float(np.mean(probs[0, i:j].max(axis=-1)))
            })
            i = j

        return entities

    def _combine_entities(self, result, text, options):
        """
        Optionally merge biological-structure entities with adjacent
        symptom/disease entities.

        NOTE(review): when a merge happens, the standalone
        BIOLOGICAL_STRUCTURE entity stays in the output next to the combined
        one; the overlapping duplicate is resolved later by
        _remove_overlapping_entities.
        """
        combined_result = []
        i = 0
        while i < len(result):
            entity = result[i]
            # Normalise numpy scalars etc. to plain Python floats.
            entity['score'] = float(entity['score'])

            if options.get('combineBioStructure', False) and entity['entity_group'] in ['SIGN_SYMPTOM', 'DISEASE_DISORDER']:
                combined_entity = self._try_combine_with_bio_structure(result, i, text)
                if combined_entity:
                    combined_result.append(combined_entity)
                    i += 1
                    continue
            combined_result.append(entity)
            i += 1
        return combined_result

    def _try_combine_with_bio_structure(self, result, i, text):
        """
        Merge entity `i` with an adjacent BIOLOGICAL_STRUCTURE neighbour.

        Returns the combined entity, or None when neither neighbour is a
        biological structure.
        """
        # Prefer the preceding neighbour, then the following one.
        if i > 0 and result[i-1]['entity_group'] == 'BIOLOGICAL_STRUCTURE':
            return self._create_combined_entity(result[i-1], result[i], text)
        elif i < len(result) - 1 and result[i+1]['entity_group'] == 'BIOLOGICAL_STRUCTURE':
            return self._create_combined_entity(result[i], result[i+1], text)
        return None

    def _create_combined_entity(self, entity1, entity2, text):
        """
        Build a COMBINED_BIO_SYMPTOM entity spanning both input entities.

        The word is sliced from `text` so any characters between the two
        entities are included; the score is the mean of both scores.
        """
        start = min(entity1['start'], entity2['start'])
        end = max(entity1['end'], entity2['end'])
        word = text[start:end]
        return {
            'entity_group': 'COMBINED_BIO_SYMPTOM',
            'word': word,
            'start': start,
            'end': end,
            'score': (entity1['score'] + entity2['score']) / 2,
            'original_entities': [entity1, entity2]
        }

    def _remove_overlapping_entities(self, entities):
        """
        Drop overlapping entities, keeping the best-scoring span.

        Sort order (start asc, end desc, score desc) makes the longest,
        highest-scoring span at each position come first.
        NOTE(review): an overlapping entity whose end extends past the last
        accepted span is still kept (partial overlap) — confirm intended.
        """
        sorted_entities = sorted(entities, key=lambda x: (x['start'], -x['end'], -x['score']))
        non_overlapping = []
        last_end = -1

        i = 0
        while i < len(sorted_entities):
            current = sorted_entities[i]

            # Non-overlapping with everything accepted so far: keep it.
            if current['start'] >= last_end:
                non_overlapping.append(current)
                last_end = current['end']
                i += 1
            else:
                # Collect entities sharing the exact same span as `current`.
                same_span = [current]
                j = i + 1
                while j < len(sorted_entities) and sorted_entities[j]['start'] == current['start'] and sorted_entities[j]['end'] == current['end']:
                    same_span.append(sorted_entities[j])
                    j += 1

                # Keep the best of the duplicates only if it extends coverage.
                best_entity = max(same_span, key=lambda x: x['score'])
                if best_entity['end'] > last_end:
                    non_overlapping.append(best_entity)
                    last_end = best_entity['end']

                i = j

        return non_overlapping

    def _filter_entities(self, entities, term_types):
        """
        Keep only the entity groups enabled in `term_types`.

        `allMedicalTerms` keeps everything; otherwise `symptom`, `disease`
        and `therapeuticProcedure` each enable their entity group(s).
        """
        filtered_result = []
        for entity in entities:
            if term_types.get('allMedicalTerms', False):
                filtered_result.append(entity)
            elif (term_types.get('symptom', False) and entity['entity_group'] in ['SIGN_SYMPTOM', 'COMBINED_BIO_SYMPTOM']) or \
                 (term_types.get('disease', False) and entity['entity_group'] == 'DISEASE_DISORDER') or \
                 (term_types.get('therapeuticProcedure', False) and entity['entity_group'] == 'THERAPEUTIC_PROCEDURE'):
                filtered_result.append(entity)
        return filtered_result

    def _get_default_labels(self):
        """Return the default id->label mapping (string keys, BIO scheme)."""
        return {
            "0": "O",
            "1": "B-DISEASE_DISORDER",
            "2": "I-DISEASE_DISORDER",
            "3": "B-SIGN_SYMPTOM",
            "4": "I-SIGN_SYMPTOM",
            "5": "B-BIOLOGICAL_STRUCTURE",
            "6": "I-BIOLOGICAL_STRUCTURE",
            "7": "B-THERAPEUTIC_PROCEDURE",
            "8": "I-THERAPEUTIC_PROCEDURE"
        }




