from transformers import AutoModelForTokenClassification, AutoTokenizer
import logging
import os
import torch
import numpy as np

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class FinanceNERService:
    """Finance named-entity recognition service.

    Loads a token-classification checkpoint directly with the
    ``transformers`` library and recognises finance/fraud-related entities
    in Chinese text, combining rule-based extraction with model inference.
    """

    def __init__(self, local_model_path=None):
        """Load the tokenizer, model and id->label mapping.

        Args:
            local_model_path: Directory holding the local model files.
                When ``None``, the ``FINANCE_NER_MODEL_PATH`` environment
                variable is used, with a hard-coded path as final fallback.

        Raises:
            RuntimeError: If the model or tokenizer cannot be loaded
                (chained to the underlying exception).
        """
        if local_model_path is None:
            local_model_path = os.environ.get(
                "FINANCE_NER_MODEL_PATH",
                "/appslog/NER/nlp_raner_named-entity-recognition_chinese-base-finance",
            )

        logger.info(f"加载本地金融NER模型: {local_model_path}")

        try:
            try:
                # Prefer an explicit BertTokenizer: AutoTokenizer resolution
                # can fail on some locally exported checkpoints.
                from transformers import BertTokenizer
                self.tokenizer = BertTokenizer.from_pretrained(local_model_path, local_files_only=True)
                logger.info("成功使用BertTokenizer加载分词器")
            except Exception as e:
                logger.warning(f"加载BertTokenizer失败: {e}")
                # Fall back to AutoTokenizer.
                self.tokenizer = AutoTokenizer.from_pretrained(local_model_path, local_files_only=True)
                logger.info("使用AutoTokenizer加载分词器")

            self.model = AutoModelForTokenClassification.from_pretrained(local_model_path, local_files_only=True)
            logger.info("Transformers模型和分词器加载成功")

            # Label mapping keyed by STRING ids, so lookups with
            # str(pred.item()) in _predict always hit.
            self.id2label = {}
            if hasattr(self.model.config, "id2label") and self.model.config.id2label:
                self.id2label = self.model.config.id2label
                # Configs may deserialize with int keys; normalise to str.
                if not all(isinstance(k, str) for k in self.id2label.keys()):
                    self.id2label = {str(k): v for k, v in self.id2label.items()}
                logger.info(f"从模型配置中加载标签映射: {self.id2label}")
            else:
                # Fallback mapping used when the config ships no labels.
                self.id2label = {
                    "0": "O",
                    "1": "B-COMPANY",
                    "2": "I-COMPANY",
                    "3": "B-POSITION",
                    "4": "I-POSITION",
                    "5": "B-PRODUCT",
                    "6": "I-PRODUCT",
                    "7": "B-MARKET",
                    "8": "I-MARKET"
                }
                logger.info(f"使用默认标签映射: {self.id2label}")
        except Exception as e:
            error_msg = f"加载Transformers模型失败: {str(e)}"
            logger.error(error_msg)
            # Chain the original exception so the root cause is preserved.
            raise RuntimeError(error_msg) from e

    def process(self, text, options, term_types):
        """Process input text and recognise financial entities.

        Args:
            text: Input text.
            options: Processing options (currently unused; kept for API
                compatibility with callers).
            term_types: Mapping of frontend entity-type flags used to
                filter the recognised entities.

        Returns:
            Dict with the original ``text`` and the filtered ``entities``;
            on failure ``entities`` is empty and an ``error`` key is added.
        """
        try:
            # Run the hybrid extractor, then filter by requested types.
            entities = self._predict(text)
            filtered_result = self._filter_entities(entities, term_types)
            return {
                "text": text,
                "entities": filtered_result
            }
        except Exception as e:
            logger.error(f"金融NER处理失败: {str(e)}")
            return {
                "text": text,
                "entities": [],
                "error": str(e)
            }

    def _predict(self, text):
        """Hybrid prediction combining rules and model inference.

        Rule-based extraction is tried first; if it yields anything it is
        returned directly. Otherwise the model runs and its token/label
        alignment is logged for diagnostics — the returned result still
        comes from the rule extractor (model labels are diagnostic only).

        Args:
            text: Input text.

        Returns:
            List of entity dicts.
        """
        logger.info(f"开始处理文本: {text}")

        try:
            rule_entities = self._extract_by_rules(text)
            if rule_entities:
                logger.info(f"使用规则提取的实体: {rule_entities}")
                return rule_entities

            # Tokenise the text for the model.
            inputs = self.tokenizer(
                text,
                return_tensors="pt",
                padding=True,
                truncation=True
            )
            logger.info(f"模型输入键: {inputs.keys()}")

            # Forward pass without gradient tracking.
            with torch.no_grad():
                outputs = self.model(**inputs)

            # Per-token predicted label ids.
            predictions = torch.argmax(outputs.logits, dim=2)

            tokens = self.tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
            token_predictions = [self.id2label.get(str(pred.item()), "O") for pred in predictions[0]]

            # Align original characters with WordPiece tokens.
            aligned_tokens = []
            aligned_preds = []
            char_index = 0
            token_idx = 1  # skip the leading [CLS]

            while char_index < len(text) and token_idx < len(tokens):
                if tokens[token_idx] == "[SEP]" or tokens[token_idx] == "[PAD]":
                    break

                # Strip the '##' continuation marker of WordPiece subwords.
                current_token = tokens[token_idx].replace("##", "")

                # Single character, or the subword matches the text here.
                if len(current_token) == 1 or current_token in text[char_index:char_index + len(current_token)]:
                    aligned_tokens.append(current_token)
                    aligned_preds.append(token_predictions[token_idx])
                    char_index += len(current_token)
                    token_idx += 1
                else:
                    # Token cannot be aligned with the text; skip it.
                    logger.warning(f"无法对齐token: {current_token} 与文本位置: {char_index}")
                    token_idx += 1

            # Log the alignment for debugging purposes.
            alignment_pairs = [f"{token}={label}" for token, label in zip(aligned_tokens, aligned_preds)]
            logger.info(f"字符与标签对齐: {' '.join(alignment_pairs)}")

            # Model labels are only logged; fall back to rule extraction.
            return self._extract_by_rules(text)

        except Exception as e:
            logger.error(f"金融NER处理失败: {str(e)}")
            # Log the full stack trace for diagnosis.
            import traceback
            logger.error(traceback.format_exc())
            # Best-effort: still try the rules when the model path fails.
            return self._extract_by_rules(text)

    @staticmethod
    def _find_all(text, needle):
        """Yield the start index of every non-overlapping occurrence of *needle*."""
        start = text.find(needle)
        while start != -1:
            yield start
            start = text.find(needle, start + len(needle))

    def _extract_by_rules(self, text):
        """Extract entities with hand-written rules.

        Args:
            text: Input text.

        Returns:
            Deduplicated list of entity dicts with keys ``entity_group``,
            ``word``, ``start``, ``end`` and ``score`` (1.0 for rule hits).
        """
        import re

        entities = []

        # 1. Monetary amounts (number followed by a currency unit).
        money_pattern = r'(\d+(?:\.\d+)?)\s*(元|块钱|块|人民币|RMB|￥)'
        for match in re.finditer(money_pattern, text):
            entities.append({
                "entity_group": "资损金额",
                "word": match.group(),
                "start": match.start(),
                "end": match.end(),
                "score": 1.0
            })

        # 2. Payment channels — record EVERY occurrence (the previous
        # implementation only flagged the first mention of each channel).
        payment_channels = ["支付宝", "微信支付", "微信", "花呗", "借呗", "信用卡", "储蓄卡", "银行卡",
                            "余额宝", "微粒贷", "云闪付", "PayPal", "京东白条", "苏宁金融"]
        for channel in payment_channels:
            for start in self._find_all(text, channel):
                entities.append({
                    "entity_group": "支付渠道",
                    "word": channel,
                    "start": start,
                    "end": start + len(channel),
                    "score": 1.0
                })

        # 3. Possible order numbers: standalone digit runs of length >= 4.
        order_pattern = r'(?<!\w)(\d{4,})(?!\w)'
        for match in re.finditer(order_pattern, text):
            # Skip digit runs already covered by a recognised amount.
            is_part_of_amount = any(
                entity["entity_group"] == "资损金额"
                and match.start() >= entity["start"]
                and match.end() <= entity["end"]
                for entity in entities
            )
            if not is_part_of_amount:
                entities.append({
                    "entity_group": "订单号",
                    "word": match.group(),
                    "start": match.start(),
                    "end": match.end(),
                    "score": 1.0
                })

        # 4. Platforms commonly involved in fraud — every occurrence.
        scam_platforms = ["刷单", "网购平台", "电商平台", "兼职", "投资理财", "虚拟币", "直播间",
                          "抖音", "淘宝", "京东", "拼多多", "返利", "赚钱", "招聘"]
        for platform in scam_platforms:
            for start in self._find_all(text, platform):
                entities.append({
                    "entity_group": "涉案平台",
                    "word": platform,
                    "start": start,
                    "end": start + len(platform),
                    "score": 1.0
                })

        # 5. Case type detection: one entity per matched keyword, anchored
        # at the keyword's first occurrence (a case type is a document-level
        # label, so one hit per type is intentional).
        scam_types = {
            "刷单": "刷单诈骗",
            "兼职": "兼职诈骗",
            "贷款": "贷款诈骗",
            "理财": "理财诈骗",
            "投资": "投资诈骗",
            "退款": "退款诈骗",
            "冒充": "冒充诈骗"
        }
        for keyword, scam_type in scam_types.items():
            pos = text.find(keyword)  # single lookup instead of two
            if pos != -1:
                entities.append({
                    "entity_group": "案件类型",
                    "word": scam_type,
                    "start": pos,
                    "end": pos + len(keyword),
                    "score": 1.0
                })

        # Drop duplicates sharing the same (group, start, end) span.
        unique_entities = []
        seen = set()
        for entity in entities:
            key = (entity["entity_group"], entity["start"], entity["end"])
            if key not in seen:
                unique_entities.append(entity)
                seen.add(key)

        return unique_entities

    def _filter_entities(self, entities, term_types):
        """Filter entities by the frontend-selected term types.

        When ``term_types`` is empty (or every flag is falsy) all entities
        are returned unchanged; when ``allFinanceTerms`` is set every
        entity is kept.
        """
        if not term_types or all(not value for value in term_types.values()):
            logger.info("未选择任何实体类型，返回所有识别到的实体")
            return entities

        # "All finance terms" selected: keep everything (hoisted out of the
        # per-entity loop — the flag does not depend on the entity).
        if term_types.get('allFinanceTerms', False):
            filtered_result = list(entities)
            logger.info(f"过滤后的实体: {filtered_result}")
            return filtered_result

        # Model entity-group name -> frontend option key.
        entity_type_map = {
            # Financial transaction fields
            "交易号": "tradeNum",
            "订单号": "orderNum",
            "资损金额": "lossAmount",
            "银行卡号": "bankCardNum",
            "支付渠道": "payChannel",

            # Parties involved
            "受害人": "victim",
            "受害人身份": "victimIdentity",
            "嫌疑人": "suspect",

            # Time and place
            "案发城市": "caseCity",
            "案发时间": "caseTime",
            "涉案平台": "casePlatform",
            "手机号": "phoneNum",

            # Classic financial entity types
            "COMPANY": "company",
            "POSITION": "position",
            "PRODUCT": "product",
            "MARKET": "market"
        }

        filtered_result = []
        for entity in entities:
            frontend_type = entity_type_map.get(entity['entity_group'])
            if frontend_type and term_types.get(frontend_type, False):
                filtered_result.append(entity)

        logger.info(f"过滤后的实体: {filtered_result}")
        return filtered_result