"""LegalBERT 模型相关工具函数"""
import logging
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel

from app.models.model_manager import model_manager
from app.config import settings

logger = logging.getLogger(__name__)


def get_text_embedding(text: str) -> np.ndarray:
    """
    Return the embedding vector for a piece of text.

    Args:
        text: Input text.

    Returns:
        The text embedding vector (the [CLS] token representation
        from the base model's last hidden state).
    """
    tokenizer = model_manager.load_tokenizer()
    model = model_manager.load_base_model()

    # Tokenize and encode as PyTorch tensors
    encoded = tokenizer(
        text,
        max_length=settings.LEGALBERT_MAX_LENGTH,
        padding=True,
        truncation=True,
        return_tensors="pt"
    )

    # Move all input tensors to the model's device
    encoded = {name: tensor.to(model_manager.device) for name, tensor in encoded.items()}

    # Forward pass without gradient tracking; take the [CLS] token
    # (position 0) as the sentence-level representation
    with torch.no_grad():
        model_output = model(**encoded)
        cls_vectors = model_output.last_hidden_state[:, 0, :].cpu().numpy()

    # Drop the batch dimension before returning
    return cls_vectors[0]


def calculate_similarity(text1: str, text2: str) -> float:
    """
    Compute the cosine similarity between two texts' embeddings.

    Args:
        text1: First text.
        text2: Second text.

    Returns:
        Cosine similarity score; 0.0 when either embedding has
        zero norm.
    """
    vec_a = get_text_embedding(text1)
    vec_b = get_text_embedding(text2)

    # Cosine similarity: dot(a, b) / (|a| * |b|)
    norm_a = np.linalg.norm(vec_a)
    norm_b = np.linalg.norm(vec_b)

    # Guard against division by zero for degenerate embeddings
    if norm_a == 0 or norm_b == 0:
        return 0.0

    return float(np.dot(vec_a, vec_b) / (norm_a * norm_b))


def classify_text(text: str, num_labels: int = 2) -> Dict[str, float]:
    """
    Classify a piece of text.

    Args:
        text: Input text.
        num_labels: Number of classification labels (only used by the
            fallback projection when the loaded model has no
            classification head).

    Returns:
        Dict mapping "label_{i}" to the probability of label i.
    """
    tokenizer = model_manager.load_tokenizer()
    model = model_manager.load_classification_model()

    # Tokenize and encode
    inputs = tokenizer(
        text,
        max_length=settings.LEGALBERT_MAX_LENGTH,
        padding=True,
        truncation=True,
        return_tensors="pt"
    )

    # Move to device
    inputs = {k: v.to(model_manager.device) for k, v in inputs.items()}

    # Classification prediction
    with torch.no_grad():
        outputs = model(**inputs)

        if hasattr(outputs, 'logits'):
            # Real classification model with a trained head
            logits = outputs.logits
        else:
            # Base-model fallback: project the [CLS] embedding through an
            # UNTRAINED linear head. The weights are drawn from a fixed-seed
            # generator so repeated calls on the same text give the same
            # (placeholder, not meaningful) output; the previous unseeded
            # torch.randn made results nondeterministic across calls.
            logger.warning(
                "classify_text: model has no classification head; "
                "using an untrained random projection"
            )
            hidden_state = outputs.last_hidden_state[:, 0, :]
            generator = torch.Generator().manual_seed(0)
            weight = torch.randn(
                num_labels, hidden_state.size(-1), generator=generator
            ).to(model_manager.device)
            logits = torch.nn.functional.linear(hidden_state, weight)

        # Softmax over the label dimension to obtain probabilities
        probs = torch.nn.functional.softmax(logits, dim=-1)
        probs = probs.cpu().numpy()[0]

    # One entry per label
    return {f"label_{i}": float(prob) for i, prob in enumerate(probs)}


def extract_entities(text: str, label_map: Optional[Dict[int, str]] = None) -> List[Dict[str, Any]]:
    """
    Extract named entities from text via token-level BIO predictions.

    Args:
        text: Input text.
        label_map: Mapping from prediction index to BIO tag; when None,
            a default legal-domain mapping is used.

    Returns:
        List of entities; each is a dict with keys ``text``, ``label``,
        ``start``, ``end`` (start/end are token indices within the
        tokenized input, including special-token positions).
    """
    if label_map is None:
        # Default label mapping (BIO scheme)
        label_map = {
            0: "O",  # Outside
            1: "B-LAW",  # Begin-Law
            2: "I-LAW",  # Inside-Law
            3: "B-CASE",  # Begin-Case
            4: "I-CASE",  # Inside-Case
            5: "B-ORG",  # Begin-Organization
            6: "I-ORG",  # Inside-Organization
            7: "B-PERSON",  # Begin-Person
            8: "I-PERSON",  # Inside-Person
        }

    tokenizer = model_manager.load_tokenizer()
    model = model_manager.load_ner_model()

    # Tokenize and encode
    inputs = tokenizer(
        text,
        max_length=settings.LEGALBERT_MAX_LENGTH,
        padding=True,
        truncation=True,
        return_tensors="pt"
    )

    # Move to device
    inputs = {k: v.to(model_manager.device) for k, v in inputs.items()}

    # NER prediction
    with torch.no_grad():
        outputs = model(**inputs)

        if hasattr(outputs, 'logits'):
            # Real token-classification model with a trained head
            logits = outputs.logits
        else:
            # Base-model fallback: UNTRAINED per-token projection, seeded so
            # the (placeholder) output is at least deterministic per call.
            hidden_state = outputs.last_hidden_state
            generator = torch.Generator().manual_seed(0)
            weight = torch.randn(
                len(label_map), hidden_state.size(-1), generator=generator
            ).to(model_manager.device)
            logits = torch.nn.functional.linear(hidden_state, weight)

        predictions = torch.argmax(logits, dim=-1).cpu().numpy()[0]

    # Recover the token strings aligned with the predictions
    tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0].cpu().numpy())
    special_tokens = {tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token}

    return _decode_bio_entities(tokens, predictions, label_map, special_tokens)


def _decode_bio_entities(
    tokens: List[str],
    predictions,
    label_map: Dict[int, str],
    special_tokens,
) -> List[Dict[str, Any]]:
    """
    Convert per-token BIO tag predictions into entity span dicts.

    WordPiece continuation tokens ("##xxx") are merged into the previous
    token WITHOUT a space; whole tokens are joined with a single space.
    (The previous implementation joined everything with spaces and then
    stripped "##", which left stray spaces inside words, e.g.
    "play ##ing" -> "play ing" instead of "playing".)
    """
    entities: List[Dict[str, Any]] = []
    current: Optional[Dict[str, Any]] = None

    def _start(token: str, entity_type: str, idx: int) -> Dict[str, Any]:
        # Begin a new entity at token index idx
        return {
            "text": token[2:] if token.startswith("##") else token,
            "label": entity_type,
            "start": idx,
            "end": idx,
        }

    def _extend(entity: Dict[str, Any], token: str, idx: int) -> None:
        # Append a token to the running entity, merging WordPiece pieces
        if token.startswith("##"):
            entity["text"] += token[2:]
        else:
            entity["text"] += " " + token
        entity["end"] = idx

    for idx, (token, pred) in enumerate(zip(tokens, predictions)):
        if token in special_tokens:
            continue

        tag = label_map.get(pred, "O")

        if tag.startswith("B-"):
            # New entity begins; flush the previous one
            if current:
                entities.append(current)
            current = _start(token, tag[2:], idx)
        elif tag.startswith("I-") and current:
            if current["label"] == tag[2:]:
                # Continuation of the current entity
                _extend(current, token, idx)
            else:
                # Inconsistent I- tag: close the old entity, start a new one
                entities.append(current)
                current = _start(token, tag[2:], idx)
        else:
            # "O" tag (or dangling I-): close any open entity
            if current:
                entities.append(current)
                current = None

    # Flush the last open entity
    if current:
        entities.append(current)

    # Tidy up surrounding whitespace
    for entity in entities:
        entity["text"] = entity["text"].strip()

    return entities

