#
import os
import re
import json
import time
import numpy as np
import logging
import tiktoken
from typing import Dict, List, Tuple, Any, Optional, Union, Callable, TypeVar
from dataclasses import dataclass
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import cosine_similarity
import torch
from transformers import AutoModel, AutoTokenizer
# import faiss
from pymilvus import MilvusClient, DataType
from pymilvus.milvus_client import IndexParams
# from IPython.display import display, Markdown, HTML
from common.app_registry import AppRegistry as AR
from mmlm.cpm.cpm_engine import CpmEngine

# Configure logging: timestamped records at INFO level for the whole module.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)  # module-level logger used by all helpers below

def count_tokens(text: str, model: str = AR.DEFAULT_MODEL) -> int:
    """
    Return the number of tokens in *text*.

    Encodes with the shared AR.tokenizer; if that fails for any reason,
    falls back to a rough estimate of one token per four characters.

    Args:
        text: Text to tokenize
        model: Model name to use for tokenization (only used in the
            warning message; AR.tokenizer does the actual encoding)

    Returns:
        int: Token count
    """
    try:
        encoded = AR.tokenizer.encode(text)
    except Exception as e:
        # Fallback for when the tokenizer cannot handle the input.
        logger.warning(f"Could not use tiktoken for {model}: {e}")
        # Rough approximation: 1 token ≈ 4 chars in English
        return len(text) // 4
    return len(encoded)
    
def mean_pooling(token_embeddings, attention_mask):
    """Mask-weighted mean of token embeddings over the sequence axis.

    Args:
        token_embeddings: Tensor of shape (batch, seq_len, hidden).
        attention_mask: Tensor of shape (batch, seq_len); 1 marks real tokens.

    Returns:
        Tensor of shape (batch, hidden): the average of the non-padded
        token embeddings per sequence (clamped to avoid divide-by-zero).
    """
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = torch.sum(token_embeddings * mask, 1)
    denom = torch.clamp(mask.sum(1), min=1e-9)
    return summed / denom

def generate_embedding(
    text: str,
    client=None,
    model: str = AR.DEFAULT_EMBEDDING_MODEL
) -> List[float]:
    """
    Generate an embedding vector for the given text.

    NOTE(review): despite the annotation, this returns the torch.Tensor
    produced by mean_pooling (shape (1, hidden_dim)); downstream callers
    convert it with numpy — confirm before changing the return type.

    Args:
        text: Text to embed
        client: API client (unused; kept for interface compatibility)
        model: Embedding model name (unused; AR.emb_model is used)

    Returns:
        Embedding vector (torch.Tensor of shape (1, hidden_dim))
    """
    # Ensure a pad token exists so padding=True works.
    # NOTE(review): this mutates the shared tokenizer on every call; it is
    # harmless when repeated with the same token but belongs in one-time setup.
    AR.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    # Put the model in eval mode before inference (disable dropout etc.).
    AR.emb_model.eval()
    with torch.no_grad():
        # Tokenize; truncate long inputs to the 512-token window.
        inputs = AR.tokenizer(
            text,
            padding=True,
            truncation=True,
            return_tensors="pt",
            max_length=512  # adjust to the embedding model's context as needed
        )
        # Model forward pass.
        outputs = AR.emb_model(**inputs)
        last_hidden_state = outputs.last_hidden_state
        # Mean-pool the token embeddings into a single sentence vector.
        return mean_pooling(last_hidden_state, inputs['attention_mask'])
    
def generate_response(
    prompt: str,
    client=None,
    model: str = AR.DEFAULT_MODEL,
    temperature: float = AR.DEFAULT_TEMPERATURE,
    max_tokens: int = AR.DEFAULT_MAX_TOKENS,
    system_message: str = "You are a helpful assistant."
) -> Tuple[str, Dict[str, Any]]:
    """
    Generate a response from the LLM and return it with metadata.

    Args:
        prompt: The prompt to send
        client: API client (unused; CpmEngine performs the inference)
        model: Model name (recorded in metadata and used for token counting)
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate
        system_message: System message to prepend

    Returns:
        tuple: (response_text, metadata); on failure the text is
        "ERROR: <message>" and metadata carries an "error" key.
    """
    n_prompt = count_tokens(prompt, model)
    n_system = count_tokens(system_message, model)

    metadata: Dict[str, Any] = {
        "prompt_tokens": n_prompt,
        "system_tokens": n_system,
        "model": model,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "timestamp": time.time(),
    }

    try:
        started = time.time()
        chat = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ]
        response_text = CpmEngine.infer_structure(
            messages=chat, temperature=temperature, max_tokens=max_tokens
        )
        elapsed = time.time() - started
        n_response = count_tokens(response_text, model)
        n_input = n_prompt + n_system
        metadata.update({
            "latency": elapsed,
            "response_tokens": n_response,
            "total_tokens": n_input + n_response,
            "token_efficiency": n_response / n_input if n_input > 0 else 0,
            "tokens_per_second": n_response / elapsed if elapsed > 0 else 0,
        })
        return response_text, metadata
    except Exception as e:
        logger.error(f"Error generating response: {e}")
        metadata["error"] = str(e)
        return f"ERROR: {str(e)}", metadata
    
def format_metrics(metrics: Dict[str, Any]) -> str:
    """
    Render the key metrics as a single "name: value | ..." line.

    Args:
        metrics: Dictionary of metrics; missing keys default to 0

    Returns:
        str: Pipe-separated summary of the most important metrics
    """
    parts = [
        f"prompt_tokens: {metrics.get('prompt_tokens', 0)}",
        f"response_tokens: {metrics.get('response_tokens', 0)}",
        f"total_tokens: {metrics.get('total_tokens', 0)}",
        f"latency: {metrics.get('latency', 0):.2f}s",
        f"token_efficiency: {metrics.get('token_efficiency', 0):.2f}",
    ]
    return " | ".join(parts)

def display_response(
    prompt: str,
    response: str,
    retrieved_context: Optional[str] = None,
    metrics: Dict[str, Any] = None,
    show_prompt: bool = True,
    show_context: bool = True
) -> None:
    """
    Print a prompt/response pair (plus optional context and metrics).

    Output uses <h4> headings and fenced blocks — leftovers from a
    notebook HTML renderer, kept verbatim for compatibility.

    Args:
        prompt: The prompt text
        response: The response text
        retrieved_context: Retrieved context (optional)
        metrics: Metrics dictionary (optional)
        show_prompt: Whether to show the prompt text
        show_context: Whether to show the retrieved context
    """
    lines = []
    if show_prompt:
        lines += ["<h4>Query:</h4>", f"```\n{prompt}\n```"]
    if retrieved_context and show_context:
        lines += ["<h4>Retrieved Context:</h4>", f"```\n{retrieved_context}\n```"]
    lines += ["<h4>Response:</h4>", response]
    if metrics:
        lines += ["<h4>Metrics:</h4>", f"```\n{format_metrics(metrics)}\n```"]
    for line in lines:
        print(line)
    

@dataclass
class Document:
    """A chunk of text with optional metadata, embedding, and identifier."""
    content: str  # raw text of the chunk
    metadata: Dict[str, Any] = None  # arbitrary per-chunk metadata ({} if omitted)
    embedding: Optional[List[float]] = None  # vector from the embedding model
    id: Optional[str] = None  # short stable id; derived from content when omitted

    def __post_init__(self):
        """Fill in defaults: empty metadata dict and a content-hash id."""
        import hashlib
        self.metadata = {} if self.metadata is None else self.metadata
        if self.id is None:
            # 8-hex-char prefix of the content's MD5 as a compact id.
            self.id = hashlib.md5(self.content.encode()).hexdigest()[:8]

# Document Processing Functions
# ============================

def text_to_chunks(
    text: str,
    chunk_size: int = AR.DEFAULT_CHUNK_SIZE,
    chunk_overlap: int = AR.DEFAULT_CHUNK_OVERLAP,
    model: str = AR.DEFAULT_EMBEDDING_MODEL
) -> List[Document]:
    """
    Split text into overlapping chunks of at most chunk_size tokens.

    Args:
        text: Text to split
        chunk_size: Maximum tokens per chunk
        chunk_overlap: Tokens shared between consecutive chunks
        model: Model name (unused; AR.tokenizer does the tokenization)

    Returns:
        list: Document objects, each carrying start/end token indices
        and its token count in metadata
    """
    if not text:
        return []
    token_ids = AR.tokenizer.encode(text)
    total = len(token_ids)
    # Always advance by at least one token so the loop terminates even
    # when chunk_overlap >= chunk_size.
    step = max(1, chunk_size - chunk_overlap)
    documents = []
    start = 0
    while start < total:
        end = min(start + chunk_size, total)
        piece = token_ids[start:end]
        documents.append(Document(
            content=AR.tokenizer.decode(piece),
            metadata={
                "start_idx": start,
                "end_idx": end,
                "chunk_size": len(piece),
            },
        ))
        start += step
    return documents

def extract_document_batch_embeddings(
    documents: List[Document],
    client=None,
    model: str = AR.DEFAULT_EMBEDDING_MODEL,
    batch_size: int = 10
) -> List[Document]:
    """
    Generate embeddings for a list of documents, batch by batch.

    NOTE(review): documents are still embedded one at a time inside each
    batch; true batched tokenization/inference would be faster — confirm
    against AR.emb_model's padding behavior before changing.

    Args:
        documents: List of Document objects to embed
        client: API client (unused; kept for interface compatibility)
        model: Embedding model name (unused; AR.emb_model is used)
        batch_size: Number of documents handled per iteration

    Returns:
        list: The same Document objects with .embedding populated
        (documents in a failed batch are left unchanged and the error
        is logged)
    """
    if not documents:
        return []
    # Ensure a pad token exists so padding=True works.
    AR.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    # Eval mode for deterministic inference (the original skipped this).
    AR.emb_model.eval()
    for i in range(0, len(documents), batch_size):
        batch = documents[i:i + batch_size]
        try:
            # no_grad avoids retaining autograd graphs on every embedding
            # (the original leaked graph memory across documents).
            with torch.no_grad():
                for doc in batch:
                    inputs = AR.tokenizer(
                        doc.content,
                        padding=True,
                        truncation=True,
                        return_tensors='pt',
                        max_length=512
                    )
                    outputs = AR.emb_model(**inputs)
                    # Mean-pool the last hidden state into one sentence vector.
                    doc.embedding = mean_pooling(
                        outputs.last_hidden_state, inputs['attention_mask']
                    )
        except Exception as e:
            logger.error(f"Error generating batch embeddings: {e}")
    return documents

def similarity_search(
    query_embedding: List[float],
    documents: List[Document],
    top_k: int = AR.DEFAULT_TOP_K
) -> List[Tuple[Document, float]]:
    """
    Find the top_k documents most similar to a query embedding.

    Args:
        query_embedding: Query embedding (flat list/array or a (1, dim)
            tensor as produced by generate_embedding)
        documents: List of Document objects with embeddings
        top_k: Number of results to return

    Returns:
        list: (document, cosine_similarity) tuples, most similar first
    """
    # Filter out documents without embeddings
    docs_with_embeddings = [doc for doc in documents if doc.embedding is not None]
    if not docs_with_embeddings:
        logger.warning("No documents with embeddings found")
        return []
    query_np = np.array(query_embedding).reshape(1, -1)
    # Embeddings may arrive flat or as (1, dim) tensors from mean_pooling;
    # reshape handles both (the previous squeeze(axis=1) raised ValueError
    # on flat vectors).
    doc_matrix = np.array([np.asarray(doc.embedding) for doc in docs_with_embeddings])
    doc_matrix = doc_matrix.reshape(len(docs_with_embeddings), -1)
    similarities = cosine_similarity(query_np, doc_matrix)[0]
    # Pair, sort by similarity descending, and keep the best top_k.
    doc_sim_pairs = sorted(
        zip(docs_with_embeddings, similarities),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return doc_sim_pairs[:top_k]


def load_vdb() -> Tuple[MilvusClient, IndexParams]:
    """
    Open (or create) the local Milvus-lite collection used for RAG.

    Returns:
        tuple: (client, index_params) — the Milvus client bound to
        AR.VDB_FN and the vector index parameters for the collection.
    """
    # Initialize the Milvus client (auto-starts a local Milvus-lite
    # instance backed by the file at AR.VDB_FN).
    client = MilvusClient(AR.VDB_FN)
    # Build the index params unconditionally: the original only created
    # them in the "collection missing" branch, so the return statement
    # raised NameError whenever the collection already existed.
    index_params = client.prepare_index_params()
    # IVF_FLAT with L2 distance (use IP for cosine similarity; IVF_FLAT
    # suits datasets below ~100M vectors, otherwise use FLAT).
    index_params.add_index(
        field_name="vector",
        index_type="IVF_FLAT",
        metric_type="L2",
        params={"nlist": 128}  # number of cluster centroids
    )
    if not client.has_collection(AR.COLLECTION_NAME):
        # Define the collection schema: auto-id INT64 primary key plus a
        # float vector field of dimension AR.RAG_DIM.
        schema = MilvusClient.create_schema(
            auto_id=True,
            enable_dynamic_field=False
        )
        schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
        schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=AR.RAG_DIM)
        client.create_collection(
            collection_name=AR.COLLECTION_NAME,
            schema=schema,
            index_params=index_params
        )
    # Load the collection into memory so it is ready for queries.
    client.load_collection(AR.COLLECTION_NAME)
    return client, index_params

# Shared module-level vector-DB handles created at import time: the Milvus
# client/index params, and an in-memory map from inserted primary-key id
# to the corresponding chunk text.
g_client, g_index_params = load_vdb()
g_ragdb = {}

def create_vdb_index(documents: List[Document]) -> None:
    """
    Insert document embeddings into the Milvus collection and record the
    primary-key-id -> content mapping in g_ragdb.

    Args:
        documents: List of Document objects with embeddings

    Returns:
        None (returns early with a warning when no embeddings are present)
    """
    global g_client
    # Filter out documents without embeddings
    docs_with_embeddings = [doc for doc in documents if doc.embedding is not None]
    if not docs_with_embeddings:
        logger.warning("No documents with embeddings found")
        return None
    # Prepare rows: one {"vector": [...]} dict per document embedding.
    embeddings = np.array([doc.embedding for doc in docs_with_embeddings], dtype=np.float32)
    # Flatten in case embeddings arrive as (1, dim) tensors from mean_pooling.
    embeddings = embeddings.reshape(len(docs_with_embeddings), -1)
    data = [{"vector": emb.tolist()} for emb in embeddings]
    # Insert into Milvus.
    insert_result = g_client.insert(AR.COLLECTION_NAME, data)
    # insert_count is an int — the original called len() on it (TypeError).
    print(f"成功插入 {insert_result['insert_count']} 条向量数据")
    print(f"生成的主键 IDs: {insert_result['primary_keys']}")
    # Primary keys are auto-generated ids, not list positions: pair them
    # with the documents by insertion order (the original indexed the list
    # with the raw primary key, which raises IndexError for auto ids).
    for rid, doc in zip(insert_result['primary_keys'], docs_with_embeddings):
        g_ragdb[rid] = doc.content
    # Optional: (re)create the index; Milvus also builds it automatically.
    g_client.create_index(AR.COLLECTION_NAME, "vector", g_index_params)

def vdb_similarity_search(
    query_embedding: List[float],
    faiss_index: Any,
    documents: List[Document],
    top_k: int = AR.DEFAULT_TOP_K
) -> List[List[dict]]:
    """
    Find the most similar vectors in the Milvus collection.

    Args:
        query_embedding: Query embedding vector
        faiss_index: Unused; kept for interface compatibility with the
            earlier FAISS-based implementation
        documents: Unused; kept for interface compatibility
        top_k: Number of results to return

    Returns:
        list: Milvus search results — one list of hit dicts per query
        vector (a single inner list here, since one query is sent)
    """
    # Milvus expects `data` to be a list of query vectors: [[f, f, ...]].
    # The original built a 2-D list and then wrapped it again, sending a
    # triple-nested [[[...]]]; flatten to one vector and wrap exactly once.
    query_vec = np.array(query_embedding, dtype=np.float32).reshape(-1).tolist()
    results = g_client.search(
        collection_name=AR.COLLECTION_NAME,
        data=[query_vec],
        limit=top_k,  # honour top_k (the original hard-coded limit=3)
        output_fields=["id"]
    )

    print(f"\nTop {top_k} 相似结果:")
    for hits in results:
        for hit in hits:
            print(f"ID: {hit['entity']['id']}, 距离: {hit['distance']:.4f}")
    return results