from typing import List
from tqdm import tqdm

# 1. 抽象基类定义
from typing import List
import faiss
import os
from abc import ABC, abstractmethod
from transformers import AutoTokenizer, AutoModel
from datasets import load_dataset
import torch
import numpy as np
import json
from collections import Counter

def mean_pooling(model_output, attention_mask):
    """Average token embeddings over valid (non-padding) positions.

    Args:
        model_output: HuggingFace model output; element 0 holds the
            per-token embeddings of shape (batch, seq_len, hidden).
        attention_mask: (batch, seq_len) 0/1 tensor marking real tokens.

    Returns:
        A (batch, hidden) tensor: the mask-weighted mean of the token
        embeddings for each sequence.
    """
    token_embeddings = model_output[0]
    # Broadcast the mask over the hidden dimension so padded positions
    # contribute nothing to the sum.
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = torch.sum(token_embeddings * mask, 1)
    # Clamp avoids division by zero for fully-masked rows.
    counts = torch.clamp(mask.sum(1), min=1e-9)
    return summed / counts

class BaseRetriever(ABC):
    """Abstract base class for dense retrievers backed by a FAISS index.

    Subclasses implement ``load_chunks`` to supply the corpus. Queries are
    embedded with a HuggingFace model and searched against the index with
    inner product (cosine similarity, since embeddings are L2-normalized).

    ``retrieve`` input: List[str], each str is one query.
    ``retrieve`` output: the raw ``(scores, indices)`` arrays from FAISS,
    each of shape (len(queries), top_k).
    """

    def __init__(self, model_name: str, index_path: str, lang: str = 'rkt'):
        self.lang = lang
        self.model_name = model_name
        self.index_path = index_path
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Load the embedding model and its tokenizer.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModel.from_pretrained(self.model_name, trust_remote_code=True).to(self.device)
        self.model.eval()

        # Corpus and FAISS index initialization.
        self.chunks = self.load_chunks()  # Abstract method implemented by subclasses
        self.index = self.prepare_index()

    @abstractmethod
    def load_chunks(self) -> List[str]:
        """Load and return chunks of text or code to be indexed."""
        pass

    def prepare_index(self):
        """Load the cached FAISS index if present, otherwise build a new one."""
        if os.path.exists(self.index_path):
            index = faiss.read_index(self.index_path)
        else:
            index = self.build_index()
        return index

    def embed_texts(self, texts: List[str]) -> np.ndarray:
        """Embed a list of texts into L2-normalized vectors.

        Raises:
            NotImplementedError: if no embedding strategy is configured
                for ``self.model_name``.
        """
        with torch.no_grad():
            if self.model_name == "jinaai/jina-embeddings-v2-base-code":
                # Jina ships its own encode() via trust_remote_code.
                embeddings = self.model.encode(texts)
            elif self.model_name == "sentence-transformers/all-MiniLM-L6-v2":
                encoded_input = self.tokenizer(texts, padding=True, truncation=True, return_tensors='pt').to(self.device)
                model_output = self.model(**encoded_input)
                attention_mask = encoded_input['attention_mask']
                embeddings = mean_pooling(model_output, attention_mask)
            else:
                raise NotImplementedError(f"Model {self.model_name} embedding method not implemented")

        if isinstance(embeddings, torch.Tensor):
            embeddings = embeddings.cpu().numpy()
        # L2-normalize so inner-product search equals cosine similarity.
        # Clip the norms to avoid division by zero on degenerate all-zero rows.
        norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
        return embeddings / np.clip(norms, 1e-12, None)

    def build_index(self, batch_size: int = 128):
        """Build a FAISS inner-product index over ``self.chunks``.

        Embeds the corpus in batches to avoid OOM, shows a progress bar,
        and caches the finished index at ``self.index_path``.

        Raises:
            ValueError: if there are no chunks to index (otherwise we would
                pass ``None`` to ``faiss.write_index`` below).
        """
        if not self.chunks:
            raise ValueError("Cannot build an index over an empty chunk list")

        index = None
        total_batches = (len(self.chunks) + batch_size - 1) // batch_size

        with tqdm(total=total_batches, desc="Building Index") as pbar:
            for start in range(0, len(self.chunks), batch_size):
                batch = self.chunks[start:start + batch_size]
                embeddings = self.embed_texts(batch)

                if index is None:
                    # Inner product on normalized vectors == cosine similarity.
                    index = faiss.IndexFlatIP(embeddings.shape[1])

                index.add(embeddings)
                pbar.update(1)

        # Cache the index. Only create the parent directory when the path
        # actually has one: os.path.dirname() of a bare filename is "" and
        # os.makedirs("") raises FileNotFoundError.
        parent_dir = os.path.dirname(self.index_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        faiss.write_index(index, self.index_path)
        return index

    def retrieve(self, queries: List[str], top_k: int = 5):
        """Search the index for each query.

        Returns:
            (scores, indices): numpy arrays of shape (len(queries), top_k).
            NOTE: FAISS pads ``indices`` with -1 when the index holds fewer
            than ``top_k`` vectors; callers must handle that.
        """
        query_embeddings = self.embed_texts(queries)
        assert isinstance(query_embeddings, np.ndarray), "faiss only support numpy array"
        scores, indices = self.index.search(query_embeddings, top_k)
        return scores, indices


class KnowledgeRetriever(BaseRetriever):
    """Retriever over a JSONL knowledge base; queries match each entry's 'intent' field."""

    def __init__(self, model_name: str, index_path: str, knowledge_path: str, lang: str = 'racket'):
        self.knowledge_path = knowledge_path
        # Full knowledge dicts, kept parallel to the indexed chunks so a
        # FAISS index maps straight back to its knowledge object.
        self.knowledges = []
        super().__init__(model_name, index_path, lang)

    def load_chunks(self) -> List[str]:
        """Load knowledge entries from a JSONL file; index their 'intent' text.

        Side effect: fills ``self.knowledges`` with the full parsed objects.
        """
        chunks = []
        with open(self.knowledge_path, 'r', encoding='utf-8') as f:
            for line in f:
                knowledge = json.loads(line.strip())
                self.knowledges.append(knowledge)
                chunks.append(knowledge['intent'])
        return chunks

    def retrieve(self, queries: List[str], top_k: int = 5) -> List[dict]:
        """Retrieve full knowledge objects for a batch of queries.

        Entries are ranked by how many queries retrieved them (descending),
        ties broken by smaller index, and each entry appears at most once.
        """
        scores, indices = super().retrieve(queries, top_k)

        # Flatten per-query hits, dropping FAISS's -1 padding (returned when
        # the index holds fewer than top_k vectors); a raw -1 would silently
        # alias the LAST knowledge entry via negative indexing.
        flat_indices = [idx for per_query in indices for idx in per_query if idx >= 0]

        # Rank by hit count (descending), then by index value (ascending).
        index_counts = Counter(flat_indices)
        ranked = sorted(index_counts, key=lambda x: (-index_counts[x], x))

        return [self.knowledges[idx] for idx in ranked]


class CodeRetriever(BaseRetriever):
    """Retriever over code snippets from the nuprl/MultiPL-T dataset."""

    # Map file-extension shorthands to MultiPL-T split names.
    _SHORT_LANG = {
        'rkt': 'racket',
        'jl': 'julia',
        'ml': 'ocaml',
        'r': 'r',
    }

    def __init__(self, model_name: str, index_path: str, lang: str = 'rkt'):
        self.code_snippets = []
        super().__init__(model_name, index_path, lang)

    def load_chunks(self):
        """Load code snippets from the MultiPL-T dataset split for ``self.lang``.

        Normalizes extension-style shorthands (e.g. 'rkt') to full split
        names (e.g. 'racket'); unknown values pass through unchanged.
        """
        self.lang = self._SHORT_LANG.get(self.lang, self.lang)
        dataset = load_dataset("nuprl/MultiPL-T", split=self.lang)
        self.code_snippets = dataset['content']
        return self.code_snippets

    def retrieve(self, queries: List[str], top_k: int = 5) -> List[List[str]]:
        """Return, for each query, up to ``top_k`` most similar code snippets.

        FAISS pads ``indices`` with -1 when the index holds fewer than
        ``top_k`` vectors; those padding entries are dropped here rather
        than letting -1 alias the last snippet via negative indexing.
        """
        scores, indices = super().retrieve(queries, top_k)
        return [
            [self.code_snippets[i] for i in per_query if i >= 0]
            for per_query in indices
        ]
