import re
from typing import List, Dict, Tuple, Set

import numpy as np
from tqdm import tqdm
from qdrant_client import QdrantClient, models

# from sentence_transformers import SentenceTransformer
from util_for_clip import clip_vit_b32
# from demo_janus import janus_pro_7b, multimodal_understanding
from demo_qwen_instrcut import qwen3_4b_instruct, get_result_by_qwen


class Text2Img:
    """Text-to-image retrieval over a Qdrant vector collection.

    A text query is first rewritten into short English by a Qwen instruct
    model, embedded with CLIP ViT-B/32 (512-dim), and then used to search
    a Qdrant collection whose point payloads carry image paths.
    """

    def __init__(self, collection_name: str = 'images'):
        """
        Args:
            collection_name: name of the Qdrant collection holding the
                image vectors. Payloads are expected to contain a 'path'
                key (see `avg_precision_at_k`).
        """
        self.collection_name = collection_name

        # Text encoder producing the query vector (CLIP ViT-B/32).
        self.text_encoder = clip_vit_b32()

        # Qdrant client; assumes a locally running server — TODO confirm endpoint.
        self.qdrant_client = QdrantClient("http://localhost:6333")

        # Qwen model/tokenizer used to translate queries to English.
        self.tokenizer, self.model = qwen3_4b_instruct()
        # Prompt prefix ("translate into short and precise English").
        # Runtime string — kept verbatim.
        self.question_to_en = '翻译为简短且精确的英语'

    def get_text_embedding(self, text: str) -> List[float]:
        """Translate *text* to English with Qwen, then encode it with CLIP.

        Args:
            text: raw user query (any language).

        Returns:
            The CLIP embedding as a plain Python list (512 floats).
        """
        # Ask Qwen for a short, precise English rendering of the query.
        text = get_result_by_qwen(
            self.tokenizer, self.model,
            prompt=f'{self.question_to_en}: {text} /no_think',
            max_new_tokens=256,
        )

        # The model may still emit a (possibly empty) <think>...</think>
        # block, e.g. "<think>\n\n</think>\n\nYellow." — strip it before
        # encoding so only the translation is embedded.
        text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL).strip()
        vector = self.text_encoder.encode(text).tolist()
        print('简化的输入: ', text)
        return vector

    def search(self, text: str, search_result_num: int = 5) -> List[Dict[str, str]]:
        """
        Search function for the vector database.

        Args:
            text: text used in the search
            search_result_num: maximum number of hits to return

        Returns:
            List of payloads (image paths, more exactly)
        """
        # Convert the text query into a vector (Qwen translation + CLIP).
        vector = self.get_text_embedding(text)

        # Search for the closest images in the collection.
        search_result = self.qdrant_client.search(
            collection_name=self.collection_name,
            query_vector=vector,
            query_filter=None,
            with_payload=True,
            limit=search_result_num,
        )
        # Return only the payloads of the hits.
        return [hit.payload for hit in search_result]

    @staticmethod
    def _common_hits(ann_result, knn_result) -> Tuple[Set, List[str]]:
        """Intersect two search-result lists by point id.

        Args:
            ann_result: hits from the approximate search.
            knn_result: hits from the exact search; each hit must expose
                `.id` and `.payload['path']`.

        Returns:
            (common ids, image paths of the kNN hits whose id is common).
        """
        ann_ids = {hit.id for hit in ann_result}
        common_ids = {hit.id for hit in knn_result if hit.id in ann_ids}
        common_paths = [
            hit.payload['path'] for hit in knn_result if hit.id in common_ids
        ]
        return common_ids, common_paths

    def avg_precision_at_k(self, test_dataset: List[str], k: int = 5) -> Tuple[float, Dict[str, Set[str]]]:
        """
        Computes precision@k metric for a custom set of queries.

        Args:
            test_dataset: dataset of text queries
            k: parameter of the metric

        Returns: tuple represented by the result of the metric precision@k
                and a mapping between one search query and a set of common
                images in both ANN and full kNN

        Note: the previous revision built the common-image mask with
        `np.isin(common, knn_ids)` — arguments swapped — producing an
        all-True mask of the wrong length, which breaks boolean indexing
        whenever ANN and exact kNN disagree. Fixed via `_common_hits`.
        """
        # Guard: empty query set would divide by zero below.
        if not test_dataset:
            return 0.0, {}

        precisions = []
        common_images_mapping = {}

        print("Evaluating custom dataset...")
        for item in tqdm(test_dataset):
            # NOTE(review): embeds the raw query directly (no Qwen
            # translation), matching the original evaluation path.
            vector = self.text_encoder.encode(item).tolist()

            # Approximate (ANN) results.
            ann_result = self.qdrant_client.search(
                collection_name=self.collection_name,
                query_vector=vector,
                limit=k,
            )

            # Exact (full kNN) results.
            knn_result = self.qdrant_client.search(
                collection_name=self.collection_name,
                query_vector=vector,
                limit=k,
                search_params=models.SearchParams(
                    exact=True,  # Turns on the exact search mode
                ),
            )

            # Ids found by both searches and the corresponding image paths.
            common_ids, common_paths = self._common_hits(ann_result, knn_result)
            common_images_mapping[item] = set(common_paths)

            # Precision@k for this query: overlap size over k.
            precisions.append(len(common_ids) / k)

        return sum(precisions) / len(precisions), common_images_mapping
