import os
import json
import faiss
import pandas as pd
import numpy as np

# Import MindSpore and Transformers
import mindspore as ms
from mindspore import Tensor
import mindspore.ops as ops
import mindspore.numpy as mnp
from transformers import AutoTokenizer, AutoModel

# --- New AI Embedding Functions for MindSpore ---

def mean_pooling(model_output, attention_mask):
    """
    Average the token embeddings of a transformer output, weighted by the
    attention mask so padding tokens do not contribute.

    Args:
        model_output: model forward result; element 0 holds the per-token
            embeddings (batch, seq_len, hidden).
        attention_mask: mask tensor (batch, seq_len); 1 for real tokens.

    Returns:
        Sentence-level embeddings of shape (batch, hidden).
    """
    # Per-token hidden states come first in the model output tuple.
    token_states = model_output[0]

    # Expand the mask to (batch, seq_len, hidden) so it lines up with the
    # hidden states for elementwise weighting.
    mask = ops.expand_dims(attention_mask, -1).broadcast_to(token_states.shape)

    # Sum only the unmasked token vectors, then divide by the token count.
    masked_total = ops.reduce_sum(token_states * mask, 1)
    # Clip avoids division by zero for an all-padding row.
    token_counts = mnp.clip(ops.reduce_sum(mask, 1), 1e-9, None)

    return masked_total / token_counts


class SearchService:
    """Semantic book search backed by a Faiss index and a MindSpore embedding model.

    On construction it configures the MindSpore device context, loads the
    Faiss index, the index-position -> book-id map, and the processed book
    DataFrame, then loads the sentence-transformer model. If the data files
    are missing the service degrades gracefully: every public method returns
    an empty/None result instead of raising.
    """

    def __init__(self):
        # Pre-initialize all attributes so guards elsewhere (e.g. in
        # _generate_embedding) never hit an AttributeError when asset or
        # model loading fails partway through.
        self.index = None
        self.book_data = None
        self.index_to_id = {}
        self.tokenizer = None
        self.model = None

        # --- 1. Setup MindSpore Environment ---
        print("Setting up MindSpore context for Ascend...")
        try:
            ms.set_context(device_target="Ascend")
            print("MindSpore context set to Ascend.")
        except Exception as e:
            print(f"Could not set device to Ascend. Error: {e}")
            print("Falling back to CPU mode.")
            ms.set_context(device_target="CPU")

        # --- 2. Load Data Artifacts ---
        # Data lives in <project root>/data, one level above this module.
        base_dir = os.path.dirname(os.path.dirname(__file__))
        data_dir = os.path.join(base_dir, 'data')

        index_path = os.path.join(data_dir, 'book_index.faiss')
        id_map_path = os.path.join(data_dir, 'index_to_id.json')
        book_data_path = os.path.join(data_dir, 'processed_books.parquet')

        print("Loading search service assets...")
        try:
            self.index = faiss.read_index(index_path)

            with open(id_map_path, 'r') as f:
                # JSON keys are always strings; Faiss returns integer
                # positions, so normalize the keys to int up front.
                self.index_to_id = {int(k): v for k, v in json.load(f).items()}

            self.book_data = pd.read_parquet(book_data_path)
            # Use the DataFrame index as the lookup key, which matches Faiss IDs
            self.book_data.set_index(self.book_data.index.astype(int), inplace=True)

            print("Assets loaded successfully.")

        except FileNotFoundError as e:
            print(f"FATAL: Could not find data files. Did you run the processing script? Error: {e}")
            self.index = None
            self.book_data = None
            # In a real app, you might want to exit or handle this more gracefully

        # --- 3. Load AI Model on Ascend ---
        # Only load the (expensive) model when the index is usable.
        if self.index is not None:
            print("Loading embedding model...")
            model_name = 'sentence-transformers/all-MiniLM-L6-v2'
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModel.from_pretrained(model_name)
            self.model.set_train(False)  # Evaluation mode
            print("AI model loaded.")

    def _generate_embedding(self, text: str):
        """Generate a single L2-normalized embedding for *text*.

        Returns:
            numpy.ndarray of shape (1, hidden), or None when the model or
            tokenizer is unavailable.
        """
        if self.model is None or self.tokenizer is None:
            return None

        # Tokenize; 'ms' asks transformers for MindSpore tensors.
        encoded_input = self.tokenizer([text], padding=True, truncation=True, return_tensors='ms')

        # Inference
        model_output = self.model(**encoded_input)

        # Pool and normalize so inner-product search behaves like cosine.
        embedding = mean_pooling(model_output, encoded_input['attention_mask'])
        normalized_embedding = ops.L2Normalize(axis=1)(embedding)

        return normalized_embedding.asnumpy()

    def search(self, query: str, top_k: int = 10):
        """
        Performs a semantic search for a given query.

        Args:
            query: free-text search string.
            top_k: maximum number of results to return.

        Returns:
            A list of result dicts (id, title, author, summary, score),
            possibly empty if the service is unavailable.
        """
        if self.index is None:
            print("Search service is not available because data files are missing.")
            return []

        query_embedding = self._generate_embedding(query)
        # Model may be unavailable even when the index loaded.
        if query_embedding is None:
            print("Embedding model is not available; cannot perform search.")
            return []

        # Perform search
        distances, indices = self.index.search(query_embedding, top_k)

        results = []
        for i, idx in enumerate(indices[0]):
            if idx == -1:  # Faiss returns -1 for no result
                continue

            # Get book ID from the Faiss index position -> dataframe index -> book ID.
            # int(idx): Faiss returns numpy integers; the map keys are plain ints.
            book_id_from_map = self.index_to_id.get(int(idx))

            # 'is not None' keeps falsy-but-valid IDs (e.g. 0) in the results.
            if book_id_from_map is None:
                continue

            matches = self.book_data[self.book_data['id'] == book_id_from_map]
            if matches.empty:
                # Stale map entry with no matching row; skip instead of
                # raising IndexError on .iloc[0].
                continue

            book_info = matches.iloc[0]
            results.append({
                "id": book_info['id'],
                "title": book_info['title'],
                "author": book_info.get('contributor', 'Unknown'),
                "summary": book_info['summary'],
                "score": float(distances[0][i])
            })

        return results

    def get_book_by_id(self, book_id: str):
        """Retrieves a single book by its ID, or None if unknown/unavailable."""
        if self.book_data is None:
            return None

        book_series = self.book_data[self.book_data['id'] == book_id]
        if book_series.empty:
            return None
        return book_series.iloc[0].to_dict()

    def get_homepage_books(self, num_books: int = 20):
        """Returns a random sample of books for the homepage.

        The sample size is clamped to the catalog size so a small dataset
        does not raise ValueError from DataFrame.sample.
        """
        if self.book_data is None:
            return []

        sample_size = min(num_books, len(self.book_data))
        return self.book_data.sample(n=sample_size).to_dict(orient='records')

# --- Instantiate the service ---
# This object will be a singleton, created when the app starts.
# NOTE(review): construction happens at import time, so importing this module
# triggers MindSpore context setup, Faiss/parquet loading, and model download
# as a side effect — importers pay that cost immediately.
search_service = SearchService() 