import pandas as pd
import os
import json
import re
from tqdm import tqdm
import requests
import faiss

# Import MindSpore and Transformers
import mindspore as ms
from mindspore import Tensor
import mindspore.ops as ops
import mindspore.numpy as mnp
from transformers import AutoTokenizer, AutoModel

# --- Configuration ---
# All inputs and outputs live under <repo>/data, resolved relative to this file.
DATA_DIR = os.path.join(os.path.dirname(__file__), '..', 'data')
METADATA_PATH = os.path.join(DATA_DIR, 'metadata.csv')
MANIFEST_PATH = os.path.join(DATA_DIR, 'manifest.json')

# Pipeline outputs: cleaned book summaries, the Faiss vector index, and the
# map from Faiss integer ids back to book ids.
OUTPUT_DATA_PATH = os.path.join(DATA_DIR, 'processed_books.parquet')
OUTPUT_INDEX_PATH = os.path.join(DATA_DIR, 'book_index.faiss')
OUTPUT_ID_MAP_PATH = os.path.join(DATA_DIR, 'index_to_id.json')

# Hugging Face model id used for sentence embeddings.
MODEL_NAME = 'sentence-transformers/all-MiniLM-L6-v2'
NUM_BOOKS_TO_PROCESS = 1000  # For development, we'll process a smaller subset
WORDS_PER_SUMMARY = 300  # number of leading words kept as each book's summary

# --- Helper Functions ---

def get_master_dataframe():
    """Build the merged master table of book metadata plus full-text URLs.

    Reads the metadata CSV and the manifest JSON from disk, derives a shared
    ``id`` column in both, and inner-joins them, dropping duplicate books.

    Returns:
        pandas.DataFrame with one row per unique book id, carrying the
        metadata columns plus a ``text_url`` column.
    """
    # Metadata CSV: its 'id' column actually holds an item URL; recover the
    # bare id from the URL's final path segment.
    meta_df = pd.read_csv(METADATA_PATH, low_memory=False)
    meta_df = meta_df.rename(columns={'id': 'item_url'})
    meta_df['id'] = meta_df['item_url'].str.strip('/').str.split('/').str[-1]

    # Manifest JSON: keep only .txt entries, pairing each item id with a
    # fully-qualified URL for its plain-text file.
    with open(MANIFEST_PATH, 'r') as f:
        manifest_rows = json.load(f).get('rows', [])

    text_entries = [
        {
            'id': item_url.strip('/').split('/')[-1],
            'text_url': f"https://{object_key}",
        }
        for file_name, item_url, _, _, object_key in manifest_rows
        if file_name.endswith('.txt')
    ]
    manifest_df = pd.DataFrame(text_entries)

    # Inner-join on id; a book may appear multiple times, keep the first.
    master_df = pd.merge(meta_df, manifest_df, on='id')
    master_df = master_df.drop_duplicates(subset='id')

    print(f"Successfully merged data. Total records: {len(master_df)}")
    return master_df

def clean_text(text, num_words):
    """Collapse all whitespace runs and return the first *num_words* words.

    Args:
        text: raw text to summarize.
        num_words: maximum number of leading words to keep.

    Returns:
        A single-space-separated string of at most *num_words* words.
    """
    normalized = re.sub(r'\s+', ' ', text)
    return ' '.join(normalized.split()[:num_words])

# --- New AI Embedding Functions for MindSpore ---

def mean_pooling(model_output, attention_mask):
    """
    Mean-pool token embeddings into one sentence embedding per input.

    Padding positions are zeroed out via the attention mask, and the summed
    embeddings are divided by the count of real tokens so padding never
    dilutes the mean.

    Args:
        model_output: model forward output; element 0 holds the token
            embeddings — assumed shape (batch, seq_len, hidden); TODO confirm.
        attention_mask: (batch, seq_len) tensor, 1 for real tokens, 0 for
            padding (as produced by the tokenizer).

    Returns:
        Tensor of shape (batch, hidden) with the mean-pooled embeddings.
    """
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    # Cast the (integer) attention mask to the embedding dtype before the
    # multiply; int/float tensor mixing is not reliably promoted by
    # MindSpore ops, and the canonical pooling recipe casts explicitly.
    mask = ops.cast(attention_mask, token_embeddings.dtype)
    input_mask_expanded = ops.expand_dims(mask, -1).broadcast_to(token_embeddings.shape)
    sum_embeddings = ops.reduce_sum(token_embeddings * input_mask_expanded, 1)
    # Clamp the token counts so an all-padding row cannot divide by zero.
    sum_mask = ops.reduce_sum(input_mask_expanded, 1)
    sum_mask = mnp.clip(sum_mask, 1e-9, None)
    return sum_embeddings / sum_mask

def generate_embeddings(sentences, model, tokenizer):
    """
    Encode a batch of sentences into L2-normalized embedding vectors.

    Args:
        sentences: list of strings to embed.
        model: MindSpore transformer model to run the forward pass.
        tokenizer: matching tokenizer; asked for MindSpore tensors via
            return_tensors='ms' — presumably supported by this tokenizer
            build; verify against the installed transformers version.

    Returns:
        numpy.ndarray, one unit-normalized row per sentence, ready to be
        added to a Faiss index.
    """
    # Batch-tokenize with padding/truncation so every row shares one shape.
    encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='ms')

    # Forward pass, then collapse per-token vectors into one per sentence.
    model_output = model(**encoded_input)
    pooled = mean_pooling(model_output, encoded_input['attention_mask'])

    # Unit-normalize each row so L2 distance behaves like cosine distance.
    normalized = ops.L2Normalize(axis=1)(pooled)

    # Hand back a NumPy array for Faiss.
    return normalized.asnumpy()


def main():
    """
    Main processing pipeline.

    End-to-end: configure the MindSpore device, load and merge the book
    tables, download and summarize each book's text, embed the summaries,
    then persist the Faiss index, the processed DataFrame, and the
    index->book-id map to the data directory.
    """
    # --- 1. Setup MindSpore Environment ---
    # Prefer the Ascend NPU; fall back to CPU if the device is unavailable.
    print("Setting up MindSpore context for Ascend...")
    try:
        ms.set_context(device_target="Ascend")
        print("MindSpore context set to Ascend.")
    except Exception as e:
        print(f"Could not set device to Ascend. Make sure CANN and MindSpore are installed correctly. Error: {e}")
        print("Falling back to CPU. This will be slow.")
        ms.set_context(device_target="CPU")


    # --- 2. Load Data ---
    # Work on the first NUM_BOOKS_TO_PROCESS rows of the merged table only.
    master_df = get_master_dataframe()
    subset_df = master_df.head(NUM_BOOKS_TO_PROCESS).copy()

    # --- 3. Download and Clean Text ---
    # Fetch each book's full text and keep a fixed-length word summary.
    # A failed download contributes an empty summary (filtered out below).
    print(f"Processing {len(subset_df)} books...")
    summaries = []
    pbar = tqdm(subset_df.itertuples(), total=len(subset_df), desc="Downloading & Cleaning")
    for row in pbar:
        try:
            response = requests.get(row.text_url, timeout=10)
            response.raise_for_status()
            summary = clean_text(response.text, WORDS_PER_SUMMARY)
            summaries.append(summary)
        except requests.RequestException as e:
            print(f"Warning: Could not download book ID {row.id}. Error: {e}")
            summaries.append("") # Append empty string on failure

    subset_df['summary'] = summaries
    # Filter out books we couldn't get a summary for
    processed_df = subset_df[subset_df['summary'] != ""].copy()
    print(f"Successfully processed {len(processed_df)} books.")

    # --- 4. Generate Embeddings with MindSpore ---
    print("Loading embedding model on Ascend NPU...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModel.from_pretrained(MODEL_NAME)
    model.set_train(False) # Set to evaluation mode

    print("Generating embeddings for all summaries...")
    # Process in batches if memory becomes an issue, but for 1000 it should be fine
    book_summaries = processed_df['summary'].tolist()
    embeddings = generate_embeddings(book_summaries, model, tokenizer)
    print(f"Generated {embeddings.shape[0]} embeddings of dimension {embeddings.shape[1]}")

    # --- 5. Create and Save Faiss Index ---
    # Flat L2 index wrapped in an IDMap so we can store the DataFrame index
    # (which survives the filtering above) as each vector's id.
    dimension = embeddings.shape[1]
    index = faiss.IndexFlatL2(dimension)
    index = faiss.IndexIDMap(index)
    
    ids_to_index = processed_df.index.to_numpy(dtype='int64')
    index.add_with_ids(embeddings, ids_to_index)
    
    print(f"Saving Faiss index to {OUTPUT_INDEX_PATH}")
    faiss.write_index(index, OUTPUT_INDEX_PATH)

    # --- 6. Save Processed Data and ID Map ---
    # Create a map from faiss index (which is the df index) to our actual book id
    # NOTE(review): json.dump stringifies the integer keys; whatever loads
    # this map must convert keys back to int (or compare as str) — verify
    # against the consumer.
    index_to_id_map = processed_df['id'].to_dict()

    print(f"Saving processed book data to {OUTPUT_DATA_PATH}")
    processed_df.to_parquet(OUTPUT_DATA_PATH)
    
    print(f"Saving index-to-ID map to {OUTPUT_ID_MAP_PATH}")
    with open(OUTPUT_ID_MAP_PATH, 'w') as f:
        json.dump(index_to_id_map, f)
        
    print("\nData processing and embedding generation complete!")


if __name__ == "__main__":
    main() 