from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
import numpy as np
from pymongo import MongoClient
from bson import ObjectId
from config.config import Config
from datetime import datetime
import re

class EmbeddingProcessor:
    """Chunk documents, generate sentence embeddings, and run similarity search.

    Documents live in the ``documents`` MongoDB collection; per-chunk vectors
    are stored in ``document_chunks``. Embeddings come from a ModelScope
    sentence-embedding pipeline configured via ``Config.EMBEDDING_MODEL``.
    """

    def __init__(self):
        self.config = Config()
        self.client = MongoClient(self.config.MONGODB_URI)
        self.db = self.client[self.config.DB_NAME]

        # Initialize the ModelScope embedding pipeline once; reused for
        # every chunk to avoid reloading the model.
        self.embedding_pipeline = pipeline(
            Tasks.sentence_embedding,
            model=self.config.EMBEDDING_MODEL
        )

    def chunk_text(self, text, chunk_size=500, overlap=50):
        """Split text into overlapping word-based chunks.

        Args:
            text: Source text; split on whitespace.
            chunk_size: Maximum number of words per chunk. Must be positive.
            overlap: Number of words shared between consecutive chunks.
                Must be smaller than ``chunk_size``.

        Returns:
            List of non-empty chunk strings.

        Raises:
            ValueError: If ``chunk_size`` is not positive or ``overlap`` is
                not smaller than ``chunk_size`` (the window would never
                advance, raising an opaque error or looping to nothing).
        """
        if chunk_size <= 0:
            raise ValueError("chunk_size must be positive")
        if overlap >= chunk_size:
            # step = chunk_size - overlap must stay positive; otherwise
            # range() raises (step 0) or silently yields nothing (step < 0).
            raise ValueError("overlap must be smaller than chunk_size")

        words = text.split()
        step = chunk_size - overlap
        chunks = []
        for start in range(0, len(words), step):
            chunk = ' '.join(words[start:start + chunk_size])
            if chunk.strip():
                chunks.append(chunk)
        return chunks

    def generate_embedding(self, text):
        """Generate an embedding vector for ``text``.

        Returns:
            The pipeline's ``text_embedding`` output (typically a numpy
            array), or ``None`` if the pipeline raised — callers treat
            ``None`` as "skip this chunk".
        """
        try:
            result = self.embedding_pipeline(input=text)
            return result['text_embedding']
        except Exception as e:
            print(f"Error generating embedding: {e}")
            return None

    def process_document(self, doc_id):
        """Chunk a stored document, embed each chunk, and persist the chunks.

        Replaces any previously stored chunks for the document, then marks
        the document's ``embedding_status`` as ``completed`` or ``error``.

        Args:
            doc_id: Document ``_id`` as an ``ObjectId`` or its string form.

        Returns:
            ``{'status': 'success', 'chunks_created': <int>}`` on success
            (counting only chunks actually inserted; chunks whose embedding
            failed are skipped), or ``{'status': 'error', 'message': ...}``.
        """
        try:
            # Normalize once up front; an invalid string raises here and is
            # reported through the error path below.
            if isinstance(doc_id, str):
                doc_id = ObjectId(doc_id)

            document = self.db.documents.find_one({'_id': doc_id})
            if not document:
                return {'status': 'error', 'message': 'Document not found'}

            # Reprocessing: drop stale chunks so they aren't double-counted.
            self.db.document_chunks.delete_many({'document_id': doc_id})

            chunks = self.chunk_text(document['content_text'])

            inserted = 0
            for idx, chunk in enumerate(chunks):
                embedding = self.generate_embedding(chunk)
                if embedding is None:
                    # Embedding failed for this chunk; skip rather than
                    # storing a chunk with no vector.
                    continue

                # Extract page information (for PDFs).
                page_number = self._extract_page_number(
                    chunk, idx, document.get('page_count', 1)
                )

                chunk_data = {
                    'document_id': doc_id,
                    'chunk_index': idx,
                    'chunk_text': chunk,
                    'embedding': embedding.tolist() if isinstance(embedding, np.ndarray) else embedding,
                    'language': document['language'],
                    'page_number': page_number,
                    'collection': document.get('collection'),
                    'tags': document.get('tags', []),
                    'metadata': {
                        'country': document['country'],
                        'regulation_id': document.get('regulation_id', ''),
                        'document_type': document.get('document_type', ''),
                        'issuing_authority': document.get('issuing_authority', ''),
                        'document_name': document.get('file_name', document.get('original_name', ''))
                    }
                }

                self.db.document_chunks.insert_one(chunk_data)
                inserted += 1

            self.db.documents.update_one(
                {'_id': doc_id},
                {'$set': {'embedding_status': 'completed', 'last_updated': datetime.now()}}
            )

            # Report chunks actually stored, not merely produced.
            return {'status': 'success', 'chunks_created': inserted}

        except Exception as e:
            # Best-effort status update: doc_id may still be an invalid
            # string (ObjectId() was what failed), or the DB itself may be
            # down — never let the status write mask the original error.
            try:
                error_id = ObjectId(doc_id) if isinstance(doc_id, str) else doc_id
                self.db.documents.update_one(
                    {'_id': error_id},
                    {'$set': {'embedding_status': 'error', 'error_message': str(e)}}
                )
            except Exception:
                pass
            return {'status': 'error', 'message': str(e)}

    def _extract_page_number(self, chunk_text, chunk_index, total_pages):
        """Best-effort page number for a chunk.

        Looks for an explicit ``--- Page N`` marker in the chunk text;
        otherwise estimates from the chunk index (assuming roughly three
        chunks per page), clamped to ``total_pages``. Falls back to 1.
        """
        try:
            # Look for page markers (e.g., "--- Page X ---") injected by
            # the PDF extractor.
            page_match = re.search(r'---\s*Page\s*(\d+)', chunk_text)
            if page_match:
                return int(page_match.group(1))

            if total_pages > 1:
                # Rough heuristic: assume chunks are evenly distributed,
                # ~3 chunks per page.
                return min(chunk_index // 3 + 1, total_pages)

            return 1
        except Exception:
            # Never let page extraction break ingestion; default to page 1.
            return 1

    def get_similar_chunks(self, query_embedding, collection_filter=None, top_k=5):
        """Return the ``top_k`` stored chunks most similar to the query.

        Args:
            query_embedding: Query vector (list or numpy array).
            collection_filter: Restrict to one document collection;
                ``None`` or ``'ALL DOCS'`` searches everything.
            top_k: Number of results to return.

        Returns:
            List of ``{'chunk': <chunk doc>, 'similarity': <float>}`` sorted
            by descending cosine similarity; ``[]`` on error or no matches.
        """
        try:
            query_conditions = {}
            if collection_filter and collection_filter != 'ALL DOCS':
                query_conditions['collection'] = collection_filter

            # NOTE(review): brute-force scan of all matching chunks; fine
            # for small corpora, consider a vector index as data grows.
            relevant_chunks = list(self.db.document_chunks.find(query_conditions))
            if not relevant_chunks:
                return []

            # Hoist the query vector and its norm out of the loop — they
            # are invariant across chunks.
            query_emb = np.asarray(query_embedding, dtype=float)
            query_norm = np.linalg.norm(query_emb)

            similarities = []
            for chunk in relevant_chunks:
                chunk_emb = np.asarray(chunk['embedding'], dtype=float)
                denom = query_norm * np.linalg.norm(chunk_emb)
                # Guard zero-length vectors: define similarity as 0.0
                # instead of producing NaN that poisons the sort.
                similarity = float(np.dot(query_emb, chunk_emb) / denom) if denom else 0.0
                similarities.append({
                    'chunk': chunk,
                    'similarity': similarity
                })

            similarities.sort(key=lambda x: x['similarity'], reverse=True)
            return similarities[:top_k]

        except Exception as e:
            print(f"Error in get_similar_chunks: {e}")
            return []