import os
import warnings

# Environment setup at the beginning
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
os.environ['HF_HUB_DISABLE_SYMLINKS_WARNING'] = '1'
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
os.environ['CUDA_LAUNCH_BLOCKING'] = '0'  # Disable for performance

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.retrievers import EnsembleRetriever

from bs4 import BeautifulSoup
from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM
from time import time
from tqdm import tqdm
from langchain_community.retrievers import BM25Retriever
import numpy as np
import torch
import chromadb
import torch.nn.functional as F
import re
from langchain_core.documents import Document
from langchain.storage import InMemoryStore
from langchain.retrievers import ParentDocumentRetriever
from langchain_text_splitters import CharacterTextSplitter
import gc

from typing import List, Dict, Optional, Tuple, Union, Any
import pandas as pd
import numpy as np
import jieba
import re
from datetime import datetime


class QwenEmbeddings:
    """Embedding wrapper around a local Qwen3 embedding model.

    Exposes the LangChain embeddings interface (``embed_documents`` /
    ``embed_query``).  The model is loaded in fp16, texts are embedded with
    last-token pooling + L2 normalization in small batches with explicit
    CUDA-cache clearing to keep peak GPU memory low.  If the model cannot
    be loaded (or embedding keeps failing at batch size 1), calls degrade
    to zero vectors of dimension 1024 instead of raising.
    """

    def __init__(self, model_path="models/Qwen/Qwen3-Embedding-0.6B", device="cuda:0", max_batch_size=8):
        self.model_path = model_path
        self.device = device
        # Halved automatically when an embedding batch fails (see
        # _embed_batch_with_memory_control), never goes below 1.
        self.max_batch_size = max_batch_size
        self.model = None
        self.tokenizer = None
        self._load_model()

    def _load_model(self):
        """Load tokenizer + model; on any failure fall back to zero embeddings."""
        try:
            print(f"Loading Qwen embedding model from: {self.model_path}")

            # Clear CUDA cache before loading to maximize available memory.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )

            self.model = AutoModel.from_pretrained(
                self.model_path,
                torch_dtype=torch.float16,
                trust_remote_code=True,
                low_cpu_mem_usage=True,
                device_map=None  # load weights on CPU first, move explicitly below
            )

            # Move to the target device only after the weights are materialized.
            self.model = self.model.to(self.device).eval()
            print("Qwen embedding model loaded successfully")

        except Exception as e:
            print(f"Failed to load Qwen embedding model: {e}")
            self._create_dummy_model()

    def _create_dummy_model(self):
        """Disable the model so all embedding calls return zero vectors."""
        print("Using dummy embeddings as fallback")
        self.model = None
        self.tokenizer = None

    def _last_token_pool(self, last_hidden_states, attention_mask):
        """Pool hidden states by taking each sequence's last *real* token.

        Handles both left-padded batches (every sequence ends at the final
        position) and right-padded batches (use per-sequence lengths).
        """
        left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
        if left_padding:
            return last_hidden_states[:, -1]
        else:
            sequence_lengths = attention_mask.sum(dim=1) - 1
            batch_size = last_hidden_states.shape[0]
            return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]

    def embed_documents(self, texts):
        """Embed a list of documents; returns [] for empty input."""
        if not texts:
            return []
        return self._embed_batch_with_memory_control(texts)

    def embed_query(self, text):
        """Embed a single query string; zero vector (dim 1024) for empty input."""
        if not text:
            return [0.0] * 1024
        result = self._embed_batch_with_memory_control([text])
        return result[0] if result else [0.0] * 1024

    def _embed_batch_with_memory_control(self, texts):
        """Embed ``texts`` in batches of at most ``max_batch_size``.

        On failure, halves ``max_batch_size`` and retries; once the batch
        size is already 1 (or no model is loaded) it returns zero vectors.
        """
        if self.model is None or self.tokenizer is None:
            return [[0.0] * 1024 for _ in texts]

        # Bug fix: compute the batch size *before* the try block so the
        # exception handler below can never reference an unbound local.
        batch_size = min(self.max_batch_size, len(texts))

        try:
            all_embeddings = []

            for i in range(0, len(texts), batch_size):
                batch_texts = texts[i:i+batch_size]

                # Clear cache before each batch to reduce fragmentation/OOM risk.
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

                with torch.no_grad():
                    # Limit input length to prevent OOM.
                    inputs = self.tokenizer(
                        batch_texts,
                        padding=True,
                        truncation=True,
                        max_length=256,  # reduced from 512
                        return_tensors="pt"
                    )

                    inputs = {k: v.to(self.device) for k, v in inputs.items()}

                    outputs = self.model(**inputs)
                    embeddings = self._last_token_pool(outputs.last_hidden_state, inputs['attention_mask'])
                    embeddings = F.normalize(embeddings, p=2, dim=1)

                    # Move to CPU immediately to free GPU memory.
                    batch_embeddings = embeddings.cpu().numpy().tolist()
                    all_embeddings.extend(batch_embeddings)

                    # Drop intermediate tensors eagerly.
                    del inputs, outputs, embeddings

                # Garbage collection after each batch.
                gc.collect()

            return all_embeddings

        except Exception as e:
            print(f"Embedding failed with error: {e}")
            # Retry with a halved batch size; give up once we are at 1.
            if batch_size > 1:
                print("Retrying with smaller batch size...")
                self.max_batch_size = max(1, self.max_batch_size // 2)
                return self._embed_batch_with_memory_control(texts)
            else:
                return [[0.0] * 1024 for _ in texts]

class QwenReranker:
    """Pointwise reranker built on a Qwen causal-LM checkpoint.

    Scores each (query, document) pair by prompting the model and comparing
    the logits of the "yes" vs "no" tokens at the final position; the
    softmax probability of "yes" is the relevance score in [0, 1].  All
    failure paths (model not loaded, per-item OOM, any other exception)
    degrade to a neutral 0.5 score instead of raising.
    """

    def __init__(self, model_path="models/Qwen/Qwen3-Reranker-0.6B", device="cuda:1", max_batch_size=4):
        self.model_path = model_path
        self.device = device
        self.max_batch_size = max_batch_size
        self.model = None
        self.tokenizer = None
        # Token ids for "no"/"yes", resolved after the tokenizer is loaded.
        self.token_false_id = None
        self.token_true_id = None
        self._load_model()

    def _load_model(self):
        """Load tokenizer + causal LM; leave ``model`` as None on failure."""
        try:
            print(f"Loading Qwen reranker model from: {self.model_path}")

            # Clear CUDA cache before loading to maximize available memory.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )

            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                torch_dtype=torch.float16,
                trust_remote_code=True,
                low_cpu_mem_usage=True,
                device_map=None  # load weights on CPU first, move explicitly below
            )

            self.model = self.model.to(self.device).eval()

            self.token_false_id = self.tokenizer.convert_tokens_to_ids("no")
            self.token_true_id = self.tokenizer.convert_tokens_to_ids("yes")
            print("Qwen reranker model loaded successfully")

        except Exception as e:
            print(f"Failed to load Qwen reranker model: {e}")
            self.model = None

    def _format_input(self, query, document):
        """Build the chat-style yes/no relevance prompt for one pair."""
        instruction = 'Given a web search query, retrieve relevant passages that answer the query'
        return f"<|im_start|>system\n{instruction}<|im_end|>\n<|im_start|>user\n<Query>: {query}\n<Document>: {document}<|im_end|>\n<|im_start|>assistant\n"

    def compute_score(self, sentence_pairs, normalize=True, batch_size=4):
        """Score (query, document) pairs; returns one float in [0, 1] per pair.

        Pairs are processed one at a time inside small batches so an OOM on
        a single item only costs that item (scored 0.5), not the whole call.
        """
        if self.model is None:
            return [0.5 for _ in sentence_pairs]

        try:
            scores = []
            # Cap the requested batch size to stay within memory limits.
            effective_batch_size = min(batch_size, self.max_batch_size)

            with torch.no_grad():
                for i in range(0, len(sentence_pairs), effective_batch_size):
                    batch_pairs = sentence_pairs[i:i+effective_batch_size]

                    # Clear cache before each batch.
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()

                    for query, doc in batch_pairs:
                        try:
                            # Truncate long documents to prevent OOM.
                            doc_truncated = doc[:1000] if len(doc) > 1000 else doc
                            formatted_input = self._format_input(query, doc_truncated)

                            inputs = self.tokenizer(
                                formatted_input,
                                return_tensors="pt",
                                truncation=True,
                                max_length=256  # reduced from 512
                            )

                            inputs = {k: v.to(self.device) for k, v in inputs.items()}

                            outputs = self.model(**inputs)

                            # Compare "yes" vs "no" logits at the last position.
                            last_token_logits = outputs.logits[:, -1, :]
                            true_logits = last_token_logits[:, self.token_true_id]
                            false_logits = last_token_logits[:, self.token_false_id]

                            scores_tensor = torch.stack([false_logits, true_logits], dim=1)
                            probabilities = torch.nn.functional.softmax(scores_tensor, dim=1)
                            score = probabilities[:, 1].item()
                            scores.append(score)

                            # Release tensors eagerly to free GPU memory.
                            del inputs, outputs, last_token_logits, scores_tensor, probabilities

                        except RuntimeError as e:
                            if "out of memory" in str(e):
                                print("OOM in reranking, using default score")
                                scores.append(0.5)
                                # Clear cache and continue with the next pair.
                                if torch.cuda.is_available():
                                    torch.cuda.empty_cache()
                            else:
                                # Bug fix: bare raise preserves the original
                                # traceback (``raise e`` would rewrite it).
                                raise

                    # Garbage collection after each batch.
                    gc.collect()

            return scores

        except Exception as e:
            print(f"Reranking failed: {e}")
            return [0.5 for _ in sentence_pairs]

class Retriever2:
    """Hybrid (dense + BM25) document retriever with aggressive memory control.

    Builds a LangChain EnsembleRetriever combining a Qwen-embedding-backed
    ParentDocumentRetriever (Chroma vector store) with BM25, reranks
    candidates with a Qwen reranker, and augments answers with structured
    lookups (Grammy / Oscar / IMDB / finance data).  Every model load and
    retrieval step has a fallback so partial failures degrade quality
    instead of crashing.
    """

    def __init__(self, device1, device2, batch_size=32, tokenizer=None):  # Reduced batch size
        print("Initializing Retriever2...")

        # Memory optimization settings (deliberately conservative).
        self.max_doc_length = 8000  # Reduced from 12000
        self.max_docs_per_init = 3    # Reduced from 5
        self.chunk_size = 200         # Reduced chunk sizes
        self.child_chunk_size = 80

        # Model paths
        embedding_path = "models/Qwen/Qwen3-Embedding-0.6B"
        reranker_path = "models/Qwen/Qwen3-Reranker-0.6B"
        token_path = "models/Qwen/Qwen3-8B"

        # Patch torch.load (strips 'weights_only' for old checkpoints).
        self._patch_torch_load()

        # Load embedding models; fall back to random embeddings on failure.
        try:
            print("Loading Qwen embedding models with memory optimization...")
            self.hf_embeddings = QwenEmbeddings(embedding_path, device1, max_batch_size=4)
            self.english_embeddings = QwenEmbeddings(embedding_path, device1, max_batch_size=4)
            self.use_dual_embeddings = True
            print("Dual Qwen embedding models loaded")
        except Exception as e:
            print(f"Failed to load Qwen embeddings: {e}")
            self.hf_embeddings = self._create_dummy_embeddings()
            self.use_dual_embeddings = False

        # Load reranker; fall back to a random-score reranker on failure.
        try:
            self.reranker = QwenReranker(reranker_path, device2, max_batch_size=2)
        except Exception as e:
            print(f"Failed to load Qwen reranker: {e}")
            self.reranker = self._create_dummy_reranker()

        # Tokenizer: reuse the caller-supplied one when available.
        if tokenizer is not None:
            self.tokenizer = tokenizer
            print("Using external tokenizer from RAGModel")
        else:
            self.tokenizer = self._load_tokenizer(token_path)

        # Text splitters for the parent/child document scheme.
        self.parent_text_splitter = CharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=20,
            separator=' '
        )
        self.child_text_splitter = CharacterTextSplitter(
            chunk_size=self.child_chunk_size,
            chunk_overlap=10,
            separator=' '
        )

        # Structured data files (Grammy / Oscar / IMDB / finance).
        self._load_data_files()

        print("Retriever2 initialization completed with memory optimization")

    def _patch_torch_load(self):
        """Patch torch.load to drop the 'weights_only' kwarg for compatibility."""
        try:
            self._original_torch_load = torch.load

            def patched_load(*args, **kwargs):
                kwargs.pop('weights_only', None)
                return self._original_torch_load(*args, **kwargs)

            torch.load = patched_load
            print("Torch.load patched for compatibility")
        except Exception as e:
            print(f"Could not patch torch.load: {e}")

    def _restore_torch_load(self):
        """Restore the original torch.load saved by _patch_torch_load."""
        try:
            if hasattr(self, '_original_torch_load'):
                torch.load = self._original_torch_load
        except Exception as e:
            print(f"Could not restore torch.load: {e}")

    def _create_dummy_embeddings(self):
        """Return a stand-in embeddings object that yields random 1024-d vectors."""
        class DummyEmbeddings:
            def __init__(self):
                self.dimension = 1024

            def embed_documents(self, texts):
                print("Warning: Using dummy embeddings for documents")
                return [np.random.rand(self.dimension).tolist() for _ in texts]

            def embed_query(self, text):
                print("Warning: Using dummy embeddings for query")
                return np.random.rand(self.dimension).tolist()

        return DummyEmbeddings()

    def _create_dummy_reranker(self):
        """Return a stand-in reranker that yields random scores in [0, 1)."""
        class DummyReranker:
            def compute_score(self, sentence_pairs, normalize=True, batch_size=16):
                import random
                return [random.random() for _ in sentence_pairs]
        return DummyReranker()

    def _load_tokenizer(self, token_path):
        """Load the tokenizer, falling back to a whitespace-based dummy."""
        try:
            tokenizer = AutoTokenizer.from_pretrained(token_path, trust_remote_code=True)
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            print(f"Tokenizer loaded: {token_path}")
            return tokenizer
        except Exception as e:
            print(f"Tokenizer loading failed: {e}")

            # Minimal stand-in implementing only the methods used by
            # init_retriever (encode / decode / __call__).
            class DummyTokenizer:
                def encode(self, text, max_length=None, truncation=False, **kwargs):
                    return list(range(min(len(text.split()), max_length or 256)))
                def decode(self, tokens, skip_special_tokens=True):
                    return " ".join([f"token_{i}" for i in tokens])
                def __call__(self, text, max_length=None, truncation=False, return_tensors=None, add_special_tokens=True):
                    return {"input_ids": self.encode(text, max_length, truncation)}

            print("Using dummy tokenizer")
            return DummyTokenizer()

    def _load_data_files(self):
        """Load structured .npy data files; initialize empty maps on failure."""
        try:
            self.oscar_map = np.load('models/processed_data/oscar_map.npy', allow_pickle=True).tolist()
            self.oscar_map_dlc = np.load('models/processed_data/oscar_map_dlc.npy', allow_pickle=True).tolist()
            self.grammy_map = np.load('models/processed_data/grammy.npy', allow_pickle=True).tolist()

            # Index movies by both title and original title (lowercased).
            self.imdb_movie_dataset = np.load('models/processed_data/all_imdb_movie.npy', allow_pickle=True).tolist()
            self.movie_index = {}
            for idx, data in enumerate(self.imdb_movie_dataset):
                if 'title' in data and 'original_title' in data:
                    self.movie_index[data['title'].lower()] = idx
                    self.movie_index[data['original_title'].lower()] = idx

            finance_data = np.load('models/processed_data/finance_data.npy', allow_pickle=True).tolist()
            self.ticker_name_map, self.ticker_info_map, self.ticker_name_set_map = finance_data

            print("Data files loaded successfully")
        except Exception as e:
            print(f"Data files loading failed: {e}")
            # Initialize empty data structures so lookups still work.
            self.oscar_map = {}
            self.oscar_map_dlc = {}
            self.grammy_map = {}
            self.imdb_movie_dataset = []
            self.movie_index = {}
            self.ticker_name_map = {}
            self.ticker_info_map = {}
            self.ticker_name_set_map = {}

    def detect_language(self, query):
        """Classify a query as 'zh', 'en', or 'mixed' by character ratio."""
        chinese_chars = len([c for c in query if '\u4e00' <= c <= '\u9fff'])
        english_chars = len([c for c in query if c.isalpha() and ord(c) < 256])
        total_chars = chinese_chars + english_chars

        if total_chars == 0:
            return 'mixed'

        chinese_ratio = chinese_chars / total_chars
        # >50% Chinese -> zh; 10-50% -> mixed; otherwise en.
        return 'zh' if chinese_ratio > 0.5 else ('mixed' if chinese_ratio > 0.1 else 'en')

    def get_optimal_embedding(self, query):
        """Pick the embedding model best suited to the query's language."""
        if not self.use_dual_embeddings:
            return self.hf_embeddings

        lang = self.detect_language(query)
        return self.english_embeddings if lang == 'en' else self.hf_embeddings

    def get_contextual_info(self, query):
        """Collect structured-data context (awards, movies, finance) for a query."""
        contexts = []

        # Grammy information (keyed by 4-digit year found in the query).
        if 'grammy' in query.lower():
            year_match = re.search(r'\b(19|20)\d{2}\b', query)
            if year_match and int(year_match.group()) in self.grammy_map:
                contexts.append('<Doc>' + self.grammy_map[int(year_match.group())] + '</Doc>')

        # Oscar information, reranked to the most relevant entries.
        if any(word in query.lower() for word in ['oscar', 'academy']):
            year_match = re.search(r'\b(19|20)\d{2}\b', query)
            if year_match and int(year_match.group()) in self.oscar_map_dlc:
                year = int(year_match.group())
                description = self.oscar_map.get(year, self.oscar_map_dlc[year])

                # Limit the candidate list to prevent OOM during reranking.
                if len(description) > 20:
                    description = description[:20]

                sentence_pairs = [[query, doc] for doc in description]
                sim = self.reranker.compute_score(sentence_pairs, normalize=True)
                indices = torch.topk(torch.tensor(sim), min(5, len(description))).indices  # Reduced from 10
                contexts.append('\n'.join([description[idx] for idx in indices]))

        # Movie information (first title found in the query).
        query_lower = query.lower()
        for movie_name in list(self.movie_index.keys())[:100]:  # Limit search scope
            if movie_name in query_lower:
                movie_info = self.imdb_movie_dataset[self.movie_index[movie_name]]
                context = f'<Doc>Information about {movie_name}: '
                for key, value in movie_info.items():
                    if key not in ['cast', 'crew'] and len(context) < 500:  # Limit context length
                        context += f'{key}: {value}. '
                contexts.append(context + '</Doc>')
                break

        # Financial information (ticker symbol or company-name lookup).
        for word in query.split()[:10]:  # Limit word processing
            word_lower = word.lower().strip()
            if word_lower in self.ticker_info_map:
                info = self.ticker_info_map[word_lower][:500]  # Truncate long info
                contexts.append('<Doc>' + info + '</Doc>')
                break
            elif word_lower in self.ticker_name_map:
                ticker = self.ticker_name_map[word_lower]
                if ticker in self.ticker_info_map:
                    info = self.ticker_info_map[ticker][:500]  # Truncate long info
                    contexts.append('<Doc>' + info + '</Doc>')
                    break

        return '\n'.join(contexts)

    def init_retriever(self, search_results, recall_k=30, task3_topk=None, max_length=None,
                    method='ensemble', query=None):
        """Build ``self.retriever`` from raw HTML search results.

        Parses and cleans each result, truncates aggressively via the
        tokenizer, then builds a dense ParentDocumentRetriever and a BM25
        retriever (combined per ``method``).  Returns True on success.
        """
        st = time()
        docs = []

        # Fall back to conservative defaults, then clamp to the configured caps.
        if task3_topk is None:
            task3_topk = self.max_docs_per_init
        if max_length is None:
            max_length = self.max_doc_length
        task3_topk = min(task3_topk, self.max_docs_per_init)
        max_length = min(max_length, self.max_doc_length)

        optimal_embeddings = self.get_optimal_embedding(query) if query else self.hf_embeddings

        # Clear memory before processing.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        for idx, html in enumerate(search_results[:task3_topk]):
            try:
                html_content = html.get('page_content', '') or html.get('page_result', '')

                if not html_content:
                    continue

                # Parse HTML and strip boilerplate elements.
                soup = BeautifulSoup(html_content, 'html.parser')
                for script in soup(["script", "style", "nav", "header", "footer"]):
                    script.decompose()

                # Prefer the main content region when present.
                # Bug fix: find() treated 'div[class*="content"]' as a literal
                # tag name (never matches); use a CSS selector for that case.
                main_content = soup.find(['main', 'article']) or soup.select_one('div[class*="content"]')
                if main_content:
                    text = main_content.get_text(separator=' ', strip=True)
                else:
                    text = soup.get_text(separator=' ', strip=True)

                # Normalize whitespace and drop leading non-word characters.
                text = re.sub(r'\s+', ' ', text).strip()
                text = re.sub(r'^[^\w]*', '', text)

                # Prepend the snippet (high-quality summary) when available.
                snippet = html.get('page_snippet', '')
                title = html.get('title', '')

                if snippet and len(snippet.strip()) > 10:
                    text = f"{title}: {snippet}\n\n{text}" if title else f"{snippet}\n\n{text}"

                # Aggressive truncation measured in tokens, not characters.
                try:
                    if hasattr(self.tokenizer, '__call__'):
                        inputs = self.tokenizer(
                            text,
                            max_length=max_length // 2,  # Use half the max length initially
                            truncation=True,
                            return_tensors=None,
                            add_special_tokens=False
                        )["input_ids"]
                    else:
                        inputs = self.tokenizer.encode(text, max_length=max_length // 2, truncation=True)

                    # Further truncate if still too long.
                    if len(inputs) > max_length // 3:
                        inputs = inputs[:max_length // 3]

                    text = self.tokenizer.decode(inputs, skip_special_tokens=True)

                except Exception:
                    # Fallback: simple character-level truncation.
                    text = text[:max_length // 4]

                docs.append(Document(page_content=text, metadata={"start_index": idx}))

                # Memory cleanup after each document.
                del soup, text, html_content

            except Exception as e:
                print(f"Error processing document {idx}: {e}")
                continue

        if len(docs) == 0:
            print("No documents processed successfully")
            return False

        try:
            # Clear memory before creating retrievers.
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            # Timestamped collection name keeps runs isolated in Chroma.
            vectorstore = Chroma(
                collection_name=f"qwen_split_parents_{int(time())}",
                embedding_function=optimal_embeddings
            )

            hf_retriever = ParentDocumentRetriever(
                vectorstore=vectorstore,
                docstore=InMemoryStore(),
                child_splitter=self.child_text_splitter,
                parent_splitter=self.parent_text_splitter,
            )

            # Add documents in very small batches to cap peak memory.
            batch_size = 2
            for i in range(0, len(docs), batch_size):
                batch_docs = docs[i:i+batch_size]
                hf_retriever.add_documents(batch_docs)

                gc.collect()
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

            # Lexical retriever over the same documents.
            bm25_retriever = BM25Retriever.from_texts(
                [doc.page_content for doc in docs],
                metadatas=[doc.metadata for doc in docs]
            )
            bm25_retriever.k = min(recall_k, 20)  # Limit recall

            # Combine per the requested method.
            if method == 'ensemble':
                self.retriever = EnsembleRetriever(
                    retrievers=[hf_retriever, bm25_retriever],
                    weights=[0.6, 0.4]
                )
            elif method == 'bm25':
                self.retriever = bm25_retriever
            else:
                self.retriever = hf_retriever

            print(f'Memory-optimized retriever initialized in {time()-st:.2f}s with {len(docs)} documents')
            return True

        except Exception as e:
            print(f"Error initializing retriever: {e}")
            # Clean up on failure.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            gc.collect()
            return False

    def load_documents(self, documents):
        """Compatibility shim for RAGModel: wrap raw docs and init the retriever."""
        try:
            # Normalize plain strings into the search-result dict format.
            formatted_results = []
            for doc in documents:
                if isinstance(doc, str):
                    formatted_results.append({
                        'page_content': doc,
                        'page_snippet': doc[:200] + '...' if len(doc) > 200 else doc,
                        'title': 'Document'
                    })
                else:
                    formatted_results.append(doc)

            # Delegate to the standard initialization path.
            return self.init_retriever(formatted_results, method='ensemble')

        except Exception as e:
            print(f"Error in load_documents: {e}")
            return False

    def get_result(self, query, k=3, rerank=True):  # Reduced default k
        """Retrieve (and optionally rerank) the top-k passage texts for a query."""
        try:
            # Clear memory before retrieval.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            docs = self.retriever.get_relevant_documents(query)
            print(f'Retrieved {len(docs)} documents with Qwen embeddings')

            if not docs:
                return [""]

            # Limit the candidate pool to prevent OOM during reranking.
            max_docs = min(len(docs), k * 2, 10)
            docs = docs[:max_docs]

            if len(docs) <= k:
                return [doc.page_content for doc in docs]

            if rerank:
                try:
                    # Truncate long documents before reranking.
                    truncated_docs = []
                    for doc in docs:
                        content = doc.page_content
                        if len(content) > 800:
                            content = content[:800] + "..."
                        truncated_docs.append(content)

                    sentence_pairs = [[query, content] for content in truncated_docs]

                    # Very small batches keep reranking within memory limits.
                    sim = self.reranker.compute_score(sentence_pairs, normalize=True, batch_size=2)
                    indices = torch.topk(torch.tensor(sim), min(k, len(docs))).indices
                    result_docs = [docs[idx].page_content for idx in indices]

                    print(f"Reranked with Qwen reranker, selected {len(result_docs)} documents")
                    return result_docs

                except Exception as e:
                    print(f"Qwen reranking failed: {e}, using top-k without reranking")
                    return [doc.page_content for doc in docs[:k]]
            else:
                return [doc.page_content for doc in docs[:k]]

        except Exception as e:
            print(f"Get result failed: {e}")
            return [""]
        finally:
            # Always clean up memory.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            gc.collect()

    def get_context_documents(self, query, k=3, **kwargs):
        """Return a combined context string: structured info + retrieved docs."""
        try:
            context_info = self.get_contextual_info(query)

            if hasattr(self, 'retriever'):
                docs = self.get_result(query, k=k, rerank=True)

                if context_info:
                    # Cap the total context length at 4000 characters.
                    combined_docs = [context_info] + docs
                    total_length = sum(len(doc) for doc in combined_docs)

                    if total_length > 4000:
                        # Truncate retrieved docs proportionally.
                        max_doc_length = (4000 - len(context_info)) // max(len(docs), 1)
                        docs = [doc[:max_doc_length] for doc in docs]

                    return '\n\n'.join([context_info] + docs)
                else:
                    return '\n\n'.join(docs)
            else:
                # No retriever initialized yet: structured context only.
                return context_info if context_info else ""

        except Exception as e:
            print(f"Get context documents failed: {e}")
            return ""
        finally:
            gc.collect()

    def clear(self):
        """Release retriever, models, Chroma collections, and GPU memory."""
        try:
            print("Clearing retriever resources...")

            if hasattr(self, 'retriever'):
                del self.retriever

            # Drop model references so GPU memory can actually be freed.
            # Bug fix: getattr guards cover partially-initialized instances
            # (e.g. english_embeddings is never created when the embedding
            # load fails), so one missing attribute no longer aborts the
            # whole cleanup pass.
            for attr_name in ('hf_embeddings', 'english_embeddings', 'reranker'):
                holder = getattr(self, attr_name, None)
                if holder is not None and getattr(holder, 'model', None) is not None:
                    del holder.model
                    holder.model = None

        except Exception as e:
            print(f"Error clearing retriever components: {e}")

        try:
            # Delete any Chroma collections created by init_retriever.
            client = chromadb.Client()
            try:
                collections = client.list_collections()
                for collection in collections:
                    if "qwen_split_parents" in collection.name:
                        client.delete_collection(collection.name)
                        print(f"Deleted collection: {collection.name}")
            except Exception as e:
                print(f"Error clearing Chroma collections: {e}")
        except Exception as e:
            print(f"Error accessing Chroma client: {e}")

        try:
            # Aggressive CUDA memory cleanup with a usage report.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
                torch.cuda.synchronize()

                memory_allocated = torch.cuda.memory_allocated()
                memory_cached = torch.cuda.memory_reserved()

                print(f"GPU memory after cleanup - Allocated: {memory_allocated/1024**3:.2f}GB, Cached: {memory_cached/1024**3:.2f}GB")

        except Exception as e:
            print(f"Error clearing CUDA cache: {e}")

        try:
            # Repeated GC passes to break reference cycles.
            gc.collect()
            for _ in range(3):
                gc.collect()

        except Exception as e:
            print(f"Error in garbage collection: {e}")

        # Undo the torch.load patch applied in __init__.
        self._restore_torch_load()

        print("Resource clearing completed")

    def __del__(self):
        """Destructor: best-effort cleanup via clear(), never raises."""
        try:
            print("Retriever2 destructor called")
            self.clear()
        except Exception as e:
            print(f"Error in destructor: {e}")
            try:
                # Last-resort cleanup.
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
                gc.collect()
            except:
                pass



class ChineseCSVEmbeddings:
    """Lightweight embedding model for Chinese CSV data.

    Wraps a Qwen embedding model when it loads successfully; otherwise falls
    back to a deterministic bag-of-characters vector so retrieval still works
    without the neural model.
    """

    def __init__(self, model_path="models/Qwen/Qwen3-Embedding-0.6B", device="cuda:0"):
        """
        Args:
            model_path: local path of the Qwen embedding checkpoint.
            device: device that `embed_text` moves tokenized inputs to.
        """
        self.model_path = model_path
        self.device = device
        self.model = None       # neural model, or None when using the fallback
        self.tokenizer = None
        self._load_model()

    def _load_model(self):
        """Load tokenizer + model; switch to the fallback scheme on any failure."""
        try:
            print(f"加载中文嵌入模型: {self.model_path}")
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )
            # NOTE(review): device_map="auto" may place layers on a device other
            # than self.device, while embed_text moves inputs to self.device —
            # confirm these agree in multi-GPU setups.
            self.model = AutoModel.from_pretrained(
                self.model_path,
                torch_dtype=torch.float16,
                trust_remote_code=True,
                device_map="auto"
            ).eval()
            print("中文嵌入模型加载成功")
        except Exception as e:
            print(f"嵌入模型加载失败: {e}")
            self._create_fallback()

    def _create_fallback(self):
        """Disable the neural model so embed_text uses the simple char scheme."""
        print("使用简化的词向量方案")
        self.model = None
        self.tokenizer = None

    def embed_text(self, text: str) -> np.ndarray:
        """Embed a single text.

        Returns:
            A 1-D L2-normalized numpy vector (model hidden size, or 384 for
            the fallback scheme).
        """
        if self.model is None:
            return self._simple_embedding(text)

        try:
            with torch.no_grad():
                inputs = self.tokenizer(
                    text,
                    return_tensors="pt",
                    max_length=512,
                    truncation=True
                ).to(self.device)

                outputs = self.model(**inputs)
                # Mean-pool the token states, then L2-normalize.
                embeddings = outputs.last_hidden_state.mean(dim=1)
                embeddings = F.normalize(embeddings, p=2, dim=1)

                return embeddings.cpu().numpy().flatten()
        except Exception as e:
            print(f"嵌入失败: {e}")
            return self._simple_embedding(text)

    def _simple_embedding(self, text: str) -> np.ndarray:
        """Deterministic 384-dim bag-of-characters fallback embedding.

        Bugfix: the original used hash(char), which is randomized per process
        (PYTHONHASHSEED), so fallback embeddings were not reproducible across
        runs — any stored vectors became useless after a restart. ord(char)
        is stable across processes.
        """
        words = jieba.lcut(text)
        vector = np.zeros(384)
        for i, word in enumerate(words[:20]):       # first 20 words only
            for j, char in enumerate(word[:5]):     # first 5 chars of each word
                idx = (ord(char) + i * 7 + j * 13) % 384
                vector[idx] += 1.0

        # L2-normalize (guard against the all-zero vector for empty text).
        norm = np.linalg.norm(vector)
        if norm > 0:
            vector = vector / norm

        return vector

class ChineseCSVRetriever:
    """Retriever specialized for Chinese CSV data (calendar / financial / email).

    Each CSV row is flattened into a descriptive text string, embedded with
    ChineseCSVEmbeddings, and searched by cosine similarity.
    """

    def __init__(self, csv_files: Dict[str, str], device: str = "cuda:0"):
        """
        初始化中文CSV检索器
        Args:
            csv_files: 文件路径字典 {'calendar': 'path/to/calendar.csv', ...}
            device: device forwarded to the embedding model.
        """
        self.csv_files = csv_files
        # data_type -> {'dataframe', 'text_representations', 'embeddings'}
        self.data: Dict[str, Dict[str, Any]] = {}
        self.embeddings_model = ChineseCSVEmbeddings(device=device)
        self.text_embeddings: Dict[str, Any] = {}
        self.retriever = None

        # Column-name hints per data type (Chinese + English variants).
        self.field_mappings = {
            'calendar': {
                'time_fields': ['日期', '时间', 'date', 'time', '开始时间', '结束时间'],
                'text_fields': ['标题', '描述', '地点', 'title', 'description', 'location', '会议主题', '参会人员'],
                'key_fields': ['负责人', '联系人', 'organizer', '会议室']
            },
            'financial': {
                'amount_fields': ['金额', '费用', 'amount', '应缴', '总计', '合计'],
                'text_fields': ['描述', '项目', 'description', 'item', '账单类型'],
                'key_fields': ['账户', '持有人', '收费方', 'account_holder']
            },
            'email': {
                'time_fields': ['发送时间', '时间', 'send_time', 'timestamp'],
                'text_fields': ['主题', '内容', 'subject', 'content', '正文', '摘要'],
                'key_fields': ['发件人', '收件人', 'sender', 'recipient', '发送者']
            }
        }

        self._load_data()

    def _detect_data_type(self, filename: str, df: pd.DataFrame) -> str:
        """Classify a CSV as 'calendar' / 'financial' / 'email' / 'general'.

        The filename is checked first; if inconclusive, the (lowercased)
        column names are matched against type-specific keywords.
        """
        filename_lower = filename.lower()
        columns = [col.lower() for col in df.columns]

        # Filename-based detection.
        if any(word in filename_lower for word in ['calendar', 'event', '日历', '会议']):
            return 'calendar'
        elif any(word in filename_lower for word in ['financial', 'bill', '账单', '财务']):
            return 'financial'
        elif any(word in filename_lower for word in ['email', '邮件', 'mail']):
            return 'email'

        # Column-name-based detection.
        joined_cols = ' '.join(columns)
        if any(word in joined_cols for word in ['时间', 'time', '会议', '地点']):
            return 'calendar'
        elif any(word in joined_cols for word in ['金额', 'amount', '费用', '账单']):
            return 'financial'
        elif any(word in joined_cols for word in ['主题', 'subject', '发件人', '内容']):
            return 'email'

        return 'general'

    def _load_data(self):
        """Load every configured CSV, build text representations and embeddings."""
        print("加载CSV数据文件...")

        for data_type, file_path in self.csv_files.items():
            if not os.path.exists(file_path):
                print(f"文件不存在: {file_path}")
                continue

            try:
                # Try common Chinese encodings in order; for/else fires when
                # none of them decoded the file.
                for encoding in ['utf-8', 'gbk', 'gb2312']:
                    try:
                        df = pd.read_csv(file_path, encoding=encoding)
                        break
                    except UnicodeDecodeError:
                        continue
                else:
                    print(f"无法读取文件: {file_path}")
                    continue

                print(f"成功读取 {data_type}: {len(df)} 行数据")

                # Auto-detect the data type; keep the configured key only when
                # detection is inconclusive.
                detected_type = self._detect_data_type(file_path, df)
                actual_key = detected_type if detected_type != 'general' else data_type

                self.data[actual_key] = {
                    'dataframe': df,
                    'text_representations': self._create_text_representations(df, actual_key),
                    'embeddings': None
                }

                print(f"数据类型识别为: {actual_key}")

            except Exception as e:
                print(f"读取CSV文件失败 {file_path}: {e}")

        # Embed all row texts.
        self._create_embeddings()

    def _create_text_representations(self, df: pd.DataFrame, data_type: str) -> List[str]:
        """Build one descriptive text string per DataFrame row.

        Bugfix vs. the original: category matching is done once per column
        instead of once per (field-name, first-matching-column) pair. The old
        loops appended the same first matching column repeatedly (once per
        field name) and ignored every other matching column.
        """
        field_mapping = self.field_mappings.get(data_type, {})
        time_fields = field_mapping.get('time_fields', [])
        # Amount columns are only meaningful for financial data.
        amount_fields = field_mapping.get('amount_fields', []) if data_type == 'financial' else []
        text_fields = field_mapping.get('text_fields', [])
        key_fields = field_mapping.get('key_fields', [])

        text_representations = []
        for _, row in df.iterrows():
            parts = []

            # Time columns: every column whose name contains a time hint.
            for col in df.columns:
                if any(tf in col for tf in time_fields) and pd.notna(row[col]):
                    parts.append(f"时间: {row[col]}")

            # Amount columns (financial data only).
            for col in df.columns:
                if any(af in col for af in amount_fields) and pd.notna(row[col]):
                    parts.append(f"金额: {row[col]}元")

            # Text columns: named text fields verbatim; other long free-text
            # values truncated to 100 characters.
            for col in df.columns:
                if pd.notna(row[col]) and str(row[col]).strip():
                    col_lower = col.lower()
                    if any(tf in col_lower for tf in text_fields):
                        parts.append(f"{col}: {row[col]}")
                    elif len(str(row[col])) > 10:
                        parts.append(f"{col}: {str(row[col])[:100]}")

            # Key columns (people / accounts / rooms).
            for col in df.columns:
                if any(kf in col for kf in key_fields) and pd.notna(row[col]):
                    parts.append(f"{col}: {row[col]}")

            # Fall back to the raw row dict when nothing matched.
            text_representations.append(" | ".join(parts) if parts else str(row.to_dict()))

        return text_representations

    def _create_embeddings(self):
        """Embed every row text of every loaded data type."""
        print("创建文本嵌入向量...")

        for data_type, data_info in self.data.items():
            texts = data_info['text_representations']
            print(f"为 {data_type} 创建 {len(texts)} 个嵌入向量")

            embeddings = [self.embeddings_model.embed_text(text) for text in texts]
            self.data[data_type]['embeddings'] = np.array(embeddings)

    def search_similar(self, query: str, k: int = 5, data_types: Optional[List[str]] = None) -> List[Dict]:
        """Return up to k rows most similar to the query, across data types.

        Each result dict carries 'data_type', 'similarity', 'text',
        'row_data' and 'index'. Results below similarity 0.1 are dropped.
        """
        if not self.data:
            return []

        query_embedding = np.asarray(self.embeddings_model.embed_text(query))
        query_norm = np.linalg.norm(query_embedding)
        results = []

        # Restrict the search when explicit data types were requested.
        search_types = data_types if data_types else list(self.data.keys())

        for data_type in search_types:
            data_info = self.data.get(data_type)
            if data_info is None or data_info['embeddings'] is None:
                continue

            emb = data_info['embeddings']
            # Cosine similarity in plain numpy. (Bugfix: the original called
            # `cosine_similarity`, a name never imported in this module, so
            # every search raised NameError.)
            denom = np.linalg.norm(emb, axis=1) * query_norm
            denom = np.where(denom > 0, denom, 1e-12)
            similarities = emb @ query_embedding / denom

            # Best k candidates of this data type, highest similarity first.
            top_indices = np.argsort(similarities)[::-1][:k]

            for idx in top_indices:
                if similarities[idx] > 0.1:  # similarity threshold
                    results.append({
                        'data_type': data_type,
                        'similarity': float(similarities[idx]),
                        'text': data_info['text_representations'][idx],
                        'row_data': data_info['dataframe'].iloc[idx].to_dict(),
                        'index': int(idx)
                    })

        # Global ranking across all searched data types.
        results.sort(key=lambda x: x['similarity'], reverse=True)
        return results[:k]

    def get_context_documents(self, query: str, k: int = 3) -> str:
        """Build a context string for the query (RAGModel-compatible)."""
        # Infer intent and narrow the data types to search.
        query_intent = self._analyze_query_intent(query)
        target_types = self._determine_target_types(query, query_intent)

        # Over-fetch (k*2) so the final top-k survives the ranking.
        results = self.search_similar(query, k=k * 2, data_types=target_types)

        if not results:
            return ""

        # Prefix each snippet with a type-specific Chinese label.
        context_parts = []
        for result in results[:k]:
            data_type = result['data_type']
            text = result['text']

            if data_type == 'financial':
                context_parts.append(f"【账单信息】{text}")
            elif data_type == 'calendar':
                context_parts.append(f"【会议安排】{text}")
            elif data_type == 'email':
                context_parts.append(f"【邮件内容】{text}")
            else:
                context_parts.append(f"【{data_type}】{text}")

        return "\n\n".join(context_parts)

    def _analyze_query_intent(self, query: str) -> Dict[str, Any]:
        """Extract a coarse intent dict from the query via keyword matching."""
        intent = {
            'type': 'general',
            'entities': [],
            'time_related': False,
            'amount_related': False,
            'person_related': False
        }

        # Time-related keywords.
        time_keywords = ['时间', '日期', '什么时候', '几点', '周几', '月份']
        if any(kw in query for kw in time_keywords):
            intent['time_related'] = True

        # Amount-related keywords.
        amount_keywords = ['多少钱', '金额', '费用', '总计', '合计', '应缴', '元']
        if any(kw in query for kw in amount_keywords):
            intent['amount_related'] = True

        # Person-related keywords.
        person_keywords = ['谁', '负责人', '联系人', '参会', '发件人', '收件人']
        if any(kw in query for kw in person_keywords):
            intent['person_related'] = True

        # Naive entity extraction: multi-char words minus common stopwords.
        stopwords = {'什么', '哪里', '怎么', '为什么', '的',
                     '是', '在', '了', '和', '与', '或', '但', '如果', '因为'}
        intent['entities'] = [
            word for word in jieba.lcut(query)
            if len(word) > 1 and word not in stopwords
        ]

        # Decide the dominant query type (amount beats time beats email).
        if intent['amount_related']:
            intent['type'] = 'financial'
        elif intent['time_related'] or any(kw in query for kw in ['会议', '发布会', '活动']):
            intent['type'] = 'calendar'
        elif any(kw in query for kw in ['邮件', '通知', '发送', '收件']):
            intent['type'] = 'email'

        return intent

    def _determine_target_types(self, query: str, intent: Dict) -> Optional[List[str]]:
        """Pick the data types to search; None means search everything."""
        query_lower = query.lower()

        # Explicit type cue words override the inferred intent.
        if any(word in query_lower for word in ['账单', '费用', '金额', '应缴']):
            return ['financial']
        elif any(word in query_lower for word in ['会议', '发布会', '活动', '时间安排']):
            return ['calendar']
        elif any(word in query_lower for word in ['邮件', '通知', '发送']):
            return ['email']

        # Fall back to the intent classification.
        suggested_types = []
        if intent['type'] != 'general':
            suggested_types.append(intent['type'])

        # Queries with entities but no clear type search all data types.
        if intent['entities'] and len(suggested_types) == 0:
            return None

        return suggested_types if suggested_types else None

    def init_retriever(self, search_results, recall_k=30, task3_topk=None, max_length=None,
                      method='csv', query=None):
        """RAGModel-compatible init; CSV data is already loaded, so this is a no-op."""
        print(f"CSV检索器已就绪，包含 {len(self.data)} 种数据类型")
        self.retriever = self  # this object serves as its own retriever
        return True

    def get_result(self, query: str, k: int = 3, rerank: bool = True) -> List[str]:
        """RAGModel-compatible result accessor: top-k row texts."""
        results = self.search_similar(query, k=k)
        return [result['text'] for result in results]

    def clear(self):
        """Release loaded data, the embedding model, and cached CUDA memory."""
        print("清理CSV检索器资源...")
        try:
            # Drop loaded data and cached embeddings.
            self.data.clear()
            self.text_embeddings.clear()

            # Release the neural embedding model, if any.
            if hasattr(self.embeddings_model, 'model') and self.embeddings_model.model is not None:
                del self.embeddings_model.model
                self.embeddings_model.model = None

            # Free cached CUDA memory.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            print("CSV检索器资源清理完成")
        except Exception as e:
            print(f"清理资源时出错: {e}")

class Retriever1:
    """Simplified Retriever2 that delegates all work to a ChineseCSVRetriever.

    Every public method falls back to an empty result when the underlying
    CSV retriever failed to initialize.
    """

    def __init__(self, device1="cuda:0", device2="cuda:1", batch_size=32, tokenizer=None):
        """
        Args:
            device1: device for the CSV retriever's embedding model.
            device2: accepted for interface compatibility; unused here.
            batch_size: accepted for interface compatibility; unused here.
            tokenizer: external tokenizer reference kept on the instance.
        """
        print("初始化简化版Retriever2用于中文CSV数据...")

        # Fixed CSV source paths.
        self.csv_files = {
            'calendar': 'models/processed_data/daily_data/calendar_events_sync.csv',
            'financial': 'models/processed_data/daily_data/financial_bills_history.csv',
            'email': 'models/processed_data/daily_data/personal_emails.csv'
        }

        # Build the Chinese CSV retriever; keep None on failure so every
        # method can degrade gracefully.
        try:
            self.csv_retriever = ChineseCSVRetriever(self.csv_files, device=device1)
            self.retriever = self.csv_retriever
            print("中文CSV检索器初始化成功")
        except Exception as e:
            print(f"CSV检索器初始化失败: {e}")
            self.csv_retriever = None
            self.retriever = None

        self.tokenizer = tokenizer

        print("简化版Retriever2初始化完成")

    def init_retriever(self, search_results, recall_k=30, task3_topk=None, max_length=None,
                      method='csv', query=None):
        """Forward retriever initialization; False when the CSV retriever is absent."""
        if self.csv_retriever is None:
            return False

        return self.csv_retriever.init_retriever(
            search_results, recall_k, task3_topk, max_length, method, query
        )

    def get_result(self, query: str, k: int = 3, rerank: bool = True) -> List[str]:
        """Return top-k retrieved texts; a single empty string when unavailable."""
        if self.csv_retriever is None:
            return [""]

        return self.csv_retriever.get_result(query, k, rerank)

    def get_context_documents(self, query: str, k: int = 3, **kwargs) -> str:
        """Return a formatted context string for the query ('' when unavailable)."""
        if self.csv_retriever is None:
            return ""

        return self.csv_retriever.get_context_documents(query, k)

    def load_documents(self, documents) -> bool:
        """No-op: CSV data is loaded in __init__; reports whether that succeeded."""
        return self.csv_retriever is not None

    def get_contextual_info(self, query: str) -> str:
        """Shorter context lookup (k=2) delegated to the CSV retriever."""
        if self.csv_retriever is None:
            return ""

        return self.csv_retriever.get_context_documents(query, k=2)

    def detect_language(self, query: str) -> str:
        """Return 'zh' when >30% of characters are CJK (or the query is empty),
        otherwise 'mixed'."""
        chinese_chars = len([c for c in query if '\u4e00' <= c <= '\u9fff'])
        total_chars = len(query)

        if total_chars == 0:
            return 'zh'

        chinese_ratio = chinese_chars / total_chars
        return 'zh' if chinese_ratio > 0.3 else 'mixed'

    def clear(self):
        """Release the CSV retriever and cached CUDA memory."""
        print("清理Retriever2资源...")
        try:
            if self.csv_retriever:
                self.csv_retriever.clear()
                self.csv_retriever = None

            self.retriever = None

            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            print("Retriever2资源清理完成")
        except Exception as e:
            print(f"清理资源时出错: {e}")

    def __del__(self):
        """Destructor: best-effort cleanup.

        Bugfix: the original used a bare `except:`, which also swallows
        SystemExit/KeyboardInterrupt; narrowed to Exception.
        """
        try:
            self.clear()
        except Exception:
            pass
