import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import models.Retriever as Retriever
from transformers import GenerationConfig
from models.prompt_api import template_map
import time
from peft import PeftModel
import os
import warnings
from langchain._api import LangChainDeprecationWarning
import traceback
import gc
import re
from typing import Dict, List, Tuple, Any
import jieba
import json

class RAGModel:

    def __init__(self):
        """Load tokenizer, base Qwen3-8B model, PEFT adapter paths, and retriever.

        Order matters: special tokens are added to the tokenizer BEFORE
        resize_token_embeddings() so the embedding matrix matches the final
        vocabulary size. Retriever failure is non-fatal (leaves self.r = None).
        """
        # Set CUDA visible devices (pins this process to GPUs 3-6)
        os.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5,6"
        # NOTE(review): CUDA_LAUNCH_BLOCKING is documented as a 0/1 switch;
        # "3" looks like a typo for "1" — confirm intent before changing.
        os.environ["CUDA_LAUNCH_BLOCKING"] = "3"
        warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
        warnings.filterwarnings("ignore", message=".*torch.load.*")
        self.model_path = "models/Qwen3-8B"
        print("-------------------------Loading LLM--------------------------")
        t1 = time.time()
        
        # Load tokenizer (slow tokenizer, local files only — no hub download)
        print("Loading tokenizer...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path, 
            trust_remote_code=True,
            local_files_only=True,
            use_fast=False
        )
        
        # Handle Qwen special tokens: reuse EOS as PAD when PAD is unset
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
            print(f"Tokenizer pad_token set to eos_token: {self.tokenizer.eos_token}")

        print(f"Tokenizer initial vocab size: {len(self.tokenizer)}")

        # Add special tokens used by the chat-style prompt formats
        special_tokens = [
            "<|start_header_id|>", "<|end_header_id|>", 
            "<|eot_id|>", "<|user|>", "<|assistant|>"
        ]
        num_added = self.tokenizer.add_special_tokens({"additional_special_tokens": special_tokens})
        print(f"Added {num_added} special tokens. New tokenizer size: {len(self.tokenizer)}")

        # Load base model; device_map="auto" shards it across the visible GPUs,
        # in bf16 when supported, fp16 otherwise
        print("Loading base model...")
        self.base_model = AutoModelForCausalLM.from_pretrained(
            self.model_path, 
            trust_remote_code=True,
            device_map="auto",
            local_files_only=True,
            torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16,
            low_cpu_mem_usage=True
        )
        
        print(f"Base model loaded. Initial vocab size: {self.base_model.get_input_embeddings().weight.size(0)}")

        # Resize embeddings to cover the tokens added above
        self.base_model.resize_token_embeddings(len(self.tokenizer))
        print(f"Model size adjusted to match tokenizer: {self.base_model.get_input_embeddings().weight.size(0)}")
        
        # Discover PEFT adapter checkpoints (actual adapter weights are loaded
        # lazily by quick_load_adapter on first use)
        print("Loading multiple PEFT adapters...")
        self.load_multiple_adapters()
        
        print(f"Finished loading LLM, total time: {time.time() - t1:.2f}s")

        # Load retriever; any failure is tolerated and leaves self.r = None
        try:
            print("-------------------------Loading RET----------------------")
            t1 = time.time()
            self.k = 6  # default top-k for context retrieval
            self.r = Retriever.Retriever2(
                device1="cuda:0", device2="cuda:1", batch_size=64,
                tokenizer=self.tokenizer
            )
            print(f"Finished loading RET, total time: {time.time() - t1:.2f}s")
            self.r.clear()
        except Exception as e:
            print(f"Warning: Failed to load retriever: {e}")
            self.r = None

    def fast_answer_validation(self, answer: str, query: str, intent_info: Dict) -> Tuple[bool, float]:
        """Fast answer validation with essential checks only"""
        # Reject empty or near-empty answers outright.
        if not answer or len(answer.strip()) < 3:
            return False, 0.0

        normalized = answer.lower().strip()

        # Refusal phrases / leaked API-call text disqualify an answer.
        bad_markers = (
            'i don\'t know', 'invalid question', 'i do not know', 
            'sorry', 'cannot', 'get_', 'api_call', 'i don\'t have'
        )
        for marker in bad_markers:
            if marker in normalized:
                return False, 0.0

        # Raw API-call text at/near the start also disqualifies.
        if answer.startswith('get_') or 'get_' in answer[:50]:
            return False, 0.0

        # Length sanity window: too short is unhelpful, too long is rambling.
        word_count = len(answer.split())
        if not 3 <= word_count <= 80:
            return False, 0.0

        # Crude quality score keyed to answer length.
        score = 0.7 if word_count > 8 else (0.6 if word_count > 5 else 0.5)
        return True, score

    def load_multiple_adapters(self):
        """Load multiple PEFT adapters with dynamic switching capability"""
        base_peft_dir = "models/pretrain_models/Qwen3-52-peft"

        # Logical adapter name -> preferred checkpoint directory.
        adapter_configs = {
            "web_context": "checkpoint-480",
            "api_generation": "checkpoint-500", 
            "api_extraction": "checkpoint-580"
        }

        # No adapter is active until quick_load_adapter() runs.
        self.current_adapter = None
        self.current_peft_model = None

        self.adapter_paths = {}
        for name, checkpoint in adapter_configs.items():
            preferred = os.path.join(base_peft_dir, checkpoint)
            if os.path.exists(preferred):
                self.adapter_paths[name] = preferred
                print(f"Found adapter {name}: {preferred}")
                continue
            # Preferred checkpoint missing: fall back to the newest one on disk.
            if not os.path.exists(base_peft_dir):
                continue
            candidates = [d for d in os.listdir(base_peft_dir) if d.startswith('checkpoint-')]
            if not candidates:
                continue
            candidates.sort(key=lambda c: int(c.split('-')[1]) if c.split('-')[1].isdigit() else 0)
            fallback = os.path.join(base_peft_dir, candidates[-1])
            self.adapter_paths[name] = fallback
            print(f"Auto-selected checkpoint for {name}: {fallback}")

        self.available_adapters = list(self.adapter_paths.keys()) + ['base_model']
        print(f"Successfully configured adapters: {self.available_adapters}")

    def select_optimal_adapter(self, intent_info: Dict, query: str) -> str:
        """Select optimal adapter based on query intent"""
        domain = intent_info.get('domain', 'general')
        intent = intent_info.get('primary_intent', 'factual_lookup')

        # Sports questions are handled best by the plain base model.
        if domain == 'sports':
            return "base_model"

        # Finance/movies/music route through the API-tuned adapters;
        # numeric or comparative questions prefer the generation adapter.
        if domain in ('finance', 'movies', 'music'):
            return "api_generation" if intent in ('numerical', 'comparative') else "api_extraction"

        # Everything else falls back to the web-context adapter.
        return "web_context"

    def multi_strategy_generation(self, context_str: str, query: str, intent_info: Dict, 
                                query_time: str) -> Tuple[str, str]:
        """Optimized multi-strategy generation with fewer strategies and early stopping"""
        domain = intent_info.get('domain', 'general')
        intent = intent_info.get('primary_intent', 'factual_lookup')
        print(f"Fast strategy - Domain: {domain}, Intent: {intent}")

        # Pre-filter the retrieved context once, up front.
        if context_str:
            context_str = self.process_context_text(context_str, query, intent_info)

        # Ordered plan of (name, adapter, token budget) — at most 3 entries.
        plan = [('optimal', self.select_optimal_adapter(intent_info, query), 80)]

        domain_fallbacks = {
            'finance': ('finance_fallback', 'api_generation', 70),
            'movies': ('movie_fallback', 'api_extraction', 70),
            'sports': ('sports_fallback', 'base_model', 70),
        }
        plan.append(domain_fallbacks.get(domain, ('general_fallback', 'web_context', 70)))

        # Keep the raw base model as a last resort unless it already leads.
        if plan[0][1] != 'base_model':
            plan.append(('base_fallback', 'base_model', 60))

        collected_answers = []
        for idx, (strategy_name, adapter, budget) in enumerate(plan):
            try:
                print(f"Strategy {idx+1}/3: {strategy_name} with {adapter}")
                candidate = self.fast_generate_answer(adapter, query, context_str, intent_info, budget)
                if not candidate:
                    continue
                is_valid, score = self.fast_answer_validation(candidate, query, intent_info)
                print(f"Strategy '{strategy_name}' - Valid: {is_valid}, Score: {score:.2f}")
                if not is_valid:
                    continue
                collected_answers.append({
                    'strategy': strategy_name,
                    'answer': candidate,
                    'score': score
                })
                # Stop early once a clearly good answer shows up.
                if score > 0.8:
                    print(f"High quality answer found early, stopping search")
                    break
            except Exception as e:
                print(f"Strategy {strategy_name} failed: {e}")

        # Merge (or fall back) and return alongside the filtered context.
        final_answer = self.fast_synthesize_answer(query, context_str, collected_answers, intent_info)
        return final_answer, context_str

    def fast_generate_answer(self, adapter: str, query: str, context_str: str, 
                           intent_info: Dict, max_tokens: int) -> str:
        """Fast answer generation with reduced overhead"""
        try:
            # The bare base model and PEFT adapters use separate fast paths.
            if adapter != 'base_model':
                return self.fast_adapter_generate(adapter, query, context_str, intent_info, max_tokens)
            return self.fast_base_model_generate(query, context_str, max_tokens)
        except Exception as e:
            print(f"Fast generation failed for {adapter}: {e}")
            return ""

    def fast_base_model_generate(self, query: str, context_str: str, max_tokens: int) -> str:
        """Optimized base model generation"""
        try:
            # Keep the prompt minimal; only attach context when substantial.
            has_context = bool(context_str) and len(context_str) > 50
            prompt = (
                f"Context: {context_str[:600]}\nQuestion: {query}\nAnswer:"
                if has_context
                else f"Question: {query}\nAnswer:"
            )

            token_ids = self.tokenizer(
                prompt,
                return_tensors="pt",
                max_length=1000,
                truncation=True
            )["input_ids"]
            token_ids = self.safe_tensor_to_device(token_ids)

            # Near-greedy sampling tuned for speed (single beam, low temperature).
            with torch.no_grad():
                generated = self.base_model.generate(
                    token_ids,
                    max_new_tokens=max_tokens,
                    temperature=0.1,
                    top_p=0.5,
                    do_sample=True,
                    pad_token_id=self.tokenizer.pad_token_id,
                    use_cache=True,
                    num_beams=1
                )

            # Decode only the newly generated suffix.
            text = self.tokenizer.decode(
                generated[0][len(token_ids[0]):], 
                skip_special_tokens=True
            ).strip()
            return self.quick_clean_response(text)

        except Exception as e:
            print(f"Fast base model generation error: {e}")
            return ""

    def fast_adapter_generate(self, adapter_name: str, query: str, context_str: str,
                            intent_info: Dict, max_tokens: int) -> str:
        """Fast adapter generation with caching"""
        try:
            # If the adapter cannot be activated, degrade to the base model.
            if not self.quick_load_adapter(adapter_name):
                return self.fast_base_model_generate(query, context_str, max_tokens)

            prompt = self.create_simple_adapter_prompt(adapter_name, query, context_str, intent_info)

            token_ids = self.tokenizer(
                prompt,
                return_tensors="pt",
                max_length=1200,
                truncation=True
            )["input_ids"]
            token_ids = self.safe_tensor_to_device(token_ids)

            # Slightly warmer sampling than the base-model path; single beam.
            with torch.no_grad():
                generated = self.current_peft_model.generate(
                    token_ids,
                    max_new_tokens=max_tokens,
                    temperature=0.15,
                    top_p=0.6,
                    do_sample=True,
                    pad_token_id=self.tokenizer.pad_token_id,
                    use_cache=True,
                    num_beams=1
                )

            # Decode only the newly generated suffix.
            text = self.tokenizer.decode(
                generated[0][len(token_ids[0]):],
                skip_special_tokens=True
            ).strip()
            return self.quick_clean_response(text)

        except Exception as e:
            print(f"Fast adapter generation error: {e}")
            return self.fast_base_model_generate(query, context_str, max_tokens)

    def quick_load_adapter(self, adapter_name: str) -> bool:
        """Quick adapter loading with caching"""
        # Cache hit: the requested adapter is already active.
        if adapter_name == self.current_adapter and self.current_peft_model:
            return True

        # Unknown adapter name: nothing to load.
        if adapter_name not in self.adapter_paths:
            return False

        try:
            # Drop the previously active adapter before loading a new one.
            if self.current_peft_model:
                del self.current_peft_model
                torch.cuda.empty_cache()

            self.current_peft_model = PeftModel.from_pretrained(
                self.base_model,
                self.adapter_paths[adapter_name],
                torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
            )
            self.current_adapter = adapter_name
            return True

        except Exception as e:
            print(f"Quick adapter loading failed: {e}")
            return False

    def create_simple_adapter_prompt(self, adapter_name: str, query: str, 
                                   context_str: str, intent_info: Dict) -> str:
        """Create simplified prompts for adapters"""
        domain = intent_info.get('domain', 'general')

        if adapter_name == 'api_generation':
            # Domain-specific API prompt templates are imported lazily.
            if domain == 'finance':
                from models.prompt_api import finance_prompt
                now = time.strftime("%m/%d/%Y, %H:%M:%S PT")
                return finance_prompt.format(query_str=query, time_str=now)
            if domain == 'movies':
                from models.prompt_api import movie_prompt
                return movie_prompt.format(query_str=query)
            if domain == 'music':
                from models.prompt_api import music_prompt
                return music_prompt.format(query_str=query)
            return f"Generate API calls for: {query}"

        # Extraction/web prompts embed (at most) the first 800 context chars.
        if adapter_name == 'api_extraction':
            return f"Context: {context_str[:800] if context_str else 'No context'}\nQuestion: {query}\nAnswer:"
        return f"Web Results: {context_str[:800] if context_str else 'No results'}\nQuestion: {query}\nAnswer:"

    def fast_synthesize_answer(self, query: str, context_str: str, 
                             collected_answers: List[Dict], intent_info: Dict) -> str:
        """Fast answer synthesis with minimal overhead"""
        try:
            if not collected_answers:
                return self.generate_quick_fallback(query, intent_info)
            
            # Sort by score
            collected_answers.sort(key=lambda x: x['score'], reverse=True)
            best_answer = collected_answers[0]
            
            # If single high-quality answer, return directly
            if best_answer['score'] > 0.7 and len(collected_answers) == 1:
                return self.quick_clean_response(best_answer['answer'])
            
            # If multiple answers but best is very good, use it
            if best_answer['score'] > 0.8:
                return self.quick_clean_response(best_answer['answer'])
            
            # Simple synthesis for multiple mediocre answers
            if len(collected_answers) > 1:
                return self.simple_answer_synthesis(query, collected_answers, intent_info)
            
            # Single mediocre answer - clean and return
            return self.quick_clean_response(best_answer['answer'])
            
        except Exception as e:
            print(f"Fast synthesis error: {e}")
            return self.generate_quick_fallback(query, intent_info)

    def simple_answer_synthesis(self, query: str, answers: List[Dict], intent_info: Dict) -> str:
        """Simple synthesis without full base model generation"""
        try:
            # Pool the text of (at most) the top three candidates.
            pooled = ' '.join(entry['answer'] for entry in answers[:3])
            domain = intent_info.get('domain', 'general')
            query_lower = query.lower()

            if domain == 'movies' and 'movie' in query_lower:
                # Pull a quoted or Title-Case movie name out of the pooled text.
                m = re.search(r'"([^"]+)"|\b([A-Z][a-z]+(?: [A-Z][a-z]+)*)\b', pooled)
                if m:
                    title = m.group(1) or m.group(2)
                    return f"The movie is \"{title}\"."
            elif 'how many' in query_lower or 'number' in query_lower:
                # Counting questions: surface the first number mentioned.
                hits = re.findall(r'\b(\d+(?:\.\d+)?)\s*(?:million|billion|attempts|games)?\b', pooled)
                if hits:
                    return f"The answer is {hits[0]}."
            elif 'when' in query_lower or 'date' in query_lower:
                # Temporal questions: surface the first plausible year.
                years = re.findall(r'\b(19\d{2}|20\d{2})\b', pooled)
                if years:
                    return f"The year is {years[0]}."

            # No pattern matched: fall back to the best raw answer, cleaned.
            return self.quick_clean_response(answers[0]['answer'])

        except Exception as e:
            print(f"Simple synthesis failed: {e}")
            return answers[0]['answer'] if answers else "Unable to determine the answer."

    def quick_clean_response(self, response: str) -> str:
        """Quick response cleaning without heavy processing"""
        if not response:
            return ""

        # Strip at most one well-known boilerplate prefix (case-insensitive).
        lowered = response.lower()
        for prefix in ("based on", "according to", "the answer is", "answer:"):
            if lowered.startswith(prefix):
                response = response[len(prefix):].strip()
                if response.startswith(':'):
                    response = response[1:].strip()
                break

        # Keep just the first sentence, re-terminated with a period.
        first_sentence = response.split('.')[0]
        if len(first_sentence) > 5:
            response = first_sentence.strip()
            if not response.endswith('.'):
                response += '.'

        return response

    def generate_quick_fallback(self, query: str, intent_info: Dict) -> str:
        """Quick fallback response"""
        # Canned domain-specific apologies; 'general' doubles as the default.
        canned = {
            'sports': "I don't have access to the specific sports statistics requested.",
            'movies': "I don't have information about that specific movie.",
            'finance': "I don't have access to current financial data.",
            'general': "I don't have access to the requested information."
        }
        return canned.get(intent_info.get('domain', 'general'), canned['general'])

    def process_context_text(self, text: str, query: str, intent_info: Dict) -> str:
        """Process context text with smart segmentation"""
        # Tiny contexts pass through untouched.
        if not text or len(text) < 50:
            return text

        entities = intent_info.get('entities', [])
        # Query keywords (3+ chars) for overlap scoring; computed once.
        query_words = set(re.findall(r'\b\w{3,}\b', query.lower())) if query else set()

        # Break the (truncated) context on blank lines or capitalized line starts.
        scored = []
        for segment in re.split(r'\n\n|\n(?=[A-Z])', text[:2000]):
            if len(segment.strip()) < 20:
                continue
            segment_lower = segment.lower()
            # 2 points per matched entity, 0.5 per shared query keyword.
            score = sum(2.0 for entity in entities if entity.lower() in segment_lower)
            segment_words = set(re.findall(r'\b\w{3,}\b', segment_lower))
            score += len(query_words & segment_words) * 0.5
            if score > 0:
                scored.append((segment, score))

        # Keep the three highest-scoring segments, best first.
        scored.sort(key=lambda pair: pair[1], reverse=True)
        result = '\n\n'.join(segment for segment, _ in scored[:3])

        # Trim to 1500 chars, preferably ending on a sentence boundary.
        if len(result) > 1500:
            result = result[:1500]
            cut = result.rfind('.')
            if cut > 900:
                result = result[:cut + 1]

        return result

    def fast_intent_recognition(self, query: str) -> Dict[str, Any]:
        """Fast intent recognition using pattern matching.

        Classifies the query into a domain (movies/finance/sports/geography),
        a primary intent (numerical/temporal/comparative/factual_lookup) and
        extracts lightweight entities — all via substring/regex heuristics,
        with no model calls.

        Fixed: return annotation previously used the builtin ``any`` instead
        of ``typing.Any``.

        Args:
            query: Raw user question.

        Returns:
            Dict with keys 'primary_intent', 'domain', 'complexity',
            'entities' (list of strings) and 'intent_confidence' (0.0-1.0).
        """
        query_lower = query.lower().strip()

        # Defaults assumed until a pattern overrides them.
        intent_info = {
            'primary_intent': 'factual_lookup',
            'domain': 'general',
            'complexity': 'medium',
            'entities': [],
            'intent_confidence': 0.7
        }

        # Domain detection: count keyword hits per domain, keep the best scorer.
        domain_patterns = {
            'movies': ['oscar', 'academy award', 'visual effects', 'film', 'movie', 'actor', 'director'],
            'finance': ['dow jones', 'stock', 'company', 'market', 'ceo', 'performer', 'salesforce'],
            'sports': ['team', 'player', 'game', 'score', 'season', 'nash', '3-point'],
            'geography': ['country', 'located', 'region', 'africa', 'southern']
        }

        best_domain = 'general'
        best_score = 0
        for domain, patterns in domain_patterns.items():
            score = sum(1 for pattern in patterns if pattern in query_lower)
            if score > best_score:
                best_score = score
                best_domain = domain

        if best_score > 0:
            intent_info['domain'] = best_domain
            # Confidence grows with the number of keyword hits, capped at 0.9.
            intent_info['intent_confidence'] = min(0.9, 0.6 + best_score * 0.1)

        # Intent detection: first matching family wins.
        if any(pattern in query_lower for pattern in ['how many', 'number', 'count', 'average']):
            intent_info['primary_intent'] = 'numerical'
        elif any(pattern in query_lower for pattern in ['when', 'date', 'year', 'time', 'in 2021']):
            intent_info['primary_intent'] = 'temporal'
        elif any(pattern in query_lower for pattern in ['best', 'top', 'highest', 'compare']):
            intent_info['primary_intent'] = 'comparative'

        # Extract entities
        intent_info['entities'] = self.extract_entities_from_query(query)

        return intent_info

    def extract_entities_from_query(self, query: str) -> List[str]:
        """Fast entity extraction from query"""
        found = []

        # Four-digit years (1900-2099).
        found.extend(re.findall(r'\b(19\d{2}|20\d{2})\b', query))

        # Capitalized (possibly multi-word) proper nouns of 3+ characters.
        for noun in re.findall(r'\b([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)\b', query):
            if len(noun) > 2:
                found.append(noun)

        # Hand-picked domain terms, matched case-insensitively.
        specific_terms = {
            'steve nash': r'\bsteve\s+nash\b',
            'dow jones': r'\bdow\s+jones\b',
            'visual effects': r'\bvisual\s+effects\b',
            'oscar': r'\boscar\b',
            '50-40-90': r'\b50-40-90\b'
        }
        lowered = query.lower()
        for term_name, pattern in specific_terms.items():
            if re.search(pattern, lowered):
                found.append(term_name)

        # Case-insensitive de-duplication, preserving first-seen order.
        seen = set()
        unique = []
        for candidate in found:
            cleaned = candidate.strip()
            if cleaned and cleaned.lower() not in seen:
                unique.append(cleaned)
                seen.add(cleaned.lower())

        # Cap at five entities.
        return unique[:5]

    def process_task_with_fast_rag(self, query: str, query_time: str) -> Tuple[str, str]:
        """Fast RAG processing with pattern-based intent recognition"""
        try:
            # Step 1: cheap pattern-based intent classification.
            intent_info = self.fast_intent_recognition(query)
            print(f"Fast intent analysis: {intent_info}")

            # Step 2: gather context, preferring the dense retriever when ready.
            context_str = ""
            try:
                if self.r and hasattr(self.r, 'retriever') and self.r.retriever:
                    context_str = self.r.get_context_documents(query, k=self.k)
                    print(f"Retrieved context length: {len(context_str) if context_str else 0}")
                else:
                    # No retriever: stitch context from raw search results.
                    pieces = []
                    for result in getattr(self, 'current_search_results', [])[:4]:
                        content = (result.get('page_content') or 
                                 result.get('page_result') or 
                                 result.get('snippet', ''))
                        if content and len(content.strip()) > 20:
                            # Collapse whitespace and cap each piece at 400 chars.
                            pieces.append(re.sub(r'\s+', ' ', content.strip())[:400])
                    context_str = "\n\n".join(pieces)

            except Exception as e:
                print(f"Error during context retrieval: {e}")
                context_str = ""

            # Step 3: generate via the multi-strategy pipeline.
            return self.multi_strategy_generation(context_str, query, intent_info, query_time)

        except Exception as e:
            print(f"Fast RAG processing error: {e}")
            return "I apologize, but I cannot process this request.", ""

    def safe_tensor_to_device(self, tensor, fallback_device='cpu'):
        """Safely move tensor to model device"""
        try:
            if tensor is None or not isinstance(tensor, torch.Tensor):
                return tensor
            
            # Determine target device based on current model
            if hasattr(self, 'current_peft_model') and self.current_peft_model:
                target_device = next(self.current_peft_model.parameters()).device
            else:
                target_device = next(self.base_model.parameters()).device
                
            return tensor.to(target_device)
            
        except Exception as e:
            print(f"Error moving tensor to device: {e}")
            return tensor.to(fallback_device) if fallback_device else tensor

    def cleanup_memory(self):
        """Memory cleanup function"""
        try:
            # Release the active PEFT adapter, if any.
            if hasattr(self, 'current_peft_model') and self.current_peft_model:
                del self.current_peft_model
                self.current_peft_model = None

            # Let the retriever drop its cached documents.
            if hasattr(self, 'r') and self.r:
                self.r.clear()

            # Force a GC pass and release cached CUDA blocks when possible.
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            print("Memory cleanup completed")

        except Exception as e:
            print(f"Error during memory cleanup: {e}")

    def generate_answer(self, query: str, search_results: List, query_time: str) -> str:
        """Main answer generation function with fast processing.

        Per-query entry point: primes the retriever with the supplied search
        results, runs the fast RAG pipeline, and returns the answer text.

        Fixed: the error-path message previously read "whileprocessing"
        (missing space).

        Args:
            query: User question.
            search_results: List of search-result dicts (may be None/empty);
                stored on self for the no-retriever context path.
            query_time: Timestamp string forwarded to the generation pipeline.

        Returns:
            The generated answer string, or an apology message on failure.
        """
        self.t_s = time.time()
        self.current_search_results = search_results or []

        try:
            # Opportunistic GC before a memory-heavy generation pass.
            gc.collect()

            # Initialize retriever with search results (skipped when either
            # the results or the retriever are missing).
            if self.current_search_results and self.r:
                print(f"Initializing retriever with {len(self.current_search_results)} search results")
                try:
                    success = self.r.init_retriever(
                        self.current_search_results, 
                        query=query,
                        recall_k=self.k,
                        task3_topk=5
                    )
                    print("Retriever initialized" if success else "Retriever initialization failed")

                except Exception as e:
                    # Retriever failure is non-fatal; the pipeline falls back
                    # to building context from raw search results.
                    print(f"Error initializing retriever: {e}")

            # Fast RAG processing
            answer, context = self.process_task_with_fast_rag(query, query_time)

            print(f"Final answer: {answer}")
            print(f"Generation time: {time.time() - self.t_s:.2f}s")

            return answer

        except Exception as e:
            print(f"Error in generate_answer: {e}")
            traceback.print_exc()
            return "I apologize, but I encountered an error while processing your request."

    def __del__(self):
        """Destructor: best-effort memory cleanup on garbage collection.

        Errors are deliberately swallowed — raising from ``__del__`` only
        produces unraisable-exception noise during interpreter shutdown.
        """
        try:
            self.cleanup_memory()
        except Exception:
            # Narrowed from a bare `except:` so system-exiting exceptions
            # (KeyboardInterrupt/SystemExit) are no longer swallowed.
            pass

import os
import time
import torch
import warnings
import gc
import re
import json
import jieba
import pandas as pd
import numpy as np
from typing import Dict, List, Tuple, Optional, Any
from dataclasses import dataclass
from sklearn.metrics.pairwise import cosine_similarity

import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
from peft import PeftModel, LoraConfig, get_peft_model, TaskType
from langchain_core._api.deprecation import LangChainDeprecationWarning

# Suppress noisy deprecation / torch.load warnings
warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
warnings.filterwarnings("ignore", message=".*torch.load.*")

class ChineseRAGModel:
    """专门处理中文数据的RAG模型"""

    def __init__(self):
        """Initialize the Chinese RAG pipeline: tokenizer, base Qwen LLM,
        PEFT adapter discovery, dense retriever, and intent patterns.

        Heavy I/O: loads multi-GB local checkpoints onto the CUDA devices
        selected below (no hub downloads, ``local_files_only=True``).
        """
        # Restrict which GPUs this process can see.
        os.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5,6"
        # NOTE(review): CUDA_LAUNCH_BLOCKING is conventionally "0"/"1";
        # the value "3" looks like a pasted device id -- confirm intent.
        os.environ["CUDA_LAUNCH_BLOCKING"] = "3"
        
        self.model_path = "models/Qwen3-8B"
        print("-------------------------加载中文LLM模型--------------------------")
        t1 = time.time()
        
        # Load the slow tokenizer from local files only.
        print("正在加载中文tokenizer...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path, 
            trust_remote_code=True,
            local_files_only=True,
            use_fast=False
        )
        
        # Qwen checkpoints may ship without a pad token; reuse EOS so
        # padding/generation paths do not fail.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
            print(f"Tokenizer pad_token设置为eos_token: {self.tokenizer.eos_token}")

        print(f"Tokenizer初始词汇表大小: {len(self.tokenizer)}")

        # Extra special tokens: chat-template markers plus Chinese domain tags.
        chinese_special_tokens = [
            "<|start_header_id|>", "<|end_header_id|>", 
            "<|eot_id|>", "<|user|>", "<|assistant|>",
            "<|中文|>", "<|账单|>", "<|邮件|>", "<|日程|>"
        ]
        num_added = self.tokenizer.add_special_tokens({"additional_special_tokens": chinese_special_tokens})
        print(f"添加了 {num_added} 个中文特殊tokens。新tokenizer大小: {len(self.tokenizer)}")

        # Load the base causal LM, sharded across visible GPUs (device_map="auto").
        print("正在加载中文基础模型...")
        self.base_model = AutoModelForCausalLM.from_pretrained(
            self.model_path, 
            trust_remote_code=True,
            device_map="auto",
            local_files_only=True,
            torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16,
            low_cpu_mem_usage=True
        )
        
        print(f"基础模型加载完成。初始词汇表大小: {self.base_model.get_input_embeddings().weight.size(0)}")

        # Grow the embedding matrix to cover the tokens added above.
        self.base_model.resize_token_embeddings(len(self.tokenizer))
        print(f"模型大小调整完成: {self.base_model.get_input_embeddings().weight.size(0)}")
        
        # Discover per-domain LoRA adapters on disk (weights are lazy-loaded later).
        print("正在加载中文PEFT适配器...")
        self.load_chinese_adapters()
        
        print(f"中文LLM模型加载完成，总耗时: {time.time() - t1:.2f}秒")

        # Build the retriever over the Chinese CSV corpus; failure is
        # non-fatal -- self.r becomes None and callers must check it.
        try:
            print("-------------------------加载中文检索器----------------------")
            t1 = time.time()
            self.k = 6  # default top-k for context retrieval
            # NOTE(review): `Retriever1` is not imported at the top of this
            # file (only `models.Retriever as Retriever`); if it is undefined
            # at runtime the NameError lands in the except below -- confirm.
            self.r = Retriever1(
                device1="cuda:0", device2="cuda:1", batch_size=64,
                tokenizer=self.tokenizer
            )
            print(f"中文检索器加载完成，总耗时: {time.time() - t1:.2f}秒")
            self.r.clear()
        except Exception as e:
            print(f"警告: 中文检索器加载失败: {e}")
            self.r = None

        # Keyword/entity lexicon consumed by chinese_intent_recognition().
        self.chinese_intent_patterns = self._init_chinese_patterns()
        
    def load_chinese_adapters(self):
        """加载中文适配器"""
        base_peft_dir = "./models/qwen-chinese-peft"
        
        # 定义中文适配器配置
        adapter_configs = {
            "email_summary": "qwen-email-summary-adapter/final_model",
            "bill_parser": "qwen-bill-parser-adapter/final_model", 
            "general_chat": "qwen-general-chat-adapter/final_model"
        }
        
        # 初始化适配器管理
        self.current_adapter = None
        self.current_peft_model = None
        
        # 准备适配器路径
        self.adapter_paths = {}
        for name, adapter_subpath in adapter_configs.items():
            adapter_path = os.path.join(base_peft_dir, adapter_subpath)
            if os.path.exists(adapter_path):
                self.adapter_paths[name] = adapter_path
                print(f"找到中文适配器 {name}: {adapter_path}")
            else:
                print(f"警告: 中文适配器路径不存在: {adapter_path}")

        self.available_adapters = list(self.adapter_paths.keys()) + ['base_model']
        print(f"可用的中文适配器: {self.available_adapters}")

    def _init_chinese_patterns(self):
        """初始化中文意图识别模式"""
        return {
            '账单相关': {
                'keywords': ['账单', '费用', '金额', '缴费', '应付', '应缴', '总计', '合计', '电费', '水费', '手机费', '信用卡'],
                'entities': ['张伟', '张三', '李明', '王芳']
            },
            '邮件相关': {
                'keywords': ['邮件', '会议', '通知', '发送', '收件人', '发件人', '主题', '项目', '任务分配', '负责人'],
                'entities': ['新产品发布会', '项目启动', '会议室']
            },
            '日程相关': {
                'keywords': ['日程', '会议', '时间', '地点', '安排', '活动', '大会', '启动', '会议中心'],
                'entities': ['项目启动大会', '新纪元', '会议中心']
            }
        }

    def chinese_intent_recognition(self, query: str) -> Dict[str, Any]:
        """中文意图识别"""
        query_lower = query.lower()
        
        intent_info = {
            'domain': '通用',
            'primary_intent': '信息查询',
            'entities': [],
            'confidence': 0.5,
            'query_type': 'factual'
        }
        
        # 使用jieba进行中文分词
        words = jieba.lcut(query)
        
        # 领域识别
        max_score = 0
        best_domain = '通用'
        
        for domain, patterns in self.chinese_intent_patterns.items():
            score = 0
            # 关键词匹配
            for keyword in patterns['keywords']:
                if keyword in query:
                    score += 2
            
            # 实体匹配
            for entity in patterns['entities']:
                if entity in query:
                    score += 3
                    intent_info['entities'].append(entity)
            
            if score > max_score:
                max_score = score
                best_domain = domain.replace('相关', '')
        
        if max_score > 0:
            intent_info['domain'] = best_domain
            intent_info['confidence'] = min(0.9, 0.5 + max_score * 0.1)
        
        # 查询类型识别
        if any(word in query for word in ['多少', '几', '什么时候', '哪里', '谁']):
            intent_info['query_type'] = 'specific_info'
        elif any(word in query for word in ['总计', '合计', '金额']):
            intent_info['query_type'] = 'numerical'
        elif any(word in query for word in ['时间', '地点', '负责人']):
            intent_info['query_type'] = 'factual'
        
        return intent_info

    def select_chinese_adapter(self, intent_info: Dict, query: str) -> str:
        """根据中文查询意图选择最优适配器"""
        domain = intent_info.get('domain', '通用')
        
        if domain == '账单':
            return "bill_parser"
        elif domain == '邮件':
            return "email_summary"
        elif domain == '日程':
            return "general_chat"  # 或者可以训练专门的日程适配器
        else:
            return "general_chat"

    def quick_load_chinese_adapter(self, adapter_name: str) -> bool:
        """Lazily attach the named LoRA adapter to the base model.

        Returns True when the requested adapter is already active or loads
        successfully; False when the name is unknown or loading fails.
        Side effects: replaces ``self.current_peft_model`` and updates
        ``self.current_adapter``.
        """
        # Fast path: the requested adapter is already mounted.
        if adapter_name == self.current_adapter and self.current_peft_model:
            return True
        
        if adapter_name not in self.adapter_paths:
            return False
        
        try:
            # Drop the previously mounted adapter before loading a new one
            # so its GPU memory can actually be reclaimed.
            # NOTE(review): if from_pretrained below then fails, the
            # `current_peft_model` attribute stays deleted until the next
            # successful load -- confirm later reads tolerate that.
            if self.current_peft_model:
                del self.current_peft_model
                torch.cuda.empty_cache()
            
            adapter_path = self.adapter_paths[adapter_name]
            self.current_peft_model = PeftModel.from_pretrained(
                self.base_model,
                adapter_path,
                torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
            )
            
            self.current_adapter = adapter_name
            print(f"成功加载中文适配器: {adapter_name}")
            return True
            
        except Exception as e:
            print(f"中文适配器加载失败: {e}")
            return False

    def create_chinese_prompt(self, query: str, context: str, intent_info: Dict) -> str:
        """创建中文提示词"""
        domain = intent_info.get('domain', '通用')
        
        if domain == '账单':
            return f"""基于以下账单信息，请准确回答用户的问题。

账单信息：
{context[:800] if context else '无相关账单信息'}

用户问题：{query}

请提供准确、简洁的答案："""
        
        elif domain == '邮件':
            return f"""基于以下邮件内容，请准确回答用户的问题。

邮件内容：
{context[:800] if context else '无相关邮件信息'}

用户问题：{query}

请提供准确、简洁的答案："""
        
        elif domain == '日程':
            return f"""基于以下日程安排信息，请准确回答用户的问题。

日程信息：
{context[:800] if context else '无相关日程信息'}

用户问题：{query}

请提供准确、简洁的答案："""
        
        else:
            return f"""请基于提供的信息回答用户问题。

相关信息：
{context[:800] if context else '无相关信息'}

用户问题：{query}

答案："""

    def chinese_generate_with_adapter(self, adapter_name: str, prompt: str, max_tokens: int = 100) -> str:
        """Generate an answer for ``prompt`` with the requested adapter.

        Falls back to the bare base model when the adapter cannot be
        mounted; returns "" on any generation failure. The prompt is
        truncated to 1000 tokens before generation.
        """
        try:
            if adapter_name == 'base_model':
                model = self.base_model
            else:
                # Mount the adapter lazily; on failure, retry once with the
                # base model instead of surfacing the error.
                if not self.quick_load_chinese_adapter(adapter_name):
                    return self.chinese_generate_with_adapter('base_model', prompt, max_tokens)
                model = self.current_peft_model
            
            inputs = self.tokenizer(
                prompt,
                return_tensors="pt",
                max_length=1000,
                truncation=True
            )["input_ids"]
            
            inputs = self.safe_tensor_to_device(inputs)
            
            # Near-greedy sampling: low temperature keeps factual answers stable.
            with torch.no_grad():
                outputs = model.generate(
                    inputs,
                    max_new_tokens=max_tokens,
                    temperature=0.1,
                    top_p=0.8,
                    do_sample=True,
                    pad_token_id=self.tokenizer.pad_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                    use_cache=True,
                    num_beams=1
                )
            
            # Decode only the newly generated continuation (skip the prompt).
            response = self.tokenizer.decode(
                outputs[0][len(inputs[0]):], 
                skip_special_tokens=True
            ).strip()
            
            # Normalize the raw model output for downstream validation.
            return self.clean_chinese_response(response)
            
        except Exception as e:
            print(f"中文适配器生成失败: {e}")
            return ""

    def clean_chinese_response(self, response: str) -> str:
        """清理中文回答"""
        if not response:
            return ""
        
        # 移除常见的中文前缀
        prefixes = ["根据", "基于", "答案是", "回答:", "答:", "根据提供的信息"]
        for prefix in prefixes:
            if response.startswith(prefix):
                response = response[len(prefix):].strip()
                if response.startswith('：') or response.startswith(':'):
                    response = response[1:].strip()
        
        # 提取第一个完整句子
        sentences = re.split(r'[。！？\n]', response)
        if sentences and len(sentences[0].strip()) > 3:
            response = sentences[0].strip()
            if not response.endswith(('。', '！', '？', '元')):
                response += '。'
        
        return response

    def safe_tensor_to_device(self, tensor, fallback_device='cpu'):
        """安全地将张量移动到模型设备"""
        try:
            if tensor is None or not isinstance(tensor, torch.Tensor):
                return tensor
            
            if hasattr(self, 'current_peft_model') and self.current_peft_model:
                target_device = next(self.current_peft_model.parameters()).device
            else:
                target_device = next(self.base_model.parameters()).device
                
            return tensor.to(target_device)
            
        except Exception as e:
            print(f"移动张量到设备时出错: {e}")
            return tensor.to(fallback_device) if fallback_device else tensor

    def process_chinese_context(self, context_str: str, query: str, intent_info: Dict) -> str:
        """Select the context segments most relevant to a Chinese query.

        Splits the first 2000 chars of context on blank lines / section
        markers, scores each segment (entity hits, jieba word overlap with
        the query, money/time patterns), then keeps the top 3 segments
        joined by blank lines, trimmed to ~1200 chars on a sentence
        boundary. Short contexts (<20 chars) are returned unchanged.
        """
        if not context_str or len(context_str) < 20:
            return context_str
        
        # NOTE(review): `domain` is currently unused in this method.
        domain = intent_info.get('domain', '通用')
        entities = intent_info.get('entities', [])
        
        # jieba segmentation of the query for word-overlap scoring.
        query_words = set(jieba.lcut(query))
        
        # Split on blank lines or lines opening a header (【, **, ##).
        segments = re.split(r'\n\n|\n(?=【|\*\*|##)', context_str[:2000])
        scored_segments = []
        
        for segment in segments:
            if len(segment.strip()) < 10:
                continue
            
            score = 0
            segment_words = set(jieba.lcut(segment))
            
            # Known entities are the strongest relevance signal.
            for entity in entities:
                if entity in segment:
                    score += 3.0
            
            # Lexical overlap between query and segment tokens.
            overlap = len(query_words.intersection(segment_words))
            score += overlap * 0.8
            
            # Boost segments containing money amounts (key for bill queries).
            if re.search(r'\d+\.?\d*元', segment):
                score += 2.0
            
            # Boost segments containing dates or clock times.
            if re.search(r'\d{4}年\d{1,2}月|\d{1,2}:\d{2}', segment):
                score += 1.5
            
            if score > 0:
                scored_segments.append((segment, score))
        
        # Highest score first; ties keep original order (stable sort).
        scored_segments.sort(key=lambda x: x[1], reverse=True)
        selected_segments = [seg[0] for seg in scored_segments[:3]]
        
        processed_context = '\n\n'.join(selected_segments)
        
        # Trim to 1200 chars, preferably ending on a sentence boundary.
        if len(processed_context) > 1200:
            processed_context = processed_context[:1200]
            last_period = processed_context.rfind('。')
            if last_period > 800:
                processed_context = processed_context[:last_period + 1]
        
        return processed_context

    def chinese_multi_strategy_generation(self, context_str: str, query: str, 
                                        intent_info: Dict) -> Tuple[str, str]:
        """Try several adapter strategies and keep the best-scoring answer.

        Strategy order: domain-specific adapter (80 new tokens), then the
        general-chat adapter (70) when different, finally the bare base
        model (60). Each candidate is validated; a score above 0.8 stops
        the cascade early.

        Returns (best answer or a canned apology, the processed context).
        """
        domain = intent_info.get('domain', '通用')
        
        print(f"中文多策略生成 - 领域: {domain}")
        
        # Condense the retrieved context to its most relevant segments.
        if context_str:
            context_str = self.process_chinese_context(context_str, query, intent_info)
        
        # Each strategy: (label, adapter name, max_new_tokens budget).
        strategies = []
        
        # Strategy 1: the adapter matched to the recognized domain.
        optimal_adapter = self.select_chinese_adapter(intent_info, query)
        strategies.append(('领域适配器', optimal_adapter, 80))
        
        # Strategy 2: general-chat adapter as a fallback (when distinct).
        if optimal_adapter != 'general_chat':
            strategies.append(('通用适配器', 'general_chat', 70))
        
        # Strategy 3: the raw base model as a last resort.
        strategies.append(('基础模型', 'base_model', 60))
        
        # Run strategies in order, keeping the highest-scoring valid answer.
        best_answer = ""
        best_score = 0
        
        for i, (strategy_name, adapter, max_tokens) in enumerate(strategies):
            try:
                print(f"执行策略 {i+1}: {strategy_name} ({adapter})")
                
                # NOTE(review): the prompt does not depend on the loop
                # variables, so it could be hoisted out of the loop.
                prompt = self.create_chinese_prompt(query, context_str, intent_info)
                
                # Generate a candidate answer with this strategy's adapter.
                answer = self.chinese_generate_with_adapter(adapter, prompt, max_tokens)
                
                if answer:
                    # Score the candidate; invalid answers are discarded.
                    is_valid, score = self.validate_chinese_answer(answer, query, intent_info)
                    print(f"策略 '{strategy_name}' - 有效: {is_valid}, 得分: {score:.2f}")
                    
                    if is_valid and score > best_score:
                        best_answer = answer
                        best_score = score
                        
                        # Good enough -- skip the remaining (weaker) strategies.
                        if score > 0.8:
                            print("找到高质量中文答案，停止搜索")
                            break
                            
            except Exception as e:
                print(f"策略 {strategy_name} 执行失败: {e}")
                continue
        
        return best_answer if best_answer else "抱歉，我无法找到相关信息。", context_str

    def validate_chinese_answer(self, answer: str, query: str, intent_info: Dict) -> Tuple[bool, float]:
        """验证中文答案质量"""
        if not answer or len(answer.strip()) < 2:
            return False, 0.0
        
        answer_clean = answer.strip()
        
        # 检查无效回答模式
        invalid_patterns = [
            '不知道', '无法回答', '不清楚', '抱歉', '无法确定',
            '无相关', '没有信息', '无法找到'
        ]
        
        if any(pattern in answer_clean for pattern in invalid_patterns):
            return False, 0.0
        
        # 基于查询类型的验证
        query_type = intent_info.get('query_type', 'factual')
        domain = intent_info.get('domain', '通用')
        
        score = 0.6  # 基础分
        
        # 数值查询验证
        if query_type == 'numerical' or '多少' in query:
            if re.search(r'\d+\.?\d*元', answer_clean):
                score += 0.3
            elif re.search(r'\d+', answer_clean):
                score += 0.2
        
        # 时间查询验证  
        if '时间' in query or '什么时候' in query:
            if re.search(r'\d{1,2}:\d{2}|\d{4}年\d{1,2}月|\d{1,2}日', answer_clean):
                score += 0.3
        
        # 地点查询验证
        if '地点' in query or '哪里' in query:
            if any(word in answer_clean for word in ['会议室', '中心', '楼', '厅', '号']):
                score += 0.3
        
        # 人员查询验证
        if '谁' in query or '负责人' in query:
            if re.search(r'[\u4e00-\u9fa5]{2,4}(?=，|。|$)', answer_clean):  # 中文姓名匹配
                score += 0.3
        
        # 答案长度评估
        if len(answer_clean) < 5:
            score -= 0.1
        elif len(answer_clean) > 50:
            score -= 0.1
        
        return True, min(score, 0.9)

    def process_chinese_rag_task(self, query: str, query_time: str) -> Tuple[str, str]:
        """Full Chinese RAG pass: intent -> context retrieval -> generation.

        ``query_time`` is accepted for interface parity but not used here.
        Returns (answer, context used); on failure returns a canned apology
        and an empty context.
        """
        try:
            # Rule-based intent/domain detection drives adapter choice.
            intent_info = self.chinese_intent_recognition(query)
            print(f"中文意图分析: {intent_info}")
            
            # Retrieve context: prefer the dense retriever, else fall back
            # to stitching raw search-result snippets together.
            context_str = ""
            try:
                if self.r and hasattr(self.r, 'retriever') and self.r.retriever:
                    context_str = self.r.get_context_documents(query, k=self.k)
                    print(f"检索到中文上下文长度: {len(context_str) if context_str else 0}")
                else:
                    # No retriever: use up to 4 raw search results directly.
                    search_results = getattr(self, 'current_search_results', [])
                    context_parts = []
                    
                    for result in search_results[:4]:
                        # Accept any of the known content fields per result.
                        content = (result.get('page_content') or 
                                 result.get('page_result') or 
                                 result.get('snippet', ''))
                        
                        if content and len(content.strip()) > 10:
                            # Collapse whitespace; cap each snippet at 500 chars.
                            clean_content = re.sub(r'\s+', ' ', content.strip())[:500]
                            context_parts.append(clean_content)
                    
                    context_str = "\n\n".join(context_parts)
                    
            except Exception as e:
                print(f"中文上下文检索出错: {e}")
                context_str = ""

            # Generate with the multi-strategy adapter cascade.
            answer, final_context = self.chinese_multi_strategy_generation(
                context_str, query, intent_info
            )

            return answer, final_context
            
        except Exception as e:
            print(f"中文RAG任务处理出错: {e}")
            return "抱歉，处理您的请求时遇到了问题。", ""

    def generate_chinese_fallback(self, query: str, intent_info: Dict) -> str:
        """生成中文后备回答"""
        domain = intent_info.get('domain', '通用')
        
        fallback_responses = {
            '账单': "抱歉，我无法找到相关的账单信息。",
            '邮件': "抱歉，我无法找到相关的邮件内容。",  
            '日程': "抱歉，我无法找到相关的日程安排。",
            '通用': "抱歉，我无法找到您所需的信息。"
        }
        
        return fallback_responses.get(domain, fallback_responses['通用'])

    def cleanup_chinese_memory(self):
        """清理中文模型内存"""
        try:
            # 清理当前适配器
            if hasattr(self, 'current_peft_model') and self.current_peft_model:
                del self.current_peft_model
                self.current_peft_model = None
            
            # 清理检索器
            if hasattr(self, 'r') and self.r:
                self.r.clear()
                
            # 通用清理
            gc.collect()
            torch.cuda.empty_cache() if torch.cuda.is_available() else None
            
            print("中文模型内存清理完成")
            
        except Exception as e:
            print(f"中文内存清理出错: {e}")

    def chinese_generate_answer(self, query: str, search_results: List, 
                               query_time: str, conversation_history: Optional[List] = None) -> str:
        """
        Main entry point for Chinese answer generation (test interface).

        Args:
            query: Chinese query text.
            search_results: list of search-result dicts used as the corpus.
            query_time: timestamp string of the query (logged only).
            conversation_history: optional dialog history (currently unused).

        Returns:
            str: the generated Chinese answer. Never raises; any error
            yields a canned apology string instead.
        """
        self.t_s = time.time()
        self.current_search_results = search_results or []
        
        try:
            print(f"\n{'='*50}")
            print(f"处理中文查询: {query}")
            print(f"查询时间: {query_time}")
            print(f"搜索结果数量: {len(self.current_search_results)}")
            print(f"{'='*50}")
            
            # Reclaim memory left over from any previous request.
            gc.collect()
            
            # Index the fresh search results into the retriever (best-effort;
            # failures are logged and the pipeline continues without it).
            if self.current_search_results and self.r:
                print(f"使用 {len(self.current_search_results)} 个搜索结果初始化检索器")
                try:
                    success = self.r.init_retriever(
                        self.current_search_results, 
                        query=query,
                        recall_k=self.k,
                        task3_topk=5
                    )
                    print("检索器初始化成功" if success else "检索器初始化失败")
                        
                except Exception as e:
                    print(f"检索器初始化出错: {e}")
            
            # Run the full RAG pipeline (intent -> retrieval -> generation).
            answer, context = self.process_chinese_rag_task(query, query_time)
            
            # Guarantee a non-empty reply: fall back to a domain apology.
            if not answer or answer.strip() == "":
                intent_info = self.chinese_intent_recognition(query)
                answer = self.generate_chinese_fallback(query, intent_info)
            
            print(f"\n最终中文答案: {answer}")
            print(f"生成耗时: {time.time() - self.t_s:.2f}秒")
            print(f"{'='*50}\n")
            
            return answer
            
        except Exception as e:
            print(f"中文答案生成出错: {e}")
            import traceback
            traceback.print_exc()
            return "抱歉，处理您的请求时遇到了问题。"
