#
import os
import json
import time
# import tiktoken  # OpenAI's tokenizer
import numpy as np
import matplotlib.pyplot as plt
from typing import Dict, List, Tuple, Any, Optional, Union
from mmlm.cpm.cpm_engine import CpmEngine

class ExpandContext(object):
    """Toolkit for building, measuring, compressing, and pruning LLM prompt contexts.

    All functionality is exposed as staticmethods; the optional ``model`` /
    ``tokenizer`` class attributes may be populated by :meth:`startup` when a
    real tokenizer becomes available (count_tokens falls back to a heuristic
    while ``tokenizer`` is None).
    """

    # Optional handles; left as None until startup() wires in real objects.
    model = None
    tokenizer = None

    @staticmethod
    def startup(params: Optional[Dict] = None) -> None:
        """Initialize the context-expansion engine (currently a placeholder).

        Args:
            params: Optional configuration mapping; currently unused.
                (Default changed from a mutable ``{}`` to ``None`` to avoid
                the shared-mutable-default pitfall; callers are unaffected.)
        """
        pass

    @staticmethod
    def count_tokens(text: str) -> int:
        """Count tokens in *text* using the configured tokenizer.

        Falls back to a rough ~1.3 tokens-per-word heuristic when no
        tokenizer has been configured.
        """
        if ExpandContext.tokenizer:
            return len(ExpandContext.tokenizer.encode(text))
        # Fallback for non-OpenAI models (rough approximation). Truncated so
        # the declared ``int`` return type actually holds (previously this
        # path leaked a float to callers).
        return int(len(text.split()) * 1.3)

    @staticmethod
    def measure_latency(func, *args, **kwargs) -> Tuple[Any, float]:
        """Execute ``func(*args, **kwargs)`` and return ``(result, seconds)``.

        Uses ``time.perf_counter`` — a monotonic, high-resolution clock —
        so the measurement is immune to system clock adjustments.
        """
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        return result, time.perf_counter() - start_time

    @staticmethod
    def calculate_metrics(prompt: str, response: str, latency: float) -> Dict[str, float]:
        """Calculate key metrics for a prompt-response pair.

        Args:
            prompt: The prompt that was sent to the model.
            response: The model's response text.
            latency: Wall-clock duration of the call, in seconds.

        Returns:
            Dict with prompt/response token counts, token efficiency
            (response tokens / prompt tokens), raw latency, and latency
            normalized per 1k prompt tokens.
        """
        prompt_tokens = ExpandContext.count_tokens(prompt)
        response_tokens = ExpandContext.count_tokens(response)

        # Guard both ratios against division by zero on empty prompts.
        token_efficiency = response_tokens / prompt_tokens if prompt_tokens > 0 else 0
        latency_per_1k = (latency / prompt_tokens) * 1000 if prompt_tokens > 0 else 0

        return {
            "prompt_tokens": prompt_tokens,
            "response_tokens": response_tokens,
            "token_efficiency": token_efficiency,
            "latency": latency,
            "latency_per_1k": latency_per_1k
        }

    @staticmethod
    def generate_response(prompt: str) -> Tuple[str, float]:
        """Generate a response from the LLM and return ``(response, seconds)``.

        Latency is measured with the monotonic ``time.perf_counter`` clock.
        """
        start_time = time.perf_counter()
        resp = CpmEngine.infer(query=prompt)
        return resp, time.perf_counter() - start_time

    @staticmethod
    def create_expanded_context(
        base_prompt: str,
        role: Optional[str] = None,
        examples: Optional[List[str]] = None,
        constraints: Optional[List[str]] = None,
        audience: Optional[str] = None,
        tone: Optional[str] = None,
        output_format: Optional[str] = None
    ) -> str:
        """
        Create an expanded context from a base prompt with optional components.

        Args:
            base_prompt: The core instruction or question
            role: Who the model should act as
            examples: List of example outputs to guide the model
            constraints: List of requirements or boundaries
            audience: Who the output is intended for
            tone: Desired tone of the response
            output_format: Specific format requirements

        Returns:
            Expanded context as a string (sections separated by blank lines,
            in the order: role, base prompt, audience, tone, format,
            constraints, examples).
        """
        context_parts = []

        if role:
            context_parts.append(f"You are {role}.")

        context_parts.append(base_prompt)

        if audience:
            context_parts.append(f"Your response should be suitable for {audience}.")

        if tone:
            context_parts.append(f"Use a {tone} tone in your response.")

        if output_format:
            context_parts.append(f"Format your response as {output_format}.")

        if constraints and len(constraints) > 0:
            context_parts.append("Requirements:")
            for constraint in constraints:
                context_parts.append(f"- {constraint}")

        if examples and len(examples) > 0:
            context_parts.append("Examples:")
            for i, example in enumerate(examples, 1):
                context_parts.append(f"Example {i}:\n{example}")

        # Join all parts with blank-line separation.
        expanded_context = "\n\n".join(context_parts)

        return expanded_context

    @staticmethod
    def layered_contexts(base_prompt: str, context_layers: Dict[str, str]) -> Dict[str, Dict]:
        """
        Test different combinations of context layers to find optimal configurations.

        Args:
            base_prompt: Core instruction
            context_layers: Dictionary of layer name -> layer content

        Returns:
            Results dictionary with metrics for each tested configuration:
            the bare base prompt, base + each individual layer, and all
            layers combined.
        """
        layer_results = {}

        # Baseline: the base prompt with no extra layers.
        print("Testing base prompt...")
        base_response, base_latency = ExpandContext.generate_response(base_prompt)
        layer_results["base"] = {
            "prompt": base_prompt,
            "response": base_response,
            **ExpandContext.calculate_metrics(base_prompt, base_response, base_latency)
        }

        # Each layer individually appended to the base prompt.
        for layer_name, layer_content in context_layers.items():
            combined_prompt = f"{base_prompt}\n\n{layer_content}"
            print(f"Testing base + {layer_name}...")
            response, latency = ExpandContext.generate_response(combined_prompt)
            layer_results[f"base+{layer_name}"] = {
                "prompt": combined_prompt,
                "response": response,
                **ExpandContext.calculate_metrics(combined_prompt, response, latency)
            }

        # All layers combined (insertion order of the dict).
        all_layers = "\n\n".join(context_layers.values())
        full_prompt = f"{base_prompt}\n\n{all_layers}"
        print("Testing all layers combined...")
        full_response, full_latency = ExpandContext.generate_response(full_prompt)
        layer_results["all_layers"] = {
            "prompt": full_prompt,
            "response": full_response,
            **ExpandContext.calculate_metrics(full_prompt, full_response, full_latency)
        }

        return layer_results

    @staticmethod
    def compress_context(context: str, technique: str = 'summarize') -> str:
        """
        Apply different compression techniques to reduce token usage while preserving meaning.

        Args:
            context: The context to compress
            technique: Compression technique to use (summarize, keywords, bullet);
                any other value returns the context unchanged.

        Returns:
            Compressed context
        """
        if technique == 'summarize':
            # Use the LLM itself to produce a shorter paraphrase.
            prompt = f"""Summarize the following context in a concise way that preserves all key information
    but uses fewer words. Focus on essential instructions and details:

    {context}"""
            compressed, _ = ExpandContext.generate_response(prompt)
            return compressed

        elif technique == 'keywords':
            # Reduce to key terms and phrases only.
            prompt = f"""Extract the most important keywords, phrases, and instructions from this context:

    {context}

    Format your response as a comma-separated list of essential terms and short phrases."""
            keywords, _ = ExpandContext.generate_response(prompt)
            return keywords

        elif technique == 'bullet':
            # Restructure as terse bullet points.
            prompt = f"""Convert this context into a concise, structured list of bullet points that
    captures all essential information with minimal words:

    {context}"""
            bullets, _ = ExpandContext.generate_response(prompt)
            return bullets

        else:
            return context  # Unknown technique: no compression.

    @staticmethod
    def evaluate_response_quality(prompt: str, response: str, criteria: List[str]) -> float:
        """
        Use the LLM to evaluate the quality of a response based on specific criteria.

        Args:
            prompt: The prompt that generated the response
            response: The response to evaluate
            criteria: List of criteria to evaluate against

        Returns:
            Quality score from 0.0 to 1.0 (0.5 when the LLM's answer cannot
            be parsed).
        """
        criteria_list = "\n".join([f"- {c}" for c in criteria])
        eval_prompt = f"""Rate the quality of the following response to a prompt. 
        
    Prompt: 
    {prompt}

    Response:
    {response}

    Please evaluate based on these criteria:
    {criteria_list}

    For each criterion, rate from 0-10, then provide an overall score from 0.0 to 1.0 where 
    1.0 is perfect and 0.0 is completely inadequate. Format your response as:

    Criterion 1: [score] - [brief comment]
    Criterion 2: [score] - [brief comment]
    ...
    Overall Score: [0.0-1.0]
    """

        evaluation, _ = ExpandContext.generate_response(eval_prompt)

        # Parse the last "Overall Score: <float>" occurrence from the reply.
        try:
            import re
            score_match = re.findall(r"Overall Score:\s*([0-9]*\.?[0-9]+)", evaluation)
            if score_match:
                return float(score_match[-1])
            else:
                return 0.5  # Default if parsing fails
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            return 0.5  # Default if parsing fails

    @staticmethod
    def prune_context_layers(base_prompt: str, layers: Dict[str, str], criteria: List[str]) -> Tuple[str, Dict]:
        """
        Systematically test and prune context layers that don't improve response quality.

        Args:
            base_prompt: Core instruction
            layers: Dictionary of context layer name -> content
            criteria: Evaluation criteria for responses

        Returns:
            Tuple of (optimized prompt, results dictionary). NOTE: on a
            recursive pruning step the results dictionary reflects only the
            final recursion level.
        """
        print("Testing baseline...")
        base_response, base_latency = ExpandContext.generate_response(base_prompt)
        base_quality = ExpandContext.evaluate_response_quality(base_prompt, base_response, criteria)

        results = {
            "base": {
                "prompt": base_prompt,
                "response": base_response,
                "quality": base_quality,
                "tokens": ExpandContext.count_tokens(base_prompt),
                "latency": base_latency
            }
        }

        # Score the prompt with every layer attached.
        all_layers_text = "\n\n".join(layers.values())
        full_prompt = f"{base_prompt}\n\n{all_layers_text}"
        print("Testing all layers...")
        full_response, full_latency = ExpandContext.generate_response(full_prompt)
        full_quality = ExpandContext.evaluate_response_quality(full_prompt, full_response, criteria)

        results["all_layers"] = {
            "prompt": full_prompt,
            "response": full_response,
            "quality": full_quality,
            "tokens": ExpandContext.count_tokens(full_prompt),
            "latency": full_latency
        }

        # Leave-one-out: drop each layer in turn and re-score.
        best_quality = full_quality
        best_config = "all_layers"

        for layer_to_remove in layers.keys():
            remaining_layers = {k: v for k, v in layers.items() if k != layer_to_remove}
            remaining_text = "\n\n".join(remaining_layers.values())
            test_prompt = f"{base_prompt}\n\n{remaining_text}"

            print(f"Testing without '{layer_to_remove}'...")
            test_response, test_latency = ExpandContext.generate_response(test_prompt)
            test_quality = ExpandContext.evaluate_response_quality(test_prompt, test_response, criteria)

            config_name = f"without_{layer_to_remove}"
            results[config_name] = {
                "prompt": test_prompt,
                "response": test_response,
                "quality": test_quality,
                "tokens": ExpandContext.count_tokens(test_prompt),
                "latency": test_latency
            }

            # Removing a layer that maintains or improves quality is a win
            # (fewer tokens for equal or better output).
            if test_quality >= best_quality:
                best_quality = test_quality
                best_config = config_name

        # No single removal helped: keep everything.
        if best_config == "all_layers":
            return full_prompt, results

        # A removal helped: recurse on the reduced layer set to prune further.
        if best_config.startswith("without_"):
            removed_layer = best_config.replace("without_", "")
            remaining_layers = {k: v for k, v in layers.items() if k != removed_layer}
            print(f"Layer '{removed_layer}' can be removed. Testing further pruning...")
            return ExpandContext.prune_context_layers(base_prompt, remaining_layers, criteria)

        return results[best_config]["prompt"], results

    @staticmethod
    def retrieve_relevant_info(query: str, knowledge_base: List[Dict[str, str]]) -> List[str]:
        """
        Retrieve relevant information from a knowledge base based on a query.
        Args:
            query: The search query
            knowledge_base: List of dictionaries with 'title' and 'content' keys
        Returns:
            Up to 3 content snippets, ranked by number of matching query terms
            (ties keep knowledge-base order).
        """
        # In a real application, you would use vector embeddings and similarity
        # search; here we rank by simple keyword overlap.
        scored = []
        query_terms = set(query.lower().split())
        for item in knowledge_base:
            content = item['content'].lower()
            title = item['title'].lower()
            # Count how many distinct query terms appear in title or content.
            matches = sum(1 for term in query_terms if term in content or term in title)
            if matches > 0:
                scored.append((matches, item['content']))
        # The match count was previously computed but never used for ranking,
        # so "top 3" was actually "first 3". Stable sort, best score first.
        scored.sort(key=lambda pair: pair[0], reverse=True)
        return [content for _, content in scored[:3]]

    @staticmethod
    def create_rag_context(base_prompt: str, query: str, knowledge_base: List[Dict[str, str]]) -> str:
        """
        Create a retrieval-augmented context by combining a base prompt with relevant information.
        Args:
            base_prompt: Core instruction
            query: The query to search for relevant information
            knowledge_base: Knowledge base to search
        Returns:
            Expanded context with retrieved information, or the unchanged
            base prompt when nothing relevant is found.
        """
        relevant_info = ExpandContext.retrieve_relevant_info(query, knowledge_base)
        if not relevant_info:
            return base_prompt
        # Append retrieved snippets as a labelled context block.
        context_block = "Relevant information:\n\n" + "\n\n".join(relevant_info)
        rag_context = f"{base_prompt}\n\n{context_block}"
        return rag_context

