import argparse
import json
import os
import re

import dotenv
import openai
import pandas as pd
from datasets import load_from_disk, load_dataset
from joblib import Parallel, delayed
from tqdm import tqdm

dotenv.load_dotenv()

# Import the SYSTEM_PROMPT from process_data.py
from process_data import SYSTEM_PROMPT

def parse_prediction(output_text):
    """
    Extract the predicted disintegration time from model output.

    Looks for the value inside \\boxed{} first; if that is absent or not
    numeric, falls back to the last number appearing anywhere in the text.

    Args:
        output_text: Raw completion text returned by the model.

    Returns:
        The parsed value as a float, or None if no number can be found.
    """
    if "\\boxed{" in output_text:
        start_idx = output_text.find("\\boxed{") + len("\\boxed{")
        end_idx = output_text.find("}", start_idx)
        if end_idx > start_idx:
            answer_text = output_text[start_idx:end_idx].strip()
            # Use the first whitespace-separated token so units are ignored
            # (e.g. "\boxed{30 seconds}" -> 30.0).
            # IndexError covers an empty/whitespace-only box; ValueError a
            # non-numeric token — in both cases fall through to the regex
            # instead of crashing the worker.
            try:
                return float(answer_text.split()[0])
            except (ValueError, IndexError):
                pass

    # Fallback: find numbers in the text and take the last one.
    numbers = re.findall(r"(\d+\.?\d*)", output_text)
    if numbers:
        return float(numbers[-1])

    return None

def process_single_example(example, model_id, split_name, temperature=0.1):
    """Process a single example with the OpenAI API.

    Args:
        example: Mapping with at least "prompt" and "answer" keys
            (optionally "id"); "answer" must be convertible to float.
        model_id: OpenAI model identifier to query.
        split_name: Name of the dataset split this example belongs to.
        temperature: Sampling temperature for the completion.

    Returns:
        A result dict containing the prompt, ground truth, parsed
        prediction, raw model text, and accuracy fields
        ("error_percentage", "is_correct"). On API failure the prediction
        fields stay None/False and an "error" key records the exception,
        so one bad request no longer aborts the whole parallel run.
    """
    # Create a fresh client for this worker; the shared-client thread/process
    # safety across joblib workers is not guaranteed.
    client = openai.OpenAI(
        api_key=os.environ.get("OPENAI_API_KEY"),
        base_url=os.environ.get("OPENAI_API_BASE"),
    )

    prompt = example["prompt"]
    ground_truth = float(example["answer"])

    # Pre-populate with failure defaults; filled in below on success.
    result = {
        "id": example.get("id", None),
        "split": split_name,
        "prompt": prompt,
        "ground_truth": ground_truth,
        "prediction": None,
        "prediction_text": None,
        "error_percentage": None,
        "is_correct": False,
    }

    # NOTE(review): SYSTEM_PROMPT is imported at module level but never sent
    # here — confirm whether a system message is intended for this eval.
    try:
        completion = client.chat.completions.create(
            model=model_id,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            max_tokens=1000,
        )
    except Exception as e:
        # Record the failure and count the example as incorrect rather than
        # letting one transient API error kill the entire Parallel(...) call.
        result["error"] = str(e)
        return result

    prediction_text = completion.choices[0].message.content
    prediction = parse_prediction(prediction_text)

    result["prediction"] = prediction
    result["prediction_text"] = prediction_text

    # Calculate accuracy metrics if prediction is valid.
    if prediction is not None:
        if ground_truth == 0:
            # Relative error is undefined for a zero ground truth, so
            # require an exact match.
            is_correct = (prediction == 0)
            error_percentage = 0.0 if is_correct else float('inf')
        else:
            # Correct when within 10% relative error of the ground truth.
            error_percentage = abs(prediction - ground_truth) / abs(ground_truth) * 100
            is_correct = error_percentage <= 10.0

        result["error_percentage"] = error_percentage
        result["is_correct"] = is_correct

    return result

def process_dataset_split(split_dataset, split_name, model_id, sample_size, n_jobs):
    """Evaluate one dataset split (e.g. train or test) in parallel.

    Returns a dict with the split name, accuracy over examples that
    produced a parseable prediction, the correct/total counts, and the
    full per-example results list.
    """
    # Optionally restrict to the first `sample_size` rows.
    if sample_size and sample_size < len(split_dataset):
        split_dataset = split_dataset.select(range(sample_size))

    # Attach a stable integer id to each example for traceability.
    examples = [dict(row, id=idx) for idx, row in enumerate(split_dataset)]

    print(f"Processing {len(examples)} examples from {split_name} set with {n_jobs} parallel jobs...")

    # Fan out one API call per example across the worker pool.
    results = Parallel(n_jobs=n_jobs, verbose=10)(
        delayed(process_single_example)(row, model_id, split_name) for row in examples
    )

    # Accuracy is computed only over examples with a parseable prediction.
    correct = sum(bool(r.get("is_correct", False)) for r in results)
    total = sum(r.get("prediction") is not None for r in results)
    accuracy = (correct / total * 100) if total > 0 else 0

    print(f"\n{split_name.capitalize()} set accuracy: {accuracy:.2f}% ({correct}/{total})")

    return {
        "split": split_name,
        "accuracy": accuracy,
        "correct": correct,
        "total": total,
        "results": results,
    }

def evaluate_with_openai(dataset_path, model_id, output_path, sample_size=None, n_jobs=10):
    """
    Evaluate ODT predictions using OpenAI API in parallel for both train and test sets.

    Args:
        dataset_path: Local directory of a saved HF dataset, or a Hugging
            Face Hub dataset ID.
        model_id: OpenAI model identifier to query.
        output_path: Path to the JSON file where results are written.
        sample_size: If given, evaluate only the first N examples per split.
        n_jobs: Number of parallel joblib workers.

    Returns:
        Overall accuracy percentage across all processed splits, or None
        if the dataset could not be loaded.
    """
    # Load dataset: local directory first, otherwise treat as a Hub ID.
    print(f"Loading dataset from {dataset_path}")
    if os.path.isdir(dataset_path):
        dataset = load_from_disk(dataset_path)
    else:
        try:
            dataset = load_dataset(dataset_path)
        except Exception as e:
            print(f"Failed to load dataset: {e}")
            return None

    # Process both train and test sets if available
    all_results = {}
    total_correct = 0
    total_examples = 0

    # Determine which splits to process
    splits_to_process = []
    if "train" in dataset:
        splits_to_process.append("train")
    if "test" in dataset:
        splits_to_process.append("test")
    if not splits_to_process:
        # If no train/test splits, use the first available split
        splits_to_process = [next(iter(dataset.keys()))]

    # Process each split and accumulate counts for the overall accuracy
    for split_name in splits_to_process:
        split_result = process_dataset_split(
            dataset[split_name],
            split_name,
            model_id,
            sample_size,
            n_jobs
        )

        all_results[split_name] = split_result
        total_correct += split_result["correct"]
        total_examples += split_result["total"]

    # Calculate overall accuracy
    overall_accuracy = total_correct / total_examples * 100 if total_examples > 0 else 0
    print(f"\nOverall accuracy: {overall_accuracy:.2f}% ({total_correct}/{total_examples})")

    # Prepare final results
    final_results = {
        "overall": {
            "accuracy": overall_accuracy,
            "correct": total_correct,
            "total": total_examples,
        },
        "model_id": model_id,
        "splits": all_results
    }

    # Save results. Guard the makedirs call: os.path.dirname returns "" for a
    # bare filename (e.g. "results.json"), and os.makedirs("") raises
    # FileNotFoundError even with exist_ok=True.
    output_dir = os.path.dirname(output_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    with open(output_path, "w") as f:
        json.dump(final_results, f, indent=2)

    print(f"Results saved to {output_path}")
    return overall_accuracy

if __name__ == "__main__":
    # CLI entry point: (flag, kwargs) pairs keep the argument spec compact.
    cli_args = [
        ("--dataset_path", dict(type=str, default="./assets/odt_dataset",
                                help="Path to the dataset or Hugging Face dataset ID")),
        ("--model_id", dict(type=str, default=os.environ.get("OPENAI_MODEL_ID"),
                            help="OpenAI model ID to use")),
        ("--output_path", dict(type=str, default="./results/openai_evaluation_results.json",
                               help="Path to save results JSON")),
        ("--sample_size", dict(type=int, default=None,
                               help="Number of examples to sample from each split (default: use all)")),
        ("--n_jobs", dict(type=int, default=10,
                          help="Number of parallel jobs")),
    ]

    parser = argparse.ArgumentParser(description="Evaluate ODT predictions using OpenAI API")
    for flag, options in cli_args:
        parser.add_argument(flag, **options)
    args = parser.parse_args()

    evaluate_with_openai(
        dataset_path=args.dataset_path,
        model_id=args.model_id,
        output_path=args.output_path,
        sample_size=args.sample_size,
        n_jobs=args.n_jobs,
    )