import argparse
import json
import os
import re

import torch
from datasets import load_from_disk, load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM

def parse_prediction(output_text):
    """
    Extract the predicted disintegration time from model output.

    Prefers a number found inside a LaTeX-style \\boxed{...} marker; if the
    box is absent or contains no number, falls back to the last number
    appearing anywhere in the text.

    Args:
        output_text: Raw generated text from the model.

    Returns:
        The predicted value as a float, or None if no number can be found.
    """
    try:
        # Preferred path: the answer is wrapped in \boxed{...}.
        if "\\boxed{" in output_text:
            start_idx = output_text.find("\\boxed{") + len("\\boxed{")
            end_idx = output_text.find("}", start_idx)
            if end_idx > start_idx:
                answer_text = output_text[start_idx:end_idx].strip()
                # Take the first number inside the box. A regex search (rather
                # than float(split()[0])) tolerates leading words and trailing
                # units, e.g. "\boxed{approximately 30 s}" -> 30.0, instead of
                # falling through to an unrelated number elsewhere in the text.
                match = re.search(r"\d+\.?\d*", answer_text)
                if match:
                    return float(match.group())

        # Fallback: take the last number anywhere in the text.
        numbers = re.findall(r"\d+\.?\d*", output_text)
        if numbers:
            return float(numbers[-1])

        return None
    except Exception as e:
        # Defensive catch-all: a parse failure must never abort a whole
        # evaluation run; the caller treats None as "unparseable".
        print(f"Error parsing prediction: {e}")
        return None

def _load_test_dataset(dataset_path):
    """
    Load the evaluation dataset from a local directory or the Hugging Face Hub.

    Local directories are loaded as-is via load_from_disk (errors propagate).
    Hub datasets prefer the 'test' split and fall back to the first available
    split; returns None when the Hub download fails.
    """
    print(f"Loading dataset from {dataset_path}")
    if os.path.isdir(dataset_path):
        print("Loading dataset from disk...")
        return load_from_disk(dataset_path)

    print("Loading dataset from Hugging Face Hub...")
    try:
        dataset = load_dataset(dataset_path)
        if "test" not in dataset:
            print(f"Warning: 'test' split not found in Hub dataset {dataset_path}. Trying to use the first available split.")
            available_splits = list(dataset.keys())
            if not available_splits:
                raise ValueError(f"No splits found in the dataset {dataset_path}")
            return dataset[available_splits[0]]
        return dataset["test"]
    except Exception as e:
        print(f"Failed to load dataset from Hugging Face Hub: {e}")
        print("Please ensure the dataset name is correct and you have internet access.")
        return None


def _score_prediction(prediction, ground_truth, tolerance_pct=10.0):
    """
    Score a single numeric prediction against the ground truth.

    Returns (is_correct, error_percentage). A prediction is correct when its
    relative error is within tolerance_pct percent; a zero ground truth is
    matched only by an exact zero prediction (relative error is undefined).
    """
    if ground_truth == 0:
        is_correct = (prediction == 0)
        return is_correct, (0.0 if is_correct else float('inf'))
    error_percentage = abs(prediction - ground_truth) / abs(ground_truth) * 100
    return error_percentage <= tolerance_pct, error_percentage


def evaluate_model(model_path, dataset_path, template="qwen", batch_size=1, device="cuda"):
    """
    Evaluate the trained model against the test dataset.

    A prediction is considered correct if it's within 10% of the ground truth.
    Per-example results and the final accuracy are written to
    <model dir>/evaluation_results/evaluation_results.json.

    Args:
        model_path: Path to the trained model checkpoint.
        dataset_path: Local dataset directory or Hub dataset name.
        template: Template name (currently unused; kept for CLI compatibility).
        batch_size: Batch size (currently unused; only 1 is supported).
        device: "cuda" or "cpu".

    Returns:
        The accuracy percentage, or None if the dataset could not be loaded.
    """
    test_dataset = _load_test_dataset(dataset_path)
    if test_dataset is None:
        return None

    # Load model and tokenizer
    print(f"Loading model from {model_path}")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        device_map=device,
        trust_remote_code=True
    )

    correct = 0
    total = 0
    results = []

    for i, example in enumerate(tqdm(test_dataset, desc="Evaluating")):
        prompt = example["prompt"]
        try:
            ground_truth = float(example["answer"])
        except KeyError:
            print(f"Error: 'answer' column not found in the dataset example {i}. Skipping.")
            continue
        except ValueError:
            print(f"Error: Could not convert 'answer' value '{example.get('answer')}' to float in example {i}. Skipping.")
            continue

        # Get model prediction.
        # NOTE(review): generate() defaults to do_sample=False, so temperature
        # and top_p below are ignored and decoding is greedy — confirm whether
        # sampling was actually intended before changing.
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=1000,
                temperature=0.1,
                top_p=0.9,
                repetition_penalty=1.1
            )

        # Decode only the newly generated tokens. Slicing by the tokenized
        # prompt length is robust, whereas the previous string-prefix check
        # silently failed whenever special-token handling made the decoded
        # text not start with the exact prompt string — leaking the prompt
        # (and its numbers) into parse_prediction.
        prompt_len = inputs["input_ids"].shape[1]
        prediction_text = tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        ).strip()

        prediction = parse_prediction(prediction_text)

        # NOTE(review): unparseable outputs are skipped rather than counted as
        # wrong, so the reported accuracy can be optimistic. Kept as-is to
        # preserve the existing metric definition.
        if prediction is not None:
            is_correct, error_percentage = _score_prediction(prediction, ground_truth)
            if is_correct:
                correct += 1
            total += 1

            results.append({
                "id": i,
                "prompt": prompt,
                "ground_truth": ground_truth,
                "prediction": prediction,
                "prediction_text": prediction_text,
                "error_percentage": error_percentage,
                "is_correct": is_correct
            })

            # Print running accuracy every 5 examples.
            if (i + 1) % 5 == 0:
                current_accuracy = correct / total * 100 if total > 0 else 0
                print(f"Current accuracy: {current_accuracy:.2f}% ({correct}/{total})")

    # Calculate final accuracy
    accuracy = correct / total * 100 if total > 0 else 0
    print(f"\nFinal accuracy: {accuracy:.2f}% ({correct}/{total})")

    # Save results next to the model checkpoint.
    results_dir = os.path.join(os.path.dirname(model_path), "evaluation_results")
    os.makedirs(results_dir, exist_ok=True)

    results_path = os.path.join(results_dir, "evaluation_results.json")
    with open(results_path, "w") as f:
        json.dump({
            "accuracy": accuracy,
            "correct": correct,
            "total": total,
            "results": results
        }, f, indent=2)

    print(f"Results saved to {results_path}")
    return accuracy

if __name__ == "__main__":
    # Command-line entry point: build the CLI, validate options, run evaluation.
    cli = argparse.ArgumentParser(description="Evaluate ODT disintegration time prediction model")
    cli.add_argument("--model_path", type=str, required=True, help="Path to the trained model")
    cli.add_argument("--dataset_path", type=str, default="./assets/odt_dataset", help="Path to the local dataset directory or Hugging Face Hub dataset name (e.g., 'username/my_odt_dataset')")
    cli.add_argument("--template", type=str, default="qwen", help="Template name")
    cli.add_argument("--batch_size", type=int, default=1, help="Batch size for evaluation (currently only supports 1)")
    cli.add_argument("--device", type=str, default="cuda", help="Device to use (cuda or cpu)")

    opts = cli.parse_args()

    # Only single-example evaluation is implemented; coerce anything else.
    if opts.batch_size != 1:
        print("Warning: Current implementation only supports batch_size=1. Setting batch_size to 1.")
        opts.batch_size = 1

    evaluate_model(opts.model_path, opts.dataset_path, opts.template, opts.batch_size, opts.device)