import argparse
import json
import os
import re

import dotenv
import openai
import pandas as pd
from datasets import load_dataset, load_from_disk
from joblib import Parallel, delayed
from tqdm import tqdm

# Few-shot prompt template for predicting ODT disintegration time.
# `{input_fields}` is filled with the example's raw feature JSON via
# str.format; the doubled braces ({{ }}) escape literal braces so the
# embedded JSON examples survive formatting.
PROMPT = """
You are a pharmaceutical AI specialist focused on Oral Disintegrating Tablet (ODT) formulations.

Input Fields:
Field	Description	Example
API	Active Pharmaceutical Ingredient name	"Paracetamol"
API Dose (mg)	Dosage of active ingredient	325.0
Filler	Primary filler excipient	"Mannitol"
Filler Dose (mg)	Filler amount in milligrams	285.0
Binder	Binding agent	"PVP"
Binder Dose (mg)	Binder amount	195.0
Disintegrant	Disintegrating agent	"CC-Na"
Disintegrant Dose (mg)	Disintegrant amount	25.0
Lubricant	Lubricating agent	"Mg stearate"
Lubricant Dose (mg)	Lubricant amount	10.0
Solubilizer	Solubility enhancer	"β-cyclodextrin"
Solubilizer Dose (mg)	Solubilizer amount	43.0
Hardness (N)	Tablet hardness in Newtons	53.0
Friability (%)	Tablet friability percentage	0.56
Thickness (mm)	Tablet thickness in millimeters	4.76
Punch (mm)	Tooling diameter for compression	8.0

Output Field:
Field	Description
Disintegration time (sec)	Time for complete oral disintegration

Examples1:
Input fields: {{"API": "Sildenafil", "Dose (mg)": 29.8, "Filler": "Mannitol", "Dose (mg).1": 205.5, "Filler.1": NaN, "Dose (mg).2": NaN, "Binder": NaN, "Dose (mg).3": NaN, "Disintegrant": "PVPP", "Dose (mg).4": 13.0, "Disintegrant.1": NaN, "Dose (mg).5": NaN, "Lubricant": "Mg stearate", "Dose (mg).6": 3.0, "Lubricant.1": "Aerosil", "Dose (mg).7": 0.75, "Solubilzer": NaN, "Dose (mg).8": NaN, "Hardness (N)": 32.0, "Friability(%)": 0.46, "Thickness (mm)": NaN, "Punch (mm)": 10.0}}
Disintegration time (sec): 27

Example2:
Input fields: {{"API": "Eslicarbazepine", "Dose (mg)": 800.0, "Filler": "Mannitol", "Dose (mg).1": 150.0, "Filler.1": "MCC", "Dose (mg).2": 50.08, "Binder": NaN, "Dose (mg).3": NaN, "Disintegrant": "CC-Na", "Dose (mg).4": 0.0, "Disintegrant.1": "PVPP", "Dose (mg).5": 0.0, "Lubricant": "Mg stearate", "Dose (mg).6": 4.0, "Lubricant.1": NaN, "Dose (mg).7": NaN, "Solubilzer": "\u03b2-cyclodextrin", "Dose (mg).8": 109.9, "Hardness (N)": 38.0, "Friability(%)": 0.87, "Thickness (mm)": 6.5, "Punch (mm)": 16.0}}
Disintegration time (sec): 55.66

Example3:
Input fields: {{"API": "Risperidone", "Dose (mg)": 0.5, "Filler": "Mannitol", "Dose (mg).1": 2.6, "Filler.1": "MCC", "Dose (mg).2": 1.3, "Binder": "PVP", "Dose (mg).3": 0.5, "Disintegrant": "CC-Na", "Dose (mg).4": 0.0, "Disintegrant.1": "CMS-Na", "Dose (mg).5": 0.0, "Lubricant": "Aerosil", "Dose (mg).6": 50.0, "Lubricant.1": NaN, "Dose (mg).7": NaN, "Solubilzer": NaN, "Dose (mg).8": NaN, "Hardness (N)": NaN, "Friability(%)": NaN, "Thickness (mm)": 1.77, "Punch (mm)": 3.0}}
Disintegration time (sec): 2.19

Example4:
Input fields: {{"API": "Donepezil", "Dose (mg)": 10.0, "Filler": "Mannitol", "Dose (mg).1": 198.1, "Filler.1": NaN, "Dose (mg).2": NaN, "Binder": NaN, "Dose (mg).3": NaN, "Disintegrant": "CC-Na", "Dose (mg).4": 0.0, "Disintegrant.1": "PVPP", "Dose (mg).5": 0.0, "Lubricant": "Mg stearate", "Dose (mg).6": 3.0, "Lubricant.1": NaN, "Dose (mg).7": NaN, "Solubilzer": NaN, "Dose (mg).8": NaN, "Hardness (N)": 69.0, "Friability(%)": 0.57, "Thickness (mm)": 4.12, "Punch (mm)": 9.5}}
Disintegration time (sec): 73

Example5:
Input fields: {{"API": "Ondansetron", "Dose (mg)": 8.0, "Filler": "Mannitol", "Dose (mg).1": 22.5, "Filler.1": "MCC", "Dose (mg).2": 15.12, "Binder": NaN, "Dose (mg).3": NaN, "Disintegrant": "PVPP", "Dose (mg).4": 7.5, "Disintegrant.1": NaN, "Dose (mg).5": NaN, "Lubricant": "Aerosil", "Dose (mg).6": 0.75, "Lubricant.1": NaN, "Dose (mg).7": NaN, "Solubilzer": NaN, "Dose (mg).8": NaN, "Hardness (N)": 24.0, "Friability(%)": 0.54, "Thickness (mm)": 2.67, "Punch (mm)": 5.5}}
Disintegration time (sec): 16.33


Given the input fields, help me predict the disintegration time (sec).
{input_fields}
Please reason step by step, and put your final answer within \\boxed{{}}.
"""


# Load environment variables (e.g. OPENAI_API_KEY, OPENAI_API_BASE,
# OPENAI_MODEL_ID) from a local .env file, if one exists.
dotenv.load_dotenv()

# Import the SYSTEM_PROMPT from process_data.py
# NOTE(review): SYSTEM_PROMPT is not referenced anywhere in this file —
# confirm whether it is needed (e.g. re-exported) before removing.
from process_data import SYSTEM_PROMPT


def parse_prediction(output_text):
    r"""
    Extract the predicted disintegration time from model output.

    First looks for a value inside \boxed{...}; if that is absent or its
    content is not parseable as a number, falls back to the last number
    found anywhere in the text.

    Args:
        output_text: Raw completion text returned by the model.

    Returns:
        The predicted time as a float, or None if no number is found.
    """
    marker = "\\boxed{"
    if marker in output_text:
        start_idx = output_text.find(marker) + len(marker)
        end_idx = output_text.find("}", start_idx)
        if end_idx > start_idx:
            answer_text = output_text[start_idx:end_idx].strip()
            try:
                # Only the first token, so trailing units ("55.66 sec") parse.
                # IndexError covers an empty/whitespace-only \boxed{}.
                return float(answer_text.split()[0])
            except (ValueError, IndexError):
                # Fall through to the generic number scan below.
                pass

    # Fallback: take the last number appearing anywhere in the text.
    numbers = re.findall(r"\d+\.?\d*", output_text)
    if numbers:
        return float(numbers[-1])

    return None


def _score_prediction(prediction, ground_truth):
    """Return (error_percentage, is_correct) for a parsed prediction.

    A prediction counts as correct when its relative error is at most 10%.
    A zero ground truth is handled separately to avoid division by zero:
    only an exact zero prediction is correct, otherwise the error is
    infinite.

    NOTE(review): float("inf") serializes as the non-standard token
    `Infinity` in the results JSON — confirm downstream consumers accept it.
    """
    if ground_truth == 0:
        is_correct = prediction == 0
        error_percentage = 0.0 if is_correct else float("inf")
    else:
        error_percentage = abs(prediction - ground_truth) / abs(ground_truth) * 100
        is_correct = error_percentage <= 10.0
    return error_percentage, is_correct


def process_single_example(example, model_id, split_name, temperature=0.1):
    """Query the model for one example and score the parsed prediction.

    Args:
        example: Mapping with "answer" (ground-truth time) and
            "raw_features_json" (formulation features) keys; may carry "id".
        model_id: Model identifier passed to the chat completions API.
        split_name: Dataset split label recorded in the result.
        temperature: Sampling temperature for the completion.

    Returns:
        dict with the prompt, ground truth, raw and parsed prediction, and
        accuracy fields ("error_percentage", "is_correct").
    """
    # Create a fresh client for this call so the function is safe to run
    # from parallel joblib workers.
    client = openai.OpenAI(
        api_key=os.environ.get("OPENAI_API_KEY"),
        base_url=os.environ.get("OPENAI_API_BASE"),
    )

    ground_truth = float(example["answer"])

    prompt = PROMPT.format(input_fields=example["raw_features_json"])

    # Call OpenAI API
    completion = client.chat.completions.create(
        model=model_id,
        messages=[{"role": "user", "content": prompt}],
        temperature=temperature,
        max_tokens=1000,
    )

    prediction_text = completion.choices[0].message.content

    # Parse prediction
    prediction = parse_prediction(prediction_text)

    result = {
        "id": example.get("id", None),
        "split": split_name,
        "prompt": prompt,
        "ground_truth": ground_truth,
        "prediction": prediction,
        "prediction_text": prediction_text,
    }

    # Attach accuracy metrics when a numeric prediction was extracted.
    if prediction is not None:
        error_percentage, is_correct = _score_prediction(prediction, ground_truth)
        result["error_percentage"] = error_percentage
        result["is_correct"] = is_correct
    else:
        # Unparseable output counts as wrong, with no error magnitude.
        result["error_percentage"] = None
        result["is_correct"] = False

    return result


def process_dataset_split(split_dataset, split_name, model_id, sample_size, n_jobs):
    """Evaluate one dataset split in parallel and report its accuracy.

    Returns a dict with the split name, accuracy summary, and the full
    per-example result list.
    """
    # Optionally truncate the split to its first `sample_size` rows.
    if sample_size and sample_size < len(split_dataset):
        split_dataset = split_dataset.select(range(sample_size))

    # Tag every example with a stable integer id.
    examples = [{**row, "id": idx} for idx, row in enumerate(split_dataset)]

    print(
        f"Processing {len(examples)} examples from {split_name} set with {n_jobs} parallel jobs..."
    )

    # Fan the API calls out across joblib workers.
    runner = Parallel(n_jobs=n_jobs, verbose=10)
    results = runner(
        delayed(process_single_example)(ex, model_id, split_name) for ex in examples
    )

    # Tally correctness over examples that yielded a numeric prediction.
    correct = 0
    total = 0
    for entry in results:
        if entry.get("prediction") is not None:
            total += 1
        if entry.get("is_correct", False):
            correct += 1

    accuracy = (correct / total * 100) if total > 0 else 0
    print(
        f"\n{split_name.capitalize()} set accuracy: {accuracy:.2f}% ({correct}/{total})"
    )

    return {
        "split": split_name,
        "accuracy": accuracy,
        "correct": correct,
        "total": total,
        "results": results,
    }


def evaluate_with_openai(
    dataset_path, model_id, output_path, sample_size=None, n_jobs=10
):
    """
    Evaluate ODT predictions using OpenAI API in parallel for both train and test sets.

    Args:
        dataset_path: Local dataset directory or Hugging Face dataset ID.
        model_id: Model identifier to evaluate with.
        output_path: Where the results JSON is written.
        sample_size: Optional cap on examples per split (None = all).
        n_jobs: Number of parallel joblib workers.

    Returns:
        Overall accuracy percentage, or None if the dataset failed to load.
    """
    # Load dataset: a local directory loads from disk, anything else is
    # treated as a hub dataset ID.
    print(f"Loading dataset from {dataset_path}")
    if os.path.isdir(dataset_path):
        dataset = load_from_disk(dataset_path)
    else:
        try:
            dataset = load_dataset(dataset_path)
        except Exception as e:
            print(f"Failed to load dataset: {e}")
            return None

    # Process both train and test sets if available
    all_results = {}
    total_correct = 0
    total_examples = 0

    # Determine which splits to process
    splits_to_process = []
    if "train" in dataset:
        splits_to_process.append("train")
    if "test" in dataset:
        splits_to_process.append("test")
    if not splits_to_process:
        # If no train/test splits, use the entire dataset as one split
        splits_to_process = [next(iter(dataset.keys()))]

    # Process each split and accumulate the overall tallies.
    for split_name in splits_to_process:
        split_result = process_dataset_split(
            dataset[split_name], split_name, model_id, sample_size, n_jobs
        )

        all_results[split_name] = split_result
        total_correct += split_result["correct"]
        total_examples += split_result["total"]

    # Calculate overall accuracy
    overall_accuracy = total_correct / total_examples * 100 if total_examples > 0 else 0
    print(
        f"\nOverall accuracy: {overall_accuracy:.2f}% ({total_correct}/{total_examples})"
    )

    # Prepare final results
    final_results = {
        "overall": {
            "accuracy": overall_accuracy,
            "correct": total_correct,
            "total": total_examples,
        },
        "model_id": model_id,
        "splits": all_results,
    }

    # Save results. Guard against a bare filename: os.makedirs("") raises
    # FileNotFoundError when output_path has no directory component.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(output_path, "w") as f:
        json.dump(final_results, f, indent=2)

    print(f"Results saved to {output_path}")
    return overall_accuracy


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Evaluate ODT predictions using OpenAI API"
    )
    parser.add_argument(
        "--dataset_path",
        type=str,
        default="./assets/odt_dataset",
        help="Path to the dataset or Hugging Face dataset ID",
    )
    parser.add_argument(
        "--model_id",
        type=str,
        default=os.environ.get("OPENAI_MODEL_ID"),
        help="OpenAI model ID to use",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        default="./results/openai_evaluation_results.json",
        help="Path to save results JSON",
    )
    parser.add_argument(
        "--sample_size",
        type=int,
        default=None,
        help="Number of examples to sample from each split (default: use all)",
    )
    parser.add_argument(
        "--n_jobs", type=int, default=10, help="Number of parallel jobs"
    )

    args = parser.parse_args()

    # Fail fast with a clear message instead of sending model=None to the
    # API: the default is only set when OPENAI_MODEL_ID is in the env.
    if not args.model_id:
        parser.error(
            "--model_id is required (pass it explicitly or set OPENAI_MODEL_ID)"
        )

    evaluate_with_openai(
        args.dataset_path,
        args.model_id,
        args.output_path,
        args.sample_size,
        args.n_jobs,
    )