import json
import os

import click
import dotenv
import openai
import pandas as pd
from datasets import Dataset, Features, Sequence, Value
from huggingface_hub import HfApi
from joblib import Parallel, delayed
from tqdm import tqdm

# Load environment variables (API keys, base URL, HF token) from a .env file.
dotenv.load_dotenv()

# Initialize OpenAI client with API key and base from environment variables.
# NOTE(review): this module-level client appears unused within this file —
# process_single_prompt() constructs its own per-call client. Kept in case
# external code imports it; confirm before removing.
client = openai.OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    base_url=os.environ.get("OPENAI_API_BASE"),
)

# Prompt template for the ODT disintegration-time prediction task.
# The {input_fields} placeholder is filled with a JSON dump of one
# formulation row; the model is asked to reason step by step and put its
# final numeric answer inside \boxed{} (the doubled braces below are
# str.format escapes that render as literal braces).
SYSTEM_PROMPT: str = """You are a pharmaceutical AI specialist focused on Oral Disintegrating Tablet (ODT) formulations.

Some knowledge about this:
The disintegration time of Oral Disintegrating Tablets (ODTs) is influenced by multiple formulation and manufacturing parameters, as discussed in the paper. Below is a summary of how each input field affects the disintegration time:

1. **API and API Dose (mg)**  
   - The molecular properties of the API (e.g., solubility, molecular weight, hydrogen bonding) indirectly affect disintegration. For example, APIs with poor solubility may require more disintegrant or solubilizer to enhance dissolution, impacting disintegration time.  
   - Higher API doses may increase tablet density, potentially prolonging disintegration if not balanced with excipients.

2. **Filler and Filler Dose (mg)**  
   - Fillers like **mannitol** are highly water-soluble and can promote rapid disintegration by absorbing saliva and breaking apart.  
   - The filler's particle size and concentration influence porosity and wettability. Optimal amounts (e.g., 285 mg) ensure fast disintegration without compromising tablet integrity.

3. **Binder and Binder Dose (mg)**  
   - Binders (e.g., **PVP**) enhance tablet strength but may slow disintegration if overused (e.g., 195 mg). A balance is needed to maintain hardness while allowing quick breakdown.

4. **Disintegrant and Disintegrant Dose (mg)**  
   - Disintegrants (e.g., **CC-Na**) are critical. They swell or produce gas upon contact with saliva, creating channels for rapid disintegration.  
   - The dose (e.g., 25 mg) must be optimized; too little may delay disintegration, while excess may cause friability.

5. **Lubricant and Lubricant Dose (mg)**  
   - Lubricants (e.g., **Mg stearate**) reduce friction during compression but can form hydrophobic layers, delaying water penetration. A minimal dose (e.g., 10 mg) is preferred.

6. **Solubilizer and Solubilizer Dose (mg)**  
   - Solubilizers (e.g., **β-cyclodextrin**) improve API dissolution, indirectly aiding disintegration. Higher doses (e.g., 43 mg) may enhance this effect.

7. **Hardness (N)**  
   - Tablets with higher hardness (e.g., 53 N) may resist disintegration due to compact structure. The paper notes that hardness must be balanced with disintegrant efficiency.

8. **Friability (%)**  
   - Low friability (e.g., 0.56%) indicates good mechanical strength but may correlate with longer disintegration if the tablet is too dense.

9. **Thickness (mm) and Punch (mm)**  
   - Thinner tablets (e.g., 4.76 mm) or larger diameters (e.g., 8 mm) may disintegrate faster due to increased surface area exposed to saliva.

In summary, disintegration time is a multifactorial outcome influenced by API properties, excipient choices, and manufacturing parameters.

Input Fields:
Field	Description	Example
API	Active Pharmaceutical Ingredient name	"Paracetamol"
API Dose (mg)	Dosage of active ingredient	325.0
Filler	Primary filler excipient	"Mannitol"
Filler Dose (mg)	Filler amount in milligrams	285.0
Binder	Binding agent	"PVP"
Binder Dose (mg)	Binder amount	195.0
Disintegrant	Disintegrating agent	"CC-Na"
Disintegrant Dose (mg)	Disintegrant amount	25.0
Lubricant	Lubricating agent	"Mg stearate"
Lubricant Dose (mg)	Lubricant amount	10.0
Solubilizer	Solubility enhancer	"β-cyclodextrin"
Solubilizer Dose (mg)	Solubilizer amount	43.0
Hardness (N)	Tablet hardness in Newtons	53.0
Friability (%)	Tablet friability percentage	0.56
Thickness (mm)	Tablet thickness in millimeters	4.76
Punch (mm)	Tooling diameter for compression	8.0

Output Field:
Field	Description
Disintegration time (sec)	Time for complete oral disintegration

Given the input fields, help me predict the disintegration time (sec).
{input_fields}
Please reason step by step, and put your final answer within \\boxed{{}}.
"""

# Variant of the prompt used when the ground-truth answer is already known:
# the model is given {answer} and asked to produce the chain-of-thought that
# leads to it (used to synthesize CoT training data).
# NOTE(review): "help me completion the COT process" is ungrammatical, but it
# is a runtime prompt string sent to the model — left unchanged here; reword
# deliberately (and regenerate cached data) if desired.
COMPLETION_COT_PROMPT: str = (
    SYSTEM_PROMPT + "The answer is {answer}, help me completion the COT process"
)


def process_single_prompt(prompt, model_id):
    """Send one prompt to the chat-completions API and return the reply text.

    A dedicated client is constructed for each invocation so that parallel
    workers never share connection state.

    Args:
        prompt: Fully rendered user prompt string.
        model_id: Model identifier passed to the API.

    Returns:
        The assistant message content of the first completion choice.
    """
    worker_client = openai.OpenAI(
        api_key=os.environ.get("OPENAI_API_KEY"),
        base_url=os.environ.get("OPENAI_API_BASE"),
    )

    response = worker_client.chat.completions.create(
        model=model_id,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        max_tokens=1000,
    )
    return response.choices[0].message.content


def generate_cot_responses(prompts, model_id=None, n_jobs=10):
    """Generate Chain-of-Thought responses for *prompts* in parallel.

    Args:
        prompts: Iterable of fully rendered prompt strings.
        model_id: Model identifier; when None, falls back to the
            OPENAI_MODEL_ID env var (default "deepseek-chat").
        n_jobs: Number of joblib workers.

    Returns:
        List of response strings, one per prompt, in input order.
    """
    if model_id is None:
        model_id = os.environ.get("OPENAI_MODEL_ID", "deepseek-chat")

    # Fan the API calls out across joblib workers; verbose=10 prints progress.
    tasks = (delayed(process_single_prompt)(p, model_id) for p in prompts)
    return Parallel(n_jobs=n_jobs, verbose=10)(tasks)


def create_sharegpt_format(input_fields, cot_response, answer):
    """Build a two-turn ShareGPT-style conversation for one example.

    Args:
        input_fields: JSON string of the formulation features, inserted
            into the system prompt template.
        cot_response: Generated chain-of-thought text; may be empty.
        answer: Ground-truth disintegration time, used for a fallback
            assistant reply when no CoT text is available.

    Returns:
        A list of two {"role", "content"} dicts (human then assistant).
    """
    if cot_response:
        assistant_text = cot_response
    else:
        # No CoT available: fall back to a minimal boxed-answer reply.
        assistant_text = f"In conclusion, the disintegration time is about \\boxed{{{answer}}} seconds."

    human_turn = {
        "role": "human",
        "content": SYSTEM_PROMPT.format(input_fields=input_fields),
    }
    assistant_turn = {"role": "assistant", "content": assistant_text}
    return [human_turn, assistant_turn]


@click.command()
@click.option("--hf-repo", help="Hugging Face repository ID (username/repo-name)")
@click.option("--upload/--no-upload", default=False, help="Upload to Hugging Face Hub")
@click.option(
    "--test-size", default=0.2, help="Test set size for train/test split", type=float
)
@click.option("--seed", default=42, help="Random seed for reproducibility", type=int)
@click.option(
    "--generate-cot/--no-generate-cot",
    default=False,
    help="Generate CoT using OpenAI API",
)
@click.option(
    "--n-jobs", default=10, type=int, help="Number of parallel jobs for CoT generation"
)
def main(hf_repo, upload, test_size, seed, generate_cot, n_jobs):
    """Build the ODT prompt/answer dataset from the source CSV.

    Reads ./assets/ODT_formulations.csv, renders one prediction prompt per
    row, optionally generates Chain-of-Thought completions via the OpenAI
    API, caches intermediate JSON under ./assets/, saves a train/test split
    to ./assets/odt_dataset, and optionally pushes it to the HF Hub.
    """
    # Load CSV data
    df = pd.read_csv("./assets/ODT_formulations.csv")

    # Rebuild the JSON caches only when either file is missing; otherwise
    # previously generated prompts/CoT are reused verbatim (note: cached
    # runs ignore --generate-cot — delete the files to force regeneration).
    if not os.path.exists("./assets/hf_data.json") or not os.path.exists(
        "./assets/datasets.json"
    ):
        # Create datasets in two formats
        # 1. JSON for custom usage
        datasets_json = []
        # 2. Dictionary format for Hugging Face Dataset
        hf_data = {
            "prompt": [],
            "answer": [],
            "raw_features_json": [],  # Store as JSON strings instead of dicts
            "cot": [],  # Add Chain-of-Thought field
            "conversations": [],  # Add ShareGPT format conversations
        }

        cot_prompts = []  # To store prompts for CoT generation
        input_fields_list = []  # To store input fields for ShareGPT format

        for _, row in tqdm(df.iterrows(), total=len(df), desc="Processing data"):
            fields = row.to_dict()
            # The target column is removed so the prompt contains inputs only.
            answer = fields.pop("Disintegration time (sec)")
            input_fields = json.dumps(fields, indent=2)
            input_fields_list.append(input_fields)
            prompt = SYSTEM_PROMPT.format(input_fields=input_fields)

            # Prompt variant that reveals the answer, used for CoT synthesis.
            cot_prompt = COMPLETION_COT_PROMPT.format(
                answer=answer, input_fields=input_fields
            )

            cot_prompts.append(cot_prompt)

            # Add to JSON format
            datasets_json.append({"prompt": prompt, "answer": answer})

            # Add to Hugging Face format - store features as JSON string
            hf_data["prompt"].append(prompt)
            hf_data["answer"].append(float(answer))
            hf_data["raw_features_json"].append(json.dumps(fields))
            hf_data["cot"].append(
                ""
            )  # Placeholder, will be updated if generate_cot is True
            # Initialize with empty ShareGPT format, will be updated later
            hf_data["conversations"].append("")

        # Generate CoT responses if requested
        if generate_cot:
            print(
                f"Generating Chain-of-Thought responses using OpenAI API with {n_jobs} parallel jobs..."
            )

            cot_responses = generate_cot_responses(cot_prompts, n_jobs=n_jobs)

            # Update the cot field with generated responses
            hf_data["cot"] = cot_responses

            # Also update the JSON format
            for i, cot in enumerate(cot_responses):
                datasets_json[i]["cot"] = cot
        else:
            # If no CoT generation, use empty strings
            cot_responses = [""] * len(input_fields_list)

        # Create ShareGPT format data
        print("Creating ShareGPT format conversations...")
        for i, (input_field, cot, answer) in enumerate(
            zip(input_fields_list, cot_responses, hf_data["answer"])
        ):
            sharegpt_conversation = create_sharegpt_format(input_field, cot, answer)
            hf_data["conversations"][i] = sharegpt_conversation
            datasets_json[i]["conversations"] = sharegpt_conversation

        # Save JSON format
        with open("./assets/datasets.json", "w") as f:
            json.dump(datasets_json, f, indent=2)

        with open("./assets/hf_data.json", "w") as f:
            json.dump(hf_data, f, indent=2)

    else:
        # Cache hit: load both previously generated files as-is.
        with open("./assets/datasets.json", "r") as f:
            datasets_json = json.load(f)
        with open("./assets/hf_data.json", "r") as f:
            hf_data = json.load(f)

    # Create and save Hugging Face dataset with explicit features
    # NOTE(review): despite the comment above, no Features schema is passed —
    # the Features/Sequence/Value imports appear unused; confirm intent.
    hf_dataset = Dataset.from_dict(hf_data)

    # Split into train/validation sets
    dataset_dict = hf_dataset.train_test_split(test_size=test_size, seed=seed)

    # Save locally
    dataset_dict.save_to_disk("./assets/odt_dataset")

    print(f"Dataset created with {len(hf_dataset)} examples")

    # Upload to HF Hub if requested
    if upload:
        if not hf_repo:
            raise ValueError(
                "HF repository ID is required for upload. Use --hf-repo option."
            )

        # Get token from environment
        hf_token = os.environ.get("HF_TOKEN")
        if not hf_token:
            raise ValueError(
                "HF_TOKEN environment variable not found. Please set it before uploading."
            )

        print(f"Uploading dataset to Hugging Face Hub: {hf_repo}")
        api = HfApi(token=hf_token)
        # exist_ok=True makes re-runs against an existing repo idempotent.
        api.create_repo(hf_repo, repo_type="dataset", exist_ok=True)
        dataset_dict.push_to_hub(hf_repo)
        print(f"Dataset successfully uploaded to {hf_repo}")
    else:
        print("To upload to Hugging Face Hub later, run:")
        print(
            "python process_data.py --hf-repo your-username/odt-formulations --upload"
        )


# Script entry point: click parses sys.argv and dispatches to main().
if __name__ == "__main__":
    main()