import os
import json
import argparse
from typing import Dict, List, Tuple

import pandas as pd


def extract_category(caption: str) -> str:
    """Derive the disease-category string from a free-text caption.

    Rules:
    - If contains 'healthy' (case-insensitive), return 'healthy'.
    - If contains 'diseased by <name>', return <name> trimmed, and cut suffix ' with symptoms ...'.
    - Else return 'unknown'.
    """
    if not isinstance(caption, str):
        return 'unknown'
    folded = caption.lower()
    if 'healthy' in folded:
        return 'healthy'
    marker = 'diseased by'
    pos = folded.find(marker)
    if pos < 0:
        return 'unknown'
    # Slice the original string (not the lowered copy) so the
    # category keeps its original capitalization.
    tail = caption[pos + len(marker):].strip()
    suffix_pos = tail.lower().find(' with symptoms')
    if suffix_pos >= 0:
        tail = tail[:suffix_pos]
    # Collapse any run of whitespace into single spaces.
    return ' '.join(tail.split())


def build_label_mapping(categories: List[str]) -> Dict[str, int]:
    """Assign a stable integer id to every distinct category.

    Ids are dense (0..n-1) and follow the sorted order of the unique
    category names, so the mapping is reproducible across runs.
    """
    mapping: Dict[str, int] = {}
    for cat in sorted(set(categories)):
        mapping[cat] = len(mapping)
    return mapping


def convert_split(json_path: str, images_dir: str, label2id: Dict[str, int]) -> pd.DataFrame:
    """Convert one JSON split into a DataFrame of (image, text, label, disease_category).

    Parameters
    ----------
    json_path : str
        Path to a split file holding a JSON list of records; each record is
        read via 'image_filename' and 'caption' keys.
    images_dir : str
        Directory expected to contain the referenced images. Used only for a
        non-fatal existence check (missing files produce a warning print).
    label2id : Dict[str, int]
        Category -> integer id mapping. Mutated in place when a category
        unseen at mapping-build time is encountered.

    Returns
    -------
    pd.DataFrame
        Columns image/text/label/disease_category, one row per record.
    """
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    rows = []
    for d in data:
        image_filename = d.get('image_filename')
        caption = d.get('caption', '')
        cat = extract_category(caption)
        label = label2id.get(cat)
        if label is None:
            # Unseen category: extend the mapping with the next free id.
            label = len(label2id)
            label2id[cat] = label
        rows.append({
            'image': image_filename,
            'text': caption,
            'label': label,
            'disease_category': cat,
        })
    # Fix: declare the columns explicitly. A DataFrame built from an empty
    # list has NO columns, so an empty split used to raise KeyError on
    # df['image'] below instead of returning an empty (but well-formed) frame.
    df = pd.DataFrame(rows, columns=['image', 'text', 'label', 'disease_category'])
    # Basic sanity: warn (do not fail) about images missing on disk.
    exists = df['image'].apply(lambda fn: os.path.exists(os.path.join(images_dir, str(fn))))
    missing = int((~exists).sum())
    if missing:
        print(f"[warn] {os.path.basename(json_path)}: {missing} images not found in {images_dir}")
    return df


def collect_all_categories(json_paths: List[str]) -> List[str]:
    """Gather the category of every record across the given split files.

    Duplicates are kept; the caller is expected to deduplicate (e.g. via
    build_label_mapping).
    """
    all_cats: List[str] = []
    for path in json_paths:
        with open(path, 'r', encoding='utf-8') as f:
            records = json.load(f)
        all_cats.extend(extract_category(r.get('caption', '')) for r in records)
    return all_cats


def main():
    """CLI entry point: convert JSON splits to CSVs plus a label mapping.

    Writes to out_dir: train.csv / val.csv / test.csv, label2id.json
    (with its inverse id2label) and class_distribution_train.csv.
    """
    parser = argparse.ArgumentParser(description='Convert scold_12000 JSON splits to CSV for train_classifier.py')
    parser.add_argument('--dataset_dir', required=True, help='Root directory of the dataset (contains images/ and splits/)')
    parser.add_argument('--splits_dir', default=None, help='Directory that contains train.json/val.json/test.json; defaults to <dataset_dir>/splits')
    parser.add_argument('--images_dir', default=None, help='Images directory; defaults to <dataset_dir>/images')
    parser.add_argument('--out_dir', default=None, help='Output directory for CSV and label2id.json; defaults to splits_dir')
    parser.add_argument('--label_out', default=None, help='Explicit path to save label2id.json; defaults to <out_dir>/label2id.json')
    args = parser.parse_args()

    dataset_dir = args.dataset_dir
    splits_dir = args.splits_dir or os.path.join(dataset_dir, 'splits')
    images_dir = args.images_dir or os.path.join(dataset_dir, 'images')
    out_dir = args.out_dir or splits_dir
    os.makedirs(out_dir, exist_ok=True)

    json_paths = [
        os.path.join(splits_dir, 'train.json'),
        os.path.join(splits_dir, 'val.json'),
        os.path.join(splits_dir, 'test.json'),
    ]

    # Build label mapping from union of categories across splits
    all_cats = collect_all_categories(json_paths)
    label2id = build_label_mapping(all_cats)

    # Convert each split. Keep the train frame in memory so we do not need
    # to re-read train.csv from disk for the class-distribution summary.
    out_csv_paths: List[Tuple[str, str]] = []
    df_train = None
    for name in ['train', 'val', 'test']:
        jp = os.path.join(splits_dir, f'{name}.json')
        df = convert_split(jp, images_dir, label2id)
        if name == 'train':
            df_train = df
        csv_path = os.path.join(out_dir, f'{name}.csv')
        df.to_csv(csv_path, index=False)
        out_csv_paths.append((name, csv_path))
        print(f"Wrote {name}.csv -> {csv_path} (rows={len(df)})")

    # Fix: save the mapping AFTER conversion. convert_split may extend
    # label2id with categories unseen at mapping-build time; saving first
    # would write a stale mapping that disagrees with the CSV labels.
    label_out = args.label_out or os.path.join(out_dir, 'label2id.json')
    id2label = {v: k for k, v in label2id.items()}
    with open(label_out, 'w', encoding='utf-8') as f:
        json.dump({'label2id': label2id, 'id2label': id2label}, f, ensure_ascii=False, indent=2)
    print(f"Saved label2id mapping to {label_out} (num_classes={len(label2id)})")

    # Summary counts per class (train only)
    dist = df_train['disease_category'].value_counts().sort_values(ascending=False)
    dist.to_csv(os.path.join(out_dir, 'class_distribution_train.csv'))
    print("Saved class_distribution_train.csv")

    print("Conversion done.")


# Run the conversion only when executed as a script (safe to import as a module).
if __name__ == '__main__':
    main()