import os
import argparse
from sklearn.model_selection import train_test_split
import json
import re
from tqdm import tqdm
from parse_jsonl import iter_jsonl, parse_jsonl_line


def reasoning_text_refined(original_text):
    """Clean an assistant reasoning string for use as a training target.

    The text is split on Chinese sentence terminators (。！？); sentences
    that reference neighboring epochs (前序/后续/N-1/N+2, etc.) are dropped,
    and the boilerplate prefix "在目标Epoch N中，" is stripped out.
    """
    context_keywords = ("前序", "前续", "后序", "后续", "前后", "N-1", "N-2", "N+1", "N+2")
    kept_sentences = [
        sentence
        for sentence in re.split(r'(?<=[。！？])\s*', original_text.strip())
        if not any(keyword in sentence for keyword in context_keywords)
    ]
    return " ".join(kept_sentences).strip().replace("在目标Epoch N中，", "")


def parse_jsonl_file(jsonl_path):
    """Parse one assistant JSONL file into a dict keyed by custom_id.

    Lines whose custom_id suffix (text after the last '_') disagrees with
    the record's own target_epoch_stage are considered inconsistent and
    silently skipped.
    """
    records = {}
    for line_no, raw_line in enumerate(iter_jsonl(jsonl_path), start=1):
        record = parse_jsonl_line(raw_line, line_no)
        if not record:
            continue
        stage_from_id = record.custom_id.split("_")[-1]
        if stage_from_id != record.target_epoch_stage:
            # Stage embedded in the id contradicts the payload -> drop the line.
            continue
        records[record.custom_id] = {
            "target_epoch_stage": record.target_epoch_stage,
            "reasoning_text": record.reasoning_text,
            "applicable_rules": record.applicable_rules,
        }
    return records


def is_image_file(path: str) -> bool:
    """Return True when *path* ends with a common raster-image extension."""
    _, extension = os.path.splitext(path)
    return extension.lower() in (".png", ".jpg", ".jpeg", ".webp", ".gif", ".bmp")


def parse_filename(fname: str):
    """Parse ``{idx}_{stage}.<ext>`` into ``(idx: int, stage: str)``.

    Returns None when the file name does not match the expected layout.
    """
    match = re.match(r"^(\d+)_([^.]+)\.[A-Za-z0-9]+$", fname)
    if match is None:
        return None
    idx_text, stage = match.groups()
    try:
        return int(idx_text), stage
    except ValueError:
        # \d may match exotic Unicode digits; keep the guard as a safety net.
        return None


def main():
    """Build train/val chat-format JSONL datasets from rendered EEG epoch images.

    Subjects are split 80/10/10 into train/val/test at the subject level
    (test subjects are held out entirely; the split is recorded in
    subjects.json). Every epoch image that has a matching assistant
    annotation becomes one sample: system prompt + single-image user turn
    + a fenced-JSON assistant answer.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--img_dir", type=str, default=r"/home/denggf/Desktop/Workspace/EEGLLM/SS/Wave_render_V2/MASS-SS3_Wave_224_448_V2")
    parser.add_argument("--save_dir", type=str, default=r"/home/denggf/Desktop/Cluster_JiangLab/Qwen25_VL_FT/saved")
    parser.add_argument("--assistant_dir", type=str, default=r"/home/denggf/Desktop/Cluster_JiangLab/MASS-SS3_Qwen2.5-VL-72B-Instruct")
    parser.add_argument("--system_prompt", type=str, default=r"/home/denggf/Desktop/Cluster_JiangLab/Qwen25_VL_FT/system_prompt.md")
    parser.add_argument("--split_seed", type=int, default=42)
    args = parser.parse_args()

    img_dir = args.img_dir
    assistant_dir = args.assistant_dir
    system_prompt_file = args.system_prompt
    split_seed = args.split_seed

    subjects_json = os.path.join(args.save_dir, "data", "subjects.json")
    train_jsonl = os.path.join(args.save_dir, "data", "train.jsonl")
    val_jsonl = os.path.join(args.save_dir, "data", "val.jsonl")

    # All three outputs live in the same "<save_dir>/data" directory.
    os.makedirs(os.path.dirname(subjects_json), exist_ok=True)

    with open(system_prompt_file, "r", encoding="utf-8") as f:
        system_prompt = f.read()

    # Subject-level split: 90/10 first, then 1/9 of the 90% -> 80/10/10 overall.
    subjects = [d for d in sorted(os.listdir(img_dir)) if os.path.isdir(os.path.join(img_dir, d))]
    train_val_subjects, test_subjects = train_test_split(subjects, test_size=1/10, random_state=split_seed)
    train_subjects, val_subjects = train_test_split(train_val_subjects, test_size=1/9, random_state=split_seed)

    subjects_info = {
        "total_subjects_num": len(subjects),
        "train_subjects_num": len(train_subjects),
        "val_subjects_num": len(val_subjects),
        "test_subjects_num": len(test_subjects),
        "train_subjects": train_subjects,
        "val_subjects": val_subjects,
        "test_subjects": test_subjects
    }
    with open(subjects_json, "w", encoding="utf-8") as fjson:
        json.dump(subjects_info, fjson, ensure_ascii=False, indent=2)

    with open(train_jsonl, "w", encoding="utf-8") as ftrain, open(val_jsonl, "w", encoding="utf-8") as fval:
        for sub_id in tqdm(subjects):
            if sub_id in test_subjects:
                # Test subjects are held out from both output files.
                continue

            sub_img_dir = os.path.join(img_dir, sub_id)
            sub_img_files = [f for f in os.listdir(sub_img_dir) if is_image_file(f)]

            sub_assistant_jsonl = os.path.join(assistant_dir, f"{sub_id}.jsonl")
            sub_assistant_data = parse_jsonl_file(sub_assistant_jsonl)

            epochs = []  # each entry: {"idx": int, "stage": str, "path": "<sub_id>/<file>"}
            for fname in sub_img_files:
                parsed = parse_filename(fname)
                if not parsed:
                    continue
                idx, stage = parsed
                epochs.append({
                    "idx": idx,
                    "stage": stage,
                    "path": os.path.join(sub_id, fname),
                })

            if not epochs:
                continue

            # Sort by epoch index for a stable, deterministic sample order.
            epochs.sort(key=lambda x: x["idx"])

            # NOTE(review): this guard is a leftover from an N-2..N+2
            # context-window design — only the current epoch image is used
            # below. Kept so the emitted dataset is unchanged for small subjects.
            if len(epochs) < 5:
                continue

            for current in epochs:
                current_stage = current["stage"]
                custom_id = f"{sub_id}#{current['idx']}_{current_stage}"

                sub_assistant_item = sub_assistant_data.get(custom_id)
                if not sub_assistant_item:
                    continue
                reasoning_text = reasoning_text_refined(sub_assistant_item["reasoning_text"])
                applicable_rules = sub_assistant_item["applicable_rules"]

                # Build the target answer with json.dumps so quotes, backslashes
                # and newlines inside the fields are escaped properly — the
                # previous f-string template emitted invalid JSON for such
                # inputs. Output is byte-identical for plain quote-free strings.
                # Assumes both fields are strings — TODO confirm upstream parser.
                answer_payload = {
                    "sleep_stage": current_stage,
                    "reasoning_text": reasoning_text,
                    "applicable_rules": applicable_rules,
                }
                assistant_content = "```json\n" + json.dumps(answer_payload, ensure_ascii=False, indent=2) + "\n```"

                messages = [
                    {"role": "system", "content": system_prompt},
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": ""},
                            {"type": "image", "image": current["path"]},
                        ],
                    },
                    {
                        "role": "assistant",
                        "content": assistant_content,
                    }
                ]

                json_item = {
                    "id": custom_id,
                    "messages": messages
                }

                out_file = ftrain if sub_id in train_subjects else fval
                out_file.write(json.dumps(json_item, ensure_ascii=False) + "\n")

# Script entry point: run the dataset-building pipeline when executed directly.
if __name__ == "__main__":
    main()
    
