"""
Author: Katharina Löffler (2022), Karlsruhe Institute of Technology
Licensed under MIT License
"""
import matplotlib
import matplotlib.pyplot as plt
import sys
sys.path.append(r'D:\CodeRode\project\EmbedTrack-master')
matplotlib.use("Agg")
from embedtrack.train.run_training_pipeline import (
    DataConfig,
    ModelConfig,
    TrainConfig,
    run_pipeline,
)
import os
from pathlib import Path
from multiprocessing import freeze_support
import cv2
import numpy as np
import torch
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
os.environ['OMP_NUM_THREADS'] = '1'  # 限制OpenMP线程数
from embedtrack.datasets.prepare_data import prepare_ctc_data

def get_all_sequences(train_dir):
    """Return the sorted sequence directory names (e.g. "41", "42", ...).

    Ground-truth folders (directory names ending in "_GT") are excluded.
    If ``train_dir`` does not exist, it is created and an empty list is
    returned so the caller can fall back to preparing the data.
    """
    train_dir = Path(train_dir)
    if train_dir.exists():
        # Keep only sub-directories that are not ground-truth folders.
        return sorted(
            entry.name
            for entry in train_dir.iterdir()
            if entry.is_dir() and not entry.name.endswith("_GT")
        )
    print(f"警告：{train_dir} 不存在，已自动创建。")
    train_dir.mkdir(parents=True, exist_ok=True)
    return []

# data configs
FILE_PATH = Path(__file__)
# Project root: three levels above this file.
PROJECT_PATH = str(FILE_PATH.parent.parent.parent)

RAW_DATA_PATH = os.path.join(PROJECT_PATH, "ctc_raw_data/train")
DATA_PATH_DEST = os.path.join(PROJECT_PATH, "data")
MODEL_PATH = os.path.join(PROJECT_PATH, "models")

# Whether to use silver-truth (ST) annotations where available.
USE_SILVER_TRUTH = True

# Sequence names are discovered automatically at runtime;
# each data set listed below is assumed to have a "train" sub-directory
# under DATA_PATH_DEST.
DATA_SETS = [
    # "3t3",
    # "hela",
    "pc3",
    # "raw",
    # "raw",
    # "Fluo-N2DH-SIM+",
    # "Fluo-C2DL-MSC",
    # "Fluo-N2DH-GOWT1",
    # "PhC-C2DL-PSC",
    # "BF-C2DL-HSC",
    # "Fluo-N2DL-HeLa",
    # "BF-C2DL-MuSC",
    # "DIC-C2DH-HeLa",
    # "PhC-C2DH-U373",
]

TRAIN_VAL_SPLIT = 0.2
N_EPOCHS = 15
# Adam optimizer; normalize images; OneCycle LR scheduler; N epochs
MODEL_NAME = "adam_norm_onecycle_" + str(N_EPOCHS)


def resize_if_needed(img, size=(256, 256), interpolation=cv2.INTER_NEAREST):
    """Resize an image to ``size`` if its spatial dimensions differ.

    Parameters
    ----------
    img : numpy.ndarray
        Grayscale (H, W) or channel-last (H, W, C) image.
    size : tuple of int
        Target spatial size as (height, width).
    interpolation : int
        OpenCV interpolation flag; defaults to nearest-neighbour so that
        integer label masks are not blended into invalid values.

    Returns
    -------
    numpy.ndarray
        The resized image, or ``img`` unchanged if it already matches.
    """
    # Compare the spatial dims (H, W). The previous check used
    # img.shape[-2:], which reads (W, C) for a channel-last colour image
    # and would therefore mis-detect a size mismatch.
    if img.shape[:2] != size:
        # cv2.resize expects dsize in (width, height) order.
        return cv2.resize(img, size[::-1], interpolation=interpolation)
    return img


if __name__ == '__main__':
    # Required on Windows when multiprocessing workers are spawned.
    freeze_support()
    for data_set in DATA_SETS:
        train_dir = os.path.join(DATA_PATH_DEST, data_set, "train")
        TRAIN_VAL_SEQUNCES = get_all_sequences(train_dir)
        if not TRAIN_VAL_SEQUNCES:
            print(f"未检测到{data_set}的序列，自动从原始数据准备...")
            # No sequences found yet: prepare the data automatically
            # (copy from ctc_raw_data into the data directory).
            raw_data_path = os.path.join(RAW_DATA_PATH, data_set)
            prepare_ctc_data(
                source_path=raw_data_path,
                result_path=DATA_PATH_DEST,
                keep_st=USE_SILVER_TRUTH,
                val_split=TRAIN_VAL_SPLIT,
                sub_dir_names=None  # prepare_ctc_data auto-detects all sequences internally
            )
            # Re-scan for sequences after the preparation step.
            TRAIN_VAL_SEQUNCES = get_all_sequences(train_dir)
            if not TRAIN_VAL_SEQUNCES:
                print(f"自动准备后仍未检测到{data_set}的序列，跳过。")
                continue
        print(f"{data_set} 自动获取序列: {TRAIN_VAL_SEQUNCES}")

        # NOTE(review): silver truth is disabled only for the simulated
        # data set — presumably no ST annotations exist for it; confirm.
        if data_set == "Fluo-N2DH-SIM+":
            use_silver_truth = False
        else:
            use_silver_truth = USE_SILVER_TRUTH

        data_config = DataConfig(
            RAW_DATA_PATH,
            data_set,
            DATA_PATH_DEST,
            use_silver_truth=use_silver_truth,
            train_val_sequences=TRAIN_VAL_SEQUNCES,
            train_val_split=TRAIN_VAL_SPLIT,
        )

        # train configs
        MODEL_SAVE_DIR = os.path.join(
            MODEL_PATH,
            data_set,
            MODEL_NAME,
        )
        # Fluo-C2DL-MSC uses larger crops with a smaller batch size;
        # every other data set uses the default 256-pixel crops.
        if data_set != "Fluo-C2DL-MSC":
            CROP_SIZE = 256
            TRAIN_BATCH_SIZE = 4
            VAL_BATCH_SIZE = 4
            DISPLAY_IT = 1000
        else:
            CROP_SIZE = 512
            TRAIN_BATCH_SIZE = 2
            VAL_BATCH_SIZE = 2
            DISPLAY_IT = 200

        CENTER = "medoid"  # 'centroid', 'approximate-medoid', 'medoid'
        RESUME_TRAINING = False
        TRAIN_SIZE = None  # None means: use all training data
        VAL_SIZE = None  # None means: use all validation data
        # TRAIN_SIZE = 3000  # None means: use all data
        # VAL_SIZE = 2600
        VIRTUAL_TRAIN_BATCH_MULTIPLIER = 1
        VIRTUAL_VAL_BATCH_MULTIPLIER = 1
        DISPLAY = False

        train_config = TrainConfig(
            MODEL_SAVE_DIR,
            crop_size=CROP_SIZE,
            center=CENTER,
            resume_training=RESUME_TRAINING,
            train_size=TRAIN_SIZE,
            train_batch_size=TRAIN_BATCH_SIZE,
            virtual_train_batch_multiplier=VIRTUAL_TRAIN_BATCH_MULTIPLIER,
            val_size=VAL_SIZE,
            val_batch_size=VAL_BATCH_SIZE,
            virtual_val_batch_multiplier=VIRTUAL_VAL_BATCH_MULTIPLIER,
            n_epochs=N_EPOCHS,
            display=DISPLAY,
            display_it=DISPLAY_IT,
        )

        # model config
        INPUT_CHANNELS = 1
        N_SEG_CLASSES = [4, 1]
        N_TRACK_CLASSES = 2

        model_config = ModelConfig(INPUT_CHANNELS, N_SEG_CLASSES, N_TRACK_CLASSES)

        run_pipeline(data_config, train_config, model_config)
        # Close all matplotlib figures between data sets to limit memory growth.
        plt.close("all")
