# Environment configuration MUST precede the deep-learning imports:
# CUDA_VISIBLE_DEVICES is read once when CUDA initialises, so setting it
# after `import torch`/`ultralytics` would have no effect.
import os
os.environ['WANDB_DISABLED'] = 'true'   # disable Weights & Biases auto-logging
os.environ['OMP_NUM_THREADS'] = '1'     # avoid OpenMP thread oversubscription in dataloader workers
os.environ['CUDA_VISIBLE_DEVICES'] = '1,2'  # expose only physical GPUs 1 and 2 to this process
from ultralytics import YOLO
from ultralytics.data import build_dataloader, build_llltData
from ultralytics.cfg import get_cfg
from ultralytics.models.yolo.detect import DetectionTrainer, llltTrainer
import argparse
import yaml
import torch.distributed as dist
import torch
from ultralytics.utils.torch_utils import select_device
import swanlab
# NOTE(review): YOLO, build_dataloader, build_llltData, get_cfg,
# DetectionTrainer, yaml, select_device and DEFAULT_CFG_PATH appear unused
# in this script — confirm before pruning (they may be kept for interactive use).
from ultralytics.cfg import DEFAULT_CFG_PATH
# ---------------------------------------------------------------------------
# Distributed-training bootstrap
# ---------------------------------------------------------------------------
def init_distributed_mode():
    """Initialise torch.distributed from torchrun-style environment variables.

    Returns:
        tuple: ``(is_distributed, rank, world_size, local_gpu)``. When the
        ``RANK``/``WORLD_SIZE`` variables are absent, the process runs
        standalone and ``(False, 0, 1, 0)`` is returned.
    """
    env = os.environ
    # Guard clause: torchrun/launch utilities export these; their absence
    # means a plain single-process run.
    if 'RANK' not in env or 'WORLD_SIZE' not in env:
        print('Not using distributed mode')
        return False, 0, 1, 0

    rank = int(env['RANK'])
    world_size = int(env['WORLD_SIZE'])
    local_gpu = int(env['LOCAL_RANK'])  # per-process GPU index on this node

    # Bind this process to its GPU before creating the NCCL process group.
    torch.cuda.set_device(local_gpu)
    dist.init_process_group(
        backend='nccl', init_method='env://', world_size=world_size, rank=rank
    )
    return True, rank, world_size, local_gpu

# ---------------------------------------------------------------------------
# Command-line arguments
# ---------------------------------------------------------------------------
_parser = argparse.ArgumentParser()
# (flag, type, default, help) — batch guideline: 16 on 24 GB, 24 on 50 GB,
# 12 on 16 GB cards.
_CLI_OPTIONS = (
    ("--data", str, "lllt.yaml", "data yaml file"),
    ("--model", str, "lif_yolov8m.yaml", "model yaml file"),
    ("--weights", str, "./runs/train/exp_coco_mm_yolov8_1/weights/last.pt", "weights file"),
    ("--epochs", int, 100, "epochs num"),
    ("--batch", int, 8, "batch size"),
    ("--workers", int, 4, "workers num"),
)
for _flag, _type, _default, _help in _CLI_OPTIONS:
    _parser.add_argument(_flag, type=_type, default=_default, help=_help)
args = _parser.parse_args()

# ---------------------------------------------------------------------------
# Training entry point
# ---------------------------------------------------------------------------
def main():
    """Configure experiment tracking and launch lllt/SpikeYOLO training."""
    # Hyper-parameters handed to the trainer.
    # NOTE(review): device=[1, 2] is combined with CUDA_VISIBLE_DEVICES='1,2'
    # set at the top of the file; after that masking only in-process indices
    # 0 and 1 exist, so confirm the trainer re-exports/re-maps these ids
    # (ultralytics' select_device rewrites CUDA_VISIBLE_DEVICES itself).
    train_args = {
        'model': args.model,
        'data': args.data,
        'epochs': args.epochs,
        'batch': args.batch,
        'device': [1, 2],
        'project': 'runs/train',
        'name': 'exp_llt_Integration_4_pretrain',
        'exist_ok': True,
    }

    # Experiment tracking — currently disabled; set mode to 'cloud' to upload.
    swanlab.init(
        project="llt_SpikeYOLO",
        workspace="Linexus",
        experiment_name=train_args['name'],
        config=train_args,
        logdir='./runs/log',
        load='ultralytics/cfg/default.yaml',
        mode='disabled',
    )

    # Custom trainer: reads extra annotations from json_path and enables the
    # spiking (LIF) variant of the model.
    trainer = llltTrainer(
        json_path='/data1/lkf24/data/NESR/annotations',
        isLIF=True,
        overrides=train_args,
    )

    # Kick off training. NOTE(review): passing the model yaml to train() is
    # fork-specific — stock ultralytics trainers take no positional argument.
    trainer.train(args.model)

if __name__ == "__main__":
    main()