import sys
import torch
import os
import traceback
import numpy as np
import subprocess
import threading
import random

from .util import print_boxed_description
from .colorful import Color
__all__ = ["utils", "colorful", "dataset", "logger", "module", "trainer", "util"]
# from .utils.timing import timing_decorator

def _init():
    """Auto-launch distributed training when multiple GPUs are visible.

    If the script was started as a plain ``python script.py`` run, more than
    one CUDA device is visible, and we are NOT already inside a torchrun
    worker (no ``LOCAL_RANK`` in the environment), re-execute the script
    under ``torchrun`` with one worker per GPU and exit with the workers'
    status. Otherwise just print a greeting banner on rank 0.

    Side effects: may spawn a subprocess and terminate this process.
    """
    run_file = sys.argv[0]
    if not run_file.endswith(".py"):
        # Not a plain script invocation (REPL, installed entry point, ...):
        # leave the launch mode alone.
        return
    # LOCAL_RANK is set by torchrun in every worker; its absence means this
    # is the original single-process launch.
    if torch.cuda.device_count() > 1 and not os.environ.get("LOCAL_RANK"):
        # `cmd` is only used for display in the hint message below.
        cmd = " ".join(sys.argv)
        print_boxed_description(f"{Color.Pink} Ciallo～. {Color.End}{Color.Yellow}Multi-GPU{Color.End} detected, try to enable {Color.Yellow}distributed training.{Color.End} You can {Color.Green}set the environment variable CUDA_VISIBLE_DEVICES to special one{Color.End} to avoid this operation, for example: {Color.Green}CUDA_VISIBLE_DEVICES=0 python {cmd}{Color.End}")
        # Re-launch this very script under torchrun, one worker per GPU.
        proc = subprocess.run(['torchrun', '--standalone', '--nnodes=1', '--nproc_per_node={}'.format(torch.cuda.device_count()), *sys.argv])
        # Propagate the workers' exit status instead of always exiting 0
        # (bare `exit()` would report success even when training failed).
        sys.exit(proc.returncode)
    else:
        # Inside a torchrun worker (or single-GPU run): greet once, on rank 0.
        if os.environ.get("LOCAL_RANK") == "0":
            print()
            print_boxed_description(f"{Color.Pink}Ciallo～.{Color.End} {Color.Yellow}Enjoy your training!{Color.End}")
            print()
_init()

# Rank/topology info injected by torchrun; both absent in plain runs.
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "1"))
# More than one worker in the job means we run under a distributed launcher.
distributed = world_size > 1
# Global profiling switch, off by default.
prof = False


def init_seeds(seed):
    """Seed every RNG (Python, NumPy, PyTorch CPU and CUDA) with *seed*.

    Also switches cuDNN into deterministic mode and disables its autotuner
    (benchmark) so repeated runs produce repeatable results.
    """
    # Same seed fed to each library's seeding entry point, in order.
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(seed)
    # Trade some speed for reproducibility of cuDNN kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    
# When running under a distributed launcher, bind this worker to its GPU,
# join the process group, and seed all RNGs with a per-rank offset.
if distributed:
    # One process per GPU: LOCAL_RANK doubles as the CUDA device index.
    torch.cuda.set_device(local_rank)
    # env:// init: reads MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE, all of
    # which torchrun sets for each worker.
    torch.distributed.init_process_group(backend='nccl')
    # Per-rank seed offset keeps randomness (e.g. augmentation) different
    # across workers while remaining reproducible; 20000227 is a fixed
    # base constant chosen by the author.
    init_seeds(20000227 + local_rank)