import time
import torch,json
from typing import Dict, Union, Optional, List, Any
from accelerate import infer_auto_device_map, init_empty_weights, load_checkpoint_and_dispatch
from accelerate.utils.modeling import get_max_memory
from transformers import AutoConfig, AutoModel, AutoTokenizer
# Timestamp (epoch seconds) of the last GPU cache flush; torch_gc() below
# throttles itself with this so it runs at most once per minute.
_last_gc = 0
# NOTE(review): presumably the transformer layer count of ChatGLM-6B (28
# GLMBlocks); unused in the code visible here — confirm against the rest
# of the file before relying on it.
_LAYER_SIZE = 28
def torch_gc():
    """Release cached GPU memory, throttled to at most once per minute.

    Uses the module-level ``_last_gc`` timestamp to rate-limit; does
    nothing when CUDA is unavailable or the last flush was under 60s ago.
    """
    global _last_gc
    now = time.time()
    if now - _last_gc <= 60:
        # Flushed recently — skip to avoid hammering the allocator.
        return
    _last_gc = now
    if not torch.cuda.is_available():
        return
    device = torch.cuda.current_device()
    print(f"Emptying gpu cache {device}...")
    with torch.cuda.device(device):
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

def init_glm(checkpoint: str,
        max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
        no_split_module_classes: Optional[List[str]] = None,
        verbose: bool = False):
    """Load a (Chat)GLM checkpoint, putting as many layers as possible on GPU
    and dispatching the remainder to CPU when GPU memory is insufficient.

    Args:
        checkpoint (str): path to the checkpoint directory.
        max_memory: mapping of device id (or "cpu") to the maximum memory to
            use on that device; defaults to 80% of each device's capacity.
        no_split_module_classes: module class names that must not be split
            across devices; defaults to ["GLMBlock"].
        verbose: if True, print the memory budget and resulting device map.

    Returns:
        A ``(tokenizer, model)`` tuple; the model is dispatched per the
        inferred device map and converted to fp16.
    """
    # Avoid a mutable default argument; default inside the body instead.
    if no_split_module_classes is None:
        no_split_module_classes = ["GLMBlock"]
    if max_memory is None:
        # Try to use 80% of each device's memory.
        max_memory = {k: int(v * 0.8) for k, v in get_max_memory().items()}
    if verbose:
        print("memory usage:")
        print(json.dumps(max_memory, indent=4))
    config = AutoConfig.from_pretrained(checkpoint, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
    # Build the model skeleton on the meta device (no weight allocation) so
    # the device map can be inferred without loading the checkpoint.
    with init_empty_weights():
        model = AutoModel.from_config(config, trust_remote_code=True)
    device_map = infer_auto_device_map(
        model, max_memory=max_memory,
        no_split_module_classes=no_split_module_classes)
    if torch.cuda.is_available():
        # Pin embeddings, final layernorm and LM head to GPU 0 so the
        # model's inputs and outputs live on a single device.
        device_map['transformer.word_embeddings'] = 0
        device_map['transformer.final_layernorm'] = 0
        device_map['lm_head'] = 0
    # Load the real weights OUTSIDE the init_empty_weights() context, per the
    # accelerate big-model-inference recipe (the context patches parameter
    # registration onto the meta device and must not wrap the actual load).
    model = load_checkpoint_and_dispatch(
        model, checkpoint, device_map=device_map
        ).half()
    if verbose:
        print("device map:")
        print(json.dumps(model.hf_device_map, indent=4))
    return tokenizer, model