import torch
import re

def device_replace(model_device_map, model_gees_map, model_name=None):
    '''
    Translate a gees-style device map onto an HF-style device map.
    author:chuhongjie
    time:2024-4-16
    version:2

    Gees op names use underscores and snake_case where HF module paths use
    dots and CamelCase sub-module names.  For each HF key the matching gees
    key is derived and its device assignment copied over.  Keys with no
    whole-module gees entry are split into ``.bias`` / ``.weight`` entries
    when those exist on the gees side; otherwise the key is dropped.

    para:
    model_device_map: dict mapping HF module path -> device; mutated in
        place and also returned.
    model_gees_map: dict mapping gees op name -> device string.
    model_name: optional model class name enabling model-specific fixups.
    return:
    model_device_map: the mutated map with gees device assignments applied.
    '''
    # CamelCase HF sub-module names and their snake_case gees equivalents.
    renames = (
        ('LayerNorm', 'layer_norm'),
        ('SelfAttention', 'self_attention'),
        ('EncDecAttention', 'enc_dec_attention'),
        ('DenseReluDense', 'dense_relu_dense'),
    )
    # Iterate over a snapshot of the keys: the loop deletes/inserts entries.
    for k in list(model_device_map.keys()):
        new_k = k.replace('.', '_')
        for camel, snake in renames:
            if camel in new_k:
                new_k = new_k.replace(camel, snake)
        if new_k in model_gees_map:
            # Whole-module entry exists on the gees side: copy its device.
            model_device_map[k] = model_gees_map[new_k]
        else:
            # No whole-module entry: drop the module key and fall back to
            # the parameter-level entries that gees does track.
            del model_device_map[k]
            new_bias_key = new_k + '_bias'
            new_weight_key = new_k + '_weight'
            if new_bias_key in model_gees_map:
                model_device_map[k + '.bias'] = model_gees_map[new_bias_key]
            if new_weight_key in model_gees_map:
                model_device_map[k + '.weight'] = model_gees_map[new_weight_key]
    # Model-specific fixups for tied / parameter-only entries the walk above
    # cannot discover.  Every lookup is guarded (the GPT2 branch already was)
    # so a partially-populated map no longer raises KeyError.
    if model_name == 'BertLMHeadModel':
        if 'cls.predictions.transform.dense' in model_device_map:
            model_device_map['cls.predictions.bias'] = model_device_map['cls.predictions.transform.dense']
    if model_name == 'T5Model':
        if 'shared' in model_device_map:
            model_device_map['decoder.embed_tokens'] = model_device_map['shared']
        if 'encoder.dropout' in model_device_map:
            model_device_map['encoder.embed_tokens'] = model_device_map['encoder.dropout']
    if model_name == 'BertForSequenceClassification':
        if 'bert.embeddings.word_embeddings' in model_device_map:
            model_device_map['bert.embeddings.position_ids'] = model_device_map['bert.embeddings.word_embeddings']
    if model_name == 'GPT2ForSequenceClassification':
        if 'transformer.ln_f' in model_device_map:
            model_device_map['score'] = model_device_map['transformer.ln_f']
    return model_device_map

def opNameConvert(geesi_device_map, model, model_name=None):
    """Build an HF-style device map for *model* from a geesi device map.

    Collects every leaf module name of the model, normalises ``gpu:N``
    device strings to ``cuda:N``, then delegates the op-name translation
    to :func:`device_replace`.
    """
    leaf_map = {}
    for module_name, module in model.named_modules():
        # A leaf module is one without children, i.e. the innermost op.
        is_leaf = not list(module.named_children())
        if is_leaf:
            leaf_map[module_name] = ''
    normalized = {
        op_name: device.replace('gpu:', 'cuda:')
        for op_name, device in geesi_device_map.items()
    }
    return device_replace(leaf_map, normalized, model_name)

def opsToLayer(device_map):
    """Collapse per-op device assignments into per-layer assignments.

    Each key is truncated right after its first run of digits (the layer
    index), so all ops belonging to the same layer collapse onto a single
    entry; for duplicates the last one seen wins.  Keys containing no
    digits are carried over unchanged.
    """
    layer_map = {}
    for op_name, device in device_map.items():
        digits = re.search(r'(\d+)', op_name)
        layer_key = op_name if digits is None else op_name[:digits.end()]
        layer_map[layer_key] = device
    return layer_map
