import codecs
import os

import torch

from wenet.utils import init_model
from gxl_ai_utils.config.gxl_config import GxlNode
from wenet.utils.init_tokenizer import init_tokenizer
from wenet.utils.train_utils import check_modify_and_save_config
from wenet.utils.checkpoint import save_checkpoint


def load_dic_from_scp(label_scp_file: str) -> dict:
    """Parse a Kaldi-style scp file into a ``{key: content}`` dict.

    Each line is expected to be ``key content...``. Lines with fewer than
    two whitespace-separated fields are skipped (with a warning). Lines
    with more than two fields are kept: the first field is the key and
    the rest are re-joined with single spaces as the value (also with a
    warning).
    """
    mapping = {}
    with codecs.open(label_scp_file, 'r', encoding='utf-8') as fin:
        for raw_line in fin:
            raw_line = raw_line.strip()
            fields = raw_line.split()
            if len(fields) < 2:
                # Malformed row: nothing usable, drop it.
                print('warning_gxl:, this row not conform to the regulation of scp(key content) and skip it:', raw_line)
                continue
            if len(fields) == 2:
                mapping[fields[0]] = fields[1]
            else:
                # More than two fields: keep it, first field is the key,
                # everything after is the value.
                print(
                    'warning_gxl:, this row not conform to the regulation of'
                    ' scp(key content) and no skip it,第一个为key,剩下的都是value:',
                    raw_line)
                mapping[fields[0]] = ' '.join(fields[1:])
    return mapping


if __name__ == '__main__':
    # One-off workbench script: load a pretrained Paraformer checkpoint with a
    # given symbol table and (optionally, see the commented recipe below) remap
    # its decoder embedding / output layer to a new vocabulary.
    args = GxlNode({})
    args.checkpoint = './data/wenet_paraformer.pt'
    args.symbol_table = './data/lang_char.txt'
    args.model_dir = './data2'
    # Bug fix: bpe_model / non_lang_syms are passed to init_tokenizer below but
    # were never assigned on the freshly-created GxlNode, so the attribute
    # access would presumably fail (NOTE(review): depends on GxlNode's
    # missing-attribute behavior — confirm). Default them to None, meaning
    # "no BPE model / no non-linguistic symbols".
    args.bpe_model = None
    args.non_lang_syms = None
    os.makedirs(args.model_dir, exist_ok=True)
    configs = GxlNode.get_config_from_yaml('./conf/train_paraformer.yaml').dict()

    # Init tokenizer from the symbol table.
    tokenizer = init_tokenizer(configs, args.symbol_table, args.bpe_model,
                               args.non_lang_syms)
    configs['vocab_size'] = tokenizer.vocab_size()
    # Do some sanity checks and save config to args.model_dir
    configs = check_modify_and_save_config(args, configs, tokenizer.symbol_table)
    # Init asr model from configs
    model, configs = init_model.init_model(args, configs)
    print(model)
    print(model.decoder.embed[0].weight.shape)

    # Reference recipe (intentionally kept commented out): remap the decoder's
    # embedding rows and output-layer rows/bias from the source vocabulary
    # ('./data/lang_char.txt') to a target vocabulary ('./data2/lang_char.txt'),
    # initializing unmatched entries randomly, then save the resulting
    # checkpoint. Uncomment to perform the vocabulary transfer.
    # original_weight = model.decoder.embed[0].weight
    # target_dict = load_dic_from_scp('./data2/lang_char.txt')
    # source_dict = load_dic_from_scp('./data/lang_char.txt')
    # new_weight = torch.randn(len(target_dict), 512)
    # for key, index in target_dict.items():
    #     if key in source_dict:
    #         new_weight[int(index)] = original_weight[int(source_dict[key])]
    # model.decoder.embed[0] = torch.nn.Embedding(len(target_dict), 512)
    # model.decoder.embed[0].weight = torch.nn.Parameter(new_weight)
    # print(model)
    # print(model.decoder.output_layer.weight.shape)
    # print(model.decoder.output_layer.bias.shape)
    # old_weight = model.decoder.output_layer.weight
    # old_bias = model.decoder.output_layer.bias
    # new_weight = torch.randn(len(target_dict), 512)
    # new_bias = torch.randn(len(target_dict))
    # for key, index in target_dict.items():
    #     if key in source_dict:
    #         new_weight[int(index)] = old_weight[int(source_dict[key])]
    #         new_bias[int(index)] = old_bias[int(source_dict[key])]
    # model.decoder.output_layer = torch.nn.Linear(512, len(target_dict))
    # model.decoder.output_layer.weight = torch.nn.Parameter(new_weight)
    # model.decoder.output_layer.bias = torch.nn.Parameter(new_bias)
    # print(model)
    # print(model.decoder.output_layer.weight.shape)
    # print(model.decoder.output_layer.bias.shape)
    # save_checkpoint(model, './data2/wenet_paraformer.pt')
