import numpy as np
import torch

from config import Config


def convert_weight2(src_path, des_path):
    """Copy weights from the checkpoint at `src_path` onto the checkpoint at
    `des_path`, pairing tensors by position (dict insertion order), then save
    the result back to `des_path`.

    Scalar (0-dim) entries such as `num_batches_tracked` are dropped from both
    dicts before pairing.  Prints 'Format error!' and aborts (without saving)
    if the two checkpoints do not have the same number of weight tensors or a
    paired tensor's shape differs; prints 'Finished!' on success.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    src_dict = torch.load(src_path, map_location=device)
    des_dict = torch.load(des_path, map_location=device)

    # Keep only real weight tensors; 0-dim entries (e.g. num_batches_tracked)
    # would break positional pairing between the two models.
    src_dict = {k: v for k, v in src_dict.items() if np.shape(v) != torch.Size([])}
    des_dict = {k: v for k, v in des_dict.items() if np.shape(v) != torch.Size([])}

    if len(src_dict) != len(des_dict):
        print('Format error!')
        return

    for src_key, des_key in zip(src_dict, des_dict):
        # Compare the FULL shape, not just len(): len() only checks the
        # leading dimension, which would silently accept e.g. (3, 4) vs
        # (3, 5) and copy a mismatched tensor.
        if src_dict[src_key].shape != des_dict[des_key].shape:
            print('Format error!')
            return
        des_dict[des_key] = src_dict[src_key]

    torch.save(des_dict, des_path)
    print('Finished!')


def convert_weight(src_path):
    """Dump the checkpoint at `src_path` to a plain-text file `<src_path>.yb`.

    For each entry the format is: key, dtype, rank, shape (comma-separated),
    then one scalar per line, followed by a blank separator line.  Supports
    float32 tensors of rank 1-3 and 0-dim int64 scalars; anything else fails
    the assertions.
    """
    weight_dict = torch.load(src_path, map_location=torch.device('cpu'))
    with open(src_path + '.yb', 'w') as f:

        for k, v in weight_dict.items():
            f.write(k + "\n")
            f.write(str(v.dtype) + "\n")
            f.write(str(v.dim()) + "\n")

            if v.dim() == 1:
                assert (v.dtype == torch.float32)
                f.write(str(v.shape[0]) + "\n")
                for i in range(v.shape[0]):
                    a = float(v[i])
                    f.write(str(a) + "\n")

            elif v.dim() == 2:
                assert (v.dtype == torch.float32)
                f.write(str(v.shape[0]) + "," + str(v.shape[1]) + "\n")
                for i in range(v.shape[0]):
                    for ii in range(v.shape[1]):
                        a = float(v[i][ii])
                        f.write(str(a) + "\n")

            elif v.dim() == 3:
                assert (v.dtype == torch.float32)
                f.write(str(v.shape[0]) + "," + str(v.shape[1]) + "," + str(v.shape[2]) + "\n")
                for i in range(v.shape[0]):
                    for ii in range(v.shape[1]):
                        for iii in range(v.shape[2]):
                            a = float(v[i][ii][iii])
                            f.write(str(a) + "\n")

            else:
                # 0-dim int64 scalars (e.g. num_batches_tracked).
                assert (v.dim() == 0)
                assert (v.dtype == torch.int64)
                # np.int was removed in NumPy 1.24; the builtin int() converts
                # a 0-dim tensor identically.
                a = int(v)
                f.write(str(a) + "\n")

            f.write("\n")


def convert_weight_res(src_path):
    """Export every weight tensor in the checkpoint at `src_path` as a C
    `const float` array definition, written to `<src_path>.res`.

    Keys are assumed to look like '<prefix>.<layer>.<param>' (TODO confirm
    against the producing model); the emitted array is named
    '<layer>$<param>'.  'num_batches_tracked' entries are skipped, and
    'running_var' values are stored as sqrt(var + 1e-5) so the C side can
    divide by them directly.  Only float32 tensors of rank 1-3 are accepted.
    """
    with open(src_path + '.res', 'w') as out:
        weight_dict = torch.load(src_path, map_location='cpu')
        for name, tensor in weight_dict.items():
            parts = name.split('.')
            if parts[2] == 'num_batches_tracked':
                continue

            assert tensor.dim() in (1, 2, 3)
            assert tensor.dtype == torch.float32

            total = 1
            for dim_size in tensor.shape:
                total *= dim_size

            # running_var is always 1-D; fold in the batch-norm epsilon here.
            fold_var = tensor.dim() == 1 and parts[2] == 'running_var'

            out.write("const float %s$%s[%d] = \n" % (parts[1], parts[2], total))
            out.write("{\n")
            # reshape(-1) walks the tensor in row-major order, matching the
            # nested index loops element for element.
            for value in tensor.reshape(-1).tolist():
                if fold_var:
                    value = np.sqrt(value + 1e-5)
                out.write("\t" + str(value) + "f,\n")
            out.write("};\n")

def convert_weight_int(src_path):
    """Export every weight tensor in the checkpoint at `src_path` as a C
    `const int` array of fixed-point values, written to `<src_path>.int`.

    Each float is scaled by 4096, rounded half away from zero, and truncated
    to int.  Keys are assumed to look like '<prefix>.<layer>.<param>' (TODO
    confirm against the producing model); the emitted array is named
    '<layer>$<param>'.  'num_batches_tracked' entries are skipped, and
    'running_var' values are stored as sqrt(var + 1e-5).  Only float32
    tensors of rank 1-3 are accepted.
    """
    scale = 4096.0
    # Half of one fixed-point step, used for round-half-away-from-zero.
    half_step = 0.5 / scale

    with open(src_path + '.int', 'w') as out:
        weight_dict = torch.load(src_path, map_location='cpu')
        for name, tensor in weight_dict.items():
            parts = name.split('.')
            if parts[2] == 'num_batches_tracked':
                continue

            assert tensor.dim() in (1, 2, 3)
            assert tensor.dtype == torch.float32

            total = 1
            for dim_size in tensor.shape:
                total *= dim_size

            # running_var is always 1-D; fold in the batch-norm epsilon here.
            fold_var = tensor.dim() == 1 and parts[2] == 'running_var'

            out.write("const int %s$%s[%d] = \n" % (parts[1], parts[2], total))
            out.write("{\n")
            # reshape(-1) walks the tensor in row-major order, matching the
            # nested index loops element for element.
            for value in tensor.reshape(-1).tolist():
                if fold_var:
                    value = np.sqrt(value + 1e-5)
                # Bias by half a step toward +/- infinity, then int() truncates
                # toward zero => round half away from zero.
                value += half_step if value > 0.0 else -half_step
                out.write("\t" + str(int(value * scale)) + ",\n")
            out.write("};\n")


if __name__ == '__main__':
    # Export the checkpoint configured at Config["weight_path"] to both the
    # C float-array (.res) and fixed-point int-array (.int) formats.
    # The commented-out calls are the other conversion modes kept for
    # manual use.
    # convert_weight2("", "")
    # convert_weight(Config["weight_path"])
    convert_weight_res(Config["weight_path"])
    convert_weight_int(Config["weight_path"])

    print('Finished!')
