import os
import torch
import numpy as np
import cv2
import argparse
from hub.yolov8 import yolov8_tfhead, yolov8, yolov8_transfromer_head, yolov8_p2, yolov8_p2_tf, yolov8_embed_transfromer_head, yolov8_p2_cmbl,yolov8_cmbl
from hub.yolov5 import yolov5

from visualize.feature_maps import feature_map_visualize
from util.common import image_preprocess
from util.metrics import NMS, collect_simpledetdataloader_resault

OFFICIAL_MODEL_INFO = 'official_model.txt'
MY_MODEL_INFO = 'my_model.txt'


def main(parse):
    """Remap an official YOLOv8 state dict onto `my_model` by positional key
    order, then save the remapped weights to weights/update_state_dict.pt.

    Also dumps this model's parameter names/shapes to MY_MODEL_INFO for
    manual side-by-side comparison with the official checkpoint.

    Args:
        parse: parsed argparse namespace; `parse.weight` is the path to the
            official state-dict file.
    """
    weight_path = parse.weight
    if not os.path.exists(weight_path):
        print('the weight does not exist')  # fixed typo in error message
        return
    my_model = yolov8.yolov8_detect(nc=80, phi='s')
    my_model.eval()
    official_model = torch.load(weight_path)
    predict = dict(official_model.items())
    my_state_dict = dict(my_model.state_dict().items())
    my_state_dict_keys = list(my_state_dict.keys())
    # Dump this model's parameter names and shapes for manual inspection.
    with open(MY_MODEL_INFO, 'w') as f:
        for k, v in my_model.state_dict().items():
            f.write('{} : [{}]\n'.format(k, v.shape))
    print('official model state dict len={}, and my model = {}'.format(
        len(official_model.items()), len(my_model.state_dict().items())))
    # NOTE(review): weights are matched purely by position — this assumes both
    # state dicts enumerate corresponding layers in the same order; an
    # IndexError here means the two architectures do not line up.
    for i, k in enumerate(predict.keys()):
        my_state_dict[my_state_dict_keys[i]] = predict[k]
    my_model.load_state_dict(my_state_dict)
    os.makedirs('weights', exist_ok=True)  # ensure output dir exists
    torch.save(my_model.state_dict(), os.path.join('weights', 'update_state_dict.pt'))


def eval_size():
    """Profile a model with thop and print its MACs and parameter count."""
    from thop import profile, clever_format
    anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
    model = yolov5.yolov5(3, anchors, 'l')
    # Renamed from `input` to avoid shadowing the builtin.
    dummy_input = torch.randn(1, 3, 640, 640)
    macs, params = profile(model, inputs=(dummy_input,))
    # macs * 2 gives FLOPs; params is the parameter count.
    macs, params = clever_format([macs, params], "%.3f")
    print((macs, params))

def test_model():
    """Smoke-test yolov8_p2: push a dummy image through backbone, neck and
    head separately and print the output shapes of each stage."""
    device = torch.device('cpu')
    model = yolov8_p2.yolov8_detect(3, 's').to(device)
    # Renamed from `input` to avoid shadowing the builtin.
    dummy = torch.randn(1, 3, 640, 640).to(device)
    model.train()
    # Bug fix: label said 'l' but the model above is built with phi='s'.
    print('模型大小：s')
    print('backbone输出：')
    feats = model.backbone(dummy)
    for y in feats:
        print(y.shape)
    # yolov8_p2's backbone yields four feature maps, all fed to the neck.
    feats = model.neck(feats[0], feats[1], feats[2], feats[3])
    print('neck输出：')
    for y in feats:
        print(y.shape)
    outs = model.head(list(feats))
    print('head输出：')
    for y in outs:
        print(y.shape)


def show_feature_map(parse):
    """Visualize yolov8 backbone feature maps for a single input image.

    Args:
        parse: argparse namespace; `parse.weight` is an optional state-dict
            path (loaded only if the file exists) and `parse.img` is the
            input image path.
    """
    device = torch.device('cpu')
    model = yolov8.yolov8_detect(3, 's').to(device)

    if os.path.exists(parse.weight):
        print('load weight from {}'.format(parse.weight))
        pretrained_dict = torch.load(parse.weight, map_location=device)
        model.load_state_dict(pretrained_dict)
    # NOTE(review): train() keeps BN/dropout in training mode; switch to
    # eval() if inference-style activations are wanted.
    model.train()
    # Removed two unused locals from the original: a dummy randn tensor and
    # an unnormalized image_preprocess result that was never read.
    src_img = cv2.imread(parse.img)
    img = cv2.cvtColor(src_img.copy(), cv2.COLOR_BGR2RGB)
    img = image_preprocess(img, (640, 640), True)
    img_tensor = torch.from_numpy(img).float().to(device)
    img_tensor = img_tensor.permute(2, 0, 1).unsqueeze(0)  # HWC -> NCHW
    fv = feature_map_visualize(model, save_dir='visualize/feature_maps')
    fv.fit_input(img_tensor)
    fv.visualize('backbone')

def load_weight(model, weight_path):
    """Load pretrained weights into `model` by positional key matching.

    The checkpoint at `weight_path` must hold its weights mapping under the
    'model' key. Entries are copied onto the model's parameters in
    enumeration order; any entry whose shape differs from the corresponding
    model parameter is skipped and reported.

    Bug fix: the original bound the loaded mapping to `pretrained` but then
    iterated `pretrained_dict` — a NameError on every call.

    Args:
        model: torch.nn.Module receiving the weights.
        weight_path: path to a checkpoint file containing a 'model' entry.
    """
    pretrained_dict = torch.load(weight_path, map_location=torch.device('cpu'))['model']
    model_dict = model.state_dict()
    model_dict_keys = list(model_dict.keys())
    load_key, no_load_key, temp_dict = [], [], {}
    # NOTE(review): matching is by position, not by name — assumes both state
    # dicts enumerate corresponding layers in the same order.
    for i, v in enumerate(pretrained_dict.values()):
        if i >= len(model_dict_keys):
            break  # checkpoint has more entries than the model; ignore the rest
        if np.shape(model_dict[model_dict_keys[i]]) == np.shape(v):
            temp_dict[model_dict_keys[i]] = v
            load_key.append(model_dict_keys[i])
        else:
            no_load_key.append(model_dict_keys[i])
    model_dict.update(temp_dict)
    model.load_state_dict(model_dict)
    print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key))
    print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key))

if __name__ == '__main__':
    # CLI: pick a checkpoint and a sample image, then load the checkpoint
    # into a yolov5-l model by positional matching.
    arg_parser = argparse.ArgumentParser()
    # arg_parser.add_argument('--weight', help='the path to the weight', type=str, default='weights/official/yolov8s_state_dict.pt')
    arg_parser.add_argument('--weight', help='the path to the weight', type=str, default='weights/official/yolov5l.pt')
    arg_parser.add_argument('--img', help='the path to the image', type=str, default='../dataset/SYNTHIA/images/00000000.jpg')
    parse = arg_parser.parse_args()

    anchors = [
        [116, 90, 156, 198, 373, 326],
        [30, 61, 62, 45, 59, 119],
        [10, 13, 16, 30, 33, 23],
    ]
    model = yolov5.yolov5(3, anchors, 'l')
    load_weight(model, parse.weight)
    # Alternative entry points — enable as needed:
    # main(parse)
    # test_model()
    # eval_size()
    # show_feature_map(parse)

