import torch
import numpy as np
import cv2
import os
import glob
import codecs
import json
from heilsTrain import retrain
from models.MobileNet import MobileNet
import shutil
import time

def rename(folder='/data1/meter-server/deploy/pic/C14/1_开/', ext=".jpg"):
    """Rename every file in *folder* to a zero-padded sequential name (0000.jpg, ...).

    The file list is sorted so numbering is deterministic, and a two-pass
    scheme (temporary names first) prevents a target name such as "0000.jpg"
    from clobbering an existing file mid-loop.

    Args:
        folder: directory whose files are renamed (default keeps the original path).
        ext: extension given to the renamed files.
    """
    paths = sorted(glob.glob(os.path.join(folder, "*")))
    # Pass 1: move everything to unique temporary names to avoid collisions.
    tmp_paths = []
    for i, imgpath in enumerate(paths):
        tmp = os.path.join(folder, ".renaming_tmp_{}".format(i))
        os.rename(imgpath, tmp)
        tmp_paths.append(tmp)
    # Pass 2: assign the final sequential names.
    for i, tmp in enumerate(tmp_paths):
        new_name = os.path.join(folder, str(i).zfill(4) + ext)
        os.rename(tmp, new_name)
        print(i, new_name)


def crop_fromjson():
    """Crop labelme-annotated boxes from source images and save each crop into a
    subfolder of *target_path* named after the shape's ``group_id`` (0 or 1).
    """
    folder_path = "/data1/sheng/1116/04_geli_switch/test_daset_orgimg"
    target_path = "/data1/sheng/1116/04_geli_switch/test_img"
    for i, jsonfile in enumerate(glob.glob(os.path.join(folder_path, "*.json"))):
        with codecs.open(jsonfile, 'r', encoding='utf-8', errors='ignore') as f:
            jsondict = json.load(f)
            # glob already yields an absolute path, and os.path.join keeps an
            # absolute second argument, so this resolves to the sibling .jpg.
            imagePath = os.path.join(folder_path, jsonfile.replace("json", "jpg"))
            imageHeight = jsondict["imageHeight"]
            imageWidth = jsondict["imageWidth"]
            if os.path.exists(imagePath):
                img = cv2.imread(imagePath)

                for shapeid, shape in enumerate(jsondict["shapes"]):
                    status = shape['group_id']
                    print("status: ", status, type(status))

                    # Previously a shape whose group_id was neither 0 nor 1
                    # (labelme's default is None) either crashed with a
                    # NameError or was silently saved into the previous
                    # shape's folder; skip it explicitly instead.
                    if status not in (0, 1):
                        print("skip shape with unexpected group_id:", status, imagePath)
                        continue
                    new_path = os.path.join(target_path, str(status))
                    os.makedirs(new_path, exist_ok=True)

                    img_name = jsondict["imagePath"].split(".")[0] + '_' + str(shapeid) + ".jpg"
                    p0, p1 = shape["points"]
                    x0, y0 = p0
                    x1, y1 = p1
                    x0, x1 = min(x0, x1), max(x0, x1)
                    y0, y1 = min(y0, y1), max(y0, y1)
                    # Pad only the bottom edge by 5 px, clamped inside the image.
                    y1 = min(y1 + 5, imageHeight - 1)
                    x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
                    # Save as <label><origname>_<shapeid>.jpg under the group_id folder.
                    cv2.imwrite(os.path.join(new_path, shape["label"] + img_name), img[y0:y1, x0:x1])
                    print(i, 'save: ', os.path.join(new_path, shape["label"] + img_name))


def unpickle(file):
    """Load a pickle file and return its contents.

    ``encoding='bytes'`` lets Python 3 read pickles produced by Python 2,
    which is the format of the original CIFAR-10 batch files.
    """
    import pickle
    with open(file, 'rb') as fo:
        data = pickle.load(fo, encoding='bytes')  # do not shadow builtin `dict`
    return data


def cafir10():
    """Unpack CIFAR-10 pickle batches into per-label image folders on disk.

    NOTE(review): 'data_batch_1' is absent from the batch list — confirm
    whether that exclusion is deliberate.
    """
    out_root = "/data1/sheng/1116/04_geli_switch/cafir_train/"
    batches = ['data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5']
    for batch_name in batches:  # was `i`, shadowed by the inner loop below
        batch = unpickle('/home/sheng/Downloads/cifar-10-batches-py/{}'.format(batch_name))
        print(batch[b'data'].shape)

        for idx in range(10000):  # each CIFAR batch file holds 10000 rows
            img = batch[b"data"][idx]
            # Rows are flat 3072-byte vectors in CHW order; convert to HWC.
            img = np.reshape(img, (3, 32, 32))
            img = img.transpose((1, 2, 0))
            img_name = batch[b"filenames"][idx]
            img_label = str(batch[b"labels"][idx])
            label_dir = out_root + str(img_label)
            my_path = label_dir + "/" + str(img_name.decode("utf-8"))

            # Saving into the label-named folder is what assigns the class.
            if not os.path.exists(label_dir):
                os.makedirs(label_dir)
            cv2.imwrite(my_path, img)
            print(idx, my_path)


def heils_train_imagenet():
    """Retrain on the ImageNet subset and keep only the newest weight file.

    After training, every older ``wei*`` checkpoint in ``<path>_output/`` is
    deleted so just the most recently written model survives.
    """
    # Earlier experiment paths, kept for reference (the live assignment is below):
    # path = '/data1/sheng/temp/1211/2_2fenlei'
    # path = '/data1/sheng/1116/04_geli_switch/4'
    # path = '/data1/sheng/temp/1225_classify/foods'
    # path = '/data1/sheng/temp/1225_classify/majian'
    path = '/data1/imagenet/train_12277'

    iters = '1, 1, 1'
    import heilsTrain_imagenet
    final_path = heilsTrain_imagenet.retrain(path, iters)
    print("sheng train final_path: {} ".format(final_path))
    # Sort checkpoints by mtime; everything except the newest is removed.
    delete_model = sorted(glob.glob(path + '_output/wei*'), key=os.path.getmtime)[:-1]
    for delete_model_name in delete_model:
        os.remove(delete_model_name)
        print("success delete :", delete_model_name)


def get_mac_address(interface="eno2"):
    """Return ``(colon-separated MAC, bare hex MAC)`` for *interface*.

    The NIC name was previously hard-coded; it is now a parameter with the
    same default, so existing callers are unaffected. ``uuid.getnode()`` is
    printed for comparison only — it can return a random value when no real
    hardware address is found — and the authoritative answer comes from the
    ``getmac`` package.
    """
    import uuid, getmac
    print(uuid.getnode())
    mac = uuid.UUID(int=uuid.getnode()).hex[:].upper()
    a = uuid.uuid1(uuid.getnode())
    print("a:", a.hex[-12:].upper())
    macaddr = getmac.get_mac_address(interface=interface).upper()
    macid = "".join(macaddr.split(":"))
    print("------macaddr: {},   mac:{}   ---------".format(macaddr, mac))
    return macaddr, macid


def printNetIfAddr():
    """Print adapter name, MAC, IPv4 and IPv6 address for every network interface."""
    import psutil
    dic = psutil.net_if_addrs()
    for adapter in dic:
        snicList = dic[adapter]
        mac = 'wu mac'
        ipv4 = 'wu ipv4'
        ipv6 = 'wu ipv6'
        for snic in snicList:
            # Compare family names exactly: the original substring test
            # (`snic.family.name in 'AF_INET6'`) also matched 'AF_INET',
            # so an IPv4 entry leaked into the ipv6 slot.
            if snic.family.name in {'AF_LINK', 'AF_PACKET'}:
                mac = snic.address
            elif snic.family.name == 'AF_INET':
                ipv4 = snic.address
            elif snic.family.name == 'AF_INET6':
                ipv6 = snic.address
        print('%s  -----  %s  -----  %s  -----  %s' % (adapter, mac, ipv4, ipv6))


def make_gt_txt(pre_path='/data1/sheng/1116/04_geli_switch/4/',
                folders=('0', '1'), out_file='./gt.txt'):
    """Write a ground-truth list: one "<image-path> <label>" line per image.

    Each entry of *folders* names both a subfolder of *pre_path* and the label
    written after the image path. Filenames are sorted so the output is
    deterministic, and the file handle is managed with ``with`` so it is
    closed even if a listing fails.
    """
    with open(out_file, 'w') as txt:
        for folder in folders:
            new_folder = pre_path + folder
            for name in sorted(os.listdir(new_folder)):
                txt.write(os.path.join(new_folder, name) + " " + folder + '\n')


def rename_folder(train_path='/data1/imagenet/ILSVRC2012_img_train',
                  val_path='/data1/imagenet/ILSVRC2012_img_val'):
    """Rename matching class folders under train/val to sequential indices 0..N-1.

    The listing is sorted so the class->index mapping is deterministic —
    ``os.listdir`` order is arbitrary, which previously made the mapping
    unreproducible between runs (and train/val could disagree with any
    mapping recorded elsewhere).

    NOTE(review): assumes every train folder also exists under *val_path*
    and that no folder is already named with a bare index — confirm before
    running on a partially converted tree.
    """
    folder_list = sorted(os.listdir(train_path))
    for i, folder_old_name in enumerate(folder_list):
        train_old_name = os.path.join(train_path, folder_old_name)
        val_old_name = os.path.join(val_path, folder_old_name)
        train_new_name = os.path.join(train_path, str(i))
        val_new_name = os.path.join(val_path, str(i))
        shutil.move(train_old_name, train_new_name)
        shutil.move(val_old_name, val_new_name)
        print(i, train_old_name, val_old_name)
        print(i, train_new_name, val_new_name)


def load_model_2_cpu(state_dict):
    '''
    Strip the "module." prefix that torch.nn.DataParallel prepends to every
    parameter name, so the weights load into a plain (non-parallel) model.

    Only a *leading* "module." is removed: the original ``k.replace`` also
    stripped the substring from the middle of a key (e.g. "a.module.b"),
    which could corrupt legitimate parameter names. Insertion order is
    preserved via OrderedDict.
    '''
    from collections import OrderedDict
    prefix = "module."
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[len(prefix):] if k.startswith(prefix) else k
        new_state_dict[name] = v
    return new_state_dict


def save_new_premodel():
    """Build a 3-class MobileNet checkpoint from an existing trained one.

    Loads a DataParallel-trained state dict, strips the "module." prefixes
    via load_model_2_cpu, truncates the final fully-connected layer's weight
    and bias to the first 3 output rows, and saves the result as a new
    pretrained checkpoint on disk.
    """

    model = MobileNet(num_classes=3, alpha=0.5)

    state_dict = load_model_2_cpu(torch.load('/data1/sheng/1116/classify/Net_05_171_1610156048.3053317.pth'))
    # state_dict = torch.load('/data1/sheng/1116/classify/new_pretrain.pth')
    # Keep only the first 3 output rows so the fc layer matches num_classes=3.
    # NOTE(review): assumes the source checkpoint was trained with >= 3 classes
    # and that its fc layer is named 'fc' — confirm against the MobileNet code.
    state_dict['fc.weight'] = state_dict['fc.weight'][:3]
    state_dict['fc.bias'] = state_dict['fc.bias'][:3]
    model.load_state_dict(state_dict)
    # model.fc = torch.nn.Linear(model.fc.in_features, 3)
    torch.save(model.state_dict(), './0109_171epoch_pretrain.pth')


def json_to_make_txt():
    """Crop 2-point labelme boxes from PNG images into per-label folders.

    For every ``*.json`` in *folder_path*, reads the sibling ``.png``, crops
    each rectangular (2-point) shape, and writes it to
    ``<target_path>/<label>/<iid>_<time-suffix>.jpg``. The txt-writing code
    this function is named for is currently commented out.

    NOTE(review): assumes a subfolder of *target_path* named after each
    shape label already exists — cv2.imwrite fails silently otherwise.
    """
    label = 0
    # txt_handle = open('./data/xueyaji_train.txt', 'w')
    folder_path = r'/data1/sheng/fall/FallDetectionDataset/02'
    target_path = "/data1/sheng/fall/fall_dataset"
    iid = 0  # global crop counter, used to make output filenames unique
    for ii, jsonfile in enumerate(glob.glob(os.path.join(folder_path, "*.json"))):
        with codecs.open(jsonfile, 'r', encoding='utf-8', errors='ignore') as f:
            jsondict = json.load(f)
            # Image is the .png with the same basename as the .json.
            imagePath = os.path.join(folder_path,os.path.split(jsonfile)[1].replace("json", "png"))
            imageHeight = jsondict["imageHeight"]
            imageWidth = jsondict["imageWidth"]
            print(ii, imagePath)
            if os.path.exists(imagePath):
                img = cv2.imread(imagePath)
                # img = cv2.imdecode(np.fromfile(imagePath, dtype=np.uint8), cv2.IMREAD_COLOR)
                # a = img.shape
                # print(id, imagePath, a)
                context = ""
                if jsondict["shapes"].__len__() >= 1:
                    for shapeid, shape in enumerate(jsondict["shapes"]):
                        # print(imagePath, shape["points"].__len__())
                        # Only rectangles (two corner points) are handled.
                        if shape["points"].__len__() == 2:
                            # print("shape:", shape["points"])
                            # label = shape['label']
                            p0, p1= shape["points"]
                            x0,y0 = p0
                            x1,y1 = p1
                            # Normalize corner order and clamp at zero.
                            x0,x1 = max(0, int(min(x0,x1))), max(0, int(max(x0,x1)))
                            y0,y1 = max(0, int(min(y0,y1))), max(0, int(max(y0,y1)))
                            # Save in <label>/<id> folder format.
                            # print("shape[label]:", shape["label"])
                            label = shape["label"]
                            crop_img = img[y0:y1, x0:x1]
                            # print(y0,y1, x0,x1)
                            cv2.imwrite(os.path.join(target_path, label, f"{iid}_{str(time.time())[-5:]}.jpg"), crop_img)
                            iid += 1
                            # Accumulate "x0,y0,x1,y1,1" entries for the (disabled) txt output.
                            coor_message = " " + str(int(x0)) + "," + str(int(y0)) + "," + str(int(x1)) + "," + str(int(y1)) + ",1"
                            context += coor_message
                        else:
                            print("shape[points].__len__() != 2", imagePath)
                    # txt_handle.write(imagePath + "" + context + '\n')
                    # print(id, imagePath + "" + context + '\n')
                else:
                    print("bug json: ", imagePath)
            else:
                print(imagePath, " no in -----")
    # txt_handle.close()


def read_hk_rtsp():
    """Stream frames from a Hikvision RTSP camera and display them until space is pressed."""
    # cap = cv2.VideoCapture("rtsp://admin:admin@192.168.2.64:554//Streaming/Channels/1")
    cap = cv2.VideoCapture("rtsp://admin:admin@192.168.1.251:554/h264/ch2/sub/av_stream")
    ret, frame = cap.read()
    while ret:
        ret, frame = cap.read()
        if not ret:
            # Previously a failed read still fell through to frame.shape,
            # crashing on None before the loop condition was re-checked.
            break
        print(frame.shape)
        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord(' '):
            break
    cv2.destroyAllWindows()
    cap.release()

def shuffle_data(data_path=r"/data2/enducation/datas/answer_card/class_2/class_data/20211015_data/0",
                 to_path=r"/data2/enducation/datas/answer_card/class_2/class_data/20211015_data/00",
                 n=135):
    """Move *n* randomly chosen files from *data_path* into *to_path*.

    Paths and sample size were previously hard-coded; they are now parameters
    with the same defaults, so existing callers are unaffected.

    Raises:
        ValueError: if *data_path* holds fewer than *n* files (from random.sample).
    """
    import random
    # Ensure the destination exists — shutil.move into a missing directory
    # would otherwise raise FileNotFoundError.
    if not os.path.exists(to_path):
        os.makedirs(to_path)
    data_list = os.listdir(data_path)
    data_sample = random.sample(data_list, n)
    for name in data_sample:
        shutil.move(os.path.join(data_path, name), os.path.join(to_path, name))

if __name__ == '__main__':
    # Ad-hoc driver: uncomment whichever utility to run; currently only
    # shuffle_data() is active.
    # rename()
    # read_hk_rtsp()
    # crop_fromjson()
    # print(torch.backends.cudnn.benchmark)
    # heils_train()
    # save_new_premodel()
    # rename_folder()
    # cafir10()
    # get_mac_address()
    # printNetIfAddr()
    # make_gt_txt()
    # json_to_make_txt()
    shuffle_data()
    # model = MobileNet(3, 0.5)
    # model.load_state_dict(torch.load('/data1/sheng/1116/04_geli_switch/4_output/weights-49-15-[0.9883551673944687]-1608778559.0543816.pth'))
    # m = torch.load('./sucess_premodel_mobilenet.pth')
    # print(model)

    # lr = 0.001
    # for e in range(200//3):
    #     lr = lr * 0.91
    # img = cv2.imread(r'/data1/sheng/1116/c/card_classify/1/ans_.20608_00172.jpg')
    # img = cv2.resize(img, (48, 26))
    # cv2.imshow("t", img)
    # cv2.waitKey(0)

    print("over ~_~ ")
