import json
import os
import random
import pandas as pd
from torchvision.transforms import ToTensor,Normalize
import numpy as np
import cv2


# Per-channel normalization constants used by normalize_data.
# With ToTensor scaling pixels to [0, 1], mean/std of 0.5 maps values to [-1, 1].
Mean = [0.5,0.5,0.5]
Std = [0.5,0.5,0.5]

def normalize_data(image):
    """Convert an image to a normalized float tensor.

    ToTensor converts the input to a CxHxW tensor scaled to [0, 1];
    Normalize with the module-level Mean/Std then shifts it to [-1, 1].
    """
    tensor = ToTensor()(image)
    return Normalize(mean=Mean, std=Std)(tensor).float()

def split_json(json_dir):
    """Shuffle the annotations in *json_dir* and write four split files.

    Next to the input file (``xxx.json``) this creates:
      - ``xxx_train.json``   : first 70% of the shuffled annotations
      - ``xxx_val.json``     : remaining 30%
      - ``xxx_train_2.json`` : last 70% (complement of val_2)
      - ``xxx_val_2.json``   : first 30%

    Fixes: the original opened files (input in 'rb', outputs) without ever
    closing them; all handles are now managed with ``with``.
    """
    with open(json_dir, 'r') as f:
        annotations = json.load(f)['annotations']
    random.shuffle(annotations)
    n = len(annotations)
    cut_70 = int(0.7 * n)
    cut_30 = int(0.3 * n)
    splits = {
        '_train.json': annotations[:cut_70],
        '_val.json': annotations[cut_70:],
        '_train_2.json': annotations[cut_30:],
        '_val_2.json': annotations[:cut_30],
    }
    for suffix, subset in splits.items():
        with open(json_dir.replace('.json', suffix), 'w') as f:
            json.dump(subset, f)



def remake_data_list(data_list,stage = 'stage1'):
    """Remap the labels of an already-generated data list.

    stage1: merge class 1 and class 2 into one class, so labels
            0,1,2,3 become 0,1,1,2 (every item is kept).
    stage2: keep ONLY items whose label is 1 or 2 and remap them to
            binary labels 0 and 1 respectively; all other items are dropped.

    Fixes: the original appended every item with its (possibly unmapped)
    label before the stage2 branch ran, so stage2 output wrongly contained
    all items with original labels plus the remapped duplicates.
    """
    new_list = []
    for anno in data_list:
        cls = anno[1]
        if stage == 'stage2':
            # Only classes 1 and 2 survive, remapped to 0/1.
            if cls == 1:
                new_list.append([anno[0], 0])
            elif cls == 2:
                new_list.append([anno[0], 1])
        else:
            label = cls
            if stage == 'stage1':
                # Classes 2 and 3 collapse onto 1 and 2; class 1 already maps to 1.
                if cls == 2:
                    label = 1
                elif cls == 3:
                    label = 2
            new_list.append([anno[0], label])
    return new_list



def adjust_data_list(data_list):
    """Rebalance class ratios to match the online test set.

    Items labelled 1 or 2 are kept three times (two extra copies of the
    same list object); every other item is kept once. Order is preserved.
    """
    rebalanced = []
    for anno in data_list:
        copies = 3 if anno[1] in (1, 2) else 1
        rebalanced.extend([anno] * copies)
    return rebalanced


# def get_anno_list(image_path = '../train',json_dir = '../label/train.json',):
#     ## 返回data list，每个元素是有同一个类别的图片路径加上类别属性(最后一个元素)组成的list
#     anno_list = []
#     json_file = open(json_dir)
#     json_file = json.load(json_file)['annotations']
#     for data in json_file:
#         data_name = os.path.join(image_path,data['id'])
#         image_dir = []
#         for name in os.listdir(data_name):
#             image_dir.append(os.path.join(data_name,name))
#         anno_list.append(image_dir+[data['status']])
#     return anno_list

# def get_data_list(anno_list,image_size = (1280,720)):
#     data_list = []
#     for data in anno_list:
#         image_list = []
#         label = data[-1]
#         for dir in data[:-1]:
#             image_list.append(cv2.resize(cv2.imread(dir),image_size))
#             image_list.append(label)
#         data_list.append(image_list)
#     return data_list[:int(0.7*len(data_list))],data_list[int(0.7*len(data_list)):]

def get_data_list(image_path = '../final_train',json_dir = '../label/final.json',only_key_frame = False,over_sample = False):
    """Build the multi-class (train, val) annotation lists.

    Each element is ``[image_file_path, status]``. If ``only_key_frame`` is
    True, only each sample's key frame is used; otherwise every file found
    under the sample directory is collected. When ``over_sample`` is True,
    TRAIN entries with status 1 get 10 extra copies and status 2 get 4
    extra copies; validation entries are never resampled.

    Fixes: the split JSON files were opened without being closed, and the
    oversampling logic was duplicated four times; both are consolidated.
    """
    # Extra copies per status when over-sampling the training split.
    extra_copies = {1: 10, 2: 4}

    def frame_paths(data):
        # Paths of the frames used for one annotated sample.
        sample_dir = os.path.join(image_path, data['id'])
        if only_key_frame:
            return [os.path.join(sample_dir, data['key_frame'])]
        return [os.path.join(sample_dir, name) for name in os.listdir(sample_dir)]

    def build(json_file, resample):
        # Expand annotations into [path, status] entries, optionally resampled.
        anno_list = []
        for data in json_file:
            repeats = 1 + (extra_copies.get(data['status'], 0) if resample else 0)
            for path in frame_paths(data):
                for _ in range(repeats):
                    anno_list.append([path, data['status']])
        return anno_list

    with open(json_dir.replace('.json', '_train.json')) as f:
        json_file_train = json.load(f)
    with open(json_dir.replace('.json', '_val.json')) as f:
        json_file_val = json.load(f)
    anno_list_train = build(json_file_train, over_sample)
    anno_list_val = build(json_file_val, False)
    print('use multi class list,get num of train--',len(anno_list_train),'--get num of val --',len(anno_list_val))
    return anno_list_train,anno_list_val

def get_binary_list_stage1(image_path = '../final_train',json_dir = '../label/final.json',only_key_frame = False):
    """Build (train, val) lists for the stage-1 binary model.

    Labels 1 and 2 are treated as one positive class: status 0 maps to
    label 0, status 1/2 map to label 1. TRAIN entries with status 1 or 2
    get two extra copies (tripled); validation entries are not resampled.

    Fixes: the split JSON files were opened without being closed, and the
    resampling logic was duplicated; both are consolidated.
    """
    def frame_paths(data):
        # Paths of the frames used for one annotated sample.
        sample_dir = os.path.join(image_path, data['id'])
        if only_key_frame:
            return [os.path.join(sample_dir, data['key_frame'])]
        return [os.path.join(sample_dir, name) for name in os.listdir(sample_dir)]

    def build(json_file, resample):
        # Expand annotations into [path, binary_label] entries.
        anno_list = []
        for data in json_file:
            label = int(data['status'] != 0)
            repeats = 3 if (resample and data['status'] in (1, 2)) else 1
            for path in frame_paths(data):
                for _ in range(repeats):
                    anno_list.append([path, label])
        return anno_list

    with open(json_dir.replace('.json', '_train.json')) as f:
        json_file_train = json.load(f)
    with open(json_dir.replace('.json', '_val.json')) as f:
        json_file_val = json.load(f)
    anno_list_train = build(json_file_train, True)
    anno_list_val = build(json_file_val, False)
    print('use binary list,get num of train--',len(anno_list_train),'--get num of val --',len(anno_list_val))
    return anno_list_train,anno_list_val


def get_binary_list_stage2(image_path = '../final_train',json_dir = '../label/final.json',only_key_frame = False):
    """Build (train, val) lists for the stage-2 binary model.

    Samples with status 0 are discarded; status 1 maps to label 0 and
    status 2 maps to label 1. TRAIN entries with status 1 get two extra
    copies (tripled); validation entries are not resampled.

    Fixes: the split JSON files were opened without being closed, and the
    resampling logic was duplicated; both are consolidated.
    """
    def frame_paths(data):
        # Paths of the frames used for one annotated sample.
        sample_dir = os.path.join(image_path, data['id'])
        if only_key_frame:
            return [os.path.join(sample_dir, data['key_frame'])]
        return [os.path.join(sample_dir, name) for name in os.listdir(sample_dir)]

    def build(json_file, resample):
        # Expand annotations into [path, binary_label] entries, skipping status 0.
        anno_list = []
        for data in json_file:
            if data['status'] == 0:
                continue
            label = int(data['status'] != 1)
            repeats = 3 if (resample and data['status'] == 1) else 1
            for path in frame_paths(data):
                for _ in range(repeats):
                    anno_list.append([path, label])
        return anno_list

    with open(json_dir.replace('.json', '_train.json')) as f:
        json_file_train = json.load(f)
    with open(json_dir.replace('.json', '_val.json')) as f:
        json_file_val = json.load(f)
    anno_list_train = build(json_file_train, True)
    anno_list_val = build(json_file_val, False)
    print('use binary list,get num of train--',len(anno_list_train),'--get num of val --',len(anno_list_val))
    return anno_list_train,anno_list_val


def concat_data(data_list,key_frame,down_sample=1,concat = 0):
    """Concatenate a list of preprocessed images into one array.

    Modes (selected via ``concat``):
      - 'sample': randomly pick 3 images, stack them vertically (axis 0),
        then resize by 1/down_sample in each dimension.
      - 2: pad the list to 5 entries with copies of the key frame, then
        tile the FIRST FOUR frames as a 2x2 grid. No resize in this mode.
      - 0 or 1: pad to 5 entries the same way, concatenate along axis 0
        (vertical) or axis 1 (horizontal), then resize by 1/down_sample.

    ``key_frame`` is 1-based in the incoming list, hence the -1 below.
    Missing frames are filled with copies of the key frame so the output
    size is consistent across samples.
    """
    if concat == 'sample':
        picked = random.sample(data_list, 3)
        stacked = np.concatenate(picked, axis=0)
        return cv2.resize(
            stacked,
            (int(stacked.shape[1] / down_sample), int(stacked.shape[0] / down_sample)),
        )

    idx = key_frame - 1  # convert 1-based key frame index to 0-based
    padding = [data_list[idx]] * (5 - len(data_list))
    frames = data_list[:idx] + padding + data_list[idx:]
    if concat == 2:
        # 2x2 grid: only the first four frames are used; no down-sampling.
        top_row = np.concatenate(frames[:2], axis=1)
        bottom_row = np.concatenate(frames[2:4], axis=1)
        return np.concatenate([top_row, bottom_row], axis=0)
    # axis 0 stacks vertically, axis 1 horizontally.
    stacked = np.concatenate(frames, axis=concat)
    return cv2.resize(
        stacked,
        (int(stacked.shape[1] / down_sample), int(stacked.shape[0] / down_sample)),
    )









if __name__ == '__main__':
    # Ad-hoc smoke test for concat_data, kept for reference:
    # data_list = []
    # for i in range(3):
    #     data_list.append(np.random.rand(32,64,3))
    # data = concat_data(data_list,key_frame=2)
    # print(data.shape)
    # Regenerate the train/val split JSON files from the full annotation file.
    split_json(json_dir='../label/final.json')
    # get_data_list(only_key_frame=True)
    pass