import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json

'''
Split the annotation file cleaned by data_clean into train/validation sets.

The split produces three datasets:
train
validate
mini_validate  a small validation set of only 100 images (a subset of the
               validate set), used for quick testing
'''


# Dataset layout on disk (Windows-style paths).
root_path = 'D:\\datasets\\chongqing'
train_path = root_path + '\\chongqing1_round1_train1_20191223'
train_img_path = train_path + '\\images\\'

# Only the annotation file cleaned by data_clean is used.
# (The original code first assigned the raw 'annotations.json' path and then
# immediately overwrote it; that dead assignment has been removed.)
train_json_file = train_path + '\\clean_annotations.json'


def create_train_val_json(annot_json, train_size=1000, size='all'):
    """Split a COCO-style annotation dict into train / val / mini-val subsets.

    Args:
        annot_json: COCO-style dict with at least 'images' (each entry having
            'id' and 'width') and 'annotations' (each having 'image_id').
        train_size: number of image ids randomly drawn for the train split
            (drawn before width filtering, so a 'small'/'big' filter can make
            the train split smaller than train_size).
        size: 'small' keeps only images with width <= 658, 'big' keeps only
            images with width > 658, anything else ('all') keeps every image.

    Returns:
        Tuple ``(train_annot, val_annot, mini100_val_annot)`` of shallow
        copies of annot_json whose 'images' and 'annotations' are restricted
        to each split.  mini100_val_annot holds at most the first 100 images
        of the val split, for quick testing.
    """
    img_ids = [img['id'] for img in annot_json['images']]
    print('image count: {}'.format(len(img_ids)))

    # Random, non-repeating draw of train image ids.  A set makes the
    # per-image membership test O(1) instead of O(train_size) per image.
    train_ids = set(np.random.choice(img_ids, train_size, replace=False))

    def _keep(img):
        # Width filter: 658 px is the threshold separating the small
        # (bottle-cap) images from the big (bottle-body) images.
        if size == 'small':
            return img['width'] <= 658
        if size == 'big':
            return img['width'] > 658
        return True

    def _subset(imgs):
        # Build one split: a shallow copy of annot_json restricted to `imgs`
        # and to the annotations referencing them (set lookup keeps the
        # annotation scan O(len(annotations))).
        kept_ids = {img['id'] for img in imgs}
        subset = annot_json.copy()
        subset['images'] = list(imgs)
        subset['annotations'] = [ann for ann in annot_json['annotations']
                                 if ann['image_id'] in kept_ids]
        return subset

    train_imgs = [img for img in annot_json['images']
                  if img['id'] in train_ids and _keep(img)]
    val_imgs = [img for img in annot_json['images']
                if img['id'] not in train_ids and _keep(img)]

    # Mini validation set: the first 100 val images.  (The original loop only
    # broke once its counter exceeded 100 and therefore collected 101 images;
    # capped here at exactly 100 as documented.)
    mini_imgs = val_imgs[:100]

    return _subset(train_imgs), _subset(val_imgs), _subset(mini_imgs)

# main: load the cleaned annotation file produced by data_clean.
# (The original had a bare `annot_json.keys()` expression here — a no-op
# notebook leftover — which has been removed.)
with open(train_json_file) as f:
    annot_json = json.load(f)

# Build the dataset that contains only the small images (bottle caps).
train_small_annot, val_small_annot, mini100_val_small_annot = create_train_val_json(
    annot_json, train_size=2400, size='small')

# Persist each split as JSON next to the source annotation file.
_small_outputs = (
    ("\\train_small_annot.json", train_small_annot),
    ("\\val_small_annot.json", val_small_annot),
    ("\\mini100_val_small_annot.json", mini100_val_small_annot),
)
for _name, _split in _small_outputs:
    with open(train_path + _name, "w") as _out:
        json.dump(_split, _out)

# Summary of the small-image split sizes.
print('============small images only ===============')
print('train images: {} train annotations:{}  val images:{} val annotations:{}'.format(
    len(train_small_annot['images']),
    len(train_small_annot['annotations']),
    len(val_small_annot['images']),
    len(val_small_annot['annotations'])))

# Build the dataset containing both big and small images (bottle caps and bodies).
train_all_annot, val_all_annot, mini100_val_all_annot = create_train_val_json(
    annot_json, train_size=2400, size='all')

# Persist each split as JSON next to the source annotation file.
_all_outputs = (
    ("\\train_all_annot.json", train_all_annot),
    ("\\val_all_annot.json", val_all_annot),
    ("\\mini100_val_all_annot.json", mini100_val_all_annot),
)
for _name, _split in _all_outputs:
    with open(train_path + _name, "w") as _out:
        json.dump(_split, _out)

# Summary of the full split sizes.
print('============all images ===============')
print('train images: {} train annotations:{}  val images:{} val annotations:{}'.format(
    len(train_all_annot['images']),
    len(train_all_annot['annotations']),
    len(val_all_annot['images']),
    len(val_all_annot['annotations'])))