"""
https://blog.csdn.net/weixin_41424926/article/details/105383064

动手学习深度学习pytorch版——从零开始实现YOLOv1

instants:
many obj:
    x39: train, idx 14254: 2009_004062.jpg
    x28: train, idx 6011: 2011_002810.jpg
    x10: train idx 5029: 2007_003207.jpg

problem:
2 diff obj in the same grid
    train, idx 1: 2008_005431.jpg
    train idx 15: 2008_008755.jpg

obj with property "difficult"
    train idx 5: 2010_001640.jpg
    train idx 7: 2010_001680.jpg

new problem:
    train idx 14255: 2008_002738.jpg
    train idx 5029: 2007_003207.jpg
    2008_003478.jpg
"""
import os
import numpy as np
import cv2 as cv
from python_ai.CV_3.project.yolo.csdn_pt_4_car_person import prepare_data
# from ...csdn_pt_4_car_person import prepare_data
from python_ai.CV_3.project.yolo.csdn_pt_4_car_person.prepare_data import GL_FLATTEN_DEPTH
from python_ai.common.xcommon import *
import xml.etree.ElementTree as ET

# Root of the prepared VOC-style dataset (directory layout documented below).
# Alternative roots kept for quickly switching between dataset variants:
# STATIC_DATASET_PATH = r'../../../../../../large_data/_very_large/VOCtrainval_11-May-2012/VOCdevkit/VOC2012'
# STATIC_DATASET_PATH = r'../../../../../../large_data/_very_large/VOCtrainval_11-May-2012/VOCdevkit/VOC2012_mini'
# STATIC_DATASET_PATH = r'../../../../../../large_data/_very_large/VOCtrainval_11-May-2012/VOCdevkit/VOC2012_mid'
# STATIC_DATASET_PATH = r'../../../../../../large_data/_very_large/VOCtrainval_11-May-2012/VOCdevkit/VOC2012_diag'
# NOTE(review): mixed '/' and '\' separators -- this path only works on Windows.
STATIC_DATASET_PATH = r'../../../../../../large_data\CV3\_many_files\yolov1_person_car'
# Which split's txt/csv pair to visualize.
TRAIN_OR_TEST = 'test'
# TRAIN_OR_TEST = 'train'
np.random.seed(1)  # deterministic colors / random offsets across runs
offset = 6  # NOTE(review): appears unused -- check_it() is always called with user input
spr = 2  # sub-plot rows in the display grid
spc = 3  # sub-plot columns in the display grid
"""
.
├── Annotations                 source of annotations
├── ImageSets
│   ├── Action
│   ├── Layout
│   ├── Main
│   └── Segmentation
├── JPEGImages                  source of pictures
├── labels                      dest txt files format: cls and x,y,w,h
                                (1) Annotations => labels
                                (2) labels => labels for padding and scaling
├── SegmentationClass
├── SegmentationObject
└── voc2012_forYolov1           dest: csv and txt for train/test
                                csv format: (x,y,w,h,C)x2 + one_hotX20
    └── img                     dest: padded and scaled pictures

"""


sep('Load labels')

# The split's image-path list (txt) and flattened label matrix (csv) live
# side by side under voc2012_forYolov1.
LABEL_DIR = os.path.join(STATIC_DATASET_PATH, 'voc2012_forYolov1')
txt_path = os.path.join(LABEL_DIR, TRAIN_OR_TEST + '.txt')
csv_path = os.path.join(LABEL_DIR, TRAIN_OR_TEST + '.csv')

print('Loading path ...')
with open(txt_path, 'r') as f:
    path_data = f.readlines()
print('Path loaded.')

print('Loading labels csv ...')
label_data = np.loadtxt(csv_path)
print(label_data.shape)
if label_data.ndim == 1:
    # A single-sample csv loads as a 1-D vector; promote it to one row.
    label_data = label_data[np.newaxis, :]
print(label_data.shape)
M, N = label_data.shape  # M samples, N flattened label values per sample
print('label_data:', M, N)

sep('Check it')


def _read_image(path):
    """Load an image via cv.imread, failing loudly instead of returning None."""
    img = cv.imread(path, cv.IMREAD_COLOR)
    if img is None:
        # cv.imread silently returns None on a missing/corrupt file; the old
        # code then crashed with an opaque AttributeError at .shape.
        raise FileNotFoundError(f'cannot read image: {path}')
    return img


def _draw_processed_boxes(img, label_row):
    """Decode one flattened per-grid label row and draw its boxes onto img."""
    CELL_LEN = GL_FLATTEN_DEPTH
    for j in range(prepare_data.GL_NUMGRID * prepare_data.GL_NUMGRID):
        label_cell = label_row[j * CELL_LEN:(j + 1) * CELL_LEN]
        # label_cell[4] is the objectness/confidence slot; 0 means empty cell.
        if np.isclose(label_cell[4], 0):
            continue

        # label_cell[10:] is the one-hot class vector (2 boxes x 5 come first,
        # per the "(x,y,w,h,C)x2 + one_hot" csv format noted at file top).
        idx_cls = np.argmax(label_cell[10:])
        cls = prepare_data.GL_CLASSES[idx_cls]
        pt1, pt2 = xywh2pts(label_cell[0], label_cell[1], label_cell[2], label_cell[3], j)
        print(f'processed: name_cls: {cls}, idx_cls: {idx_cls}, pt1: {pt1}, pt2: {pt2}')

        cv.rectangle(img, pt1, pt2, rand_color(), 2)
        cv.putText(img, cls, (pt1[0], pt1[1] + 15), cv.FONT_HERSHEY_PLAIN, 1.5, rand_color(), 2)


def _draw_annotation_boxes(ori_img, anno_path):
    """Parse a VOC annotation XML and draw its non-difficult boxes onto ori_img."""
    with open(anno_path, 'r') as f:
        tree = ET.parse(f)
    root = tree.getroot()
    for obj in root.iter('object'):
        # Some annotations may omit <difficult>; treat missing as not difficult.
        diff_node = obj.find('difficult')
        difficult = diff_node.text if diff_node is not None else '0'
        cls = obj.find('name').text
        if cls not in prepare_data.GL_CLASSES or int(difficult) == 1:
            continue
        # BUGFIX: idx_cls was previously the stale leftover from the processed
        # label loop (and undefined if no grid cell was occupied); derive it
        # from the current object's class name instead.
        idx_cls = prepare_data.GL_CLASSES.index(cls)
        xmlbox = obj.find('bndbox')
        pt1 = (int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text))
        pt2 = (int(xmlbox.find('xmax').text), int(xmlbox.find('ymax').text))
        print(f'original: name_cls: {cls}, idx_cls: {idx_cls}, pt1: {pt1}, pt2: {pt2}')

        cv.rectangle(ori_img, pt1, pt2, rand_color(), 2)
        cv.putText(ori_img, cls, (pt1[0], pt1[1] + 15), cv.FONT_HERSHEY_PLAIN, 1.5, rand_color(), 2)


def _fit_to_screen(img):
    """Uniformly scale img to roughly fit 1600x800 (1600x900 incl. task bar)."""
    H, W = img.shape[:2]
    # Scale by whichever dimension is the binding constraint.
    if H / W > 9/16:
        rate = 800 / H
    else:
        rate = 1600 / W
    return cv.resize(img, None, None, rate, rate, cv.INTER_CUBIC)


def check_it(offset):
    """Display an spr x spc mosaic of samples starting at row `offset`.

    Two windows are shown for visual comparison:
      'Processed' -- padded/scaled images with boxes decoded from the
                     flattened label rows (the csv the trainer consumes).
      'Original'  -- untouched JPEGImages with boxes re-read from the VOC
                     Annotations XMLs.

    offset: start index into label_data/path_data (wraps modulo M);
            None picks a random row.
    Blocks until a key is pressed in the OpenCV windows.
    """
    sep()
    if offset is None:
        offset = np.random.randint(0, M)
    print('offset:', offset)
    i = -1
    result_img_arr = []  # one concatenated row of processed images per entry
    ori_img_arr = []     # one concatenated row of original images per entry
    max_w = 0            # widest original row seen so far
    for row in range(spr):
        row_img_arr = []
        ori_row_img_arr = []
        max_h = 0        # tallest original image in this row
        for col in range(spc):
            i += 1
            idx = (i + offset) % M
            print('idx:', idx)

            img_path = ('../' + path_data[idx]).strip()
            print(f'path=|{img_path}|')
            img_name = os.path.split(img_path)[1]
            ori_img_path = os.path.join(STATIC_DATASET_PATH, 'JPEGImages', img_name)
            anno_path = os.path.join(STATIC_DATASET_PATH, 'Annotations', os.path.splitext(img_name)[0] + '.xml')
            img = _read_image(img_path)
            ori_img = _read_image(ori_img_path)
            max_h = max(max_h, ori_img.shape[0])

            _draw_processed_boxes(img, label_data[idx])
            _draw_annotation_boxes(ori_img, anno_path)

            row_img_arr.append(img)
            ori_row_img_arr.append(ori_img)

        # Top-pad each original image to the row's max height so the row can
        # be concatenated horizontally (processed images share a fixed size).
        for k, ori_img in enumerate(ori_row_img_arr):
            pad_h = max_h - ori_img.shape[0]
            ori_row_img_arr[k] = np.pad(ori_img, ((pad_h, 0), (0, 0), (0, 0)), 'constant')

        row_img = np.concatenate(row_img_arr, axis=1)
        ori_row_img = np.concatenate(ori_row_img_arr, axis=1)
        max_w = max(max_w, ori_row_img.shape[1])

        result_img_arr.append(row_img)
        ori_img_arr.append(ori_row_img)

    # Left-pad each original row to the global max width, then stack the rows.
    for k, ori_row_img in enumerate(ori_img_arr):
        pad_w = max_w - ori_row_img.shape[1]
        ori_img_arr[k] = np.pad(ori_row_img, ((0, 0), (pad_w, 0), (0, 0)), 'constant')

    result_img = _fit_to_screen(np.concatenate(result_img_arr, axis=0))
    ori_result_img = _fit_to_screen(np.concatenate(ori_img_arr, axis=0))

    cv.imshow('Processed', result_img)
    cv.imshow('Original', ori_result_img)
    cv.waitKey(0)
    cv.destroyAllWindows()


# Interactive driver: keep prompting for a start offset until the user
# enters a negative number; every valid offset triggers one visualization.
while True:
    print(f'Please input offset to start: (Will check {spr}x{spc} pictures, offset from 0-{max(0, M-spr*spc)}, negative input for quit.)')
    raw = input()
    try:
        start = int(raw)
    except ValueError as err:
        print(err)
        print('Please re-input.')
    else:
        if start < 0:
            break
        check_it(start)

sep('Over')
