# -*- coding: utf-8 -*-
#
# Copyright (C) 2023 - 2023 linchen, Inc. All Rights Reserved 
#
# @Time    : 2023/4/26 16:43
# @Author  : linchen
# @File    : check_dataset.py
# @IDE     : PyCharm

import json
import os
import shutil
from pathlib import Path
import cv2
import numpy as np
from dataset.dataloader import CocoDataset, SimpleDetDataset, VOCDataset
from util.metrics import min_NMS
import torch

def check_coco(coco_dataset):
    """Visualize COCO ground-truth boxes: draw every annotation onto its
    image and write the result to ./check/<index>.jpg for manual review.

    Args:
        coco_dataset: root directory of the COCO-format dataset.
    """
    batch_size = 1
    if not os.path.exists(coco_dataset):
        print('dataset {} not exists'.format(coco_dataset))
        return
    os.makedirs('check', exist_ok=True)  # make sure the output dir exists
    dataset = CocoDataset(coco_dataset, set_name='train',
                                batch=batch_size,
                                )
    for i in range(len(dataset)):
        image, annot = dataset[i]
        image = image.permute(1, 2, 0)  # CHW -> HWC for OpenCV
        image = image.numpy()
        annot = annot.numpy()
        bbox = annot[:, 2:]  # assumes box coords start at column 2 -- TODO confirm layout

        for j in range(bbox.shape[0]):
            # np.int was removed in NumPy 1.24; the builtin int works everywhere
            box = bbox[j, :].astype(int)
            image = cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (124, 36, 75), 1)
        cv2.imwrite(os.path.join('check', '{}.jpg'.format(i)), image)

def check_simpledet(simple_dataset):
    """Visualize SimpleDet ground-truth boxes: draw each box, brighten its
    interior, and write the result to ./check/<index>.jpg.

    Args:
        simple_dataset: root directory of the SimpleDet-format dataset.
    """
    batch_size = 1
    if not os.path.exists(simple_dataset):
        print('dataset {} not exists'.format(simple_dataset))
        return
    os.makedirs('check', exist_ok=True)  # make sure the output dir exists
    dataset = SimpleDetDataset(simple_dataset, batch=batch_size, annotation_file='all_mix.txt', resize=(640, 640)
                                )
    print(len(dataset))
    for i in range(len(dataset)):
        image, annot = dataset[i]
        image = image.permute(1, 2, 0)  # CHW -> HWC for OpenCV
        image = image.numpy() * 255  # assumes pixels are in [0,1] -- TODO confirm
        image = image.astype(np.uint8)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        annot = annot.numpy()
        bbox = annot[:, 2:]  # assumes box coords start at column 2 -- TODO confirm layout

        for j in range(bbox.shape[0]):
            # np.int was removed in NumPy 1.24; the builtin int works everywhere
            box = bbox[j, :].astype(int)
            image = cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (124, 36, 75), 2)
            # brighten the box interior so it stands out; widen to int64 first
            # to avoid uint8 overflow, then clip back into valid pixel range
            image = image.astype(np.int64)
            image[box[1]:box[3], box[0]:box[2], :] += np.array([50, 50, 50])
            image = np.clip(image, a_min=0, a_max=255)
            image = image.astype(np.uint8)
        cv2.imwrite(os.path.join('check', '{}.jpg'.format(i)), image)

def check_voc(voc_dataset):
    """Visualize VOC ground-truth boxes: draw every box plus its class name
    and write the result to ./check/<index>.jpg.

    Args:
        voc_dataset: path to the VOCdevkit root directory.
    """
    if not os.path.exists(voc_dataset):
        print('dataset {} not exists'.format(voc_dataset))
        return
    os.makedirs('check', exist_ok=True)  # make sure the output dir exists
    dataset = VOCDataset(voc_dataset, resize=(640, 640))
    color = (124, 36, 75)
    for i in range(len(dataset)):
        image, annot = dataset[i]
        image = image.permute(1, 2, 0)  # CHW -> HWC for OpenCV
        image = image.numpy()
        image = image.astype(np.uint8)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        annot = annot.numpy()
        bbox = annot[:, 1:]  # assumes columns [?, cls, x1, y1, x2, y2] -- TODO confirm
        for j in range(bbox.shape[0]):
            # np.int was removed in NumPy 1.24; the builtin int works everywhere
            box = bbox[j, :].astype(int)
            image = cv2.rectangle(image, (box[1], box[2]), (box[3], box[4]), color, 2)
            image = cv2.putText(image, dataset.classes[box[0]], (box[1], box[2] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
        cv2.imwrite(os.path.join('check', '{}.jpg'.format(i)), image)

def translate2jpg_simpledet(simple_dataset):
    """Convert every image referenced by train.txt / val.txt to .jpg.

    Non-jpg images are re-encoded with OpenCV and the originals deleted;
    the annotation files are rewritten so their paths point at the .jpg
    versions. Lines whose image is missing or unreadable are dropped.

    Args:
        simple_dataset: dataset root holding train.txt / val.txt and images.
    """
    if not os.path.exists(simple_dataset):
        print('dataset {} not exists'.format(simple_dataset))
        return
    for file in ('train.txt', 'val.txt'):
        file_path = os.path.join(simple_dataset, file)
        with open(file_path, 'r') as f:
            lines = [l.strip() for l in f.readlines()]
        new_lines = []
        for l in lines:
            # line layout: "<relative image path> <annot> <annot> ..."
            # Only rewrite the path part; the old str.replace(suffix, '.jpg')
            # replaced every occurrence of the suffix anywhere in the line
            # (and inserted '.jpg' everywhere when the suffix was empty).
            parts = l.split(' ', 1)
            image_path = os.path.join(simple_dataset, parts[0])
            suffix = Path(image_path).suffix
            jpgimage_path = str(Path(image_path).with_suffix('.jpg'))
            new_line = ' '.join([str(Path(parts[0]).with_suffix('.jpg'))] + parts[1:])
            if os.path.exists(jpgimage_path):
                # already converted (or was .jpg to begin with)
                new_lines.append(new_line)
                continue

            if not os.path.exists(image_path):
                continue  # image missing entirely: drop the annotation line
            img = cv2.imread(image_path)
            if img is None:
                continue  # unreadable/corrupt image: drop it rather than crash

            if suffix != '.jpg':
                cv2.imwrite(jpgimage_path, img)
                os.remove(image_path)
            new_lines.append(new_line)
        with open(file_path, 'w') as f:
            f.writelines(line + '\n' for line in new_lines)

def simple_dataset_annotation_filter(dataset_path, file='train.txt'):
    """Filter a SimpleDet annotation file: drop boxes smaller than 20px in
    either dimension, suppress heavily-overlapping boxes via min_NMS, and
    write the survivors to ``filtered_<file>`` next to the original.

    Args:
        dataset_path: directory containing the annotation file.
        file: annotation file name inside dataset_path.
    """
    with open(os.path.join(dataset_path, file)) as f:
        annot_lines = [l.strip() for l in f.readlines()]
    min_size = 20
    print('get {} annotations before filter'.format(len(annot_lines)))
    filtered_lines = []
    for line in annot_lines:
        annots = line.split(' ')
        if len(annots) <= 1:
            continue  # image with no boxes: drop the whole line
        # each annotation token is "cls,x1,y1,x2,y2"
        labels = torch.tensor([[int(x) for x in annot.split(',')] for annot in annots[1:]])
        sizes = torch.stack((labels[:, 3] - labels[:, 1], labels[:, 4] - labels[:, 2]), -1)
        # keep boxes whose width AND height are both >= min_size
        mask = sizes.ge(min_size).sum(dim=-1).ge(2)
        if int(mask.sum()) == 0:
            continue
        labels = labels[mask]
        sizes = sizes[mask]
        bboxes = labels[:, 1:]
        cls_info = labels[:, 0]
        areas = sizes[:, 0] * sizes[:, 1]
        scores = areas / areas.max()  # larger boxes get higher pseudo-scores
        boxes = torch.cat((bboxes, scores.unsqueeze(-1), cls_info.unsqueeze(-1)), -1)

        if boxes.shape[0] > 1:
            bboxes, scores, cls_info = min_NMS(boxes, iou_threshold=0.7, scores_threshold=1e-4)

        bbox = torch.cat((bboxes, scores.unsqueeze(-1), cls_info.unsqueeze(-1)), -1)
        filtered_line = annots[0]
        for box in bbox:
            filtered_line += ' {},{},{},{},{}'.format(int(box[-1]), int(box[0]), int(box[1]), int(box[2]), int(box[3]))
        filtered_lines.append(filtered_line)
    print('get {} annotations after filter'.format(len(filtered_lines)))
    filter_file = os.path.join(dataset_path, 'filtered_' + file)
    with open(filter_file, 'w') as f:
        f.writelines(line + '\n' for line in filtered_lines)
            

# Uncomment the call you need; each visualizes or rewrites one dataset.
# check_coco('China_MotorBike_coco')
# check_coco('cat')
# check_simpledet('../dataset/aisafety/')
# translate2jpg_simpledet('../dataset/aisafety_jpg/')
# check_simpledet('../dataset/aichallenge')
# check_voc('../dataset/strawberry/VOCdevkit')
# simple_dataset_annotation_filter('../dataset/SYNTHIA', 'val.txt')
# simple_dataset_annotation_filter('../dataset/Phase2_trainset', 'SHIFT_total.txt')
# check_voc('../dataset/VOCdevkit')

if __name__ == '__main__':
    # start from a clean output dir; ignore_errors keeps the first run from
    # crashing when 'check' does not exist yet (bare rmtree would raise)
    shutil.rmtree('check', ignore_errors=True)
    os.mkdir('check')
    check_simpledet('../dataset/Phase2_trainset')