# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name：     datalist
   Description :   
   Author :       lth
   date：          2022/1/27
-------------------------------------------------
   Change Activity:
                   2022/1/27 13:35: create this script
-------------------------------------------------
This script provides the dataset class used to load and transform the training data.
"""
__author__ = 'lth'

import random

import cv2
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import transforms

from utils import PostProcess

# Shared post-processing helper.
# NOTE: this rebinding shadows the imported PostProcess *class* under the
# same module-level name; kept as-is because other modules access it here.
PostProcess = PostProcess()

# Both pipelines map PIL RGB images to tensors normalised into [-1, 1].
_to_normalized_tensor = [
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
]
train_transform = transforms.Compose(_to_normalized_tensor)
test_transform = transforms.Compose(_to_normalized_tensor)

# Per-class metadata: output-channel id and RGB colour used for visualisation.
labels = {
    "visible vertical line": {'id': 3, 'color': [255, 255, 255]},
    "invisible horizontal line": {'id': 0, 'color': [0, 255, 0]},
    "invisible vertical line": {'id': 1, 'color': [0, 0, 255]},
    "visible horizontal line": {'id': 2, 'color': [255, 0, 2]},
}

# Side length (pixels) of every training crop / resized sample.
image_size = 800

# Channel order used when stacking targets and decoding predictions.
mode = ["visible vertical line", "visible horizontal line",
        "invisible vertical line", "invisible horizontal line"]


class UnetTableData(Dataset):
    """Dataset yielding (image, distance-transform targets, line masks) for
    U-Net table-line segmentation.

    Each sample is built from an image path in ``data``; the matching label
    images are derived by substituting ``image`` in the path with ``label1``
    (visible lines) and ``label2`` (invisible lines).
    """

    def __init__(self, data, mode="train"):
        super(UnetTableData, self).__init__()
        self.mode = mode  # "train"/"test" flag (currently unused in __getitem__)
        self.data = data  # list of image file paths
        self.label1 = self.get_images("label1", data)  # visible-line label paths
        self.label2 = self.get_images("label2", data)  # invisible-line label paths

    def __getitem__(self, index):
        """Return (transformed image tensor, stacked float targets, stacked bool masks)."""
        img = Image.open(self.data[index]).convert("RGB")

        kernel = np.ones((5, 5), np.uint8)
        # A missing label file means "no lines of this kind" -> all-zero map.
        target1 = cv2.imread(self.label1[index], cv2.IMREAD_GRAYSCALE)
        if target1 is None:
            target1 = np.zeros([img.height, img.width])

        target2 = cv2.imread(self.label2[index], cv2.IMREAD_GRAYSCALE)
        if target2 is None:
            target2 = np.zeros([img.height, img.width])

        # Crop instead of resize so thin line segments are not lost while
        # downscaling.  NOTE(review): `random.random() > 0` is (almost) always
        # true, so the resize branch is effectively dead — kept for parity
        # with the original training setup.
        if random.random() > 0:
            # Clamp the crop origin so the NumPy label slices stay inside the
            # label maps: PIL zero-pads out-of-bounds crops, NumPy slicing
            # does not, and mismatched shapes crashed np.stack() below for
            # images smaller than 2 * image_size.
            start_x = random.randint(0, min(image_size, max(0, img.width - image_size)))
            start_y = random.randint(0, min(image_size, max(0, img.height - image_size)))
            img = train_transform(img.crop((start_x, start_y,
                                            start_x + image_size, start_y + image_size)))
            target1 = self._crop_pad(target1, start_y, start_x)
            target2 = self._crop_pad(target2, start_y, start_x)
        else:
            img = train_transform(img.resize((image_size, image_size)))
            # cv2.resize expects dsize as a (width, height) tuple.
            target1 = cv2.resize(target1, (image_size, image_size))
            target2 = cv2.resize(target2, (image_size, image_size))

        target1 = target1.astype(np.uint8)
        target2 = target2.astype(np.uint8)

        # Dilated masks mark the neighbourhood of each line; used both for the
        # per-line normalisation inside _distance_target and as loss masks.
        mask1 = cv2.dilate(target1, kernel, iterations=2)
        mask2 = cv2.dilate(target2, kernel, iterations=2)

        _, target1 = self.encode_label_map(target1)
        _, target2 = self.encode_label_map(target2)

        target1 = self._distance_target(target1, mask1)
        target2 = self._distance_target(target2, mask2)

        temp = np.zeros([image_size, image_size])
        #      visible vline,invisible vline,visible hline,invisible hline
        target = np.stack([target1, target2, temp, temp])
        # np.bool was removed in NumPy 1.24 — use the builtin bool dtype.
        mask = np.stack([mask1.astype(bool), mask2.astype(bool),
                         temp.astype(bool), temp.astype(bool)])

        return img, target, mask

    def __len__(self):
        return len(self.data)

    @staticmethod
    def _crop_pad(arr, start_y, start_x):
        """Slice an image_size x image_size window; zero-pad if the source is smaller."""
        patch = arr[start_y:start_y + image_size, start_x:start_x + image_size]
        if patch.shape != (image_size, image_size):
            padded = np.zeros((image_size, image_size), dtype=patch.dtype)
            padded[:patch.shape[0], :patch.shape[1]] = patch
            patch = padded
        return patch

    @staticmethod
    def _distance_target(binary_map, mask):
        """Distance-transform the binary line map, then normalise each line
        region (bounding box of a contour in *mask*) into the [0.5, 1] range."""
        dist = cv2.distanceTransform(binary_map, cv2.DIST_L2, maskSize=5)
        contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            region = dist[y:y + h, x:x + w]
            # 1e-7 guards against division by zero on all-zero regions.
            dist[y:y + h, x:x + w] = np.clip(region / (np.max(region) + 1e-7), 0.5, 1)
        return dist

    @staticmethod
    def encode_label_map(target):
        """Binarise *target*: every non-zero pixel becomes 1.

        Returns the cv2.threshold pair (retval, binary map).
        """
        return cv2.threshold(target, 0, 1, 0)

    @staticmethod
    def decode_label_map(pred, threshold=0.5):
        """Turn per-channel logits into RGB visualisation maps.

        Applies sigmoid, thresholds each channel, and paints pixels with the
        colour assigned to that channel's class in the module-level `labels`.
        """
        pred = torch.sigmoid(pred)
        pred = pred.cpu().numpy()
        new_mask = np.zeros((*pred.shape, 3), dtype=np.uint8)
        for index, c in enumerate(pred):
            # np.where on a 2-D array returns (row_indices, col_indices).
            rows, cols = np.where(c >= threshold)
            new_mask[index, rows, cols] = labels[mode[index]]['color']
        return new_mask

    @staticmethod
    def get_images(attribute, data, name=None):
        """Derive label paths from image paths.

        Replaces every occurrence of 'image' in each path with *attribute*;
        if *name* is given it is appended to the stem before the '.png'
        extension.
        """
        label = []
        for d in data:
            t = d.replace("image", attribute)
            t = t.split(".png")
            if name is None:
                t = t[0] + ".png"
            else:
                t = t[0] + name + ".png"
            label.append(t)

        return label
