# -*- coding: utf-8 -*-
# @Time : 2020/10/30 19:00
# @Author : wudeyang
# @email :wudeyang@sjtu.edu.cn
# @Description:

import tqdm

import torch
import torch.utils.data as data

from torch.utils.data import  DataLoader
import scipy.io as scio
from utils.util import GaussianTransformer
from utils.util import show_img
from Data_Loader import augment
import matplotlib.pyplot as plt
import re
import itertools
from file_utils import *

from PIL import Image
import torchvision.transforms as transforms
import craft_utils




class Synth80k(data.Dataset):
    """SynthText (80k) dataset yielding (image, binary char mask, gaussian heat map).

    Ground-truth ``gt.mat`` layout (from the SynthText release):

    * ``imnames``: names of the image files.
    * ``wordBB``: word-level bounding boxes per image, tensors of size
      2 x 4 x NWORDS_i, where dim 0 is x/y, dim 1 is the 4 corner points
      (clockwise, starting from top-left) and dim 2 indexes the words in
      the i-th image.
    * ``charBB``: character-level bounding boxes, same format as ``wordBB``,
      each of size 2 x 4 x NCHARS_i.
    * ``txt``: text strings contained in each image (char array).
    """

    def __init__(self, synthtext_folder, input_size=(450, 600), transform=None):
        """
        :param synthtext_folder: root folder holding ``gt.mat`` and the images
        :param input_size: training crop size as (h, w)
        :param transform: optional torchvision transform applied to the PIL image
        """
        self.datapath = synthtext_folder
        self.transform = transform
        self.input_size = input_size

        gt = scio.loadmat(os.path.join(synthtext_folder, 'gt.mat'))
        self.charbox = gt['charBB'][0]
        self.image = gt['imnames'][0]

        self.gaussian = GaussianTransformer(Gaussian_img_size=512,
                                            region_threshold=0.3, distanceRatio=3.34,
                                            create=True,
                                            gauss_map_path='/home/wudeyang/code/CRAFT-pytorch/utils/image/standard.jpg')

    def __getitem__(self, index):
        """Load one sample: the image plus its character mask and gaussian map."""
        im = cv2.imread(os.path.join(self.datapath, self.image[index][0]))
        img, mask_map, gauss_map = self.image_label(im, text_labels=self.charbox[index])
        img = Image.fromarray(img)
        if self.transform:
            img = self.transform(img)

        return img, mask_map, gauss_map

    def __len__(self):
        return len(self.image)

    def image_label(self, img, text_labels):
        """Build the binary character mask and gaussian map, then resize and crop.

        :param img: input image, H x W x 3
        :param text_labels: character boxes, shape (2, 4, num) as stored in gt.mat
        :return: (img, mask_map, gauss_map), each cropped to ``self.input_size``
        """
        h, w, _ = img.shape
        mask_map = np.zeros((h, w), dtype=np.uint8)

        # FIX: np.int was removed in NumPy 1.24; cv2.fillPoly also requires
        # int32 (CV_32S) point arrays, so cast explicitly.
        text_labels = text_labels.astype(np.int32)
        text_labels = text_labels.transpose(2, 1, 0)  # -> (num, 4, 2) polygons
        cv2.fillPoly(mask_map, text_labels, 1)

        gauss_map = self.gaussian.generate_region((w, h), text_labels)

        # Scale so that BOTH sides reach at least input_size (h, w) while
        # keeping aspect ratio, so the random crop below always fits.
        scale_ratio = max(self.input_size[0] / h, self.input_size[1] / w)

        img = cv2.resize(img, dsize=None, fx=scale_ratio, fy=scale_ratio)
        # NOTE(review): the default bilinear interpolation makes the binary mask
        # fractional at box edges — confirm whether INTER_NEAREST is intended.
        mask_map = cv2.resize(mask_map, dsize=None, fx=scale_ratio, fy=scale_ratio)
        gauss_map = cv2.resize(gauss_map, dsize=None, fx=scale_ratio, fy=scale_ratio)

        # Random crop to the training size so batch shapes match.
        augmenter = augment.DataAugment()
        img, mask_map, gauss_map = augmenter.random_crop([img, mask_map, gauss_map], self.input_size)
        return img, mask_map, gauss_map

class Synth3D(data.Dataset):
    """Synth3D dataset yielding (image, binary text mask, gaussian heat map).

    ``train_data`` is a list of (image_path, label_path) pairs, e.g.::

        [('/data/Synth3D-10K/img/5545.jpg', '/data/Synth3D-10K/label/5545.txt'),
         ('/data/Synth3D-10K/img/2395.jpg', '/data/Synth3D-10K/label/2395.txt')]

    Each text instance in a label file takes up 5 lines::

        x1,y1
        x2,y2
        x3,y3
        x4,y4
        is_difficult

    When ``is_difficult == 1`` the text is marked as difficult. The
    coordinates are arranged clockwise.
    """

    def __init__(self, train_data, input_size=(720, 1080), transform=None):
        """
        :param train_data: list of (image_path, label_path) tuples
        :param input_size: training crop size as (h, w)
        :param transform: optional torchvision transform applied to the PIL image
        """
        self.train_data_list = train_data
        self.transform = transform
        self.input_size = input_size
        self.gaussian = GaussianTransformer(Gaussian_img_size=512,
                                            region_threshold=0.3, distanceRatio=3.34,
                                            create=True)

    def __len__(self):
        return len(self.train_data_list)

    def __getitem__(self, index):
        """Load one sample: the image plus its text mask and gaussian map."""
        img_path, label_path = self.train_data_list[index]
        im = cv2.imread(img_path)
        charbox = self.read_label(label_path)
        img, mask_map, gauss_map = self.image_label(im, text_labels=charbox)
        img = Image.fromarray(img)
        if self.transform:
            img = self.transform(img)

        return img, mask_map, gauss_map

    def read_label(self, label_path):
        """Parse one label file into a (2, 4, num_boxes) float array.

        :param label_path: path of a label file (5 lines per instance, see class doc)
        :return: boxes with dim 0 = x/y, dim 1 = 4 corner points, dim 2 = instances
        """
        boxes = []
        box = []
        with open(label_path, 'r', encoding='utf-8') as f:
            for i, line in enumerate(f):
                if (i + 1) % 5 == 0:
                    # Every 5th line is the is_difficult 0/1 flag; it closes a box.
                    boxes.append(box)
                    box = []
                    continue
                x, y = line.split(',')
                box.append([float(x), float(y)])

        if not boxes:
            # FIX: np.array([]).transpose(2, 1, 0) raises "axes don't match
            # array" for an empty label file; return an empty (2, 4, 0) instead.
            return np.zeros((2, 4, 0))
        return np.array(boxes).transpose(2, 1, 0)

    def image_label(self, img, text_labels):
        """Build the binary text mask and gaussian map, then resize and crop.

        :param img: input image, H x W x 3
        :param text_labels: text boxes, shape (2, 4, num) as produced by read_label
        :return: (img, mask_map, gauss_map), each cropped to ``self.input_size``
        """
        h, w, _ = img.shape
        mask_map = np.zeros((h, w), dtype=np.uint8)

        # FIX: np.int was removed in NumPy 1.24; cv2.fillPoly also requires
        # int32 (CV_32S) point arrays, so cast explicitly.
        text_labels = text_labels.astype(np.int32)
        text_labels = text_labels.transpose(2, 1, 0)  # -> (num, 4, 2) polygons
        cv2.fillPoly(mask_map, text_labels, 1)

        gauss_map = self.gaussian.generate_region((w, h), text_labels)

        # Scale so that BOTH sides reach at least input_size (h, w) while
        # keeping aspect ratio, so the random crop below always fits.
        scale_ratio = max(self.input_size[0] / h, self.input_size[1] / w)

        img = cv2.resize(img, dsize=None, fx=scale_ratio, fy=scale_ratio)
        # NOTE(review): the default bilinear interpolation makes the binary mask
        # fractional at box edges — confirm whether INTER_NEAREST is intended.
        mask_map = cv2.resize(mask_map, dsize=None, fx=scale_ratio, fy=scale_ratio)
        gauss_map = cv2.resize(gauss_map, dsize=None, fx=scale_ratio, fy=scale_ratio)

        # Random crop to the training size so batch shapes match.
        augmenter = augment.DataAugment()
        img, mask_map, gauss_map = augmenter.random_crop([img, mask_map, gauss_map], self.input_size)
        return img, mask_map, gauss_map





if __name__ == '__main__':

    # Smoke test: build a tiny Synth3D dataset and visualise the first batch.
    data_list = [('/data/Synth3D-10K/img/1.jpg', '/data/Synth3D-10K/label/1.txt'),
                 ('/data/Synth3D-10K/img/2.jpg', '/data/Synth3D-10K/label/2.txt'),
                 ('/data/Synth3D-10K/img/3.jpg', '/data/Synth3D-10K/label/3.txt')]
    train_dataset = Synth3D(data_list, input_size=(720, 1080),
                            transform=transforms.Compose([transforms.ToTensor()]))
    train_loader = DataLoader(dataset=train_dataset, batch_size=1, shuffle=True, num_workers=0)

    for batch_idx, (img, mask, gauss) in enumerate(train_loader):
        if batch_idx > 0:
            break  # only inspect the first batch

        print(img.shape)
        print(mask.shape)
        print(gauss.shape)
        show_img(img[0].permute(1, 2, 0), color=True)
        show_img(mask[0])
        show_img(gauss[0])
        plt.show()










