'''
Dataloader utilities for CTC-based text recognition training:
CTC label encoding/decoding, image augmentation, text-file and LMDB
datasets, balanced batch sampling, and learning-rate/path helpers.

FilePath: \cnc_rec_pytorch\Dataloader.py
'''
# coding=utf-8
import glob
import math
import sys
import os
import random

import cv2
import lmdb
import numpy as np
import six
import torch
import torchvision.transforms as transforms
import yaml
from edit_distance import edit_distance
from PIL import Image
from torch.utils.data import DataLoader, Dataset

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class CTCLabel:
    """Maps characters <-> integer indices for CTC training.

    The table is built from a newline-separated character file.
    Index 0 is reserved for the CTC 'blank' symbol and the last index
    for out-of-vocabulary characters ('UNKNOWN').
    """

    def __init__(self, dict_path):
        self.dict_path = dict_path
        self.dict = self.get_dict()
        self.dict_index2char = self.dict_i2c()
        self.dict_char2index = self.dict_c2i()

    def get_dict(self):
        """Read the character file and return ['blank', ...chars..., 'UNKNOWN']."""
        word_dict = ['blank']
        # 'with' guarantees the file handle is closed (the original leaked it).
        with open(self.dict_path, 'r', encoding='utf-8') as f:
            for ch in f.readlines():
                word_dict.append(ch.replace('\n', ''))
        word_dict.append('UNKNOWN')
        print('Dict length is :', len(word_dict))
        return word_dict

    def dict_i2c(self):
        """Return {index: char} for every dictionary entry."""
        return {index: item for index, item in enumerate(self.dict)}

    def dict_c2i(self):
        """Return {char: index} for every dictionary entry."""
        return {item: index for index, item in enumerate(self.dict)}

    def encode(self, texts, batch_max_length=30):
        """Encode a batch of label strings for CTC loss.

        :param texts: list of label strings
        :param batch_max_length: unused; kept for interface compatibility
        :return: (IntTensor of concatenated char indices, IntTensor of lengths)
        """
        length = [len(text) for text in texts]
        joined = ''.join(texts)
        indices = [self.dict_char2index[char] if char in self.dict else self.dict_char2index['UNKNOWN']
                   for char in joined]
        return (torch.IntTensor(indices), torch.IntTensor(length))

    def decode(self, text_index, length):
        """CTC greedy decode: drop blanks (index 0) and collapse repeats.

        :param text_index: flat sequence of predicted indices
        :param length: per-sample lengths used to split text_index
        :return: list of decoded strings, one per sample
        """
        texts = []
        index = 0
        for l in length:
            t = text_index[index:index + l]
            char_lst = []
            for i in range(l):
                # keep a symbol only if it is not blank and not a repeat of the previous one
                if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
                    char_lst.append(self.dict_index2char[t[i]])
            texts.append(''.join(char_lst))
            index += l
        return texts

    def get_acc_ned(self, labels, pred_text):
        """Return (exact-match accuracy, 1 - mean normalized edit distance)."""
        assert len(labels) == len(pred_text), 'pred and label is not same number'
        cnt = 0
        ned = 0
        for i in range(len(labels)):
            # empty labels contribute 0 to the normalized edit distance
            if len(labels[i]) != 0:
                ned += edit_distance(pred_text[i], labels[i])[0] / len(labels[i])
            if pred_text[i] == labels[i]:
                cnt += 1

        acc = cnt * 1.0 / len(labels)
        ned = ned * 1.0 / len(labels)

        return acc, 1 - ned


class ConfigDict(dict):
    """A dict whose keys can also be read and written as attributes."""

    def __init__(self, origin_dict, **kw):
        super(ConfigDict, self).__init__(**kw)
        self.update(origin_dict)

    def __getattr__(self, key):
        if key in self:
            return self[key]
        raise AttributeError(r"'Config' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # attribute writes are stored as dict items
        self[key] = value


class Aug(object):
    """Random image augmentations for grayscale training images.

    All methods take and return numpy arrays; randomness comes from
    numpy.random (choice/normal) and the stdlib random module (crop).
    """

    def __init__(self):
        pass

    def brightness_adjust(self, img):
        """Apply a random linear brightness change: clip(alpha*img + beta)."""
        # (original allocated an unused zeros buffer here; removed)
        alpha = np.random.choice([0.1, 0.2, 0.3, 0.5, 0.7], p=[0.1, 0.25, 0.25, 0.2, 0.2])
        beta = np.random.choice([30, 60, 90, 120], p=[0.25, 0.25, 0.25, 0.25])
        # clip to the valid pixel range before casting back to uint8
        new_img = np.uint8(np.clip((alpha * img + beta), 0, 255))
        return new_img

    def gau_bulr(self, img):
        """Gaussian blur with a randomly chosen kernel size and sigma."""
        kernel_size = np.random.choice([3, 5, 7, 9], p=[0.1, 0.3, 0.4, 0.2])
        sig_ma = np.random.choice([0, 5, 10, 20], p=[0.25, 0.25, 0.25, 0.25])
        new_img = cv2.GaussianBlur(img, (kernel_size, kernel_size), sig_ma)
        return new_img

    def gau_noise(self, img):
        """Add Gaussian noise with a randomly chosen mean and variance."""
        mean = np.random.choice([0.01, 0.02, 0.05, 0.1], p=[0.3, 0.3, 0.3, 0.1])
        var = np.random.choice([0.0001, 0.0003, 0.0005, 0.0007], p=[0.3, 0.3, 0.3, 0.1])
        img = np.array(img / 255, dtype=float)
        noise = np.random.normal(mean, var ** 0.5, img.shape)
        out = img + noise
        # allow negative values through when they appear, then rescale to uint8
        low_clip = -1 if out.min() < 0 else 0
        out = np.clip(out, low_clip, 1.0)
        out = np.uint8(out * 255)
        return out

    def rotate_img(self, img, angle, scale=1):
        """Rotate the image by `angle` degrees, enlarging the canvas so no
        content is cut off.

        NOTE(review): `H, W = img.shape` assumes a 2-D (grayscale) input.
        """
        H, W = img.shape
        rangle = np.deg2rad(angle)  # angle in radians
        # bounding-box size of the rotated image
        new_width = (abs(np.sin(rangle) * H) + abs(np.cos(rangle) * W)) * scale
        new_height = (abs(np.cos(rangle) * H) + abs(np.sin(rangle) * W)) * scale

        rot_mat = cv2.getRotationMatrix2D((new_width * 0.5, new_height * 0.5), angle, scale)
        # shift so the original image ends up centered in the enlarged canvas
        rot_move = np.dot(rot_mat, np.array([(new_width - W) * 0.5, (new_height - H) * 0.5, 0]))
        rot_mat[0, 2] += rot_move[0]
        rot_mat[1, 2] += rot_move[1]

        rot_img = cv2.warpAffine(img, rot_mat, (int(math.ceil(new_width)), int(math.ceil(new_height))),
                                 flags=cv2.INTER_LANCZOS4)
        return rot_img

    def crop(self, img):
        """Randomly crop one main side (up/down/left/right) by 15%-25% of the
        height and, with low probability, also lightly trim the other sides.

        :param img: numpy grayscale image
        :return: cropped image
        """
        h, w = img.shape[:2]
        p = random.random()
        crop_size = int(h * random.uniform(0.15, 0.25))
        prob_of_crop_other_side = 0.1
        # default window: full image, with an optional light trim per side
        h_start = int(h * random.uniform(0.1, 0.15)) if random.random() < prob_of_crop_other_side else 0
        h_end = (h - int(h * random.uniform(0.1, 0.15))) if random.random() < prob_of_crop_other_side else h
        w_start = int(h * random.uniform(0.1, 0.15)) if random.random() < prob_of_crop_other_side else 0
        w_end = (w - int(h * random.uniform(0.1, 0.15))) if random.random() < prob_of_crop_other_side else w
        if p < 0.25:  # up crop
            h_start = crop_size
        elif 0.25 <= p < 0.5:  # down crop
            h_end = h - crop_size
        elif 0.5 <= p < 0.75:  # left crop
            w_start = crop_size
        else:  # right crop
            w_end = w - crop_size
        # guard against the width window collapsing to an empty slice
        if w_start >= w:
            w_start = 0
        if w_end <= 0:
            w_end = w
        cropped_img = img[h_start:h_end, w_start:w_end]
        return cropped_img


class Image_label_dataset(Dataset):
    """Dataset backed by a ground-truth file with one '<image_path> <label>' per line."""

    def __init__(self, gt_file_path, char2index):
        """
        :param gt_file_path: text file of '<image_path> <label>' lines
        :param char2index: char -> index mapping used to filter labels
        """
        self.char2index = char2index
        # 'with' ensures the ground-truth file handle is closed (the original leaked it)
        with open(gt_file_path, encoding='utf-8') as f:
            self.lines = [line.rstrip('\n') for line in f]

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, index):
        """Return (grayscale image array, label) for the given line.

        Characters missing from char2index are dropped from the label.
        """
        line = self.lines[index]
        # split on the first space: path may not contain spaces, label may
        space_index = line.find(' ')
        image_path = line[:space_index]
        label = line[space_index + 1:]
        clean_label = ''
        for char in label:
            if char not in self.char2index:
                print(char + " not in dict")
                continue
            clean_label += char
        label = clean_label
        temp_img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        return (temp_img, label)


class LmdbDataset(Dataset):
    """Dataset reading (image, label) pairs from an LMDB database that uses
    'image-%09d'/'label-%09d' keys plus a b'num-samples' count entry."""

    def __init__(self, gt_file_path, char2index, datasets_rate=1.0):
        """
        :param gt_file_path: path of the LMDB directory
        :param char2index: char -> index mapping (stored for interface parity)
        :param datasets_rate: fraction of the stored samples to expose (0..1]
        """
        super(LmdbDataset, self).__init__()
        self.env = lmdb.open(gt_file_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
        if not self.env:
            print(f'cannot create lmdb from:{gt_file_path}')
            sys.exit(0)
        self.txn = self.env.begin()
        self.nSamples = int(datasets_rate * int(self.txn.get(b"num-samples")))
        self.char2index = char2index

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        # LMDB keys are 1-based, so valid 0-based indices are [0, len).
        # The original check 'index <= len(self)' was off by one and let
        # index == len(self) reach a nonexistent key.
        assert 0 <= index < len(self), 'index range error'
        index += 1
        img_key = b'image-%09d' % index
        imgbuf = self.txn.get(img_key)
        buf = six.BytesIO()
        buf.write(imgbuf)
        buf.seek(0)
        img = Image.open(buf).convert('RGB')
        try:
            # PIL delivers RGB, so convert with COLOR_RGB2GRAY; the original
            # used COLOR_BGR2GRAY, which swaps the R/B gray weights.
            img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2GRAY)
        except Exception as e:
            # fall back to the raw array if the conversion fails
            img = np.asarray(img)
        label_key = b'label-%09d' % index
        label = self.txn.get(label_key).decode()
        return (img, label)


class Get_dataloader(object):
    """Wraps a dataset in a DataLoader that augments, resizes and pads every
    image to a fixed (img_target_h, img_target_w) grayscale tensor."""

    def __init__(self, dataset, sub_batch_size, img_target_w, img_target_h, shuffle, num_workers):
        self.dataset = dataset
        self.batch_size = sub_batch_size
        self.img_target_w = img_target_w
        self.img_target_h = img_target_h
        self.shuffle = shuffle
        self.num_workers = num_workers
        self.if_aug = True
        self.aug = Aug()

    def aug_resize_transform(self, image):
        """Randomly blur/noise the image, then resize it to the target height
        while keeping the aspect ratio (width capped at the target width)."""
        if self.if_aug:
            aug_type = np.random.choice(['None', 'bulr', 'noise'], p=[0.5, 0.2, 0.3])
            if aug_type == 'bulr':
                image = self.aug.gau_bulr(image)
            elif aug_type == 'noise':
                image = self.aug.gau_noise(image)

        src_h = image.shape[0]
        src_w = image.shape[1]
        scaled_w = int(src_w * self.img_target_h / src_h)
        scaled_w = min(scaled_w, self.img_target_w)
        return cv2.resize(image, (scaled_w, self.img_target_h), interpolation=cv2.INTER_CUBIC)

    def pad_image(self, image):
        """Normalize pixels to roughly [-1, 1] and right-pad with zeros up to
        the fixed target width; returns an (H, W, 1) array."""
        canvas = np.zeros([self.img_target_h, self.img_target_w, 1])
        width = image.shape[1]
        normalized = (image - 127.0) / 128.0
        if width < self.img_target_w:
            canvas[:, :width, 0] = normalized[:, :]
        else:
            canvas[:, :, 0] = normalized[:, :]
        return canvas

    def collate_fn(self, batch):
        """Collate a batch: augment+resize, pad, stack to (N, 1, H, W) floats."""
        labels = [sample[1] for sample in batch]
        resized = [self.aug_resize_transform(sample[0]) for sample in batch]
        padded = np.array([self.pad_image(im) for im in resized])
        tensors = torch.from_numpy(padded).permute(0, 3, 1, 2).float()
        return tensors, labels

    def get_loader(self):
        """Build the DataLoader that uses this object's collate_fn."""
        return DataLoader(self.dataset, batch_size=self.batch_size,
                          shuffle=self.shuffle,
                          num_workers=self.num_workers,
                          collate_fn=self.collate_fn)


class Balanced_dataset(object):
    """Draws each training batch as a fixed mix of sub-batches, one per
    configured dataset, so every source keeps its share of the batch.

    config.lmdb_data is a list of (path, sub_batch_size) pairs whose
    sub-batch sizes must sum to config.batch_size.
    """

    def __init__(self, config, char2index):
        self.config = config
        self.batch_size = config.batch_size
        self.char2index = char2index
        self.img_target_w = config.img_width
        self.img_target_h = config.img_height
        self.shuffle = config.shuffle
        self.num_workers = config.num_workers
        self.dataset_rate = config.dataset_rate

        assert sum([b for (f, b) in self.config.lmdb_data]) == self.batch_size
        self.dataloader_lst = []
        self.dataloader_iter_lst = []
        for (f, b) in self.config.lmdb_data:
            # .txt ground-truth files are image/label lists; anything else is LMDB
            if f.endswith('.txt'):
                _dataset = Image_label_dataset(f, self.char2index)
            else:
                _dataset = LmdbDataset(f, self.char2index, self.dataset_rate)
            _dataloader = Get_dataloader(_dataset, b, self.img_target_w, self.img_target_h, self.shuffle,
                                         self.num_workers).get_loader()
            self.dataloader_lst.append(_dataloader)
            self.dataloader_iter_lst.append(iter(_dataloader))

    def get_batch(self):
        """Pull one sub-batch from every loader (restarting exhausted ones)
        and concatenate into a single (imgs, labels) batch."""
        balanced_imgs = []
        balanced_label = []
        for i, data_loader_iter in enumerate(self.dataloader_iter_lst):
            try:
                # built-in next() replaces the Python-2-style .next(), which
                # modern torch DataLoader iterators no longer provide
                img, label = next(data_loader_iter)
                balanced_imgs.append(img)
                balanced_label.extend(label)
            except StopIteration:
                # loader exhausted: restart it and take its first batch
                self.dataloader_iter_lst[i] = iter(self.dataloader_lst[i])
                img, label = next(self.dataloader_iter_lst[i])
                balanced_imgs.append(img)
                balanced_label.extend(label)
            except ValueError as v:
                print('Error: ValueError', v.args)
        balanced_imgs = torch.cat(balanced_imgs, 0)
        return balanced_imgs, balanced_label


def adjust_learning_rate(optimizer, global_step, init_lr, decay_rate, decay_steps, min_lr):
    """Step-decay schedule applied to every parameter group:
    lr = max(init_lr * decay_rate ** (global_step // decay_steps), min_lr)."""
    decay_exponent = global_step // decay_steps
    n_lr = max(init_lr * decay_rate ** decay_exponent, min_lr)
    for group in optimizer.param_groups:
        group['lr'] = n_lr
    print(f'adjust learning rate to {n_lr}')


def check_path(config):
    """Ensure the training-log and model-save directories exist.

    Uses makedirs(exist_ok=True) instead of an exists() pre-check, which
    was racy (TOCTOU): the directory could appear between the check and
    the create, making the original raise FileExistsError.
    """
    os.makedirs(config.train_log_dir, exist_ok=True)
    os.makedirs(config.save_dir, exist_ok=True)


def parse_version(model_type):
    """Extract the model version from a model-type string.

    The first of 'v1'/'v2'/'v3' found as a substring wins; 0 means no
    recognized version tag.
    """
    for tag, version in (("v1", 1), ("v2", 2), ("v3", 3)):
        if tag in model_type:
            return version
    return 0


def test_aug_crop():
    """Visual check for the random-crop augmentation.

    Crops every jpg in the test directory on one randomly chosen main side
    (15%-25% of the height) plus, with low probability, a light trim of the
    other sides, and writes the results to save_dir for inspection.

    The original repeated the same window logic in four near-identical
    branches (plus large blocks of commented-out variants); this collapses
    them into one default-window-then-override pass, matching Aug.crop.
    """
    img_list = glob.glob("./test/tr2_with_blur_noise_lmdb/*.jpg")
    save_dir = "./test/test_cropped_image_dst"
    os.makedirs(save_dir, exist_ok=True)
    prob_of_crop_other_side = 0.1
    for img_path in img_list:
        img = cv2.imread(img_path)
        h, w = img.shape[:2]
        # main-side crop is 15%-25% of the height
        crop_size = int(h * random.uniform(0.15, 0.25))

        # default window: full image, with an optional light trim per side
        h_start = int(h * random.uniform(0.1, 0.15)) if random.random() < prob_of_crop_other_side else 0
        h_end = (h - int(h * random.uniform(0.1, 0.15))) if random.random() < prob_of_crop_other_side else h
        w_start = int(h * random.uniform(0.1, 0.15)) if random.random() < prob_of_crop_other_side else 0
        w_end = (w - int(h * random.uniform(0.1, 0.15))) if random.random() < prob_of_crop_other_side else w

        # pick one main side and apply the large crop there
        p = random.random()
        if p < 0.25:  # up crop
            h_start = crop_size
        elif p < 0.5:  # down crop
            h_end = h - crop_size
        elif p < 0.75:  # left crop
            w_start = crop_size
        else:  # right crop
            w_end = w - crop_size

        cropped_img = img[h_start:h_end, w_start:w_end]
        cropped_img_name = os.path.join(save_dir, os.path.basename(img_path))
        print(cropped_img_name)
        cv2.imwrite(cropped_img_name, cropped_img)


if __name__ == '__main__':
    # Ad-hoc entry point: exercise the crop augmentation on sample images.
    test_aug_crop()
    # for c in ['c', 'j', '陈', '学', '<', '。', '《', '，', '好']:
    #     if'\u4e00' <= c <= '\u9fa5':
    #         print(c)

# if __name__ =="__main__":
#     # each_num = [1,2,3,4,5,6]
#     # b = []
#     # b = [each_num[i] if i==0 else b[i]+b[i-1] for i in range(len(each_num)) ]
#     # print(b)
#     # config = ConfigDict(yaml.load(open('./Config/v3.yaml',encoding='utf-8')))
#     # al = AttenLabel(config.dict_file)
#     # bd = Balanced_dataset(config,al.dict_char2index)

#     # for i in range(100):
#     #     a,b = bd.get_batch()
#     #     print(b)
#     # a = torch.LongTensor([9069])
#     # print(a)
