import os
import cv2
import time
import random
import functools
import itertools
import argparse
import json
import collections
import pandas as pd
import numpy as np
from concurrent.futures import ProcessPoolExecutor, wait


def get_direction(text):
    """Map a Chinese orientation label to an integer direction code.

    Returns 0/1/2/3 for bottom-down/right/up/left respectively, and -1
    for any unrecognized label (including None).
    """
    codes = {
        '底部朝下': 0,
        '底部朝右': 1,
        '底部朝上': 2,
        '底部朝左': 3,
    }
    return codes.get(text, -1)


def parse_args():
    """Build and parse the command-line options for this script."""
    parser = argparse.ArgumentParser()
    # root directory containing the annotation CSVs and image folders
    parser.add_argument('--data_dir', type=str, default='/tcdata')
    # where crops, label files and the character dict are written
    parser.add_argument('--output_dir', type=str, default='text')
    # 0: per-CSV image folder; otherwise a shared 'images' folder
    parser.add_argument('--mode', type=int, default=0)
    return parser.parse_args()


def decode_annotation(ann):
    """Decode one raw annotation record into a list of region labels.

    `ann[0]` holds the per-region annotations; `ann[1]['option']` (when
    present) encodes the image-level default reading direction.

    Returns:
        list of dicts with keys 'text' (str), 'points' (float32 (4, 2)
        array of corner coordinates) and 'direction' (int code; falls
        back to the image-level default when the region does not carry
        a valid direction of its own).
    """
    if len(ann) > 1:
        default_dir = get_direction(ann[1]['option'])
    else:
        default_dir = 0
    labels = []
    for a in ann[0]:
        # FIX: json.loads() lost its `encoding` keyword in Python 3.9
        # (it had been ignored since 3.1); passing it raises TypeError.
        aj = json.loads(a['text'])
        text = aj['text']
        direction = get_direction(aj.get('direction', None))
        if direction == -1:
            # region has no valid direction of its own; inherit default
            direction = default_dir
        coord = [float(x) for x in a['coord']]
        coord = np.asarray(coord, dtype=np.float32).reshape(4, 2)
        labels.append({
            'text': text,
            'points': coord,
            'direction': direction
        })

    return labels

def get_rotate_crop_image(img, points):
    """Rectify the quadrilateral `points` out of `img` by perspective warp.

    `points` is a float32 (4, 2) array; the warp maps points[0..3] to the
    corners (0,0), (w,0), (w,h), (0,h) — i.e. the quad is assumed ordered
    top-left, top-right, bottom-right, bottom-left (TODO confirm with
    annotation producer). Output size is taken from the longer of each
    pair of opposing edges.
    """
    edge_top = np.linalg.norm(points[0] - points[1])
    edge_bottom = np.linalg.norm(points[2] - points[3])
    edge_left = np.linalg.norm(points[0] - points[3])
    edge_right = np.linalg.norm(points[1] - points[2])

    out_w = int(max(edge_top, edge_bottom))
    out_h = int(max(edge_left, edge_right))

    target = np.float32([
        [0, 0],
        [out_w, 0],
        [out_w, out_h],
        [0, out_h],
    ])
    warp = cv2.getPerspectiveTransform(points, target)
    # BORDER_REPLICATE avoids black fringes at the crop boundary
    return cv2.warpPerspective(
        img,
        warp, (out_w, out_h),
        borderMode=cv2.BORDER_REPLICATE,
        flags=cv2.INTER_CUBIC)


def process_one_image(text_info, prefix, output_img_dir):
    """Crop every labelled text region of one image to disk.

    `text_info` is an (index, image path, labels) triple as built in
    generate_text_image. Each kept region is written as
    `<output_img_dir>/<idx>_<k>.jpg` and reported as a
    "<prefix>/<name><TAB><text>" label line; regions whose text is
    empty, '*' or ' ' are skipped. Returns the list of label lines.
    """
    idx, img_fname, ann = text_info
    img = cv2.imread(img_fname)

    lines = []
    for k, label in enumerate(ann):
        text = label['text']
        if not text or text in ('*', ' '):
            continue
        crop_name = f'{idx:0>5d}_{k:0>3d}.jpg'
        crop = get_rotate_crop_image(img, label['points'])
        cv2.imwrite(os.path.join(output_img_dir, crop_name), crop)
        lines.append(f'{prefix}/{crop_name}\t{text}')

    return lines


def generate_text_image(img_dir, ann_file, prefix, output_dir, word_counts=None, is_aug_text=False):
    """Crop all annotated text regions listed in one annotation CSV.

    Parses `ann_file`, crops every text region of every referenced image
    (in parallel) into `output_dir`/`prefix`, writes a
    "<prefix>/<crop>.jpg<TAB><text>" label file plus train/val splits,
    and optionally oversamples lines containing rare characters.

    Args:
        img_dir: directory holding the source images named in the CSV.
        ann_file: CSV whose 2nd column is image-metadata JSON (with a
            'tfspath') and 3rd column the region-annotation JSON.
        prefix: crop subdirectory name and label-file prefix.
        output_dir: root output directory.
        word_counts: per-character counts; required when is_aug_text.
        is_aug_text: when True, duplicate lines with rare characters.
    """
    if is_aug_text:
        assert word_counts is not None

    data = pd.read_csv(ann_file)
    start_time = time.time()

    output_img_dir = os.path.join(output_dir, prefix)
    os.makedirs(output_img_dir, exist_ok=True)
    text_data = []
    for i in range(len(data)):
        row = data.loc[i]
        # FIX: use .iloc for positional access — plain integer keys on a
        # string-labelled Series were deprecated and removed in pandas 2.x.
        img_name = json.loads(row.iloc[1])['tfspath'].split('/')[-1]
        img_file = os.path.join(img_dir, img_name)
        # FIX: json.loads() no longer accepts an `encoding` kwarg
        # (removed in Python 3.9); the input is already a str.
        ann = json.loads(row.iloc[2])
        ann = decode_annotation(ann)
        text_data.append((i, img_file, ann))

    num_workers = os.cpu_count()
    with ProcessPoolExecutor(num_workers) as exe:
        func = functools.partial(process_one_image, prefix=prefix, output_img_dir=output_img_dir)
        # materialize the lazy map while the pool is still alive
        results = list(exe.map(func, text_data))

    data_lines = []
    for r in results:
        # drop any line whose text still contains the '*' placeholder
        data_lines.extend(c for c in r if '*' not in c)

    if is_aug_text:
        data_lines = augment_text(word_counts, data_lines)

    with open(os.path.join(output_dir, prefix + '.txt'), 'w', encoding='utf-8') as fh:
        for line in data_lines:
            fh.write("{}\n".format(line))

    split_data_text(data_lines, len(data), output_dir, prefix)

    end_time = time.time()
    print(os.path.basename(ann_file), 'text_lines:', len(data_lines))
    print(os.path.basename(ann_file), 'used time', '{:.2f}'.format(end_time - start_time))


def generate_dict(ann_files, output_fname, output_word_counts=False):
    """Build the character dictionary, most frequent character first.

    Scans every annotation CSV in `ann_files`, counts character
    occurrences over all region texts (skipping '*' and ' '), and writes
    one character per line to `output_fname`. Optionally also writes a
    word_counts.txt ("char<TAB>count" per line) next to it.

    Returns:
        defaultdict(int) mapping character -> occurrence count.
    """
    out_dir = os.path.dirname(output_fname)
    if out_dir:  # dirname is '' for a bare filename; makedirs('') raises
        os.makedirs(out_dir, exist_ok=True)
    word_counts = collections.defaultdict(int)

    for ann_file in ann_files:
        data = pd.read_csv(ann_file)
        for i in range(len(data)):
            row = data.loc[i]
            # FIX: json.loads() `encoding` kwarg removed in Python 3.9,
            # and integer-key access on a string-labelled Series was
            # removed in pandas 2.x — use positional .iloc instead.
            # (Dropped the unused img_name computation.)
            ann = decode_annotation(json.loads(row.iloc[2]))
            for label in ann:
                text = label['text']
                if len(text) == 0 or text == '*' or text == ' ':
                    continue
                for ch in text:
                    if ch != '*' and ch != ' ':
                        word_counts[ch] += 1

    words = list(word_counts.keys())
    counts = list(word_counts.values())
    # stable ascending argsort, reversed => most frequent first
    indices = np.argsort(counts)[::-1]
    with open(output_fname, 'w', encoding='utf-8') as fh:
        for i in indices:
            fh.write("{}\n".format(words[i]))

    if output_word_counts:
        count_fname = os.path.join(out_dir, 'word_counts.txt')
        with open(count_fname, 'w', encoding='utf-8') as fh:
            for i in indices:
                fh.write("{}\t{}\n".format(words[i], counts[i]))

    return word_counts



def augment_text(word_counts, data_lines, min_count=5):
    """Oversample label lines that contain rare characters.

    A character is rare when `word_counts[char] < min_count`. For each
    rare character, copies of the lines it appears in are appended
    (cycling through those lines) until the character's count reaches
    `min_count`. Mutates `data_lines` in place and returns it.
    """
    rare_owners = collections.defaultdict(list)
    for line_id, line in enumerate(data_lines):
        text = line.strip().split('\t')[1]
        for ch in text:
            if ch not in (' ', '*') and word_counts[ch] < min_count:
                rare_owners[ch].append(line_id)

    extras = []
    for ch, owners in rare_owners.items():
        # exactly enough repeats to lift this char up to min_count
        needed = min_count - word_counts[ch]
        picked = itertools.islice(itertools.cycle(owners), needed)
        extras.extend(data_lines[i] for i in picked)

    data_lines.extend(extras)
    return data_lines


def split_data_text(data_lines, num, output_dir, prefix, ratio=0.9):
    """Split crop label lines into train/val files by source-image index.

    A fixed-seed shuffle marks `ratio` of the `num` source images as
    training; each line is routed by the image index embedded in its
    crop name ("<prefix>/<imgidx>_<cropidx>.jpg<TAB>text"), so all crops
    of one image land in the same split.

    Args:
        data_lines: label lines in the format produced by process_one_image.
        num: number of source images (image indices are 0..num-1).
        output_dir: directory for <prefix>_train.txt / <prefix>_val.txt.
        prefix: filename prefix for the two split files.
        ratio: fraction of images assigned to the training split.
    """
    random.seed(100)  # fixed seed => reproducible split
    is_train = np.zeros(num, dtype='bool')
    num_train = int(ratio * num)
    indices = list(range(num))
    random.shuffle(indices)
    is_train[indices[:num_train]] = True

    train_fname = os.path.join(output_dir, prefix + '_train.txt')
    val_fname = os.path.join(output_dir, prefix + '_val.txt')
    # FIX: context managers guarantee both handles are closed (and
    # flushed) even if a malformed line raises mid-loop.
    with open(train_fname, 'w', encoding='utf-8') as fh_train, \
         open(val_fname, 'w', encoding='utf-8') as fh_val:
        for line in data_lines:
            name = line.strip().split('\t')[0]
            # "<prefix>/<imgidx>_<cropidx>.jpg" -> image index
            idx = int(name.split('/')[1].split('_')[0])
            if is_train[idx]:
                fh_train.write(line + '\n')
            else:
                fh_val.write(line + '\n')


def main():
    """Build the character dictionary, then crop text images per CSV."""
    args = parse_args()

    csv_names = (
        'Xeon1OCR_round2_train1_20210816.csv',
        'Xeon1OCR_round2_train2_20210816.csv',
    )
    ann_files = [os.path.join(args.data_dir, name) for name in csv_names]
    prefixes = ('train1', 'train2')

    dict_fname = os.path.join(args.output_dir, 'tianchi_dict.txt')
    word_counts = generate_dict(ann_files, dict_fname, output_word_counts=True)

    for ann_file, prefix in zip(ann_files, prefixes):
        if args.mode == 0:
            # mode 0: each CSV has its own image folder named after it
            base = os.path.splitext(os.path.basename(ann_file))[0]
            img_dir = os.path.join(args.data_dir, base)
        else:
            # otherwise all images live in one shared folder
            img_dir = os.path.join(args.data_dir, 'images')
        generate_text_image(img_dir, ann_file, prefix, args.output_dir,
                            word_counts, True)




if __name__ == '__main__':
    main()
