import csv
import re
import torch
from boxex_utils.build_vocab import Vocab
from torch.utils import data
import csv
import re
from pathlib import Path
def normalize_bbox(bbox, size):
    """Scale a pixel-space box [x0, y0, x1, y1] into the 0-1000 range.

    size is (width, height): x coordinates are divided by the width,
    y coordinates by the height, then truncated to int.
    """
    width, height = size
    divisors = (width, height, width, height)
    return [int(1000 * coord / d) for coord, d in zip(bbox, divisors)]


def simplify_bbox(bbox):
    """Collapse an 8-value quadrilateral [x1,y1,...,x4,y4] to [x0, y0, x1, y1].

    Mins are taken over all x / all y coordinates; maxes over the
    coordinates starting at index 2 / 3, mirroring the original layout.
    """
    left = min(bbox[0::2])
    top = min(bbox[1::2])
    right = max(bbox[2::2])
    bottom = max(bbox[3::2])
    return [left, top, right, bottom]


def merge_bbox(bbox_list):
    """Return the tightest axis-aligned box enclosing every box in bbox_list."""
    columns = list(zip(*bbox_list))
    left, top, right, bottom = columns
    return [min(left), min(top), max(right), max(bottom)]

def single_text_box(bbox):
    """Expand one CSV row into one box per transcript character.

    Assumes the row layout is: index 0 a row id, quad coordinates at
    indices 1-8, transcript string at index 9 — TODO confirm against the
    data files (the indices used are (1,2) and (5,6), i.e. two opposite
    corners of the quad). Every character gets the same coordinates.

    Returns a list of [x0, y0, x1, y1, char] entries.
    """
    x0, y0 = bbox[1], bbox[2]
    x1, y1 = bbox[5], bbox[6]
    return [[x0, y0, x1, y1, ch] for ch in bbox[9]]
def match_en_ch(seq):
    """Strip seq down to its CJK characters and ASCII letters.

    Digits, punctuation and whitespace are removed; what remains is
    concatenated in order. Returned as a one-element list.
    """
    pieces = re.findall(r"[\u4e00-\u9fa5]+|[A-Za-z]+", seq, re.UNICODE)
    return ["".join(pieces)]

def file_bbox(file):
    """Read a box/transcript CSV file into per-character box entries.

    Each returned entry is [x0, y0, x1, y1, text] where text has been
    filtered by match_en_ch (CJK characters and ASCII letters only);
    entries whose filtered text is empty are dropped.
    """
    res = []
    with open(file, mode='r', encoding='utf-8') as f:
        reader = csv.reader(f)
        for row in reader:
            # single_text_box yields one [x0, y0, x1, y1, char] per character.
            # Extend unconditionally: the old `else: res.append(points_text)`
            # branch nested single-character rows one level too deep, so the
            # len(bbt) == 5 filter below silently dropped them.
            res.extend(single_text_box(row))
    en_ch_bbts = []
    for bbt in res:
        if len(bbt) == 5:
            filtered = match_en_ch(bbt[4])  # call once and reuse the result
            if filtered[0] != "":
                en_ch_bbts.append(bbt[:4] + filtered)
    return en_ch_bbts

def pad_tokens(tokens, max_len, padding_token):
    """Truncate or right-pad tokens so exactly max_len entries are returned."""
    clipped = tokens[:max_len]
    return clipped + [padding_token] * (max_len - len(clipped))

def pad_boxes(boxes, max_len, padding_box):
    """Truncate or right-pad boxes so exactly max_len entries are returned."""
    if len(boxes) >= max_len:
        return boxes[:max_len]
    deficit = max_len - len(boxes)
    return boxes + [padding_box] * deficit

def genereate_raw_train(file, max_len):
    """Build one training example dict from a box/transcript file.

    Boxes are sorted by numeric x0 + y0 (a rough top-left reading order),
    coordinates are cast to int, and both boxes and tokens are padded or
    truncated to max_len. The label is hard-coded to 1.

    Returns a dict with keys "boxes", "tokens", "position_id", "label".
    """
    box_inf = file_bbox(file)
    # CSV fields arrive as strings: convert before summing, otherwise the
    # sort key was string CONCATENATION ("9" + "1" -> "91"), not the
    # numeric x1 + y1 ordering the original docstring describes.
    sorted_box_info = sorted(box_inf, key=lambda b: float(b[0]) + float(b[1]))
    boxes = [[int(float(v)) for v in entry[:4]] for entry in sorted_box_info]
    tokens = [entry[4] for entry in sorted_box_info]
    file_info = {
        "boxes": pad_boxes(boxes, max_len, [0, 0, 0, 0]),
        "tokens": pad_tokens(tokens, max_len, 'pad'),
        "position_id": list(range(max_len)),
        "label": 1,
    }
    return file_info

def build_vocab(file_dir, max_len):
    """Build a Vocab from the padded tokens of every .tsv file in file_dir.

    Tokens below min_freq=3 are filtered out by Vocab; special tokens
    (pad/unk/...) are added by the Vocab class itself.
    """
    tokens = []
    # Was glob(".tsv"), which only matches a file literally named ".tsv"
    # and therefore built the vocab from zero files. "*.tsv" matches every
    # tsv file, consistent with build_array.
    for file in file_dir.glob("*.tsv"):
        file_info = genereate_raw_train(file, max_len)
        tokens.extend(file_info['tokens'])
    return Vocab(tokens, min_freq=3, use_special_tokens=True)

def build_array(file_dir, max_len):
    """Vectorize every .tsv file in file_dir into training tensors.

    Returns (boxes, tokens, positions, labels) tensors; tokens are mapped
    to ids through a Vocab built from the same files.
    """
    all_file_info = [
        genereate_raw_train(file, max_len) for file in file_dir.glob("*.tsv")
    ]
    # Build the vocab from the examples already in memory instead of
    # calling build_vocab, which re-read and re-parsed every file a
    # second time (and globbed ".tsv" instead of "*.tsv", so it actually
    # saw no files at all).
    vocab_tokens = []
    for file_info in all_file_info:
        vocab_tokens.extend(file_info["tokens"])
    vocab = Vocab(vocab_tokens, min_freq=3, use_special_tokens=True)

    all_tokens = [
        [vocab[token] for token in file_info["tokens"]]
        for file_info in all_file_info
    ]
    all_boxes = [file_info["boxes"] for file_info in all_file_info]
    positions = [file_info["position_id"] for file_info in all_file_info]
    labels = [file_info["label"] for file_info in all_file_info]

    return (
        torch.tensor(all_boxes),
        torch.tensor(all_tokens),
        torch.tensor(positions),
        torch.tensor(labels),
    )


def data_loader(file_dir, batch_size=1, max_len=200):
    """Create a shuffled DataLoader over (boxes, tokens, positions, labels)."""
    arrays = build_array(Path(file_dir), max_len)
    train_data = data.TensorDataset(*arrays)
    return data.DataLoader(train_data, batch_size, shuffle=True)


if __name__=="__main__":
    # NOTE(review): csv, re and Path are already imported at module level;
    # these re-imports are redundant but harmless.
    import csv
    import re
    from pathlib import Path
    # Hard-coded dataset location for this smoke-test run.
    file_dir = Path("/mnt/myproject/classfication/translayout/boxes_and_transcripts")
    train_dataloader=data_loader(file_dir)
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    for boxes_array,token_array,positions_array,labels_array in train_dataloader:
        boxes_array = boxes_array.to(DEVICE)
        token_array = token_array.to(DEVICE)
        positions_array = positions_array.to(DEVICE)
        labels_array = labels_array.to(DEVICE)
        # NOTE(review): [:-1, :] slices along the BATCH dimension; with the
        # default batch_size=1 this yields an empty tensor every iteration.
        # Was [:, :-1] (trim along the sequence axis) intended? TODO confirm.
        boxes_input = boxes_array[:-1, :]
        tokens_input = token_array[:-1,:]
        position_input = positions_array
        labels = labels_array

        print(boxes_input)







