import os
import torch
import numpy as np
import Levenshtein
from torch.utils.data import Dataset
from IPython.display import display, Image
from shapely.geometry import Polygon, Point
from transformers import BertTokenizer
from fuzzysearch import find_near_matches


# Data display helpers
def print_data(x: dict):
    """Print a human-readable summary of one record: ids, optional name, and its OCR texts."""
    lines = [f"point_seq_id: {x['point_seq_id']}", f"image_id: {x['image_id']}"]
    if "name" in x:
        lines.append(f"name: {x['name']}")
    lines.append("texts:")
    lines.extend(str(entry) for entry in x["texts"])
    print("\n".join(lines))


def display_img(image_id):
    """Resolve *image_id* to a file on disk and render it inline (Jupyter)."""
    display(Image(filename=get_image_filename(image_id)))


def get_image_filename(image_id):
    """Locate the .jpg for *image_id* across the known image folders.

    Returns the first existing path; if no candidate exists on disk the
    last candidate path is returned anyway (same as the original lookup).
    """
    train_base = "./data/desensitized_train_images_split"
    candidates = [f"{train_base}1", f"{train_base}2", "./data/desensitized_TestA_images"]
    for folder in candidates:
        path = f"{folder}/{image_id}.jpg"
        if os.path.exists(path):
            return path
    return path


# Check whether the texts match the name exactly and, if so, in which stitching order
def is_correct_text(msg: dict):
    """Check whether the OCR strings can be concatenated exactly into the POI name.

    Returns:
        flag (bool): True if the texts can be stitched together to rebuild ``name``.
        rankorder (list[int]): position of each text in the stitched name
            (-1 means the text is not part of the POI name); empty on failure.
    """
    name = msg["name"]
    # Resolve duplicated strings first (keeps the best copy, marks the rest -1).
    texts, position = deduplicate_situation(msg)
    # Build (text, match_position, original_index) triples, sorted by position.
    triples = sorted(
        zip(texts, position, range(len(position))),
        key=lambda item: item[1],
    )
    # Depth-first search for a perfect concatenation.
    flag, seq = is_perfect_match(name, triples)
    if not flag:
        return flag, []
    rankorder = [-1] * len(position)
    for rank, sel in enumerate(seq):
        rankorder[triples[sel][2]] = rank
    return flag, rankorder


def is_deduplicate(texts: list) -> bool:
    """Return True if *texts* contains any duplicate entry.

    Replaces the original O(n^2) slice scan with an O(n) set-cardinality
    check; entries are strings, so they are hashable.
    """
    return len(set(texts)) != len(texts)


def deduplicate_situation(msg: dict):
    """Pre-process duplicated OCR texts: keep only the largest-area copy.

    For every string that occurs more than once, the occurrence whose contour
    has the biggest area keeps its match position; every other occurrence is
    marked -1.

    Returns:
        texts (list[str]): all OCR strings, input order preserved.
        position (list[int]): match position of each text inside ``name``
            (-1 when absent or when it is a suppressed duplicate).
    """
    name = msg["name"]
    texts = [entry["text"] for entry in msg["texts"]]
    position = [name.find(t) for t in texts]  # str.find returns -1 when missing
    if not is_deduplicate(texts):
        return texts, position
    # Group the occurrence indices of every string value.
    occurrences = {}
    for idx, txt in enumerate(texts):
        occurrences.setdefault(txt, []).append(idx)
    # Only strings appearing more than once need resolving.
    duplicated = {txt: idxs for txt, idxs in occurrences.items() if len(idxs) > 1}
    # Keep the occurrence whose contour covers the largest area.
    best = {}
    for txt, idxs in duplicated.items():
        areas = [get_area(msg["texts"][i]["contour"]) for i in idxs]
        best[txt] = idxs[int(np.argmax(areas))]
    # Suppress every non-best duplicate.
    for idx, txt in enumerate(texts):
        if txt in best and best[txt] != idx:
            position[idx] = -1
    return texts, position
    

# Depth-first search: can the given texts be concatenated exactly into name?
def is_perfect_match(name: str, texts: list,
                     start: int = 0, match_len: int = 0):
    """Depth-first search: can ``texts`` be concatenated into exactly ``name``?

    Args:
        name: target string to be rebuilt.
        texts: [(text, match_pos, ...)] sorted ascending by match_pos; each
            text is assumed to match ``name`` at a single position.
        start: only texts[start:] are considered at this depth.
        match_len: length of the prefix of ``name`` already matched.

    Returns:
        flag (bool): True if a full concatenation path exists.
        match_seq (list[int]): indices into ``texts`` forming the path
            (empty when no path exists).
    """
    # Success: the whole name has been covered.
    if match_len == len(name):
        return True, []
    # Collect every candidate whose match position equals the current prefix
    # length. Since `texts` is sorted by position we can stop at the first
    # position beyond match_len. The original code reused the leaked loop
    # variable `i` as the recursion start; track it explicitly instead.
    match_idxs = []
    next_start = start
    for i in range(start, len(texts)):
        next_start = i
        if texts[i][1] > match_len:
            break
        if texts[i][1] == match_len:
            match_idxs.append(i)
    # Try each candidate; the first complete path found wins.
    for text_idx in match_idxs:
        flag, seq = is_perfect_match(
            name, texts, next_start, match_len + len(texts[text_idx][0])
        )
        if flag:
            return True, [text_idx] + seq
    return False, []


def get_area(contour):
    """Polygon area of an OCR contour, given as a sequence of (x, y) points."""
    return Polygon(contour).area


def get_bounding_box(contour):
    """Axis-aligned bounding box of a contour.

    Returns:
        (top_left, bottom_right): per-axis minima and maxima, each as a list.
    """
    points = np.asarray(contour)
    return points.min(axis=0).tolist(), points.max(axis=0).tolist()

    

def find_all(src: str, template: str):
    """Find the start indices of all non-overlapping occurrences of *template* in *src*.

    Args:
        src: string to search in.
        template: substring to look for.

    Returns:
        list[int]: start offsets, scanned left to right without overlap.
    """
    if not template:
        # str.find("") always returns the current offset, so the original
        # loop never advanced and spun forever on an empty template.
        return []
    result = []
    start = 0
    while True:
        offset = src.find(template, start)
        if offset == -1:
            break
        result.append(offset)
        start = offset + len(template)
    return result


# Handles all cases; returns the match distance and the rankorder
def defective_match(msg: dict):
    """Heuristically try every stitching order and keep the one closest to ``name``.

    Returns:
        dist (int): edit distance between the stitched string and ``name``
            (0 when a perfect stitching exists).
        rankorder (list[int]): stitching order of each text, -1 for unused texts.
    """
    perfect, rankorder = is_correct_text(msg)
    if perfect:
        return 0, rankorder
    # No exact stitching exists: fall back to fuzzy matching.
    name = msg["name"]
    texts = [entry["text"] for entry in msg["texts"]]
    matches = get_matches(name, texts)
    candidate_paths = get_all_possible_paths(matches)
    # Keep the stitching with the smallest edit distance.
    best_path, best_dist = get_min_cost_path(candidate_paths, matches, texts, name)
    # best_path holds indices into `texts`, in stitching order.
    rankorder = [-1] * len(texts)
    for order, text_idx in enumerate(best_path):
        rankorder[text_idx] = order
    return best_dist, rankorder


def get_matches(name: str, texts: list[str]):
    """Fuzzy-match every OCR text against ``name``.

    Returns:
        result (list[tuple]): one (match_start, match_end, text_idx) per fuzzy hit.
    """
    result = []
    for idx, text in enumerate(texts):
        # Tolerate a few OCR errors, but never more than half the text length.
        max_dist = min(4, len(text) // 2)
        for match in find_near_matches(text, name, max_l_dist=max_dist):
            result.append((match.start, match.end, idx))
    return result

        
def build_search_map(matches: list[tuple]):
    """For every match, list the matches that could legally follow it.

    Match j may follow match i when it comes from a different text and both
    its start and its end lie strictly to the right of i's (this also rules
    out j == i, so no separate check is needed).

    Returns:
        search_map (dict): match index -> list of successor match indices.
    """
    successors = {}
    for i, (i_start, i_end, i_text) in enumerate(matches):
        successors[i] = [
            j
            for j, (j_start, j_end, j_text) in enumerate(matches)
            if j_text != i_text and j_start > i_start and j_end > i_end
        ]
    return successors


def get_all_possible_paths(matches: list[tuple]):
    """Enumerate every maximal stitching path through the match graph.

    Each path starts at some match and is repeatedly extended with a
    successor not already on the path; once no extension is possible the
    path is recorded.

    Returns:
        paths (list[list[int]]): each path is a list of match indices.
    """
    search_map = build_search_map(matches)
    paths = []

    def extend(path):
        last = path[-1]
        candidates = [nxt for nxt in search_map[last] if nxt not in path]
        if not candidates:
            # Dead end: the path is maximal, record it.
            paths.append(path)
            return
        for nxt in candidates:
            extend(path + [nxt])

    for start_node in range(len(matches)):
        extend([start_node])
    return paths


def get_min_cost_path(
    paths: list[list[int]],
    matches: list[tuple[int, int, int]],
    texts: list[str],
    name: str
) -> tuple[list[int], int]:
    """Pick the stitching path whose concatenation is closest to ``name``.

    Args:
        paths: candidate paths, each a list of indices into ``matches``.
        matches: (match_start, match_end, text_idx) triples.
        texts: OCR strings; ``matches[i][2]`` indexes into this list.
        name: target POI name.

    Returns:
        min_path (list[int]): indices into ``texts`` (not ``matches``) of the
            best path; empty when ``paths`` is empty.
        min_dist (int): Levenshtein distance of that stitching to ``name``;
            ``len(name) + 100`` when no path was evaluated.

    Note: the original return annotation claimed ``list[int]`` but the
    function has always returned a 2-tuple; the annotation is corrected here.
    """
    min_path: list[int] = []
    min_dist = len(name) + 100  # sentinel larger than any achievable distance

    for path in paths:
        text_indices = [matches[idx][2] for idx in path]
        candidate = "".join(texts[idx] for idx in text_indices)
        dist = Levenshtein.distance(candidate, name)
        if dist < min_dist:  # strict '<' keeps the earliest best path on ties
            min_dist = dist
            min_path = text_indices
    return min_path, min_dist


