from collections import deque
from utils.database_table import Similarity
from utils.basic_function import *
from prepocessing.remove_pseudo_nodes import RemovePseudoNodesTool
from shapely.geometry import CAP_STYLE, JOIN_STYLE


class MatchTool(PreprocessingTool):
    """Match remote-sensing (RS) road segments against reported (RP) road
    segments and post-process the matches.

    An RS segment is matched to an RP segment based on four similarity
    measures: buffer-overlap ratio, direction angle, Hausdorff distance and
    number of crossings. After matching, segments assigned to the same RP
    road are merged, forks are pruned, and geometry direction is aligned
    with the RP road.

    NOTE(review): relies on a module-level DB connection ``conn`` and helpers
    (``cal_dist``, ``get_substring``, ``reverse_linestring``,
    ``CLUSTER_TOLERANCE_R``, ``HAUSDORFF_DIST_MAX``) supplied by the star
    import of ``utils.basic_function``.
    """

    def __init__(self, rs_table, buff_dist_min, buff_dist_max, step, ldbm_col):
        """
        Args:
            rs_table: remote-sensing road table wrapper (passed to the base
                class; exposed as ``self.table``).
            buff_dist_min: buffer distance used for candidate selection and
                buffer-overlap similarity.
            buff_dist_max: maximum buffer distance (stored; not used directly
                in this class).
            step: buffer-distance step (stored; not used directly here).
            ldbm_col: id column of the reported-network road segment.
        """
        super(MatchTool, self).__init__(rs_table)
        self.buff_dist_min = buff_dist_min
        self.buff_dist_max = buff_dist_max
        self.step = step
        self.ldbm_col = ldbm_col  # id column of the reported-network segment

    def match_table_reverse(self, rp_table):
        """Match every nearby RS segment against the reported table ``rp_table``.

        Adds the match/similarity columns to the RS table, pre-selects RS
        segments intersecting a buffer around any RP road, then runs the
        per-segment matching workflow.
        """
        # Add the matched-road id and similarity columns (nulled out).
        self.table.add_column(self.table.match_road, set_null=True)
        self.table.add_column(self.table.buffer_similarity, set_null=True)
        self.table.add_column(self.table.dir_similarity, set_null=True)
        self.table.add_column(self.table.hausdorff_dist, set_null=True)
        self.table.add_column(self.table.cross_num, set_null=True)

        # Pre-select RS segments that could possibly match the reported network:
        # any RS geometry intersecting a buff_dist_min buffer around an RP road.
        rs_pk_list = pd.read_sql(f"""
            select distinct rs.{self.table.pk}
            from {self.table.name} rs, {rp_table.name} rp
            where
            st_intersects(
                rs.{self.table.geom},
                st_buffer(
                    rp.{rp_table.geom},
                    {self.buff_dist_min}
                )
            )
        """, conn)[self.table.pk].values.tolist()
        self.match_reverse_workflow(rs_pk_list, rp_table)

    def match_reverse_workflow(self, rs_pk_list, rp_table):
        """Match each RS segment in ``rs_pk_list`` and persist the best result.

        Args:
            rs_pk_list: primary keys of candidate RS segments.
            rp_table: reported road table wrapper.
        """
        for rs_pk in rs_pk_list:
            print(rs_pk)
            rs_geom = self.table.select_by_attr(self.table.pk, rs_pk, select_cols=[self.table.geom]).loc[0, self.table.geom]
            match_similarity = self.match_reverse(rs_pk, rs_geom, rp_table)
            if match_similarity is not None:
                # Write the winning match and its similarity values back.
                attr_dict = match_similarity.similarity_to_dict()
                self.table.update_attr(self.table.pk, rs_pk, attr_dict)

    def match_by_topology(self, rp_table, rp_pk):
        """Second-pass match of RS segments to reported road ``rp_pk`` by topology.

        1. Select RS candidates within ``buff_dist_min`` of the RP road.
        2. Keep only the still-unmatched candidates.
        3. If both endpoints of an unmatched candidate each connect to exactly
           one RS segment already matched to this RP road, match the candidate
           to it as well. (Exactly one per end; ring structures typically
           yield [2, 2] and are rejected.)

        Args:
            rp_table: reported road table wrapper.
            rp_pk: primary key of the reported road.
        """
        sql = f"""
            select {self.table.pk} from {self.table.name} 
            where ST_Dwithin({self.table.geom}, 
                (select {rp_table.geom} from {rp_table.name} where {rp_table.pk}={rp_pk}), 
                {self.buff_dist_min}) 
            and {self.table.match_road} is null"""
        rs_list = pd.read_sql(sql, conn)[self.table.pk].values

        for rs_pk in rs_list:
            st_neighbor, ed_neighbor = self.get_matched_neighbor(rs_pk, rp_pk)

            if st_neighbor.shape[0] == 1 and ed_neighbor.shape[0] == 1:  # rings usually give [2, 2]
                print("第二次匹配：", rp_pk, rs_pk)
                self.table.update_attr(self.table.pk, rs_pk, {self.table.match_road: rp_pk})

    def try_merge(self, rp_pk, rp_geom):
        """Try to merge all RS segments matched to the same reported road.

        If the merge leaves exactly one RS geometry, its direction is flipped
        (when needed) to agree with the reported road.

        Args:
            rp_pk: reported-road primary key.
            rp_geom: reported-road geometry.

        Returns:
            True if the match collapsed to a single RS geometry, else False.
        """
        # 1. Remove pseudo nodes if needed, then attempt the merge.
        match_df = self.table.select_by_attr(self.table.match_road, rp_pk)

        if match_df.shape[0] > 1:
            # 1.a Fast path: plain shapely linemerge. Fall back to pseudo-node
            #     removal when tolerance gaps prevent a clean merge.
            merged = linemerge(list(match_df[self.table.geom]))
            if isinstance(merged, LineString):
                with conn:  # merge succeeded: delete originals, insert the merged line
                    for idx in match_df.index:
                        pk = match_df.loc[idx, self.table.pk]
                        self.table.delete_by_attr(self.table.pk, pk, auto_commit=False)
                    self.table.insert({self.table.geom: merged, self.table.match_road: rp_pk})
            else:  # 1.b remove pseudo nodes within the cluster tolerance
                self.remove_match_pseudo_nodes_workflow(rp_pk, CLUSTER_TOLERANCE_R)

        # 2. Recompute similarities against the (possibly merged) geometry.
        self.recompute_match_similarity(rp_pk, rp_geom)

        match_df = self.table.select_by_attr(self.table.match_road, rp_pk)
        if len(match_df) == 1:
            # 3. Align direction: flip the RS line if its end is closer to
            #    the RP start point than its start is.
            QD = Point(rp_geom.coords[0])
            rs_geom = match_df[self.table.geom].iloc[0]
            rs_pk = match_df[self.table.pk].iloc[0]
            st_dist = cal_dist(QD.coords[0], rs_geom.coords[0])
            ed_dist = cal_dist(QD.coords[0], rs_geom.coords[-1])
            if ed_dist < st_dist:
                new_geom = reverse_linestring(rs_geom)
                self.table.update_attr(self.table.pk, rs_pk, {self.table.geom: new_geom}, auto_commit=True)
            return True
        else:
            return False

    def match_postprocessing(self, rp_table):
        """Post-process matches for every reported road, in three escalating steps:

        1. Try a direct merge.
        2. Add RS segments whose both ends touch the same reported road
           (topology pass), then retry the merge.
        3. Rebuild along the reported road's direction, pruning forks, then merge.

        Args:
            rp_table: reported road table wrapper.
        """
        match_road_list = pd.read_sql(f"select distinct {self.table.match_road} from {self.table.name} "
                                f"where {self.table.match_road} is not null order by {self.table.match_road} asc",
                                      conn)[self.table.match_road].values
        for match_road in match_road_list:
            print(match_road)

            rp_geom = rp_table.select_by_attr(self.ldbm_col, match_road)[self.table.geom].iloc[0]

            # 1. Direct merge attempt.
            flag = self.try_merge(match_road, rp_geom)
            if flag:
                continue

            # 2. Supplement via topology, then retry.
            self.match_by_topology(rp_table, match_road)
            flag = self.try_merge(match_road, rp_geom)
            if flag:
                continue

            # 3. Walk from the RP start point, drop mismatched forks, merge.
            self.remove_multi_match(match_road, rp_geom)

    def get_matched_neighbor(self, rs_pk, rp_pk):
        """Find segments adjacent to ``rs_pk`` that are matched to ``rp_pk``.

        Adjacency is tested separately at the start and end point of the RS
        geometry, within ``CLUSTER_TOLERANCE_R``; the segment itself is excluded.

        Args:
            rs_pk: RS segment primary key.
            rp_pk: reported-road primary key.

        Returns:
            (st_neighbor, ed_neighbor) DataFrames of neighbours at each end.
        """

        rs_geom = self.table.select_by_attr(self.table.pk, rs_pk)[self.table.geom].iloc[0]
        st_pt = rs_geom.coords[0]
        ed_pt = rs_geom.coords[-1]
        # start end (only within the same rp_pk, excluding the segment itself)
        st_neighbor = self.table.buffer_nodes(Point(st_pt), CLUSTER_TOLERANCE_R,
                                              and_where=f"and {self.table.match_road}={rp_pk} and {self.table.pk} != {rs_pk}")
        # end point
        ed_neighbor = self.table.buffer_nodes(Point(ed_pt), CLUSTER_TOLERANCE_R,
                                              and_where=f"and {self.table.match_road}={rp_pk} and {self.table.pk} != {rs_pk}")
        return st_neighbor, ed_neighbor

    def remove_multi_match(self, rp_pk, rp_geom):
        """Walk along the reported road, chaining matched RS segments and
        pruning forks (lower buffer_similarity * length loses its match).
        Chains of more than one segment are merged into a single inserted line.

        Args:
            rp_pk: reported-road primary key.
            rp_geom: reported-road geometry.
        """
        # Start the walk at the reported road's first vertex.
        QD = Point(rp_geom.coords[0])
        next_coord = QD.coords[0]
        match_df = self.table.select_by_attr(self.table.match_road, rp_pk)
        loop_count = 0
        pk_list_all = []
        pk_list = []
        coords_list = []
        coords_list_all = []
        while not match_df.empty:
            match_df["dist"] = None
            match_df["nearest_end"] = None
            # Rank the remaining RS segments by distance of their nearer
            # endpoint to next_coord.
            for idx in match_df.index:
                rs_geom = match_df.loc[idx, self.table.geom]
                st_dist = cal_dist(next_coord, rs_geom.coords[0])
                ed_dist = cal_dist(next_coord, rs_geom.coords[-1])
                match_df.loc[idx, "dist"] = min(st_dist, ed_dist)
                if st_dist < ed_dist:
                    match_df.loc[idx, "nearest_end"] = "st"
                else:
                    match_df.loc[idx, "nearest_end"] = "ed"

            nearest_dist = match_df["dist"].min() + CLUSTER_TOLERANCE_R  # tolerance
            if loop_count > 0 and nearest_dist > 1:  # gap in the chain (not enforced for the first piece)
                pk_list_all.append(pk_list)  # close off the previous chain
                coords_list_all.append(coords_list)
                loop_count = 0  # start a new chain; next_coord is NOT reset
                pk_list = []
                coords_list = []
                continue

            loop_count += 1
            next_df = match_df[match_df["dist"] <= nearest_dist].copy()

            # Unique continuation.
            if next_df.shape[0] == 1:
                nearest_idx = next_df.index[0]
                nearest_end = next_df["nearest_end"].iloc[0]
                nearest_geom = match_df.loc[nearest_idx, self.table.geom]
                new_coords = list(nearest_geom.coords)
                if nearest_end == "ed":
                    new_coords.reverse()
                # enqueue
                pk_list.append(match_df.loc[nearest_idx, self.table.pk])
                coords_list = coords_list + new_coords
                # dequeue from the candidate pool
                match_df = match_df[match_df.index != nearest_idx]

            # Fork: keep the highest-scoring branch, unmatch the rest.
            else:
                next_df["score"] = next_df[self.table.buffer_similarity] * next_df[self.table.length]
                max_score = next_df["score"].max()
                for idx in next_df.index:
                    pk = next_df.loc[idx, self.table.pk]
                    if next_df.loc[idx, "score"] < max_score:
                        init_cols = {self.table.match_road: None}
                        self.table.update_attr(self.table.pk, pk, init_cols)
                    else:
                        new_coords = list(next_df.loc[idx, self.table.geom].coords)
                        if next_df.loc[idx, "nearest_end"] == "ed":
                            new_coords.reverse()
                        # enqueue
                        pk_list.append(pk)
                        coords_list = coords_list + new_coords
                    # dequeue
                    match_df = match_df[match_df.index != idx]
            next_coord = coords_list[-1]
        # end while

        if len(pk_list) > 0:
            pk_list_all.append(pk_list)
            coords_list_all.append(coords_list)

        for i in range(len(pk_list_all)):
            pk_list = pk_list_all[i]
            coords_list = coords_list_all[i]
            if len(pk_list) > 1:
                with conn:
                    merged = LineString(coords_list)
                    match_similarity = self.compute_similarities(rp_pk, merged, rp_geom)
                    attr_dict = match_similarity.similarity_to_dict()
                    attr_dict[self.table.geom] = merged
                    self.table.insert(attr_dict)
                    for pk in pk_list:
                        self.table.delete_by_attr(self.table.pk, pk, auto_commit=False)

    def remove_match_pseudo_nodes_workflow(self, rp_pk, tolerance):
        """Remove pseudo nodes among the RS segments matched to ``rp_pk``.

        Differences from RemovePseudoNodesTool's own workflow:
        1. The initial pk_list contains only the segments matched to rp_pk.
        2. The node degree (len(intersect_pk_list)) counts only segments
           matched to the same rp_pk.

        Args:
            rp_pk: reported-road primary key.
            tolerance: endpoint clustering tolerance.
        """
        pk_list = self.table.select_by_attr(self.table.match_road, rp_pk, select_cols=[self.table.pk])[self.table.pk].values.tolist()
        rt = RemovePseudoNodesTool(self.table, CLUSTER_TOLERANCE_R)
        rt.set_pk_list(deque(pk_list))
        if len(rt.pk_list) == 1:
            return
        while len(rt.pk_list) != 0:
            pk = rt.pk_list.popleft()
            print(pk)
            geom = self.table.select_by_attr(self.table.pk, pk).loc[0, self.table.geom]
            st_coord = geom.coords[0]
            ed_coord = geom.coords[-1]
            intersect_pk_list = self.table.buffer_nodes(Point(st_coord), tolerance,
                                                        and_where=f"and {self.table.pk}!={pk} and {self.table.match_road}='{rp_pk}'")[self.table.pk].values
            is_modified = rt.remove_pesudo_nodes(pk, intersect_pk_list, {self.table.match_road: rp_pk})
            if not is_modified:  # if pk was already modified, the merged shape was re-queued and pk deleted, so skip the end-point pass
                intersect_pk_list = self.table.buffer_nodes(Point(ed_coord), tolerance,
                                                            and_where=f"and {self.table.pk}!={pk} and {self.table.match_road}='{rp_pk}'")[self.table.pk].values
                rt.remove_pesudo_nodes(pk, intersect_pk_list, {self.table.match_road: rp_pk})

    def recompute_match_similarity(self, rp_pk, rp_geom):
        """Recompute and persist similarity values for every RS segment
        currently matched to ``rp_pk``.

        Args:
            rp_pk: reported-road primary key.
            rp_geom: reported-road geometry.
        """
        pk_list = self.table.select_by_attr(self.table.match_road, rp_pk, select_cols=[self.table.pk])[self.table.pk].values.tolist()
        for pk in pk_list:
            rs_geom = self.table.select_by_attr(self.table.pk, pk, select_cols=[self.table.geom]).loc[0, self.table.geom]
            match_similarity = self.compute_similarities(rp_pk, rs_geom, rp_geom)
            attr_dict = match_similarity.similarity_to_dict()
            self.table.update_attr(self.table.pk, pk, attr_dict, auto_commit=True)

    def match_reverse(self, rs_pk, rs_geom, rp_table):
        """Pick the best reported-road match for one RS segment.

        Candidates are RP roads intersecting a buffer around the RS geometry.
        A candidate qualifies when buffer similarity is high enough (possibly
        helped by Hausdorff/direction thresholds) or it crosses the RS line
        more than 3 times; among qualifiers, the highest buffer similarity wins.

        Args:
            rs_pk: RS segment primary key.
            rs_geom: RS segment geometry.
            rp_table: reported road table wrapper.

        Returns:
            The winning Similarity object, or None when nothing qualifies.
        """
        # RS selects RP.
        rp_gdf = rp_table.buffer_intersect_by_polygon(rs_geom.buffer(self.buff_dist_min))
        # Keep the candidate with the highest similarity above the thresholds.
        max_buff_overlay = -1
        match_similarity = None
        for idx in rp_gdf.index:
            rp_geom = rp_gdf.loc[idx, rp_table.geom]
            rp_pk = rp_gdf.loc[idx, rp_table.pk]
            similarity = self.compute_similarities(rp_pk, rs_geom, rp_geom)

            # Persist similarity values for analysing non-matches; the match
            # id itself is withheld. TODO: remove once analysis is done.
            attr_dict = similarity.similarity_to_dict()
            attr_dict.pop(self.table.match_road)
            self.table.update_attr(self.table.pk, rs_pk, attr_dict)

            flag = False

            if similarity.buffer_similarity > 0.7:
                flag = True
            elif 0.5 < similarity.buffer_similarity <= 0.7:
                if similarity.hausdorff_dist <= 60 and similarity.dir_similarity <= 10:
                    flag = True
            if similarity.cross_num > 3:  # crosses many times
                flag = True
            if flag:
                if similarity.buffer_similarity > max_buff_overlay:
                    max_buff_overlay = similarity.buffer_similarity
                    match_similarity = similarity
        return match_similarity

    @staticmethod
    def compute_buffer_similarity(rp_geom, rs_geom, buff_dist):
        """Ratio of the buffers' intersection area to the RS buffer's area."""
        rp_buff = rp_geom.buffer(buff_dist, cap_style=CAP_STYLE.square, join_style=JOIN_STYLE.round)
        rs_buff = rs_geom.buffer(buff_dist, cap_style=CAP_STYLE.square, join_style=JOIN_STYLE.round)

        intersect_area = rp_buff.intersection(rs_buff).area

        return intersect_area / rs_buff.area

    @staticmethod
    def compute_direction_similarity(geom1, geom2):
        """Acute angle, in degrees (0-90), between the end-to-end chords of
        two linestrings."""
        a = np.array([geom1.coords[-1][0] - geom1.coords[0][0], geom1.coords[-1][1] - geom1.coords[0][1]])
        b = np.array([geom2.coords[-1][0] - geom2.coords[0][0], geom2.coords[-1][1] - geom2.coords[0][1]])
        cos = a.dot(b) / max((np.linalg.norm(a) * np.linalg.norm(b)), 1e-11)
        # Clamp |cos| to 1.0: floating-point error can push it slightly above
        # 1 for (anti)parallel chords, which would make arccos return NaN.
        ang = np.arccos(min(np.abs(cos), 1.0)) / np.pi * 180  # acute angle; |cos| is in [0, 1]
        return ang

    @staticmethod
    def compute_distance(geom1, geom2):
        """Shortest distance between the two geometries."""
        return geom1.distance(geom2)

    @staticmethod
    def compute_hausdorff_distance(geom_rs, geom_rp):
        """Hausdorff distance; HAUSDORFF_DIST_MAX when the RP clip is empty."""
        if geom_rp.is_empty:
            h_distance = HAUSDORFF_DIST_MAX
        else:
            h_distance = geom_rs.hausdorff_distance(geom_rp)
        return h_distance

    @staticmethod
    def compute_cross_num(geom1, geom2):
        """Number of point crossings between the two geometries; 0 when the
        intersection is empty or not point-like."""
        result = geom1.intersection(geom2)
        # Use geom_type / .geoms instead of the deprecated .type and
        # len(multi_geom), which were removed in Shapely 2.x.
        if result.geom_type == 'Point':
            return 1
        elif result.geom_type == 'MultiPoint':
            return len(result.geoms)
        else:
            return 0

    def compute_similarities(self, rp_pk, rs_geom, rp_geom):
        """Compute all similarity measures between an RS segment and the RP
        road, clipping the RP geometry to the RS segment's extent first.

        Buffering the whole RP line would distort shapes, so the RP geometry
        is virtually split at the points nearest the RS endpoints instead
        (usually several RS segments map to one RP road).

        Args:
            rp_pk: reported-road primary key.
            rs_geom: RS segment geometry.
            rp_geom: reported-road geometry.

        Returns:
            A populated Similarity record.
        """
        rp_geom_clip = get_substring(rp_geom, rs_geom.coords[0], rs_geom.coords[-1])
        dir_similarity = self.compute_direction_similarity(rp_geom_clip, rs_geom)

        buffer_similarity = self.compute_buffer_similarity(rp_geom_clip, rs_geom, self.buff_dist_min)
        h_distance = self.compute_hausdorff_distance(rs_geom, rp_geom_clip)  # PostGIS offers the same, optionally densified
        cross_num = self.compute_cross_num(rs_geom, rp_geom_clip)

        similarity = Similarity(
            rp_pk,
            rs_geom,
            buffer_similarity,
            dir_similarity,
            h_distance,
            cross_num
        )
        return similarity
