import numpy as np
import pandas as pd
import geopandas as geopd
import time
import scipy.stats
from datetime import timedelta
from collections import deque

import TopologyModule as TopologyModule
import CalModule as CalModule
from settings import *
from CalModule import GPSInfo


class OHMMmatching:
    """Online HMM (OHMM) map matching with a variable sliding window (VSW).

    For every GPS fix, candidate road segments are fetched from a PostGIS
    table, scored with emission and transition probabilities, and decoded
    with an online Viterbi pass. Matched edge ids are emitted as soon as all
    candidate back-chains converge on a single node, or when the sliding
    window reaches its maximum size (forced sub-optimal output).
    """

    R_earth = 6378137  # earth radius in meters

    # candidate search radius in degrees (~50 meters)
    search_threshold = 0.00045

    # lower bound on mean speed in the momentum function (prevents zero division)
    MIN_V = 1e-10

    def __init__(self, conn, datatable, svc, interval, sigma_g):
        """
        Parameters
        ----------
        conn: open connection to the road-network database
        datatable: name of the road-segment table
        svc: trained classifier exposing predict_proba; fuses the distance
             and momentum features into a transition probability
        interval: GPS sampling interval (seconds)
        sigma_g: standard deviation of the GPS error (meters), used by the
                 emission probability
        """
        self.conn = conn
        self.datatable = datatable
        self.svc = svc
        self.sample_rate = interval
        self.sigma_g = sigma_g

        # --- online Viterbi decode state ---
        self.last_score = None  # normalized candidate scores of the previous layer
        self.last_df = None     # candidate segments of the previous GPS point
        self.this_df = None     # candidate segments of the current GPS point
        self.lastpoint = None   # previous GPSInfo
        self.patharray = None   # path matrix between two points, shape (n_from, n_to)
        self.pre = {}   # backpointers: enodeid -> (last_edgeid, last_enodeid, patharray)
        self.tail = []  # tail of the sliding window: nodes added in the latest batch
        self.front = []  # edges adjacent to the convergence point; backtracking stops here

        # --- variable sliding window (VSW) ---
        self.v0 = {}  # velocity at the end of the previous path; key: rid, val: [[v_lon], [v_lat]]
        self.window_size = 1
        self.max_window_size = 5
        self.CP_queue = deque()          # queue of convergence points
        self.match_edge_queue = deque()  # queue of matched edge ids

    def get_candidates(self, gps):
        """Return the candidate segments within search_threshold of *gps* as a
        GeoDataFrame, or None when no segment is found."""
        sql_str = f"""
        select id, snodeid, enodeid, rang, rtype,direction,geom,SPDLMTE2S,SPDLMTS2E, length_deg from {self.datatable} where 
        st_intersects(
            geom,
            st_buffer(
                st_geomFromText(
                    'Point({gps.lon} {gps.lat})', {SRID}
                ), 
                {self.search_threshold}
            )
        )
        """
        # BUG FIX: use the injected connection (self.conn) instead of the
        # module-level global `conn`, so a matcher constructed with a
        # different connection actually queries through it.
        df = geopd.read_postgis(sql_str, self.conn, geom_col="geom")
        # df['snodeid'] = df['snodeid'].astype(str)
        # df['enodeid'] = df['enodeid'].astype(str)
        if df.shape[0] == 0:
            print(self.search_threshold, "fail")
            return None
        else:
            return df

    def init_first_point(self, firstgps):
        """Initialize the decode state from the first GPS fix."""
        df = self.get_candidates(firstgps)
        self.this_df = df
        self.last_score = self.compute_emissionP(firstgps)  # emission probabilities of all candidates, n_to
        self.last_df = df
        self.lastpoint = firstgps
        for i in range(df.shape[0]):
            snodeid = df.iloc[i]['snodeid']
            enodeid = df.iloc[i]['enodeid']
            self.tail.append(enodeid)
            self.pre[enodeid] = (df.iloc[i]['id'], snodeid, [])
            # seed v0 with the GPS speed projected along the segment azimuth
            azi_rad = CalModule.d2r(df.iloc[i]['rang'])
            self.v0[df.iloc[i]['id']] = np.vstack([firstgps.v * np.sin(azi_rad), firstgps.v * np.cos(azi_rad)])

    def OHMM(self, gps, interval, avgv):
        """One online-Viterbi step: score this point's candidates and extend
        the backpointer table.

        Parameters
        ----------
        gps: current GPSInfo
        interval: seconds elapsed since the previous matched point
        avgv: average speed over that interval
        """
        self.this_df = self.get_candidates(gps)
        if self.this_df is None or self.this_df.shape[0] == 0:
            return
        this_score = np.zeros((self.this_df.shape[0], 1))  # probability of every candidate, n_to*1
        emission_p = self.compute_emissionP(gps)  # emission probabilities of all candidates, n_to
        trans_p = self.compute_transitionP_OHMM(interval, avgv, gps)  # transition matrix, n_from * n_to
        self.tail = []
        for j in range(self.this_df.shape[0]):  # for each candidate
            temp = np.multiply(self.last_score.reshape(-1, ), trans_p[:, j].reshape(-1, ))  # previous layer * transition, n_from*1
            this_score[j, 0] = emission_p[j] * np.max(temp)  # emission of this layer's candidate
            preseg_index = np.argmax(temp)  # most likely predecessor of candidate j
            pre_edge_id = self.last_df.iloc[preseg_index]['id']
            pre_snodeid = self.last_df.iloc[preseg_index]['snodeid']
            this_snodeid = self.this_df.iloc[j]['snodeid']
            self.tail.append(this_snodeid)
            if not self.find_circle_in_pre(pre_snodeid, this_snodeid):  # no cycle
                self.pre[this_snodeid] = (pre_edge_id, pre_snodeid, self.patharray[preseg_index][j])
        self.last_score = this_score.reshape(-1, 1) / np.max(this_score)
        self.last_df = self.this_df
        self.lastpoint = gps

    def find_circle_in_pre(self, pre_snodeid, this_snodeid):
        """Return True when linking this_snodeid -> pre_snodeid would create a
        cycle in the backpointer chain, e.g.

        self-loop: '10916932': ('12831665', '10916932', [])
        cycle:     '5798531': ('12831665', '10916932', [])
                   '10916932': ('12831666', '5798531', [])
        """
        if pre_snodeid == this_snodeid:
            return True
        while 1:
            item = self.pre.get(pre_snodeid)
            if item is None:  # chain ended: no cycle
                return False
            if item[1] == this_snodeid:  # cycle found
                return True
            pre_snodeid = item[1]

    def find_convergence_point(self):
        """Return the node id all candidate back-chains converge to, or None
        when they have not converged yet. On success, slides the window
        forward (update_front) and prunes unreachable chains (update_pre)."""
        convid = None
        i = 0
        for each_tail in self.tail:
            snodeid = each_tail
            while self.pre.get(snodeid) and snodeid not in self.front:
                snodeid = self.pre.get(snodeid)[1]
            # first chain defines the tentative convergence node
            if i == 0:
                convid = snodeid
                if len(self.front) != 0 and convid not in self.front:  # len != 0: not the first CP
                    return None
            else:
                if snodeid != convid:  # chains do not converge
                    return None
            i += 1
        # loop finished normally: all candidates converge at the same node (convid)
        # slide the window forward, refresh front  # todo clean front after popping
        self.update_front(convid)
        # drop chains that cannot reach the convergence node
        self.update_pre(convid)
        return convid

    def update_front(self, conv_node_id):
        """Rebuild self.front with the nodes directly following the
        convergence node."""
        new_front = []
        for key, val in self.pre.items():
            if val[1] == conv_node_id:
                new_front.append(key)
        self.front = new_front

    def update_pre(self, conv_id):
        """Keep only backpointer chains that can reach conv_id; drop the rest."""
        new_pre = {}
        if self.pre.get(conv_id):  # keep conv_id's own entry first
            new_pre = {conv_id: self.pre[conv_id]}
        for key in self.pre.keys():
            chain = [key]
            if key in new_pre:  # already handled, skip
                continue
            item = self.pre.get(key)
            while item:
                if item[1] == conv_id or item[1] in new_pre.keys():  # the whole chain reaches conv_id; keep it all
                    for each in chain:
                        new_pre[each] = self.pre[each]
                    break
                chain.append(item[1])
                item = self.pre.get(item[1])
        self.pre = new_pre

    def online_viterbi_decode(self, exit_now=False):
        """Emit matched edge ids up to the newest convergence point.

        Returns a (possibly empty) ordered list of edge ids. With
        exit_now=True (last GPS point), or when the window reaches
        max_window_size, the current best candidate is forced out as a
        pseudo convergence point.
        """
        sol = []
        conv_node_id = self.find_convergence_point()  # do the newest backpointers meet at one node?
        if conv_node_id:  # converged
            self.window_size = np.max([self.window_size-1, 0])
            self.CP_queue.append(conv_node_id)
            if len(self.CP_queue) == 1:  # no previous CP yet
                return sol
            last_conv = self.CP_queue[-2]  # previous CP
        else:  # not converged
            if self.window_size >= self.max_window_size or exit_now:  # window full: output sub-optimal solution; or last point: forced output
                conv_node_id = self.last_df.iloc[np.argmax(self.last_score)]['snodeid']
                self.front = [self.last_df.iloc[np.argmax(self.last_score)]['enodeid']]
                self.window_size = np.max([self.window_size-1, 0])  # todo window_size-1 here looks questionable, kept as in the original
                self.CP_queue.append(conv_node_id)
                if len(self.CP_queue) == 1:  # no previous CP yet
                    return sol
                last_conv = self.CP_queue[-2]  # previous CP
            else:
                self.window_size += 1
                return sol
        # output the matching result
        if conv_node_id == last_conv:
            return sol
        if conv_node_id in self.pre.keys():
            # present in pre: walk the chain backwards
            item = self.pre.get(conv_node_id)
            while item:
                path = item[2].copy()  # patharray
                path.reverse()
                sol.extend(path)
                sol.append(item[0])  # edge whose enode is conv_node_id
                # pre: key:eid val:(edgeid, sid, patharray)
                item = self.pre.get(item[1])
            new_pre = {}
            for snodeid in self.pre.keys():
                edgeid = self.pre[snodeid][0]
                if edgeid not in sol:
                    new_pre[snodeid] = self.pre[snodeid]
            self.pre = new_pre
            sol.reverse()  # only this branch needs reversing
            # self.window_size = np.max([self.window_size - len(sol) + 1, 0])
        return sol

    def map_matching(self, trajectory_path, result_path, skip):
        """Match a whole trajectory file and stream matched edge ids to
        result_path.

        Parameters
        ----------
        trajectory_path: CSV with columns timestamp, lon, lat, pang, v
        result_path: output text file, one matched edge id per line
        skip: sub-sampling offset in seconds

        Returns
        -------
        Mean output latency in seconds.
        """
        self.last_df = None
        self.lastpoint = None
        trajectory_df = pd.read_csv(trajectory_path, parse_dates=['timestamp'])
        sumv = 0
        latency = []
        sttime = trajectory_df['timestamp'].iloc[0] - timedelta(seconds=self.sample_rate - skip)
        last_timestamp = sttime
        with open(result_path, "w") as f:
            f.write('rid\n')
            for i in range(len(trajectory_df)):
                lon = trajectory_df['lon'].iloc[i]
                lat = trajectory_df['lat'].iloc[i]
                pang = trajectory_df['pang'].iloc[i]
                v = trajectory_df['v'].iloc[i]
                gps = GPSInfo(lon, lat, pang, v=v)
                timestamp = trajectory_df['timestamp'].iloc[i]
                sumv += trajectory_df['v'].iloc[i]
                # NOTE(review): .seconds ignores the day component of a
                # timedelta; fine while consecutive fixes are < 1 day apart
                if (timestamp-sttime).seconds < self.sample_rate:
                    continue
                if self.last_df is None:  # initialize
                    self.init_first_point(gps)
                    sttime = timestamp
                    continue
                else:
                    interval = (timestamp - sttime).seconds
                    print(timestamp, lon, lat)
                    self.OHMM(gps, interval, sumv/interval)
                    sttime = timestamp
                    sumv = 0
                if i < len(trajectory_df)-1:  # not the last point: check for a convergence point
                    edgelist = self.online_viterbi_decode()
                else:  # last point: force output
                    edgelist = self.online_viterbi_decode(exit_now=True)
                if len(edgelist) != 0:
                    latency.append((timestamp-last_timestamp).seconds)
                    last_timestamp = timestamp
                    for edgeid in edgelist:
                        self.match_edge_queue.append(edgeid)
                        f.write(f"{edgeid}\n")
                        f.flush()
                print(edgelist)
            print("ave delay: ", np.mean(latency))
            print("done")
            return np.mean(latency)

    def compute_emissionP(self, gps):
        """Emission probability of *gps* for every candidate in this_df.

        Combines a Gaussian position likelihood integrated over the segment
        half-width with a speed-limit penalty factor S.
        """
        df = self.this_df
        ps = np.zeros(df.shape[0])
        for i in range(df.shape[0]):
            w = 22.5  # half width; todo the width values in the road data look odd, so one constant is used
            pt_list = list(df["geom"].iloc[i].coords)
            _, _, _, d_deg, _ = CalModule.projection_pl(gps.lon, gps.lat, pt_list)
            d = CalModule.d2r(d_deg) * self.R_earth

            # compute p(observation)
            # w: half-width of segment
            gaussian_norm = scipy.stats.norm(d, self.sigma_g)
            p = (gaussian_norm.cdf(w) - gaussian_norm.cdf(-w)) / 2 / w

            # penalty function S
            # vt: speed of car. vr: speed limit of segment
            # todo the speed values also look odd; assumed to be in units of 10 km/h
            if df['direction'].iloc[i] == '2' and CalModule.is_obtuse_angle(df['rang'].iloc[i]-gps.p_ang):
                vr = float(df['spdlmte2s'].iloc[i]) / 36
            else:
                vr = float(df['spdlmts2e'].iloc[i]) / 36
            vt = gps.v
            S = vr / (max(0, vt - vr) + vr)
            ps[i] = p * S
        return ps

    def distance_discrepancy_function(self, delta_t, avgv, thispoint):
        """Relative discrepancy between the travelled distance (delta_t *
        avgv) and each candidate path length.

        Returns (T, path_dict) with T of shape (n_from, n_to); also caches
        the path matrix in self.patharray.
        """
        d = CalModule.r2d(delta_t * avgv / self.R_earth)  # convert meters to degrees
        D, self.patharray, path_dict = TopologyModule.compute_path_length(self.lastpoint, thispoint, self.last_df,
                                                                          self.this_df, self.conn, self.datatable)
        T = np.abs(d - D) / D
        return T, path_dict

    def compute_transitionP_OHMM(self, delta_t, avgv, thispoint):
        """Transition probability matrix (n_from x n_to), fusing the distance
        discrepancy T and the momentum change M through the trained
        classifier."""
        T, path_dict = self.distance_discrepancy_function(delta_t, avgv, thispoint)  # T: nfrom * nto
        M = self.momentum_change_function(self.lastpoint.v, thispoint.v, path_dict)
        T_temp = 1/(1 + T)
        M_temp = 1/(1 + M)
        P = self.svc.predict_proba(np.hstack([T_temp.reshape(-1, 1), M_temp.reshape((-1, 1))]))[:, 1]
        transitionP = P.reshape(T.shape)
        # transitionP = np.multiply(1/(1 + T), 1/(1 + M))  # approximation used before the SVM is trained
        return transitionP

    def momentum_change_function(self, v_last, v_this, path_dict):
        """Momentum-change feature M for every from/to candidate pair.

        Parameters
        ----------
        v_last: speed magnitude at the previous point (scalar)
        v_this: speed magnitude at the current point (scalar)
        path_dict: {id: (length (degrees), azimuth (rad))}

        Returns
        -------
        M: ndarray of shape (n_from, n_to)
        """
        new_v0_dict = {}
        n_from = self.last_df.shape[0]  # number of candidate segments at the previous GPS point
        n_to = self.this_df.shape[0]  # number of candidate segments at this GPS point
        M = np.zeros((n_from, n_to))

        # collect every segment used by the previous leg into df_v0
        v0_path = []
        for i in range(n_from):
            for j in range(n_to):
                # v0_path.append(self.patharray[i][j][-1])
                v0_path.extend(self.patharray[i][j])
        v0_path = list(set(v0_path))  # de-duplicate
        v0_path_str = "','".join(v0_path)
        df_v0 = pd.read_sql(f"select id,rang,direction,length_deg from {self.datatable} where id in ('{v0_path_str}')",
                            self.conn)

        # every from/to combination
        for i in range(n_from):
            for j in range(n_to):
                path = self.patharray[i][j]
                n_segment = len(path)  # number of segments in this path
                # linearly interpolate speeds along the path
                if n_segment <= 2:  # with n_segment == 1, linspace(a, b, 1) == [a], which interpolates wrongly
                    v_arr = np.array([v_last, v_this])
                else:
                    v_arr = np.linspace(v_last, v_this, n_segment)
                # fetch azimuth and length per segment
                azi_arr = np.zeros(n_segment)
                l_arr = np.zeros(n_segment)
                for k in range(n_segment):
                    if path_dict.get(path[k]):
                        l_arr[k], azi_arr[k] = path_dict[path[k]]
                    else:
                        # BUG FIX: extract the scalar row explicitly; assigning
                        # a one-element Series into a float cell relies on
                        # deprecated implicit scalar coercion
                        row = df_v0[df_v0['id'] == path[k]].iloc[0]
                        l_arr[k] = row['length_deg']
                        azi_arr[k] = CalModule.d2r(row['rang'])
                diff_v_vector = self.compute_diff_v_vector(v_arr, azi_arr, path[-1], new_v0_dict, df_v0)
                mean_v = np.max([np.mean(v_arr), self.MIN_V])  # prevent zero division
                M[i, j] = np.sum(np.multiply(l_arr, diff_v_vector)) / np.sum(l_arr) / mean_v
        self.v0 = new_v0_dict
        return M

    def compute_diff_v_vector(self, v_arr, azi_rad, rid_v0, new_v0_dict, df_v0):
        """Per-segment speed-change magnitudes ||v_i - v_{i-1}||.

        Parameters
        ----------
        v_arr: speed magnitudes (scalars) v1..vk
        azi_rad: azimuths (-pi..pi) of the corresponding segments, a1..ak
        rid_v0: id of the last segment of the previous path
        new_v0_dict: output dict collecting end-of-path velocity vectors
        df_v0: segment attributes fetched for the previous leg

        Returns
        -------
        1-D array of ||v_i - v_{i-1}|| values.
        """
        # scalar speeds to 2D velocity vectors. v_vector: [v1, v2, ... vN]
        v_vector = np.vstack([v_arr * np.sin(azi_rad), v_arr * np.cos(azi_rad)])  # [[lon], [lat]], matching sin / cos
        # compute v0 and build v_pre_vector: [v0, v1, ... vN-1]
        if rid_v0 in self.v0.keys():  # v0 can be looked up directly
            v_pre_vector = np.hstack([self.v0[rid_v0], v_vector[:, :-1]])
        else:  # v0 must be computed
            last_v = self.lastpoint.v
            # NOTE(review): .index[0] is used positionally with .iloc below;
            # valid because read_sql yields a default RangeIndex
            ind = df_v0[df_v0['id'] == rid_v0].index[0]
            last_azi_rad = CalModule.d2r(df_v0['rang'].iloc[ind])
            if df_v0['direction'].iloc[ind] == '2':
                if CalModule.is_obtuse_angle(df_v0['rang'].iloc[ind] - self.lastpoint.p_ang):  # obtuse angle: opposite direction
                    last_azi_rad = CalModule.d2r(df_v0['rang'].iloc[ind] + 180)
            v_pre_vector = np.hstack([
                                        [[last_v * np.sin(last_azi_rad)],
                                         [last_v * np.cos(last_azi_rad)]],
                                        v_vector[:, :-1]
                                    ])

        diff_v_vector = v_vector - v_pre_vector
        new_v0_dict[rid_v0] = v_vector[:, -1].reshape(-1, 1)
        return np.linalg.norm(diff_v_vector, axis=0)


def main(interval, dataindex):
    """Run OHMM matching on one dataset at the given sampling interval and
    score the result against the ground truth."""
    base_dir = traj_roots[dataindex]
    gps_sigma = sigma_g_list[dataindex]
    traj_file = base_dir + trajectory_data_list[dataindex]
    truth_file = base_dir + ground_truth_list[dataindex]

    # output files
    result_file = base_dir + "OHMM.txt"
    log_path = base_dir + "OHMM_log.txt"

    for _iter in [1]:  # range(int(15 / interval)):
        for skip in [1]:  # range(interval):
            started = time.time()
            matcher = OHMMmatching(conn, datatable, svc_ohmm, interval, gps_sigma)
            avg_latency = matcher.map_matching(traj_file, result_file, skip)
            elapsed = time.time() - started
            CalModule.score_label(result_file, truth_file, interval, skip, log_path,
                                  elapsed, conn, datatable, latency=avg_latency)
            

if __name__ == "__main__":
    # sampling intervals to sweep: 1..10 seconds plus 15 seconds
    intervals = list(range(1, 11)) + [15]
    # for step in intervals:
    #     main(step, 0)
    for step in intervals:
        main(step, 1)

    # main(1, 0)

