import numpy as np
import geopandas as geopd
import pandas as pd
import time
from datetime import timedelta

from map_matching.CalModule import GPSInfo
import map_matching.CalModule as CalModule
from settings import *


class Segment:
    """A candidate road segment together with its elevated-road score."""

    def __init__(self, rid, snodeid, enodeid, rscore):
        """
        Parameters
        ----------
        rid: id of road segment
        snodeid: start node id of segment
        enodeid: end node id of segment
        rscore: svm predicted probability of being elevated
        """
        self.rid = rid
        self.snodeid = snodeid
        self.enodeid = enodeid
        self.rscore = rscore

    def copy(self):
        """Return a new Segment carrying the same field values."""
        return Segment(self.rid, self.snodeid, self.enodeid, self.rscore)


class IAMM:
    """Image-Aided Map Matching.

    Matches raw GPS points to road segments stored in a PostGIS table.
    Every candidate segment within ~50 m of the point is scored by
    distance, heading agreement, topological adjacency to the previous
    match and (optionally) an SVM road-type classification of a
    street-level image; the highest-scoring candidate wins.
    """

    # database connection and road-segment table name (set in __init__)
    conn = None
    datatable = None

    # scoring constants
    CONST_b = 0.1  # adjacency score for candidates sharing no node with the previous match
    CONST_k = 10   # steepness of the exponential direction score
    CONST_search_threshold = 0.00045  # candidate search radius in degrees (~50 meters)

    # classifier and resampling interval (seconds)
    svc = None
    sample_rate = None

    # per-trajectory state
    lastdf = None
    thisdf = None
    lastpoint = None
    previous_match_seg = None  # instance-local Segment, set in __init__

    def __init__(self, conn, datatable, svc, sigma, use_img=True, img_dir=""):
        """
        Parameters
        ----------
        conn: open database connection to the PostGIS road network
        svc: classifier exposing predict_proba(); class order assumed
             (normal, elevated, tunnel) per get_roadtype_svm
        sigma: GPS noise scale used by the Gaussian distance score
        use_img: whether to include the image-based road-type score
        img_dir: directory prefix prepended to per-point image paths
        """
        self.conn = conn
        self.datatable = datatable
        self.svc = svc
        self.use_img = use_img
        self.img_dir = img_dir
        self.sigma = sigma
        # FIX: keep the match state per instance; a class-level Segment
        # here would be shared (and mutated) across all IAMM instances.
        self.previous_match_seg = Segment(None, None, None, None)

    def set_sample_rate(self, new_rate):
        """Set the resampling interval (seconds) used by map_matching()."""
        self.sample_rate = new_rate

    def get_roadtype_svm(self, gps_info):
        """Classify the road type from the point's street-level image.

        Returns the first two class probabilities from the SVM (order
        assumed: normal, elevated).  Falls back to the previous match's
        rscore when classification fails or the image is recognised as
        another type (e.g. tunnel).  NOTE(review): on the very first
        point that fallback is None — confirm the first image of a
        trajectory always classifies successfully.
        """
        try:
            img_for_svm = CalModule.image_processing(gps_info.img_path)
            r_proba = self.svc.predict_proba(img_for_svm)  # normal, elevated, tunnel
        except Exception as e:
            # best-effort: reuse the previous result rather than abort the match
            print(gps_info.img_path + "fail to classify")
            print(str(e))
            return self.previous_match_seg.rscore
        if np.argmax(r_proba) == 2:  # other type (eg. tunnel)
            return self.previous_match_seg.rscore
        return r_proba[0, :2]

    def compute_adj_index(self, series):
        """Return 1 if the candidate shares a node with the previous
        matched segment, otherwise the penalty constant CONST_b."""
        prev = self.previous_match_seg
        shares_node = (series['snodeid'] in (prev.snodeid, prev.enodeid)
                       or series['enodeid'] in (prev.snodeid, prev.enodeid))
        return 1 if shares_node else self.CONST_b

    def match(self, gps_info):
        """Match one GPS point to the most likely candidate segment.

        Stores the winner in previous_match_seg and returns
        (projected lon, projected lat, segment id), or None when no
        candidate lies within the search radius — callers that unpack
        the result must handle that case.
        """
        # NOTE(review): values are interpolated straight into the SQL;
        # acceptable only because lon/lat come from numeric GPS fields.
        sql_str = f"""
        select id, snodeid, enodeid, rang, rtype,direction,geom from {self.datatable} where 
        st_intersects(
            geom,
            st_buffer(
                st_geomFromText(
                    'Point({gps_info.lon} {gps_info.lat})', {SRID}
                ), 
                {self.CONST_search_threshold}
            )
        )
        """
        # FIX: use the connection passed to __init__, not the global `conn`
        df = geopd.read_postgis(sql_str, self.conn, geom_col="geom")
        n = df.shape[0]

        if n == 0:  # find no candidates
            print("fail to find candidates")
            return None

        # per-candidate scoring arrays
        adj_score = np.ones(n)
        direction_score = np.ones(n)
        distance_score = np.ones(n)
        dist_score = np.ones(n)  # raw projection distances (kept for inspection)
        roadtype_score = np.ones(n)
        # NOTE(review): usage below treats index 0 as the normal-road
        # probability and index 1 as elevated, matching the
        # predict_proba order — the original comment claimed the
        # opposite; confirm against the classifier's class order.
        roadtype_default = [1, 1]

        # projection of the raw GPS point onto each candidate
        lon_proj = np.ones(n)
        lat_proj = np.ones(n)

        if self.use_img and (df['rtype'] == 1).any():  # elevated segments among candidates
            try:
                roadtype_default = self.get_roadtype_svm(gps_info)
            except Exception as e:
                print(str(e))
                roadtype_default = self.previous_match_seg.rscore

        for i in range(n):
            # These could be vectorised/broadcast; kept as a loop for fair
            # runtime comparison across methods (original author's note).
            adj_score[i] = self.compute_adj_index(df.iloc[i])
            direction_score[i] = self.compute_direction_index2(gps_info, df.iloc[i])
            distance_score[i], dist_score[i], lon_proj[i], lat_proj[i] = \
                self.compute_distance_index(gps_info, list(df['geom'].iloc[i].coords))
            # road type score
            if df['rtype'].iloc[i] == 1:  # is elevated
                roadtype_score[i] = roadtype_default[1]
            else:  # is normal
                roadtype_score[i] = roadtype_default[0]

        score = distance_score * adj_score * direction_score
        if self.use_img:
            score *= roadtype_score

        match_index = np.argmax(score)

        # remember the winner for the next point's adjacency score
        match_seg = Segment(df['id'].iloc[match_index],
                            df['snodeid'].iloc[match_index],
                            df['enodeid'].iloc[match_index],
                            roadtype_default)
        self.previous_match_seg = match_seg
        return lon_proj[match_index], lat_proj[match_index], df['id'].iloc[match_index]

    def map_matching(self, trajectory_path, result_path, skip):
        """Match a whole trajectory CSV and write `rid,lon,lat,timestamp`
        rows to result_path.

        Points are resampled every self.sample_rate seconds; `skip`
        selects which point inside the first interval is kept first.
        Requires set_sample_rate() to have been called.
        """
        trajectory_df = pd.read_csv(trajectory_path, parse_dates=['timestamp'])
        # back-date the "last kept" time so the point at offset `skip`
        # becomes the first one processed
        sttime = trajectory_df['timestamp'].iloc[0] - timedelta(seconds=self.sample_rate - skip)
        with open(result_path, "w") as f:
            f.write('rid,lon,lat,timestamp\n')
            for i in range(len(trajectory_df)):
                lon = trajectory_df['lon'].iloc[i]
                lat = trajectory_df['lat'].iloc[i]
                p_ang = trajectory_df['pang'].iloc[i]
                img_path = self.img_dir + trajectory_df['relation'].iloc[i]
                gps = GPSInfo(lon, lat, p_ang, img_path=img_path)
                timestamp = trajectory_df['timestamp'].iloc[i]
                # .seconds assumes non-negative, sub-day gaps between
                # consecutive points — TODO confirm for all datasets
                if (timestamp - sttime).seconds < self.sample_rate:
                    continue
                sttime = timestamp
                print(timestamp)
                self.match(gps)
                f.write(f"{self.previous_match_seg.rid},{lon},{lat},{timestamp}\n")
                print(self.previous_match_seg.rid)
        print("done")

    def compute_distance_index(self, gps, pt_array):
        """Gaussian distance score of the GPS point w.r.t. a polyline.

        Returns (score, projection distance, projected lon, projected lat).
        """
        lon_proj, lat_proj, _, proj_dist, _ = CalModule.projection_pl(gps.lon, gps.lat, pt_array)
        distance_index = np.exp(-0.5 * (proj_dist / self.sigma) ** 2)
        return distance_index, proj_dist, lon_proj, lat_proj

    def compute_direction_index(self, gps, series):
        """Direction score using the segment's stored azimuth (`rang`),
        i.e. the azimuth from the polyline's start to end point.

        compute_direction_index2 gives similar results and is the
        theoretically sounder of the two (original author's note).
        """
        p_ang = gps.p_ang
        delta_azi = CalModule.d2r(series['rang'] - p_ang)
        cos_da = np.cos(delta_azi)
        # direction == '2' presumably marks a two-way segment — driving
        # against the stored azimuth is allowed, so flip the sign
        # instead of penalising (TODO confirm the encoding)
        if series['direction'] == '2' and cos_da < 0:
            direction_index = np.exp2(-cos_da * IAMM.CONST_k)
        else:
            direction_index = np.exp2(cos_da * IAMM.CONST_k)
        return direction_index

    def compute_direction_index2(self, gps, series):
        """Direction score using the azimuth of the polyline piece
        nearest to the GPS point (rather than the whole segment)."""
        lon = gps.lon
        lat = gps.lat
        p_ang = gps.p_ang
        pt_list = list(series["geom"].coords)
        n_pt = len(pt_list)
        direction = series['direction']
        # Scan every piece: even when the point projects onto one piece
        # it may still be closer to another, so no early exit.
        mini = 0
        mindist = float("inf")  # FIX: replaces the 999 magic sentinel (distances are in degrees)
        for i in range(0, n_pt - 1):
            _, _, proj_dist, _ = CalModule.projection(lon, lat, pt_list[i][0], pt_list[i][1],
                                                      pt_list[i + 1][0], pt_list[i + 1][1])
            if proj_dist < mindist:
                mindist = proj_dist
                mini = i
        # azimuth of the nearest piece (mini -> mini+1)
        rang = np.arctan2(pt_list[mini + 1][0] - pt_list[mini][0], pt_list[mini + 1][1] - pt_list[mini][1])
        da = CalModule.d2r(p_ang) - rang
        cos_da = np.cos(da)
        # same two-way handling as compute_direction_index; the
        # hard-coded 10 now uses CONST_k (same value) for consistency
        if direction == '2' and cos_da < 0:
            direction_index = np.exp2(-cos_da * IAMM.CONST_k)
        else:
            direction_index = np.exp2(cos_da * IAMM.CONST_k)
        return direction_index


class IAMM_for_yolo:  # single-point wrapper built for the YOLO pipeline
    """Thin wrapper around IAMM that matches one point at a time."""

    def __init__(self, conn, datatable, svc, img_dir, sigma):
        self.matcher = IAMM(conn, datatable, svc, sigma, use_img=True, img_dir=img_dir)
        self.matcher.set_sample_rate(1)

    def map_matching_for_yolo(self, lon, lat, p_ang, img_path):
        """Match a single GPS point and return its projection plus a
        road azimuth.

        The SVM re-reads the image from disk here because YOLO crops to
        a different size.  Returns (lon_proj, lat_proj, perpen_azi, rid).
        """
        info = GPSInfo(lon, lat, p_ang, img_path=img_path)
        lon_proj, lat_proj, rid = self.matcher.match(info)
        # Segment azimuth: perpendicular to the raw-point -> projection
        # line, flipped when needed so its angle to p_ang stays under 90
        # degrees (the projection code does not expose the angle itself,
        # and threading it out would touch too many call sites).
        proj_azi = CalModule.r2d(np.arctan2(lon - lon_proj, lat - lat_proj))
        perpen_azi = (proj_azi + 90 + 360) % 360
        delta_azi = abs(perpen_azi - p_ang)
        if 90 < delta_azi < 270:
            perpen_azi = (perpen_azi + 180 + 360) % 360
        return lon_proj, lat_proj, perpen_azi, rid


def main(interval):
    """Run one IAMM experiment at the given sampling interval (seconds)."""
    use_img = True
    # NOTE: dataset 1 ships no images, so use_img must be False when data_index = 1
    data_index = 0

    img_dir = img_roots[data_index]
    root = traj_roots[data_index]
    trajectory_data = root + trajectory_data_list[data_index]
    ground_truth = root + ground_truth_list[data_index]
    sigma = sigma_list[data_index]

    result_path = root + f"IAMM_{use_img}.txt"
    log_path = root + f"IAMM_{use_img}_log.txt"

    # Outer loop: repetitions for averaging the runtime; inner loop: which
    # point inside the interval to start from when interval > 1 s.  Both
    # are pinned to a single pass here.
    for iter_num in [1]:  # range(int(15 / interval))
        for skip in [1]:  # range(interval)
            started = time.time()
            matcher = IAMM(conn, datatable, svc_img, sigma, use_img=use_img, img_dir=img_dir)
            matcher.set_sample_rate(interval)
            matcher.map_matching(trajectory_data, result_path, skip)
            elapsed = time.time() - started  # wall-clock runtime of the run
            CalModule.score_label(result_path, ground_truth, interval, skip, log_path, elapsed, conn, datatable)


if __name__ == "__main__":
    # sampling intervals to evaluate: every second from 1 to 10, plus 15
    for interval in [*range(1, 11), 15]:
        main(interval)
