# @author：zwt

# 对一整个数据文件做数据预处理

import pandas as pd
from 重复跳转检测及处理一条龙 import jumpTest
from 重复跳转检测及处理一条龙 import ppDeal
import warnings

warnings.filterwarnings("ignore")
import datetime


def Rmp_detect(twice_df, time_th, times_th, file_result):
    """Detect ping-pong (repeated-jump) anomalies for each trajectory id.

    Parameters
    ----------
    twice_df : pd.DataFrame
        Preprocessed trajectory data containing at least an 'id' column
        (plus the time/x/y columns jumpTest.JumpMain expects).
    time_th :
        Time threshold, forwarded unchanged to jumpTest.JumpMain.
    times_th : int
        Minimum number of points a trajectory must have to be analysed;
        also forwarded to jumpTest.JumpMain.
    file_result : str
        Output path for anomalies (currently unused — the per-group
        to_csv call is disabled; kept for interface compatibility).

    Returns
    -------
    pd.DataFrame
        All detected anomaly rows concatenated with a fresh 0..n-1 index;
        an empty DataFrame when nothing is detected.
    """
    # Collect per-id results in a list and concatenate once at the end:
    # DataFrame.append was removed in pandas 2.0 and was O(n^2) anyway.
    detected = []
    for _, group_df in twice_df.groupby('id'):
        # Skip trajectories too short to exhibit repeated jumps.
        if len(group_df) >= times_th:
            res_df = jumpTest.JumpMain(group_df, time_th, times_th)  # repeated-jump detection
            if len(res_df) > 0:  # avoid accumulating empty frames
                # res_df.to_csv(file_result, mode='a', index=0, header=0)
                detected.append(res_df)
    return_pp_df = pd.concat(detected, ignore_index=True) if detected else pd.DataFrame()
    print('----------------------检测完成！--------------------------------------')
    return return_pp_df


if __name__ == '__main__':
    # Input trajectory file and destination path for detected anomalies.
    file_in = r"D:\Desk\test\random1.csv"
    file_out = r"D:\Desk\test\random1_result.csv"
    df = pd.read_csv(file_in)

    # ---- Normalise the raw column names to the canonical id/time/x/y fields ----
    df = df.rename(columns={"uid": "id", "st": "time", "lon": "x", "lat": "y"})
    df['time'] = pd.to_datetime(df['time'], format="%Y-%m-%d %H:%M:%S", errors='coerce')
    df = df.sort_values(['id', 'time'])
    # Round coordinates to 8 decimals to suppress float noise (e.g. trailing ...00001).
    df['x'] = df['x'].round(8)
    df['y'] = df['y'].round(8)
    df = df.drop_duplicates(subset=None, keep='first').reset_index(drop=True)
    print(len(df))
    count_id = df.groupby('id').size().reset_index(name='Size')  # per-id point counts (not used below)

    # ---- Run the ping-pong effect detector ----
    pp_df = Rmp_detect(df, 8, 6, file_out)

    # ---- Post-processing of the detections ----
    # combine_df: original data merged with the ping-pong anomaly rows
    # last_df:    result after nearest-in-time value replacement
    combine_df = ppDeal.combine_dataset(df, pp_df)
    last_df = ppDeal.nearestTime(combine_df)
    print(len(last_df))
    print('结束了')
