from pathlib import Path
import pandas as pd
from tqdm import tqdm, trange
import os

# Paths for the WSDream "dataset1" files.
# The raw data lives next to this script; the descriptive header lines of
# userlist.txt and wslist.txt were removed manually beforehand.

# Root directory of the raw txt input files
TXT_ROOT_PATH = "dataset1/txt/"
FILE_userlist_txt = TXT_ROOT_PATH + "userlist.txt"
FILE_wslist_txt = TXT_ROOT_PATH + "wslist.txt"
FILE_rtMatrix_txt = TXT_ROOT_PATH + "rtMatrix.txt"
FILE_tpMatrix_txt = TXT_ROOT_PATH + "tpMatrix.txt"
FILE_rtdata_txt = TXT_ROOT_PATH + "rtdata.txt"
FILE_tpdata_txt = TXT_ROOT_PATH + "tpdata.txt"

# Root directory of the generated (tab-separated) csv files
CSV_ROOT_PATH = "dataset1/csv/"
FILE_userlist_csv = CSV_ROOT_PATH + "userlist.csv"
FILE_wslist_csv = CSV_ROOT_PATH + "wslist.csv"
FILE_rtMatrix_csv = CSV_ROOT_PATH + "rt.csv"
FILE_tpMatrix_csv = CSV_ROOT_PATH + "tp.csv"
FILE_rtdata_csv = CSV_ROOT_PATH + "rtdata.csv"
FILE_tpdata_csv = CSV_ROOT_PATH + "tpdata.csv"

# Final merged result files for dataset1 (triples joined with metadata)
FILE_rtMatrix_final_csv = CSV_ROOT_PATH + "rt_final.csv"
FILE_tpMatrix_final_csv = CSV_ROOT_PATH + "tp_final.csv"

# Files enriched with reverse-geocoded address information
FILE_userlist_geo_csv = CSV_ROOT_PATH + "userlist_geo.csv"
FILE_wslist_geo_csv = CSV_ROOT_PATH + "wslist_geo.csv"

FILE_userlist_geo_cut_csv = CSV_ROOT_PATH + "userlist_geo_cut.csv"
FILE_wslist_geo_cut_csv = CSV_ROOT_PATH + "wslist_geo_cut.csv"

FILE_rtMatrix_final_geo_csv = CSV_ROOT_PATH + "rt_final_geo.csv"
FILE_tpMatrix_final_geo_csv = CSV_ROOT_PATH + "tp_final_geo.csv"


# WSDream dataset preprocessing: convert the raw txt files to tab-separated
# csv files, flatten the QoS matrices into triples, and merge them with the
# user / service metadata (optionally enriched with reverse-geocoded info).
class PrepareWSDREAM(object):
    """Static preprocessing pipeline for WSDream dataset1.

    Every ``prepare_*`` method is idempotent: it skips the conversion when
    the target csv already exists, and always ends by printing the shape of
    the resulting file.
    """

    # Column layouts of the raw userlist/wslist files (their header rows were
    # removed manually, hence the skiprows/names combination when reading).
    _USERLIST_FEATURES = ["User ID", "IP Address", "Country", "IP No.", "AS", "Latitude", "Longitude"]
    _WSLIST_FEATURES = ["Service ID", "WSDL Address", "Service Provider", "IP Address", "Country", "IP No.",
                        "AS", "Latitude", "Longitude"]

    # ------------------------------------------------------------- helpers --

    @staticmethod
    def _print_shape(csv_path, label):
        # Reload the (possibly pre-existing) csv and report its shape.
        df = pd.read_csv(csv_path, sep='\t')
        print(label, df.shape)

    @staticmethod
    def _matrix_to_triples(txt_path, value_col):
        """Read a dense QoS matrix and return (User ID, Service ID, value) rows.

        -1 entries (missing observations) are NOT filtered here.
        """
        matrix_df = pd.read_csv(txt_path, header=None, sep='\t')
        # A trailing tab on every line yields an all-NaN last column; drop it.
        matrix_df = matrix_df.dropna(axis=1, how='all')
        values = matrix_df.values
        n_rows, n_cols = values.shape
        triples = [(r, c, values[r, c]) for r in range(n_rows) for c in range(n_cols)]
        return pd.DataFrame(triples, columns=["User ID", "Service ID", value_col])

    @staticmethod
    def _data_to_quads(txt_path, value_col):
        """Read a space-separated observation file into
        (User ID, Service ID, Time Slice ID, value) rows. -1 values are kept.
        """
        # The file is space-separated; reading with sep='\t' yields one raw
        # string per line, which is then split vectorized (no Python loop).
        raw_df = pd.read_csv(txt_path, header=None, sep='\t')
        quads = raw_df[0].str.split(' ', expand=True)
        quads.columns = ["User ID", "Service ID", "Time Slice ID", value_col]
        return quads

    @staticmethod
    def _merge_final(matrix_csv, final_csv, value_col):
        """Join a QoS triple file with userlist/wslist metadata into final_csv.

        Rows whose QoS value is -1.0 (missing observation) are removed first.
        """
        features = ["User ID", "Service ID", value_col]
        matrix_df = pd.read_csv(matrix_csv, skiprows=1, sep='\t', names=features)
        matrix_df = matrix_df[matrix_df[value_col] != -1.0]
        matrix_df = matrix_df.drop_duplicates(keep='first')

        userlist_df = pd.read_csv(FILE_userlist_csv, skiprows=1, sep='\t',
                                  names=PrepareWSDREAM._USERLIST_FEATURES)
        wslist_df = pd.read_csv(FILE_wslist_csv, skiprows=1, sep='\t',
                                names=PrepareWSDREAM._WSLIST_FEATURES)

        # Left joins: user columns get the _x suffix, service columns _y.
        merged = matrix_df.merge(userlist_df, on="User ID", how='left')
        merged = merged.merge(wslist_df, on="Service ID", how='left')

        # Unknown coordinates default to 0.0, every other missing field to -1.
        # Column assignment is used instead of chained `df[col].fillna(...,
        # inplace=True)`, which is deprecated and may operate on a copy.
        for col in ("Latitude_x", "Longitude_x", "Latitude_y", "Longitude_y"):
            merged[col] = merged[col].fillna(0.0)
        merged = merged.fillna(-1)

        # "lat lon" string columns for the user (_x) and service (_y) sides.
        merged["coordinate_x"] = merged.agg('{0[Latitude_x]} {0[Longitude_x]}'.format, axis=1)
        merged["coordinate_y"] = merged.agg('{0[Latitude_y]} {0[Longitude_y]}'.format, axis=1)

        merged.to_csv(final_csv, index=False, sep='\t')

    @staticmethod
    def _reverse_geocode(src_csv, geo_csv, features):
        """Add a reverse-geocoded 'geo' address column and write geo_csv."""
        df = pd.read_csv(src_csv, skiprows=1, sep='\t', names=features)
        # Imported lazily: geopy is only needed for the optional geo pipeline.
        from geopy.geocoders import Nominatim
        geolocator = Nominatim(user_agent="ysu528")
        df["geo"] = "-1"
        for i in trange(len(df)):
            try:
                location = geolocator.reverse(
                    str(df.loc[i, "Latitude"]) + "," + str(df.loc[i, "Longitude"]))
                # .loc avoids chained-assignment writes to a possible copy.
                df.loc[i, "geo"] = str(location.address)
            except Exception:
                # Lookup failures (bad coordinates, network errors) keep the
                # "-1" placeholder instead of aborting the whole run.
                df.loc[i, "geo"] = "-1"
        df.to_csv(geo_csv, index=False, sep='\t')

    @staticmethod
    def _cut_geo(geo_csv, cut_csv, features):
        """Condense the 'geo' address into an 'LLgeo' column and write cut_csv.

        Keeps at most the four coarsest address components (country first),
        concatenated without separators.
        """
        df = pd.read_csv(geo_csv, skiprows=1, sep='\t', names=features)
        df["LLgeo"] = ""
        for i in trange(len(df)):
            parts = df.at[i, "geo"].split(",")
            # Reverse so the most general component (country) comes first;
            # trailing fine-grained components add little, so keep only four.
            parts.reverse()
            df.at[i, "LLgeo"] = "".join(part.strip() for part in parts[:4])
        df.to_csv(cut_csv, index=False, sep='\t')

    # ----------------------------------------------------------- txt -> csv --

    @staticmethod
    def prepare_userlist():
        """Convert userlist.txt to a tab-separated userlist.csv."""
        print("开始处理userlist.txt数据文件")
        if Path(FILE_userlist_csv).exists():
            print("userlist的csv格式文件已存在")
        else:
            userlist_df = pd.read_csv(FILE_userlist_txt, skiprows=2, sep='\t',
                                      names=PrepareWSDREAM._USERLIST_FEATURES)
            userlist_df.to_csv(FILE_userlist_csv, index=False, sep='\t')
            print("userlist的csv格式转换已完成")
        PrepareWSDREAM._print_shape(FILE_userlist_csv, "userlist的数据容量为: ")

    @staticmethod
    def prepare_wslist():
        """Convert wslist.txt to wslist.csv, dropping its duplicated rows."""
        print("开始处理wslist.txt数据文件")
        if Path(FILE_wslist_csv).exists():
            print("wslist的csv格式文件已存在")
        else:
            wslist_df = pd.read_csv(FILE_wslist_txt, skiprows=2, sep='\t',
                                    names=PrepareWSDREAM._WSLIST_FEATURES)
            # The raw wslist contains fully duplicated records; keep the first.
            wslist_df = wslist_df.drop_duplicates(keep='first')
            wslist_df.to_csv(FILE_wslist_csv, index=False, sep='\t')
            print("wslist的csv格式转换已完成")
        PrepareWSDREAM._print_shape(FILE_wslist_csv, "wslist的数据容量为: ")

    @staticmethod
    def prepare_rtmatrix():
        """Flatten rtMatrix.txt into (User ID, Service ID, RT) triples.

        RT = response time; -1 values are kept (not treated as missing here).
        """
        print("开始处理rtMatrix.txt数据文件")
        if Path(FILE_rtMatrix_csv).exists():
            print("rtMatrix的User ID，Service ID，RT三元组csv类型数据集已存在")
        else:
            triples = PrepareWSDREAM._matrix_to_triples(FILE_rtMatrix_txt, "RT")
            triples.to_csv(FILE_rtMatrix_csv, index=False, sep='\t')
            print("User ID，Service ID，RT三元组csv类型数据集处理完成")
        PrepareWSDREAM._print_shape(FILE_rtMatrix_csv, "rtmatrix的数据容量为: ")

    @staticmethod
    def prepare_tpmatrix():
        """Flatten tpMatrix.txt into (User ID, Service ID, TP) triples.

        TP = throughput; -1 values are kept (not treated as missing here).
        """
        print("开始处理tpMatrix.txt数据文件")
        if Path(FILE_tpMatrix_csv).exists():
            print("tpMatrix的User ID，Service ID，TP三元组csv类型数据集已存在")
        else:
            triples = PrepareWSDREAM._matrix_to_triples(FILE_tpMatrix_txt, "TP")
            triples.to_csv(FILE_tpMatrix_csv, index=False, sep='\t')
            print("User ID，Service ID，TP三元组csv类型数据集处理完成")
        PrepareWSDREAM._print_shape(FILE_tpMatrix_csv, "tpmatrix的数据容量为: ")

    @staticmethod
    def prepare_rtdata():
        """Split rtdata.txt (dataset2) into (User ID, Service ID, Time Slice ID, RT) rows."""
        print("开始处理rtdata.txt数据文件")
        if Path(FILE_rtdata_csv).exists():
            print("rtdata的User ID，Service ID，Time Slice ID, RT四元组csv类型数据集已存在")
        else:
            quads = PrepareWSDREAM._data_to_quads(FILE_rtdata_txt, "RT")
            quads.to_csv(FILE_rtdata_csv, index=False, sep='\t')
            print("User ID，Service ID，Time Slice ID, RT四元组csv类型数据集处理完成")
        PrepareWSDREAM._print_shape(FILE_rtdata_csv, "rtdata的数据容量为: ")

    @staticmethod
    def prepare_tpdata():
        """Split tpdata.txt (dataset2) into (User ID, Service ID, Time Slice ID, TP) rows."""
        print("开始处理tpdata.txt数据文件")
        if Path(FILE_tpdata_csv).exists():
            print("tpdata的User ID，Service ID，Time Slice ID, TP四元组csv类型数据集已存在")
        else:
            quads = PrepareWSDREAM._data_to_quads(FILE_tpdata_txt, "TP")
            quads.to_csv(FILE_tpdata_csv, index=False, sep='\t')
            print("User ID，Service ID，Time Slice ID, TP四元组csv类型数据集处理完成")
        PrepareWSDREAM._print_shape(FILE_tpdata_csv, "tpdata的数据容量为: ")

    # --------------------------------------------------------------- merges --

    @staticmethod
    def prepare_rtmatrix_final():
        """Merge rt.csv with userlist.csv/wslist.csv into rt_final.csv (drops RT == -1.0)."""
        print("开始处理rtmatrix_final中的三个数据文件合并操作")
        if Path(FILE_rtMatrix_final_csv).exists():
            print("rt_final数据合并结果文件已存在")
        else:
            PrepareWSDREAM._merge_final(FILE_rtMatrix_csv, FILE_rtMatrix_final_csv, "RT")
        PrepareWSDREAM._print_shape(FILE_rtMatrix_final_csv, "rt_final的数据容量为: ")

    @staticmethod
    def prepare_tpmatrix_final():
        """Merge tp.csv with userlist.csv/wslist.csv into tp_final.csv (drops TP == -1.0)."""
        print("开始处理tpmatrix_final中的三个数据文件合并操作")
        if Path(FILE_tpMatrix_final_csv).exists():
            print("tp_final数据合并结果文件已存在")
        else:
            PrepareWSDREAM._merge_final(FILE_tpMatrix_csv, FILE_tpMatrix_final_csv, "TP")
        PrepareWSDREAM._print_shape(FILE_tpMatrix_final_csv, "tp_final的数据容量为: ")

    # -------------------------------------------------------- geo enrichment --

    @staticmethod
    def prepare_userlist_geo():
        """Reverse-geocode userlist.csv coordinates into userlist_geo.csv."""
        print("开始处理userlist_geo.csv数据文件")
        if Path(FILE_userlist_geo_csv).exists():
            print("userlist_geo的csv格式文件已存在")
        else:
            # The shared helper wraps each lookup in try/except, so a single
            # failing coordinate no longer aborts the whole (slow) run.
            PrepareWSDREAM._reverse_geocode(FILE_userlist_csv, FILE_userlist_geo_csv,
                                            PrepareWSDREAM._USERLIST_FEATURES)
            print("userlist的csv格式转换已完成")
        PrepareWSDREAM._print_shape(FILE_userlist_geo_csv, "userlist_geo的数据容量为: ")

    @staticmethod
    def prepare_wslist_geo():
        """Reverse-geocode wslist.csv coordinates into wslist_geo.csv."""
        print("开始处理wslist_geo.csv数据文件")
        if Path(FILE_wslist_geo_csv).exists():
            print("wslist_geo的csv格式文件已存在")
        else:
            PrepareWSDREAM._reverse_geocode(FILE_wslist_csv, FILE_wslist_geo_csv,
                                            PrepareWSDREAM._WSLIST_FEATURES)
        PrepareWSDREAM._print_shape(FILE_wslist_geo_csv, "wslist_geo的数据容量为: ")

    @staticmethod
    def prepare_userlist_geo_cut():
        """Condense userlist_geo.csv addresses into userlist_geo_cut.csv."""
        print("开始处理userlist_geo_cut.csv数据文件")
        if Path(FILE_userlist_geo_cut_csv).exists():
            print("userlist_geo_cut的csv格式文件已存在")
        else:
            PrepareWSDREAM._cut_geo(FILE_userlist_geo_csv, FILE_userlist_geo_cut_csv,
                                    PrepareWSDREAM._USERLIST_FEATURES + ["geo"])
            print("userlist_geo_cut的csv格式转换已完成")
        PrepareWSDREAM._print_shape(FILE_userlist_geo_cut_csv, "userlist_geo_cut的数据容量为: ")

    @staticmethod
    def prepare_wslist_geo_cut():
        """Condense wslist_geo.csv addresses into wslist_geo_cut.csv."""
        print("开始处理wslist_geo_cut.csv数据文件")
        if Path(FILE_wslist_geo_cut_csv).exists():
            print("wslist_geo_cut的csv格式文件已存在")
        else:
            PrepareWSDREAM._cut_geo(FILE_wslist_geo_csv, FILE_wslist_geo_cut_csv,
                                    PrepareWSDREAM._WSLIST_FEATURES + ["geo"])
            print("wslist_geo_cut的csv格式转换已完成")
        PrepareWSDREAM._print_shape(FILE_wslist_geo_cut_csv, "wslist_geo_cut的数据容量为: ")

    @staticmethod
    def prepare_rtmatrix_final_geo():
        """Merge rt.csv with the geo-cut user/service lists into rt_final_geo.csv.

        Drops RT == -1.0 rows and the redundant user-side coordinate columns.
        """
        print("开始处理rt_final_geo.csv中的三个数据文件合并操作")
        if Path(FILE_rtMatrix_final_geo_csv).exists():
            print("rt_final_geo数据合并结果文件已存在")
        else:
            rt_features = ["User ID", "Service ID", "RT"]
            matrix_df = pd.read_csv(FILE_rtMatrix_csv, skiprows=1, sep='\t', names=rt_features)
            matrix_df = matrix_df[matrix_df["RT"] != -1.0]
            matrix_df = matrix_df.drop_duplicates(keep='first')

            userlist_df = pd.read_csv(FILE_userlist_geo_cut_csv, sep='\t')
            wslist_df = pd.read_csv(FILE_wslist_geo_cut_csv, sep='\t')

            merged = matrix_df.merge(userlist_df, on="User ID", how='left')
            merged = merged.merge(wslist_df, on="Service ID", how='left')

            # BUG FIX: the original passed the single string
            # "Latitude_x,Longitude_x,geo_x" (one comma-joined label) to
            # drop(), which is not a column name and raised KeyError; the
            # three user-side columns must be dropped individually.
            merged = merged.drop(columns=["Latitude_x", "Longitude_x", "geo_x"])
            merged.to_csv(FILE_rtMatrix_final_geo_csv, index=False, sep='\t')
        PrepareWSDREAM._print_shape(FILE_rtMatrix_final_geo_csv, "rt_final_geo的数据容量为: ")


if __name__ == '__main__':
    print("开始数据处理!")

    # Create the csv output folder if needed. makedirs (rather than mkdir)
    # also creates any missing parent directory such as "dataset1/".
    if os.path.exists(CSV_ROOT_PATH):
        print("已存在csv文件夹")
    else:
        os.makedirs(CSV_ROOT_PATH)
        print("创建csv文件夹")

    # dataset1 pipeline: txt -> csv conversions, then the merged result files
    PrepareWSDREAM.prepare_userlist()
    PrepareWSDREAM.prepare_wslist()
    PrepareWSDREAM.prepare_rtmatrix()
    PrepareWSDREAM.prepare_tpmatrix()
    PrepareWSDREAM.prepare_rtmatrix_final()
    PrepareWSDREAM.prepare_tpmatrix_final()

    # dataset2 (time-sliced) files — enable when dataset2 is present
    # PrepareWSDREAM.prepare_rtdata()
    # PrepareWSDREAM.prepare_tpdata()

    # optional geo enrichment pipeline (requires geopy and network access)
    # PrepareWSDREAM.prepare_userlist_geo()
    # PrepareWSDREAM.prepare_wslist_geo()
    # PrepareWSDREAM.prepare_userlist_geo_cut()
    # PrepareWSDREAM.prepare_wslist_geo_cut()
    # PrepareWSDREAM.prepare_rtmatrix_final_geo()