# -*- encoding: utf-8 -*-

import os
import sys
import datetime
import time
import json
import math
from copy import deepcopy
import pandas as pd
import numpy as np
import pyspark
from math import radians, sin, cos, asin, sqrt

from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark import SparkContext as sc
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.window import Window
from pyspark.sql.types import IntegerType
from pyspark.sql.types import FloatType
from pyspark.sql.types import LongType
from pyspark.sql.types import TimestampType
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
from pyspark.sql.types import StringType
import logging
from utils import get_time_delta, geodistance, integer_division
from utils_var import numerical_features, origin_features, shift_feature_list

# Spark job configuration.
# NOTE(review): this rebinds `sc`, shadowing the `SparkContext as sc` import
# above; the SparkConf is only used to build the SparkSession below.
sc = SparkConf()  #

# Application identity and cluster deployment settings.
sc.set('spark.app.name', 'customer losing model')
sc.set('spark.master', 'yarn')
sc.set('spark.submit.deployMode', 'client')
# Executor sizing: 100 executors x 6 cores, 15g heap + 2g overhead each.
sc.set('spark.executor.instances', 100)
sc.set('spark.executor.cores', 6)
sc.set('spark.executor.memory', '15g')
sc.set('spark.executor.memoryOverhead', '2g')
sc.set('spark.default.parallelism', 1200)
# Driver settings: large maxResultSize because per-vin pandas conversion
# (toPandas/collect) pulls data back to the driver.
sc.set('spark.driver.memory', '4g')
sc.set("spark.driver.maxResultSize", "20g")
# Arrow speeds up the Spark<->pandas conversions used in this ETL.
sc.set('spark.sql.execution.arrow.enabled', 'true')
sc.set('spark.debug.maxToStringFields', 1000)
sc.set('spark.scheduler.listenerbus.eventqueue.capacity', 100000)
print(sc.get('spark.master'))

# Single SparkSession for the whole script; Hive support is required for
# spark.sql reads and the saveAsTable writes in insert_data().
spark = SparkSession.builder.config(conf=sc).enableHiveSupport().getOrCreate()

# Write INFO-level logs (with filename/line-number context) to the ETL log
# file; path is relative to the working directory at launch time.
logging.basicConfig(level=logging.INFO, \
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s', \
                    filename='../log/prj_faw_audi_nev_etl.log')

'''
数据的ETL流程：
1. 读取数据，数据清洗。
2. by vin 将数据聚合到单位SOC变化，并计算其他字段的统计量
  2.1 按照vin+时间排序后排序生成id，并删除档位为非行驶的情况。
  2.2 对id做差分并区分每个行程段，剔除累计电量消耗过少的行程段，切分过长行程段
  2.3 聚合id值生成宽表，过程中生成Y值与各种统计量特征
3. 将聚合后的数据落到本地Python环境中并存表。

input str: sql命令，用于从数据库中读取拉取数据
output: csv 清洗完毕，用于训练模型的数据集
'''

def read_data(vehtype):
    """Fetch the raw RTM dynamic data for one vehicle type.

    Parameters:
        vehtype: vehicle-type code used to filter the source table.

    Returns:
        A Spark DataFrame with the raw RTM records for that vehicle type.
    """
    # NOTE(review): the query text is a redacted placeholder -- confirm the
    # real statement before running.
    query = "select .. from ..where trim(vehtype) == '{0}'".format(vehtype)
    return spark.sql(query)


def insert_data(df, vehtype):
    """Persist the result DataFrame to the Hive table matching *vehtype*.

    Unrecognised vehicle types are silently ignored (nothing is written).

    Parameters:
        df: Spark DataFrame to persist.
        vehtype: vehicle-type code selecting the destination table.

    Returns:
        None.
    """
    target_tables = {
        "car_one": 'tmp_masrtm.carone_working_condition_result_df',
        "car_two": 'tmp_masrtm.cartwo_working_condition_result_df',
    }
    table_name = target_tables.get(vehtype)
    if table_name is not None:
        df.write.format("hive").mode("overwrite").saveAsTable(table_name)

    return None


def working_condition_feature_calc(distance_segment_df):
    """
    Compute the derived working-condition features for one trip segment.

    Parameters:
        distance_segment_df: pandas DataFrame holding all RTM records of a
            single trip segment, ordered by report_time, with the delta
            columns (time_delta, distance_delta, ...) already computed.

    Returns:
        A (1, n)-shaped pandas DataFrame: one summary row per trip segment.
    """
    # The first record's deltas were computed against the previous split
    # point and are meaningless within this segment -- reset them to 0.
    distance_segment_df.loc[distance_segment_df.index[0], 'time_delta'] = 0  # first record's time_delta refers to the previous split point, so zero it
    distance_segment_df.loc[distance_segment_df.index[0], 'distance_delta'] = 0

    # Acceleration = (current speed - previous speed) / time_delta.
    # NOTE(review): the first row's time_delta was just set to 0, so the
    # first acceleration value is NaN/inf; downstream stats skip index[0].
    distance_segment_df['last_cheshu'] = \
        distance_segment_df['cheshu'].shift(1)
    distance_segment_df['acceleration'] = \
        (distance_segment_df['cheshu'] - distance_segment_df['last_cheshu']) / distance_segment_df['time_delta']

    # Take the segment's first record; it carries the per-segment identity
    # fields (vin, start time, start position, ...) into the result row.
    '''record_first_df 作为行程段数据的载体'''
    record_first_df = distance_segment_df.loc[distance_segment_df.index[0]:distance_segment_df.index[0]]

    # Hour-of-day and day-of-week features (taken from the trip start time)
    record_first_df['week_day'] = record_first_df.apply(lambda x: x['report_time'].weekday(), axis=1)
    record_first_df['hour'] = record_first_df.apply(lambda x: x['report_time'].hour, axis=1)

    # Month feature
    record_first_df['report_month'] = record_first_df.apply(lambda x: x['report_time'].month, axis=1)

    ### Start/end state fields of the segment
    # Collection start time
    record_first_df.rename(columns={'report_time': 'start_report_time'}, inplace=True)
    # Collection end time
    record_first_df['end_report_time'] = distance_segment_df.loc[distance_segment_df.index[-1], 'report_time']

    # Start longitude/latitude
    record_first_df.rename(columns={'longitude': 'start_longitude', 'latitude': 'start_latitude'}, inplace=True)
    # End longitude/latitude
    record_first_df['end_longitude'] = distance_segment_df.loc[distance_segment_df.index[-1], 'longitude']
    record_first_df['end_latitude'] = distance_segment_df.loc[distance_segment_df.index[-1], 'latitude']

    # Start accumulated mileage (leijilicheng = odometer reading)
    record_first_df.rename(columns={'leijilicheng': 'start_leijilicheng'}, inplace=True)
    # End accumulated mileage
    record_first_df['end_leijilicheng'] = distance_segment_df.loc[distance_segment_df.index[-1], 'leijilicheng']

    # Start id (row number assigned upstream)
    record_first_df.rename(columns={'id': 'start_id'}, inplace=True)
    # End id
    record_first_df['end_id'] = distance_segment_df.loc[distance_segment_df.index[-1], 'id']

    ##############################################################################

    working_condition_feature_df = pd.DataFrame()

    # Start/end SOC of the segment.
    # NOTE(review): assumes SOC only decreases while driving, so
    # max == start and min == end -- confirm against the data.
    working_condition_feature_df.loc[0, 'start_soc'] = distance_segment_df['soc'].max()
    working_condition_feature_df.loc[0, 'end_soc'] = distance_segment_df['soc'].min()

    ### Working-condition statistics
    # Total trip time in hours (time_delta is in seconds)
    working_condition_feature_df.loc[0, 'total_distance_time'] = (distance_segment_df['time_delta'].sum() / 3600)
    # Share of time spent parked; presumably gear codes '0'/'15' in column
    # 'dw' mean parked/neutral -- TODO confirm against the signal dictionary.
    working_condition_feature_df.loc[0, 'proportion_parking_time'] = \
        distance_segment_df[distance_segment_df['dw'].isin(['0', '15'])]['time_delta'].sum() / distance_segment_df[
            'time_delta'].sum()

    # Braking frequency: fraction of records with brake signal 'zhidong'
    # equal to '0'. NOTE(review): '0' usually means "not braking" -- verify
    # the code meaning; this may be inverted.
    working_condition_feature_df.loc[0, 'braking_frequency'] = len(
        distance_segment_df[distance_segment_df['zhidong'] == '0']) / len(distance_segment_df)

    # Total energy consumption as SOC percentage points used
    working_condition_feature_df['power_delta'] = working_condition_feature_df['start_soc'] - \
                                                  working_condition_feature_df['end_soc']
    # Total driven distance
    working_condition_feature_df.loc[0, 'distance'] = distance_segment_df['distance_delta'].sum()
    # Unit-energy mileage: km driven per 1% of SOC (0 when no SOC was used)
    working_condition_feature_df['unit_mileage'] = working_condition_feature_df.apply(
        lambda x: x['distance'] / x['power_delta'] if x['power_delta'] > 0 else 0, axis=1)
    # Average speed over the whole trip (distance / total hours)
    working_condition_feature_df['speed'] = working_condition_feature_df['distance'] / working_condition_feature_df[
        'total_distance_time']
    # Average driving speed: total distance divided by the hours where the
    # vehicle speed ('cheshu') was non-zero
    working_condition_feature_df['average_driving_speed'] = working_condition_feature_df['distance'] / (
                distance_segment_df[distance_segment_df['cheshu'] != 0]['time_delta'].sum() / 3600)
    # Mean acceleration, excluding the (invalid) first row.
    # NOTE(review): `.index[0] + 1` assumes a contiguous integer index, i.e.
    # the caller ran reset_index first -- true for both call sites here.
    working_condition_feature_df['average_acceleration'] = distance_segment_df.loc[distance_segment_df.index[0] + 1:][
        'acceleration'].mean()
    # Maximum acceleration, excluding the first row
    working_condition_feature_df['max_acceleration'] = distance_segment_df.loc[distance_segment_df.index[0] + 1:][
        'acceleration'].max()
    # Share of time accelerating (positive acceleration), first row excluded
    working_condition_feature_df['acceleration_ratio'] = distance_segment_df[(distance_segment_df.index >
                                                                              distance_segment_df.index[0]) & (
                                                                                         distance_segment_df[
                                                                                             'acceleration'] > 0)][
                                                             'time_delta'].sum() / distance_segment_df[
                                                             distance_segment_df.index > distance_segment_df.index[0]][
                                                             'time_delta'].sum()
    # Share of time decelerating (negative acceleration)
    working_condition_feature_df['deceleration_ratio'] = distance_segment_df[(distance_segment_df.index >
                                                                              distance_segment_df.index[0]) & (
                                                                                         distance_segment_df[
                                                                                             'acceleration'] < 0)][
                                                             'time_delta'].sum() / distance_segment_df[
                                                             distance_segment_df.index > distance_segment_df.index[0]][
                                                             'time_delta'].sum()
    # Share of time at constant speed (acceleration exactly 0)
    working_condition_feature_df['uniform_ratio'] = distance_segment_df[
                                                        (distance_segment_df.index > distance_segment_df.index[0]) & (
                                                                    distance_segment_df['acceleration'] == 0)][
                                                        'time_delta'].sum() / distance_segment_df[
                                                        distance_segment_df.index > distance_segment_df.index[0]][
                                                        'time_delta'].sum()
    # Share of time idling (speed 0 while the vehicle is on)
    working_condition_feature_df['idle_speed_ratio'] = distance_segment_df[(distance_segment_df.index >
                                                                            distance_segment_df.index[0]) & (
                                                                                       distance_segment_df[
                                                                                           'cheshu'] == 0)][
                                                           'time_delta'].sum() / distance_segment_df[
                                                           distance_segment_df.index > distance_segment_df.index[0]][
                                                           'time_delta'].sum()
    # Sum of squared speeds (kinetic-energy proxy)
    distance_segment_df['squares_velocity'] = distance_segment_df.apply(lambda x: np.power(x['cheshu'], 2), axis=1)
    working_condition_feature_df['squares_velocity_sum'] = distance_segment_df['squares_velocity'].sum()
    # Mean of positive accelerations (first row excluded)
    working_condition_feature_df['positive_acceleration_average'] = distance_segment_df[
        (distance_segment_df.index > distance_segment_df.index[0]) & (distance_segment_df['acceleration'] > 0)][
        'acceleration'].mean()
    # Mean of negative accelerations (first row excluded)
    working_condition_feature_df['negative_acceleration_average'] = distance_segment_df[
        (distance_segment_df.index > distance_segment_df.index[0]) & (distance_segment_df['acceleration'] < 0)][
        'acceleration'].mean()

    # NOTE(review): every `distance_segment_df['']` below selects an EMPTY
    # column name -- these are unfilled placeholders and will raise KeyError
    # at runtime. The intended source column for each group must be filled
    # in ('jiasutabanxingcheng', 'cheshu', torque, voltage, etc.).
    # Accelerator pedal travel: maximum
    working_condition_feature_df.loc[0, 'jiasutabanxingcheng_maximum'] = distance_segment_df[
        ''].max()
    # Accelerator pedal travel: minimum
    working_condition_feature_df.loc[0, 'jiasutabanxingcheng_minimum'] = distance_segment_df[
        ''].min()
    # Accelerator pedal travel: mean
    working_condition_feature_df.loc[0, 'jiasutabanxingcheng_meanvalue'] = distance_segment_df[
        ''].mean()
    # Accelerator pedal travel: variance
    working_condition_feature_df.loc[0, 'jiasutabanxingcheng_variance'] = distance_segment_df[
        ''].var()
    # Accelerator pedal usage frequency (share of records with travel > 0)
    working_condition_feature_df.loc[0, 'jiasutabanxingcheng_frequency'] = (
            len(distance_segment_df[distance_segment_df[''] > 0]) / len(distance_segment_df))

    # Vehicle speed: maximum
    working_condition_feature_df.loc[0, 'cheshu_maximum'] = distance_segment_df[''].max()
    # Vehicle speed: minimum
    working_condition_feature_df.loc[0, 'cheshu_minimumm'] = distance_segment_df[''].min()
    # Vehicle speed: mean
    working_condition_feature_df.loc[0, 'cheshu_mean_value'] = distance_segment_df[''].mean()
    # Vehicle speed: variance
    working_condition_feature_df.loc[0, 'cheshu_mean_variance'] = distance_segment_df[''].var()
    # Vehicle speed: median
    working_condition_feature_df.loc[0, 'cheshu_mean_median'] = distance_segment_df[''].median()

    # NOTE(review): the six groups below (drive-motor torque, total voltage,
    # drive-motor temperature, total current, max temperature, insulation
    # resistance) all reuse the SAME result keys 'maximum'/'minimumm'/
    # 'mean_value'/'variance'/'median', so each group overwrites the
    # previous one -- only the insulation-resistance values would survive.
    # Each group needs a distinct column-name prefix.
    # Drive-motor torque: maximum
    working_condition_feature_df.loc[0, 'maximum'] = distance_segment_df[''].max()
    # Drive-motor torque: minimum
    working_condition_feature_df.loc[0, 'minimumm'] = distance_segment_df[''].min()
    # Drive-motor torque: mean
    working_condition_feature_df.loc[0, 'mean_value'] = distance_segment_df[''].mean()
    # Drive-motor torque: variance
    working_condition_feature_df.loc[0, 'variance'] = distance_segment_df[''].var()
    # Drive-motor torque: median
    working_condition_feature_df.loc[0, 'median'] = distance_segment_df[''].median()

    # Total voltage: maximum
    working_condition_feature_df.loc[0, 'maximum'] = distance_segment_df[''].max()
    # Total voltage: minimum
    working_condition_feature_df.loc[0, 'minimumm'] = distance_segment_df[''].min()
    # Total voltage: mean
    working_condition_feature_df.loc[0, 'mean_value'] = distance_segment_df[''].mean()
    # Total voltage: variance
    working_condition_feature_df.loc[0, 'variance'] = distance_segment_df[''].var()
    # Total voltage: median
    working_condition_feature_df.loc[0, 'median'] = distance_segment_df[''].median()

    # Drive-motor temperature: maximum
    working_condition_feature_df.loc[0, 'maximum'] = distance_segment_df[''].max()
    # Drive-motor temperature: minimum
    working_condition_feature_df.loc[0, 'minimumm'] = distance_segment_df[''].min()
    # Drive-motor temperature: mean
    working_condition_feature_df.loc[0, 'mean_value'] = distance_segment_df[''].mean()
    # Drive-motor temperature: variance
    working_condition_feature_df.loc[0, 'variance'] = distance_segment_df[''].var()
    # Drive-motor temperature: median
    working_condition_feature_df.loc[0, 'median'] = distance_segment_df[''].median()

    # Total current: maximum
    working_condition_feature_df.loc[0, 'maximum'] = distance_segment_df[''].max()
    # Total current: minimum
    working_condition_feature_df.loc[0, 'minimumm'] = distance_segment_df[''].min()
    # Total current: mean
    working_condition_feature_df.loc[0, 'mean_value'] = distance_segment_df[''].mean()
    # Total current: variance
    working_condition_feature_df.loc[0, 'variance'] = distance_segment_df[''].var()
    # Total current: median
    working_condition_feature_df.loc[0, 'median'] = distance_segment_df[''].median()

    # Maximum (battery) temperature: maximum
    working_condition_feature_df.loc[0, 'maximum'] = distance_segment_df[''].max()
    # Maximum temperature: minimum
    working_condition_feature_df.loc[0, 'minimumm'] = distance_segment_df[''].min()
    # Maximum temperature: mean
    working_condition_feature_df.loc[0, 'mean_value'] = distance_segment_df[''].mean()
    # Maximum temperature: variance
    working_condition_feature_df.loc[0, 'variance'] = distance_segment_df[''].var()
    # Maximum temperature: median
    working_condition_feature_df.loc[0, 'median'] = distance_segment_df[''].median()

    # Insulation resistance: maximum
    working_condition_feature_df.loc[0, 'maximum'] = distance_segment_df[''].max()
    # Insulation resistance: minimum
    working_condition_feature_df.loc[0, 'minimumm'] = distance_segment_df[''].min()
    # Insulation resistance: mean
    working_condition_feature_df.loc[0, 'mean_value'] = distance_segment_df[''].mean()
    # Insulation resistance: variance
    working_condition_feature_df.loc[0, 'variance'] = distance_segment_df[''].var()
    # Insulation resistance: median
    working_condition_feature_df.loc[0, 'median'] = distance_segment_df[''].median()

    # Join identity fields and computed statistics side by side into the
    # single-row result.
    working_condition_result_df = pd.concat([record_first_df, working_condition_feature_df], axis=1)

    # NOTE(review): '...' is a placeholder column name -- the real output
    # column list must be filled in, or this projection raises KeyError.
    working_condition_result_df = working_condition_result_df[
        ['vin', 'start_report_time', 'end_report_time', 'start_soc', 'end_soc', '...']]

    return working_condition_result_df


def delta_calc(df):
    """
    Compute first-order differences between each row and its lag-1 columns.

    Expects the `last_*` lag columns (last_id, last_report_time,
    last_longitude, last_latitude, last_soc, last_leijilicheng) to have been
    attached by the caller. Adds:
      id_delta         -- id minus last_id (1 for consecutive records)
      time_delta       -- seconds between consecutive report_times
      lat_lng_distance -- distance between consecutive GPS points
      soc_delta        -- SOC change between consecutive records
      distance_delta   -- odometer (leijilicheng) change between records

    Parameters:
        df: Spark DataFrame of one vehicle's records with lag columns.

    Returns:
        The same DataFrame with the delta columns added.
    """

    # NOTE(review): this fills nulls in ALL numeric columns with 0 (not just
    # the lag columns), which makes the later subset fillna calls redundant
    # -- confirm this blanket fill is intended.
    df = df.fillna(0)
    df = df.orderBy("report_time")

    df = df.withColumn("id_delta", df['id'] - df['last_id'])
    df = df.fillna(1, subset=["id_delta"])

    # Seconds elapsed between consecutive records (UDF from utils)
    df = df.withColumn("time_delta", get_time_delta(df["last_report_time"], df["report_time"]))

    # Distance between consecutive lat/lng points (UDF from utils)
    df = df.withColumn("lat_lng_distance",
                       geodistance(df["last_longitude"], df["last_latitude"], df["longitude"], df["latitude"]))

    # SOC difference between consecutive records
    df = df.withColumn("soc_delta", df["soc"] - df["last_soc"])
    df = df.fillna(0, subset=["soc_delta"])

    # Odometer difference between consecutive records
    df = df.withColumn("distance_delta", df["leijilicheng"] - df["last_leijilicheng"])
    df = df.fillna(0, subset=["distance_delta"])  #

    # The lag columns are null on the first row (lag looks backwards), so
    # give that row explicit initial delta values. take(1) pulls the first
    # id to the driver; 10s is presumably the nominal RTM sampling interval
    # -- TODO confirm.
    start_id = df[['id']].take(1)[0][0]
    df = df.withColumn("id_delta", F.when(df["id"] == start_id, 1).otherwise(df["id_delta"]))
    df = df.withColumn("time_delta", F.when(df["id"] == start_id, 10).otherwise(df["time_delta"]))
    df = df.withColumn("lat_lng_distance", F.when(df["id"] == start_id, 0).otherwise(df["lat_lng_distance"]))
    df = df.withColumn("soc_delta", F.when(df["id"] == start_id, 0).otherwise(df["soc_delta"]))
    df = df.withColumn("distance_delta", F.when(df["id"] == start_id, 0).otherwise(df["distance_delta"]))

    return df


def shift_feature_calc(df, shift_feature_list, shift_n):
    """Append lagged copies of each listed feature to *df* (in place).

    For every feature f and every lag k in 1..shift_n, adds a column
    'last_<k>_<f>' holding f shifted down by k rows (leading rows are NaN).

    Parameters:
        df: pandas DataFrame to extend (mutated in place).
        shift_feature_list: names of the columns to lag.
        shift_n: number of lags to generate per feature.

    Returns:
        The same DataFrame, with the lag columns added.
    """
    for feature in shift_feature_list:
        for lag in range(1, shift_n + 1):
            column_name = 'last_{0}_{1}'.format(lag, feature)
            df[column_name] = df[feature].shift(lag)
    return df


def rtm_data_processing(rtm_data):
    """
    Build the per-trip-segment wide table from raw RTM records.

    Per vehicle (vin): deduplicate and sort records, derive delta columns,
    split records into trip segments, compute working-condition features for
    each segment, add lagged trip features, and union everything into one
    Spark DataFrame.

    Parameters:
        rtm_data: Spark DataFrame of raw RTM records (from spark.sql).

    Returns:
        Spark DataFrame of per-segment feature rows matching the schema
        built below.
    """
    ### Collect the list of distinct vin numbers
    vin_list = [r[0] for r in rtm_data.select('vin').distinct().collect()]

    logging.info("vin total qty is {0}".format(len(vin_list)))

    # Schema of the trip-segment wide table built from RTM data.
    # NOTE(review): the trailing '......' below is a bare string inside the
    # StructType field list, not a StructField -- it is a placeholder for
    # the remaining feature fields and will break StructType construction
    # until replaced.
    schema = StructType([
        #### wide table part one: basic distance / soc info
        StructField("vin", StringType(), True),
        StructField("start_report_time", TimestampType(), True),  # segment start report_time
        StructField("end_report_time", TimestampType(), True),
        StructField("start_soc", IntegerType(), True),
        StructField("end_soc", IntegerType(), True),
        StructField("start_longitude", FloatType(), True),
        StructField("start_latitude", FloatType(), True),
        StructField("end_longitude", FloatType(), True),
        StructField("end_latitude", FloatType(), True),
        StructField("start_leijilicheng", FloatType(), True),
        StructField("end_leijilicheng", FloatType(), True),
        StructField("start_id", IntegerType(), True),  # every RTM row gets an id; this is the segment's first id
        StructField("end_id", IntegerType(), True),
        StructField("report_month", IntegerType(), True),
        StructField("week_day", IntegerType(), True),
        StructField("hour", IntegerType(), True),
        StructField("total_distance_time", FloatType(), True), '......'])

    # Add the lagged (previous 10 trips) feature fields to the schema
    for f in shift_feature_list:
        for k in range(1, 11):
            schema.add(StructField('last_' + str(k) + '_' + f, FloatType(), True))

    # Empty DataFrame with the target schema, used as the union accumulator
    working_condition_result_spark_df = spark.createDataFrame(spark.sparkContext.emptyRDD(), schema)

    ### Compute features per vehicle: each vin is processed independently
    for v in vin_list:
        logging.info("The {0} vin calc is start! {1}".format(vin_list.index(v), v))

        # Select this vin's records and deduplicate on (report_time, vin)
        vin_data = rtm_data.filter("vin = '{0}'".format(v)).dropDuplicates( \
            ['report_time', 'vin'])

        # Sort by collection time
        vin_data = vin_data.orderBy(["report_time"])
        vin_data.cache()  # cached because many actions below re-read it
        logging.info("The rows of number of data is {}".format(vin_data.count()))

        # Per-vin preprocessing

        ### Cast report_time from string to integer (epoch milliseconds)
        vin_data = vin_data.withColumn("report_time", vin_data["report_time"].cast(LongType()))

        ### Integer-divide by 1000 -> epoch seconds (UDF from utils)
        vin_data = vin_data.withColumn("report_time", integer_division(vin_data["report_time"]))

        # NOTE(review): intTodatetime is not defined or imported in this
        # module -- presumably a UDF that should come from utils; this line
        # raises NameError as written. TODO fix the import.
        vin_data = vin_data.withColumn('report_time', intTodatetime(vin_data["report_time"]))

        # Drop records where the accelerator-pedal signal marks an
        # abnormal/invalid state ('254'/'255')
        vin_data = vin_data.filter(vin_data['jiasutabanxingcheng'].isin(['254', '255']) == False)

        ### Cast the numeric feature columns to float
        for n in numerical_features:
            vin_data = vin_data.withColumn(n, vin_data[n].cast(FloatType()))

        ### Keep only the columns used downstream
        vin_data = vin_data[origin_features]

        ### Add an "id" row-number column, used to cut trip segments and
        ### compute driving-condition features
        '''id是用于切分行程段的字段：主要是以真实行程的开始与结束作为宽表形成的区分'''
        spec = Window.partitionBy("vin").orderBy("report_time")  # window spec for the row number
        vin_data = vin_data.withColumn("id", F.row_number().over(spec))  # from pyspark.sql import functions as F

        # Keep only "vehicle started" records ('1'); from here on the "id"
        # column is no longer contiguous -- gaps mark segment boundaries
        vin_data = vin_data[vin_data['cheliangzhuangtai'] == '1']

        # Attach the previous record's fields (lag-1) for delta computation
        window = Window.partitionBy("vin").orderBy("id")
        vin_data = vin_data.withColumn("last_id", F.lag(vin_data['id']).over(window))
        vin_data = vin_data.withColumn("last_report_time", F.lag(vin_data['report_time']).over(window))
        vin_data = vin_data.withColumn("last_longitude", F.lag(vin_data['longitude']).over(window))
        vin_data = vin_data.withColumn("last_latitude", F.lag(vin_data['latitude']).over(window))
        vin_data = vin_data.withColumn("last_soc", F.lag(vin_data['soc']).over(window))
        vin_data = vin_data.withColumn("last_leijilicheng", F.lag(vin_data['leijilicheng']).over(window))

        # Fill nulls in the lag columns with the first non-null value from
        # the current row forward (frame: current row .. end of partition)
        fill_col_list = ["last_report_time", "last_longitude", "last_latitude", "last_soc", "last_leijilicheng"]
        window = Window.partitionBy("vin").orderBy("id").rowsBetween(0, sys.maxsize)
        for f in fill_col_list:
            filled = F.first(vin_data[f], ignorenulls=True).over(window)
            vin_data = vin_data.withColumn(f, filled)

        # Compute the delta columns
        vin_data = delta_calc(vin_data)

        ###################################################################
        # Find the ids at which to split trip segments:
        # - gaps within 15 minutes AND soc use within 1% AND mileage gap
        #   within 5 km are NOT split points;
        # - segments with total mileage < 1 km or soc use < 1% are dropped;
        # - segments longer than 80 km are further split into ~60 km chunks.

        # step one: candidate split points wherever id is not consecutive

        index_list = [r[0] for r in vin_data[vin_data['id_delta'] != 1][['id']].collect()]

        # step two: gaps shorter than 15 minutes are split candidates to
        # possibly discard
        loss_df = vin_data[(vin_data['time_delta'] <= (60 * 15)) & (vin_data['id_delta'] != 1)]

        # NOTE(review): the filter below references vin_data columns while
        # selecting from loss_df; Spark resolves these by name against
        # loss_df's lineage, but this cross-DataFrame column use is fragile
        # -- should reference loss_df's own columns.
        if loss_df.count() > 0:
            loss_df = delta_calc(loss_df)
            loss_index_list = [r[0] for r in loss_df[(vin_data['soc_delta'] < 1) & \
                                                     (vin_data['distance_delta'] < 5)][['id']].collect()]
        else:
            loss_index_list = []
        index_list = list(set(index_list) - set(loss_index_list))
        index_list.append(1)

        ### If there is only one trip segment, append (max id + 1) so the
        ### split list still delimits one [start, end) interval
        if len(index_list) == 1:  # true means index_list == [1]
            index_list.append(vin_data.groupBy().max('id').collect()[0][0] + 1)
        index_list.sort()

        # Convert vin_data to a pandas DataFrame for per-segment processing
        vin_data = vin_data.toPandas()

        ### Compute features for each segment; split long segments further

        working_condition_result_pandas_df = pd.DataFrame()  # accumulates all segment rows for this vehicle
        for i in range(len(index_list) - 1):
            distance_segment_df = vin_data[
                (vin_data['id'] >= index_list[i]) & (vin_data['id'] < index_list[i + 1])]
            distance_segment_df.reset_index(drop=True, inplace=True)

            # Re-split one segment by its total accumulated mileage
            # Start odometer reading
            start_leijilicheng = distance_segment_df.loc[distance_segment_df.index[0], \
                                                         'leijilicheng']
            # End odometer reading
            end_leijilicheng = distance_segment_df.loc[distance_segment_df.index[-1], \
                                                       'leijilicheng']

            # Start SOC
            start_soc = distance_segment_df.loc[distance_segment_df.index[0], 'soc']
            # End SOC
            end_soc = distance_segment_df.loc[distance_segment_df.index[-1], 'soc']

            # Start collection time
            start_report_time = distance_segment_df.loc[distance_segment_df.index[0], 'report_time']
            # End collection time
            end_report_time = distance_segment_df.loc[distance_segment_df.index[-1], 'report_time']

            tmp_soc_delta = start_soc - end_soc
            tmp_distance_delta = end_leijilicheng - start_leijilicheng
            tmp_timedelta = (end_report_time - start_report_time).total_seconds()

            # Drop segments with < 1 km driven or < 1% SOC used
            if tmp_distance_delta < 1 or tmp_soc_delta < 1: continue

            tmp_leijilicheng = deepcopy(start_leijilicheng)
            if (tmp_distance_delta) > 80:
                for j in range(math.ceil(tmp_distance_delta / 60)):

                    # Segments longer than 80 km are cut into 60 km chunks
                    tmp_distance_segment_df = distance_segment_df[
                        (distance_segment_df['leijilicheng'] >= tmp_leijilicheng) & (
                                distance_segment_df['leijilicheng'] < tmp_leijilicheng + 60)].reset_index(drop=True)

                    if tmp_distance_segment_df.shape[0] > 0:
                        # Splitting done for this chunk -- build its wide-table row
                        ''' 所有切分行程段的逻辑完毕， 建立宽表  '''
                        tmp_working_condition_result_df = working_condition_feature_calc(tmp_distance_segment_df)
                        working_condition_result_pandas_df = pd.concat([working_condition_result_pandas_df, \
                                                                        tmp_working_condition_result_df]).reset_index(
                            drop=True)
                        # NOTE(review): BUG -- reset_index(..., inplace=True)
                        # returns None, so this adds None to a float and
                        # raises TypeError. Presumably the intent was to
                        # advance the window: tmp_leijilicheng += 60.
                        tmp_leijilicheng = tmp_leijilicheng + \
                                           working_condition_result_pandas_df.reset_index(drop=True, inplace=True)
            else:
                tmp_working_condition_result_df = working_condition_feature_calc(distance_segment_df)
                working_condition_result_pandas_df = pd.concat(
                    [working_condition_result_pandas_df, tmp_working_condition_result_df]).reset_index(drop=True)

        ### Store this vehicle's results into the accumulated Spark frame
        if working_condition_result_pandas_df.shape[0] > 0:
            ### Add the lagged features of the previous 10 trips
            working_condition_result_pandas_df = shift_feature_calc(working_condition_result_pandas_df, \
                                                                    shift_feature_list, 10)
            working_condition_result_spark_df = \
                working_condition_result_spark_df.unionByName(spark.createDataFrame(working_condition_result_pandas_df))

            logging.info(
                "The {0} vin data processed data rows is {1}".format(v, working_condition_result_pandas_df.shape[0]))
        logging.info("The {0} vin calc is end! {1}".format(vin_list.index(v), v))

    return working_condition_result_spark_df


def vin_data_processing():
    """Run the full ETL (read -> process -> store) for each vehicle type.

    Processes the 'etron' and 'q2Letron' vehicle types in sequence: pulls
    the raw RTM data, builds the trip-segment wide table, and persists the
    result. Returns None.
    """
    # (log label, vehicle-type code) pairs; the q2Letron type is logged
    # under the short label 'q2l', matching the original messages.
    for label, vehtype in (("etron", "etron"), ("q2l", "q2Letron")):
        logging.info("vehtype {0} data process is starting!!!".format(label))

        # Pull the full RTM dataset for this vehicle type and cache it,
        # since rtm_data_processing scans it once per vin.
        rtm_dynamic_data = read_data(vehtype)
        rtm_dynamic_data.cache()

        # Pre-wide-table processing: type casts, ordering, delta columns,
        # segment splitting and feature aggregation.
        working_condition_result_df = rtm_data_processing(rtm_dynamic_data)

        insert_data(working_condition_result_df, vehtype)
        del rtm_dynamic_data
        logging.info("vehtype {0} data process is ending!!!".format(label))

    return None


if __name__ == '__main__':

    ### Reduce Spark's own log output to WARN so the ETL log stays readable
    spark.sparkContext.setLogLevel("WARN")

    vin_data_processing()
