#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    :   spark_analisis.py
@Contact :   fengfeng.qiu@amh-group.com

@Modify Time      @Author
------------      -------
2022/01/27 14:33   qiufengfeng
"""
import os
from pyspark.sql.functions import datediff, current_date
from pyspark.sql import SparkSession
from pyspark.sql.context import HiveContext
from pyspark.sql.context import SQLContext
from pyspark.sql.types import StringType, DoubleType,IntegerType
import pyspark.sql.functions as F

import math
from datetime import datetime

# Base-32 alphabet used by geohash encoding: digits plus lowercase letters,
# excluding a, i, l, o (visually ambiguous characters).
__base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
# Reverse lookup table: character -> 5-bit value. Needed for geohash
# decoding; not referenced by the code visible in this file.
__decodemap = {}
for i in range(len(__base32)):
    __decodemap[__base32[i]] = i
del i


def geo_encode(latitude, longitude, precision=12):
    """
    Encode a latitude/longitude pair as a geohash string.

    Geohash interleaves one bit of longitude and one bit of latitude per
    step (longitude first), narrowing the corresponding interval by
    binary search; every 5 bits become one base-32 character.

    :param latitude: latitude in degrees, in [-90, 90]
    :param longitude: longitude in degrees, in [-180, 180]
    :param precision: number of characters in the resulting geohash
    :return: geohash string of length `precision`
    """
    base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
    lat_lo, lat_hi = -90.0, 90.0
    lon_lo, lon_hi = -180.0, 180.0
    chars = []
    ch = 0
    bit_pos = 4          # bit within the current 5-bit character, MSB first
    use_lon = True       # bits alternate, starting with longitude
    while len(chars) < precision:
        if use_lon:
            mid = (lon_lo + lon_hi) / 2
            if longitude > mid:
                ch |= 1 << bit_pos
                lon_lo = mid
            else:
                lon_hi = mid
        else:
            mid = (lat_lo + lat_hi) / 2
            if latitude > mid:
                ch |= 1 << bit_pos
                lat_lo = mid
            else:
                lat_hi = mid
        use_lon = not use_lon
        if bit_pos:
            bit_pos -= 1
        else:
            chars.append(base32[ch])
            ch = 0
            bit_pos = 4
    return ''.join(chars)


def make_geohash_str(lat, lon, pecision=8):
    """
    Encode an address coordinate as a geohash string.

    NOTE(review): the original docstring had the labels swapped (lat was
    described as longitude and vice versa); corrected here. The parameter
    name `pecision` (sic) is kept for backward compatibility.

    :param lat: address latitude in degrees
    :param lon: address longitude in degrees
    :param pecision: number of geohash characters to produce (default 8)
    :return: geohash string of length `pecision`
    """
    return geo_encode(lat, lon, pecision)


def make_score(interval_days):
    """
    Map a record's age (in days) to a score. Any non-negative,
    monotonically decreasing function of the interval works; this one
    decays logarithmically from 6.13 and cuts off after 360 days.

    :param interval_days: days elapsed since the record's day
    :return: score rounded to 4 decimals (0.0 for records older than 360 days)
    """
    if interval_days > 360:
        score = 0
    else:
        # Clamp to 1 so same-day records (interval 0, produced by
        # datediff(current_date(), day)) don't raise a log10 domain
        # error; they simply get the maximum score 6.13.
        score = 6.13 - 2.32 * math.log10(max(interval_days, 1))
    return round(float(score), 4)


def make_risk_score(total_score):
    """
    Map the summed scores of one shipper/driver to a risk score via a
    sigmoid, rescaled from (0, 1) to (-1, 1) so that a total of 0 maps
    to 0 (non-negative totals land in [0, 1)).

    :param total_score: sum of per-record scores
    :return: risk score rounded to 2 decimals
    """
    squashed = 1.0 / (1.0 + math.exp(-total_score))
    return round(2 * (squashed - 0.5), 2)

def make_current_day():
    """Return today's date as an int in yyyymmdd form (used as partition key)."""
    today = datetime.now()
    return today.year * 10000 + today.month * 100 + today.day


common_make_day = F.udf(make_current_day, IntegerType())


def generator_address_geohash(dataframe):
    """
    Add geohash columns derived from the start and end address coordinates.

    :param dataframe: spark dataframe with start_addr_lat/start_addr_lon
                      and end_addr_lat/end_addr_lon columns
    :return: dataframe with extra start_geohash / end_geohash columns
    """
    # Wrap the plain-Python encoder as a Spark UDF once, reuse for both ends.
    geohash_udf = F.udf(make_geohash_str, StringType())
    return (dataframe
            .withColumn("start_geohash", geohash_udf(F.col("start_addr_lat"), F.col("start_addr_lon")))
            .withColumn("end_geohash", geohash_udf(F.col("end_addr_lat"), F.col("end_addr_lon"))))


def generator_interval_days_score(dataframe):
    """
    Score each record by its age: add interval_days (days between the
    record's `day` and today) and risk_point (make_score of that interval).

    :param dataframe: spark dataframe with a `day` date column
    :return: dataframe with extra interval_days / risk_point columns
    """
    score_udf = F.udf(make_score, DoubleType())
    with_interval = dataframe.withColumn("interval_days", datediff(current_date(), F.col("day")))
    return with_interval.withColumn("risk_point", score_udf(F.col("interval_days")))


def result_shipper_dataframe(dataframe, day_fuc=common_make_day):
    """
    Build the shipper risk statistics and risk scores.

    Only records flagged as contraband (is_contraband_cargo == 1) are
    counted. The original had five near-identical groupby/count blocks
    differing only in the day window; they are folded into one helper.

    :param dataframe: spark dataframe with shipper_id, is_contraband_cargo,
                      risk_point and day columns
    :param day_fuc: udf producing today's date as an int yyyymmdd partition key
    :return: (per-shipper window-count dataframe, per-shipper risk-score dataframe)
    """
    base = dataframe.select(["shipper_id", "is_contraband_cargo", "day"]).filter(
        F.col("is_contraband_cargo") == 1)

    def window_count(days, alias):
        # Per-shipper record count, optionally restricted to the last `days` days.
        scoped = base if days is None else base.filter(datediff(current_date(), F.col("day")) <= days)
        return scoped.groupby("shipper_id").count().withColumnRenamed("count", alias)

    # Join order matches the original output column order.
    merge_shipper_df = window_count(None, "total_count")
    for days, alias in ((180, "last_6_months"), (30, "month_count"), (7, "week_count"), (3, "last_3_days")):
        merge_shipper_df = merge_shipper_df.join(window_count(days, alias), on="shipper_id", how="left")
    # A shipper absent from a window has no records there -> count 0.
    merge_shipper_df = merge_shipper_df.na.fill(0).withColumn("day", day_fuc())

    udf_make_risk_score = F.udf(make_risk_score, DoubleType())
    shipper_risk_point = dataframe.select(["shipper_id", "is_contraband_cargo", "risk_point"]).filter(
        F.col("is_contraband_cargo") == 1).groupby("shipper_id").agg(
        F.sum("risk_point").alias("total_risk_point"))
    shipper_risk_scores = (shipper_risk_point
                           .withColumn("risk_score", udf_make_risk_score(F.col("total_risk_point")))
                           .withColumn("day", day_fuc())
                           .select(["shipper_id", "risk_score", "day"]))

    return merge_shipper_df, shipper_risk_scores


def result_driver_dataframe(dataframe, day_fuc=common_make_day):
    """
    Build the driver risk statistics and risk scores.

    Only contraband records (is_contraband_cargo == 1) where the driver is
    responsible (driver_responsible == 1) are counted. The five duplicated
    groupby/count blocks are folded into one windowed-count helper.

    :param dataframe: spark dataframe with driver_id, is_contraband_cargo,
                      driver_responsible, risk_point and day columns
    :param day_fuc: udf producing today's date as an int yyyymmdd partition key
    :return: (per-driver window-count dataframe, per-driver risk-score dataframe)
    """
    base = dataframe.select(["driver_id", "is_contraband_cargo", "driver_responsible", "day"]).filter(
        F.col("is_contraband_cargo") == 1).filter(F.col("driver_responsible") == 1)

    def window_count(days, alias):
        # Per-driver record count, optionally restricted to the last `days` days.
        scoped = base if days is None else base.filter(datediff(current_date(), F.col("day")) <= days)
        return scoped.groupby("driver_id").count().withColumnRenamed("count", alias)

    # Join order matches the original output column order.
    merge_driver_df = window_count(None, "total_count")
    for days, alias in ((180, "last_6_months"), (30, "month_count"), (7, "week_count"), (3, "last_3_days")):
        merge_driver_df = merge_driver_df.join(window_count(days, alias), on="driver_id", how="left")
    # A driver absent from a window has no records there -> count 0.
    merge_driver_df = merge_driver_df.na.fill(0).withColumn("day", day_fuc())

    udf_make_risk_score = F.udf(make_risk_score, DoubleType())
    driver_risk_point = dataframe.select(
        ["driver_id", "is_contraband_cargo", "driver_responsible", "risk_point"]).filter(
        F.col("is_contraband_cargo") == 1).filter(F.col("driver_responsible") == 1).groupby("driver_id").agg(
        F.sum("risk_point").alias("total_risk_point"))
    driver_risk_scores = (driver_risk_point
                          .withColumn("risk_score", udf_make_risk_score(F.col("total_risk_point")))
                          .withColumn("day", day_fuc())
                          .select(["driver_id", "risk_score", "day"]))

    return merge_driver_df, driver_risk_scores


def result_address_dataframe(dataframe, day_fuc=common_make_day):
    """
    Build the per-geohash (address area) risk scores.

    Risk points of contraband records are summed separately over start and
    end geohashes, outer-joined on the geohash, and the two sums combined
    into one sigmoid-mapped risk score.

    :param dataframe: spark dataframe with start_geohash, end_geohash,
                      is_contraband_cargo and risk_point columns
    :param day_fuc: udf producing today's date as an int yyyymmdd partition key
    :return: dataframe with geohash, risk_score and day columns
    """
    def make_geohash_risk_score(start_risk_point, end_risk_point):
        # Combine start- and end-address contributions before the sigmoid mapping.
        return make_risk_score(start_risk_point + end_risk_point)

    def sum_risk_points(geohash_col, alias):
        # Sum risk points of contraband records grouped by one geohash column,
        # normalizing the column name to "geohash" for the join.
        return dataframe.select([geohash_col, "is_contraband_cargo", "risk_point"]).filter(
            F.col("is_contraband_cargo") == 1).groupby(geohash_col).agg(
            F.sum("risk_point").alias(alias)).withColumnRenamed(geohash_col, "geohash")

    start_scores = sum_risk_points("start_geohash", "total_start_risk_point")
    end_scores = sum_risk_points("end_geohash", "total_end_risk_point")
    # Outer join: an area may appear only as a start or only as an end address;
    # missing sides contribute 0.
    joined = start_scores.join(end_scores, on="geohash", how="outer").na.fill(0)

    # Distinct name for the UDF: the original rebound make_geohash_risk_score
    # itself, shadowing the plain function.
    udf_geohash_risk_score = F.udf(make_geohash_risk_score, DoubleType())
    return joined.withColumn(
        "risk_score",
        udf_geohash_risk_score(F.col("total_start_risk_point"), F.col("total_end_risk_point"))
    ).withColumn("day", day_fuc()).select(["geohash", "risk_score", "day"])


if __name__ == '__main__':
    # Pin driver and executors to the same Python build so UDFs unpickle cleanly.
    os.environ["PYSPARK_PYTHON"] = "/usr/local/bin/python3.8"
    os.environ["PYSPARK_DRIVER_PYTHON"] = "/usr/local/bin/python3.8"
    spark = SparkSession.builder.appName("nlpcv_contraband_geohash").getOrCreate()
    hc = HiveContext(sparkContext=spark.sparkContext)
    # NOTE(review): sc is only needed by the commented-out csv debug path below.
    sc = SQLContext(sparkContext=spark.sparkContext)
    # Allow appending into dynamic partitions (the `day` column) without
    # naming each partition explicitly.
    hc.setConf("hive.exec.dynamic.partition.mode", "nonstrict")

    df = hc.table("tmp.nlpcv_contraband_transaction_record").select(
        ["id", "shipper_id", "cargo_id", "driver_id", "driver_responsible", "is_contraband_cargo", "start_addr_lat",
         "start_addr_lon", "end_addr_lat", "end_addr_lon", "day"])
    # Local debug alternative: load the same schema from a csv file instead of Hive.
    # df = sc.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('fake_data.csv')
    df.show()
    # Add start_geohash / end_geohash columns from the address coordinates.
    df = generator_address_geohash(df)
    # Score each record by its age (interval_days -> risk_point).
    df = generator_interval_days_score(df)

    # Build the aggregated result dataframes.
    shipper_statistics, shipper_risk_score = result_shipper_dataframe(df)
    driver_statistics, driver_risk_score = result_driver_dataframe(df)
    address_risk_score = result_address_dataframe(df)
    shipper_statistics.show()
    shipper_risk_score.show()
    driver_statistics.show()
    driver_risk_score.show()
    address_risk_score.show()

    # Append today's results into the day-partitioned Hive tables.
    shipper_statistics.write.partitionBy("day").format("Hive").saveAsTable("tmp.nlpcv_contraband_shipper_statistics", mode="append")
    shipper_risk_score.write.partitionBy("day").format("Hive").saveAsTable("tmp.nlpcv_contraband_shipper_score", mode="append")
    driver_statistics.write.partitionBy("day").format("Hive").saveAsTable("tmp.nlpcv_contraband_driver_statistics", mode="append")
    driver_risk_score.write.partitionBy("day").format("Hive").saveAsTable("tmp.nlpcv_contraband_driver_score", mode="append")
    address_risk_score.write.partitionBy("day").format("Hive").saveAsTable("tmp.nlpcv_contraband_address_score", mode="append")
    spark.stop()
    print("spark stopped!")
