from pyspark.sql import Window
from pyspark.sql.session import SparkSession

from pyspark.sql.functions import *
from pyspark.sql.types import FloatType, DoubleType
import sys

# Spark session with Hive support: the job reads dwd/dim Hive tables and
# writes a dws partition at the end.
spark = SparkSession.builder.enableHiveSupport().appName("dws_grid_stay_d_i").getOrCreate()

# Partition date (ds) to process, passed on the command line.
# Fail fast with a usage message instead of an opaque IndexError when missing.
if len(sys.argv) < 2:
    sys.exit("Usage: dws_grid_stay_d_i.py <ds>")
ds = sys.argv[1]
print(f"正在处理：{ds}")

# 1. Merge consecutive stay points of the same user in the same grid into one record.
# Per-user timeline, ordered by the raw start_time of each stay point.
user_timeline = Window.partitionBy("mdn").orderBy("start_time")

staypoint = (
    spark.table("dwd.dwd_staypoint_d_i_msk")
    .where(col("ds") == ds)
    # grid id of the previous stay point for the same user
    .withColumn("last_grid", lag("grid_id", 1).over(user_timeline))
    # 1 marks the start of a new stay segment, 0 continues the current one
    # (the first row per user has last_grid = null, so it gets 1)
    .withColumn("flag", when(col("grid_id") == col("last_grid"), 0).otherwise(1))
    # running sum of the flags assigns every consecutive-stay segment its own id
    .withColumn("stay_type", sum("flag").over(user_timeline))
)

# Collapse each segment to one row: number of points, earliest entry, latest exit;
# reformat the 14-digit timestamps into 'yyyy-MM-dd HH:mm:ss'.
grid_point = (
    staypoint
    .groupby("mdn", "grid_id", "stay_type", "county")
    .agg(
        count("mdn").alias("point_num"),
        min("start_time").alias("in_time"),
        max("end_time").alias("out_time"),
    )
    .withColumn("in_time", from_unixtime(unix_timestamp("in_time", 'yyyyMMddHHmmss'), 'yyyy-MM-dd HH:mm:ss'))
    .withColumn("out_time", from_unixtime(unix_timestamp("out_time", 'yyyyMMddHHmmss'), 'yyyy-MM-dd HH:mm:ss'))
    .drop("stay_type")
)

# 2. Join the user-profile dimension to attach user attributes.
# NOTE(review): this is an inner join, so stay records whose mdn has no
# profile row are silently dropped — confirm that is intended.
user_tag = spark.table("dim.dim_pub_usertag_msk")
grid_point = grid_point.join(user_tag, "mdn")

# Age bucketing. Fixed: the original boundaries contradicted the labels
# (age 25 fell into "26-35", 35 into "36-45", 45 into "46+"); the conditions
# below make every bucket match its label exactly.
grid_point = grid_point.withColumn(
    "age",
    when(col("age") < 18, "-18")
    .when((col("age") >= 18) & (col("age") <= 25), "18-25")
    .when((col("age") >= 26) & (col("age") <= 35), "26-35")
    .when((col("age") >= 36) & (col("age") <= 45), "36-45")
    .otherwise("46+"),
)

# Consumption-potential segmentation from handset price (trmnl_price) and
# plan fee (packg): 高 = high, 中 = medium, 低 = low.
grid_point = grid_point.withColumn(
    "conpot",
    when((col("trmnl_price") > 5000) & (col("packg") > 199), "高")
    .when(((col("trmnl_price") > 3000) & (col("trmnl_price") <= 5000)) | (
            (col("packg") > 99) & (col("packg") <= 199)), "中")
    .otherwise("低"),
)

# 3. Distance from the previous stay grid.
from util import haversine_fun

# Register the project's haversine helper as a UDF; it takes two grid ids
# and returns the distance as a double.
haversine = udf(haversine_fun, DoubleType())

# Previous stay grid per user, ordered by the formatted entry time.
# (The first stay of each user gets null, which propagates through the UDF.)
grid_point = grid_point.withColumn(
    "last_stay_grid_id",
    lag("grid_id", 1).over(Window.partitionBy("mdn").orderBy("in_time")),
)

# Distance between the current grid and the previous stay grid.
grid_point = grid_point.withColumn(
    "grid_distance", haversine("last_stay_grid_id", "grid_id")
)

# 4. Distance between the current grid and the user's home grid (resi_grid_id).
grid_point = grid_point.withColumn(
    "resi_distance", haversine("resi_grid_id", "grid_id")
)

# 5. Attach province / city / county names from the admin-code dimension.
# The dimension table is small, so broadcast it to avoid a shuffle join.
admin_code = spark.table("dim.dim_pub_admin_code")

# Stay duration in minutes (3 decimals), derived from the formatted
# in_time / out_time strings produced earlier.
stay_minutes = round(
    (unix_timestamp("out_time", 'yyyy-MM-dd HH:mm:ss')
     - unix_timestamp("in_time", 'yyyy-MM-dd HH:mm:ss')) / 60,
    3,
).alias("stay_time")

# Final column layout for the dws table.
dws_grid_stay_d_i = (
    grid_point
    .join(admin_code.hint("broadcast"), col("county") == col("county_id"))
    .select(
        "mdn",
        "gender",
        "age",
        "conpot",
        "resi_grid_id",
        "resi_county_id",
        "in_time",
        "out_time",
        year("in_time").alias("year"),
        month("in_time").alias("month"),
        dayofmonth("in_time").alias("day"),
        weekofyear("in_time").alias("week"),
        "prov_name",
        "city_name",
        "county_name",
        "grid_id",
        stay_minutes,
        "grid_distance",
        "resi_distance",
        "point_num",
    )
)

# Overwrite the target partition for this ds.
dws_grid_stay_d_i.createOrReplaceTempView("tmp")
spark.sql(f"""
insert overwrite table dws.dws_grid_stay_d_i partition(ds='{ds}')
select * from tmp
""")
