from pyspark.sql import Window
from pyspark.sql.session import SparkSession
from pyspark.sql.functions import *

# Build (or reuse) the SparkSession: Hive support for warehouse tables plus
# the MySQL JDBC driver jar required by the JDBC sinks further down.
spark = (
    SparkSession.builder
    .appName('demo1_sparksession')
    .enableHiveSupport()
    .config("spark.jars", "jars/mysql-connector-java-8.0.29.jar")
    .getOrCreate()
)
# /user/root/dm_db

# | 字段名        | 数据类型 | 描述      |
# | ------------- | -------- | --------- |
# | prov_id       | STRING   | 省份ID    |
# | prov_name     | STRING   | 省份名称  |
# | city_id       | STRING   | 城市ID    |
# | city_name     | STRING   | 城市名称  |
# | county_id     | STRING   | 区县ID    |
# | county_name   | STRING   | 区县名称  |
# | city_level    | STRING   | 城市等级  |
# | economic_belt | STRING   | 经济带    |
# | city_feature1 | STRING   | 城市特征1 |
# Stay-point events, tab-separated: one row per (user, stay) observation.
_staypoint_schema = (
    'mdn string,date string,county string,lon double,lat double,'
    'bsid string,grid_id string,biz_type string,event_type string,'
    'data_source string'
)
staypoint_df = (
    spark.read
    .option('sep', '\t')
    .schema(_staypoint_schema)
    .csv('/user/root/dm_db/staypoint')
)

# User profile tags, comma-separated; resi_county_id is the registered
# residence county used by the accuracy check below.
_usertag_schema = (
    'mdn string,name string,gender string,age string,id_number string,'
    'number_attr string,trmnl_brand string,trmnl_price string,packg string,'
    'conpot string,resi_grid_id string,resi_county_id string'
)
usertag_df = (
    spark.read
    .option('sep', ',')
    .schema(_usertag_schema)
    .csv('/user/root/dm_db/usertag')
)

# Administrative-region dimension table (see the field table above).
# NOTE(review): not referenced by any visible step in this file.
_admincode_schema = (
    'prov_id string,prov_name string,city_id string,city_name string,'
    'county_id string,county_name string,city_level string,'
    'economic_belt string,city_feature1 string'
)
admincode_df = (
    spark.read
    .option('sep', ',')
    .schema(_admincode_schema)
    .csv('/user/root/dm_db/admin_code')
)

##### 2.1 常住地验证分析（20分）

# - **需求描述**：验证系统记录的常住地与实际活动区域的一致性
# - **分析逻辑**：
#   - 计算用户在常住地区县的停留点占比
#   - 识别异常用户(常住地停留占比<30%)
# - **输出要求**：
#   - 常住地准确率统计表
#   - 异常用户名单及活动热区

# 改进后的代码如下：

# 计算每个用户的总停留点数量
# Attach profile tags to every stay point; only users present in both
# sources take part in the residence check.
joined_df = staypoint_df.join(usertag_df, 'mdn', 'inner')

# Per-user residence statistics:
#   total_num       - total stay points of the user
#   residence_match - 1 when the stay point lies in the user's registered
#                     residence county, else 0
#   residence_num   - stay points inside the residence county: the SUM of the
#                     0/1 flag over the user's window. (The original used
#                     count(...) over (mdn, residence_match), which counts the
#                     rows of whichever partition a row falls in — non-matching
#                     rows carried the count of NON-matching points, so the
#                     ratio was wrong and differed row to row for one user.)
#   residence_ratio - share of stay points in the residence county; with the
#                     per-user window it is identical on every row of a user,
#                     so the later dropDuplicates(['mdn']) is deterministic
#                     for this column.
user_window = Window.partitionBy('mdn')
result_df = joined_df \
    .withColumn('total_num', count('county').over(user_window)) \
    .withColumn('residence_match', when(col('county') == col('resi_county_id'), 1).otherwise(0)) \
    .withColumn('residence_num', sum('residence_match').over(user_window)) \
    .withColumn('residence_ratio', col('residence_num') / col('total_num'))

# Residence-accuracy table: one row per user, written to MySQL table demo_3.
# NOTE(review): residence_match differs per row of a user, so the row kept by
# dropDuplicates is arbitrary for that column — confirm it is wanted here.
accuracy_df = result_df \
    .select('mdn', 'residence_ratio', 'residence_match') \
    .dropDuplicates(['mdn'])

(accuracy_df.write
    .format("jdbc")
    .option("url", "jdbc:mysql://master:3306/dianxim")
    .option("driver", "com.mysql.cj.jdbc.Driver")
    .option("dbtable", "demo_3")
    .option("user", "root")
    .option("password", "123456")
    .mode("overwrite")
    .save())


# Flag users by residence accuracy: ex_user == 0 means abnormal
# (residence share below 30%), ex_user == 1 means normal.
result_df = result_df.withColumn(
    'ex_user',
    when(col('residence_ratio') < 0.3, 0).otherwise(1),
)


# Accuracy summary: distinct users per category. The human-readable label is
# derived from the grouping column AFTER the aggregation, keeping the agg
# clause to pure aggregate expressions.
result_df.groupBy('ex_user') \
    .agg(countDistinct('mdn').alias('user_count')) \
    .withColumn(
        'user_type',
        when(col('ex_user') == 0, lit("异常用户")).otherwise(lit("正常用户")),
    ) \
    .select('user_type', 'user_count') \
    .show()


# Activity hot zones of abnormal users (ex_user == 0): every county the user
# had a stay point in, both as an array and as a comma-separated string.
result_df = result_df.filter(col('ex_user') == 0) \
    .groupBy('mdn') \
    .agg(collect_list('county').alias('counties')) \
    .withColumn('counties_str', concat_ws(',', 'counties'))

# The JDBC sink has no type mapping for array<string>, so the 'counties'
# column must be dropped before writing — the original wrote it along and the
# save failed on the unsupported array type. counties_str carries the same
# information in a MySQL-storable form.
result_df.drop('counties').write \
    .format("jdbc") \
    .option("url", "jdbc:mysql://master:3306/dianxim") \
    .option("driver", "com.mysql.cj.jdbc.Driver") \
    .option("dbtable", "demo_4") \
    .option("user", "root") \
    .option("password", "123456") \
    .mode("overwrite") \
    .save()







