from pyspark.sql import SparkSession
from pyspark.sql.types import *
import random
from datetime import datetime, timedelta
import uuid

# Initialize the SparkSession (local mode, using all available cores).
spark = (
    SparkSession.builder
    .appName("DamoDataGenerator")
    .master("local[*]")
    .getOrCreate()
)

# ======================== 1. Schema definitions ========================

# User table: demographic attributes consumed by the profiling logic.
user_schema = StructType([
    StructField("user_id", IntegerType(), False),
    StructField("age", IntegerType(), False),
    StructField("has_child", BooleanType(), False),     # life-stage signal
    StructField("tao_qi_score", IntegerType(), False),  # "tao qi" loyalty score
    StructField("location_id", IntegerType(), False),   # FK into the geo table
])

# Order table: source data for spending-power metrics.
order_schema = StructType([
    StructField("user_id", IntegerType(), False),
    StructField("order_id", StringType(), False),        # unique order id
    StructField("order_date", DateType(), False),        # drives trailing-1-year spend windows
    StructField("payment_amount", DoubleType(), False),  # amount paid
])

# Behavior table: source data for behavioral-preference metrics.
behavior_schema = StructType([
    StructField("user_id", IntegerType(), False),
    StructField("behavior_id", StringType(), False),     # unique event id
    StructField("behavior_type", StringType(), False),   # view/purchase/add_cart/collect
    StructField("event_time", TimestampType(), False),   # drives trailing-30-day windows
    StructField("category_id", IntegerType(), False),    # product category id
    StructField("price", DoubleType(), True),            # populated only for "purchase" events
])

# Geo table: city/province lookup used for city-tier classification.
geo_schema = StructType([
    StructField("location_id", IntegerType(), False),
    StructField("city", StringType(), False),      # city name
    StructField("province", StringType(), False),  # province name
])

# ======================== 2. Generate mock data ========================
# ---------- 2.1 User table ----------
num_users = 1000  # simulate 1,000 users
user_data = []
for uid in range(1, num_users + 1):
    user_age = random.randint(18, 65)
    user_data.append((
        uid,
        user_age,
        # Users aged 30+ have a 50% chance of having a child; younger users never do.
        random.choice([True, False]) if user_age >= 30 else False,
        random.randint(100, 3000),  # loyalty score in [100, 3000]
        random.randint(1, 100),     # FK into the geo table's location_id
    ))
user_df = spark.createDataFrame(user_data, schema=user_schema)

# ---------- 2.2 Order table ----------
# Fixes vs. the original: `end_date` was defined but never used (the day span
# was hard-coded as 365), and `order_date` was stored as a datetime.datetime
# even though the schema declares DateType — it only worked because datetime
# subclasses date. The span is now derived from the window bounds and the
# value is converted to a plain date explicitly.
num_orders = 5000  # simulate 5,000 orders
order_data = []
# One-year window; the reference "today" is assumed to be 2025-07-05.
start_date = datetime(2024, 7, 5)
end_date = datetime(2025, 7, 5)
span_days = (end_date - start_date).days  # 365, same upper bound as before
for _ in range(num_orders):
    user_id = random.randint(1, num_users)
    order_id = str(uuid.uuid4())  # unique order id
    # DateType expects a date; .date() drops the (always-zero) time component.
    order_date = (start_date + timedelta(days=random.randint(0, span_days))).date()
    payment_amount = round(random.uniform(10, 10000), 2)  # 10–10,000 yuan
    order_data.append((user_id, order_id, order_date, payment_amount))
order_df = spark.createDataFrame(order_data, schema=order_schema)

# ---------- 2.3 Behavior table ----------
num_behaviors = 20000  # simulate 20,000 behavior events
behavior_types = ["view", "purchase", "add_cart", "collect"]
# Weights: browsing dominates, purchases next, add-to-cart and collects are rare.
behavior_weights = [0.6, 0.2, 0.15, 0.05]
behavior_data = []
for _ in range(num_behaviors):
    uid = random.randint(1, num_users)
    event_id = str(uuid.uuid4())  # unique event id
    event_kind = random.choices(behavior_types, weights=behavior_weights)[0]
    # Trailing-30-day window (reference "today": 2025-07-05), so 2025-06-05 onward.
    event_time = datetime(2025, 6, 5) + timedelta(days=random.randint(0, 30))
    cat_id = random.randint(1, 20)  # 20 simulated product categories
    # Only purchase events carry a price; every other type stays NULL.
    unit_price = round(random.uniform(10, 5000), 2) if event_kind == "purchase" else None
    behavior_data.append((uid, event_id, event_kind, event_time, cat_id, unit_price))
behavior_df = spark.createDataFrame(behavior_data, schema=behavior_schema)

# ---------- 2.4 Geo table ----------
# Predefined city/province pairs covering tier-1, new-tier-1 and tier-2 cities.
city_province_map = [
    ("北京", "北京"), ("上海", "上海"), ("广州", "广东"), ("深圳", "广东"),  # tier-1
    ("杭州", "浙江"), ("成都", "四川"), ("南京", "江苏"),                  # new tier-1
    ("武汉", "湖北"), ("西安", "陕西"), ("郑州", "河南")                   # tier-2
]
geo_data = []
for loc_id in range(1, 101):  # simulate 100 locations
    # The first len(map) ids get their fixed city; the rest reuse a random one.
    if loc_id <= len(city_province_map):
        pair = city_province_map[loc_id - 1]
    else:
        pair = city_province_map[random.randint(0, len(city_province_map) - 1)]
    geo_data.append((loc_id, pair[0], pair[1]))
geo_df = spark.createDataFrame(geo_data, schema=geo_schema)

# ======================== 3. Persist as Parquet ========================
output_base_path = "/data/damo_demo_data"  # local test path; switch to an HDFS path on a cluster
tables = [
    ("user_data", user_df),
    ("order_data", order_df),
    ("behavior_data", behavior_df),
    ("geo_data", geo_df),
]
for table_name, table_df in tables:
    table_df.write.parquet(f"{output_base_path}/{table_name}.parquet", mode="overwrite")

print(f"""
数据生成完成！路径：{output_base_path}
包含4张表：
- 用户表：user_data.parquet
- 订单表：order_data.parquet
- 行为表：behavior_data.parquet
- 地理位置表：geo_data.parquet
""")