# # %%
# # 将 user_behavior_20141118_20141218 文件格式化为统一的格式
# import os
#
# import pandas as pd
#
# # 读取CSV文件
# input_file = "../static/data/user_behavior_20141118_20141218.csv"
# df = pd.read_csv(input_file, encoding="utf-8")
#
# # 删除 user_geohash 列（如果存在）
# if "user_geohash" in df.columns:
#     df = df.drop(columns=["user_geohash"])
#     print("删除user_geohash列成功\n")
#     print(df.head())
#
# # 将 item_category 列移动到第三列
# if "item_category" in df.columns:
#     cols = list(df.columns)
#     print(cols)
#     cols.insert(2, cols.pop(cols.index("item_category")))
#     df = df[cols]
#     print("item_category列移动到第三列成功\n")
#     print(df.head())
#
# # 将 behavior_type 数字替换为对应的字符串
# behavior_mapping = {1: "pv", 2: "fav", 3: "cart", 4: "buy"}
# if "behavior_type" in df.columns:
#     df["behavior_type"] = df["behavior_type"].map(behavior_mapping)
#     print("behavior_type数字替换为对应的字符串成功\n")
#     print(df.head())
#
# # 构造输出文件名
# base, ext = os.path.splitext(input_file)
# output_file = f"{base}_edit{ext}"
#
# # 保存为新的文件
# df.to_csv(output_file, index=False, encoding="utf-8")
#
# print(f"数据处理完成，保存为：{output_file}")
#
# # %%
# # 将 user_behavior_20171125_20171203 文件格式化为统一的格式
#
# import os
#
# import pandas as pd
#
# # 读取CSV文件
# input_file = "../static/data/user_behavior_20171125_20171203.csv"
# df = pd.read_csv(input_file, encoding="utf-8", header=None)
#
# # 为这个文件添加一个表头user_id,item_id,item_category,behavior_type,time
# df.columns = ["user_id", "item_id", "item_category", "behavior_type", "time"]
# print(df.head())
#
# # 构造输出文件名
# base, ext = os.path.splitext(input_file)
# output_file = f"{base}_edit{ext}"
#
# # 保存为新的文件
# df.to_csv(output_file, index=False, encoding="utf-8")
# print(f"数据处理完成，保存为：{output_file}")
#
# # %%
# '''
# dtypes = {
#     'user_id': 'int32',
#     'item_id': 'int32',
#     'item_category': 'int32',
#     'behavior_type': 'str',
#     'timestamp': 'str'
# }
#
# # 加载数据集 2014
# data = pd.read_csv("../static/data/user_behavior_20141118_20141218_edit.csv", dtype=dtypes)
# # 保存为 Parquet 格式
# data.to_parquet("../static/data/user_behavior_20141118_20141218_edit.parquet", engine='pyarrow')
# # 加载数据集 2017
# data = pd.read_csv("../static/data/user_behavior_20171125_20171203_edit.csv", dtype=dtypes)
# # 保存为 Parquet 格式
# data.to_parquet("../static/data/user_behavior_20171125_20171203_edit.parquet", engine='pyarrow')
# '''

# %% 导入依赖包
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Arrow schema for the cleaned source file: string-typed IDs plus an
# epoch-second timestamp. Use int64 (not int32) so timestamps are not
# capped at the 2038 int32 rollover and match pandas' native int64 epoch
# representation; int32 -> int64 is a safe widening cast on read.
schema1 = pa.schema([
    ('user_id', pa.string()),
    ('item_id', pa.string()),
    ('item_category', pa.string()),
    ('behavior_type', pa.string()),
    ('time', pa.int64())
])

# Target schema for the rewritten file: the raw `time` column is replaced
# by a `date` string (YYYY-MM-DD) and an integer `hour` (0-23).
schema2 = pa.schema([
    ('user_id', pa.string()),
    ('item_id', pa.string()),
    ('item_category', pa.string()),
    ('behavior_type', pa.string()),
    ('date', pa.string()),
    ('hour', pa.int32())
])

# Load the dataset, coercing columns to the types declared in schema1.
data = pd.read_parquet("../static/data/user_behavior_20171125_20171203_edit.parquet", schema=schema1)

# Keep only records whose timestamp falls inside the advertised window
# [2017-11-25, 2017-12-04).
# NOTE(review): pd.to_datetime("...").timestamp() interprets the naive date
# in the *local* timezone; if the raw timestamps are UTC (or UTC+8), the
# window edges shift by the local UTC offset — confirm against the
# dataset's specification.
start_time = pd.to_datetime("2017-11-25").timestamp()
end_time = pd.to_datetime("2017-12-04").timestamp()
# The lower bound is positive, so it also discards the non-positive/garbage
# timestamps a separate `time > 0` pre-filter used to remove.
data = data[(data['time'] >= start_time) & (data['time'] < end_time)].copy()

# Convert epoch seconds to pandas datetimes.
data['time'] = pd.to_datetime(data['time'], unit='s')

# Derive `date` (datetime.date objects; stringified later batch by batch to
# limit peak memory) and `hour` columns, then drop the raw `time` column.
data['date'] = data['time'].dt.date
data['hour'] = data['time'].dt.hour
data = data.drop(columns=['time'])
print("准备写入文件！")
# Write out in fixed-size batches so the `date` -> str conversion never
# materializes for the whole frame at once.
output_file = "../static/data/user_behavior_20171125_20171203_edit_v1.1.parquet"
batch_size = 100000  # rows written per batch

with pq.ParquetWriter(output_file, schema2) as writer:
    for start in range(0, len(data), batch_size):
        # .copy() gives the batch its own storage: assigning the stringified
        # `date` below then mutates a private frame instead of a view of
        # `data` (avoids SettingWithCopyWarning and accidental write-back
        # into the source frame).
        batch = data.iloc[start: start + batch_size].copy()
        batch['date'] = batch['date'].astype(str)
        table = pa.Table.from_pandas(batch, schema=schema2)
        writer.write_table(table)
        print(f"已写入 {len(batch)} 条记录")

print("所有数据写入完成！")

