# import pandas as pd
#
#
#
# # （1）由于每个csv中大概都有100多万条数据，我们采取分数据块读取
# # 读取文件数据
# # def read_actionData(filePath, size=10000):
# #     """
# #     filePath ： 文件路径
# #     size ： 数据行数
# #     """
# #     # 使用迭代器的方式读取数据
# #     df = pd.read_csv(filePath, header=0, iterator=True)
# #     # 保存数据块的列表
# #     chunks = []
# #     # 循环起始值
# #     loop = True
# #     while loop:
# #         try:
# #             chunk = df.get_chunk(size)[["user_id", "sku_id", "type",
# #                                         "time", 'cate']]
# #             chunks.append(chunk)
# #         except StopIteration:
# #             loop = False
# #             print("Iteration is stopped")
# #     df_ac = pd.concat(chunks, ignore_index=True)
# #     return df_ac
#
#
# # 将多个表的数据合并在一起
# df_ac = read_actionData(filePath='Data_Action_201602.csv')
# # print(df_ac)
#
# """
# 标记潜在用户：
#     必须有购买行为；
#     对同一个商品，购买行为和其他交互行为的时间相差多于一天
# 1.浏览商品详情页；2.加入购物车；3.购物车删除；4.下
# 单；5.关注；6.点击；
# """
# # 2.提取type为4的数据 --找出有过消费的用户
# df_ac_type = df_ac[df_ac['type'] == 4]
# # print(df_ac_type)
#
# # 3.查看每类商品用户下单的占比
# # 按照cate列的不同商品类别进行分组，统计每个类别包含的样本数量，结果存储在count中
# cate_count = df_ac_type.groupby('cate').size().reset_index(name='count')
# # print(cate_count)
# # 计算每个类别的样本数占总样本数的比例，结果存储在rate中
# cate_count['rate'] = cate_count['count'] / df_ac_type.shape[0]
# # print(cate_count)
#
# # 4.cate=4的数据占比最高，选取cate=4的数据
# df_ac_cate_4 = df_ac_type[df_ac_type['cate'] == 4]
# print(df_ac_cate_4)
#
# # 5.计算每个用户的最后购买时间
# # 获取用户购买时间，按照‘user_id’分组，取time最大值，存储在time中
# df_usr_buy_time = df_ac_cate_4.groupby('user_id')['time'].max().reset_index(name='time')
# # print(df_usr_buy_time)
#
# # 6.用户最早与该商品交易的日期
# # 获取 cate=4 商品的全部行为数据（需在全量行为表 df_ac 上筛选，而非仅购买记录）
# df_ac_allcate = df_ac[df_ac['cate'] == 4]
# print(df_ac_allcate)
# # 筛选df_usr_buy_time中每个用户的行为数据
# df_all_buy_ac = pd.merge(df_usr_buy_time,df_ac_allcate,left_on='user_id',right_on='user_id')
# # print(df_all_buy_ac)
#
#
# # 获取每个用户行为最开始的时间
# # 将日期格式转换为标准格式
# # df_usr_buy_time['time'] = pd.to_datetime(df_usr_buy_time['time'])
# # df_all_buy_ac['time_x'] = pd.to_datetime(df_all_buy_ac['time_x'])
# # df_all_buy_ac['time_y'] = pd.to_datetime(df_all_buy_ac['time_y'])
# # 获取用户最开始行为时间；合并后左表购买时间为 time_x，右表交互时间为 time_y，故取 time_y 的最小值
# df_usr_ac_fi = df_all_buy_ac.groupby('user_id')['time_y'].min().reset_index(name='first_time')
# # print(df_usr_ac_fi)
#
# # 合并数据
# merged_time = pd.merge(
#     df_all_buy_ac[['user_id', 'time_x']],  # 包含用户每次交互的time_x
#     df_usr_ac_fi[['user_id', 'first_time']],  # 包含用户最早交互时间
#     on='user_id'  # 按用户ID对齐，确保同一用户的时间进行计算
# )
# # print(merged_time)
# # df.columns = ['user_id','buy_time','ac_time',]
# # df['days'] = (df['buy_time'].astype('datetime64[ns]') - df['ac_time'].astype('datetime64[ns]')).dt.days
#
# # 7.计算最早交互时间和最后购买时间差
# # df_usr_ac_fi['time_diff'] = (df_usr_ac_fi['first_time'] - df_all_buy_ac['time_x']).dt.days
# # print(df_usr_ac_fi)
# # merged_time['time_diff'] = (merged_time['time_x'] - merged_time['first_time']).dt.days
#
# # # 8.获取高潜用户
# # # high_dive = df_usr_ac_fi[df_usr_ac_fi['time_diff']>1]
# # # print(high_dive)
# # high_potential_users = merged_time.groupby('user_id').apply(
# #     lambda x: (x['time_diff'] > 1).any()  # 只要有一次满足条件即视为高潜用户
# # ).reset_index(name='is_high_potential')
# #
# # # 保存结果
# # # high_dive.to_csv('high_dive.csv',index=False)
# # high_dive = high_potential_users[high_potential_users['is_high_potential']]['user_id']
# # # print(high_dive)