# -*- coding: utf-8 -*-
"""
Created on Sat Apr 12 00:04:30 2025

@author: tianr
"""
import os
import pandas as pd
import numpy as np
import json
import gc
import time

# Record the script start time; total runtime is reported at the end.
start = time.time()


# Dataset folder paths (.parquet files); only folder_path2 (the 10G set)
# is actually read below.
folder_path2 = 'D:/DTR/MyWork/2025/DataMining/personalProject/10G_data_new'
folder_path3 = 'D:/DTR/MyWork/2025/DataMining/personalProject/30G_data_new'

#读取数据集文件夹中.parquet数据文件
def read_parquet_folder(folder_path):
    """Read every .parquet file in *folder_path* into one DataFrame.

    Only the columns needed to build the user profile are loaded.
    Files that fail to read are reported and skipped.

    Returns a (possibly empty) DataFrame with the selected columns.
    """
    # Columns required for the user-profile analysis; hoisted out of the
    # loop so the list is built once instead of once per file.
    columns_to_read = ['id', 'gender', 'age', 'income', 'purchase_history']

    all_data = []
    for filename in os.listdir(folder_path):
        if filename.endswith('.parquet'):
            file_path = os.path.join(folder_path, filename)
            try:
                df = pd.read_parquet(file_path, columns=columns_to_read)
                all_data.append(df)
            except Exception as e:
                print(f"读取文件 {file_path} 时出错: {e}")

    # pd.concat raises ValueError on an empty list; return an empty frame
    # with the expected columns instead of crashing when the folder holds
    # no readable parquet files.
    if not all_data:
        return pd.DataFrame(columns=columns_to_read)

    combined_df = pd.concat(all_data, ignore_index=True)
    # Drop the per-file frames promptly to reduce peak memory.
    del all_data
    gc.collect()
    return combined_df

# 定义一个函数来计算总金额
def calculate_total_amount(json_str):
    """Compute the total amount of one purchase-history record.

    *json_str* is expected to be a JSON object such as
    {"avg_price": ..., "items": [...]}; the total is
    avg_price * number of items.

    Returns None when the input is not valid JSON, is not a JSON object,
    or lacks a usable avg_price.
    """
    try:
        data_dict = json.loads(json_str)
        average_price = data_dict.get('avg_price')
        items = data_dict.get('items', [])
        return average_price * len(items)
    # TypeError covers json_str=None and avg_price=None; AttributeError
    # covers JSON that decodes to a non-dict (e.g. "5"), which previously
    # crashed on .get().
    except (json.JSONDecodeError, TypeError, AttributeError):
        return None
    
data = read_parquet_folder(folder_path2)

# Check for fully duplicated rows.
has_duplicates = data.duplicated().any()

if has_duplicates:
    print("DataFrame中存在重复行。")
    # Bug fix: select the duplicated rows themselves with boolean indexing;
    # the original printed the raw True/False mask instead of the rows.
    duplicate_rows = data[data.duplicated()]
    print("重复的行如下：")
    print(duplicate_rows)
else:
    print("DataFrame中不存在重复行。")


# 2. Data preprocessing
# 2.1 Handle missing values: drop any row containing a NaN.
data = data.dropna()

# 2.2 Keep only rows whose gender is '男' or '女'. Take an explicit copy:
# assigning a column on a boolean-filtered slice otherwise triggers
# SettingWithCopyWarning and the write may silently not stick.
valid_genders = ['男', '女']
data = data[data['gender'].isin(valid_genders)].copy()

# Encode gender numerically: '男' -> 0, '女' -> 1.
gender_mapping = {'男': 0, '女': 1}
data['gender'] = data['gender'].map(gender_mapping)

# 2.3 Remove age outliers using the 1.5*IQR rule.
q1 = data['age'].quantile(0.25)
q3 = data['age'].quantile(0.75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
data = data[(data['age'] >= lower_bound) & (data['age'] <= upper_bound)].copy()

# Rebinding is enough here; the original pd.DataFrame(data) built a
# redundant wrapper around the same data.
df = data

del data
gc.collect()

# Per-transaction total derived from the purchase_history JSON string.
df['total_amount'] = df['purchase_history'].apply(calculate_total_amount)


# Correlation analysis between the numeric attributes.
# Select the columns to correlate.
correlation_df = df[['id', 'total_amount', 'age', 'gender', 'income']]

# Compute the three correlation matrices.
pearson_corr = correlation_df.corr(method='pearson')
spearman_corr = correlation_df.corr(method='spearman')
kendall_corr = correlation_df.corr(method='kendall')

# Print each matrix under its heading (same output as printing inline).
for heading, matrix in (
        ('皮尔逊相关系数：', pearson_corr),
        ('\n斯皮尔曼相关系数：', spearman_corr),
        ('\n肯德尔相关系数：', kendall_corr),
):
    print(heading)
    print(matrix)


# 2. User profile construction
# Bug fix: the original built the frame from the row-indexed df['id']
# together with the id-indexed groupby results; pandas aligns series by
# index, so total_spent/purchase_count were mis-mapped (or NaN) per row.
# Aggregate once per user and reset the index instead.
per_user = df.groupby('id')['total_amount'].agg(
    total_spent='sum',       # total purchase amount per user
    purchase_count='count',  # number of (non-null) purchases per user
)
user_profile = per_user.reset_index().rename(columns={'id': 'user_id'})


# 3. Identify potential high-value users.
# Simple rule: a user is high-value when both total spend and purchase
# count are in the top 20% (>= the 0.8 quantile).
high_value_threshold_spent = user_profile['total_spent'].quantile(0.8)
high_value_threshold_count = user_profile['purchase_count'].quantile(0.8)

# The boolean expression already yields True/False per row;
# np.where(cond, True, False) was a redundant round-trip.
user_profile['is_high_value'] = (
    (user_profile['total_spent'] >= high_value_threshold_spent) &
    (user_profile['purchase_count'] >= high_value_threshold_count)
)


print("用户画像：")
print(user_profile)
print("\n潜在高价值用户：")
print(user_profile[user_profile['is_high_value']])
                
# Export the user profile to a CSV file.
user_profile.to_csv('highValuableUser.csv', index=False, encoding='utf-8')
# Bug fix: the message previously named user_profile.csv, but the file
# written above is highValuableUser.csv.
print("user profile has been saved in the highValuableUser.csv file.")

end = time.time()
print(f"运行时间为：{end - start} 秒")