import os
import pandas as pd
import json
import numpy as np  # 新增用于处理缺失值

# 定义三组ID集合
# Three disjoint ID cohorts. When a clicked item belongs to one cohort,
# every *other* ID in that same cohort later becomes a label-0 (negative)
# sample row. Entries are kept sorted for easy visual diffing.
service_provider_ids = {
    "1745393239782543361", "1746723605406294018",
    "1747545536628006913", "1747549630780416002",
    "1747883569961246721", "1748176276113858562",
    "1752201132575887362", "1752547440390115330",
    "1762764208850960386", "1777220425443643393",
}

demand_ids = {
    "1784142763507912706", "1784143514028281858",
    "1784144143995965442", "1784145154827423745",
    "1784169542658363393", "1784180218621661185",
    "1792088154094243841", "1799254413328060418",
    "1943850179177177089", "1945698599953195010",
}

product_ids = {
    "1751877693189808129", "1752166454800834561",
    "1752300430601379842", "1752692190192230402",
    "1760648919532113921", "1772159794201956354",
    "1783054785324613633", "1914162459386523650",
    "1914165353569529857", "1940341982390190081",
}

# Directory layout. The data root can be overridden through the
# GUOCU_DATA_ROOT environment variable; the default reproduces the
# original hard-coded absolute paths, so existing deployments are
# unaffected.
DATA_ROOT = os.environ.get("GUOCU_DATA_ROOT", "/data/GuoCu_data")

# Raw click-event CSVs (walked recursively below).
input_dir = os.path.join(DATA_ROOT, "raw_data")
# All processed rows are appended into a single CSV in this directory.
output_dir = os.path.join(DATA_ROOT, "processed_data", "click-event_data")
# Single aggregated output file.
output_file = os.path.join(output_dir, "all_clicks.csv")

# Pre-computed feature tables, keyed by 'ip' (user) and 'content_id' (item).
user_feature_path = os.path.join(DATA_ROOT, "processed_data", "feature", "user", "user_features.csv")
item_feature_path = os.path.join(DATA_ROOT, "processed_data", "feature", "item", "item_features.csv")

def has_required_json_fields(json_str):
    """Return True iff *json_str* parses to a JSON object that has both
    an 'id' and a 'father' key.

    Returns False for malformed JSON, for non-string input (e.g. the NaN
    floats pandas produces for empty cells), and for JSON that decodes to
    a non-dict value.
    """
    try:
        data = json.loads(json_str)
    except (TypeError, ValueError):
        # TypeError: input is not str/bytes (e.g. NaN from read_csv).
        # ValueError covers json.JSONDecodeError for malformed JSON.
        # The original bare `except:` also swallowed KeyboardInterrupt etc.
        return False
    # Only JSON objects qualify. Without the isinstance guard, `'id' in data`
    # on a decoded *string* is a substring test and yields false positives
    # (e.g. json_str == '"idfather"').
    return isinstance(data, dict) and 'id' in data and 'father' in data

# Make sure the output directory exists before any writes; exist_ok makes
# this idempotent across reruns.
os.makedirs(output_dir, exist_ok=True)

# 1. Load the per-user feature table keyed by 'ip'. If the file is missing,
#    fall back to an empty frame so the merge step below is skipped cleanly.
if not os.path.exists(user_feature_path):
    print(f"警告: 用户特征文件 {user_feature_path} 不存在，跳过用户特征添加")
    user_features = pd.DataFrame(columns=['ip'])
else:
    user_features = pd.read_csv(user_feature_path)
    # The merge key must be string-typed on both sides of the join.
    user_features['ip'] = user_features['ip'].astype(str)
    # Prefix every non-key column so names cannot collide after merging.
    user_prefix_map = {c: f'user_{c}' for c in user_features.columns if c != 'ip'}
    user_features = user_features.rename(columns=user_prefix_map)
    print(f"已加载用户特征数据，共 {len(user_features)} 条记录")

# 2. Load the per-item feature table keyed by 'content_id'. If the file is
#    missing, fall back to an empty frame so the merge step below is skipped.
if not os.path.exists(item_feature_path):
    print(f"警告: 物品特征文件 {item_feature_path} 不存在，跳过物品特征添加")
    item_features = pd.DataFrame(columns=['content_id'])
else:
    item_features = pd.read_csv(item_feature_path)
    # The merge key must be string-typed on both sides of the join.
    item_features['content_id'] = item_features['content_id'].astype(str)
    # Prefix every non-key column so names cannot collide after merging.
    item_prefix_map = {c: f'item_{c}' for c in item_features.columns if c != 'content_id'}
    item_features = item_features.rename(columns=item_prefix_map)
    print(f"已加载物品特征数据，共 {len(item_features)} 条记录")

# Header-control flag: the first successful write uses mode 'w' with a
# header row; every later write appends header-less.
is_first_file = True

# Walk every CSV under input_dir; each qualifying file contributes
# exposure rows (one positive + up-to-nine negatives per click) to the
# single aggregated output file.
for root, dirs, files in os.walk(input_dir):
    for file in files:
        if file.endswith('.csv'):
            file_path = os.path.join(root, file)
            try:
                # Raw CSVs carry no header row; columns are addressed purely
                # by position throughout this loop.
                df = pd.read_csv(file_path, header=None)
                # Select qualifying rows
                if len(df.columns) >= 7:  # guard: need at least 7 positional columns
                    # Combined filter: col 1 == 'click', col 5 == 'homepage',
                    # col 6 == 'floor', and col 2 is JSON with 'id' + 'father'.
                    click_rows = df[
                        (df.iloc[:, 1] == 'click') &
                        (df.iloc[:, 5] == 'homepage') &
                        (df.iloc[:, 6] == 'floor') &
                        df.iloc[:, 2].apply(has_required_json_fields)
                    ]
                    if not click_rows.empty:
                        # Expand each click into positive + negative samples.
                        expanded_rows = []
                        for _, row in click_rows.iterrows():
                            try:
                                # Parse the JSON payload in column 2.
                                json_str = row.iloc[2]
                                json_data = json.loads(json_str)
                                current_id = json_data.get('id')
                                father_value = json_data.get('father')  # keep the original 'father' value
                            
                                if not current_id:
                                    continue  # skip records without an id
                            
                                # Resolve which cohort (if any) the clicked id belongs to.
                                group_ids = None
                                if current_id in service_provider_ids:
                                    group_ids = service_provider_ids
                                elif current_id in demand_ids:
                                    group_ids = demand_ids
                                elif current_id in product_ids:
                                    group_ids = product_ids
                            
                                if group_ids:
                                    # Positive sample: original row relabeled
                                    # 'exposure', label = 1.
                                    original_row = row.copy()
                                    original_row.iloc[1] = 'exposure'
                                    original_row = original_row.to_frame().transpose()
                                    original_row['label'] = 1
                                    # New columns: content_id and father.
                                    original_row['content_id'] = current_id
                                    original_row['father'] = father_value
                                    expanded_rows.append(original_row)
                            
                                    # Negative samples: every other id in the
                                    # same cohort, label = 0.
                                    other_ids = group_ids - {current_id}
                                    for replacement_id in other_ids:
                                        new_json_data = json_data.copy()
                                        new_json_data['id'] = replacement_id
                                        new_json_str = json.dumps(new_json_data)
                            
                                        new_row = row.copy()
                                        new_row.iloc[1] = 'exposure'
                                        new_row.iloc[2] = new_json_str
                                        new_row = new_row.to_frame().transpose()
                                        new_row['label'] = 0
                                        # New columns: content_id and father.
                                        new_row['content_id'] = replacement_id
                                        new_row['father'] = json_data.get('father')  # same father as the positive row
                                        expanded_rows.append(new_row)
                            except Exception as e:
                                print(f"处理行数据时出错: {e}")
                                continue
                        
                        # Concatenate all expanded sample rows for this file.
                        if expanded_rows:
                            result_df = pd.concat(expanded_rows, ignore_index=True)
                            
                            # Drop by POSITION: event type (1), JSON payload (2)
                            # and positions 5-9. NOTE(review): positional drop —
                            # assumes every raw file has the same column layout;
                            # errors='ignore' hides any mismatch. Verify.
                            columns_to_drop = [1, 2, 5, 6, 7, 8, 9]
                            result_df = result_df.drop(columns=result_df.columns[columns_to_drop], errors='ignore')
                            
                            # Move 'label' to the first column.
                            if 'label' in result_df.columns:
                                label_column = result_df.pop('label')
                                result_df.insert(0, 'label', label_column)
                            
                            # Append an 'ip' column taken positionally (slot 3
                            # after the drop/reorder above). NOTE(review):
                            # presumably this is the raw file's IP column —
                            # only holds if raw files have exactly 10 columns;
                            # confirm against the source data.
                            result_df['ip'] = result_df.iloc[:, 3]
                            
                            # 3. Left-join user features on 'ip'.
                            if not user_features.empty:
                                # Merge key must be string-typed on both sides.
                                result_df['ip'] = result_df['ip'].astype(str)
                                result_df = pd.merge(
                                    result_df, 
                                    user_features, 
                                    on='ip', 
                                    how='left'
                                )
                                # NOTE(review): fillna(np.nan) is a no-op —
                                # unmatched rows stay NaN either way; presumably
                                # a placeholder for a real default value.
                                user_feature_cols = [col for col in user_features.columns if col != 'ip']
                                if user_feature_cols:
                                    result_df[user_feature_cols] = result_df[user_feature_cols].fillna(np.nan)
                            
                            # 4. Left-join item features on 'content_id'.
                            if not item_features.empty:
                                # Merge key must be string-typed on both sides.
                                result_df['content_id'] = result_df['content_id'].astype(str)
                                result_df = pd.merge(
                                    result_df, 
                                    item_features, 
                                    on='content_id', 
                                    how='left'
                                )
                                # NOTE(review): same no-op fillna as above.
                                item_feature_cols = [col for col in item_features.columns if col != 'content_id']
                                if item_feature_cols:
                                    result_df[item_feature_cols] = result_df[item_feature_cols].fillna(np.nan)
                            
                            # Append this file's rows to the single output CSV.
                            result_df.to_csv(
                                output_file, 
                                index=False, 
                                header=is_first_file,  # header only on the very first write
                                mode='a' if not is_first_file else 'w'
                            )
                            
                            if is_first_file:
                                is_first_file = False
            except Exception as e:
                # Broad catch: log and skip the whole file so one bad CSV
                # does not abort the batch run.
                print(f"处理文件 {file_path} 时出错: {e}")