import os
import pandas as pd
import json
from datetime import datetime, timedelta

# Location of the raw interaction logs and of the generated feature file.
raw_data_path = '/data/GuoCu_data/raw_data/'
save_path = '/data/GuoCu_data/processed_data/feature/item/item_inter_num.csv'

# Gather every CSV file found anywhere under the raw-data directory.
csv_files = [
    os.path.join(root, name)
    for root, _dirs, names in os.walk(raw_data_path)
    for name in names
    if name.endswith('.csv')
]

# Accumulator for the [click_time, content_id] rows that pass the filters below.
selected_data = []

# Read every CSV file and keep click events that carry a usable JSON payload.
# Expected positional row layout: col 0 = ISO timestamp string, col 1 = event
# type, col 2 = JSON payload — TODO confirm against the raw-data schema.
for csv_file in csv_files:
    try:
        df = pd.read_csv(csv_file)
    except Exception as e:
        # Only the file read itself is best-effort; report and move on.
        print(f"读取文件 {csv_file} 时出错: {e}")
        continue
    for _index, row in df.iterrows():
        if row.iloc[1] != 'click':
            continue
        try:
            json_data = json.loads(str(row.iloc[2]))
            # The payload may legally decode to a list/number/null; only
            # dict payloads can carry 'id'/'father'.
            if not isinstance(json_data, dict):
                continue
            if 'id' not in json_data or 'father' not in json_data:
                continue
            content_id = int(json_data['id'])
        except (json.JSONDecodeError, ValueError, TypeError):
            # Previously a failing int() or a non-dict payload escaped to the
            # outer handler and aborted the ENTIRE file; now a malformed row
            # is skipped on its own.
            continue
        try:
            click_time = datetime.strptime(str(row.iloc[0]), '%Y-%m-%dT%H:%M:%S')
        except ValueError:
            # Unparseable timestamp: drop just this row.
            continue
        selected_data.append([click_time, content_id])

# Assemble the filtered clicks into a frame for window-based aggregation.
selected_df = pd.DataFrame(selected_data, columns=['click_time', 'content_id'])

# Reference point for the trailing look-back windows.
now = datetime.now()

# Per-item click counts over several trailing windows (lengths in days).
time_windows = [3, 7, 14, 21]
result_dfs = []
for window_days in time_windows:
    cutoff = now - timedelta(days=window_days)
    in_window = selected_df.loc[selected_df['click_time'] >= cutoff]
    counts = (
        in_window.groupby('content_id')
        .size()
        .reset_index(name=f'item_click_num_{window_days}d')
    )
    result_dfs.append(counts)

# Merge the per-window counts into one table and persist the feature file.
if result_dfs:
    result = result_dfs[0]
    for df in result_dfs[1:]:
        result = pd.merge(result, df, on='content_id', how='outer')
    result = result.fillna(0)

    # content_ids that must always appear in the output, even with zero clicks.
    service_providers = [
        1752201132575887362, 1745393239782543361, 1747883569961246721,
        1746723605406294018, 1747545536628006913, 1747549630780416002,
        1748176276113858562, 1752547440390115330, 1762764208850960386,
        1777220425443643393
    ]
    demands = [
        1945698599953195010, 1943850179177177089, 1799254413328060418,
        1792088154094243841, 1784169542658363393, 1784143514028281858,
        1784180218621661185, 1784145154827423745, 1784144143995965442,
        1784142763507912706
    ]
    products = [
        1940341982390190081, 1914162459386523650, 1772159794201956354,
        1914165353569529857, 1783054785324613633, 1760648919532113921,
        1752300430601379842, 1752692190192230402, 1752166454800834561,
        1751877693189808129
    ]

    # Union of all mandatory ids, de-duplicated.
    all_content_ids = list(set(service_providers + demands + products))
    all_ids_df = pd.DataFrame({'content_id': all_content_ids}, dtype='int64')

    # Left-join so every mandatory id is present; missing counts become 0.
    result = pd.merge(all_ids_df, result, on='content_id', how='left')
    result = result.fillna(0).astype(
        {col: 'int64' for col in result.columns if col != 'content_id'}
    )

    # Total clicks observed in THIS run (no window restriction). Adding it
    # here — before any merge with an existing file — means it is accumulated
    # across runs exactly like the windowed counts. Previously it was merged
    # in afterwards, which collided with an existing 'item_click_total'
    # column on the third run and raised a KeyError.
    total_clicks = (
        selected_df.groupby('content_id').size().reset_index(name='item_click_total')
    )
    result = pd.merge(result, total_clicks, on='content_id', how='left')
    result['item_click_total'] = (
        result['item_click_total'].fillna(0).astype('int64')
    )

    # Count columns whose values accumulate across successive runs.
    num_cols = [f'item_click_num_{days}d' for days in time_windows]
    num_cols.append('item_click_total')

    # Make sure the output directory exists.
    os.makedirs(os.path.dirname(save_path), exist_ok=True)

    # If a previous feature file exists, add its counts to this run's counts.
    if os.path.exists(save_path):
        existing_data = pd.read_csv(save_path)
        # Stale ranks are meaningless after accumulation; drop them up front
        # instead of renaming and deleting them later.
        stale_ranks = [
            c for c in existing_data.columns if c.startswith('item_click_rank_')
        ]
        existing_data = existing_data.drop(columns=stale_ranks)

        merged_data = pd.merge(existing_data, result, on='content_id', how='outer')

        # Sum old + new for every accumulated column, handling the _x/_y
        # suffixes pandas adds on name collisions.
        for col in num_cols:
            x_col, y_col = f'{col}_x', f'{col}_y'
            if x_col in merged_data.columns and y_col in merged_data.columns:
                merged_data[col] = (
                    merged_data[x_col].fillna(0) + merged_data[y_col].fillna(0)
                )
                merged_data = merged_data.drop(columns=[x_col, y_col])
            elif x_col in merged_data.columns:
                merged_data = merged_data.rename(columns={x_col: col})
            elif y_col in merged_data.columns:
                merged_data = merged_data.rename(columns={y_col: col})
            # Summing floats (from fillna) would otherwise leave float columns
            # in the CSV while the first run writes int64.
            merged_data[col] = merged_data[col].fillna(0).astype('int64')

        result = merged_data

    # Rank every item per window from the (possibly accumulated) counts;
    # method='min' gives ties the same, best rank.
    for days in time_windows:
        num_col = f'item_click_num_{days}d'
        rank_col = f'item_click_rank_{days}d'
        result[rank_col] = (
            result[num_col].rank(ascending=False, method='min').astype('int64')
        )

    # Fixed column order — identical schema on first and subsequent runs
    # (previously the first run omitted 'item_click_total').
    required_columns = ['content_id', 'item_click_total']
    for days in time_windows:
        required_columns.append(f'item_click_num_{days}d')
        required_columns.append(f'item_click_rank_{days}d')
    result = result[required_columns]

    result.to_csv(save_path, index=False)
