# -*- coding: utf-8 -*-
import os
import logging
from typing import List, Optional
import hashlib
import pandas as pd
import numpy as np

# ----------------- Configurable settings -----------------
RAW_DATA_DIR = '/data/gongzhijia/data/rawdata'
PROCESSED_DATA_DIR = '/data/gongzhijia/data/processeddata'
USER_FEATURES_PATH = '/data/gongzhijia/data/features/user_features.csv'
ITEM_FEATURES_PATH = '/data/gongzhijia/data/features/item_features.csv'

# Timezone for event_time / login_time parsing (None = parse as-is;
# set e.g. 'UTC' or 'Asia/Shanghai' if localization is needed).
EVENT_TIME_TZ: Optional[str] = None
LOGIN_TIME_TZ: Optional[str] = None

# Number of stable-hash buckets for openid (tune as needed).
OPENID_NUM_BUCKETS: int = 50

# Whether to drop the raw openid before saving (recommended True in production).
DROP_ORIGINAL_OPENID: bool = True
# -------------------------------------------

# Ensure the output root exists before any folder is processed.
os.makedirs(PROCESSED_DATA_DIR, exist_ok=True)

logging.basicConfig(
    format='[%(levelname)s] %(message)s',
    level=logging.INFO
)

# Raw page_id string -> small integer code; unmapped ids become 0 downstream.
PAGE_ID_MAP = {
    'view_home1': 1,
    'view_home2': 2,
    'view_home3': 3,
    'view_home4': 4
}

def list_folders(raw_data_dir: str) -> List[str]:
    """Return the names of all immediate subdirectories of *raw_data_dir*.

    Raises:
        FileNotFoundError: if *raw_data_dir* does not exist.
    """
    if not os.path.exists(raw_data_dir):
        raise FileNotFoundError(f"原始数据目录不存在: {raw_data_dir}")
    folders = []
    for entry in os.listdir(raw_data_dir):
        if os.path.isdir(os.path.join(raw_data_dir, entry)):
            folders.append(entry)
    return folders

def read_folder_csv(folder_path: str, filename='labeled_viewport_events.csv') -> Optional[pd.DataFrame]:
    """Load *filename* from *folder_path* as a DataFrame.

    Returns None (with a warning/error log) when the file is missing or
    cannot be parsed, so callers can skip the folder gracefully.
    """
    csv_path = os.path.join(folder_path, filename)
    if not os.path.exists(csv_path):
        logging.warning(f"文件不存在: {csv_path}")
        return None
    try:
        frame = pd.read_csv(csv_path)
    except Exception as e:
        logging.error(f"读取文件失败 {csv_path}: {e}")
        return None
    logging.info(f"成功加载: {csv_path} (rows={len(frame)})")
    return frame

def process_ip_address(df: pd.DataFrame) -> pd.DataFrame:
    """Split 'ip_address' into four nullable-Int16 columns ip-1..ip-4.

    Missing/empty addresses and non-numeric octets are encoded as -1.
    Returns *df* unchanged (with a warning) if the column is absent.
    """
    if 'ip_address' not in df.columns:
        logging.warning("数据中没有 ip_address 列")
        return df

    # Default every octet column to -1; nullable Int16 keeps them integral.
    for idx in range(1, 5):
        df[f'ip-{idx}'] = pd.Series([-1] * len(df), dtype='Int16')

    mask = df['ip_address'].notna() & (df['ip_address'] != '')
    if mask.any():
        octets = df.loc[mask, 'ip_address'].str.split('.', expand=True)
        # At most four octets are kept even if the string has extra dots.
        for pos in range(min(octets.shape[1], 4)):
            column = pd.to_numeric(octets[pos], errors='coerce').astype('Int16')
            df.loc[mask, f'ip-{pos + 1}'] = column.fillna(-1)

    return df

def map_page_id(df: pd.DataFrame) -> pd.DataFrame:
    """Add 'page_id_mapped' (Int8) from PAGE_ID_MAP; unknown page_ids become 0."""
    if 'page_id' not in df.columns:
        logging.warning("数据中没有 page_id 列")
        return df
    mapped = df['page_id'].map(PAGE_ID_MAP)
    df['page_id_mapped'] = mapped.fillna(0).astype('Int8')
    return df

def _parse_time(series: pd.Series, tz: Optional[str]) -> pd.Series:
    # 按原样解析；如配置 tz，则进行本地化
    s = pd.to_datetime(series, errors='coerce', utc=False)
    if tz:
        # 将“朴素时间”本地化到指定时区；老版本 pandas 仅支持 errors='coerce'
        try:
            s = s.dt.tz_localize(tz, errors='coerce')
        except Exception:
            pass
        # 已是该时区，通常无需再 tz_convert
    return s

def process_event_time(df: pd.DataFrame) -> pd.DataFrame:
    """Replace 'event_time' with parsed datetimes and derive calendar features.

    Adds month/day/weekday/day-of-year/hour plus weekend and daypart flags.
    Returns *df* unchanged (with a warning) if the column is absent.
    """
    if 'event_time' not in df.columns:
        logging.warning("数据中没有 event_time 列")
        return df

    parsed = _parse_time(df['event_time'], EVENT_TIME_TZ)
    df['event_time'] = parsed

    # Calendar components (nullable ints so NaT rows stay NA).
    df['month'] = parsed.dt.month.astype('Int8')
    df['day_of_month'] = parsed.dt.day.astype('Int8')
    df['day_of_week'] = parsed.dt.dayofweek.astype('Int8')  # 0=Mon .. 6=Sun
    df['day_of_year'] = parsed.dt.dayofyear.astype('Int16')
    df['hour'] = parsed.dt.hour.astype('Int8')
    df['is_weekend'] = df['day_of_week'].isin([5, 6]).astype('Int8')

    # Daypart flags: [6,10) morning, [10,14) noon, [14,18) afternoon,
    # [18,22) evening; everything else counts as deep night.
    hour = df['hour']
    for flag, lo, hi in (('is_morning', 6, 10), ('is_noon', 10, 14),
                         ('is_afternoon', 14, 18), ('is_evening', 18, 22)):
        df[flag] = hour.between(lo, hi, inclusive='left').astype('Int8')
    df['is_deep_night'] = ((hour >= 22) | (hour < 6)).astype('Int8')

    return df

def _safe_fillna_by_dtype(df: pd.DataFrame, cols: List[str]) -> pd.DataFrame:
    for c in cols:
        if c not in df.columns:
            continue
        if pd.api.types.is_datetime64_any_dtype(df[c]):
            df[c] = pd.to_datetime(df[c], errors='coerce').fillna(pd.Timestamp('1970-01-01'))
        elif pd.api.types.is_numeric_dtype(df[c]):
            df[c] = pd.to_numeric(df[c], errors='coerce')
            try:
                df[c] = df[c].fillna(-1).astype('Int64')
            except Exception:
                df[c] = df[c].fillna(-1)
        else:
            df[c] = df[c].fillna('')
    return df

def add_user_features(df: pd.DataFrame, user_features_path: str) -> pd.DataFrame:
    """Left-join per-user features from *user_features_path* onto *df* by openid.

    On any failure (missing openid column, missing/unreadable feature file)
    the original frame is returned with 'user_last_login_time' defaulted to
    1970-01-01, so downstream login-time feature extraction always has the
    column.

    Fixes: the four duplicated fallback blocks are factored into one helper,
    and the feature table is deduplicated on openid — a left merge on a
    non-unique key would silently multiply event rows.
    """
    def _with_default_login_time(frame: pd.DataFrame) -> pd.DataFrame:
        # Shared fallback: guarantee the column exists with the epoch default.
        frame['user_last_login_time'] = pd.Timestamp('1970-01-01')
        return frame

    if 'openid' not in df.columns:
        logging.warning("原始数据中没有 openid 列")
        return _with_default_login_time(df)

    if not os.path.exists(user_features_path):
        logging.warning(f"用户特征文件不存在: {user_features_path}")
        return _with_default_login_time(df)

    try:
        uf = pd.read_csv(user_features_path)
    except Exception as e:
        logging.error(f"读取用户特征文件失败: {e}")
        return _with_default_login_time(df)

    if 'openid' not in uf.columns:
        logging.warning("用户特征文件中没有 openid 列")
        return _with_default_login_time(df)

    left = df.copy()
    left['openid'] = left['openid'].astype(str)
    uf = uf.copy()
    uf['openid'] = uf['openid'].astype(str)
    # Guard against duplicate openids in the feature file: keep the first
    # occurrence so the left join stays one-to-one.
    uf = uf.drop_duplicates(subset='openid', keep='first')

    user_feature_columns = [c for c in uf.columns if c not in ['openid', 'deviceid']]

    # Plain left merge (no indicator=True, so no _merge column is produced).
    merged = left.merge(
        uf[['openid'] + user_feature_columns],
        on='openid',
        how='left',
        suffixes=('', '_src')
    )

    # Fill NaNs introduced by the join with dtype-appropriate sentinels.
    merged = _safe_fillna_by_dtype(merged, user_feature_columns)

    # Login-time fallback: parse, defaulting unparseable/missing to the epoch.
    if 'user_last_login_time' in merged.columns:
        merged['user_last_login_time'] = pd.to_datetime(
            merged['user_last_login_time'], errors='coerce'
        ).fillna(pd.Timestamp('1970-01-01'))
    else:
        merged['user_last_login_time'] = pd.Timestamp('1970-01-01')

    return merged

def process_login_time(df: pd.DataFrame) -> pd.DataFrame:
    """Parse 'user_last_login_time' and derive calendar/daypart login features."""
    if 'user_last_login_time' not in df.columns:
        logging.warning("数据中没有 user_last_login_time 列；现有列: %s", list(df.columns))
        return df

    parsed = _parse_time(df['user_last_login_time'], LOGIN_TIME_TZ)
    df['user_last_login_time'] = parsed

    # Calendar components of the last login (nullable ints keep NaT as NA).
    df['login_month'] = parsed.dt.month.astype('Int8')
    df['login_day'] = parsed.dt.day.astype('Int8')
    df['login_day_of_week'] = parsed.dt.dayofweek.astype('Int8')
    df['login_hour'] = parsed.dt.hour.astype('Int8')
    df['login_minute'] = parsed.dt.minute.astype('Int8')
    df['login_second'] = parsed.dt.second.astype('Int8')

    # Daypart flags mirroring the event-time features.
    hour = df['login_hour']
    for flag, lo, hi in (('login_is_morning', 6, 10), ('login_is_noon', 10, 14),
                         ('login_is_afternoon', 14, 18), ('login_is_evening', 18, 22)):
        df[flag] = hour.between(lo, hi, inclusive='left').astype('Int8')
    df['login_is_deep_night'] = ((hour >= 22) | (hour < 6)).astype('Int8')

    return df

def add_item_features(df: pd.DataFrame, item_features_path: str) -> pd.DataFrame:
    """Left-join item features onto *df* via articleId and/or videoId.

    When a feature is reachable through both keys, the article-side value
    wins (combine_first); rows matching neither key get sentinel fills.
    Returns *df* unchanged when no key column or feature file is available.

    Bug fixed: the old code merged with suffixes=('', '_a'), which only
    suffixes on collision — after the article merge the features were
    unsuffixed, so the video merge suffixed only its own side and the
    cleanup loop's rename of '<col>_v' -> '<col>' produced two columns
    with the same name. Feature columns are now renamed *before* each
    merge, and the merge keys are deduplicated to keep the join 1:1.
    """
    has_article_id = 'articleId' in df.columns
    has_video_id = 'videoId' in df.columns
    if not has_article_id and not has_video_id:
        logging.warning("数据中没有 articleId 和 videoId 列")
        return df

    if not os.path.exists(item_features_path):
        logging.warning(f"物品特征文件不存在: {item_features_path}")
        return df

    try:
        it = pd.read_csv(item_features_path)
    except Exception as e:
        logging.error(f"读取物品特征文件失败: {e}")
        return df

    left = df.copy()
    if has_article_id:
        left['articleId'] = left['articleId'].astype(str)
    if has_video_id:
        left['videoId'] = left['videoId'].astype(str)

    feature_cols = [c for c in it.columns if c not in ['articleId', 'videoId']]

    out = left

    if 'articleId' in out.columns and 'articleId' in it.columns:
        it_a = it.dropna(subset=['articleId']).copy()
        it_a['articleId'] = it_a['articleId'].astype(str)
        it_a = it_a.drop_duplicates(subset='articleId', keep='first')
        # Rename up front so the article-side columns are always '<col>_a'.
        it_a = it_a[['articleId'] + feature_cols].rename(
            columns={c: f'{c}_a' for c in feature_cols})
        out = out.merge(it_a, on='articleId', how='left')

    if 'videoId' in out.columns and 'videoId' in it.columns:
        it_v = it.dropna(subset=['videoId']).copy()
        it_v['videoId'] = it_v['videoId'].astype(str)
        it_v = it_v.drop_duplicates(subset='videoId', keep='first')
        it_v = it_v[['videoId'] + feature_cols].rename(
            columns={c: f'{c}_v' for c in feature_cols})
        out = out.merge(it_v, on='videoId', how='left')

    # Collapse the suffixed pairs into a single column; article wins when
    # both sides provide a value.
    for c in feature_cols:
        article_vals = out.pop(f'{c}_a') if f'{c}_a' in out.columns else None
        video_vals = out.pop(f'{c}_v') if f'{c}_v' in out.columns else None
        if article_vals is not None and video_vals is not None:
            out[c] = article_vals.combine_first(video_vals)
        elif article_vals is not None:
            out[c] = article_vals
        elif video_vals is not None:
            out[c] = video_vals

    # Dtype-aware sentinel fill for rows that matched neither key.
    out = _safe_fillna_by_dtype(out, feature_cols)
    return out

def bucket_openid(df: pd.DataFrame, n_buckets: int = 50) -> pd.DataFrame:
    """Assign each openid a stable hash bucket, consistent across runs/processes.

    No-op when the 'openid' column is absent.
    """
    if 'openid' not in df.columns:
        return df

    def _bucket_of(value) -> int:
        # MD5 is used purely as a stable hash (no security requirement);
        # Python's builtin hash() is salted per process and would not be
        # reproducible across runs.
        digest = hashlib.md5(str(value).encode('utf-8')).digest()
        return int.from_bytes(digest[:8], byteorder='big', signed=False) % n_buckets

    df['openid_bucket'] = df['openid'].apply(_bucket_of).astype('Int32')
    return df

def process_single_folder(df: pd.DataFrame, folder_name: str) -> pd.DataFrame:
    """Run the full per-folder feature pipeline and return the enriched frame.

    Steps: drop unused raw fields, derive IP/page/event-time features, drop
    the consumed raw columns, join user features, derive login-time features,
    join item features, and finally drop the raw login timestamp.
    """
    # 1) Drop fields that the pipeline never uses.
    to_drop = [c for c in ['event_type', 'scene_id', 'element_id'] if c in df.columns]
    if to_drop:
        df = df.drop(columns=to_drop)
        logging.info(f"[{folder_name}] 已丢弃字段: {to_drop}")
    else:
        logging.info(f"[{folder_name}] 无需丢弃额外字段")

    # 2) IP octet features.
    logging.info(f"[{folder_name}] 处理 IP 地址...")
    df = process_ip_address(df)

    # 3) page_id -> integer code.
    logging.info(f"[{folder_name}] 映射 page_id...")
    df = map_page_id(df)

    # 4) event_time calendar/daypart features.
    logging.info(f"[{folder_name}] 处理 event_time...")
    df = process_event_time(df)

    # 5) Remove the raw columns now that derived features exist.
    consumed = [c for c in ['ip_address', 'event_time', 'page_id'] if c in df.columns]
    if consumed:
        df = df.drop(columns=consumed)
    logging.info(f"[{folder_name}] 已删除处理后的原始列（如存在）")

    # 6) User features (left join on openid).
    logging.info(f"[{folder_name}] 合并用户特征...")
    df = add_user_features(df, USER_FEATURES_PATH)

    # 7) Login-time derived features.
    logging.info(f"[{folder_name}] 处理用户登录时间...")
    df = process_login_time(df)

    # 8) Item features (left join on articleId/videoId).
    logging.info(f"[{folder_name}] 合并物品特征...")
    df = add_item_features(df, ITEM_FEATURES_PATH)

    # 9) Keep only the derived login features, not the raw timestamp.
    if 'user_last_login_time' in df.columns:
        df = df.drop(columns=['user_last_login_time'])
        logging.info(f"[{folder_name}] 已删除 user_last_login_time 字段")

    return df

def main():
    """Process every raw-data folder and persist one CSV per folder."""
    folders = list_folders(RAW_DATA_DIR)
    logging.info(f"找到 {len(folders)} 个文件夹")
    for folder_name in folders:
        logging.info(f"开始处理文件夹: {folder_name}")
        raw_df = read_folder_csv(os.path.join(RAW_DATA_DIR, folder_name))
        if raw_df is None or raw_df.empty:
            logging.warning(f"[{folder_name}] 跳过空数据")
            continue

        result = process_single_folder(raw_df, folder_name)

        # -- Final touches before persisting -- #
        # A) Defensive removal of _merge (in case indicator=True sneaks in
        #    through an external change).
        result = result.drop(columns=['_merge'], errors='ignore')

        # B) Stable-hash bucketing of openid.
        result = bucket_openid(result, n_buckets=OPENID_NUM_BUCKETS)

        # C) De-identification: drop the raw openid.
        if DROP_ORIGINAL_OPENID and 'openid' in result.columns:
            result = result.drop(columns=['openid'])

        # Write the processed frame alongside its source folder name.
        out_dir = os.path.join(PROCESSED_DATA_DIR, folder_name)
        os.makedirs(out_dir, exist_ok=True)
        out_path = os.path.join(out_dir, 'processed_data.csv')
        result.to_csv(out_path, index=False)
        logging.info(f"[{folder_name}] 处理完成，已保存: {out_path}")

    logging.info("所有文件夹处理完成!")

if __name__ == '__main__':
    main()
