# -*- coding:utf-8 -*-
import functools
import os
import time
from datetime import datetime

import numpy as np
import pandas as pd

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data', 'data_format1')

FILENAME = {
    "train": os.path.join(DATA_DIR, "train_format1.csv"),
    "user_log": os.path.join(DATA_DIR, "user_log_format1.csv"),
    "user_info": os.path.join(DATA_DIR, "user_info_format1.csv"),
}
TESTNAME = os.path.join(DATA_DIR, "test_format1.csv")


def check_files_exist():
    files = [
        FILENAME["train"],
        FILENAME["user_log"],
        FILENAME["user_info"],
        TESTNAME
    ]
    missing_files = [f for f in files if not os.path.exists(f)]
    if missing_files:
        print("Missing files:")
        for f in missing_files:
            print(f"  - {f}")
        return False
    print("All data files verified")
    return True


def time_cost(func):
    def wrapper(*args, **kw):
        start = time.time()
        res = func(*args, **kw)
        end = time.time()
        print(f"Function: {func.__name__}, Cost: {(end - start):.3f}sec")
        return res

    return wrapper


def safe_divide(numerator, denominator):
    return np.where(denominator == 0, 0, numerator / denominator)


@time_cost
def load_data():
    if not check_files_exist():
        raise FileNotFoundError("Required data files missing")

    print('Loading data...')
    train = pd.read_csv(FILENAME["train"])
    user_info = pd.read_csv(FILENAME["user_info"])
    user_log = pd.read_csv(FILENAME["user_log"])

    # 清理不需要的列
    for df in [train, user_info, user_log]:
        if 'brand_id' in df.columns:
            df.drop(columns=['brand_id'], inplace=True)

    # 填充缺失值 - 更安全的方式
    # 处理age_range
    if 'age_range' in user_info.columns:
        user_info['age_range'] = user_info['age_range'].astype(float)
        age_mode = user_info["age_range"].mode()
        age_mode = age_mode[0] if not age_mode.empty else 0
        user_info["age_range"] = user_info["age_range"].fillna(age_mode).astype(int)

    # 处理gender
    if 'gender' in user_info.columns:
        user_info['gender'] = user_info['gender'].astype(float)
        gender_mode = user_info["gender"].mode()
        gender_mode = gender_mode[0] if not gender_mode.empty else 2
        user_info["gender"] = user_info["gender"].fillna(gender_mode).astype(int)

    # 添加用户画像组合特征
    user_info['gender_age'] = user_info['gender'].astype(str) + '_' + user_info['age_range'].astype(str)

    print('Processing time features...')
    # 时间特征处理
    user_log['time_stamp'] = pd.to_datetime(
        user_log['time_stamp'],
        format='%Y%m%d',
        errors='coerce'
    )

    # 删除转换失败的记录
    invalid_dates = user_log['time_stamp'].isna()
    if invalid_dates.sum() > 0:
        print(f"Warning: Dropping {invalid_dates.sum()} rows with invalid dates")
        user_log = user_log[~invalid_dates]

    user_log['day_of_week'] = user_log['time_stamp'].dt.dayofweek
    user_log['is_weekend'] = user_log['day_of_week'].isin([5, 6]).astype(int)
    user_log['month'] = user_log['time_stamp'].dt.month
    user_log['day'] = user_log['time_stamp'].dt.day
    user_log['hour'] = user_log['time_stamp'].dt.hour

    # 行为序列特征
    print('Creating behavior sequence features...')
    user_log = user_log.sort_values(['user_id', 'time_stamp'])
    user_log['prev_action'] = user_log.groupby('user_id')['action_type'].shift(1)
    user_log['next_action'] = user_log.groupby('user_id')['action_type'].shift(-1)
    user_log['action_transition'] = user_log['prev_action'].astype(str) + '_to_' + user_log['action_type'].astype(str)

    # 行为转移统计
    transition_counts = user_log.groupby(['user_id', 'seller_id', 'action_transition']).size().unstack(fill_value=0)
    transition_counts.columns = [f'trans_{col}' for col in transition_counts.columns]

    print('Creating features...')
    # 用户-商家行为聚合
    user_seller_agg = user_log.groupby(["user_id", "seller_id"]).agg(
        total_logs=("item_id", "count"),
        item_count=("item_id", "nunique"),
        cat_count=("cat_id", "nunique"),
        browse_days=("time_stamp", "nunique"),
        avg_day_of_week=("day_of_week", "mean"),
        weekend_ratio=("is_weekend", "mean"),
        month_variety=("month", "nunique"),
        hour_std=("hour", "std")
    ).reset_index()

    # 修复行为类型计数 - 确保列名正确
    # 检查 action_type 的值范围
    action_types = user_log['action_type'].unique()
    print(f"Found action types: {action_types}")

    # 为所有可能的动作类型创建列
    for i in range(4):  # 动作类型 0-3
        col_name = f'action_type_{i}'
        if col_name not in user_log.columns:
            user_log[col_name] = (user_log['action_type'] == i).astype(int)

    # 行为类型计数聚合
    action_agg = user_log.groupby(["user_id", "seller_id"]).agg(
        click_on=("action_type_0", "sum"),
        add_cart=("action_type_1", "sum"),
        buy_up=("action_type_2", "sum"),
        mark_down=("action_type_3", "sum")
    ).reset_index()

    # 合并特征
    user_seller_features = pd.merge(user_seller_agg, action_agg, on=["user_id", "seller_id"])
    user_seller_features = pd.merge(user_seller_features, transition_counts, on=["user_id", "seller_id"], how='left')
    user_seller_features.rename(columns={"seller_id": "merchant_id"}, inplace=True)

    # 用户购买率
    user_action_agg = user_log.groupby("user_id").agg(
        action0=("action_type_0", "sum"),
        action1=("action_type_1", "sum"),
        action2=("action_type_2", "sum"),
        action3=("action_type_3", "sum")
    ).reset_index()
    user_action_agg["total_actions"] = user_action_agg["action0"] + user_action_agg["action1"] + \
                                       user_action_agg["action2"] + user_action_agg["action3"]
    user_action_agg["bought_rate"] = safe_divide(user_action_agg["action2"], user_action_agg["total_actions"])
    user_action_agg = user_action_agg[["user_id", "bought_rate"]]

    # 商家销售率
    seller_action_agg = user_log.groupby("seller_id").agg(
        action0=("action_type_0", "sum"),
        action1=("action_type_1", "sum"),
        action2=("action_type_2", "sum"),
        action3=("action_type_3", "sum")
    ).reset_index()
    seller_action_agg["total_actions"] = seller_action_agg["action0"] + seller_action_agg["action1"] + \
                                         seller_action_agg["action2"] + seller_action_agg["action3"]
    seller_action_agg.rename(columns={"seller_id": "merchant_id"}, inplace=True)
    seller_action_agg["sold_rate"] = safe_divide(seller_action_agg["action2"], seller_action_agg["total_actions"])
    seller_action_agg = seller_action_agg[["merchant_id", "sold_rate"]]

    # 合并所有特征
    train = pd.merge(train, user_seller_features, on=["user_id", "merchant_id"], how="left")
    train = pd.merge(train, user_action_agg[["user_id", "bought_rate"]], on="user_id", how="left")
    train = pd.merge(train, seller_action_agg[["merchant_id", "sold_rate"]], on="merchant_id", how="left")
    train = pd.merge(train, user_info, on="user_id", how="left")

    # 填充空值
    rate_cols = ["bought_rate", "sold_rate"]
    count_cols = ["total_logs", "item_count", "cat_count", "browse_days",
                  "click_on", "add_cart", "buy_up", "mark_down"]

    # 添加行为转移特征列
    trans_cols = [col for col in train.columns if col.startswith('trans_')]

    train[rate_cols] = train[rate_cols].fillna(0)
    train[count_cols] = train[count_cols].fillna(0)
    if trans_cols:  # 确保列存在
        train[trans_cols] = train[trans_cols].fillna(0)

    # 添加新特征
    train['buy_ratio'] = safe_divide(train['buy_up'], train['total_logs'])
    train['click_ratio'] = safe_divide(train['click_on'], train['total_logs'])
    train['cart_ratio'] = safe_divide(train['add_cart'], train['total_logs'])
    train['mark_ratio'] = safe_divide(train['mark_down'], train['total_logs'])
    train['cat_per_item'] = safe_divide(train['cat_count'], train['item_count'])
    train['days_per_log'] = safe_divide(train['browse_days'], train['total_logs'])

    label = train['label']
    features = train.drop(columns=["user_id", "merchant_id", "label"])

    print(f'Dataset shape: {features.shape}')
    return features, label, list(features.columns)


@time_cost
def load_test():
    if not check_files_exist():
        raise FileNotFoundError("Required data files missing")

    print('Loading test data...')
    test = pd.read_csv(TESTNAME)
    user_info = pd.read_csv(FILENAME["user_info"])
    user_log = pd.read_csv(FILENAME["user_log"])

    # 清理不需要的列
    for df in [test, user_info, user_log]:
        if 'brand_id' in df.columns:
            df.drop(columns=['brand_id'], inplace=True)

    # 填充缺失值 - 使用相同的安全方式
    # 处理age_range
    if 'age_range' in user_info.columns:
        user_info['age_range'] = user_info['age_range'].astype(float)
        age_mode = user_info["age_range"].mode()
        age_mode = age_mode[0] if not age_mode.empty else 0
        user_info["age_range"] = user_info["age_range"].fillna(age_mode).astype(int)

    # 处理gender
    if 'gender' in user_info.columns:
        user_info['gender'] = user_info['gender'].astype(float)
        gender_mode = user_info["gender"].mode()
        gender_mode = gender_mode[0] if not gender_mode.empty else 2
        user_info["gender"] = user_info["gender"].fillna(gender_mode).astype(int)

    user_info['gender_age'] = user_info['gender'].astype(str) + '_' + user_info['age_range'].astype(str)

    print('Processing time features...')
    # 时间特征处理
    user_log['time_stamp'] = pd.to_datetime(
        user_log['time_stamp'],
        format='%Y%m%d',
        errors='coerce'
    )

    # 删除转换失败的记录
    invalid_dates = user_log['time_stamp'].isna()
    if invalid_dates.sum() > 0:
        print(f"Warning: Dropping {invalid_dates.sum()} rows with invalid dates")
        user_log = user_log[~invalid_dates]

    user_log['day_of_week'] = user_log['time_stamp'].dt.dayofweek
    user_log['is_weekend'] = user_log['day_of_week'].isin([5, 6]).astype(int)
    user_log['month'] = user_log['time_stamp'].dt.month
    user_log['day'] = user_log['time_stamp'].dt.day
    user_log['hour'] = user_log['time_stamp'].dt.hour

    # 行为序列特征
    print('Creating behavior sequence features...')
    user_log = user_log.sort_values(['user_id', 'time_stamp'])
    user_log['prev_action'] = user_log.groupby('user_id')['action_type'].shift(1)
    user_log['next_action'] = user_log.groupby('user_id')['action_type'].shift(-1)
    user_log['action_transition'] = user_log['prev_action'].astype(str) + '_to_' + user_log['action_type'].astype(str)

    # 行为转移统计
    transition_counts = user_log.groupby(['user_id', 'seller_id', 'action_transition']).size().unstack(fill_value=0)
    transition_counts.columns = [f'trans_{col}' for col in transition_counts.columns]

    print('Creating test features...')
    # 用户-商家行为聚合
    user_seller_agg = user_log.groupby(["user_id", "seller_id"]).agg(
        total_logs=("item_id", "count"),
        item_count=("item_id", "nunique"),
        cat_count=("cat_id", "nunique"),
        browse_days=("time_stamp", "nunique"),
        avg_day_of_week=("day_of_week", "mean"),
        weekend_ratio=("is_weekend", "mean"),
        month_variety=("month", "nunique"),
        hour_std=("hour", "std")
    ).reset_index()

    # 修复行为类型计数 - 确保列名正确
    # 为所有可能的动作类型创建列
    for i in range(4):  # 动作类型 0-3
        col_name = f'action_type_{i}'
        if col_name not in user_log.columns:
            user_log[col_name] = (user_log['action_type'] == i).astype(int)

    # 行为类型计数聚合
    action_agg = user_log.groupby(["user_id", "seller_id"]).agg(
        click_on=("action_type_0", "sum"),
        add_cart=("action_type_1", "sum"),
        buy_up=("action_type_2", "sum"),
        mark_down=("action_type_3", "sum")
    ).reset_index()

    # 合并特征
    user_seller_features = pd.merge(user_seller_agg, action_agg, on=["user_id", "seller_id"])
    user_seller_features = pd.merge(user_seller_features, transition_counts, on=["user_id", "seller_id"], how='left')
    user_seller_features.rename(columns={"seller_id": "merchant_id"}, inplace=True)

    # 用户购买率
    user_action_agg = user_log.groupby("user_id").agg(
        action0=("action_type_0", "sum"),
        action1=("action_type_1", "sum"),
        action2=("action_type_2", "sum"),
        action3=("action_type_3", "sum")
    ).reset_index()
    user_action_agg["total_actions"] = user_action_agg["action0"] + user_action_agg["action1"] + \
                                       user_action_agg["action2"] + user_action_agg["action3"]
    user_action_agg["bought_rate"] = safe_divide(user_action_agg["action2"], user_action_agg["total_actions"])
    user_action_agg = user_action_agg[["user_id", "bought_rate"]]

    # 商家销售率
    seller_action_agg = user_log.groupby("seller_id").agg(
        action0=("action_type_0", "sum"),
        action1=("action_type_1", "sum"),
        action2=("action_type_2", "sum"),
        action3=("action_type_3", "sum")
    ).reset_index()
    seller_action_agg["total_actions"] = seller_action_agg["action0"] + seller_action_agg["action1"] + \
                                         seller_action_agg["action2"] + seller_action_agg["action3"]
    seller_action_agg.rename(columns={"seller_id": "merchant_id"}, inplace=True)
    seller_action_agg["sold_rate"] = safe_divide(seller_action_agg["action2"], seller_action_agg["total_actions"])
    seller_action_agg = seller_action_agg[["merchant_id", "sold_rate"]]

    # 合并所有特征
    test = pd.merge(test, user_seller_features, on=["user_id", "merchant_id"], how="left")
    test = pd.merge(test, user_action_agg[["user_id", "bought_rate"]], on="user_id", how="left")
    test = pd.merge(test, seller_action_agg[["merchant_id", "sold_rate"]], on="merchant_id", how="left")
    test = pd.merge(test, user_info, on="user_id", how="left")

    # 填充空值
    rate_cols = ["bought_rate", "sold_rate"]
    count_cols = ["total_logs", "item_count", "cat_count", "browse_days",
                  "click_on", "add_cart", "buy_up", "mark_down"]

    # 添加行为转移特征列
    trans_cols = [col for col in test.columns if col.startswith('trans_')]

    test[rate_cols] = test[rate_cols].fillna(0)
    test[count_cols] = test[count_cols].fillna(0)
    if trans_cols:  # 确保列存在
        test[trans_cols] = test[trans_cols].fillna(0)

    # 添加新特征
    test['buy_ratio'] = safe_divide(test['buy_up'], test['total_logs'])
    test['click_ratio'] = safe_divide(test['click_on'], test['total_logs'])
    test['cart_ratio'] = safe_divide(test['add_cart'], test['total_logs'])
    test['mark_ratio'] = safe_divide(test['mark_down'], test['total_logs'])
    test['cat_per_item'] = safe_divide(test['cat_count'], test['item_count'])
    test['days_per_log'] = safe_divide(test['browse_days'], test['total_logs'])

    info = test[["user_id", "merchant_id"]].copy()
    features = test.drop(columns=["user_id", "merchant_id"])

    print(f'Test dataset shape: {features.shape}')
    return features, info


if __name__ == '__main__':
    print("Checking data files...")
    check_files_exist()