# -*- coding:utf-8 -*-
import functools
import os
import time

# NOTE(review): matplotlib/seaborn are not used in this chunk — confirm
# they are needed elsewhere before removing.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

# Directory containing this script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Location of the raw competition data (format 1).
DATA_DIR = os.path.join(BASE_DIR, 'data', 'data_format1')

# Paths of the training-side CSV files, keyed by logical table name.
FILENAME = {
    "train": os.path.join(DATA_DIR, "train_format1.csv"),
    "user_log": os.path.join(DATA_DIR, "user_log_format1.csv"),
    "user_info": os.path.join(DATA_DIR, "user_info_format1.csv"),
}
# Path of the test-set CSV file.
TESTNAME = os.path.join(DATA_DIR, "test_format1.csv")


def check_files_exist():
    """Verify that every required data file exists on disk.

    Checks the three training-side CSVs plus the test CSV. Missing
    paths are printed to stdout.

    Returns:
        bool: True when all files are present, False otherwise.
    """
    required = list(FILENAME.values()) + [TESTNAME]
    missing = [path for path in required if not os.path.exists(path)]

    if missing:
        print("以下文件缺失:")
        for path in missing:
            print(f"  - {path}")
        return False

    print("所有数据文件验证通过")
    return True


def time_cost(func):
    """Decorator that prints the wall-clock run time of *func*.

    After each call it prints "Function: <name>, Cost: <sec>sec" and
    returns the wrapped function's result unchanged.
    """
    # functools.wraps preserves __name__/__doc__ of the decorated
    # function; without it, introspection (and the printed name when
    # decorators are stacked) would report "wrapper" instead.
    @functools.wraps(func)
    def wrapper(*args, **kw):
        start = time.time()
        res = func(*args, **kw)
        end = time.time()
        print(
            "Function: {}, Cost: {:.3f}sec".format(
                func.__name__,
                (end - start)))
        return res

    return wrapper


def data_clean(data, fea, sigma=3):
    """Flag outliers in column *fea* using the sigma-band rule.

    Adds a new column '<fea>_outlier' containing 'T' where the value
    falls outside mean +/- sigma * std (sample std, ddof=1) and 'F'
    otherwise. NaN values never satisfy either comparison, so they are
    marked 'F' — identical to the original row-wise behaviour.

    Note: mutates *data* in place and also returns it.

    Args:
        data: DataFrame to annotate.
        fea: name of the numeric column to test.
        sigma: half-width of the band in standard deviations.

    Returns:
        The same DataFrame, with the '<fea>_outlier' column added.
    """
    col = data[fea]
    center = np.mean(col)
    spread = np.std(col, ddof=1)
    delta = sigma * spread
    lower_thr = center - delta
    upper_thr = center + delta
    # Vectorized replacement of the original Series.apply(lambda ...):
    # same 'T'/'F' labels, computed in a single C-level pass.
    data[fea + '_outlier'] = np.where(
        (col > upper_thr) | (col < lower_thr), 'T', 'F')
    return data


@time_cost
def load_data(filename=FILENAME):
    """Load the training CSVs and build user/merchant behaviour features.

    Reads the train, user_info and user_log tables, aggregates the log
    into per-(user, merchant) activity counts, a per-user purchase rate
    and a per-merchant sell rate, left-merges everything onto the
    training rows, and returns the feature matrix with its labels.

    Args:
        filename: mapping with "train", "user_info" and "user_log" keys
            pointing at CSV paths (defaults to module-level FILENAME).

    Returns:
        tuple: (train, label) — `train` is a DataFrame of numeric
        features (id and label columns dropped), `label` is the
        original `label` column.

    Raises:
        FileNotFoundError: if any required data file is missing.

    NOTE(review): this function duplicates the feature pipeline in
    load_test almost line for line — consider extracting a shared
    helper.
    """
    # Fail fast if any required CSV is absent.
    if not check_files_exist():
        raise FileNotFoundError("缺少必需的数据文件")

    print('Loading Samples ...')
    train = pd.read_csv(filename["train"])
    user_info = pd.read_csv(filename["user_info"])
    user_log = pd.read_csv(filename["user_log"])

    # Drop brand_id if present — it is not used by any feature below.
    if "brand_id" in user_log.columns:
        user_log = user_log.drop(columns=["brand_id"])
    print('Done.')
    print('*' * 20)

    print('Filling NaN Items ...')
    # Fill missing ages with the mode of age_range.
    age_mode = user_info["age_range"].mode()
    if not age_mode.empty:
        age_mode = age_mode[0]
    else:
        age_mode = 0  # no mode (column entirely NaN): fall back to 0

    # NOTE(review): user_info is cleaned here but never merged into
    # `train` below — the age/gender columns are unused in this
    # function. Confirm whether a merge on user_id was intended.
    user_info["age_range"] = user_info["age_range"].fillna(age_mode)
    user_info["gender"] = user_info["gender"].fillna(2)
    print('Done.')
    print('*' * 20)

    print('Merging Train Dataset ...')
    # Per-(user, seller) activity volume and diversity aggregates.
    user_seller_agg = user_log.groupby(["user_id", "seller_id"]).agg(
        total_logs=("item_id", "count"),
        item_count=("item_id", "nunique"),
        cat_count=("cat_id", "nunique"),
        browse_days=("time_stamp", "nunique")
    ).reset_index()

    # One-hot the action type, then count each action per (user, seller).
    # Action codes 0-3 are assumed to be click / add-to-cart / purchase /
    # favourite from the column names used here — confirm against the
    # dataset description.
    action_dummies = pd.get_dummies(user_log, columns=["action_type"])
    action_agg = action_dummies.groupby(["user_id", "seller_id"]).agg(
        click_on=("action_type_0", "sum"),
        add_cart=("action_type_1", "sum"),
        buy_up=("action_type_2", "sum"),
        mark_down=("action_type_3", "sum")
    ).reset_index()

    # Combine both per-(user, seller) tables; rename seller_id to
    # merchant_id to match the train table's column name.
    user_seller_features = pd.merge(user_seller_agg, action_agg, on=["user_id", "seller_id"])
    user_seller_features.rename(columns={"seller_id": "merchant_id"}, inplace=True)

    # Per-user action totals, used to derive the user's purchase rate.
    user_action_agg = action_dummies.groupby("user_id").agg(
        action0=("action_type_0", "sum"),
        action1=("action_type_1", "sum"),
        action2=("action_type_2", "sum"),
        action3=("action_type_3", "sum")
    ).reset_index()

    # bought_rate = purchases / all actions for that user.
    user_action_agg["total_actions"] = user_action_agg["action0"] + user_action_agg["action1"] + \
                                       user_action_agg["action2"] + user_action_agg["action3"]
    user_action_agg["bought_rate"] = user_action_agg["action2"] / user_action_agg["total_actions"]
    user_action_agg = user_action_agg[["user_id", "bought_rate"]]

    # Per-merchant action totals, used to derive the merchant's sell rate.
    seller_action_agg = action_dummies.groupby("seller_id").agg(
        action0=("action_type_0", "sum"),
        action1=("action_type_1", "sum"),
        action2=("action_type_2", "sum"),
        action3=("action_type_3", "sum")
    ).reset_index()

    # sold_rate = purchases / all actions involving that merchant.
    seller_action_agg["total_actions"] = seller_action_agg["action0"] + seller_action_agg["action1"] + \
                                         seller_action_agg["action2"] + seller_action_agg["action3"]
    seller_action_agg["sold_rate"] = seller_action_agg["action2"] / seller_action_agg["total_actions"]
    seller_action_agg.rename(columns={"seller_id": "merchant_id"}, inplace=True)
    seller_action_agg = seller_action_agg[["merchant_id", "sold_rate"]]

    # Left-merge all feature tables onto the training rows.
    train = pd.merge(train, user_seller_features, on=["user_id", "merchant_id"], how="left")
    train = pd.merge(train, user_action_agg, on="user_id", how="left")
    train = pd.merge(train, seller_action_agg, on="merchant_id", how="left")

    # Rows with no matching log activity produce NaNs after the left
    # merges; treat absence of activity as zero.
    rate_cols = ["bought_rate", "sold_rate"]
    count_cols = ["total_logs", "item_count", "cat_count", "browse_days",
                  "click_on", "add_cart", "buy_up", "mark_down"]

    train[rate_cols] = train[rate_cols].fillna(0)
    train[count_cols] = train[count_cols].fillna(0)

    print('Done.')
    print('*' * 20)

    # Split off the target and drop identifier columns from the features.
    label = train['label']
    train = train.drop(columns=["user_id", "merchant_id", "label"])

    print('Shape of Dataset: {}'.format(train.shape))

    return train, label


def load_test(testname=TESTNAME):
    """Load the test CSV and build the same features as load_data.

    Reads the test, user_info and user_log tables, applies the same
    feature engineering as load_data (per-(user, merchant) activity
    aggregates, user purchase rate, merchant sell rate), and returns
    the feature matrix plus the id columns needed for submission.

    Args:
        testname: path to the test CSV (defaults to module-level
            TESTNAME).

    Returns:
        tuple: (test, info) — `test` is a DataFrame of numeric features
        (id columns dropped), `info` holds the user_id/merchant_id
        pairs in the same row order.

    Raises:
        FileNotFoundError: if any required data file is missing.

    NOTE(review): this function duplicates the feature pipeline in
    load_data almost line for line — consider extracting a shared
    helper.
    """
    # Fail fast if any required CSV is absent.
    if not check_files_exist():
        raise FileNotFoundError("缺少必需的数据文件")

    print('Loading Tests ...')
    test = pd.read_csv(testname)
    user_info = pd.read_csv(FILENAME["user_info"])
    user_log = pd.read_csv(FILENAME["user_log"])

    # Drop brand_id if present — it is not used by any feature below.
    if "brand_id" in user_log.columns:
        user_log = user_log.drop(columns=["brand_id"])
    print('Done.')
    print('*' * 20)

    print('Filling NaN Items ...')
    # Fill missing ages with the mode of age_range.
    age_mode = user_info["age_range"].mode()
    if not age_mode.empty:
        age_mode = age_mode[0]
    else:
        age_mode = 0  # no mode (column entirely NaN): fall back to 0

    # NOTE(review): user_info is cleaned here but never merged into
    # `test` below — the age/gender columns are unused in this
    # function. Confirm whether a merge on user_id was intended.
    user_info["age_range"] = user_info["age_range"].fillna(age_mode)
    user_info["gender"] = user_info["gender"].fillna(2)
    print('Done.')
    print('*' * 20)

    print('Merging Test Dataset ...')
    # Same feature engineering as the training pipeline.
    # Per-(user, seller) activity volume and diversity aggregates.
    user_seller_agg = user_log.groupby(["user_id", "seller_id"]).agg(
        total_logs=("item_id", "count"),
        item_count=("item_id", "nunique"),
        cat_count=("cat_id", "nunique"),
        browse_days=("time_stamp", "nunique")
    ).reset_index()

    # One-hot the action type, then count each action per (user, seller).
    action_dummies = pd.get_dummies(user_log, columns=["action_type"])
    action_agg = action_dummies.groupby(["user_id", "seller_id"]).agg(
        click_on=("action_type_0", "sum"),
        add_cart=("action_type_1", "sum"),
        buy_up=("action_type_2", "sum"),
        mark_down=("action_type_3", "sum")
    ).reset_index()

    # Combine both per-(user, seller) tables; rename seller_id to
    # merchant_id to match the test table's column name.
    user_seller_features = pd.merge(user_seller_agg, action_agg, on=["user_id", "seller_id"])
    user_seller_features.rename(columns={"seller_id": "merchant_id"}, inplace=True)

    # Per-user action totals, used to derive the user's purchase rate.
    user_action_agg = action_dummies.groupby("user_id").agg(
        action0=("action_type_0", "sum"),
        action1=("action_type_1", "sum"),
        action2=("action_type_2", "sum"),
        action3=("action_type_3", "sum")
    ).reset_index()

    # bought_rate = purchases / all actions for that user.
    user_action_agg["total_actions"] = user_action_agg["action0"] + user_action_agg["action1"] + \
                                       user_action_agg["action2"] + user_action_agg["action3"]
    user_action_agg["bought_rate"] = user_action_agg["action2"] / user_action_agg["total_actions"]
    user_action_agg = user_action_agg[["user_id", "bought_rate"]]

    # Per-merchant action totals, used to derive the merchant's sell rate.
    seller_action_agg = action_dummies.groupby("seller_id").agg(
        action0=("action_type_0", "sum"),
        action1=("action_type_1", "sum"),
        action2=("action_type_2", "sum"),
        action3=("action_type_3", "sum")
    ).reset_index()

    # sold_rate = purchases / all actions involving that merchant.
    seller_action_agg["total_actions"] = seller_action_agg["action0"] + seller_action_agg["action1"] + \
                                         seller_action_agg["action2"] + seller_action_agg["action3"]
    seller_action_agg["sold_rate"] = seller_action_agg["action2"] / seller_action_agg["total_actions"]
    seller_action_agg.rename(columns={"seller_id": "merchant_id"}, inplace=True)
    seller_action_agg = seller_action_agg[["merchant_id", "sold_rate"]]

    # Left-merge all feature tables onto the test rows.
    test = pd.merge(test, user_seller_features, on=["user_id", "merchant_id"], how="left")
    test = pd.merge(test, user_action_agg, on="user_id", how="left")
    test = pd.merge(test, seller_action_agg, on="merchant_id", how="left")

    # Rows with no matching log activity produce NaNs after the left
    # merges; treat absence of activity as zero.
    rate_cols = ["bought_rate", "sold_rate"]
    count_cols = ["total_logs", "item_count", "cat_count", "browse_days",
                  "click_on", "add_cart", "buy_up", "mark_down"]

    test[rate_cols] = test[rate_cols].fillna(0)
    test[count_cols] = test[count_cols].fillna(0)

    print('Done.')
    print('*' * 20)

    # Keep the id pairs — needed to build the submission file.
    info = test[["user_id", "merchant_id"]].copy()

    # Drop id columns from the feature matrix.
    test = test.drop(columns=["user_id", "merchant_id"])

    print('Shape of Dataset: {}'.format(test.shape))

    return test, info


# When run directly, just verify that the data files are in place.
if __name__ == '__main__':
    print("检查数据文件...")
    check_files_exist()