import os
# os.getcwd() returns the current working directory (kept for reference)
# os.getcwd()
import numpy as np
import pandas as pd
from collections import Counter
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import gc
import warnings
from pylab import mpl

# Font setup so CJK (Chinese) labels render correctly in matplotlib figures.
plt.rcParams['font.sans-serif'] = ['PingFang SC', 'SimHei', 'Songti SC']
plt.rcParams['axes.unicode_minus'] = False
# Alternative CJK font configuration (kept for reference)
# mpl.rcParams["font.sans-serif"] = ["SimHei"]
# Render the minus sign normally instead of a missing-glyph box
mpl.rcParams["axes.unicode_minus"] = False
warnings.filterwarnings('ignore')

# Load the raw competition data (Tmall repeat-buyer challenge, format 1).
# NOTE(review): paths are relative to this script's location — confirm the
# working directory when running from elsewhere.
test_data = pd.read_csv(r'../../data/raw/data_format1/test_format1.csv')
train_data = pd.read_csv(r'../../data/raw/data_format1/train_format1.csv')
user_info = pd.read_csv(r'../../data/raw/data_format1/user_info_format1.csv')
user_log = pd.read_csv(r'../../data/raw/data_format1/user_log_format1.csv')


# Utility to shrink DataFrame memory usage by downcasting numeric columns
def reduce_mem_usage(df, verbose=True):
    """Downcast each numeric column of ``df`` to the smallest dtype that can
    hold its value range, reducing the in-memory footprint.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame to optimize. Columns are reassigned in place and the same
        object is returned for convenience.
    verbose : bool, default True
        When True, print the post-optimization memory usage and the
        percentage saved.

    Returns
    -------
    pandas.DataFrame
        The optimized frame (same object as ``df``).
    """
    start_mem = df.memory_usage().sum() / 1024 ** 2
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']

    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # Inclusive bounds so values exactly at a dtype's limits
                # (e.g. 127 for int8) still qualify for the smaller dtype;
                # the original strict comparisons pushed them one size up.
                if c_min >= np.iinfo(np.int8).min and c_max <= np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min >= np.iinfo(np.int16).min and c_max <= np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min >= np.iinfo(np.int32).min and c_max <= np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min >= np.iinfo(np.int64).min and c_max <= np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                # NOTE: float16 keeps only ~3 significant decimal digits —
                # acceptable for ids/counts here, but lossy for precise floats.
                if c_min >= np.finfo(np.float16).min and c_max <= np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min >= np.finfo(np.float32).min and c_max <= np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)

    end_mem = df.memory_usage().sum() / 1024 ** 2
    # Honor the verbose flag (the original printed unconditionally).
    if verbose:
        print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
        print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
        print('===' * 30)
    return df


#  Memory optimization: downcast all four frames before the heavy groupbys.
train_data = reduce_mem_usage(train_data)
test_data = reduce_mem_usage(test_data)
user_info = reduce_mem_usage(user_info)
user_log = reduce_mem_usage(user_log)


#  Data exploration + plotting
def draw_picture():
    """Exploratory plots over the module-level ``train_data``, ``user_info``
    and ``user_log`` frames: class balance, top merchants, repeat-buy rate
    distributions, gender/age breakdowns and per-month purchase counts.
    Side effects only (matplotlib figures; ends with ``plt.show()``).
    """
    #  Number of positive vs negative samples
    label_gp = train_data.groupby('label')['user_id'].count()
    print('正负样本的数量：\n', label_gp)
    _, axe = plt.subplots(1, 2, figsize=(12, 6))
    train_data.label.value_counts().plot(kind='pie', autopct='%1.1f%%', shadow=True, explode=[0, 0.1], ax=axe[0])
    sns.countplot(x='label', data=train_data, ax=axe[1])
    # plt.savefig("../../data/fig/正负样本数量.png")
    # plt.show()
    #  Top-5 merchants by purchase count
    print('选取top5店铺\n店铺\t购买次数')
    print(train_data.merchant_id.value_counts().head(5))
    train_data_merchant = train_data.copy()
    # NOTE(review): hard-coded merchant ids — presumably the top 5 printed
    # just above; re-check if the underlying data ever changes.
    train_data_merchant['TOP5'] = train_data_merchant['merchant_id'].map(
        lambda x: 1 if x in [4044, 3828, 4173, 1102, 4976] else 0)
    train_data_merchant = train_data_merchant[train_data_merchant['TOP5'] == 1]
    plt.figure(figsize=(8, 6))
    plt.title('Merchant VS Label')
    ax = sns.countplot(x='merchant_id', hue='label', data=train_data_merchant)
    # NOTE(review): this loop only reads bar heights and discards them —
    # looks like leftover scaffolding for bar labels.
    for p in ax.patches:
        height = p.get_height()
    # plt.show()
    # plt.savefig("../../data/fig/购买次数前5的店铺.png")
    #  User repeat-purchase rate: mean label per user, keeping rates in (0, 1]
    user_repeat_buy = [rate for rate in train_data.groupby(['user_id'])['label'].mean() if rate <= 1 and rate > 0]
    plt.figure(figsize=(8, 6))
    ax = plt.subplot(1, 2, 1)
    # Histogram with fitted normal curve (sns.distplot is deprecated in
    # newer seaborn — works here but consider histplot/displot later)
    sns.distplot(user_repeat_buy, fit=stats.norm)
    ax = plt.subplot(1, 2, 2)
    # Q-Q plot against the normal distribution
    res = stats.probplot(user_repeat_buy, plot=plt)
    # plt.show()
    #  Gender vs purchase label
    # Merge user profile info onto the training set first
    train_data_user_info = train_data.merge(user_info, on=['user_id'], how='left')
    # Fill missing values and cast to int (2 = unknown gender, -1 = unknown age/label)
    train_data_user_info['gender'] = train_data_user_info['gender'].fillna(2).astype(int)
    train_data_user_info['age_range'] = train_data_user_info['age_range'].fillna(-1).astype(int)
    train_data_user_info['label'] = train_data_user_info['label'].fillna(-1).astype(int)
    # Fraction of missing values per column (should be 0 after the fills)
    print(train_data_user_info.isnull().mean())
    # Plot gender vs label counts
    plt.figure(figsize=(8, 8))
    plt.title('Gender VS Label')
    ax = sns.countplot(x='gender', hue='label', data=train_data_user_info)
    for p in ax.patches:
        height = p.get_height()
    # plt.show()
    #  Repeat-buy rate by gender
    repeat_buy = [rate for rate in train_data_user_info.groupby(['gender'])['label'].mean()]
    plt.figure(figsize=(8, 4))
    ax = plt.subplot(1, 2, 1)
    sns.distplot(repeat_buy, fit=stats.norm)
    ax = plt.subplot(1, 2, 2)
    res = stats.probplot(repeat_buy, plot=plt)
    # plt.show()
    #  Age range vs label counts
    plt.figure(figsize=(8, 8))
    plt.title('Age VS Label')
    ax = sns.countplot(x='age_range', hue='label', data=train_data_user_info)
    # Purchase counts per month
    all_data_1 = user_log.merge(train_data, on=['user_id'], how='left')
    # NOTE(review): this .head() result is discarded — no effect.
    all_data_1[all_data_1['label'].notnull()].head()
    all_data_2 = all_data_1[all_data_1['label'].notnull()]
    all_data_2_sum = all_data_2.groupby(['time_stamp'])['label'].sum().reset_index()
    all_data_2_sum.head()
    all_data_2_sum['time_stamp'] = all_data_2_sum['time_stamp'].astype(str)
    all_data_2_sum['label'] = all_data_2_sum['label'].astype(int)
    # Extract the month from time_stamp: 3-digit stamps like '511' are
    # month 5 + day 11; 4-digit stamps like '1111' are month 11 + day 11.
    a = []
    for i in range(len(all_data_2_sum)):
        if len(all_data_2_sum['time_stamp'][i]) == 3:
            a.append(all_data_2_sum['time_stamp'][i][0])
        else:
            a.append(all_data_2_sum['time_stamp'][i][0:2])
    all_data_2_sum['month'] = a
    all_data_2_sum = all_data_2_sum.astype(int)
    # One subplot per month, months 5 through 11 (c is the month counter)
    plt.figure(figsize=(20, 8))
    c = 5
    for i in range(1, 8):
        plt.subplot(3, 3, i)
        b = all_data_2_sum[all_data_2_sum["month"] == c]
        plt.plot(b['time_stamp'], b['label'], linewidth=1, color="orange", marker="o", label="Mean value")
        c += 1
    # c is reused here as the monthly total DataFrame (shadows the counter)
    c = all_data_2_sum.groupby(['month'])['label'].sum().reset_index()
    plt.plot(c['month'], c['label'], linewidth=1, color="orange", marker="o", label="Mean value")
    plt.show()


# draw_picture()

# Feature engineering
def feature_engineering():
    """Build per (user_id, merchant_id) features from the module-level
    ``train_data``, ``user_info`` and ``user_log`` frames, write the result
    to ``../../data/processed/featurePor_data.csv``, and plot feature
    distributions plus a correlation heatmap.

    Features added: age_range, gender, total_logs, unique_item_ids,
    categories, browse_days, one_clicks, shopping_carts, purchase_times,
    favourite_times.
    """
    # print(train_data[train_data['label'] == 1])
    # print(user_log[(user_log['user_id'] == 34176) & (user_log['seller_id'] == 3906)])
    #  Attach age_range / gender from the user profile table
    df_train = pd.merge(train_data, user_info, on="user_id", how="left")
    # print(df_train.head())
    # total_logs: number of log rows per (user, merchant) pair
    total_logs_temp = user_log.groupby([user_log["user_id"], user_log["seller_id"]]).count().reset_index()[
        ["user_id", "seller_id", "item_id"]]
    total_logs_temp.rename(columns={"seller_id": "merchant_id", "item_id": "total_logs"}, inplace=True)
    df_train = pd.merge(df_train, total_logs_temp, on=["user_id", "merchant_id"], how="left")

    # unique_item_ids: count of distinct items per (user, merchant) —
    # first deduplicate on (user, seller, item), then count per pair
    unique_item_ids_temp = \
        user_log.groupby([user_log["user_id"], user_log["seller_id"], user_log["item_id"]]).count().reset_index()[
            ["user_id", "seller_id", "item_id"]]
    unique_item_ids_temp1 = unique_item_ids_temp.groupby(
        [unique_item_ids_temp["user_id"], unique_item_ids_temp["seller_id"]]).count().reset_index()
    unique_item_ids_temp1.rename(columns={"seller_id": "merchant_id", "item_id": "unique_item_ids"}, inplace=True)
    df_train = pd.merge(df_train, unique_item_ids_temp1, on=["user_id", "merchant_id"], how="left")

    # categories: count of distinct product categories per (user, merchant)
    categories_temp = \
        user_log.groupby([user_log["user_id"], user_log["seller_id"], user_log["cat_id"]]).count().reset_index()[
            ["user_id", "seller_id", "cat_id"]]
    categories_temp1 = categories_temp.groupby(
        [categories_temp["user_id"], categories_temp["seller_id"]]).count().reset_index()
    categories_temp1.rename(columns={"seller_id": "merchant_id", "cat_id": "categories"}, inplace=True)
    df_train = pd.merge(df_train, categories_temp1, on=["user_id", "merchant_id"], how="left")

    # browse_days: count of distinct active days per (user, merchant)
    browse_days_temp = \
        user_log.groupby([user_log["user_id"], user_log["seller_id"], user_log["time_stamp"]]).count().reset_index()[
            ["user_id", "seller_id", "time_stamp"]]
    browse_days_temp1 = browse_days_temp.groupby(
        [browse_days_temp["user_id"], browse_days_temp["seller_id"]]).count().reset_index()
    browse_days_temp1.rename(columns={"seller_id": "merchant_id", "time_stamp": "browse_days"}, inplace=True)
    df_train = pd.merge(df_train, browse_days_temp1, on=["user_id", "merchant_id"], how="left")

    # Per-action-type counts: action_type 0=click, 1=add-to-cart,
    # 2=purchase, 3=favourite (per the competition's data description)
    one_clicks_temp = \
        user_log.groupby([user_log["user_id"], user_log["seller_id"], user_log["action_type"]]).count().reset_index()[
            ["user_id", "seller_id", "action_type", "item_id"]]
    one_clicks_temp.rename(columns={"seller_id": "merchant_id", "item_id": "times"}, inplace=True)
    # Boolean mask * count isolates each action type's count into its own column
    one_clicks_temp["one_clicks"] = one_clicks_temp["action_type"] == 0
    one_clicks_temp["one_clicks"] = one_clicks_temp["one_clicks"] * one_clicks_temp["times"]
    one_clicks_temp["shopping_carts"] = one_clicks_temp["action_type"] == 1
    one_clicks_temp["shopping_carts"] = one_clicks_temp["shopping_carts"] * one_clicks_temp["times"]
    one_clicks_temp["purchase_times"] = one_clicks_temp["action_type"] == 2
    one_clicks_temp["purchase_times"] = one_clicks_temp["purchase_times"] * one_clicks_temp["times"]
    one_clicks_temp["favourite_times"] = one_clicks_temp["action_type"] == 3
    one_clicks_temp["favourite_times"] = one_clicks_temp["favourite_times"] * one_clicks_temp["times"]
    # Collapse the per-action rows back to one row per (user, merchant)
    four_features = one_clicks_temp.groupby(
        [one_clicks_temp["user_id"], one_clicks_temp["merchant_id"]]).sum().reset_index()
    four_features = four_features.drop(["action_type", "times"], axis=1)
    df_train = pd.merge(df_train, four_features, on=["user_id", "merchant_id"], how="left")
    # print(df_train.info())
    #   Missing-value counts before filling
    print(df_train.isnull().sum(axis=0))
    # Fill age_range / gender missing values (-1 = unknown age, 2 = unknown gender)
    df_train['age_range'] = df_train['age_range'].fillna(-1).astype(int)
    df_train['gender'] = df_train['gender'].fillna(2).astype(int)
    #   Missing-value counts after filling
    print(df_train.isnull().sum(axis=0))
    df_train.to_csv('../../data/processed/featurePor_data.csv')
    # Visualization of the engineered features
    plt.style.use('ggplot')
    sns.countplot(x='age_range', order=[1, 2, 3, 4, 5, 6, 7, 8], hue='gender', data=df_train)
    plt.title('训练集用户性别年龄分布')

    # Histograms of each engineered feature, indexed by column position
    colnm = df_train.columns.tolist()
    print(colnm)
    plt.figure(figsize=(5, 4))
    color = sns.color_palette()
    color[1] = 'm'
    df_train[colnm[5]].hist(range=[0, 80], bins=80, color=color[1])
    plt.xlabel(colnm[5], fontsize=12)
    plt.ylabel('用户数')
    plt.show()

    df_train[colnm[6]].hist(range=[0, 40], bins=40, color=color[1])
    plt.xlabel(colnm[6], fontsize=12)
    plt.ylabel('用户数')
    plt.show()

    df_train[colnm[7]].hist(range=[0, 10], bins=10, color=color[1])
    plt.xlabel(colnm[7], fontsize=12)
    plt.ylabel('用户数')
    plt.show()

    df_train[colnm[8]].hist(range=[0, 10], bins=10, color=color[1])
    plt.xlabel(colnm[8], fontsize=12)
    plt.ylabel('用户数')
    plt.show()

    df_train[colnm[9]].hist(range=[0, 50], bins=50, color=color[1])
    plt.xlabel(colnm[9], fontsize=12)
    plt.ylabel('用户单击次数统计')
    plt.show()

    df_train[colnm[10]].hist(range=[0, 3], bins=3, color=color[1])
    plt.xlabel(colnm[10], fontsize=12)
    plt.ylabel('用户数')
    plt.show()

    df_train[colnm[11]].hist(range=[0, 6], bins=7, color=color[1])
    plt.xlabel(colnm[11], fontsize=12)
    plt.ylabel("用户数")
    plt.show()

    df_train[colnm[12]].hist(range=[0, 6], bins=6, color=color[1])
    plt.xlabel(colnm[12], fontsize=12)
    plt.ylabel("用户数")
    plt.show()

    # Correlation heatmap over the feature columns
    sns.set_style("dark")
    plt.figure(figsize=(10, 8))
    colnm = df_train.columns.tolist()[2:13]
    mcorr = df_train[colnm].corr()
    # np.zeros_like builds an all-False array with the same shape as mcorr
    mask = np.zeros_like(mcorr, dtype=np.bool_)
    # np.triu_indices_from() returns the indices of the upper triangle;
    # masking it shows only the lower triangle of the symmetric matrix
    mask[np.triu_indices_from(mask)] = True
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    g = sns.heatmap(mcorr, mask=mask, cmap=cmap, square=True, annot=True, fmt='0.2f')


# feature_engineering()
if __name__ == '__main__':
    # draw_picture()
    # Run only the feature-engineering pipeline when executed as a script
    feature_engineering()
