import collections
import datetime
import math
import time
from typing import Optional

import numpy as np
import pandas as pd
import tqdm
from numpy import ndarray
from pandas import DataFrame
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
from pandas.core.indexes.numeric import IntegerIndex


def reduce_mem(df: DataFrame) -> DataFrame:
    """
    Downcast numeric columns of ``df`` in place to the narrowest dtype whose
    range strictly contains the column's observed min/max, reducing memory.

    :param df: DataFrame to optimize (modified in place).
    :return: the same DataFrame, with downcast numeric dtypes.
    """
    starttime = time.time()
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    # Memory footprint before downcasting, in MB.
    start_mem = df.memory_usage().sum() / 1024 ** 2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type not in numerics:
            continue
        c_min = df[col].min()
        c_max = df[col].max()
        # An all-NaN column has no usable range; leave it untouched.
        if pd.isnull(c_min) or pd.isnull(c_max):
            continue
        type_name = str(col_type)
        if type_name.startswith('int'):
            # Try the narrowest integer type first (original started at int16).
            for int_type in (np.int16, np.int32, np.int64):
                info = np.iinfo(int_type)
                if info.min < c_min and c_max < info.max:
                    df[col] = df[col].astype(int_type)
                    break
        elif type_name.startswith('float'):
            # BUG FIX: the original compared str(col_type)[:3] ('flo') against
            # 'float', which is never equal, so float columns were never downcast.
            for float_type in (np.float16, np.float32, np.float64):
                info = np.finfo(float_type)
                if info.min < c_min and c_max < info.max:
                    df[col] = df[col].astype(float_type)
                    break
    end_mem = df.memory_usage().sum() / 1024 ** 2
    endtime = time.time()
    print('reduce mem from {:5.2f}Mb to {:5.2f}Mb saving {:.1f}% spend time {:.3f} seconds'.format(
        start_mem,
        end_mem,
        100 * (start_mem - end_mem) / start_mem,
        endtime - starttime
    ))
    return df


def get_all_click_sample(data_paths: ndarray, sample_nums: int = 10000) -> Optional["DataFrame"]:
    """
    Load and concatenate every click CSV in ``data_paths``, optionally
    down-sampling to a fixed number of users.

    :param data_paths: iterable of CSV file paths.
    :param sample_nums: number of users to keep; sampling runs only when > 0.
    :return: deduplicated click DataFrame, or None when ``data_paths`` is empty.
    """
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # read every file and concatenate in one pass instead. Plain concat
    # (no ignore_index) keeps the same index semantics append had.
    frames = [pd.read_csv(data_path) for data_path in data_paths]
    if not frames:
        return None
    all_click = pd.concat(frames)

    if sample_nums > 0:
        all_user_ids = all_click["user_id"].unique()
        np.random.seed(100)  # fixed seed keeps the sample reproducible
        sample_user_ids = np.random.choice(all_user_ids, size=sample_nums, replace=False)
        all_click = all_click[all_click["user_id"].isin(sample_user_ids)]
    # A user may click the same article at the same timestamp in several files.
    all_click.drop_duplicates(["user_id", "click_article_id", "click_timestamp"], inplace=True)

    return all_click


def get_user_item_time(click_df: DataFrame) -> dict:
    """
    Build a per-user click history ordered by click time:
    {user1: [(item1, time1), (item2, time2), ...], ...}

    :param click_df: raw click table with user_id / click_article_id /
        click_timestamp columns.
    :return: dict mapping user_id to its time-ordered (article, timestamp) pairs.
    """
    ordered = click_df.sort_values("click_timestamp")
    # Collapse each user's rows into a list of (article, timestamp) tuples.
    pair_series = (
        ordered.groupby(["user_id"])[["click_article_id", "click_timestamp"]]
        .apply(lambda g: list(zip(g["click_article_id"], g["click_timestamp"])))
    )
    return pair_series.to_dict()


def get_item_topk_click(click_df: DataFrame, k: int) -> pd.Index:
    """
    Return the k most-clicked article ids, most clicked first.

    :param click_df: click table with a click_article_id column.
    :param k: number of top articles to return (fewer if there aren't k).
    :return: index of article ids ordered by descending click count.
    """
    # FIX: annotate with the public pd.Index — IntegerIndex is private pandas
    # API and was removed in pandas 2.0.
    top_click = click_df["click_article_id"].value_counts().index[:k]
    return top_click


def itemcf_sim(df: DataFrame) -> dict:
    """
    Build the ItemCF item-to-item similarity matrix from click data.

    Co-occurrence within one user's history is weighted down for very active
    users (1 / log(history_len + 1)), then normalized by the geometric mean of
    the two items' click counts.

    :param df: raw click table.
    :return: nested dict {item_i: {item_j: similarity, ...}, ...}
    """
    user_item_time_dict = get_user_item_time(df)
    i2i_sim = {}
    item_cnt = collections.defaultdict(int)
    for user, item_time_list in tqdm.tqdm(user_item_time_dict.items()):
        # Activity penalty is constant per user — compute it once.
        weight = 1 / math.log(len(item_time_list) + 1)
        for i, _ in item_time_list:
            item_cnt[i] += 1
            row = i2i_sim.setdefault(i, {})
            for j, _ in item_time_list:
                if j == i:
                    continue
                row[j] = row.get(j, 0) + weight
    # Normalize raw co-occurrence weights into cosine-style similarities.
    for i, row in i2i_sim.items():
        for j, wij in row.items():
            row[j] = wij / math.sqrt(item_cnt[i] * item_cnt[j])
    return i2i_sim


def item_based_recommend(user_id: int, user_item_time_dict: dict, i2i_sim: dict, sim_item_topk: int,
                         recall_item_num: int, item_topk_click: pd.Index) -> list:
    """
    Item-based collaborative-filtering recall for one user.

    :param user_id: user to recommend for (must be a key of user_item_time_dict).
    :param user_item_time_dict: {user: [(item, click_time), ...]} histories.
    :param i2i_sim: item similarity matrix {i: {j: w_ij, ...}, ...}.
    :param sim_item_topk: number of most-similar items considered per history item.
    :param recall_item_num: number of items to return.
    :param item_topk_click: globally popular items used to pad short recalls
        (annotated pd.Index; IntegerIndex is private API removed in pandas 2.0).
    :return: [(item, score), ...] sorted by descending score, length <= recall_item_num.
    """
    user_hist_items = user_item_time_dict[user_id]
    # FIX: the original comprehension reused the name `user_id` for items,
    # shadowing the parameter; use a distinct name.
    hist_item_set = {item for item, _ in user_hist_items}
    item_rank = {}
    for loc, (i, click_time) in enumerate(user_hist_items):
        # FIX: .get() so a history item missing from i2i_sim (cold item) no
        # longer raises KeyError — it simply contributes no candidates.
        candidates = sorted(i2i_sim.get(i, {}).items(), key=lambda x: x[1], reverse=True)[:sim_item_topk]
        for j, wij in candidates:
            if j in hist_item_set:
                continue  # never recommend something already clicked
            item_rank[j] = item_rank.get(j, 0) + wij
    # Pad with globally popular items; negative scores (-100, -101, ...) keep
    # fillers ranked below every genuine CF candidate, in popularity order.
    if len(item_rank) < recall_item_num:
        for i, item in enumerate(item_topk_click):
            if item in item_rank:
                continue
            item_rank[item] = -i - 100
            if len(item_rank) == recall_item_num:
                break
    item_rank = sorted(item_rank.items(), key=lambda x: x[1], reverse=True)[:recall_item_num]
    return item_rank


def item_based_recommend_users(click_df: DataFrame, i2i_sim: dict, sim_item_topk: int, recall_item_num: int,
                               topk: int) -> dict:
    """
    Run item-based collaborative-filtering recall for every user in click_df.

    :param click_df: raw click table.
    :param i2i_sim: item similarity matrix from itemcf_sim.
    :param sim_item_topk: most-similar items considered per history item.
    :param recall_item_num: items recalled per user.
    :param topk: number of globally popular items used as padding.
    :return: {user_id: [(item, score), ...], ...}
    """
    user_item_time_dict = get_user_item_time(click_df)
    top_click = get_item_topk_click(click_df, topk)
    user_recall_items_dict = collections.defaultdict(list)
    for uid in tqdm.tqdm(click_df["user_id"].unique()):
        user_recall_items_dict[uid] = item_based_recommend(
            uid, user_item_time_dict, i2i_sim, sim_item_topk, recall_item_num, top_click)
    return user_recall_items_dict


def dict2DataFrame(user_recall_items_dict: dict) -> DataFrame:
    """
    Flatten a recall dict {user: [(item, score), ...]} into a long-format
    DataFrame with columns user_id / click_article_id / pred_score.

    :param user_recall_items_dict: per-user recall lists.
    :return: one row per (user, item, score) triple.
    """
    rows = [
        [user, item, score]
        for user, items in tqdm.tqdm(user_recall_items_dict.items())
        for item, score in items
    ]
    return pd.DataFrame(rows, columns=["user_id", "click_article_id", "pred_score"])


def submit(recall_df: DataFrame, topk: int, model_name: str):
    """
    Write a submission CSV with the top-k recalled articles per user.

    :param recall_df: long-format recall table with columns
        user_id / click_article_id / pred_score.
    :param topk: number of articles kept per user.
    :param model_name: prefix for the output file name
        ("<model_name>_<timestamp>.csv").
    """
    # Rank each user's candidates by score; method="first" breaks score ties
    # by row order so ranks are unique integers 1..n per user.
    recall_df = recall_df.sort_values(["user_id", "pred_score"])
    score_group: SeriesGroupBy = recall_df.groupby(["user_id"])["pred_score"]
    recall_df["rank"] = score_group.rank(ascending=False, method="first")

    temp: DataFrameGroupBy = recall_df.groupby(["user_id"])

    def getMax(x: DataFrame):
        # Max rank within one user's group == that user's candidate count.
        return x["rank"].max()

    temp = temp.apply(getMax)
    # Every user must have at least topk candidates, or the pivot below would
    # leave holes. NOTE(review): `assert` is stripped under `python -O`;
    # consider raising an explicit exception instead.
    assert temp.min() >= topk

    del recall_df["pred_score"]
    # Pivot rank -> columns: one row per user, one article column per rank.
    submit: DataFrame = recall_df[recall_df["rank"] <= topk].set_index(["user_id", "rank"]).unstack(-1).reset_index()
    # After droplevel the rank labels are floats (from rank()); cast them back
    # to ints so the rename mapping below matches.
    submit.columns = [int(col) if isinstance(col, float) else col for col in submit.columns.droplevel(0)]
    # NOTE(review): only ranks 1-5 are renamed here, so topk > 5 would leave
    # bare integer column names in the output — confirm topk is always 5.
    submit = submit.rename(
        columns={"": "user_id", 1: 'article_1', 2: 'article_2', 3: 'article_3', 4: 'article_4', 5: 'article_5'})
    submit.to_csv(model_name + "_" + datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S") + ".csv", index=False,
                  header=True)
