import gc
import json
from datetime import datetime

import pandas as pd
import random
import numpy as np
import os
import ast

from tqdm import tqdm


def load_log():
    """Load every ``log_standard`` CSV from the working directory and
    concatenate them into one interaction DataFrame.

    Only the interaction columns listed below are read; file order follows
    ``os.listdir()``.
    """
    wanted = ("user_id,video_id,time_ms,is_click,is_like,is_follow,is_comment,"
              "is_forward,is_hate,long_view,is_profile_enter,tab").split(",")
    frames = [
        pd.read_csv(name, sep=',', usecols=wanted)
        for name in os.listdir()
        if "log_standard" in name
    ]
    return pd.concat(frames)



def get_user_behavior_file():
    """Join the interaction logs with basic item features, collapse each
    user's interactions into per-user lists, and dump the result to
    ``user_behavior.csv``.

    Returns:
        DataFrame with one row per user; every non-``user_id`` column holds
        a list ordered by descending ``time_ms`` (newest interaction first).
    """
    print("读取item info")
    item_names = ['video_id', 'author_id', 'video_type', 'tag', 'music_id']
    items = pd.read_csv('video_features_basic_1k.csv', sep=',',
                        usecols=item_names)
    print("读取log文件")
    logs = load_log()

    # Keep only the first tag of the comma-separated tag list.
    items['tag'] = items['tag'].apply(lambda x: str(x).split(',')[0].strip('"'))
    # Sort categories before enumerating so the integer codes are
    # reproducible across runs (iterating a raw set depends on string hash
    # randomization); key=str also tolerates NaN entries.
    video_type_dic = {
        video_type: idx + 1
        for idx, video_type in enumerate(sorted(set(items['video_type']), key=str))
    }
    # Shift timestamps so the earliest event becomes 0 (vectorized).
    start_timestamp = np.min(logs['time_ms'])
    logs['time_ms'] = logs['time_ms'] - start_timestamp
    items['video_type'] = items['video_type'].apply(lambda x: video_type_dic[x])
    logs['action'] = 1
    print("开始合并")
    logs_with_iteminfo = pd.merge(left=logs, right=items, on='video_id')
    # Free the large intermediates before the sort/groupby below.
    del logs
    del items
    gc.collect()
    # Newest interaction first within each user.
    logs_with_iteminfo = logs_with_iteminfo.sort_values(
        by=['user_id', 'time_ms'], ascending=[True, False])
    logs_with_iteminfo.fillna(0, inplace=True)
    logs_with_iteminfo = logs_with_iteminfo.groupby('user_id').agg({
        col: list
        for col in logs_with_iteminfo.columns if col != 'user_id'
    }).reset_index()
    logs_with_iteminfo.to_csv("user_behavior.csv", index=False)
    # Sequence-length statistics (any list column works; is_forward chosen).
    behavior_len = [len(seq) for seq in logs_with_iteminfo['is_forward']]
    print(f"user average behavior num : {np.mean(behavior_len)}")
    print(f"user min behavior num : {np.min(behavior_len)}")
    print(f"user max behavior num : {np.max(behavior_len)}")
    print(f"user median behavior num : {np.median(behavior_len)}")
    return logs_with_iteminfo

def get_active_user_files():
    """Print behaviour-count statistics for users in ``8000_user_actions.csv``.

    ``video_id`` is stored as the repr of a Python list, so it is parsed
    back with ``ast.literal_eval`` before counting. (A >=8000 filter used to
    live here; it is currently disabled.)
    """
    actions = pd.read_csv("8000_user_actions.csv", sep=',')
    actions['bn'] = actions['video_id'].apply(ast.literal_eval).apply(len)
    print("8000 len:", len(actions))
    print("8000 mean:", actions['bn'].mean())
    print("8000 max:", actions['bn'].max())
    print("8000 min:", actions['bn'].min())
    print("8000 median:", actions['bn'].median())

def get_json():
    """Build dense 1-based index mappings for the video and author ids in
    ``video_features_basic_1k.csv`` and persist them as
    ``video_id2idx.json`` and ``author_id2idx.json``.

    Ids are sorted before enumeration, so the mapping is deterministic.
    Indices start at 1 — presumably index 0 is reserved for padding;
    TODO(review): confirm against the model code.
    """
    usecols = ['video_id', 'author_id']
    user_actions = pd.read_csv("video_features_basic_1k.csv",
                               usecols=usecols)

    def _build_map(series):
        # Sorted unique ids -> contiguous indices starting at 1. Keys are
        # cast to plain int so json.dump accepts them (numpy ints do not
        # serialize); JSON will still store them as strings.
        return {int(v): i + 1 for i, v in enumerate(sorted(set(series)))}

    with open("video_id2idx.json", "w", encoding='utf-8') as f:
        json.dump(_build_map(user_actions['video_id']), f)

    with open("author_id2idx.json", "w", encoding='utf-8') as f:
        json.dump(_build_map(user_actions['author_id']), f)




def split_dataset():
    """Split per-user behaviour sequences into train/valid/test (8:1:1),
    expand the first 500 interactions of every user into positive rows,
    sample one negative item per positive, and write
    ``final_train_data.csv`` / ``final_valid_data.csv`` /
    ``final_test_data.csv``.

    Reads ``8000_user_actions.csv`` (list-valued columns stored as reprs)
    and ``item_info.json`` ({video_id: [author_id, video_type, tag,
    music_id]}).
    """
    print("读取数据")
    cols = ("video_id,time_ms,is_click,is_like,is_follow,is_comment,is_forward,is_hate,"
            "long_view,is_profile_enter,tab,author_id,video_type,music_id,tag").split(",")
    user_behavior = pd.read_csv("8000_user_actions.csv", sep=',',
                                usecols=["user_id"] + cols)
    # List-valued columns were serialized as their repr; parse them back.
    for col in cols:
        user_behavior[col] = user_behavior[col].apply(ast.literal_eval)
    print("读取数据完成")
    # 打乱顺序（非常关键）— shuffle users before splitting.
    user_behavior = user_behavior.sample(frac=1, random_state=42).reset_index(drop=True)

    # 分割成三部分：8:1:1
    train_size = int(0.8 * len(user_behavior))
    val_size = int(0.1 * len(user_behavior))

    df_train = user_behavior[:train_size]
    df_val = user_behavior[train_size:train_size + val_size]
    df_test = user_behavior[train_size + val_size:]

    with open('item_info.json', 'r', encoding='utf-8') as f:
        # NOTE: JSON object keys are always strings, even though the
        # original ids were ints — every lookup below must use str keys.
        item_info = json.load(f)
    # Hoisted out of the sampler: rebuilding this list for every negative
    # sample was O(|items|) per call.
    item_keys = list(item_info.keys())

    def get_pos_neg_data(df):
        """Return positives (interactions 0..499 per user) plus one sampled
        negative per positive, sorted by user then time."""
        pos_frames = []
        for i in range(0, 500):
            # A fresh frame per step — appending the same object and
            # mutating it later would alias all 500 frames.
            snap = pd.DataFrame({'user_id': df['user_id']})
            for col in cols:
                snap[col] = df[col].apply(lambda x: x[i])
            snap['inter_type'] = 1
            pos_frames.append(snap)
        # Single concat instead of concatenating inside the loop (O(n^2)).
        pos_data = pd.concat(pos_frames, ignore_index=True)

        def get_neg_item(row, idx):
            # Sequences are ordered newest-first, so the first 30% of the
            # remaining history are the user's most recent interactions.
            behavior_item_ids = [int(item_id) for item_id in row['video_id'][idx:]]
            behavior_num = len(behavior_item_ids)
            recent_ids = behavior_item_ids[: int(0.3 * behavior_num)]
            older_ids = behavior_item_ids[int(0.3 * behavior_num):]
            behavior_set = set(behavior_item_ids)
            recent_set = set(recent_ids)
            proc = random.random()
            # Bounded retries instead of `while 1`: the original could spin
            # forever (or crash on an empty slice) for pathological users.
            for _ in range(1000):
                if proc < 0.5:
                    # Global negative: any catalog item never interacted with.
                    neg_item = random.choice(item_keys)
                    if int(neg_item) not in behavior_set:
                        return neg_item, item_info[neg_item]
                if older_ids:
                    # In-history negative: an older interaction that is not
                    # among the recent 30%.
                    neg_item = random.choice(older_ids)
                    if neg_item not in recent_set:
                        # BUGFIX: item_info keys are strings (JSON); the
                        # original indexed with the int id -> KeyError.
                        return neg_item, item_info[str(neg_item)]
            # Fallback: give up on the exclusion constraints.
            neg_item = random.choice(item_keys)
            return neg_item, item_info[neg_item]

        neg_frames = []
        for i in range(0, 500):
            snap = pd.DataFrame({'user_id': df['user_id']})
            snap['tmp_row'] = df.apply(lambda x: get_neg_item(x, i), axis=1)
            snap['video_id'] = snap['tmp_row'].apply(lambda x: x[0])
            # item_info values are [author_id, video_type, tag, music_id].
            snap['author_id'] = snap['tmp_row'].apply(lambda x: x[1][0])
            snap['video_type'] = snap['tmp_row'].apply(lambda x: x[1][1])
            snap['tag'] = snap['tmp_row'].apply(lambda x: x[1][2])
            snap['music_id'] = snap['tmp_row'].apply(lambda x: x[1][3])
            # Negatives carry no interaction signals.
            for col in cols:
                if col not in ('video_id', 'author_id', 'video_type', 'tag',
                               'music_id', 'time_ms'):
                    snap[col] = 0
            snap = snap.drop(columns=['tmp_row'])
            # Place the fake interaction just after the real one in time.
            snap['time_ms'] = df['time_ms'].apply(lambda x: int(x[i]) + 10)
            snap['inter_type'] = 0
            neg_frames.append(snap)
        neg_data = pd.concat(neg_frames, ignore_index=True)

        data = pd.concat([pos_data, neg_data], ignore_index=True)
        return data.sort_values(['user_id', 'time_ms']).reset_index(drop=True)

    final_train_data = get_pos_neg_data(df_train)
    final_val_data = get_pos_neg_data(df_val)
    final_test_data = get_pos_neg_data(df_test)

    final_train_data.to_csv('./final_train_data.csv')
    final_val_data.to_csv('./final_valid_data.csv')
    final_test_data.to_csv('./final_test_data.csv')



if __name__ == '__main__':
    # Pipeline stages are toggled by (un)commenting; only the id-mapping
    # step runs in the current configuration.
    # get_user_behavior_file()
    # get_active_user_files()
    get_json()
    # get_active_user_files()