import pandas as pd
import numpy as np
from datetime import datetime
import os
import ast
import random
import json
from tqdm import tqdm
import gc
def get_logs():
    """Load every raw Tmall log shard in the working directory and concatenate them.

    Shards are headerless files named ``tianchi_2014002_rec_tmall_log_*``
    with ``\\u0001``-separated columns.

    Returns:
        pd.DataFrame: all shards stacked, columns
        ['item_id', 'user_id', 'action', 'vtime'].
    """
    column_names = ['item_id', 'user_id', 'action', 'vtime']
    shard_files = [
        fname for fname in os.listdir()
        if fname.startswith("tianchi_2014002_rec_tmall_log_")
    ]
    # nrows caps a single runaway shard; \u0001 is the dump's field separator.
    frames = [
        pd.read_csv(fname, names=column_names, sep='\u0001', nrows=100000000)
        for fname in shard_files
    ]
    return pd.concat(frames, ignore_index=True)

def get_user_behavior_file():
    """Aggregate per-user behavior sequences and write them to user_behavior.csv.

    Joins the raw logs with item metadata, rebases every timestamp to
    seconds since the earliest log entry, then groups by user so each
    feature column becomes a list ordered by descending vtime (newest
    interaction first).

    Returns:
        pd.DataFrame: one row per user with list-valued feature columns.
    """
    print("读取item info")
    item_names = ['item_id', 'title', 'pict_url', 'category', 'brand_id', 'seller_id']
    items = pd.read_csv('tianchi_2014001_rec_tmall_product.txt', names=item_names, sep='\u0001',
                        usecols=['item_id', 'category', 'brand_id', 'seller_id'])
    # 'category' is encoded as "<category>-<sub_category>"; split it in two.
    items['sub_category'] = items['category'].apply(lambda x: x.split('-')[1])
    items['category'] = items['category'].apply(lambda x: x.split('-')[0])
    # Missing brand/seller ids fall back to the 0 sentinel; then strip the
    # 'b'/'s' prefix so the ids become plain ints.
    items['brand_id'] = items['brand_id'].fillna('b0').apply(lambda x: int(x[1:]))
    items['seller_id'] = items['seller_id'].fillna('s0').apply(lambda x: int(x[1:]))
    print("读取log文件")
    logs = get_logs()
    date_format = "%Y-%m-%d %H:%M:%S"

    def process_date(date):
        # Parse "YYYY-mm-dd HH:MM:SS" into a Unix timestamp (local time).
        return int(datetime.strptime(date, date_format).timestamp())

    # Rebase vtime to seconds elapsed since the earliest log entry.
    start_timestamp = process_date(np.min(logs['vtime']))
    logs['vtime'] = logs['vtime'].apply(lambda x: process_date(x) - start_timestamp)
    # Collapse all action codes into a single "interacted" flag.
    logs['action'] = logs['action'].apply(lambda x: 1)
    print("开始合并·")
    # Inner join: log rows whose item is missing from the item table are dropped.
    logs_with_iteminfo = pd.merge(left=logs, right=items, on='item_id')
    del logs
    del items
    gc.collect()
    # Newest interaction first within each user.
    logs_with_iteminfo = logs_with_iteminfo.sort_values(by=['user_id', 'vtime'], ascending=[True, False])
    agg_cols = ['item_id', 'action', 'category', 'brand_id', 'seller_id', 'sub_category', 'vtime']
    logs_with_iteminfo = logs_with_iteminfo.groupby('user_id').agg(
        {col: list for col in agg_cols}
    ).reset_index()
    logs_with_iteminfo.to_csv("user_behavior.csv", index=False)
    # Fixed typo: behavoir_len -> behavior_lengths.
    behavior_lengths = [len(seq) for seq in logs_with_iteminfo['category']]
    print(f"user average behavior num : {np.mean(behavior_lengths)}")
    return logs_with_iteminfo


def get_json():
    """Dump item metadata and per-feature vocabulary index maps as JSON files.

    Writes ``item_info.json`` mapping item_id -> [category, brand_id,
    seller_id, sub_category] (JSON coerces the int keys to strings), then
    for every list-valued feature column of ``800_user_actions.csv``
    builds a value -> contiguous-index map and saves it as
    ``<col>2idx.json``.
    """
    print("读取item info")
    item_names = ['item_id', 'title', 'pict_url', 'category', 'brand_id', 'seller_id']
    items = pd.read_csv('tianchi_2014001_rec_tmall_product.txt', names=item_names, sep='\u0001',
                        usecols=['item_id', 'category', 'brand_id', 'seller_id'])
    # 'category' is encoded as "<category>-<sub_category>"; split it in two.
    items['sub_category'] = items['category'].apply(lambda x: x.split('-')[1])
    items['category'] = items['category'].apply(lambda x: x.split('-')[0])
    # Missing brand/seller ids fall back to the 0 sentinel; strip 'b'/'s' prefix.
    items['brand_id'] = items['brand_id'].fillna('b0').apply(lambda x: int(x[1:]))
    items['seller_id'] = items['seller_id'].fillna('s0').apply(lambda x: int(x[1:]))
    print("读取log文件")
    logs = get_logs()
    date_format = "%Y-%m-%d %H:%M:%S"

    def process_date(date):
        # Parse "YYYY-mm-dd HH:MM:SS" into a Unix timestamp (local time).
        return int(datetime.strptime(date, date_format).timestamp())

    # Rebase vtime to seconds elapsed since the earliest log entry.
    start_timestamp = process_date(np.min(logs['vtime']))
    logs['vtime'] = logs['vtime'].apply(lambda x: process_date(x) - start_timestamp)
    logs['action'] = logs['action'].apply(lambda x: 1)
    print("开始合并·")
    logs_with_iteminfo = pd.merge(left=logs, right=items, on='item_id')
    del logs
    del items
    gc.collect()
    # Deduplicate without inplace on a column slice — avoids pandas'
    # chained-assignment warning and guarantees the dedup actually sticks.
    item_table = (
        logs_with_iteminfo[['item_id', 'category', 'brand_id', 'seller_id', 'sub_category']]
        .drop_duplicates()
    )
    del logs_with_iteminfo
    item_info = dict(zip(item_table.iloc[:, 0], item_table.iloc[:, 1:].values.tolist()))
    with open("item_info.json", "w", encoding='utf-8') as f:
        json.dump(item_info, f)
    del item_table, item_info

    usecols = ['item_id', 'category', 'brand_id', 'seller_id', 'sub_category']
    user_actions = pd.read_csv("800_user_actions.csv", usecols=usecols)

    map_dic = {}
    for col in usecols:
        # Columns are stored as stringified lists; parse them back.
        user_actions[col] = user_actions[col].apply(ast.literal_eval)
        values = set()
        print(f"get {col} set")
        # Iterate the parsed Series directly instead of row-by-row .loc lookups.
        for row_values in tqdm(user_actions[col]):
            values.update(row_values)
        map_dic[col] = values

    for col in usecols:
        print(f"save {col} json")
        # Deterministic value -> contiguous index mapping (sorted order).
        map_dic[col] = {value: idx for idx, value in enumerate(sorted(map_dic[col]))}
        with open(f"{col}2idx.json", "w", encoding='utf-8') as f:
            json.dump(map_dic[col], f)

def get_active_user_files():
    """Print behavior-count statistics for users in 800_user_actions.csv.

    Each row's 'item_id' column holds a stringified list of interactions;
    'bn' is its length, i.e. the number of behaviors per user.

    NOTE(review): the commented-out 200/500/1000-threshold variants were
    dead code and have been removed; re-filter with
    ``data[data['bn'] >= K]`` if another threshold file is needed.
    """
    data = pd.read_csv("800_user_actions.csv", sep=',',
                       usecols=['user_id', 'item_id', 'category', 'brand_id', 'seller_id', 'sub_category', 'vtime'])
    data['bn'] = data['item_id'].apply(ast.literal_eval).apply(len)
    print("-----------------------------")
    print("800 len:", len(data))
    print("800 mean:", data['bn'].mean())
    print("800 max:", data['bn'].max())
    print("800 min:", data['bn'].min())
    print("800 median:", data['bn'].median())

def split_dataset():
    """Split user behavior 8:1:1 and build positive/negative samples.

    Reads ``800_user_actions.csv`` (list-valued columns, newest
    interaction first), shuffles users, splits them into train/val/test,
    and for each user emits the three most recent interactions as
    positives (inter_type=1) plus one sampled negative (inter_type=0)
    per positive.  Writes final_{train,valid,test}_data.csv.
    """
    print("读取数据")
    cols = ['item_id', 'category', 'brand_id', 'seller_id', 'sub_category', 'vtime']
    user_behavior = pd.read_csv("800_user_actions.csv", sep=',', usecols=["user_id"] + cols)
    for col in cols:
        # Columns are stored as stringified lists; parse them back.
        user_behavior[col] = user_behavior[col].apply(ast.literal_eval)
    print("读取数据完成")
    # Shuffle users before splitting (critical for an unbiased split).
    user_behavior = user_behavior.sample(frac=1, random_state=42).reset_index(drop=True)

    # Split into three parts: 8:1:1.
    train_size = int(0.8 * len(user_behavior))
    val_size = int(0.1 * len(user_behavior))

    df_train = user_behavior[:train_size]
    df_val = user_behavior[train_size:train_size + val_size]
    df_test = user_behavior[train_size + val_size:]

    # Keys are item_id strings (json.dump coerced the original int keys).
    with open('item_info.json', 'r', encoding='utf-8') as f:
        item_info = json.load(f)

    def get_pos_neg_data(df):
        # Positives: the user's three most recent interactions, label 1.
        pos_frames = []
        for i in range(3):
            tmp_pos = pd.DataFrame()
            tmp_pos['user_id'] = df['user_id']
            for col in cols:
                tmp_pos[col] = df[col].apply(lambda x: x[i])
            tmp_pos['inter_type'] = 1
            pos_frames.append(tmp_pos)
        pos_data = pd.concat(pos_frames, ignore_index=True)

        def get_neg_item(row, idx):
            # Sample a negative for position `idx` of this user's history.
            # With p=0.5 draw uniformly from the whole catalogue (rejecting
            # anything the user interacted with from `idx` on); otherwise
            # draw a "hard" negative from the older 70% of the remaining
            # history, rejecting items also in the newest 30%.
            behavior_item_ids = row['item_id'][idx:]
            behavior_num = len(behavior_item_ids)
            select_behavior_item_ids = behavior_item_ids[int(0.3 * behavior_num):]
            other_behavior_item_ids = behavior_item_ids[: int(0.3 * behavior_num)]
            behavior_item_ids = [int(item_id) for item_id in behavior_item_ids]
            proc = random.random()
            while 1:
                if proc < 0.5:
                    neg_item = random.choice(list(item_info.keys()))
                    if int(neg_item) not in behavior_item_ids:
                        return neg_item, item_info[str(neg_item)]
                neg_item = random.choice(list(select_behavior_item_ids))
                if int(neg_item) not in other_behavior_item_ids:
                    return neg_item, item_info[str(neg_item)]

        neg_frames = []
        for i in range(3):
            tmp_neg = pd.DataFrame()
            tmp_neg['user_id'] = df['user_id']
            # BUG FIX: the sampled (item, info) tuples must be unpacked from
            # the sampled Series itself — the original indexed df['tmp_row'],
            # a column that never existed on df, and passed ``axis=1`` to
            # Series.apply, which has no such parameter.
            sampled = df.apply(lambda x: get_neg_item(x, i), axis=1)
            tmp_neg['item_id'] = sampled.apply(lambda x: x[0])
            tmp_neg['category'] = sampled.apply(lambda x: x[1][0])
            tmp_neg['brand_id'] = sampled.apply(lambda x: x[1][1])
            tmp_neg['seller_id'] = sampled.apply(lambda x: x[1][2])
            tmp_neg['sub_category'] = sampled.apply(lambda x: x[1][3])
            # Place the negative 10s after the matching positive interaction.
            tmp_neg['vtime'] = df['vtime'].apply(lambda x: int(x[i]) + 10)
            tmp_neg['inter_type'] = 0
            neg_frames.append(tmp_neg)
        neg_data = pd.concat(neg_frames, ignore_index=True)

        data = pd.concat([pos_data, neg_data], ignore_index=True)
        return data.sort_values(['user_id', 'vtime']).reset_index(drop=True)

    final_train_data = get_pos_neg_data(df_train)
    final_val_data = get_pos_neg_data(df_val)
    final_test_data = get_pos_neg_data(df_test)

    final_train_data.to_csv('./final_train_data.csv')
    final_val_data.to_csv('./final_valid_data.csv')
    final_test_data.to_csv('./final_test_data.csv')




if __name__ == '__main__':
    # Only the split step runs by default; the other preprocessing helpers
    # defined above (get_user_behavior_file, get_json, ...) are not called
    # here and must be invoked explicitly.
    split_dataset()
