import random
import pickle
import joblib
import pandas as pd

import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence

# Feature lookup tables keyed by feature name; populated from the pickle
# files loaded below (values appear to be array-like, indexed by id - 1).
item_feat = {}
user_feat = {}
# Maximum purchase-history length kept per training example.
MAX_LEN = 50
random.seed(1234)  # reproducible negative sampling and dataset shuffling

# Load per-customer feature columns. The pickle stores one object per field,
# in this fixed order; dict insertion order matters downstream (see `keys`).
with open('../Data/Processed/customer_feat.pkl', 'rb') as f:
    for field in ('customer_id', 'FN', 'Active', 'club_member_status',
                  'fashion_news_frequency', 'age'):
        user_feat[field] = pickle.load(f)
    customer_cnt = pickle.load(f)
    _ = pickle.load(f)  # trailing object in the pickle is unused here

# Load per-article feature columns (article id and product code), skipping
# two unused objects, then the cardinalities of each field.
with open('../Data/Processed/article_sparse.pkl', 'rb') as f:
    for field in ('artid', 'pcode'):
        item_feat[field] = pickle.load(f)
    _ = pickle.load(f)
    _ = pickle.load(f)
    artid_cnt, pcode_cnt = pickle.load(f)

# 2-D index with 100 columns per article (indexed [art-1, 0..99] below);
# presumably each row holds that article's top-100 similar articles, 0-based.
# Used as the candidate pool for negative sampling — TODO confirm semantics.
with open('../Data/Processed/topk_index_100.pkl', 'rb') as f:
    topk_index_100 = pickle.load(f)

# Full transaction log; the CSV's first column is a row index, not data.
df = pd.read_csv('../Data/transactions_translated.csv',index_col=0)

# Build per-user examples: for each position i in a user's purchase sequence,
# the preceding (up to MAX_LEN) items form the history, pos_list[i] is the
# positive target and neg_list[i] a sampled negative. The last interaction of
# every user goes to validation; all earlier ones go to training.
train_set = []
valid_set = []
cnt = 0
for reviewerID, seq in df.groupby('customer_id'):
    if cnt % 100000 == 0:
        print(cnt)  # progress heartbeat
    cnt += 1
    pos_list = seq['article_id'].tolist()
    # Set gives O(1) membership for the rejection loop below; the original
    # tested `neg in pos_list`, which is O(len(pos_list)) per draw.
    pos_set = set(pos_list)

    # Negative sampling: for each purchased article, draw uniformly from its
    # 100 candidate neighbours until we hit one the user has NOT purchased.
    neg_list = []
    for art in pos_list:
        neg = pos_list[0]  # sentinel guaranteed to be in pos_set, forces >=1 draw
        while neg in pos_set:
            # topk_index_100 is 0-based while article ids appear 1-based,
            # hence the -1 / +1 shifts.
            neg = topk_index_100[art - 1, random.randint(0, 99)] + 1
        neg_list.append(neg)

    for i in range(1, len(pos_list)):
        # 0-based history indices of the up-to-MAX_LEN preceding purchases.
        idx = [x - 1 for x in pos_list[max(0, i - MAX_LEN):i]]
        seq_length = len(idx)
        # NOTE(review): reviewerID (the customer_id value) is used directly
        # as an index into the user feature arrays — assumes ids are dense
        # 0-based indices produced by the preprocessing step; verify.
        user_part = tuple(x[reviewerID] for x in user_feat.values())
        pos_item_part = tuple(x[pos_list[i] - 1] for x in item_feat.values())
        neg_item_part = tuple(x[neg_list[i] - 1] for x in item_feat.values())
        # Fancy indexing with a list — assumes feature values are numpy arrays.
        seq_part = tuple(x[idx] for x in item_feat.values())
        # Each position yields one positive (label 1) and one negative (label 0).
        target = train_set if i != len(pos_list) - 1 else valid_set
        target.append(user_part + pos_item_part + seq_part + (seq_length, 1))
        target.append(user_part + neg_item_part + seq_part + (seq_length, 0))
    
# Shuffle both splits in place (train first, then valid — same RNG call
# order as before) and print a few sanity-check samples.
for dataset in (train_set, valid_set):
    random.shuffle(dataset)

print(len(train_set), len(valid_set))
print(train_set[0])
print(valid_set[0])
print(user_feat.keys())
print(item_feat.keys())


train_dict, valid_dict = {}, {}
# Column names in the exact order the tuples were assembled above:
# user fields, item fields, per-item history fields, then length and label.
hist_keys = ['hist_' + k for k in item_feat.keys()]
keys = list(user_feat) + list(item_feat) + hist_keys + ['seq_length', 'y']
# Drop large intermediates to reduce peak memory before the next stage.
user_feat, item_feat, df = None, None, None
print(keys)

# Process Validation Set: transpose the list of row-tuples into columns,
# then convert each column to an array (padding the history columns).
valid_set = list(zip(*valid_set))
for i, key in enumerate(keys):
    if key.startswith('hist_'):
        # Pad the variable-length histories into one (rows, MAX_LEN) matrix.
        valid_dict[key] = pad_sequence(
            [torch.from_numpy(col) for col in valid_set[i]],
            batch_first=True).numpy()
        # pad_sequence pads to the longest sequence present — this holds
        # only if at least one history reaches MAX_LEN.
        assert valid_dict[key].shape[1] == MAX_LEN
        print(valid_dict[key][0])
    else:
        valid_dict[key] = np.asarray(valid_set[i])
    valid_set[i] = None  # release each column as soon as it is converted
valid_set = None

with open('../Data/Processed/valid_dict.db', 'wb') as f:
    joblib.dump(valid_dict, f)
valid_dict = None
print("valid dict dumped")


# Process Train Set: same column-wise conversion as the validation split.
train_set = list(zip(*train_set))
for i, key in enumerate(keys):
    if key.startswith('hist_'):
        # Pad the variable-length histories into one (rows, MAX_LEN) matrix.
        train_dict[key] = pad_sequence(
            [torch.from_numpy(col) for col in train_set[i]],
            batch_first=True).numpy()
        # Holds only if at least one history reaches MAX_LEN.
        assert train_dict[key].shape[1] == MAX_LEN
        print(train_dict[key][0])
    else:
        train_dict[key] = np.asarray(train_set[i])
    train_set[i] = None  # release each column as soon as it is converted
train_set = None

with open('../Data/Processed/train_dict.db', 'wb') as f:
    joblib.dump(train_dict, f)
train_dict = None
print("train dict dumped")
