import numpy as np
import torch
import pickle as pkl
import random
import os
from deepctr_torch.inputs import SparseFeat, DenseFeat, VarLenSparseFeat, get_feature_names
from deepctr_torch.models import DIN

# Maximum length of each user's behavior-history sequence (hist_artid / hist_pcode).
MAX_LEN = 2

def seed_torch(seed):
    """Seed every RNG this script uses (Python, NumPy, PyTorch CPU and GPU).

    Args:
        seed: integer seed applied to all random number generators.

    Also pins ``PYTHONHASHSEED`` and forces deterministic cuDNN kernels so
    repeated runs produce identical results (at some GPU-speed cost).
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Fix: the original seeded only the *current* GPU; seed every device and
    # disable cuDNN autotuning, which otherwise picks non-deterministic kernels.
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
seed_torch(1234)


# Column order of the row-tuples stored in train_valid.pkl; 'y' is the label.
keys = ['customer_id', 'FN', 'Active', 'club_member_status',
        'fashion_news_frequency', 'age', 'artid', 'pcode', 'y']

# Vocabulary sizes (+1 reserves id 0 for padding/unknown).
N_ARTID = 105542 + 1
N_PCODE = 47224 + 1

feature_columns = [SparseFeat('customer_id', 1371980, embedding_dim=64, use_hash=False)]
# The four binary (0/1) profile flags all share the same small embedding size.
feature_columns += [
    SparseFeat(flag, 2, embedding_dim=4, use_hash=False)
    for flag in ('FN', 'Active', 'club_member_status', 'fashion_news_frequency')
]
feature_columns += [
    SparseFeat('artid', N_ARTID, embedding_dim=64, use_hash=False),
    SparseFeat('pcode', N_PCODE, embedding_dim=64, use_hash=False),
    DenseFeat('age', 1),
]
# History sequences reuse the embedding tables of their base features via
# embedding_name; both share one 'seq_length' input.
feature_columns += [
    VarLenSparseFeat(
        SparseFeat('hist_artid', vocabulary_size=N_ARTID, embedding_dim=64,
                   embedding_name='artid'),
        maxlen=MAX_LEN, length_name="seq_length"),
    VarLenSparseFeat(
        SparseFeat('hist_pcode', vocabulary_size=N_PCODE, embedding_dim=64,
                   embedding_name='pcode'),
        maxlen=MAX_LEN, length_name="seq_length"),
]
# Features DIN applies attention over.
behavior_feature_list = ["artid", "pcode"]

# The pickle file contains two objects written back-to-back: train, then valid.
# Fix: build the path with os.path.join — the hard-coded '.\\input\\...' string
# only works on Windows (backslash is not a separator on POSIX).
# NOTE(security): pickle.load executes arbitrary code; only load trusted files.
with open(os.path.join('.', 'input', 'processed', 'train_valid.pkl'), 'rb') as f:
    train_set = pkl.load(f)
    valid_set = pkl.load(f)
len_train, len_valid = len(train_set), len(valid_set)

# Transpose row-tuples into per-feature columns.
train_set = list(zip(*train_set))
valid_set = list(zip(*valid_set))

# Map each feature name to its column array (zip instead of the
# `for i in range(len(keys))` index anti-pattern).
train_dict = {key: np.asarray(col) for key, col in zip(keys, train_set)}
valid_dict = {key: np.asarray(col) for key, col in zip(keys, valid_set)}

# Placeholder behavior history: every sample gets a constant-length sequence
# filled with item id 1. TODO(review): replace with each user's real history.
train_dict['seq_length'] = np.ones((len_train, )) * MAX_LEN
valid_dict['seq_length'] = np.ones((len_valid, )) * MAX_LEN
for hist_key in ('hist_artid', 'hist_pcode'):
    train_dict[hist_key] = np.ones((len_train, MAX_LEN))
    valid_dict[hist_key] = np.ones((len_valid, MAX_LEN))

# Restrict model inputs to the names the feature columns actually declare.
X_train = {name: train_dict[name] for name in get_feature_names(feature_columns)}
X_valid = {name: valid_dict[name] for name in get_feature_names(feature_columns)}

y_train = train_dict['y']
y_valid = valid_dict['y']

# Prefer the first GPU when available; otherwise train on CPU.
device = 'cpu'
use_cuda = True
if use_cuda and torch.cuda.is_available():
    print('cuda ready...')
    device = 'cuda:0'

# Deep Interest Network: attention pooling over the hist_* behavior features.
model = DIN(feature_columns, behavior_feature_list, device=device,
            att_weight_normalization=True, seed=1234)

model.compile(torch.optim.Adam(model.parameters(), 0.0005), 'binary_crossentropy',
              metrics=['binary_crossentropy', 'auc'])

# shuffle=False keeps epoch ordering reproducible across runs.
history = model.fit(X_train, y_train, batch_size=1536, epochs=3, verbose=1,
                    validation_data=(X_valid, y_valid), shuffle=False)

# Fix: '.\\model1.pt' creates a file literally named '.\model1.pt' on POSIX;
# os.path.join yields the intended './model1.pt' path on every platform.
torch.save(model.state_dict(), os.path.join('.', 'model1.pt'))