# -*- coding: utf-8 -*-

from graph_emb.word2vec import CustomWord2Vec
from otherpkg.utils import timeit
from tqdm import tqdm
import pandas as pd
import random
import gc
import logging


@timeit
def main():
    """Train word2vec embeddings over users' creative-id click histories.

    Reads the train/valid/test user-action CSVs, trains one word2vec model
    on the raw ``his_creative_id`` token sequences, then — for every other
    column of the ad profile table — translates each id sequence into that
    attribute's values and trains a separate, smaller embedding model.
    Every model is saved as ``<column>_word2vec.model`` under ``base_dir``.
    """
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    base_dir = 'input'
    train_data = pd.read_csv(base_dir + '/train_user_action_0.csv')
    valid_data = pd.read_csv(base_dir + '/valid_user_action_0.csv')
    test_data = pd.read_csv(base_dir + '/test_user_action.csv')
    data = pd.concat([train_data, valid_data, test_data])
    # Release the per-split frames before tokenizing; they are large.
    del train_data, valid_data, test_data
    gc.collect()

    col = 'his_creative_id'
    # Each row holds a space-separated id history; split into token lists.
    data[col] = data[col].apply(lambda x: x.split(" "))
    sentences = data[col].values.tolist()

    print("training...")
    # Shuffle so the model does not see train/valid/test rows in order.
    random.shuffle(sentences)

    train_dir = base_dir + '/train_preliminary'
    test_dir = base_dir + '/test'
    train_ad = pd.read_csv(train_dir + '/ad.csv')
    test_ad = pd.read_csv(test_dir + '/ad.csv')
    item_profile = pd.concat([train_ad, test_ad]).drop_duplicates()
    cols = item_profile.columns  # cols[0] is the id column keying the profile

    # Base model over the raw id sequences (embedding size 32).
    word2vec = CustomWord2Vec(min_count=1, window=10, size=32, workers=35, sg=1)
    w2v_save_path = base_dir + "/" + cols[0] + "_word2vec.model"
    word2vec.fit(sentences, update_vocab=True, epochs=10)
    word2vec.save(w2v_save_path)

    # One model per remaining ad attribute: the first gets size 32, the
    # rest size 16.  Pass `total` so tqdm can render a real progress bar
    # (zip alone has no length).
    attr_cols = cols[1:]
    sizes = [32] + [16] * (len(attr_cols) - 1)
    for c, s in tqdm(zip(attr_cols, sizes), total=len(attr_cols)):
        # Map id -> attribute value; ids missing from the profile map to "-1".
        d = dict(zip(item_profile[cols[0]], item_profile[c]))
        tmp_sentences = [[str(d.get(int(k), "-1")) for k in sentence]
                         for sentence in sentences]
        word2vec = CustomWord2Vec(min_count=1, window=10, size=s, workers=35, sg=1)
        w2v_save_path = base_dir + "/" + c + "_word2vec.model"
        word2vec.fit(tmp_sentences, update_vocab=True, epochs=10)
        word2vec.save(w2v_save_path)
        # Drop the translated corpus and model before the next iteration
        # to keep peak memory down.
        del tmp_sentences, word2vec
        gc.collect()



# Script entry point: run the full embedding-training pipeline.
if __name__ == "__main__":
    main()
