import ast
import os
import pickle
from collections import Counter

import h5py
import numpy as np
import pandas as pd


# Build the vocabulary
def build_word2vec_vocab_uid_click():
    """Build a creative_id vocabulary from the click log and pickle the
    encoded training data.

    Reads the per-user click CSV (columns: user_id, age, gender,
    creative_id, time, click_times), where each ``creative_id`` cell is a
    string literal of a Python list (e.g. ``"[1, 2, 3]"``). Counts id
    frequencies, keeps ids seen at least twice, encodes each user's
    de-duplicated creative_id set to integer indices, and dumps a dict
    with keys ``data``, ``gender``, ``age``, ``src_len``, ``cid2cnt``,
    ``cnt2cid`` to a pickle file.

    Returns early (writing nothing) if any creative_id is a string.
    """
    print("### build word2vec vocab dict start ###")
    # NOTE(review): absolute paths are hard-coded to one machine — consider
    # making them parameters or reading them from config.
    train_dir = "/home/datanfs/macong_data/tencent_data/train_preliminary/click_uid.csv"
    h5_word2vec_dict_pos = "/home/datanfs/macong_data/tencent_data/train_preliminary/train_data/h5word2vec.pkl"

    train_data = pd.read_csv(train_dir, encoding="utf-8")

    vocab_count = Counter()
    max_length = 0

    # Count token frequencies. ast.literal_eval is the safe replacement for
    # eval() on list-literal strings coming from the CSV.
    for sen in train_data['creative_id']:
        words = ast.literal_eval(sen)
        max_length = max(max_length, len(words))
        for word in words:
            if isinstance(word, str):
                # Sanity check: creative ids are expected to be ints.
                print("cid type is str error")
                return
            vocab_count[word] += 1

    print("max length:", max_length)
    print("词典个数:", len(vocab_count))

    # Index 0 is reserved for out-of-vocabulary, 1 for padding.
    min_freq = 2  # frequency threshold: drop ids seen only once
    vocab_dict = {'OOV': 0, 'PDD': 1}
    for word, count in vocab_count.items():
        if count >= min_freq:
            vocab_dict[word] = len(vocab_dict)

    print("设置threshold后得词典长度:", len(vocab_dict))
    print("设置max_length为:", max_length)

    # Encode each user's clicks. Duplicates are collapsed via set();
    # below-threshold ids are silently dropped (not mapped to OOV).
    train_data_array = []
    src_len = []
    for sen in train_data['creative_id']:
        words = set(ast.literal_eval(sen))
        # NOTE(review): src_len records the de-duplicated length *before*
        # OOV filtering, so it can exceed len(t_data) — confirm downstream
        # consumers expect this.
        src_len.append([len(words)])
        t_data = [vocab_dict[word] for word in words if word in vocab_dict]
        train_data_array.append(t_data)

    train_gender_array = np.array([[x] for x in train_data['gender']])
    train_age_array = np.array([[x] for x in train_data['age']])
    # Rows are variable-length, so this must be an object array; NumPy >= 1.24
    # raises ValueError for ragged input without an explicit dtype=object.
    train_data_array = np.array(train_data_array, dtype=object)
    src_len_array = np.array(src_len)

    train_file_pickle = {
        'data': train_data_array,
        'gender': train_gender_array,
        'age': train_age_array,
        'src_len': src_len_array,
        'cid2cnt': vocab_dict,
        # Reverse lookup: integer index -> creative_id.
        'cnt2cid': {idx: cid for cid, idx in vocab_dict.items()},
    }
    # Context manager guarantees the file is closed even on error.
    with open(h5_word2vec_dict_pos, 'wb') as train_store_file:
        pickle.dump(train_file_pickle, train_store_file)
    print("### build word2vec vocab dict over ###")


if __name__ == '__main__':
    # Script entry point: build the creative_id vocabulary and pickled
    # training arrays.
    print("### build word2vec dict ###")
    build_word2vec_vocab_uid_click()

    # TODO: also process the validation data into an h5-style file.