import os
import pandas as pd
import numpy as np
import pickle
import h5py


"""
    1、对cilck_uid.csv进行处理，得到模型训练数据
    2、只得到训练数据，没有区分eval和train数据
"""

# Input CSV columns: ['user_id', 'age', 'gender', 'creative_id', 'time', 'click_times']
train_dir = "/home/datanfs/macong_data/tencent_data/train_preliminary/click_uid.csv"
# Output path for the pickled training bundle (a pickle file despite the "h5" name).
h5_train_pos = "/home/datanfs/macong_data/tencent_data/train_preliminary/train_data/h5train2.pkl"

# Build the creative_id vocabulary and pickle the encoded training data
def build_vocab_uid_click():
    """Build a creative_id vocabulary from click_uid.csv and pickle the
    encoded training data to ``h5_train_pos``.

    Pipeline:
      1. Count creative_id frequencies over all users.
      2. Keep ids with count >= 2; index 0 is 'OOV', index 1 is 'PDD' (padding).
      3. Encode each user's (deduplicated) clicked ids as a fixed-length
         index sequence, padded/truncated to the longest raw click list.

    Pickle payload keys: 'data' (encoded sequences), 'gender', 'age',
    'src_len' (pre-OOV-filter lengths), 'word2cnt' (despite the name,
    a word -> index map) and 'cnt2word' (index -> word).

    Returns:
        None. Writes the pickle file as a side effect; returns early
        (without writing) if a non-numeric creative_id is encountered.
    """
    train_data = pd.read_csv(train_dir, encoding="utf-8")

    vocab_count = {}
    max_length = 0

    # Count frequencies. Each cell holds a stringified list of ints,
    # e.g. "[11, 22]". NOTE: eval() is only safe because the input file
    # is trusted, locally-generated data.
    for sen in train_data['creative_id']:
        words = eval(sen)
        max_length = max(max_length, len(words))
        for word in words:
            if isinstance(word, str):
                # Malformed row: creative_ids must be numeric. Bail out
                # before any output file is created (the original opened
                # and truncated the output up front and leaked the handle
                # on this path).
                print("cid type is str error")
                return
            vocab_count[word] = vocab_count.get(word, 0) + 1

    print("max length:", max_length)
    print("词典个数:", len(vocab_count))

    # Reserve index 0 for out-of-vocabulary ids and 1 for padding.
    vocab_dict = {'OOV': 0, 'PDD': 1}
    cnt = 2
    for word, freq in vocab_count.items():
        # Frequency threshold: drop ids seen only once.
        if freq >= 2:
            vocab_dict[word] = cnt
            cnt += 1

    print("设置threshold后得词典长度:", len(vocab_dict))
    print("设置max_length为:", max_length)

    # Encode every user's click sequence. Duplicates are dropped, OOV ids
    # are skipped, and rows are padded with 1 (PDD) / truncated to
    # max_length (measured on the raw, pre-dedup lists above).
    train_data_array = []
    src_len = []
    for sen in train_data['creative_id']:
        words = set(eval(sen))
        # NOTE(review): src_len counts words BEFORE OOV filtering, so it
        # can exceed the number of real (non-padding) tokens in a row —
        # preserved from the original; confirm downstream expectation.
        src_len.append([len(words)])
        t_data = [vocab_dict[word] for word in words if word in vocab_dict]
        if len(t_data) < max_length:
            t_data.extend([1] * (max_length - len(t_data)))
        else:
            t_data = t_data[:max_length]
        train_data_array.append(t_data)

    train_file_pickle = {
        'data': np.array(train_data_array),
        'gender': np.array([[x] for x in train_data['gender']]),
        'age': np.array([[x] for x in train_data['age']]),
        'src_len': np.array(src_len),
        'word2cnt': vocab_dict,
        'cnt2word': {idx: word for word, idx in vocab_dict.items()},
    }
    # Context manager guarantees the handle is closed even if dump fails.
    with open(h5_train_pos, 'wb') as train_store_file:
        pickle.dump(train_file_pickle, train_store_file)


    # TODO: process the validation data into a pickle file as well


# Build the mapping from creative_id to product/category/advertiser/industry feature ids
def build_cid_ad():
    """Read ad.csv and pickle per-creative_id feature-index vectors.

    Output pickle keys: 'product_vocab_dict' (creative_id -> np.array of
    4 indices: product, category, advertiser, industry) plus the four
    per-field id vocabularies.

    Raises:
        FileNotFoundError: if ad.csv does not exist.
    """
    print("构建uid到industry_id等映射信息")
    train_aid_dir = "/home/datanfs/macong_data/tencent_data/train_preliminary"
    h5_train_dir = "/home/datanfs/macong_data/tencent_data/" \
                   "train_preliminary/train_data/h5aid.pkl"

    ad_f = os.path.join(train_aid_dir, "ad.csv")
    if not os.path.exists(ad_f):
        print("error: {} not exist".format(ad_f))
        # Bug fix: the original raised FileExistsError for a MISSING
        # file; FileNotFoundError is the correct type (both OSError).
        raise FileNotFoundError(ad_f)

    df_ad = pd.read_csv(ad_f)
    # "\\N" marks NULL in the source CSV; replace it with 0 in the
    # affected COLUMN only. Bug fix: the original `df.loc[mask] = 0`
    # zeroed every column of the matching rows (including creative_id).
    df_ad.loc[df_ad["product_id"] == "\\N", "product_id"] = 0
    df_ad.loc[df_ad["industry"] == "\\N", "industry"] = 0
    df_ad["product_id"] = pd.to_numeric(df_ad["product_id"])
    df_ad["industry"] = pd.to_numeric(df_ad["industry"])
    df_ad = df_ad.astype(np.int32)

    vocab_dict = build_ad_vocab(df_ad)

    # Context manager ensures the handle is closed even if dump fails.
    with open(h5_train_dir, 'wb') as train_store_file:
        pickle.dump(vocab_dict, train_store_file)


def build_ad_vocab(df_ad):
    """Assign a globally-unique index to every distinct product_id,
    product_category, advertiser_id and industry value (one shared
    counter, so all feature ids live in a single index space), and map
    each creative_id to its 4-element feature-index vector.

    Args:
        df_ad: DataFrame with columns 'creative_id', 'product_id',
            'product_category', 'advertiser_id', 'industry'.

    Returns:
        dict with keys 'product_vocab_dict', 'product_dict',
        'category_dict', 'adv_id_dict', 'industry_dict'.
    """
    cnt = 0
    product_vocab_dict = {}
    product_dict = {}
    category_dict = {}
    adv_id_dict = {}
    industry_dict = {}
    for _, row in df_ad.iterrows():
        cid = row["creative_id"]
        product_id = row["product_id"]
        category = row["product_category"]
        adv_id = row["advertiser_id"]
        industry = row["industry"]

        # Assignment order (product, category, advertiser, industry)
        # matters: it determines each value's index in the shared space.
        for key, table in ((product_id, product_dict),
                           (category, category_dict),
                           (adv_id, adv_id_dict),
                           (industry, industry_dict)):
            if key not in table:
                table[key] = cnt
                cnt += 1

        # First occurrence of a creative_id wins.
        if cid not in product_vocab_dict:
            product_vocab_dict[cid] = np.array(
                [product_dict[product_id], category_dict[category],
                 adv_id_dict[adv_id], industry_dict[industry]])

    return {
        "product_vocab_dict": product_vocab_dict,
        "product_dict": product_dict,
        "category_dict": category_dict,
        "adv_id_dict": adv_id_dict,
        "industry_dict": industry_dict,
    }







def unit_test():
    """Smoke-test the data-preparation routines.

    Only the ad-feature vocabulary builder runs by default; the click
    vocabulary builder is left commented out.
    """
    build_cid_ad()
    # build_vocab_uid_click()

# Script entry point: run the smoke test when executed directly.
if __name__ == "__main__":
    unit_test()
