#-*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import math
import collections
from tensorflow.contrib import layers
import random
import pickle

# Command-line configuration (TF1 flags).  Flag values are snapshotted into
# module-level constants immediately below, so they must be final before
# this module is imported by anything else.
flags = tf.app.flags
FLAGS = flags.FLAGS

# shield alg params: identify which business/algorithm/date partition to read.
flags.DEFINE_string("date_from", "0", "${YYYYMMDDHH}")
flags.DEFINE_integer("bus_id", 100348, "bus id from shield")
flags.DEFINE_integer("alg_id_train", 0, "algorithm id from shield")
flags.DEFINE_integer("alg_id_predict", 0, "algorithm id from shield")

# NOTE(review): "patten" is presumably a typo for "pattern", but the flag
# names are part of the CLI interface, so they are left unchanged.
flags.DEFINE_string("file_patten_train", "part-*", "train and test data_prepare file patten")
flags.DEFINE_string("file_patten_predict", "part-*", "train and test data_prepare file patten")
flags.DEFINE_string("mapping_table", None, "str to int")

# Snapshot flag values into the constants used throughout the script.
DATE_FROM = FLAGS.date_from
BUS_ID = FLAGS.bus_id
ALG_ID_TRAIN = FLAGS.alg_id_train
ALG_ID_PREDICT = FLAGS.alg_id_predict

FILE_PATTEN_TRAIN = FLAGS.file_patten_train
FILE_PATTEN_PREDICT = FLAGS.file_patten_predict
MAPPING_TABLE = FLAGS.mapping_table


# Fraction of samples held out for validation.
VALIDATION_RATIO = 0.2
# Max number of (item, weight) pairs kept per sample (top-N by weight).
ITEM_INPUT_LENGTH = 50

# TDW credentials.  NOTE(review): password is a placeholder, presumably
# substituted at deploy time -- confirm a real secret is never committed here.
TDW_USER = "tdw_halllin"
TDW_PWD = "xxx"

# PATH: HDFS locations for input samples and model output.
HDFS_PATH_TL_IF_NN_BASE = "hdfs://ss-sng-dc-v2"
HDFS_PATH_DATA_BASE = HDFS_PATH_TL_IF_NN_BASE + "/stage/outface/sng/g_sng_im_g_sng_own_tdw_shield/shield/rcmd/FitData/"
HDFS_PATH_MODEL_BASE = HDFS_PATH_TL_IF_NN_BASE + "/stage/outface/sng/g_sng_im_g_sng_own_tdw_shield/shield/rcmd/model/"

# Input layout: <base>/<bus_id>/<alg_id>/<date>/<file pattern>
TFRECORD_TRAIN_PATH = HDFS_PATH_DATA_BASE + str(BUS_ID) + "/" + str(ALG_ID_TRAIN) + "/" + DATE_FROM + "/" + FILE_PATTEN_TRAIN
TFRECORD_PREDICT_PATH = HDFS_PATH_DATA_BASE + str(BUS_ID) + "/" + str(ALG_ID_PREDICT) + "/" + DATE_FROM + "/" + FILE_PATTEN_PREDICT

class DataUtil():
    """I/O helpers: read raw records from TDW / HDFS via TF1 input queues and
    write prepared training data out as TFRecord files.

    Readers take a caller-supplied ``tf.Session`` and close it when the input
    is exhausted (callers pass throwaway sessions).
    """

    def __init__(self, tdw_user, tdw_pwd):
        self.tdw_user = tdw_user
        self.tdw_pwd = tdw_pwd

    def _drain_records(self, sess, fetch):
        """Run queue runners and evaluate `fetch` until the input producer is
        exhausted; return the list of all fetched values.

        Shared tail of tdw_line_reader/hdfs_line_reader.  Closes `sess`.
        """
        # tf.train.string_input_producer keeps its epoch counter in a LOCAL
        # variable, so both local and global initializers must run.
        init_op = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())
        sess.run(init_op)

        # Start input enqueue threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        record = []
        try:
            while not coord.should_stop():
                record.append(sess.run(fetch))
        except tf.errors.OutOfRangeError:
            # Raised by the queue when num_epochs is exhausted: normal end.
            print('record limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
        sess.close()
        return record

    def tdw_line_reader(self, sess, db, tbl, pri_parts, sub_parts, record_defaults, field_indices, num_epochs=1, shuffle=False):
        """Read every row of the given TDW table partition(s).

        Returns a list of per-row field tuples, restricted to `field_indices`,
        with `record_defaults` supplying types/defaults (TDW-patched TF API).
        """
        tdw_client = tf.new_tdw_client(db, self.tdw_user, self.tdw_pwd, group="tl")
        filenames = tdw_client.get_data_paths(tbl, pri_parts=pri_parts, sub_parts=sub_parts)
        filename_queue = tf.train.string_input_producer(filenames, num_epochs, shuffle)
        record_line = tf.train.read_tdw_record(filename_queue, record_defaults=record_defaults, field_indices=field_indices)
        return self._drain_records(sess, record_line)

    def hdfs_line_reader(self, sess, file_pattern, num_epochs=1, shuffle=False):
        """Read every text line from all HDFS files matching `file_pattern`."""
        filenames = tf.matching_files(file_pattern)
        filename_queue = tf.train.string_input_producer(filenames, num_epochs, shuffle)
        reader_line = tf.TextLineReader()
        # key is the file path; value is a string tensor with the line content
        key_line, value_line = reader_line.read(filename_queue)
        return self._drain_records(sess, value_line)

    def save_train_Data(self, name, item2id, save_personas_input, save_item_input, save_label_input, save_weight_input):
        """Serialize the parallel input arrays to <name>.tfrecord and dump the
        item2id vocabulary as a pickle next to it.

        The four save_* sequences are row-aligned; one tf.train.Example is
        written per row.  Resources are closed even if serialization fails.
        """
        writer = tf.python_io.TFRecordWriter("/cephfs/group/sng-clm-g-sng-dcdm-rcmd/data/" + name + ".tfrecord")
        try:
            for i in range(len(save_personas_input)):
                example = tf.train.Example(
                    features=tf.train.Features(
                        feature={
                            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=save_label_input[i])),
                            'item': tf.train.Feature(int64_list=tf.train.Int64List(value=save_item_input[i])),
                            'weight': tf.train.Feature(float_list=tf.train.FloatList(value=save_weight_input[i])),
                            'other': tf.train.Feature(float_list=tf.train.FloatList(value=save_personas_input[i]))
                        }))
                writer.write(example.SerializeToString())
        finally:
            writer.close()

        print(name + " save", len(save_personas_input))

        # write the item -> id vocabulary to a pickle for the serving side
        with open('/cephfs/group/sng-clm-g-sng-dcdm-rcmd/data/dict.pkl', 'wb') as output:
            pickle.dump(item2id, output)


class DataProcess():
    """Pure-Python transforms from raw TDW/HDFS records to model-ready
    numpy batches (no TensorFlow dependency)."""

    def label_mapping_process(self, record):
        """Build {mapping_int: key} from (key, mapping) rows.

        Rows whose mapping column is not a plain digit string are skipped.
        """
        mapping2key = {}
        for line in record:
            key, mapping = line[0], line[1]
            if mapping.isdigit():
                mapping2key[int(mapping)] = key
        return mapping2key

    def feature_index_process(self, record):
        """Split the feature-index table into behavior and persona features.

        Each row is (pri_key, sub_key, index).  Rows whose pri_key contains
        'user' are behavior features: index -> sub_key goes into index2key.
        All other rows are persona (profile) features: their indices are
        collected and returned sorted ascending.

        Returns (index2key, personas).
        """
        index2key = {}
        cnt = 0
        behavior_cnt = 0
        personas_cnt = 0
        personas = []
        for line in record:
            pk, sk, index = line[0], line[1], line[2]
            if index.isdigit():
                cnt += 1
                if 'user' in pk:
                    index2key[int(index)] = sk
                    behavior_cnt += 1
                else:
                    personas.append(int(index))
                    personas_cnt += 1
        personas.sort()
        print("behavior_cnt", behavior_cnt, len(index2key))
        print("personas_cnt", personas_cnt, len(personas))
        print("personas", personas)

        return index2key, personas

    def sample_process(self, record, index2key, mapping2key, personas, item_input_length=None):
        """Parse LibSVM-formatted sample lines into parallel lists.

        Each line is "<label> <idx>:<val> <idx>:<val> ...", indices 1-based.
        Behavior features (index in index2key) become (item, weight) pairs,
        truncated to the top `item_input_length` by weight; persona features
        fill a dense vector positioned by `personas`.  Samples with no label
        or no item features are dropped.

        item_input_length defaults to the module-level ITEM_INPUT_LENGTH
        (kept as a keyword for backward compatibility).

        Returns (label_list, item_list, weight_list, personas_list).
        """
        if item_input_length is None:
            item_input_length = ITEM_INPUT_LENGTH

        # Hoist persona index -> dense position; O(1) lookups instead of
        # repeated `in list` / list.index scans per feature.
        personas_pos = {idx: pos for pos, idx in enumerate(personas)}

        label_list = []
        item_list = []
        weight_list = []
        personas_list = []

        for line in record:
            line_personas = [0] * len(personas)
            data_list = line.split(' ')

            # First column is the label; optionally remap through mapping2key.
            if mapping2key:
                line_label = mapping2key.get(float(data_list[0]))
            else:
                line_label = int(float(data_list[0]))

            index2value = {}  # item key -> weight, later truncated to top-N
            # Remaining columns are 1-based LibSVM "index:value" pairs.
            for feature in data_list[1:]:
                idx_str, val_str = feature.split(':')
                index = int(idx_str) - 1
                value = float(val_str)

                if value > 0:
                    if index in index2key:
                        # behavior feature
                        index2value[index2key[index]] = value
                    elif index in personas_pos:
                        # persona (profile) feature
                        line_personas[personas_pos[index]] = value
                    else:
                        print("XXXXXXXXXX: ", index)

            top50 = collections.Counter(index2value).most_common(item_input_length)
            line_item = [k for k, v in top50]
            line_weight = [v for k, v in top50]

            # Drop samples that carry only a label and no user-item features.
            if line_label is not None and line_item and line_weight:
                label_list.append(line_label)
                item_list.append(line_item)
                weight_list.append(line_weight)
                personas_list.append(line_personas)

        return label_list, item_list, weight_list, personas_list

    def item_pool_process(self, item_list, label_list):
        """Build the item -> id vocabulary over items AND labels.

        More frequent items get smaller ids (id 0 is reserved for 'UNK'),
        which makes them more likely to be drawn by frequency-based samplers.
        """
        all_item = item_list[:]
        all_item.append(label_list)  # labels join the pool as one extra row
        item_flatten = [item for sublist in all_item for item in sublist]
        item_count = collections.Counter(item_flatten).most_common()
        # BUGFIX: first placeholder was {n2}, printing the unique count twice.
        print('Total Item:{n1}, Unique item{n2}'.format(n1=len(item_flatten), n2=len(item_count)))
        item_pool = [x[0] for x in item_count]

        item2id = {}
        item2id['UNK'] = 0
        for i in range(len(item_pool)):
            item2id[item_pool[i]] = (i + 1)
        return item2id

    def batch_input_process(self, item2id, personas_list, item_list, label_list, weight_list, label2id=True, item_input_length=None):
        """Convert row-aligned lists into fixed-width numpy model inputs.

        Items/weights are right-padded with zeros to `item_input_length`
        (defaults to module-level ITEM_INPUT_LENGTH).  Rows whose label or
        all of whose items map to falsy ids (e.g. 'UNK' == 0) are dropped.

        Returns (personas, item, label, weight, weight_num) numpy arrays with
        shapes (B, P), (B, L), (B, 1), (B, L, 1), (B, 1) -- or None when the
        inputs are inconsistent or no row survives filtering.
        """
        if item_input_length is None:
            item_input_length = ITEM_INPUT_LENGTH

        batch_personas_input = []
        batch_item_input = []
        batch_label_input = []
        batch_weight_input = []
        batch_weight_num_input = []

        if not (len(label_list) == len(item_list) == len(weight_list)):
            return None

        for i in range(len(item_list)):
            row_personas_input = personas_list[i]
            row_weight_num = 0
            row_input = [0] * item_input_length
            row_weight = [0] * item_input_length
            if label2id:
                row_label = item2id.get(label_list[i])
            else:
                row_label = label_list[i]

            for j in range(len(item_list[i])):
                dict_id = item2id.get(item_list[i][j])
                # NOTE: a falsy id (0 == 'UNK') or falsy label drops the pair.
                if dict_id and row_label:
                    row_input[row_weight_num] = dict_id
                    row_weight[row_weight_num] = weight_list[i][j]
                    row_weight_num += 1

            if row_weight_num == 0:
                continue
            batch_personas_input.append(row_personas_input)
            batch_item_input.append(row_input)
            batch_label_input.append([row_label])
            batch_weight_input.append(row_weight)
            batch_weight_num_input.append(row_weight_num)

        if len(batch_item_input) == 0 or len(batch_label_input) == 0:
            return None

        batch_personas_input = np.array(batch_personas_input)

        batch_item_input = np.array(batch_item_input)

        batch_label_input = np.array(batch_label_input)
        batch_label_input = np.reshape(batch_label_input, [len(batch_label_input), 1])

        batch_weight_input = np.array(batch_weight_input)
        batch_weight_input = np.reshape(batch_weight_input, [len(batch_weight_input), item_input_length, 1])

        batch_weight_num_input = np.array(batch_weight_num_input)
        batch_weight_num_input = np.reshape(batch_weight_num_input, [len(batch_weight_num_input), 1])

        return batch_personas_input, batch_item_input, batch_label_input, batch_weight_input, batch_weight_num_input


if __name__ == '__main__':

    data_util = DataUtil(tdw_user=TDW_USER, tdw_pwd=TDW_PWD)
    dp = DataProcess()

    # label mapping: model class index -> business key
    label_mapping_record = data_util.tdw_line_reader(tf.Session(), 'isd_clm', MAPPING_TABLE, ['p_' + DATE_FROM[:8]], None, [[''], ['']], [1, 2])
    mapping2key = dp.label_mapping_process(label_mapping_record)
    print('----------load mapping------------')
    print('mapping2key:', mapping2key.get(0))
    print('mapping2key:', len(mapping2key))

    # feature index: split behavior features from persona features
    feature_index_record = data_util.tdw_line_reader(tf.Session(), 'sng_shield_product', 'r_rcmd_model_feature_index_d',
                                                    ['p_' + DATE_FROM], ['sp_' + str(ALG_ID_TRAIN)], [[''], [''], ['']], [3, 4, 5])
    index2key, personas = dp.feature_index_process(feature_index_record)
    print('----------load feature index------')
    print('index0, key:', index2key.get(0))
    print('index2key', len(index2key))

    # samples: LibSVM lines from HDFS -> parallel lists
    sample_record = data_util.hdfs_line_reader(tf.Session(), TFRECORD_TRAIN_PATH)
    label_list, item_list, weight_list, personas_list = dp.sample_process(sample_record, index2key, mapping2key, personas)
    print('----------load train data_prepare---------')
    print('label_list|item_list|weight_list|personas_list', len(label_list), len(item_list), len(weight_list), len(personas_list))
    print(label_list[:5])
    print(item_list[:5])
    print(weight_list[:5])
    print(personas_list[:5])

    # item -> id vocabulary over items and labels
    item2id = dp.item_pool_process(item_list, label_list)

    # train & validation split.
    # BUGFIX: the previous np.random.randint sampling drew BOTH index sets
    # with replacement from the full range, duplicating rows and leaking
    # validation samples into training.  A shuffled permutation gives
    # disjoint, duplicate-free splits of the same sizes.
    data_len = len(label_list)
    shuffled_idx = np.random.permutation(data_len)
    train_size = int(data_len * (1 - VALIDATION_RATIO))
    train_idx = shuffled_idx[:train_size]
    validation_idx = shuffled_idx[train_size:]

    train_label, validation_label = [label_list[i] for i in train_idx], [label_list[i] for i in validation_idx]
    train_item, validation_item = [item_list[i] for i in train_idx], [item_list[i] for i in validation_idx]
    train_weight, validation_weight = [weight_list[i] for i in train_idx], [weight_list[i] for i in validation_idx]
    train_personas, validation_personas = [personas_list[i] for i in train_idx], [personas_list[i] for i in validation_idx]

    # TFRecord save (labels mapped through item2id for both splits)
    save_personas_input, save_item_input, save_label_input, save_weight_input, save_weight_num_input = dp.batch_input_process(
        item2id, train_personas, train_item, train_label, train_weight, label2id=True)
    data_util.save_train_Data("train", item2id, save_personas_input, save_item_input, save_label_input, save_weight_input)

    save_personas_input, save_item_input, save_label_input, save_weight_input, save_weight_num_input = dp.batch_input_process(
        item2id, validation_personas, validation_item, validation_label, validation_weight, label2id=True)
    data_util.save_train_Data("validation", item2id, save_personas_input, save_item_input, save_label_input, save_weight_input)