import re

import redis

host = '150.158.26.17'
port = 6379
password = '123456'  # NOTE(review): credentials hardcoded in source — move to env vars/config

PATTERN1 = re.compile('[^A-Za-z]')  # any character that is not an ASCII letter
PATTERN2 = re.compile('[ ]{2,}')  # runs of two or more spaces (collapse after PATTERN1 sub)
r = redis.StrictRedis(host=host, port=port, password=password)  # NOTE(review): connection created at import time
WORD_FREQ_THRESHOLD = 2  # words seen fewer times than this get no index
ENTITY_FREQ_THRESHOLD = 1  # entities seen fewer times than this get no index
# key: word, value: word frequency
word2freq = {}
# key: entity id, value: entity frequency
entity2freq = {}
# key: word, value: its allocated index (starting from 1; 0 is the padding id)
word2index = {}
# key: entity id, value: its allocated index (starting from 1; 0 is the padding id)
entity2index = {}
# tokenized title of every clicked news item
corpus = []
MAX_TITLE_LENGTH = 10  # every title encoding is padded/truncated to this many slots
word_prefix = 'word-'
entity_prefix = 'entity-'
data_prefix = 'data-'
data_idx_prefix = 'data-map-'


def set_word_and_entity_idx(files):
    """
    Build the word and entity index maps: first count frequencies across all
    files, then allocate indices to frequent words/entities and mirror the
    index -> value mappings into Redis.
    :param files: [training_file, test_file]
    :return: None
    """
    count_word_and_entity_freq(files)
    construct_word2id_and_entity2id()


def count_word_and_entity_freq(files):
    """
    Count the frequency of words and entities in news titles in the training and test files.
    Fills the module-level word2freq / entity2freq maps and appends every
    tokenized title to corpus.
    :param files: [training_file, test_file]; tab-separated lines of
                  user_id \t title \t label \t entities
    :return: None
    """
    for file in files:
        # 'with' guarantees the handle is closed even if a line is malformed
        # (the original leaked the file on any exception before close()).
        with open(file, encoding='utf-8') as reader:
            for line in reader:
                array = line.strip().split('\t')
                news_title = array[1]
                entities = array[3]

                # tokenize once; reuse the token list for counting and corpus
                words = news_title.split(' ')

                # count word frequency
                for s in words:
                    word2freq[s] = word2freq.get(s, 0) + 1

                # count entity frequency; each pair looks like 'entity_id:entity_name'
                for s in entities.split(';'):
                    entity_id = s[:s.index(':')]
                    entity2freq[entity_id] = entity2freq.get(entity_id, 0) + 1

                corpus.append(words)


def _assign_indices(freq_map, index_map, threshold, redis_prefix):
    """
    Allocate consecutive indices (starting from 1; 0 is the dummy/padding id)
    to every key in freq_map whose frequency reaches threshold, recording them
    in index_map and mirroring index -> key into Redis under redis_prefix.
    :param freq_map: dict of key -> observed frequency
    :param index_map: dict to fill with key -> allocated index
    :param threshold: minimum frequency for a key to receive an index
    :param redis_prefix: Redis key prefix for the index -> key mirror
    :return: None
    """
    cnt = 1
    for key, freq in freq_map.items():
        if freq >= threshold:
            index_map[key] = cnt
            r.set(redis_prefix + str(cnt), key)
            cnt += 1


def construct_word2id_and_entity2id():
    """
    Allocate each valid word and entity a unique index (start from 1)
    :return: None
    """
    # the word and entity passes were duplicated loops — share one helper
    _assign_indices(word2freq, word2index, WORD_FREQ_THRESHOLD, word_prefix)
    print('- word size: %d' % len(word2index))
    _assign_indices(entity2freq, entity2index, ENTITY_FREQ_THRESHOLD, entity_prefix)
    print('- entity size: %d' % len(entity2index))


def set_user_data(files):
    """
    Encode every line of each file via transform() and store the results in
    Redis. The positional index of the file is forwarded to transform() so
    that only non-first files (the test set) are also written under index keys.
    :param files: [training_file, test_file]
    :return: None
    """
    for count, file in enumerate(files):
        # 'with' closes the handle — the original opened it and never closed it
        with open(file, encoding='utf-8') as reader:
            transform(reader, count)


def get_local_word2entity(entities):
    """
    Given the entities information in one line of the dataset, construct a map from word to entity index
    E.g., given entities = 'id_1:Harry Potter;id_2:England', return a map = {'harry':index_of(id_1),
    'potter':index_of(id_1), 'england': index_of(id_2)}
    :param entities: entities information in one line of the dataset
    :return: a local map from word to entity index
    """
    local_map = {}

    for entity_pair in entities.split(';'):
        # locate the separator once instead of calling index(':') twice
        colon = entity_pair.index(':')
        entity_id = entity_pair[:colon]
        entity_name = entity_pair[colon + 1:]

        # remove non-character word and transform words to lower case
        entity_name = PATTERN1.sub(' ', entity_name)
        entity_name = PATTERN2.sub(' ', entity_name).lower()

        # the index lookup is loop-invariant — hoist it out of the word loop
        # (split(' ') always yields at least one element, so the lookup and
        # any KeyError it raises happen exactly as before)
        entity_index = entity2index[entity_id]

        # constructing map: word -> entity_index
        for w in entity_name.split(' '):
            local_map[w] = entity_index

    return local_map


def encoding_title(title, entities):
    """
    Encoding a title according to word2index map and entity2index map
    :param title: a piece of news title
    :param entities: entities contained in the news title
    :return: encodings of the title with respect to word and entity, respectively
    """
    local_map = get_local_word2entity(entities)

    # fixed-width slot arrays; '0' is the padding id
    word_slots = ['0'] * MAX_TITLE_LENGTH
    entity_slots = ['0'] * MAX_TITLE_LENGTH

    pos = 0
    for token in title.split(' '):
        if pos == MAX_TITLE_LENGTH:
            break
        word_idx = word2index.get(token)
        if word_idx is None:
            # unknown (infrequent) word: skip without consuming a slot
            continue
        word_slots[pos] = str(word_idx)
        entity_idx = local_map.get(token)
        if entity_idx is not None:
            entity_slots[pos] = str(entity_idx)
        pos += 1

    return ','.join(word_slots), ','.join(entity_slots)


def transform(reader, total):
    """
    Encode each line of an open dataset file and push it into Redis.
    :param reader: open file handle over tab-separated lines:
                   user_id \t title \t label \t entities
    :param total: index of the file being processed; when non-zero (i.e. not
                  the first/training file) each record is additionally stored
                  under a per-line index key for easy result lookup
    :return: None
    """
    for count, line in enumerate(reader):
        array = line.strip().split('\t')
        user_id = array[0]
        title = array[1]
        label = array[2]
        entities = array[3]
        word_encoding, entity_encoding = encoding_title(title, entities)
        # build the record once (the original formatted it twice and wrapped
        # the already-str result in a redundant str() call)
        record = '%s\t%s\t%s\t%s\n' % (user_id, word_encoding, entity_encoding, label)
        if total != 0:
            # redundant index-keyed copy (test set only) for result queries
            r.set(data_idx_prefix + str(count), record)
        r.rpush(data_prefix + user_id, record)


# Finally, only the test set is redundantly written to Redis under index keys,
# so query results can be looked up easily.
if __name__ == '__main__':
    # Build the word/entity index maps first, then encode and store user data.
    data_files = ['./data/raw_train.txt', './data/raw_test.txt']
    set_word_and_entity_idx(data_files)
    set_user_data(data_files)
