"""
@Filename       : weibo_preprocess.py
@Create Time    : 2021/12/17 10:45
@Author         : Rylynn
@Description    : 

"""
import random

from tqdm import tqdm

# post id -> list of (user, time) repost events, in file order.
post_dict = {}
# user id -> total number of reposts across all cascades.
user_count_dict = {}
this_post = None


# repost_data.txt format (inferred from the parsing below — confirm against
# the dataset docs): a header line "<post_id>\t<num_reposts>" followed by
# num_reposts event lines of "<time>\t<user>".
with open('F:/data/weibodata/diffusion/repost_data.txt', 'r') as f:
    line = f.readline()
    while line:
        post, num = line.strip().split('\t')
        this_post = post
        # Build the cascade in a local list to avoid re-looking up
        # post_dict[this_post] on every event.
        cascade = []
        for _ in range(int(num)):
            time, user = f.readline().strip().split('\t')
            # Single lookup with a default instead of a test-then-set pair.
            user_count_dict[user] = user_count_dict.get(user, 0) + 1
            cascade.append((user, time))
        post_dict[this_post] = cascade
        line = f.readline()

# Rank all users by repost count (descending) and keep the 5000 most
# active ones as a set for O(1) membership tests below.
user_count_tuple = sorted(user_count_dict.items(), key=lambda kv: kv[1], reverse=True)

active_user = {user for user, _count in user_count_tuple[:5000]}

# Keep only repost events made by active users, then keep only cascades
# whose filtered length falls in [5, 500].
new_post_dict = {}
total_len = 0
for post, events in post_dict.items():
    filtered = [(user, time) for user, time in events if user in active_user]

    if 5 <= len(filtered) <= 500:
        new_post_dict[post] = filtered
        total_len += len(filtered)

# Report how many cascades survived and their average length.
print(len(new_post_dict))
print(total_len / len(new_post_dict))

# Shuffle the surviving cascades and split them 80/10/10 into
# train / test / valid.
final_cascade = list(new_post_dict.items())
random.shuffle(final_cascade)

n_cascades = len(final_cascade)
split_a = int(n_cascades * 0.8)
split_b = int(n_cascades * 0.9)
train_cascade = final_cascade[:split_a]
test_cascade = final_cascade[split_a:split_b]
valid_cascade = final_cascade[split_b:]


def write_cascade(cascade, filename):
    """Write cascades to *filename*, one cascade per line.

    Each line is '<post_id> <user1>,<time1> <user2>,<time2> ... ' —
    every token, including the last, is followed by a single space.

    :param cascade: iterable of (post_id, events) pairs, where events is a
        list of (user, time) tuples.
    :param filename: output path; the file is overwritten.
    """
    with open(filename, 'w') as f:
        for post, events in cascade:
            # Build the whole line first and write once, instead of issuing
            # one tiny write() per token.
            tokens = [str(post)] + ['{},{}'.format(user, time) for user, time in events]
            f.write(' '.join(tokens) + ' \n')


# Persist each split in the project's cascade file format.
for split, path in (
    (train_cascade, '../../../data/weibo/cascade.txt'),
    (test_cascade, '../../../data/weibo/cascadetest.txt'),
    (valid_cascade, '../../../data/weibo/cascadevalid.txt'),
):
    write_cascade(split, path)

# repost_idlist.txt: one numeric post id per line; line order defines the
# index used by the rest of the pipeline.
post_id = []
# NOTE(review): this rebinding shadows the cascade dict built above; from
# here on post_dict maps numeric post id -> line index.
post_dict = {}
with open('F:/data/weibodata/diffusion/repost_idlist.txt') as f:
    # Iterate the file object directly instead of materializing every line
    # with readlines().
    for idx, line in enumerate(f):
        pid = int(line)
        post_id.append(pid)
        post_dict[pid] = idx

# Map each surviving cascade key (assumed to be a line index into
# repost_idlist.txt — TODO confirm against repost_data.txt's key format)
# to its numeric post id.
final_post_id = [post_id[int(k)] for k in new_post_dict]


# WordTable.txt (GBK-encoded): word-id vocabulary, one entry per line as
# "<id>\t<frequency>\t<word>".
word_dict = {}
with open('F:/data/weibodata/weibocontents/WordTable.txt', encoding='gbk') as f:
    f.readline()  # discard the first line (presumably a header — confirm)
    # Iterate the file object directly instead of readlines(); also avoid
    # shadowing the builtin `id`.
    for raw in f:
        word_id, _freq, word = raw.strip().split('\t')
        word_dict[word_id] = word


# Root_Content.txt: post contents as alternating id / word-id-sequence
# lines, with blank lines separating records.  Builds
# content_table: numeric post id -> decoded content string.
content_table = {}
with open('F:/data/weibodata/weibocontents/Root_Content.txt', encoding='utf8') as f:
    is_id = True  # True when the next meaningful line should be a post id
    for line in f.readlines():
        line = line.strip()
        if len(line) == 0:
            # A blank line ends the current record; expect an id next.
            is_id = True
            continue
        # Skip mention ('@...') and link lines.  NOTE(review): this
        # `continue` does NOT flip is_id, so a skipped line leaves the
        # id/content expectation unchanged — presumably intentional for
        # this file format; confirm against the dataset docs.
        if line[0] == '@' or line[:4] == 'link':
            continue
        if is_id:
            # NOTE(review): rebinds the module-level name post_id (built
            # above as a list of ids) to a single int — confirm this
            # shadowing is intended.
            post_id = int(line)
        else:
            # Translate space-separated word ids back into words.
            # Raises KeyError if an id is missing from word_dict.
            content = ' '.join(list(map(lambda x: word_dict[x], line.split(' '))))
            content_table[post_id] = content
        is_id = not is_id


# Export the content of every post that survived the cascade filtering,
# keyed by its line index.  Hoist final_post_id into a set once:
# `k in <list>` is O(n) per lookup, making the loop O(n*m) overall.
final_id_set = set(final_post_id)
with open('../../../data/weibo/content.txt', 'w', encoding='utf8') as f:
    for k, v in content_table.items():
        if k in final_id_set:
            f.write('{}\t{}\n'.format(post_dict[k], v))

