"""
@Filename       : weibo_test.py
@Create Time    : 2022/1/6 21:29
@Author         : Rylynn
@Description    : 

"""
from torch.utils.data import DataLoader
from tqdm import tqdm

from util.dataloader import DiffuseSequenceDataSet, sequence_collate_fn
from util.preprocess import load_vocab_dict, load_content

# Load the ordered list of root post ids; post_dict maps each raw post id
# to its 0-based line index, which is used as the canonical key elsewhere.
post_id = []
post_dict = {}
with open('F:/data/weibodata/diffusion/repost_idlist.txt') as f:
    # Iterate the file object directly (lazy) instead of readlines(),
    # and parse the int once instead of twice per line.
    for idx, line in enumerate(f):
        pid = int(line)
        post_id.append(pid)
        post_dict[pid] = idx


# Parse Root_Content.txt: records look like "<id line>, <content line>",
# separated by blank lines.  content_table is keyed by the post's INDEX
# from post_dict (not the raw id), value is the content string.
content_table = {}
with open('F:/data/weibodata/weibocontents/Root_Content.txt', encoding='utf8') as f:
    is_id = True
    root_id = None  # renamed from `id` to avoid shadowing the builtin
    for raw_line in f:  # stream the file instead of readlines()
        line = raw_line.strip()
        if not line:
            # Blank line ends a record; next kept line is an id again.
            is_id = True
            continue
        if line[0] == '@' or line[:4] == 'link':
            # Mention/link lines are skipped WITHOUT toggling is_id, so
            # they don't disturb the id/content alternation.
            # NOTE(review): if such a line replaces a content line, the
            # following line is still treated as content — confirm format.
            continue
        if is_id:
            root_id = int(line)
        else:
            # KeyError here means an id absent from repost_idlist.txt.
            content_table[post_dict[root_id]] = line
        is_id = not is_id


existing_id = set(content_table.keys())


# NOTE(review): dead debug code below — it counted cascades in
# repost_data.txt whose root post had no content; kept for reference.
# tweet_count = 0
# not_in_set_count = 0
# with open('F:/data/weibodata/diffusion/repost_data.txt', 'r') as f:
#     line = f.readline()
#     while line:
#         line = line.strip()
#         post, num = line.split('\t')
#         this_post = post
#         post_dict[this_post] = []
#         for _ in range(int(num)):
#             line = f.readline()
#
#         line = f.readline()
#         tweet_count += 1
#         if int(this_post) not in existing_id:
#             not_in_set_count += 1
#             #print('1111')
#
# print(tweet_count)
# print(not_in_set_count)
# Sanity check: how many cascades in the training set have no root content.
dataset = 'weibo'
content_dict = load_content('../../../data/{}/'.format(dataset))
vocab_dict = load_vocab_dict('../../../data', dataset)
train_dataset = DiffuseSequenceDataSet('../../../data/{}/cascade.txt'.format(dataset), vocab_dict)

missing_count = 0  # renamed from `a` for clarity
for content_id, data in train_dataset:
    # `in dict` instead of `in dict.keys()` — same semantics, no view object.
    if content_id not in content_dict:
        missing_count += 1

print(len(train_dataset))
print(missing_count)
