from tokenizer_tools import *
import os
import random
import torch
from d2l import torch as d2l

# Read the corpus.
# Register the WikiText-2 archive (URL + SHA-1 hash) with d2l's data hub.
# NOTE(review): this registration appears unused in this script — the corpus
# is read from a hard-coded local path below; kept for parity with d2l code.
d2l.DATA_HUB['wikitext-2'] = (
    'https://s3.amazonaws.com/research.metamind.io/wikitext/'
    'wikitext-2-v1.zip', '3c914d17d80b1459be871a5039ac23e752a53cbe')

# @save
def _read_wiki(data_dir):
    file_name = os.path.join(data_dir, 'wiki.train.tokens')
    with open(file_name, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    # 大写字母转换为小写字母
    paragraphs = [line.strip().lower().split(' . ')
                  for line in lines if len(line.split(' . ')) >= 2]
    random.shuffle(paragraphs)
    return paragraphs


# Load the corpus from a hard-coded local copy of WikiText-2.
paragraphs = _read_wiki("E:/code/gnn/EnglishLearning/data/wikitext-2-v1/wikitext-2")
# Keep only the first sentence of every paragraph.
texts = [paragraph[0] for paragraph in paragraphs]
print(texts[0:5])

# Accumulator for the CSV rows produced below.
lines = []
print(paragraphs)
# Convert each text into CSV rows of (sentence_index, token_id, mask, label).
# NOTE(review): the same attention_mask[0][0] value (the first position's
# mask) is written for every token of a sentence — confirm a per-token mask
# was not intended.
index = 1
for text in texts:
    try:
        datas = make_single_dataset(text)
        for data in datas:
            # First row of the batched input_ids tensor/sequence.
            sentence_ids = list(data["input_ids"])[0]
            for token_id in sentence_ids:  # avoid shadowing builtin `id`
                lines.append("{},{},{},{}".format(
                    index, token_id, data["attention_mask"][0][0], data["label"]))
            index += 1
    except Exception:
        # Skip texts the tokenizer cannot handle.  Narrowed from a bare
        # `except:` so KeyboardInterrupt/SystemExit still propagate.
        continue

# Persist the CSV rows.  Create the output directory if it is missing
# (the original crashed with FileNotFoundError when `data/` was absent)
# and write as UTF-8 to match the encoding the corpus was read with.
os.makedirs('data', exist_ok=True)
with open('data/wikitext2.csv', 'w', encoding='utf-8') as f:
    f.write("\n".join(lines))
