import json

# Load the LCCC dialogue corpus: a JSON list of dialogues, where each
# dialogue is a list of utterance strings.
try:
    with open('/data/whl/cl/gpt2/dataset/LCCC-base_train.json', 'r', encoding='utf-8') as f:
        dialogues = json.load(f)
except FileNotFoundError:
    print("无法找到语料文件'LCCC-base_train.json'，请检查文件是否存在。")
    # Abort: without the corpus, the code below would raise a NameError
    # on `dialogues` (the original fell through and crashed here).
    raise SystemExit(1)

# One dataset line per dialogue, utterances joined by tabs.
train_datas = ["\t".join(dialogue) for dialogue in dialogues]

with open('/data/whl/cl/gpt2/dataset/lccc_dataset.txt', 'w', encoding='utf-8') as f:
    # Batch the writes instead of one f.write call per line.
    f.writelines(train_data + '\n' for train_data in train_datas)

def get_dict(datas):
    """Build character-level vocabulary mappings from dataset lines.

    Args:
        datas: iterable of strings — one dialogue per line, utterances
            separated by tabs. Tabs and surrounding whitespace are not
            part of the vocabulary.

    Returns:
        (word2id, id2word): a dict mapping character -> integer id, and
        the inverse list (id2word[i] is the character with id i).
        Ids 0-2 are reserved for "<pad>", "<unk>", "<sep>".
    """
    # The original counted character frequencies but never used the counts —
    # only the insertion order of the keys. An insertion-ordered dict of
    # unique characters yields exactly the same id assignment.
    chars = {}
    for data in datas:
        for ch in data.strip().replace('\t', ''):
            chars[ch] = None

    word2id = {"<pad>": 0, "<unk>": 1, "<sep>": 2}
    # Regular characters start after the 3 special tokens, in first-seen order.
    word2id.update({ch: i + 3 for i, ch in enumerate(chars)})
    id2word = list(word2id.keys())
    return word2id, id2word

if __name__ == '__main__':
    # Read the dataset back from the same absolute path it was written to.
    # (The original opened a relative 'lccc_dataset.txt', which fails unless
    # the CWD happens to be the dataset directory.)
    with open('/data/whl/cl/gpt2/dataset/lccc_dataset.txt', 'r', encoding='utf-8') as f:
        datas = f.readlines()
    word2id, id2word = get_dict(datas)

    dict_datas = {"word2id": word2id, "id2word": id2word}

    # Context manager guarantees the file is flushed and closed (the original
    # leaked the handle); ensure_ascii=False keeps the Chinese characters
    # readable instead of \uXXXX escapes.
    with open('/data/whl/cl/gpt2/model/lccc_dict_datas.json', 'w', encoding='utf-8') as f:
        json.dump(dict_datas, f, ensure_ascii=False)

    # Report the actual output filenames (the original message named
    # 'dataset.txt' / 'dict_datas.json', which are not the files written).
    print("整理任务按计划完成，数据集文件'lccc_dataset.txt'和词汇表文件'lccc_dict_datas.json'已经输出。")
    