import jieba3
import re
# 构建词典
def build_vocab(file_name='./jaychou_lyrics.txt'):
    """Build a word-level vocabulary from a lyrics corpus file.

    Args:
        file_name: path to the UTF-8 lyrics text file, one lyric line per row.
            Defaults to the Jay Chou lyrics corpus used by the original script.

    Returns:
        index_to_word: list mapping index -> word.
        word_to_index: dict mapping word -> index.
        word_count: number of distinct words in the vocabulary.
        corpus_idx: the whole corpus as a flat list of word indices, with the
            index of the ' ' token appended after each line as a separator.
    """
    # 1. Clean the text.
    clean_sentences = []
    seen_lines = set()  # O(1) duplicate check instead of scanning the list
    # `with` + explicit encoding: close the file even on error, and decode the
    # Chinese corpus deterministically instead of via the platform locale.
    with open(file_name, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.replace('〖韩语Rap译文〗', '')
            # Keep only CJK chars, ASCII letters/digits, spaces and !?,
            line = re.sub(r'[^\u4e00-\u9fa5 a-zA-Z0-9!?,]', '', line)
            # Collapse runs of spaces to a single space (the original deleted
            # them entirely, contradicting its own comment).
            line = re.sub(r'[ ]{2,}', ' ', line)
            # Strip surrounding whitespace / newline.
            line = line.strip()
            # Drop empty and single-character lines.
            if len(line) <= 1:
                continue
            # Drop duplicate lines.
            if line not in seen_lines:
                seen_lines.add(line)
                clean_sentences.append(line)

    # 2. Segment the corpus into words.
    index_to_word, all_sentences = [], []
    seen_words = set()  # mirrors index_to_word for O(1) membership tests
    tokenizer = jieba3.jieba3()
    for line in clean_sentences:
        words = tokenizer.cut_text(line)
        all_sentences.append(words)
        for word in words:
            if word not in seen_words:
                seen_words.add(word)
                index_to_word.append(word)

    # Guarantee the line-separator token exists in the vocabulary; the
    # original indexed word_to_index[' '] and could raise KeyError when no
    # ' ' token survived cleaning/tokenization.
    if ' ' not in seen_words:
        seen_words.add(' ')
        index_to_word.append(' ')

    # Word -> index mapping.
    word_to_index = {word: idx for idx, word in enumerate(index_to_word)}
    # Vocabulary size.
    word_count = len(index_to_word)

    # Encode each sentence as a list of indices; append the ' ' token's index
    # after every line so lines stay separated in the flat corpus.
    space_idx = word_to_index[' ']
    corpus_idx = []
    for sentence in all_sentences:
        corpus_idx.extend(word_to_index[word] for word in sentence)
        corpus_idx.append(space_idx)

    return index_to_word, word_to_index, word_count, corpus_idx

def test01():
    """Smoke test: build the vocabulary and dump every artifact to stdout."""
    index_to_word, word_to_index, word_count, corpus_idx = build_vocab()
    # Print in the same order as before: size, vocab list, mapping, indices.
    for artifact in (word_count, index_to_word, word_to_index, corpus_idx):
        print(artifact)

# Run the smoke test only when executed as a script, not when imported.
if __name__ == '__main__':
    test01()