# Text encoding used for every file opened by this module.
ENCODING = "utf8"

"""
    :param stop_filename:停用词文件
    :return stop_dict:存放停用词字典
"""
def load_stop(stop_filename, stop_dict):
    stop_file = open(stop_filename, "r", encoding=ENCODING)
    for line in stop_file.readlines():
        line = line.strip("\n")
        stop_dict[line] = 1
    stop_file.close()
    return stop_dict

"""
    :param dict_filename:翻译字典文件
    :param stop_dict:停用词字典
    :return trans_dict:存放翻译字典
"""
def load_dict(dict_filename, stop_dict, trans_dict):
    dict_file = open(dict_filename, "r", encoding=ENCODING)
    for line in dict_file.readlines():
        line = line.strip("\n")
        ce_list = line.split(" <> ")
        source, translation = ce_list[0].lower(), ce_list[1]
        if source in stop_dict.keys():
            continue
        else:
            if source in trans_dict.keys():
                trans_dict[source] += " " + translation
            else:
                trans_dict[source] = translation
    dict_file.close()
    return trans_dict

"""
    :param axis_filename:token化后的中/英文本文件
    :param stop_dict:停用词字典
    :return st:存放中/英文本中句子的数组
    :return len_href:存放不同句子长度的字典
    :return token_stat:存放中/英文本词频的字典  
"""
def load_axis(axis_list, st, len_href, token_stat, token2stat, stop_dict):
    segid = 1
    for line in axis_list:
        line = line.strip("\n")
        if len(line.split()) == 0:
            continue
        st.append(line)
        tokens = line.split(" ")
        snt = line.replace(" ", "")
        len_href[segid - 1] = len(snt)
        for i in tokens:
            if i in stop_dict:
                continue
            token_stat[i] += 1
            token_stat["total"] += 1
            token2stat[i + str(segid - 1)] = 1
        segid += 1
    return st, len_href, token_stat

