'''
Global configuration for the project: input/output file locations, model
file paths, and small file-handling helpers used by the summarizer.
'''
class Tools():
    """Project-wide configuration: input/output locations and model paths.

    All attributes are class-level constants; paths are built relative to
    this source file via ``__file__``.
    """
    # Name of the input file.  Alternative test inputs are kept commented
    # out for quick switching during experiments — previously they were
    # live assignments that silently shadowed each other.
    # input_file = '排比句_想考虑TrustRank.txt'
    # input_file = '一个段落.txt'
    input_file = '散文：我的家乡—汪曾祺.txt'

    # Input/output directory, relative to this source file.
    file_pre = __file__ + '/../../data/test/'
    input_path  = file_pre +          input_file
    # The summary is written next to the input, with a "Sum " prefix.
    output_path = file_pre + 'Sum ' + input_file
    # Model file locations.
    models_pre      = __file__ + '/../../models/'
    config_path     = models_pre + 'config.json'
    checkpoint_path = models_pre + 'model.ckpt'
    model_path      = models_pre + 'pytorch_model.bin'
    dict_path       = models_pre + 'vocab.txt'
    torch_model     = models_pre


def openFile(path):
    """Read a UTF-8 text file and return its lines.

    path: path to a UTF-8 encoded text file.
    Returns the list of lines as produced by ``readlines()``
    (trailing newlines preserved).
    """
    with open(path, "r", encoding='UTF-8') as f:
        return f.readlines()

def cleanFile(path):
    """Empty the file at *path* (create it if it does not exist).

    Opening in write mode truncates the file on open, so no explicit
    ``truncate(0)`` call is needed.
    """
    with open(path, "w", encoding='UTF-8'):
        pass

def writeSum(keysentences, path):
    """Append the selected summary sentences to the file at *path*.

    keysentences: list of dicts ``{'index': num, 'sentence': str, 'weight': num}``
        — the key sentences chosen as the summary.
    path: output file, opened in append mode, UTF-8 encoded.

    Sentences are written in document order (sorted by 'index') so the
    summary keeps the source text's logical flow; a Chinese full stop is
    appended to each sentence and a newline terminates the summary.
    NOTE: sorts *keysentences* in place (callers may rely on the ordering).
    """
    # Debug prints of the unsorted sentences were removed here; they
    # polluted stdout on every call.
    keysentences.sort(key=lambda k: (k.get('index', 0)))
    with open(path, "a", encoding='UTF-8') as f:
        for sentence in keysentences:
            f.write(sentence['sentence'] + '。')
        f.write('\n')

def mergeOrSubsection(article, flag):
    """Merge paragraphs into one block, or keep them as separate sections.

    article: list of paragraph strings.
    flag: 'merge'      -> return a single-element list joining all paragraphs
          (useful when paragraphs are interrelated, or some paragraph has no
          key sentence of its own, e.g. one that only lists related work);
          'subsection' -> return *article* unchanged.

    Raises ValueError for any other flag (previously this silently
    returned None, hiding caller typos).
    """
    if flag == 'merge':
        return [''.join(article)]
    if flag == 'subsection':
        return article
    raise ValueError("flag must be 'merge' or 'subsection', got %r" % (flag,))

if __name__ == "__main__":
    # test变量对不对
    tools = Tools()
    print(tools.input_path)
    print(tools.output_path)
    print(tools.config_path)

    print(openFile(tools.input_path))