import re
import json


# TOC depth parser.
def find_content_deep(line, max_depth=5):
    """
    Determine the depth of a table-of-contents heading from its numeric prefix.

    A heading such as "1 Intro" has depth 1, "1.2 Details" depth 2, and so on
    up to *max_depth* dotted components followed by a space.

    :param line: current TOC line
    :param max_depth: deepest numbering level to recognize (default 5)
    :return: heading depth, 1-based
    :raises Exception: if the line does not start with a recognized numbering
    """
    # Build the patterns programmatically with escaped dots. The original
    # hand-written patterns for depths 3-5 used a bare '.', which matched
    # ANY character (e.g. '1.2x3 ' was wrongly accepted as depth 3).
    for depth in range(1, max_depth + 1):
        pattern = r'^\d+' + r'\.\d+' * (depth - 1) + ' '
        # Patterns are ^-anchored, so match() is equivalent to search().
        if re.match(pattern, line):
            return depth
    raise Exception('未知的目录标题参数')


# Recursively build the statistics tree for the table of contents.
def iter_calc(this_line, f, clac_max_deep):
    """
    Recursively accumulate TOC statistics by consuming lines from *f*.

    Counting convention: only headings at exactly *clac_max_deep* contribute
    to the counters; shallower headings merely aggregate their children's
    totals. A heading containing the literal marker '[已完成]' ("finished")
    counts as finished, otherwise as unfinished.

    :param this_line: current TOC line (already stripped of its page number)
    :param f: input file object, used to read subsequent lines
    :param clac_max_deep: maximum TOC depth; terminates the recursion
    :return: (node dict for this heading, next unconsumed line — '' at EOF)
    :raises Exception: via find_content_deep() on an unrecognized heading
    """
    # Depth (1-based) of the current heading.
    cur_deep = find_content_deep(this_line)
    # Node structure: own text, child nodes, and aggregated counters.
    cur_content_info_dct = {"this_content": this_line, "sub_content_lst": [], "count_detail": {'finished_count': 0, 'unfinished_count': 0, 'all_count': 0}}
    # Peek at the next heading line.
    next_line = f.readline()
    # Non-empty: strip whitespace and drop the trailing page-number column.
    # Columns appear to be separated by a run of six spaces — TODO confirm
    # against the actual input file format.
    if next_line:
        next_line = next_line.strip().split('      ')[0]
        next_deep = find_content_deep(next_line)
    else:
        # EOF reached: close out this node. Only a max-depth node counts.
        if cur_deep == clac_max_deep:
            cur_content_info_dct['count_detail']['all_count'] = 1
            if '[已完成]' in this_line:
                cur_content_info_dct['count_detail']['finished_count'] = 1
            else:
                cur_content_info_dct['count_detail']['unfinished_count'] = 1
            return cur_content_info_dct, next_line
        else:
            return cur_content_info_dct, next_line
    while True:
        # At max depth this node is a countable leaf; return it together
        # with the pending next_line for the caller to process.
        if cur_deep == clac_max_deep:
            cur_content_info_dct['count_detail']['all_count'] = 1
            if '[已完成]' in this_line:
                cur_content_info_dct['count_detail']['finished_count'] = 1
            else:
                cur_content_info_dct['count_detail']['unfinished_count'] = 1
            return cur_content_info_dct, next_line
        # Next heading is a sibling or ancestor: this node is complete.
        elif cur_deep >= next_deep:
            return cur_content_info_dct, next_line
        # Next heading is deeper: recurse to build the child subtree, then
        # attach it and fold its counters into this node's totals.
        elif cur_deep < next_deep:
            sub_content_info_dct, next_line = iter_calc(next_line, f, clac_max_deep)
            # next_line here comes from the recursion and may be '' at EOF.
            cur_content_info_dct['sub_content_lst'].append(sub_content_info_dct)
            cur_content_info_dct['count_detail']['all_count'] += sub_content_info_dct['count_detail']['all_count']
            cur_content_info_dct['count_detail']['finished_count'] += sub_content_info_dct['count_detail']['finished_count']
            cur_content_info_dct['count_detail']['unfinished_count'] += sub_content_info_dct['count_detail']['unfinished_count']
            if not next_line:
                return cur_content_info_dct, next_line
            next_deep = find_content_deep(next_line)


# Report writer: pre-order traversal of the statistics tree.
def formatting_output_content_adv(dct_content, f_output):
    """
    Recursively pretty-print a TOC node and all of its descendants.

    Each node's heading text and aggregated totals (all / finished /
    unfinished) are echoed to stdout and appended to *f_output*.

    :param dct_content: dict-shaped TOC node (see iter_calc)
    :param f_output: writable text file receiving the formatted report
    :return: None
    """
    counts = dct_content['count_detail']
    # Build the summary line once; it is both printed and written.
    summary = (
        f"{dct_content['this_content']}     合计：{counts['all_count']}"
        f"    已完成：{counts['finished_count']}"
        f"    未完成：{counts['unfinished_count']}\n"
    )
    print('-' * 50)
    print(summary)
    f_output.write(summary)
    for child in dct_content['sub_content_lst']:
        formatting_output_content_adv(child, f_output)


# TOC analysis driver: read, aggregate, report.
def anaylse_doc_content(p_input_content='./content_file.txt', p_output_content='./output_content_file.txt', clac_max_deep=5):
    """
    Analyse a table-of-contents file and write a per-heading progress report.

    Reads *p_input_content* line by line, builds a statistics tree via
    iter_calc(), then writes every heading with its aggregated totals to
    *p_output_content* via formatting_output_content_adv().

    :param p_input_content: path of the TOC input file (UTF-8 text)
    :param p_output_content: path of the report output file
    :param clac_max_deep: maximum TOC depth; only headings at this depth are
        counted, shallower ones aggregate their children
    :raises Exception: '空目录' ("empty TOC") if the input file is empty
    """
    # Synthetic root node so multiple top-level headings can be aggregated.
    super_content_info_dct = {"this_content": '[content]', "sub_content_lst": [], "count_detail": {'finished_count': 0, 'unfinished_count': 0, 'all_count': 0}}
    with open(p_input_content, encoding='utf-8') as f:
        first_line = f.readline()
        if not first_line:
            raise Exception('空目录')
        # Strip whitespace and drop the trailing page-number column
        # (columns are separated by a run of six spaces).
        next_line = first_line.strip().split('      ')[0]
        while True:
            sub_content_info_dct, next_line = iter_calc(next_line, f, clac_max_deep)
            super_content_info_dct['sub_content_lst'].append(sub_content_info_dct)
            root_counts = super_content_info_dct['count_detail']
            sub_counts = sub_content_info_dct['count_detail']
            root_counts['all_count'] += sub_counts['all_count']
            root_counts['finished_count'] += sub_counts['finished_count']
            root_counts['unfinished_count'] += sub_counts['unfinished_count']
            # iter_calc returns '' once the input is exhausted.
            if not next_line:
                break
    # Debug dumps of the aggregated tree (dict and JSON form).
    print(super_content_info_dct)
    super_content_info_json = json.dumps(super_content_info_dct, ensure_ascii=False)
    print(super_content_info_json)
    with open(p_output_content, 'w', encoding='utf-8') as f_output:
        formatting_output_content_adv(super_content_info_dct, f_output)


# Script entry point: run the TOC analysis with its default file paths
# when this file is executed directly (not on import).
if __name__ == '__main__':
    anaylse_doc_content()
    # Author's completion banner (weiran, 2019-02-06).
    print('all done. weiran 20190206')