import os
import re
import json
def parse_markdown_paper(markdown_text):
    """
    Split a markdown document into sections keyed by their '# ' headings.

    Parameters
    ----------
    markdown_text : str
        Full markdown text of a paper.

    Returns
    -------
    dict[str, str]
        Maps each top-level heading (text after '# ', stripped) to the
        body lines following it, joined with '\\n'.  Lines before the
        first heading are discarded.  If the same heading occurs twice,
        the later occurrence overwrites the earlier one.
    """
    sections = {}
    current_section = None  # heading of the section being accumulated
    current_content = []    # body lines collected so far for that section

    for line in markdown_text.split('\n'):
        if line.startswith('# '):
            # A new top-level heading closes the previous section (if any).
            if current_section is not None:
                sections[current_section] = '\n'.join(current_content)
                current_content = []
            # Heading text with the '# ' prefix removed.
            current_section = line[2:].strip()
        elif current_section is not None:
            current_content.append(line)

    # Flush the final section.  Bug fix: the original additionally
    # required current_content to be non-empty here, silently dropping a
    # trailing heading with no body even though mid-document empty
    # sections were kept.
    if current_section is not None:
        sections[current_section] = '\n'.join(current_content)

    return sections


def get_pasered_result(file_path, output_path):
    """
    Parse every paper JSON under file_path into sections and save results.

    Each input file is expected to be a JSON list of chunk dicts with a
    "chunk_text" key.  The chunk texts are concatenated, split into
    sections by parse_markdown_paper, and the section dict is appended to
    the list as {"pasered_result": ...} before the list is written to
    output_path under the same base file name.

    Parameters
    ----------
    file_path : str
        Directory (walked recursively) containing the input .json files.
    output_path : str
        Existing directory where the augmented .json files are written
        (flat, regardless of input nesting).
    """
    entries = []  # (directory, base name without extension)
    for root, dirs, files in os.walk(file_path):
        for file in files:
            if file.endswith(".json"):
                # splitext keeps interior dots intact, unlike the original
                # split(".")[0] which truncated names like "a.b.json".
                entries.append((root, os.path.splitext(file)[0]))
    print(len(entries))
    for index, (root_dir, name) in enumerate(entries, start=1):
        # Bug fix: reopen the file from the directory os.walk found it in;
        # the original joined with file_path and crashed on nested files.
        with open(os.path.join(root_dir, name + ".json"), "r", encoding='utf-8') as fin:
            data = json.load(fin)
        print(index)
        # join() avoids quadratic string concatenation over many chunks.
        all_content = "".join(
            "\n" + chunk.get("chunk_text", "") + "\n" for chunk in data
        )
        parsed_result = parse_markdown_paper(all_content)
        data.append({"pasered_result": parsed_result})
        # Separate handle name: the original inner `with ... as f` shadowed
        # the still-open input handle.
        with open(os.path.join(output_path, name + ".json"), "w", encoding='utf-8') as fout:
            json.dump(data, fout, ensure_ascii=False, indent=4)

def remove_non_letters(text):
    """Return *text* with every character outside a-z / A-Z removed."""
    kept = [ch for ch in text if 'a' <= ch <= 'z' or 'A' <= ch <= 'Z']
    return ''.join(kept)

def get_dataset(file_path="papers_parsed", output_path="train_dataset"):
    """
    Build the training dataset from parsed papers.

    For every .json under file_path (as produced by get_pasered_result),
    read the section dict stored in the trailing {"pasered_result": ...}
    entry, extract the abstract section, and write
    {"abstract": ..., "other_content": ...} to output_path under the same
    base file name.  Files with no recognizable abstract are skipped.

    Parameters
    ----------
    file_path : str
        Directory (walked recursively) of parsed-paper .json files.
    output_path : str
        Existing directory where the dataset files are written.

    Returns
    -------
    list[str]
        File names (with .json extension) that had no abstract section.
    """
    wrong_list = []
    entries = []  # (directory, base name without extension)
    for root, dirs, files in os.walk(file_path):
        for file in files:
            if file.endswith(".json"):
                # splitext keeps interior dots intact, unlike split(".")[0].
                entries.append((root, os.path.splitext(file)[0]))
    print(len(entries))
    for index, (root_dir, name) in enumerate(entries, start=1):
        # Bug fix: open from the directory os.walk found the file in; the
        # original joined with file_path and crashed on nested files.  The
        # separate handle name also avoids the original's shadowing of `f`.
        with open(os.path.join(root_dir, name + ".json"), "r", encoding='utf-8') as fin:
            data = json.load(fin)
        print(index)
        # The section dict was appended last by get_pasered_result.
        parsered_result = data[-1]['pasered_result']
        temp = {}
        other_content = ""
        for section_name, section_content in parsered_result.items():
            # Normalize the heading so variants such as "1. Abstract" or
            # "A b s t r a c t" still match.
            if 'abstract' in remove_non_letters(section_name).lower():
                temp['abstract'] = section_content
            else:
                other_content += f"# {section_name}\n{section_content}\n"
        # Assign once after the loop (the original re-assigned on every
        # non-abstract section); key present only when such content exists,
        # matching the original output.
        if other_content:
            temp['other_content'] = other_content
        if 'abstract' not in temp:
            # No abstract found: record the file and do not emit a dataset entry.
            wrong_list.append(name + ".json")
            continue
        with open(os.path.join(output_path, name + ".json"), "w", encoding='utf-8') as fout:
            json.dump(temp, fout, ensure_ascii=False, indent=4)
    return wrong_list
if __name__ == "__main__":
    # Step 1: split each raw paper into sections by markdown heading.
    get_pasered_result("papers","papers_parsed")
    # Step 2: extract the abstract and remaining content for the train set.
    wrong_list = get_dataset("papers_parsed","train_dataset")
