# encoding: utf-8

from bs4 import BeautifulSoup
import re

# Precompiled regular expressions for tag stripping/replacement.
# The original comment claimed these were compiled but they were raw
# strings; compiling hoists the work out of the per-file processing.
# (`re.sub` accepts compiled Pattern objects, so callers are unaffected.)
re_pattern_remove_p = re.compile(r'<p (.*?)>|</p>|<span (.*?)>|</span>|<h(.*?)>|</h(.*?)>|<br/>')
re_pattern_replace_a = re.compile(r'<a (.*?)>|<b>')
re_pattern_replace_b = re.compile(r'</a>|</b>')

def read_html(rpath, opath):
    """Extract paragraph/heading text from an HTML file and write it out.

    :param rpath: path of the HTML file to read
    :param opath: path of the text file to write
    :return: None
    """
    # Output path is taken directly from the caller.
    write_path = opath

    # Parse the HTML into a soup and keep only the textual content.
    with open(rpath, 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f, 'html5lib')
        # Drop every table (formatting/layout noise, not body text).
        for table in soup('table'):
            table.extract()

        # str() of the result list, e.g. "[<p>..</p>, <h1>..</h1>]".
        raw_string = str(soup.find_all(['p', 'h1', 'h2', 'h3', 'h4']))

        # Strip p/span/h*/br tags, then rewrite <a>/<b> open tags to a
        # "\n<title>" marker and their close tags to "<rd>\n".
        output_string = re.sub(re_pattern_remove_p, "", raw_string)
        final_string = re.sub(re_pattern_replace_a, "\n<title>", output_string)
        final_string = re.sub(re_pattern_replace_b, '<rd>\n', final_string)

        # The old per-character loop ("strip, drop comma, += char") was
        # O(n^2) string concatenation and amounted to deleting every
        # ASCII comma (str() list separators); do it in one pass.
        content = final_string.replace(",", "")
        write_paragraph(write_path, dataClean(content))

def write_paragraph(wpath, text):
    """Write *text* to *wpath* as UTF-8.

    Trims the surrounding "[" / "]" left over from str()-ifying a result
    list, plus any leading newlines, before writing.

    :param wpath: destination file path
    :param text: content to write
    :return: None
    """
    cleaned = text.lstrip("[").rstrip("]").lstrip("\n")
    with open(wpath, "w", encoding="utf-8") as out:
        out.write(cleaned)

def dataClean(content):
    """Normalise extracted text: drop all spaces and collapse blank lines.

    :param content: raw text assembled from the HTML paragraphs
    :return: cleaned text with no spaces and no empty lines
    """
    # Remove all spaces.
    content = content.replace(" ", "")
    # Collapse any run of newlines to a single one. The previous
    # single .replace("\n\n", "\n") missed runs of three or more
    # newlines (e.g. "\n\n\n" became "\n\n", leaving a blank line).
    content = re.sub(r"\n{2,}", "\n", content)

    return content

