
import requests
import re
import pymysql
import docx

from bs4 import BeautifulSoup


#获取页面函数
# Fetch a page (获取页面函数)
def getHtmlText(url):
    """Fetch *url* and return its decoded HTML text, or "" on any failure.

    A browser User-Agent is sent because some sites reject the default
    requests UA. Never raises: any network/HTTP error yields "", so
    callers can treat a failed fetch as an empty page.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36"}
    try:
        # timeout added so a dead server cannot hang the whole crawl
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        # use the detected encoding so non-UTF8 pages decode correctly
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # narrow except: only network/HTTP errors are expected here
        return ""

#解析页面函数
def parsePage(article_list,page):
    try:
        soup = BeautifulSoup(page, "html.parser")
        title = soup.h2.string
        #获取到文章页面的标签
        content = soup.section
        #利用正则表达式删除html标签
        pattern = re.compile(r'<[^>]+>', re.S)
        result = pattern.sub('', str(content))
        result.replace(' ', '')
        #封装数据，进行操作
        article = {'title':title,'content':result}
        print(title)
        article_list.append(article)
        #doData(article)
    except:
        print("解析页面数据时出错")

def doData(data):
    """Insert articles into the MySQL table article(title, content).

    data: iterable of {'title': ..., 'content': ...} dicts.
    Commits once after all rows; rolls back on any insert error.
    Cursor and connection are always closed.
    """
    # Database settings -- NOTE(review): credentials are hard-coded;
    # consider moving them to configuration.
    db = pymysql.connect(host="localhost", port=3306, user="root",
                         passwd="123456", db="test", charset="UTF8")
    # pymysql raises on connection failure, so no falsy-cursor check is
    # needed (the original `raise (NameError, ...)` was invalid anyway).
    cur = db.cursor()
    try:
        for article in data:
            print(article['title'])
            # Parameterized query: the original string concatenation was
            # open to SQL injection and broke on quotes in the content.
            stat = cur.execute(
                "insert into article(title,content) VALUES (%s,%s)",
                (article['title'], article['content']))
            if stat == 1:
                print(article['title'])
        db.commit()
    except Exception:
        db.rollback()
    finally:
        # close even when an exception was raised mid-loop
        cur.close()
        db.close()
# Save articles as a Word document (保存为word)
def saveAsWord(articles):
    """Write *articles* to mimeng.docx in the working directory.

    articles: list of {'title': ..., 'content': ...} dicts; each article
    becomes a level-0 heading, a paragraph, and a page break.
    (Parameter renamed from `list`, which shadowed the builtin.)
    """
    doc = docx.Document()

    for article in articles:
        doc.add_heading(article['title'], 0)
        doc.add_paragraph(article['content'])
        doc.add_page_break()
    doc.save('mimeng.docx')
#main
def main():
    """Crawl 20 index pages of the mimeng7 account, fetch every linked
    article, parse it, and save everything into one Word document."""
    start_url = "http://chuansong.me/account/mimeng7?start="
    targe_url = "http://chuansong.me"
    # number of index pages to walk; each index page lists 12 articles,
    # hence the `i * 12` start offset below
    page_count = 20
    article_list = []
    for i in range(page_count):
        print(i)
        index_html = getHtmlText(start_url + str(i * 12))
        soup = BeautifulSoup(index_html, "html.parser")
        print("解析链接")
        for link in soup.findAll("a", {'class': 'question_link'}):
            article_html = getHtmlText(targe_url + link['href'])
            parsePage(article_list, article_html)
    saveAsWord(article_list)


# Guard the entry point so importing this module does not start a crawl.
if __name__ == "__main__":
    main()
