#!D:\MyWork\Program\python
# -*- coding: utf-8 -*-
import requests_html
import re
from bs4 import BeautifulSoup
import random
import time
import traceback
import pymysql
import gc

# Re-crawl book pages whose content ("内容") column came back empty on the
# first pass.  For each such book we re-render three pages on
# allhistory.com — the detail page (metadata), the reader page (chapter
# list / volume ids), and the chapter page itself (body text) — and insert
# a fresh row into books_tmp.

# Cookie lifted from a logged-in browser session.
# NOTE(review): the `bearer` JWT carries an expiry — refresh this string
# when requests start coming back unauthenticated.
COOKIE = "sid=01ed00704771ac06cd54bce221bdf1d3; UM_distinctid=172e54dc4ba918-09e998c97a9071-4353761-1fa400-172e54dc4bbb13; bearer=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJBSDEzMzMwNjI5NDkzMTQiLCJjcmVhdGVkIjoxNTkyOTg2OTY4OTU1LCJleHAiOjE1OTU1Nzg5NjgsInVzZXJpZCI6MTMzMzA2Mn0._pKjEjeqhoH1DYmtk3tgyNReI1uxPDpUht9keHfZgsx1iC5aN7LvFrFkyYmaBOuvaWvtrnsiL4SB5OtvmzdCrg; free=free; sys_lang=zh; CNZZDATA1277615859=332467984-1592983263-%7C1594343231; page.prev.id=1fy9t2pkcfkjbep; page.id=1fy9t1hkcfkjpjz; refer.url=https%3A%2F%2Fwww.allhistory.com%2Fbook%2Fhome-list; connect.sid=s%3A2V_EvxXWUY1ETnJHEA83pAK8w1VoM26k.m%2BGuOkYJaPQ0Ad2YZOwm2mvLRPIPQgkbCxkunOBGhMk"


def fetch_rendered_html(url, user_agent, timeout=15):
    """GET *url*, render its JavaScript, and return the resulting HTML.

    A fresh session is opened and closed per call: ``render()`` drives a
    Chromium subprocess, and the original code leaked one session (and
    browser) per request per book.
    """
    headers = {"User-Agent": user_agent, "Cookie": COOKIE}
    session = requests_html.HTMLSession()
    try:
        r = session.get(url, headers=headers)
        r.html.render(sleep=1, timeout=timeout)
        return r.html.html
    finally:
        session.close()


def scrape_field(pattern, html):
    """Return every match of *pattern* in *html*, joined into one string.

    An empty string means the field was absent from the page.
    """
    return ''.join(re.findall(pattern, html))


def page_text(page_html):
    """Extract and join the paragraph texts of one rendered chapter page."""
    soup = BeautifulSoup(page_html, 'lxml')
    paragraphs = []
    for block in soup.find_all('div', class_="dls-p"):
        # Prefer the inner <p class="p1"> when present; otherwise take the
        # whole block's text.
        p1 = block.find('p', class_='p1')
        paragraphs.append(p1.get_text() if p1 else block.get_text())
    return '\n'.join(paragraphs)


if __name__ == '__main__':
    conn = pymysql.Connect(host='10.170.128.56',
                           port=3306,
                           user='root',
                           passwd='123456',
                           db='books',
                           charset='utf8mb4')
    cur = conn.cursor()
    try:
        cur.execute("select book_id,`index` from books_tmp "
                    "where `内容`='' and `index` is not null")
        result = cur.fetchall()
        print("未抓取到页面数据共" + str(len(result)) + "条")
        for book_id, index in result:
            print((book_id, index))
            # The driver may hand `index` back as Decimal/str depending on
            # the column type; list indexing below needs a real int.
            index = int(index)
            try:
                user_agent = requests_html.user_agent()

                # 1) Detail page: metadata fields.
                html = fetch_rendered_html(
                    "https://www.allhistory.com/book/detail?id=" + book_id,
                    user_agent)
                content = scrape_field(r'<meta name="description" content="(.*?)">', html)
                name = scrape_field(r'<div class="bookname" title=".*?">(.*?)</div>', html)
                author = scrape_field(r'<div title="作者：(.*?) "', html)
                nationality = scrape_field(r'国籍: <span>(.*?)</span></div>&nbsp;', html)
                date = scrape_field(r'创作时间: <span>(.*?)</span></div>&nbsp', html)
                period = scrape_field(r'创作时期: <span>(.*?)</span></div>&nbsp', html)
                language = scrape_field(r'版本语言: <span>(.*?)</span></div>&nbsp', html)
                domain = scrape_field(r'书籍领域: <span>(.*?)</span></div>&nbsp', html)

                # 2) Reader page: chapter titles and their volume ids.
                text = fetch_rendered_html(
                    'https://www.allhistory.com/book/read?id=' + book_id + '&restrict=text',
                    user_agent)
                catalogs = re.findall(
                    r'<li class="book-menu-list-item.*?title="(.*?)".*?>', text, re.S)
                vids = re.findall(
                    r'<li class="book-menu-list-item.*?data-volumnid="(.*?)".*?>', text, re.S)
                print("id: " + book_id.replace('\n', '') + " 目录数：" + str(len(catalogs)))

                # 3) Chapter page: the body text of chapter `index`.
                vid_url = ('https://www.allhistory.com/book/read?id=' + book_id +
                           '&vid=' + vids[index] +
                           '&restrict=text&page=1&mode=undefined&single=ORIGINAL'
                           '&left=ZH_CC&right=')
                print(catalogs[index])
                datas = page_text(fetch_rendered_html(vid_url, user_agent, timeout=30))

                into = ("INSERT INTO books_tmp"
                        "(`书名`,`简介`,`作者`,`国籍`,`创作时间`,`创作时期`,"
                        "`时期标志`,`书籍领域`,`版本语言`,`目录`,`内容`,"
                        "book_id,`index`,catalogs) "
                        "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
                values = (name, content, author, nationality, date, period,
                          "1100", domain, language, catalogs[index], datas,
                          book_id, index, len(catalogs))
                cur.execute(into, values)
                conn.commit()
            except Exception:
                # One timed-out or malformed book must not abort the whole
                # re-crawl run — log it and move on to the next row.
                print("抓取失败 id: " + str(book_id))
                traceback.print_exc()
            # Polite random pause between books (same overall range as the
            # original's two back-to-back sleeps).
            time.sleep(random.randint(4, 8))
    except Exception:
        traceback.print_exc()
    finally:
        cur.close()
        conn.close()