#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests_html
import re
from bs4 import BeautifulSoup
import random
import time
import traceback
import pymysql
import gc
import os


def _page_text(page_html):
    """Join the paragraph texts of one rendered chapter page into a single string.

    Each chapter page holds its text in ``<div class="dls-p">`` blocks; when a
    block contains a ``<p class="p1">`` child, that child's text is preferred.
    """
    soup = BeautifulSoup(page_html, 'lxml')
    parts = []
    for block in soup.find_all('div', class_="dls-p"):
        p1 = block.find('p', class_='p1')
        parts.append(p1.get_text() if p1 else block.get_text())
    return '\n'.join(parts)


if __name__ == '__main__':
    # ---- Resume phase ----------------------------------------------------
    # Look up the last row written by a previous run (book id, chapter index,
    # chapter count) so crawling can continue where it stopped.
    # NOTE(review): DB credentials and session cookies are hard-coded below;
    # they belong in configuration/environment, not in source.
    conn = pymysql.Connect(host='10.170.128.56',
                           port=3306,
                           user='root',
                           passwd='123456',
                           db='books',
                           charset='utf8mb4')
    cur1 = conn.cursor()
    query = "select book_id,`index`,catalogs from books_tmp where id=(select max(id) from books_tmp)"
    cur1.execute(query)
    result = cur1.fetchall()
    user_agent = requests_html.user_agent()

    # Browser session cookies captured from logged-in sessions (they expire).
    cookie_detail = "sid=01ed00704771ac06cd54bce221bdf1d3; UM_distinctid=172e54dc4ba918-09e998c97a9071-4353761-1fa400-172e54dc4bbb13; bearer=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJBSDEzMzMwNjI5NDkzMTQiLCJjcmVhdGVkIjoxNTkyOTg2OTY4OTU1LCJleHAiOjE1OTU1Nzg5NjgsInVzZXJpZCI6MTMzMzA2Mn0._pKjEjeqhoH1DYmtk3tgyNReI1uxPDpUht9keHfZgsx1iC5aN7LvFrFkyYmaBOuvaWvtrnsiL4SB5OtvmzdCrg; free=free; sys_lang=zh; _cbshow=true; CNZZDATA1277615859=332467984-1592983263-%7C1593658519; page.prev.id=1fy9t13kc42t50y; page.id=1fy9t1akc4a2ngf; refer.url=https%3A%2F%2Fwww.allhistory.com%2Fbook%2Fhome-list; connect.sid=s%3AijgWq0_OQhrR4m0QJsB8ErokdhAfcE5C.w3QbAw6bLl3lSxaDogVc9GHPaI2NeAA6ufklEyIzDb4"
    cookie_chapter = "sid=c33acb969ed91b06e2a98e55d405c4e8; sys_lang=zh; UM_distinctid=172f369309e477-0121f77c4e3b6-551d3911-144000-172f369309f4b2; _cbshow=true; CNZZDATA1277615859=1385762309-1593221660-%7C1593248835; aha=WEBmsg8618634976469LOGIN; bearer=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJBSDEzNDI1MjY4MDczNTgiLCJjcmVhdGVkIjoxNTkzMjQ5ODQ2ODEzLCJleHAiOjE1OTU4NDE4NDYsInVzZXJpZCI6MTM0MjUyNn0.BTNaUgUmhnMuPuSNBemw3m5vJsYfnHiGluQUnpnJ51-TcFUip1EbKigeb2nIZge4AJSQwzwy1GSyR5iChoV98A; free=free; page.prev.id=11v96nj13kbxfluzc; page.id=11v96nj1skbxftmmm; refer.url=https%3A%2F%2Fwww.allhistory.com%2Fbook%2Fhome-list%2F; connect.sid=s%3A3HhrLMtGhtlwHp0OTICuZTiO1UqGwWS9.nJ%2B%2B1tpfya8vb9Yntezcy77jE5YEJk6HAg6bRZkinSE"
    cookie_tail = "sid=c33acb969ed91b06e2a98e55d405c4e8; UM_distinctid=172f369309e477-0121f77c4e3b6-551d3911-144000-172f369309f4b2; bearer=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJBSDEzNDI1MjY4MDczNTgiLCJjcmVhdGVkIjoxNTkzMjQ5ODQ2ODEzLCJleHAiOjE1OTU4NDE4NDYsInVzZXJpZCI6MTM0MjUyNn0.BTNaUgUmhnMuPuSNBemw3m5vJsYfnHiGluQUnpnJ51-TcFUip1EbKigeb2nIZge4AJSQwzwy1GSyR5iChoV98A; free=free; sys_lang=zh; CNZZDATA1277615859=1385762309-1593221660-%7C1593432032; _cbshow=true; page.prev.id=1fy9t13kc0ig8mp; page.id=1fy9t13kc0imozx; refer.url=https%3A%2F%2Fwww.allhistory.com%2F; connect.sid=s%3ATzzXQyisJrU5Z9mvS4JKlaEs6uVbMuuh.s1y%2FSZJIjlsutTkrg9r9NAKwnsWnP3srk2qG5qOwT2E"
    insert_full = "INSERT INTO books_tmp(`书名`,`简介`,`作者`,`国籍`,`创作时间`,`创作时期`,`时期标志`,`书籍领域`,`版本语言`,`目录`,`内容`,book_id,`index`,catalogs) VALUES (%s,%s, %s, %s,%s,%s, %s, %s,%s,%s,%s,%s,%s,%s)"

    if result:
        cur_book_id = result[0][0]   # book in progress when the last run stopped
        cur_index = result[0][1]     # last chapter index already stored
        catalogs = result[0][2]      # total chapter count recorded for that book
        # Rebuild the pending-id list: every id in the master file that comes
        # AFTER the current book.  Mode "w" truncates any old file, so the
        # former os.remove() (which raised FileNotFoundError when the file was
        # missing) is unnecessary, and `with` closes both handles on error.
        found = False
        with open("C:\\Users\\diao\\Desktop\\tmp_ids.txt", "w", encoding='utf-8') as ids, \
             open("C:\\Users\\diao\\Desktop\\100bc.txt", "r", encoding='utf-8') as src:
            for raw_id in src:
                if found:
                    ids.write(raw_id)
                elif raw_id.replace('\n', '') == cur_book_id:
                    # Ids up to and including the current book are done already.
                    found = True
        # If the current book's chapters were not all fetched, finish it first.
        if cur_index is not None:
            if cur_index != catalogs - 1:
                url = "https://www.allhistory.com/book/detail?id=" + cur_book_id
                session = requests_html.HTMLSession()
                headers = {"User-Agent": user_agent, "Cookie": cookie_detail}
                r = session.get(url, headers=headers)
                r.html.render(sleep=1, timeout=15)   # JS-rendered page
                html = r.html.html
                # Book metadata scraped from the rendered detail page.
                content = ''.join(re.findall(r'<meta name="description" content="(.*?)">', html))
                name = ''.join(re.findall(r'<div class="bookname" title=".*?">(.*?)</div>', html))
                author = ''.join(re.findall(r'<div title="作者：(.*?) "', html))
                nationality = ''.join(re.findall(r'国籍: <span>(.*?)</span></div>&nbsp;', html))
                date = ''.join(re.findall(r'创作时间: <span>(.*?)</span></div>&nbsp', html))
                period = ''.join(re.findall(r'创作时期: <span>(.*?)</span></div>&nbsp', html))
                language = ''.join(re.findall(r'版本语言: <span>(.*?)</span></div>&nbsp', html))
                domain = ''.join(re.findall(r'书籍领域: <span>(.*?)</span></div>&nbsp', html))
                text_url = 'https://www.allhistory.com/book/read?id=' + cur_book_id + '&restrict=text'
                session = requests_html.HTMLSession()
                headers = {"User-Agent": user_agent, "Cookie": cookie_detail}
                text = session.get(text_url, headers=headers)
                text.html.render(sleep=1, timeout=15)
                text = text.html.html
                # Chapter titles and volume ids scraped from the reader menu.
                catalogs = re.findall(r'<li class="book-menu-list-item.*?title="(.*?)".*?>', text, re.S)
                vids = re.findall(r'<li class="book-menu-list-item.*?data-volumnid="(.*?)".*?>', text, re.S)
                last = 0
                print("有详情页book_id: " + cur_book_id + " 目录数：" + str(len(catalogs)))
                # Chapters are fetched in pairs; an odd trailing chapter is
                # fetched separately via `last`.
                if len(catalogs) % 2 != 0:
                    last = len(catalogs) - 1
                for i in range((cur_index // 2), len(catalogs) // 2):
                    for j in range(2):
                        index = 2 * i + j
                        if index >= cur_index + 1:   # skip chapters already stored
                            vid_url = ('https://www.allhistory.com/book/read?id=' + cur_book_id + '&vid='
                                       + vids[index] + '&restrict=text&page=1&mode=undefined&single=ORIGINAL&left=ZH_CC&right=')
                            session = requests_html.HTMLSession()
                            headers = {"User-Agent": user_agent, "Cookie": cookie_chapter}
                            data = session.get(vid_url, headers=headers)
                            data.html.render(sleep=1, timeout=30)
                            datas = _page_text(data.html.html)
                            values = (name, content, author, nationality, date, period, "700AD", domain,
                                      language, catalogs[index], datas, cur_book_id, index, len(catalogs))
                            cur1.execute(insert_full, values)
                            conn.commit()
                            print(str(index) + ": " + catalogs[index])
                            # Polite crawl delay.
                            time.sleep(random.randint(1, 2))
                    time.sleep(1)
                if last != 0:
                    # Trailing odd chapter.
                    vid_url = ('https://www.allhistory.com/book/read?id=' + cur_book_id + '&vid='
                               + vids[last] + '&restrict=text&page=1&mode=undefined&single=ORIGINAL&left=ZH_CC&right=')
                    session = requests_html.HTMLSession()
                    headers = {"User-Agent": user_agent, "Cookie": cookie_tail}
                    data = session.get(vid_url, headers=headers)
                    data.html.render(sleep=1, timeout=30)
                    datas = _page_text(data.html.html)
                    values = (name, content, author, nationality, date, period, "700AD", domain,
                              language, catalogs[last], datas, cur_book_id, last, len(catalogs))
                    cur1.execute(insert_full, values)
                    conn.commit()
    # Always release the resume-phase DB handles; the original closed them on
    # only one branch and leaked the connection on every other path.
    cur1.close()
    conn.close()


    # ---- Main phase: crawl every pending book id -------------------------
    def _chapter_text(page_html):
        """Join the paragraph texts of one rendered chapter page into a single string.

        Each chapter page holds its text in ``<div class="dls-p">`` blocks; when a
        block contains a ``<p class="p1">`` child, that child's text is preferred.
        """
        soup = BeautifulSoup(page_html, 'lxml')
        parts = []
        for block in soup.find_all('div', class_="dls-p"):
            p1 = block.find('p', class_='p1')
            parts.append(p1.get_text() if p1 else block.get_text())
        return '\n'.join(parts)

    # Browser session cookies captured from logged-in sessions (they expire).
    # NOTE(review): these should come from configuration, not source.
    crawl_cookie = "sid=01ed00704771ac06cd54bce221bdf1d3; UM_distinctid=172e54dc4ba918-09e998c97a9071-4353761-1fa400-172e54dc4bbb13; bearer=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJBSDEzMzMwNjI5NDkzMTQiLCJjcmVhdGVkIjoxNTkyOTg2OTY4OTU1LCJleHAiOjE1OTU1Nzg5NjgsInVzZXJpZCI6MTMzMzA2Mn0._pKjEjeqhoH1DYmtk3tgyNReI1uxPDpUht9keHfZgsx1iC5aN7LvFrFkyYmaBOuvaWvtrnsiL4SB5OtvmzdCrg; free=free; sys_lang=zh; _cbshow=true; CNZZDATA1277615859=332467984-1592983263-%7C1593658519; page.prev.id=1fy9t13kc42t50y; page.id=1fy9t1akc4a2ngf; refer.url=https%3A%2F%2Fwww.allhistory.com%2Fbook%2Fhome-list; connect.sid=s%3AijgWq0_OQhrR4m0QJsB8ErokdhAfcE5C.w3QbAw6bLl3lSxaDogVc9GHPaI2NeAA6ufklEyIzDb4"
    crawl_cookie_tail = "sid=c33acb969ed91b06e2a98e55d405c4e8; UM_distinctid=172f369309e477-0121f77c4e3b6-551d3911-144000-172f369309f4b2; bearer=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJBSDEzNDI1MjY4MDczNTgiLCJjcmVhdGVkIjoxNTkzMjQ5ODQ2ODEzLCJleHAiOjE1OTU4NDE4NDYsInVzZXJpZCI6MTM0MjUyNn0.BTNaUgUmhnMuPuSNBemw3m5vJsYfnHiGluQUnpnJ51-TcFUip1EbKigeb2nIZge4AJSQwzwy1GSyR5iChoV98A; free=free; sys_lang=zh; CNZZDATA1277615859=1385762309-1593221660-%7C1593432032; _cbshow=true; page.prev.id=1fy9t13kc0ig8mp; page.id=1fy9t13kc0imozx; refer.url=https%3A%2F%2Fwww.allhistory.com%2F; connect.sid=s%3ATzzXQyisJrU5Z9mvS4JKlaEs6uVbMuuh.s1y%2FSZJIjlsutTkrg9r9NAKwnsWnP3srk2qG5qOwT2E"
    crawl_insert = "INSERT INTO books_tmp(`书名`,`简介`,`作者`,`国籍`,`创作时间`,`创作时期`,`时期标志`,`书籍领域`,`版本语言`,`目录`,`内容`,book_id,`index`,catalogs) VALUES (%s,%s, %s, %s,%s,%s, %s, %s,%s,%s,%s,%s,%s,%s)"

    with open("C:\\Users\\diao\\Desktop\\tmp_ids.txt", "r") as f:
        for line in f:
            book_id = line.replace('\n', '')
            # One connection per book id so a broken connection cannot poison
            # the rest of the run.
            conn = pymysql.Connect(host='10.170.128.56',
                                   port=3306,
                                   user='root',
                                   passwd='123456',
                                   db='books',
                                   charset='utf8mb4')
            cur = conn.cursor()
            try:
                url = "https://www.allhistory.com/book/detail?id=" + book_id
                session = requests_html.HTMLSession()
                headers = {"User-Agent": user_agent, "Cookie": crawl_cookie}
                r = session.get(url, headers=headers)
                r.html.render(sleep=1, timeout=15)   # JS-rendered page
                html = r.html.html
                # Book metadata scraped from the rendered detail page.
                content = ''.join(re.findall(r'<meta name="description" content="(.*?)">', html))
                name = ''.join(re.findall(r'<div class="bookname" title=".*?">(.*?)</div>', html))
                author = ''.join(re.findall(r'<div title="作者：(.*?) "', html))
                nationality = ''.join(re.findall(r'国籍: <span>(.*?)</span></div>&nbsp;', html))
                date = ''.join(re.findall(r'创作时间: <span>(.*?)</span></div>&nbsp', html))
                period = ''.join(re.findall(r'创作时期: <span>(.*?)</span></div>&nbsp', html))
                language = ''.join(re.findall(r'版本语言: <span>(.*?)</span></div>&nbsp', html))
                domain = ''.join(re.findall(r'书籍领域: <span>(.*?)</span></div>&nbsp', html))
                # Does the page show a "read the book" button (full text available)?
                read = re.findall(r'<div class="button-read book-text-bottom">([\s\S]*?)</div>', html, re.S)
                if read:
                    if '阅读书籍' in read[0]:
                        text_url = 'https://www.allhistory.com/book/read?id=' + book_id + '&restrict=text'
                        session = requests_html.HTMLSession()
                        headers = {"User-Agent": user_agent, "Cookie": crawl_cookie}
                        text = session.get(text_url, headers=headers)
                        text.html.render(sleep=1, timeout=15)
                        text = text.html.html
                        # Chapter titles and volume ids from the reader menu.
                        catalogs = re.findall(r'<li class="book-menu-list-item.*?title="(.*?)".*?>', text, re.S)
                        vids = re.findall(r'<li class="book-menu-list-item.*?data-volumnid="(.*?)".*?>', text, re.S)
                        last = 0
                        print("有详情页book_id: " + book_id + " 目录数：" + str(len(catalogs)))
                        if len(catalogs) == 1:
                            # Single-chapter book: fetch and store the one chapter.
                            vid_url = ('https://www.allhistory.com/book/read?id=' + book_id + '&vid='
                                       + vids[0] + '&restrict=text&page=1&mode=undefined&single=ORIGINAL&left=ZH_CC&right=')
                            session = requests_html.HTMLSession()
                            headers = {"User-Agent": user_agent, "Cookie": crawl_cookie}
                            data = session.get(vid_url, headers=headers)
                            data.html.render(sleep=1, timeout=30)
                            datas = _chapter_text(data.html.html)
                            values = (name, content, author, nationality, date, period, "700AD", domain,
                                      language, catalogs[0], datas, book_id, 0, 1)
                            cur.execute(crawl_insert, values)
                            conn.commit()
                            time.sleep(2)
                        else:
                            if len(catalogs) > 2000:
                                # Oversized books are deferred to a side list
                                # instead of being crawled here.
                                with open("C:\\Users\\diao\\Desktop\\large_id1.txt", "a", encoding='utf-8') as target:
                                    target.write(line)
                            else:
                                # Chapters are fetched in pairs; an odd trailing
                                # chapter is fetched separately via `last`.
                                if len(catalogs) % 2 != 0:
                                    last = len(catalogs) - 1
                                for i in range(len(catalogs) // 2):
                                    for j in range(2):
                                        index = 2 * i + j
                                        vid_url = ('https://www.allhistory.com/book/read?id=' + book_id + '&vid='
                                                   + vids[index] + '&restrict=text&page=1&mode=undefined&single=ORIGINAL&left=ZH_CC&right=')
                                        session = requests_html.HTMLSession()
                                        headers = {"User-Agent": user_agent, "Cookie": crawl_cookie}
                                        data = session.get(vid_url, headers=headers)
                                        data.html.render(sleep=1, timeout=30)
                                        datas = _chapter_text(data.html.html)
                                        values = (name, content, author, nationality, date, period, "700AD", domain,
                                                  language, catalogs[index], datas, book_id, index, len(catalogs))
                                        cur.execute(crawl_insert, values)
                                        conn.commit()
                                        # Polite crawl delay.
                                        time.sleep(random.randint(1, 2))
                                    time.sleep(1)
                                if last != 0:
                                    # Trailing odd chapter.
                                    vid_url = ('https://www.allhistory.com/book/read?id=' + book_id + '&vid='
                                               + vids[last] + '&restrict=text&page=1&mode=undefined&single=ORIGINAL&left=ZH_CC&right=')
                                    session = requests_html.HTMLSession()
                                    headers = {"User-Agent": user_agent, "Cookie": crawl_cookie_tail}
                                    data = session.get(vid_url, headers=headers)
                                    data.html.render(sleep=1, timeout=30)
                                    datas = _chapter_text(data.html.html)
                                    values = (name, content, author, nationality, date, period, "700AD", domain,
                                              language, catalogs[last], datas, book_id, last, len(catalogs))
                                    cur.execute(crawl_insert, values)
                                    conn.commit()
                    # NOTE(review): when the button exists but lacks the
                    # "阅读书籍" label nothing is stored — preserved from the
                    # original; confirm this is intended.
                else:
                    # No read button: store metadata only.
                    print("无详情页book_id: " + book_id)
                    into = "INSERT INTO books_tmp(`书名`,`简介`,`作者`,`国籍`,`创作时间`,`创作时期`,`时期标志`,`书籍领域`,`版本语言`,book_id) VALUES (%s,%s, %s, %s,%s,%s, %s, %s,%s,%s)"
                    values = (name, content, author, nationality, date, period, "700AD", domain, language, book_id)
                    cur.execute(into, values)
                    conn.commit()
                time.sleep(random.randint(2, 4))
            except Exception:
                traceback.print_exc()
                print("超时id: " + line)
            finally:
                # Close per-id DB resources on BOTH success and failure; the
                # original skipped the close on exceptions (`continue` before
                # close), leaking one connection per failed id.
                cur.close()
                conn.close()

