# 更新古籍简介信息 (re-crawl and update the synopsis field of ancient books)
import requests_html
import re
from bs4 import BeautifulSoup
import random
import time
import traceback
import pymysql
import gc
import os





if __name__ == '__main__':

    # Re-crawl allhistory.com detail pages for books whose stored synopsis
    # (`简介`) looks truncated, and overwrite the column in small batches.
    conn = pymysql.Connect(host = '10.170.128.56',
                           port = 3306,
                           user = 'root',
                           passwd = '123456',
                           db = 'books',
                           charset='utf8mb4')
    cur1 = conn.cursor()
    # A synopsis that does not end with closing punctuation ('。', '）', '。”')
    # is assumed to have been cut off during the original crawl.
    sql="select book_id from (select `书名`,`简介`,book_id from books  where 简介 is not null and 简介 !=''   group by 书名,简介,book_id )t1 where 简介 not like '%。' and 简介 not like '%）' and 简介 not like '%。”'"
    cur1.execute(sql)
    result = cur1.fetchall()
    user_agent = requests_html.user_agent()
    # Parameterized update — avoids the quoting/injection problems of the
    # string-concatenated version that was previously commented out here.
    update = "update books set `简介`=%s where book_id=%s"
    values = []
    # One shared session for every request (the original created a new
    # HTMLSession per book and never closed any of them).
    session = requests_html.HTMLSession()
    headers = {"User-Agent": user_agent,
               "Cookie": "sid=01ed00704771ac06cd54bce221bdf1d3; UM_distinctid=172e54dc4ba918-09e998c97a9071-4353761-1fa400-172e54dc4bbb13; bearer=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJBSDEzMzMwNjI5NDkzMTQiLCJjcmVhdGVkIjoxNTkyOTg2OTY4OTU1LCJleHAiOjE1OTU1Nzg5NjgsInVzZXJpZCI6MTMzMzA2Mn0._pKjEjeqhoH1DYmtk3tgyNReI1uxPDpUht9keHfZgsx1iC5aN7LvFrFkyYmaBOuvaWvtrnsiL4SB5OtvmzdCrg; free=free; sys_lang=zh; _cbshow=true; CNZZDATA1277615859=332467984-1592983263-%7C1593658519; page.prev.id=1fy9t13kc42t50y; page.id=1fy9t1akc4a2ngf; refer.url=https%3A%2F%2Fwww.allhistory.com%2Fbook%2Fhome-list; connect.sid=s%3AijgWq0_OQhrR4m0QJsB8ErokdhAfcE5C.w3QbAw6bLl3lSxaDogVc9GHPaI2NeAA6ufklEyIzDb4"
               }
    try:
        for row in result:
            book_id = row[0]
            url = "https://www.allhistory.com/book/detail?id=" + book_id
            r = session.get(url, headers=headers)
            # The synopsis is rendered client-side, so execute the page's JS
            # with the headless browser before parsing.
            r.html.render(sleep=1, timeout=10)
            soup = BeautifulSoup(r.html.html, 'lxml')

            divs = soup.find_all('div', class_="desc-content book-content-desc selected")
            if not divs:
                # No synopsis found: log the id and skip this book.
                # (The original fell through here with `content` still holding
                # the PREVIOUS book's text — a NameError on the first miss and
                # wrong-row writes afterwards.)
                print(book_id)
                continue
            content = '\t' + divs[0].get_text().replace('\n', '').replace(' ', '').replace('\t', '')

            # Keep only complete sentences: drop any trailing fragment after
            # the last full stop '。'.
            cons = content.split('。')
            con = ''.join(sentence + '。' for sentence in cons[:-1])

            values.append((con, book_id))
            # Flush in batches so an interruption loses at most ~10 updates.
            if len(values) > 10:
                cur1.executemany(update, values)
                conn.commit()
                print('提交修改')
                values = []
            time.sleep(1)  # throttle requests to be polite to the server

        # Flush the final partial batch, if any.
        if values:
            cur1.executemany(update, values)
            conn.commit()
    finally:
        # Always release browser session and DB resources, even on error.
        session.close()
        cur1.close()
        conn.close()
