#!/usr/bin/python3
#coding:utf-8
'''
Created on 2018-06-22

@author: 许瑞锐

@email：1549359145@qq.com
'''
import requests,random,re,time,sys
import user_agent,DBOperator
from bs4 import BeautifulSoup

class DBSpider():
    """Crawler for book.douban.com.

    Pipeline: collect tag URLs from the front page, expand each tag into
    paginated listing URLs, scrape every book detail page found on those
    listings, and hand the parsed records to DBOperator for storage.
    """

    def __init__(self):
        # Randomize the User-Agent per spider instance to look less bot-like.
        self.headers={'User-Agent':random.choice(user_agent.list),
            'Accept':"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            'Accept-Encoding':'gzip',
        }
        self.tagUrl="https://book.douban.com"
        self.pageLimit=20   # number of listing pages fetched per tag
        self.sleepTime=5    # seconds to pause between requests (politeness delay)
        self.debug=False

    def getTags(self):
        """Fetch the Douban books front page and return the set of absolute tag URLs."""
        r=requests.get(self.tagUrl,headers=self.headers)
        # BUG FIX: requests uses the lowercase ``encoding`` attribute;
        # ``r.Encoding`` just created an unused attribute and did nothing.
        r.encoding='utf-8'
        soup=BeautifulSoup(r.text,'html.parser')
        tags=soup.find_all(attrs={'class':'tag'})
        tagSet=set()
        for tag in tags:
            # Keep only elements whose class list is exactly ['tag'] to
            # exclude nodes that merely contain 'tag' among other classes.
            if tag['class']==['tag']:
                tagSet.add(self.tagUrl+tag['href'])
        if tagSet:
            print('获取标签成功！')
            print(tagSet)
        return tagSet

    def getBookPageUrl(self,tagSet):
        """Build the paginated listing URLs (?start=N&type=T) for every tag URL.

        Each tag yields pages start=20, 40, ... up to (pageLimit-1)*20.
        """
        bookPageUrl=set()
        for tagSetUrl in tagSet:
            # Use the configured page limit instead of a hard-coded 20,
            # keeping it consistent with the attribute declared in __init__.
            for num in range(1,self.pageLimit):
                url=tagSetUrl+'?start='+str(num*20)+'&type=T'
                bookPageUrl.add(url)
        if bookPageUrl:
            print('获取书叶成功！')
            print(bookPageUrl)
        return bookPageUrl

    def parseBookPageContent(self,bookPageUrls):
        """Fetch every listing page and return the set of book detail URLs found."""
        bookUrl=set()
        for BookPageUrl in bookPageUrls:
            r=requests.get(BookPageUrl,headers=self.headers)
            # BUG FIX: was ``r.Encoding`` (a silent no-op attribute).
            r.encoding='utf-8'
            soup=BeautifulSoup(r.text,'html.parser')
            pageA=soup.find_all(attrs={'class':'info'})
            for A in pageA:
                # The first <a> inside each 'info' div links to the book page.
                bookUrl.add(A.a['href'])
            print(BookPageUrl+'解析成功')
            print('解析暂停')
            time.sleep(self.sleepTime)
            print('解析重启')
        return bookUrl

    def parseAllBookPageContent(self,bookPageUrl):
        """Resolve listing pages to book URLs, then scrape each book.

        Returns a list of book-data dicts; books whose page fails to parse
        are logged and skipped rather than aborting the crawl.
        """
        print(bookPageUrl)
        urls=self.parseBookPageContent(bookPageUrl)
        dataSet=list()
        for url in urls:
            print('爬取'+url+'的图书')
            try:
                data=self.parseBookConent(url)
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # can still stop the crawl instead of being swallowed per book.
            except Exception:
                print('提取数据有误,忽略该条数据')
                continue
            print('提取成功！')
            dataSet.append(data)
            print(data)
            print('爬取暂停')
            time.sleep(self.sleepTime)
            print('爬取重启')
        return dataSet

    def parseBookConent(self,bookUrl):
        """Scrape one book detail page into a dict.

        Returns keys: id, name, author, price, intro, tags, rate_num,
        vote_num, img_url, comment. Raises (AttributeError/ValueError/...)
        on unexpected page structure; the caller handles and skips.
        """
        r=requests.get(bookUrl,headers=self.headers)
        # BUG FIX: was ``r.Encoding`` (a silent no-op attribute).
        r.encoding='utf-8'
        soup=BeautifulSoup(r.text,'html.parser')
        # The numeric book id is embedded in the detail-page URL.
        result=re.search('[0-9]{1,11}',bookUrl)
        bookID=result.group()
        bookName=soup.find('span',attrs={'property':'v:itemreviewed'}).text
        print(bookName)
        bookAuthor=soup.find('a',href=re.compile("author")).text.replace(' ','').replace('\n','')
        pl=soup.find('div',attrs={'id':'info'})
        # Price appears as a decimal like 39.50 somewhere in the info block.
        result=re.search('[0-9]{1,3}[.]{1}[0-9]{1,2}',pl.text)
        bookPrice=result.group()
        bookIntro=soup.find('div',attrs={'class':'intro'}).text.replace('\n','')
        bookTags=set()
        for tag in soup.find_all('a',attrs={'class':'tag'}):
            bookTags.add(tag.text)
        bookRateNum=soup.find('strong',attrs={'class':'ll'}).text
        bookVoteNum=soup.find('span',attrs={'property':'v:votes'}).text
        bookImgUrl=soup.find('img',attrs={'rel':'v:photo'})['src']
        bookComment=set()
        # BUG FIX: attrs must be a dict. The original {'class','comment-content'}
        # was a *set* literal, which made bs4 raise on every book, so the
        # caller's except handler silently discarded every single record.
        for resultNode in soup.find_all('p',attrs={'class':'comment-content'}):
            bookComment.add(resultNode.text)

        data={'id':bookID,'name':bookName,'author':bookAuthor,'price':bookPrice,'intro':bookIntro,
        'tags':bookTags,'rate_num':bookRateNum,'vote_num':bookVoteNum,'img_url':bookImgUrl,'comment':bookComment
        }
        return data

    def run(self):
        """Full pipeline: tags -> listing pages -> book records -> database."""
        tagSet=self.getTags()
        bookPageUrls=self.getBookPageUrl(tagSet)
        dataSet=self.parseAllBookPageContent(bookPageUrls)
        db=DBOperator.DB()
        db.InserInfos(dataSet)
if __name__=='__main__':
    # Script entry point: build the spider and run the full crawl-and-store pipeline.
    DBSpider().run()
    
