#-*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import lxml
import requests
import xlsxwriter
import html5lib

class Spider():
    """Crawler for d1xz.net dream-interpretation pages.

    Starting from one entry URL, it walks three levels of links
    (category index -> article list -> article detail), scrapes each
    article's fields, and writes one row per article into an xlsx file.
    """

    # Site root; every scraped href is relative and gets prefixed with this.
    BASE_URL = 'https://www.d1xz.net'

    # Spreadsheet column headers, in the exact order rows are written.
    HEADERS = ('id', 'sort', 'title', 'explain', 'img_url',
               'title1', 'title2', 'title3', 'title4', 'title5',
               'title6', 'title7', 'title8', 'title9', 'title10',
               'answer')

    # 初始化参数
    def __init__(self, urlList):
        # Entry URL of the crawl (a single URL string, despite the name).
        self.urlList = urlList
        # Accumulates article-detail URLs found during second-level parsing.
        self.third_url = []
        # Current write position in the worksheet.
        self.row = 0
        self.col = 0
        # Workbook/worksheet handles; created lazily by setExcel().
        # (Attribute names keep the original spelling for compatibility.)
        self.wookbook = None
        self.wooksheet = None
        # Running row id written into the first column.
        self.id = 0

    # 下载
    def downLoad(self, add_url):
        """Fetch `add_url` and return the response body as text.

        A timeout is set so a stalled server cannot hang the whole crawl.
        """
        print('开始下载html')
        res = requests.get(add_url, timeout=10)
        return res.text

    # 一级解析
    def analy_one(self, html):
        """Parse the entry page and return the list of category URLs."""
        print('----------开始一级解析-----------------')
        soup = BeautifulSoup(html, 'html5lib')
        return [self.BASE_URL + a['href']
                for a in soup.select('.zxsm_subnav div a')]

    # 二级解析
    def analy_two(self, html):
        """Parse a category page; append every article URL to self.third_url."""
        print('----------开始二级解析-----------------')
        soup = BeautifulSoup(html, 'html5lib')
        for a in soup.select('.words_list_ui li a'):
            self.third_url.append(self.BASE_URL + a['href'])

    # 三级解析
    def analy_three(self, html):
        """Parse an article page and push one row of data into the sheet.

        Any article whose layout deviates from the expected structure
        (missing paragraphs, missing answer link, failed follow-up
        download) is skipped with a diagnostic instead of aborting the
        whole crawl.
        """
        print('----------开始三级解析-----------------')
        try:
            soup = BeautifulSoup(html, 'html5lib')
            sort = soup.select('.cur_postion span:nth-of-type(3) a')[0].string
            title = soup.select('.art_detail_title')[0].string
            explain = soup.select('.common_det_con p:nth-of-type(1)')[0].get_text()
            img_url = soup.select('.common_det_con p:nth-of-type(2) img')[0]['src']
            # title1..title10 live in paragraphs 3..12 of the detail body.
            titles = [
                soup.select('.common_det_con p:nth-of-type(%d)' % i)[0].get_text()
                for i in range(3, 13)
            ]
            # Paragraph 13 links to a separate "answer" page; fetch and parse it.
            answerUrl = soup.select('.common_det_con p:nth-of-type(13) a')[0]['href']
            answer = self.answerAnaly(self.downLoad(answerUrl))
            self.pushData(sort, title, explain, img_url, *titles, answer)
        except Exception as e:
            # Best-effort per article: report the reason, keep crawling.
            print('出错了', e)

    # 答案解析
    def answerAnaly(self, html):
        """Extract the answer text from an answer page."""
        soup = BeautifulSoup(html, 'html5lib')
        return soup.select('.common_det_con')[0].get_text()

    # 建立excel
    def setExcel(self, path='/home/wwwroot/default/python/test.xlsx'):
        """Create the workbook at `path` and write the header row.

        `path` defaults to the original hard-coded location so existing
        callers are unaffected.
        """
        self.wookbook = xlsxwriter.Workbook(path)
        self.wooksheet = self.wookbook.add_worksheet()
        for offset, header in enumerate(self.HEADERS):
            self.wooksheet.write(self.row, self.col + offset, header)

    # 插入excel数据
    def pushData(self, sort, title, explain, img_url, title1, title2, title3,
                 title4, title5, title6, title7, title8, title9, title10,
                 answer):
        """Append one article as a new worksheet row (auto-incrementing id)."""
        self.row += 1
        self.id += 1
        values = (self.id, sort, title, explain, img_url,
                  title1, title2, title3, title4, title5,
                  title6, title7, title8, title9, title10,
                  answer)
        for offset, value in enumerate(values):
            self.wooksheet.write(self.row, self.col + offset, value)

    # 关闭excle
    def closeExcel(self):
        """Flush and close the workbook (required for a valid xlsx file)."""
        self.wookbook.close()

    # 主调度函数
    def main(self):
        """Run the full crawl: entry page -> categories -> articles -> xlsx."""
        # 先下载入口url
        html = self.downLoad(self.urlList)
        print('--------下载入口url完毕---------------')
        print('------------开始解析---------------------')
        second_url = self.analy_one(html)
        print('----------一级解析完毕-------------------')
        print('-------------开始下载一级解析出来的新的url---------------')
        self.setExcel()
        for url in second_url:
            self.analy_two(self.downLoad(url))
        print('------------二级下载解析完毕-----------')
        print('--------开始第三次下载解析-------------')
        for url in self.third_url:
            self.analy_three(self.downLoad(url))
        print("-------全部数据下载完毕并保存到excel中-------")
        self.closeExcel()

# 入口函数
# 入口函数
if __name__ == "__main__":
    # Entry URL of the crawl; the Spider walks everything reachable from it.
    entry_url = "https://www.d1xz.net/test/"
    Spider(entry_url).main()
