# -*- coding: utf-8 -*-
# Implements the request, response, and parsing flow for the crawl.

import re
import threading
import urllib
import urllib.request

import scrapy
from lxml import etree
from scrapy import Selector

from novelget.items import BookInfoItem, ChapterItem

class BookFinder(scrapy.Spider):
    """Spider that crawls every chapter of one novel on biquge.com.cn.

    The book's table of contents is fetched synchronously in ``__init__``
    to build ``start_urls``; each chapter page is then parsed into a
    ``ChapterItem`` (index, title, body text).
    """

    name = 'context'  # spider name; required to launch the crawl via scrapy
    lock = threading.Lock()

    def __init__(self, *args, **kwargs):
        # BUG FIX: a scrapy.Spider subclass that overrides __init__ must
        # call super().__init__() so the base class wires up name/args.
        super().__init__(*args, **kwargs)
        self.cursor = 0
        self.allowed_domains = ['www.biquge.com.cn']
        self.prefix = 'https://www.biquge.com.cn'
        searchpage = 'https://www.biquge.com.cn/book/39731/'

        # NOTE(review): blocking network I/O in __init__ runs before the
        # Scrapy engine starts; acceptable for a one-off crawl.
        chapter_hrefs = self.getChapterIndex(searchpage)
        self.start_urls = [self.prefix + href for href in chapter_hrefs]
        print(self.start_urls)

    def start_requests(self):
        """Yield one Request per chapter URL; responses go to parse()."""
        for url in self.start_urls:
            yield scrapy.Request(url=url)

    def parse(self, response):
        """Build a ChapterItem from a chapter page response.

        Fills 'index' (numeric chapter id from the URL), 'title'
        (page heading) and 'context' (chapter body text).
        """
        item = ChapterItem()
        item['index'] = self.getIndex(response)
        item['title'] = self.getTitle(response)
        item['context'] = self.getContext(response)
        return item

    def getIndex(self, response):
        """Extract the numeric chapter id from a URL like '.../39731/12345.html'.

        Raises ValueError if the segment between the last '/' and the last
        '.html' is not an integer.
        """
        url = response.url
        start = url.rfind('/')
        end = url.rfind('.html')
        return int(url[start + 1:end])

    def getTitle(self, response):
        """Return the chapter title text from the reader page header.

        Raises IndexError if the selector matches nothing.
        """
        regx = "//div[@class='content_read']//div[@class='bookname']/h1/text()"
        return response.xpath(regx).extract()[0]

    def getContext(self, response):
        """Return the chapter body as one string (XPath string() of #content).

        Raises IndexError if the selector matches nothing.
        """
        regx = "string(//div[@class='content_read']//div[@id='content'])"
        return response.xpath(regx).extract()[0]

    def getChapterIndex(self, searchpage):
        """Fetch the book's table-of-contents page and return the relative
        href of every chapter link (list of str).

        BUG FIX: close the HTTP response via a context manager instead of
        leaking the connection.
        """
        with urllib.request.urlopen(searchpage) as resp:
            rawdata = resp.read().decode("utf-8")
        tree = etree.HTML(rawdata)
        regx = "//div[@class='box_con']/div[@id='list']/dl//dd/a/@href"
        return tree.xpath(regx)