# -*- coding: utf-8 -*-
# Implements the request / response / parsing workflow for the book search spider.

import re
import threading
import urllib
import urllib.parse
import urllib.request

import scrapy
from lxml import etree
from scrapy import Selector

from novelget.items import BookInfoItem

class BookFinder(scrapy.Spider):
    """Spider that queries biquge.com.cn's search page and extracts, for every
    result page, the book titles and their one-line descriptions.

    ``parse`` returns a dict of the form
    ``{"index": <page-number-str>, 0: {"name": ..., "description": ...}, 1: {...}, ...}``.
    """

    name = 'bookinfo'        # spider name; required by scrapy to launch the crawler
    # NOTE(review): `lock` and `index` are never used inside this class —
    # presumably read by external callers; confirm before removing.
    lock = threading.Lock()
    index = 0

    def __init__(self, searchname):
        """Probe the search results once to learn the page count, then build
        the full list of paginated search URLs in ``self.start_urls``.

        :param searchname: the (possibly non-ASCII) query string to search for.
        """
        # Let scrapy.Spider perform its own initialisation (the original
        # skipped this, which scrapy relies on for e.g. self.logger).
        super().__init__()
        print(searchname)
        self.allowed_domains = ['www.biquge.com.cn']
        endnum = self.getPageNum(searchname)
        # Quote the query so non-ASCII search terms yield valid URLs.
        # getPageNum already quoted its URL; start_urls previously did not.
        quoted = urllib.parse.quote(searchname)
        self.start_urls = [
            'https://www.biquge.com.cn/search.php?q=' + quoted + '&p=' + str(i)
            for i in range(1, int(endnum) + 1)
        ]
        print(self.start_urls)

    def start_requests(self):
        """Yield one request per paginated search URL.

        No explicit callback is set, so scrapy routes responses to ``parse``.
        """
        for url in self.start_urls:
            yield scrapy.Request(url=url)

    def parse(self, response):
        """Parse one search-result page.

        Returns a dict keyed by ``"index"`` (the page number extracted from the
        URL) plus one integer key per result holding ``name``/``description``.
        """
        bookdict = {"index": self.getPage(response.url)}
        namelist = self.getBookName(response)
        deslist = self.getDescribe(response)
        # zip() truncates to the shorter list, so a missing description can
        # no longer raise IndexError as the old range(len(...)) loop could.
        for i, (bookname, describe) in enumerate(zip(namelist, deslist)):
            bookdict[i] = {'name': bookname, 'description': describe}
        return bookdict

    def getBookName(self, response):
        """Return the list of book titles on this result page."""
        regx = "//div[contains(@class,'result-item')]//a[@cpos='title']/span/text()"
        return response.xpath(regx).extract()

    def getDescribe(self, response):
        """Return the list of book descriptions on this result page."""
        regx = "//div[contains(@class,'result-item')]//p[@class='result-game-item-desc']/text()"
        return response.xpath(regx).extract()

    def getPageNum(self, searchname):
        """Fetch the first result page synchronously (outside scrapy) and
        return the last page number found in the pagination links.

        Returns ``1`` when no pagination link exists; otherwise the text after
        the final ``=`` of the first untitled pagination href (the full href
        if it contains no ``=``).
        """
        realurl = 'https://www.biquge.com.cn/search.php?q=' + urllib.parse.quote(searchname)
        # Context manager closes the connection (the original leaked it).
        with urllib.request.urlopen(realurl) as resp:
            rawdata = resp.read().decode("utf-8")
        tree = etree.HTML(rawdata)
        regx = "//div[@class='search-result-page-main']//a[not(@title)]/@href"
        hrefs = tree.xpath(regx)
        if not hrefs:
            return 1
        href = hrefs[0]
        eq = href.rfind('=')
        # Pagination hrefs look like '...&p=N'; keep everything after '='.
        return href[eq + 1:] if eq > -1 else href

    def getPage(self, url):
        """Extract the page number from a ``...&p=N`` URL.

        Returns the original string unchanged when no ``p=`` is present,
        matching the original behaviour.
        """
        pos = url.rfind('p=')
        return url[pos + 2:] if pos > -1 else url