# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from lxml import etree

import math

from shanbei.items import ShanbeiItem

""" 抓取扇贝网上指定的单词书
"""


class BookcrawlerSpider(scrapy.Spider):
    name = 'bookcrawler'
    scheme = "http://"
    allowed_domains = ['www.shanbay.com']
    start_urls = ['http://www.shanbay.com/wordbook/']
    pageset = set()

    def ___init__(self, bookid=None, *args, **kwargs):
        super(BookcrawlerSpider, self).___init__(args, kwargs)
        pass

    def start_requests(self):
        # 从 command line 输入一个 bookid :scrapy crawl bookcrawler -a bookid=16
        assert hasattr(self, "bookid"), "请指定一个bookid, 例如： scrapy crawl bookcrawler -a bookid=16 "
        # print("bookid in start_request :", self.bookid)
        for url in self.start_urls:
            url = url + self.bookid
            yield Request(url, callback=self.parse, dont_filter=True)
        pass

    def parse(self, response):
        """解析书籍页面, 获取单词列表页面
        :param response:
        :return:
        """
        # print(response.text)
        html = etree.HTML(response.text, parser=etree.HTMLParser(encoding="utf-8"))
        links = html.xpath(r'//td[@class="wordbook-wordlist-name"]/a/@href')
        baseurl = self.allowed_domains[0]
        for link in links:
            url = self.scheme + baseurl + link + "?page=1"
            self.pageset.add(url)
            yield Request(url, callback=self.parse_wordlist)
            pass
        pass

    def parse_wordlist(self, response):
        """解析词汇表首页, 将词汇表发送到 items 保存
        :param response:
        :return:
        """
        print("processing ", response.url)
        item = ShanbeiItem()
        html = etree.HTML(response.text, parser=etree.HTMLParser(encoding="utf-8"))
        # /html/body/div[3]/div/div[1]/div[1]/div/div/div[3]/p/a[1]
        bookname = html.xpath(r'//div[@class="wordbook-contains-this-wordlist"]/p/a[1]/text()')[0]
        name = html.xpath(r'string(//div[@class="span6"]/h4)')  # 利用 string() 函数提取全部字符串
        name = "".join(name).replace(" ", "").replace("\n", "")
        english = html.xpath(r'//tr[@class="row"]/td[@class="span2"]/strong/text()')
        chinese = html.xpath(r'//tr[@class="row"]/td[@class="span10"]/text()')
        # 获取当前页数
        req_params = BookcrawlerSpider.getParams(response.url)
        # print(req_params)
        active_page = int(req_params['page'])
        total_words = int(html.xpath(r'string(//div[@class="span6"]/h4/span[@id="wordlist-num-vocab"]/text())'))
        total_page = math.ceil(total_words / len(english))  # 默认一页是 20 个

        item['bookname'] = bookname
        item['name'] = name
        item['word_english'] = english
        item['word_chinese'] = chinese
        # print(bookname, name, english, chinese)
        yield item

        if active_page < total_page:
            url = req_params['base_url'] + ("?page=%d" % (active_page+1))
            if url not in self.pageset:  # 防止多次下载
                self.pageset.add(url)
                yield Request(url, callback=self.parse_wordlist)
                pass
            pass
        pass

    @staticmethod
    def getParams(url):
        param_list = url.split("?")[1].split("&")
        params = {'base_url': url.split("?")[0]}
        for param in param_list:
            pair = param.split("=")
            params[pair[0]] = pair[1]
            pass
        return params
        pass
