# -*- coding: utf-8 -*-
import scrapy
from scrapy.loader import ItemLoader
from scrapy import Request
from bookcrawl.items import *

from scrapy.loader.processors import Join,MapCompose
import re

# Pre-compiled patterns, hoisted to module level so they compile once.
# Raw strings fix the invalid escape sequences ("\/", "\.", "\-") that the
# original non-raw literals produced (DeprecationWarning on Python 3.6+);
# the compiled regexes are behaviorally identical.

# Product id: run of digits between a "/" and the following "." in a URL,
# e.g. "http://www.bookschina.com/7964545.htm" -> "7964545".
re_pid = re.compile(r"/([0-9]+)\.")
# Star rating shaped like "8.3": one digit, a dot, one digit.
re_star = re.compile(r"([0-9]\.[0-9])")
# ISBN digits/dashes after "ISBN" (any case) and an ASCII or full-width colon.
re_isbn = re.compile(r"[iI][sS][bB][nN][:：]([0-9\-]+)")

# Template for a product detail page, filled with a pid captured by re_pid.
product_url = "http://www.bookschina.com/{pid}.htm"
class BookschinaSpider(scrapy.Spider):
    """Spider for bookschina.com product detail pages.

    Starts from a fixed seed list of product URLs, loads each page's book
    metadata into a ``BookItem``, and follows the "customers also bought"
    links, yielding a ``LinkItem`` edge (seen-from pid -> linked pid) for
    every followed recommendation.
    """

    name = 'bookschina'
    allowed_domains = ['bookschina.com']
    start_urls = ["http://www.bookschina.com/7964545.htm",
                  "http://www.bookschina.com/7252001.htm",
                  "http://www.bookschina.com/6249426.htm",
                  "http://www.bookschina.com/6526222.htm",
                  "http://www.bookschina.com/4141017.htm",
                  "http://www.bookschina.com/522124.htm",
                  "http://www.bookschina.com/6275434.htm",
                  "http://www.bookschina.com/4651481.htm",
                  "http://www.bookschina.com/6309589.htm",
                  "http://www.bookschina.com/7177550.htm",
                  "http://www.bookschina.com/6362386.htm",
                  "http://www.bookschina.com/6452933.htm",
                  "http://www.bookschina.com/6504571.htm",
                  "http://www.bookschina.com/5235010.htm",
                  "http://www.bookschina.com/5013187.htm",
                  "http://www.bookschina.com/5261780.htm",
                  "http://www.bookschina.com/6955642.htm",
                  "http://www.bookschina.com/6258286.htm",
                  "http://www.bookschina.com/7462869.htm",
                  "http://www.bookschina.com/6897793.htm",
                  "http://www.bookschina.com/6800604.htm",
                  "http://www.bookschina.com/5387751.htm",
                  "http://www.bookschina.com/6889235.htm",
                  "http://www.bookschina.com/6781378.htm",
                  "http://www.bookschina.com/5822194.htm",
                  "http://www.bookschina.com/5908013.htm",
                  "http://www.bookschina.com/5066416.htm",
                  "http://www.bookschina.com/1568304.htm",
                  "http://www.bookschina.com/6650011.htm",
                  "http://www.bookschina.com/6371606.htm",
                  "http://www.bookschina.com/6569434.htm",
                  "http://www.bookschina.com/1262778.htm",
                  "http://www.bookschina.com/6458174.htm",
                  "http://www.bookschina.com/6377923.htm",
                  "http://www.bookschina.com/5195774.htm",
                  "http://www.bookschina.com/6640558.htm",
                  "http://www.bookschina.com/1385805.htm",
                  "http://www.bookschina.com/6774768.htm",
                  "http://www.bookschina.com/6542918.htm",
                  "http://www.bookschina.com/5790321.htm",
                  "http://www.bookschina.com/6369364.htm",
                  "http://www.bookschina.com/6294765.htm",
                  ]

    # Crawl politely: one request at a time, 1 second apart.
    custom_settings = {
        "DOWNLOAD_DELAY": 1,
        "CONCURRENT_REQUESTS_PER_DOMAIN": 1
    }

    def parse(self, response):
        """Parse a product page.

        Yields:
            BookItem: the book's metadata (title, author, press, star,
                price, type, isbn, field, link, pid).
            LinkItem: when this page was reached via a recommendation
                (``response.meta["relation"]`` is set), the edge from the
                referring pid to this page's pid.
            Request: one follow-up request per "also bought" link that
                contains a parseable pid.
        """
        loader = ItemLoader(item=BookItem(), response=response)
        loader.default_input_processor = MapCompose(str)
        loader.default_output_processor = Join("")

        loader.add_xpath("title", '//*[@class="bookInfo"]//h1/text()')
        loader.add_xpath("author", '//*[@class="bookInfo"]//*[@class="author"]/a/text()')
        loader.add_xpath("press", '//*[@class="bookInfo"]//*[@class="publisher"]/a/text()')

        star = response.xpath('//*[@class="bookInfo"]//*[@class="startWrap"]/em/text()').get()
        # Fix: only log/match when the rating element exists (the original
        # logged None for pages without a rating).
        if star is not None:
            self.log(star)
            star_match = re_star.search(star)
            if star_match is not None:
                # Site rates 0-10 with one decimal; rescale to 0-100.
                loader.add_value("star", float(star_match.group(1)) * 20)

        loader.add_xpath("price", '//*[@class="bookInfo"]//*[@class="priceWrap"]//*[@class="sellPrice"]/text()')

        # Copyright info list mixes two row shapes: plain-text rows
        # ("ISBN:..."), and a category row whose text sits in nested <a>s.
        for info in response.xpath('//*[@class="copyrightInfor"]//li'):
            text = info.xpath("text()").get()
            if text is None:
                # Category row: join the breadcrumb links with "-".
                loader.add_value("type", "-".join(info.xpath("div/a/text()").extract()))
            else:
                isbn_match = re_isbn.search(text)
                if isbn_match is not None:
                    loader.add_value("isbn", isbn_match.group(1))

        loader.add_value("field", "bookschina")
        loader.add_value("link", response.url)

        pid_match = re_pid.search(response.url)
        if pid_match is not None:
            loader.add_value("pid", pid_match.group(1))

        item = loader.load_item()
        yield item

        # Fix: "pid" is only present when the URL matched re_pid above; the
        # original indexed item["pid"] unconditionally and raised KeyError
        # (aborting the whole callback) for non-matching URLs.
        pid = item.get("pid")

        relation = response.meta.get("relation", None)
        if relation is not None and pid is not None:
            rloader = ItemLoader(item=LinkItem())
            rloader.default_input_processor = MapCompose(str)
            rloader.default_output_processor = Join(" ")

            rloader.add_value("relation", relation)
            rloader.add_value("pid", response.meta["pid"])   # referring page's pid
            rloader.add_value("link_pid", pid)               # this page's pid
            yield rloader.load_item()

        # Follow "also bought" recommendations, tagging each request with
        # this page's pid so the child can emit the LinkItem edge.
        if pid is not None:
            also_buy = response.xpath('//*[@class="otherBuyWrap"]//*[@class="cover"]//@href').extract()
            for buy in also_buy:
                buy_match = re_pid.search(buy)
                if buy_match is not None:
                    yield Request(product_url.format(pid=buy_match.group(1)),
                                  callback=self.parse,
                                  meta={"relation": 1, "pid": pid})
