# -*- coding: utf-8 -*-
import scrapy
import json
from scrapy.loader import ItemLoader
from scrapy import Request
from scrapy.loader.processors import Join,MapCompose
import re
from bookcrawl.items import BookItem,LinkItem

# Compiled patterns for classifying product-detail rows and extracting values
# from Amazon.cn product pages.  Raw strings fix the invalid escape sequences
# ("\." and "\/" inside plain strings) that raise SyntaxWarning on Python 3.12+;
# the regexes themselves are unchanged ("\/" is just "/" to the re engine).
re_isbn = re.compile(r"[aAiI][sS][iIbB][nN]")       # matches the "ISBN"/"ASIN" detail-row label
re_press = re.compile("出版社")                      # publisher ("press") detail-row label
re_star = re.compile("用户评分")                     # "user rating" detail-row label
re_star_num = re.compile(r"([0-9]\.[0-9])")         # star value, e.g. "4.5"
re_price = re.compile(r"([0-9]+\.[0-9]+)")          # decimal price, e.g. "39.90"
re_pid = re.compile(r"/([a-zA-Z0-9]+)$")            # product id: last path segment of a URL
re_pid_filter = re.compile(r"dp/([a-zA-Z0-9]+)/?")  # product id inside a "/dp/<pid>/" href
product_url = "https://www.amazon.cn/dp/{pid}"      # canonical product-page URL template

class AmazonSpider(scrapy.Spider):
    """Crawl Amazon.cn book product pages.

    Starting from a seed list of product (ASIN) pages, extract book metadata
    into ``BookItem`` objects, then follow the "customers also viewed" and
    "customers also bought" recommendation links.  Each followed link also
    yields a ``LinkItem`` recording the edge between the referring product
    and the linked one (relation 2 = also viewed, 1 = also bought).
    """

    name = 'amazon'
    allowed_domains = ['amazon.cn']
    # Seed product pages (one duplicate ASIN, B07B64T2X7, removed).
    start_urls = ["https://www.amazon.cn/dp/B07MX29XDX",
                   "https://www.amazon.cn/dp/B07Q8DS9LB",
                   "https://www.amazon.cn/dp/B00VUMWHCQ",
                   "https://www.amazon.cn/dp/B07KLFJD2H",
                   "https://www.amazon.cn/dp/B07C5HJ2L6",
                   "https://www.amazon.cn/dp/B07LD4P39C",
                   "https://www.amazon.cn/dp/B07NRDV652",
                   "https://www.amazon.cn/dp/B07Q97K71Z",
                   "https://www.amazon.cn/dp/B07LD8Q64M",
                   "https://www.amazon.cn/dp/B07NN37WF4",
                   "https://www.amazon.cn/dp/B07PK6FSVQ",
                   "https://www.amazon.cn/dp/B07KZ5NTR4",
                   "https://www.amazon.cn/dp/B07NRF4K91",
                   "https://www.amazon.cn/dp/B075QWMC2Y",
                   "https://www.amazon.cn/dp/B07N1GR4XR",
                   "https://www.amazon.cn/dp/B07P5D9ZTY",
                   "https://www.amazon.cn/dp/B06XWQ9WY8",
                   "https://www.amazon.cn/dp/B075KZ7D67",
                   "https://www.amazon.cn/dp/B07NHWGSFK",
                   "https://www.amazon.cn/dp/B07MTCTPQY",
                   "https://www.amazon.cn/dp/B075SWP6LG",
                   "https://www.amazon.cn/dp/B0011CRO32",
                   "https://www.amazon.cn/dp/B07NN6HJTT",
                   "https://www.amazon.cn/dp/B07GVXD647",
                   "https://www.amazon.cn/dp/B078X679TC",
                   "https://www.amazon.cn/dp/B07895ZXR3",
                   "https://www.amazon.cn/dp/B00XH8KAQ6",
                   "https://www.amazon.cn/dp/B07HSJQ11N",
                   "https://www.amazon.cn/dp/B07DXS2KY4",
                   "https://www.amazon.cn/dp/B00G9RSR7Q",
                   "https://www.amazon.cn/dp/B0753CXMRZ",
                   "https://www.amazon.cn/dp/B07B64T2X7",
                   "https://www.amazon.cn/dp/B01HNLEA8G",
                   "https://www.amazon.cn/dp/B01EXQVUYG",
                   "https://www.amazon.cn/dp/B078Y352M2",
                   "https://www.amazon.cn/dp/B00TGV7GCE",
                   "https://www.amazon.cn/dp/B015XFFDCG",
                   "https://www.amazon.cn/dp/B07L6FP4NS",
                   "https://www.amazon.cn/dp/B01M22C5TZ",
                   "https://www.amazon.cn/dp/B07PZ6Q4TT",
                   "https://www.amazon.cn/dp/B00957T6X6",]
    # Crawl politely: one request at a time, 2 s apart.
    custom_settings = {
        "DOWNLOAD_DELAY": 2,
        "CONCURRENT_REQUESTS_PER_DOMAIN": 1,
    }

    def parse(self, response):
        """Parse one product page.

        Yields, in order: one ``BookItem`` for this page; optionally one
        ``LinkItem`` when the page was reached through a recommendation link
        (``response.meta`` then carries the referrer's ``pid`` and the
        ``relation`` code); and follow-up ``Request``s for every
        recommendation link found on the page.
        """
        loader = ItemLoader(item=BookItem(), response=response)
        loader.default_input_processor = MapCompose(str)
        loader.default_output_processor = Join(" ")

        # The "product details" bucket is a flat list of <li> rows; classify
        # each row by the label text it contains.
        detail_rows = response.xpath('//*[@class="bucket"]//li')
        for row in detail_rows:
            row_html = row.extract()
            if re_isbn.search(row_html):
                loader.add_value("isbn", row.xpath("text()").extract()[0].strip())
            elif re_press.search(row_html):
                loader.add_value("press", row.xpath("text()").extract()[0].strip())
            elif re_star.search(row_html):
                # Page-layout assumption inherited from the original code:
                # the star rating widget sits in the 10th bucket row.
                if len(detail_rows) < 10:
                    continue
                star_texts = detail_rows[9].xpath(
                    'span//*[@class="a-icon-alt"]/text()').extract()
                if not star_texts:
                    continue
                star_match = re_star_num.search(star_texts[0])
                if star_match is not None:
                    # Rescale the 0-5 star value to a 0-100 score.
                    loader.add_value("star", float(star_match.group(1)) * 20)

        loader.add_xpath("author", '//*[contains(@class,"author")]/a/text()')
        loader.add_xpath("title", '//*[@id="productTitle"]/text()')

        # Price is scattered over several text nodes; join before matching.
        price = "".join(response.xpath(
            '//*[@id="soldByThirdParty"]//*[contains(@class,"a-color-price")]/text()').extract())
        price_match = re_price.search(price)
        if price_match is not None:
            loader.add_value("price", price_match.group(1))

        loader.add_value("field", self.name)
        loader.add_value("link", response.url)

        # The product id (ASIN) is the last path segment; every URL this
        # spider requests has the /dp/<pid> form, so a match is expected.
        pid = re_pid.search(response.url).group(1)
        loader.add_value("pid", pid)

        item = loader.load_item()
        yield item

        # If this page was reached via a recommendation link, emit the edge
        # from the referring product to this one.
        relation = response.meta.get("relation")
        if relation is not None:
            rloader = ItemLoader(item=LinkItem())
            rloader.default_input_processor = MapCompose(str)
            rloader.default_output_processor = Join(" ")
            rloader.add_value("relation", relation)
            rloader.add_value("pid", response.meta["pid"])
            rloader.add_value("link_pid", pid)
            yield rloader.load_item()

        # Follow "customers also viewed" links (relation code 2).
        also_view = response.xpath(
            '//*[@data-similarity-type="desktop-dp-sims_session-similarities"]//li//a[@class="a-link-normal"]/@href')
        for href in also_view:
            pid_match = re_pid_filter.search(href.extract())
            if pid_match is not None:
                yield Request(product_url.format(pid=pid_match.group(1)),
                              callback=self.parse,
                              meta={"pid": item["pid"], "relation": 2})

        # Follow "customers also bought" links (relation code 1).
        also_buy = response.xpath(
            '//*[@data-similarity-type="desktop-dp-sims_purchase-similarities"]//li//a[@class="a-link-normal"]/@href')
        for href in also_buy:
            # BUG FIX: the original searched href.extract()[0] — the first
            # CHARACTER of the href string (Selector.extract() returns str),
            # so no pid ever matched and these links were never followed.
            pid_match = re_pid_filter.search(href.extract())
            if pid_match is not None:
                yield Request(product_url.format(pid=pid_match.group(1)),
                              callback=self.parse,
                              meta={"pid": item["pid"], "relation": 1})
