# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join,MapCompose
from bookcrawl.items import *
import pymongo
import re
from pymongo.command_cursor import CommandCursor
# MongoDB connection created at import time and shared module-wide by the
# spider below.
# NOTE(review): host/port are hard-coded — consider moving them to Scrapy
# settings so a deployment change doesn't require editing code.
client = pymongo.MongoClient("211.87.227.243",27017)
db = client.get_database("bookcrawl")
# Collection of previously crawled book documents; queried in start_requests.
books = db.get_collection("books")

# --- Amazon.cn URL templates and extraction patterns ---
# All patterns below are raw strings: sequences like "\/" and "\." are
# invalid escape sequences in a plain str literal and raise SyntaxWarning
# on modern Python (a future SyntaxError). Pattern values are unchanged.
amazon_search = "https://www.amazon.cn/s?k={key}&i=stripbooks"
amazon_link = "https://www.amazon.cn/dp/{pid}"
re_amazon_pid = re.compile(r"dp\/([a-zA-Z0-9]+)[\/]?")    # product id from /dp/<id>/ URLs
re_amazon_isbn = re.compile(r"[aAiI][sS][iIbB][nN]")      # "ISBN"/"ASIN" label, any case
re_amazon_press = re.compile(r"出版社")                    # "publisher" label
re_amazon_star = re.compile(r"用户评分")                   # "user rating" label
re_amazon_star_num = re.compile(r"([0-9]\.[0-9])")        # rating value, e.g. "4.5"
re_amazon_price = re.compile(r"([0-9]+\.[0-9]+)")         # price value, e.g. "45.60"

# --- Dangdang URL templates and extraction patterns ---
dang_search = "http://title.dangdang.com/?key={key}&category_path=01.00.00.00.00.00&type=01.00.00.00.00.00"
dang_link = "http://product.dangdang.com/{pid}.html"
re_dang_pid = re.compile(r"\/([0-9]+)\.")                 # numeric product id from URL
re_dang_isbn = re.compile(r"[iI][sS][bB][nN][:：]([0-9\-]+)")  # ISBN after ASCII or full-width colon
re_dang_star = re.compile(r"width:([0-9]+)")              # rating encoded as a CSS width percentage
re_dang_id = re.compile(r"/([^/]+).html")                 # last path segment before ".html"


# --- bookschina.com URL templates and extraction patterns ---
books_search = "http://www.bookschina.com/book_find2/?stp={key}&sCate=0"
books_link = "http://www.bookschina.com/{pid}.htm"
re_books_pid = re.compile(r"\/([0-9]+)\.")                # numeric product id from URL
re_books_star = re.compile(r"([0-9]\.[0-9])")             # rating value, e.g. "4.5"
re_books_isbn = re.compile(r"[iI][sS][bB][nN][:：]([0-9\-]+)")  # ISBN after ASCII or full-width colon



class MergetypeSpider(scrapy.Spider):
    """Re-crawl book detail pages for stored documents missing ``img_link``.

    Reads documents from the module-level ``books`` MongoDB collection and
    dispatches each to a site-specific parser (Dangdang, Amazon.cn or
    bookschina.com) based on the document's ``field`` value. Each parser
    yields a ``BookItem`` populated via an ``ItemLoader``.
    """
    name = 'mergetype'
    allowed_domains = ['dangdang.com','amazon.cn',"bookschina.com"]

    # Throttle per-domain traffic to stay polite and avoid bans.
    custom_settings = {
        "DOWNLOAD_DELAY": 1,
        "CONCURRENT_REQUESTS_PER_DOMAIN": 2
    }

    def start_requests(self):
        """Yield one request per book document that has no image link yet."""
        # no_cursor_timeout: the crawl can outlive MongoDB's default
        # server-side cursor timeout, so keep the cursor alive and close it
        # explicitly when done.
        cur = books.find({"img_link":{"$exists":False}},no_cursor_timeout=True)
        try:
            for i, c in enumerate(cur):
                # Double-check: another writer may have filled img_link after
                # the query snapshot was taken.
                if "img_link" in c:
                    continue
                st = c["field"]
                if st == "dangdang":
                    yield Request(c["link"], callback=self.parse_dang_link)
                elif st == "amazon":
                    yield Request(c["link"], callback=self.parse_amazon_link)
                elif st == 'bookschina':
                    yield Request(c["link"], callback=self.parse_books_link)
                self.log(f"当前遍历到底{i}个")
        finally:
            # Always release the non-timing-out server-side cursor, even if
            # iteration raises — the original leaked it on error.
            cur.close()

    def parse_amazon_link(self, response):
        """Parse an Amazon.cn book detail page into a BookItem."""
        loader = ItemLoader(item=BookItem(), response=response)
        loader.default_input_processor = MapCompose(str)
        loader.default_output_processor = Join(" ")

        # The "bucket" list holds free-text rows: ISBN, publisher, rating…
        base = response.xpath('//*[@class="bucket"]//li')
        for info in base:
            temp = info.extract()
            if re.search(re_amazon_isbn, temp):
                texts = info.xpath("text()").extract()
                if texts:  # guard: row may have no direct text node
                    loader.add_value("isbn", texts[0].strip())
            elif re.search(re_amazon_press, temp):
                texts = info.xpath("text()").extract()
                if texts:
                    loader.add_value("press", texts[0].strip())
            elif re.search(re_amazon_star, temp):
                # NOTE(review): assumes the rating lives in the 10th bucket
                # row — confirm against the current page layout.
                if len(base) < 10:
                    continue
                star = base[9].xpath('span//*[@class="a-icon-alt"]/text()').extract()
                if not star:
                    continue
                star_match = re.search(re_amazon_star_num, star[0])
                if star_match is not None:
                    # 0-5 star scale normalised to 0-100.
                    loader.add_value("star", float(star_match.group(1)) * 20)

        loader.add_xpath("author", '//*[contains(@class,"author")]/a/text()')
        loader.add_xpath("title", '//*[@id="productTitle"]/text()')

        # data-a-dynamic-image holds several image URLs; take the first match.
        # Guarded: the original indexed [0] and crashed when no cover existed.
        img_links = response.xpath("//img[@id='imgBlkFront']/@data-a-dynamic-image").re("http.+?[jp][pn][g]")
        if img_links:
            loader.add_value("img_link", img_links[0])

        # Second breadcrumb entry is taken as the book category.
        # Guarded: the original indexed [1] unconditionally.
        categories = response.xpath("//*[@class='a-list-item']/a/text()")
        if len(categories) > 1:
            loader.add_value("type", categories[1].get().strip())

        price = "".join(response.xpath('//*[@id="soldByThirdParty"]//*[contains(@class,"a-color-price")]/text()').extract())
        price_match = re.search(re_amazon_price, price)
        if price_match is not None:
            loader.add_value("price", price_match.group(1))

        loader.add_value("field", "amazon")
        loader.add_value("link", response.url)

        # Guarded: re.search may return None for unexpected URL shapes.
        pid_match = re.search(re_amazon_pid, response.url)
        if pid_match is not None:
            loader.add_value("pid", pid_match.group(1))

        yield loader.load_item()

    def parse_dang_link(self, response):
        """Parse a Dangdang book detail page into a BookItem."""
        loader = ItemLoader(item=BookItem(), response=response)
        loader.default_input_processor = MapCompose(str)
        loader.default_output_processor = Join(" ")

        # Detail rows such as "ISBN:978..." are plain <li> text lines.
        for line in response.xpath('//*[@id="detail_describe"]/ul/li/text()').extract():
            match = re.search(re_dang_isbn, line)
            if match is not None:
                loader.add_value("isbn", match.group(1))

        loader.add_xpath("title", '//*[@class="name_info"]/h1/@title')
        loader.add_xpath("author", '//*[@id="author"]/a/text()')
        loader.add_xpath("press", '//a[@dd_name="出版社"]/text()')
        loader.add_xpath("img_link", "//*[@id='largePic']/@src")
        loader.add_xpath("type", '//*[@id="breadcrumb"]/a[2]/text()')

        # Rating is encoded as a CSS width percentage, e.g. "width:90%".
        star = "".join(response.xpath('//*[@class="star"]/@style').extract())
        star_match = re.search(re_dang_star, star)
        if star_match is not None:
            loader.add_value("star", star_match.group(1))

        # Guarded: the original indexed [0] and crashed when the price node
        # was missing (promotional layouts).
        price = response.xpath('//*[@id="dd-price"]/text()[2]').extract()
        if price:
            loader.add_value("price", price[0].strip())

        loader.add_value("field", "dangdang")
        loader.add_value("link", response.url)

        # Guarded: re.search may return None for unexpected URL shapes.
        pid_match = re.search(re_dang_id, response.url)
        if pid_match is not None:
            loader.add_value("pid", pid_match.group(1))

        yield loader.load_item()

    def parse_books_link(self, response):
        """Parse a bookschina.com book detail page into a BookItem."""
        loader = ItemLoader(item=BookItem(), response=response)
        loader.default_input_processor = MapCompose(str)
        loader.default_output_processor = Join("")

        loader.add_xpath("title", '//*[@class="bookInfo"]//h1/text()')
        loader.add_xpath("author", '//*[@class="bookInfo"]//*[@class="author"]/a/text()')
        loader.add_xpath("press", '//*[@class="bookInfo"]//*[@class="publisher"]/a/text()')

        star = response.xpath('//*[@class="bookInfo"]//*[@class="startWrap"]/em/text()').get()
        self.log(star)
        if star is not None:
            star_match = re.search(re_books_star, star)
            if star_match is not None:
                # NOTE(review): assumes a 0-5 scale so ×20 yields 0-100 —
                # confirm against the site's rating widget.
                loader.add_value("star", float(star_match.group(1)) * 20)

        loader.add_xpath("price", '//*[@class="bookInfo"]//*[@class="priceWrap"]//*[@class="sellPrice"]/text()')

        # Copyright block rows: plain-text rows carry the ISBN; the row built
        # from nested <a> links carries the category breadcrumb.
        for info in response.xpath('//*[@class="copyrightInfor"]//li'):
            text = info.xpath("text()").get()
            if text is None:
                loader.add_value("type", "-".join(info.xpath("div/a/text()").extract()))
            else:
                isbn_match = re.search(re_books_isbn, text)
                if isbn_match is not None:
                    loader.add_value("isbn", isbn_match.group(1))

        loader.add_value("field", "bookschina")
        loader.add_value("link", response.url)
        loader.add_xpath("img_link", '//*[@class="jqzoom"]/@src')

        pid_match = re.search(re_books_pid, response.url)
        if pid_match is not None:
            loader.add_value("pid", pid_match.group(1))

        yield loader.load_item()
