import time

import scrapy
from scrapy import Selector, Request
from scrapy.exceptions import CloseSpider
from scrapy.http import HtmlResponse
from selenium.webdriver.common.by import By

from amazon.items import JdItem
from utils import create_chrome_driver, get_cookies_dict


class AmazonSpider(scrapy.Spider):
    """Crawl JD.com search results for a keyword and yield one JdItem per product.

    Despite the class name, this spider targets jd.com ("jd" is the name
    Scrapy uses to run it). A Chrome driver is created at start-up —
    presumably consumed by a download middleware via ``spider.chrome``;
    TODO confirm against the project's middleware configuration.
    """

    name = "jd"
    allowed_domains = ["jd.com"]

    def __init__(self):
        super().__init__()
        self.product = 'obsbot'  # search keyword
        self.url = None          # most recent search URL requested
        self.page = 1            # NOTE(review): looks like JD search pages are odd-numbered (1, 3, ...) — see parse()
        self.chrome = create_chrome_driver()  # shared Selenium driver; released in closed()
        self.sel = None          # Selector for the response being parsed

    def _search_url(self) -> str:
        """Build the JD search URL for the current keyword and page."""
        return (
            f"https://search.jd.com/Search?keyword={self.product}"
            f"&stop=3&qrst=1&vt=3&wq=obsbot&stock=1"
            f"&pvid=3001aaa99e0a4133b941f2008dc73753&isList=0&page={self.page}"
        )

    def start_requests(self):
        """Kick off the crawl with the first search-results page."""
        self.url = self._search_url()
        yield scrapy.Request(
            url=self.url,
            callback=self.parse
        )

    def parse(self, response: HtmlResponse, **kwargs):
        """Yield one JdItem per product card on the results page, then paginate."""
        self.sel = Selector(response)
        for card in self.sel.css('#J_goodsList > ul > li'):
            # Fresh item per product: the old code mutated and re-yielded one
            # shared self.jd instance, so every queued item aliased the last
            # product scraped.
            item = JdItem()
            item['title'] = card.css('div > div.p-name.p-name-type-2 > a > em::text').extract()
            # extract_first() returns None when a selector misses; default to ''
            # so the string operations below cannot raise AttributeError/TypeError.
            price = card.css('div > div.p-price > strong > i::text').extract_first() or ''
            # NOTE(review): split(',') stores the price as a list of segments
            # (drops thousands separators) — kept as-is; verify the pipeline
            # expects a list here.
            item['price'] = price.split(',')
            count_header = card.css('div > div.p-commit > strong > a::text').extract_first() or ''
            count_footer = card.css('div > div.p-commit > strong::text').extract_first() or ''
            item['deal_count'] = count_header + count_footer
            item['shop_name'] = card.css('div > div.p-shop > span > a::text').extract_first()
            yield item

        # Crude politeness delay. NOTE(review): time.sleep blocks Scrapy's
        # reactor; the DOWNLOAD_DELAY setting is the non-blocking equivalent.
        time.sleep(3)
        if self.page < 2:
            self.page += 2  # jump 1 -> 3; presumably JD paginates on odd numbers — verify
            yield scrapy.Request(
                url=self._search_url(),
                callback=self.parse
            )
        else:
            # Supported shutdown path; engine.close_spider() is a deprecated
            # internal API.
            raise CloseSpider("JD商品信息已收集完")

    def closed(self, reason):
        """Called automatically by Scrapy on shutdown; release the browser."""
        self.chrome.quit()  # fixes the leaked Chrome process
        self.spider_closed(self, reason)

    def spider_closed(self, spider, reason):
        # Kept for backward compatibility; never wired to the spider_closed
        # signal in this file, so it only runs via closed() above.
        print('爬虫已关闭:', reason)