import random
import time

import scrapy
from scrapy import Selector, Request
from Spider.items import TaobaoItem
from scrapy.http import HtmlResponse
from utils import create_chrome_driver, get_cookies_dict
from selenium.webdriver.common.by import By


class DoubanSpider(scrapy.Spider):
    """Spider that scrapes product listings from Taobao search results.

    Reads a product keyword interactively at startup, then walks every
    search-result page, yielding one ``TaobaoItem`` per product card
    (title, price, deal count, shop name, location).

    NOTE(review): the class is named ``DoubanSpider`` but crawls Taobao
    (``name = "taobao"``) — likely copied from a Douban spider. The class
    name is kept to avoid breaking external references; consider renaming.
    """

    name = "taobao"
    allowed_domains = ["taobao.com"]
    # start_urls = ["https://s.taobao.com/search?localImgKey=&page=1&q=&tab=all"]

    def __init__(self):
        super().__init__()
        # Keyword to search for, read interactively when the spider starts.
        self.product = input("请输入查询的商品：")
        self.url = None          # kept for backward compatibility (set in start_requests)
        self.page = 1            # current search-result page (1-based)
        # Selenium driver — presumably used by a downloader middleware to
        # render pages / carry login cookies; TODO confirm against middleware.
        self.chrome = create_chrome_driver()
        # Legacy attributes kept so external code that reads them keeps working.
        self.taobao = TaobaoItem()
        self.sel = None
        self.title = None

    def _search_url(self, page):
        """Build the Taobao search URL for result page *page*."""
        return f"https://s.taobao.com/search?localImgKey=&page={page}&q={self.product}&tab=all"

    def start_requests(self):
        """Kick off the crawl with the first search-result page."""
        self.url = self._search_url(self.page)
        yield scrapy.Request(
            url=self.url,
            callback=self.parse
        )

    def parse(self, response: HtmlResponse, **kwargs):
        """Extract product items from one result page and schedule the next.

        Yields a *fresh* ``TaobaoItem`` per product card. The original code
        mutated and re-yielded one shared instance (``self.taobao``), so any
        pipeline that buffered item references saw only the last product —
        a classic Scrapy pitfall, fixed here.
        """
        sel = Selector(response)
        cards = sel.xpath('//*[@id="pageContent"]/div[1]/div[3]/div[3]/div/div')
        for card in cards:
            item = TaobaoItem()  # one item per product; never share instances
            item['title'] = card.css(
                'a > div > div.Card--mainPicAndDesc--wvcDXaK > div.Title--descWrapper--HqxzYq0.Title--normalMod--HpNGsui > div > span::text').extract()
            # extract_first() returns None on a missing node; default to ''
            # so the concatenation below cannot raise TypeError.
            price_header = card.css(
                'a > div > div.Card--mainPicAndDesc--wvcDXaK > div.Price--priceWrapper--Q0Dn7pN > div:nth-child(2) > span.Price--priceInt--ZlsSi_M::text').extract_first() or ''
            price_footer = card.css(
                'a > div > div.Card--mainPicAndDesc--wvcDXaK > div.Price--priceWrapper--Q0Dn7pN > div:nth-child(2) > span.Price--priceFloat--h2RR0RK::text').extract_first() or ''
            item['price'] = price_header + price_footer
            item['deal_count'] = card.css(
                'a > div > div.Card--mainPicAndDesc--wvcDXaK > div.Price--priceWrapper--Q0Dn7pN > span.Price--realSales--FhTZc7U::text').extract_first()
            item['shop_name'] = card.css(
                'a > div > div.ShopInfo--shopInfo--ORFs6rK > div.ShopInfo--TextAndPic--yH0AZfx > a::text').extract_first()
            location1 = card.css(
                'a > div > div.Card--mainPicAndDesc--wvcDXaK > div.Price--priceWrapper--Q0Dn7pN > div:nth-child(4) > span::text').extract_first() or ''
            location2 = card.css(
                'a > div > div.Card--mainPicAndDesc--wvcDXaK > div.Price--priceWrapper--Q0Dn7pN > div:nth-child(5) > span::text').extract_first()
            # Some cards carry only the province (location1); append the city
            # (location2) only when it is present.
            item['location'] = location1 + location2 if location2 else location1
            yield item

        # NOTE(review): time.sleep blocks Scrapy's event loop; DOWNLOAD_DELAY
        # is the idiomatic throttle. Kept to preserve the original pacing.
        time.sleep(5)
        # Number of pagination buttons == total page count of the result set.
        page_buttons = sel.xpath('//*[@id="pageContent"]/div[1]/div[3]/div[4]/div/div/button').extract()
        print("Page总数:", len(page_buttons))
        if self.page < len(page_buttons):
            self.page += 1
            yield scrapy.Request(
                url=self._search_url(self.page),
                callback=self.parse
            )
        else:
            self.crawler.engine.close_spider(self, "商品信息已收集完")

    def closed(self, reason):
        """Called automatically by Scrapy when the spider finishes.

        Quits the Selenium driver so the browser process does not leak
        (the original never released it), then reports the close reason.
        """
        try:
            self.chrome.quit()
        except Exception:
            # Best-effort cleanup: the driver may already be gone.
            pass
        print('爬虫已关闭:', reason)

    def spider_closed(self, spider, reason):
        # NOTE(review): this handler is never connected to the spider_closed
        # signal (no from_crawler / signal wiring visible), so it never runs;
        # the `closed` hook above is what Scrapy actually invokes. Kept for
        # backward compatibility with any external wiring.
        print('爬虫已关闭:', reason)




