import time

import scrapy
from scrapy import Request
from selenium import webdriver
from scrapy.selector import Selector
from scrapy_1.items import Scrapy1Item
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class CarSpider(scrapy.Spider):
    """Crawl EV sales rankings from xl.16888.com.

    A single shared Selenium Chrome driver renders each JavaScript-heavy
    page; the rendered HTML is then parsed with Scrapy selectors.  For every
    car row on a listing page, two detail pages are followed: monthly sales
    (``mdetail``) and per-region registration counts (``detail``).
    """

    name = 'car'

    def __init__(self, *args, **kwargs):
        super(CarSpider, self).__init__(*args, **kwargs)
        # One browser instance for the whole crawl; released in closed().
        # NOTE(review): the positional executable_path is deprecated in
        # Selenium 4 — confirm the installed selenium version supports it.
        self.driver = webdriver.Chrome('D:/zip/chromedriver.exe')

    def start_requests(self):
        # Listing pages 1-4 of the EV sales ranking.
        urls = [f'https://xl.16888.com/ev-{page}.html' for page in range(1, 5)]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse one listing page: build an item per car row and follow
        its monthly-sales and regional-registration detail links."""
        self.driver.get(response.url)
        # Wait until the table rows are rendered.  The previous code called
        # find_element(...).is_displayed(), which does not wait at all; use
        # an explicit wait, consistent with mdetail()/detail().
        wait = WebDriverWait(self.driver, 10)
        wait.until(EC.visibility_of_element_located(
            (By.XPATH, '/html/body/div[5]/div[3]/div[2]/div/div[2]/div[1]/table/tbody/tr')))
        # Parse the rendered page source with a Scrapy selector.
        selector = Selector(text=self.driver.page_source)
        rows = selector.xpath('/html/body/div[5]/div[3]/div[2]/div/div[2]/div[1]/table/tbody/tr')
        for row in rows:
            item = Scrapy1Item()
            item['Carname'] = row.xpath('./td[2]/a/text()').get()
            item['Manufacturers'] = row.xpath('./td[4]/a/text()').get()
            item['Price'] = row.xpath('./td[5]/a/text()').get()
            monthly_href = row.xpath('./td[6]/div/a[1]/@href').get()
            region_href = row.xpath('./td[6]/div/a[2]/@href').get()
            # Guard against missing links: Request(url=None) raises ValueError.
            if monthly_href:
                yield Request(url=response.urljoin(monthly_href),
                              callback=self.mdetail, cb_kwargs={'jc_item': item})
            if region_href:
                yield Request(url=response.urljoin(region_href),
                              callback=self.detail, cb_kwargs={'item': item})

    def mdetail(self, response, **kwargs):
        """Collect monthly sales figures for one car model.

        Receives the partially-filled item as ``jc_item`` via cb_kwargs,
        stores the page's rows under ``Main_sales`` and follows the
        next-page link, if any, back into this callback.
        """
        item = kwargs['jc_item']
        self.driver.get(response.url)
        # Wait for the sales table header to be visible.
        wait = WebDriverWait(self.driver, 10)
        wait.until(EC.visibility_of_element_located(
            (By.XPATH, '/html/body/div[5]/div[3]/div[2]/div/div[2]/div[1]/table/tbody/tr[1]/th[7]')))
        selector = Selector(text=self.driver.page_source)
        rows = selector.xpath('/html/body/div[5]/div[3]/div[2]/div/div[2]/div[1]/table/tbody/tr')
        monthly_sales = []
        for row in rows:
            period = row.xpath('./td[3]/a/text()').get()
            sales = row.xpath('./td[4]/a/em/text()').get()
            # renamed from `type` to avoid shadowing the builtin
            car_type = row.xpath('./td[5]/a/text()').get()
            monthly_sales.append({period: [sales, car_type]})
        item['Main_sales'] = monthly_sales
        # Pagination fix: the old per-row xpath './html/body/...' can never
        # match relative to a <tr> (a tr has no html child), so it always
        # returned None.  Evaluate the absolute next-page link once per page,
        # resolve it against the response URL, and — unlike the old code,
        # which sent the follow-up to self.detail — continue in this callback.
        next_href = selector.xpath(
            '/html/body/div[5]/div[3]/div[2]/div[2]/div[1]/div/a[3]/@href').get()
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href),
                                 callback=self.mdetail, cb_kwargs={'jc_item': item})
        yield item

    def detail(self, response, **kwargs):
        """Collect per-region registration counts for one car model.

        Receives the partially-filled item as ``item`` via cb_kwargs,
        stores the page's rows under ``Main_apply`` and follows the
        next-page link, if any, back into this callback.
        """
        item = kwargs['item']
        self.driver.get(response.url)
        # Wait for the registration table rows to be visible.
        wait = WebDriverWait(self.driver, 10)
        wait.until(EC.visibility_of_element_located(
            (By.XPATH, '/html/body/div[5]/div[3]/div[2]/div[2]/table/tbody/tr')))
        selector = Selector(text=self.driver.page_source)
        rows = selector.xpath('/html/body/div[5]/div[3]/div[2]/div[2]/table/tbody/tr')
        region_counts = []
        for row in rows:
            city = row.xpath('./td[3]/a/text()').get()
            sales = row.xpath('./td[4]/a/em/text()').get()
            # renamed from `type` to avoid shadowing the builtin
            car_type = row.xpath('./td[5]/a/text()').get()
            # The old if/else appended the same dict on both branches.
            region_counts.append({city: [sales, car_type]})
        item['Main_apply'] = region_counts
        # Same pagination fix as mdetail(): page-level xpath + urljoin guard
        # (the old code passed a raw, possibly-None href straight to Request).
        next_href = selector.xpath(
            '/html/body/div[5]/div[3]/div[2]/div[2]/div[1]/div/a[3]/@href').get()
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href),
                                 callback=self.detail, cb_kwargs={'item': item})
        yield item

    def closed(self, reason):
        """Scrapy shutdown hook: release the shared browser instance."""
        self.driver.quit()

