import scrapy
from ImageSpider.items import ImageItem
from ImageSpider.settings import DEFAULT_REQUEST_HEADERS
import time
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from scrapy_selenium import SeleniumRequest
class MsgaoSpider(scrapy.Spider):
    """Spider for msgao.com image-gallery pages.

    The pages are JavaScript-rendered, so every request must go through
    SeleniumRequest (scrapy_selenium) instead of a plain scrapy.Request.
    """
    name = 'msgao'
    allowed_domains = ['msgao.com']
    start_urls = ['https://www.msgao.com/meinv/index.html']
    # Upper bound on re-requests of a detail page whose image has not
    # rendered yet, so one permanently broken page cannot loop forever.
    max_image_retries = 3

    def start_requests(self):
        # Fetch the listing page with SeleniumRequest so it is rendered by
        # the browser; parse() must keep using SeleniumRequest as well.
        yield SeleniumRequest(url=self.start_urls[0], callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Extract detail-page links from a listing page and follow pagination."""
        links = response.xpath('//div[@id="mainbodypul"]/div/a/@href').extract()
        for link in links:
            yield SeleniumRequest(url=response.urljoin(link), callback=self.parse_item)
        # Pagination: the second-to-last <a> of the pager is the "next page"
        # link when its text is '下一页' ("next page").
        next_link = response.xpath('//div[@class="page both"]/ul/a')
        # Guard against a short or missing pager before indexing [-2];
        # the unguarded original raised IndexError on the last page.
        if len(next_link) >= 2 and next_link[-2].xpath('text()').extract_first() == '下一页':
            next_url = response.urljoin(next_link[-2].xpath('@href').extract_first())
            yield SeleniumRequest(url=next_url, callback=self.parse, dont_filter=True)

    def parse_item(self, response):
        """Scrape one image page and follow the gallery's own pagination."""
        next_link = response.xpath('//div[@class="page"]/a')
        if len(next_link) >= 2:
            if next_link[-2].xpath('text()').extract_first() == '下一页':
                next_url = response.urljoin(next_link[-2].xpath('@href').extract_first())
                yield SeleniumRequest(url=next_url, callback=self.parse_item)
            else:
                # End of the gallery: log the pager texts for debugging
                # instead of printing to stdout.
                self.logger.debug('pager texts: %s', next_link.xpath('text()').extract())
        item = ImageItem()
        container = '//div[@class="bg-white p15 center imgac clearfix"]'
        item['title'] = response.xpath(container + '/h1[@class="center"]/text()').extract_first()
        # NOTE(review): Scrapy's ImagesPipeline expects `image_urls` to be a
        # list of URLs, but this stores a single string — verify that the
        # project's pipeline handles that shape.
        item['image_urls'] = response.xpath(container + '/a/img/@src').extract_first()
        item['imgAlt'] = response.xpath(container + '/a/img/@alt').extract_first()
        item['referer'] = response.url
        if item['image_urls'] is None:
            # The image has not rendered yet: retry the same URL a bounded
            # number of times. dont_filter=True is required here — without
            # it the dupefilter silently drops this repeat request, so the
            # original retry never actually fired.
            retries = response.meta.get('image_retries', 0)
            if retries < self.max_image_retries:
                yield SeleniumRequest(
                    url=response.url,
                    callback=self.parse_item,
                    dont_filter=True,
                    meta={'image_retries': retries + 1},
                )
            else:
                self.logger.warning(
                    'giving up on %s after %d retries', response.url, retries)
        else:
            yield item
