# -*- coding: utf-8 -*-
# @Time    : 2018/11/15 15:20
# @Author  : zjj
# @Email   : 1933860854@qq.com
# @File    : iQiYiMainSpider.py
# @Software: PyCharm
import scrapy
import json
from iQiYiSpider.items import IqiyispiderItem
from scrapy.http import Request, FormRequest
'''
    爬取创意雷达广告
'''
class iQiYiSpider(scrapy.Spider):
    """Spider that crawls ad cards from guanggao365.com's listing page.

    For every ad card found on ``/ads.html`` it fills an ``IqiyispiderItem``
    and issues a follow-up POST to the ad-details endpoint to resolve the
    landing-page URL, then re-queues the listing page so the crawl keeps
    polling it.
    """
    name = 'iQiYiSpider'
    allowed_domains = ['www.guanggao365.com']
    start_urls = ['https://www.guanggao365.com/ads.html']

    def start_requests(self):
        """Kick off the crawl with a single GET of the listing page."""
        self.logger.info('-----开始请求-----')
        # The original used FormRequest with no formdata, which behaves as a
        # plain GET Request — use the simpler type directly.
        yield scrapy.Request('https://www.guanggao365.com/ads.html')

    def parse(self, response):
        """Extract one item per ad card and schedule detail + re-poll requests.

        Any extraction failure (``extract()[0]`` raises IndexError on a
        missing node) aborts parsing of the whole page via the broad except,
        matching the original best-effort behavior.
        """
        try:
            ad_cards = response.xpath('//div[@class="typeList clear"]//ul/li[@class="list_li"]')
            for card in ad_cards:
                item = IqiyispiderItem()
                item['ad_id'] = card.xpath('./@adid').extract()[0]
                item['ad_img'] = card.xpath('.//div[@class="typeListImg"]/img/@src').extract()[0]
                item['ad_title'] = card.xpath('.//p[@class="pName bottom1Cc"]/text()').extract()[0]
                item['ad_industry'] = card.xpath('.//p[@class="pName1"]/span/text()').extract()[0]
                item['ad_hot'] = card.xpath('.//p[@class="p1"]/span[@class="p1Span"]/text()').extract()[0]
                item['ad_time'] = card.xpath('.//p[@class="pCrtTime"]/text()').extract()[0]
                logo_src = card.xpath('.//img[@class="adImg"]/@src').extract()[0]
                # Drop the first character of the src (presumably a leading
                # '.' of a relative path — TODO confirm) and prefix the host.
                item['ad_logo'] = 'https://www.guanggao365.com' + logo_src[1:]
                details_url = 'https://www.guanggao365.com/advertisement/details'
                yield scrapy.Request(
                    details_url + '?id=' + item['ad_id'],
                    callback=self.parseDetails,
                    method='POST',
                    encoding='utf-8',
                    meta={'item': item},
                )
            # Re-queue the listing page; dont_filter bypasses the dupe filter
            # so the spider keeps polling the same URL indefinitely.
            yield scrapy.Request(
                'https://www.guanggao365.com/ads.html',
                callback=self.parse,
                method='GET',
                encoding='utf-8',
                priority=0,
                dont_filter=True,
                meta={
                    # Fixed typo: RedirectMiddleware honours 'dont_redirect';
                    # the original key 'dont_redict' was silently ignored.
                    'dont_redirect': True,
                    'handle_httpstatus_list': [302, 301],
                },
            )
        except Exception as e:
            self.logger.error('-----------------终止解析----------------- %s', e)

    def parseDetails(self, response):
        """Attach the landing-page URL (JSON field 'url') to the item.

        The field is only set when the response JSON contains a non-empty
        'url' value; the item is yielded either way.
        """
        item = response.meta['item']
        json_result = json.loads(response.body)
        # Preserve original semantics exactly: set only when the key exists
        # and its value is not the empty string (other falsy values still set).
        if 'url' in json_result and json_result['url'] != '':
            item['ad_landing_page_url'] = json_result['url']
        yield item


