# -*- coding: utf-8 -*-
# @Time    : 2018/12/3 9:29
# @Author  : zjj
# @Email   : 1933860854@qq.com
# @File    : iFengMainSpider.py
# @Software: PyCharm
import random
import time
import urllib
import urllib.request  # `import urllib` alone does not bind the `request` submodule in Python 3

import scrapy

from iQiYiSpider.items import IFengSpiderItem
from iQiYiSpider.userAgent import USER_AGENT_LIST
'''
    Crawl advertisement banners from ifeng.com (Phoenix News) channel pages.
'''
class iFengScrapySpider(scrapy.Spider):
    """Spider that crawls ifeng.com channel pages and extracts ad banners.

    It endlessly requests a randomly chosen ifeng channel, locates ad
    containers by their inline ``style`` attribute, records the ad image,
    the ad link, and the link's final redirect target, then follows every
    in-page ``<a href>`` to keep crawling.
    """
    name = 'iFengSpider'
    allowed_domains = ['news.ifeng.com', 'v.ifeng.com', 'zhibo.ifeng.com', 'finance.ifeng.com', 'ent.ifeng.com', 'fashion.ifeng.com', 'auto.ifeng.com', 'house.ifeng.com', 'tech.ifeng.com', 'book.ifeng.com', 'games.ifeng.com', 'culture.ifeng.com']
    start_urls = ['http://news.ifeng.com']
    # NOTE(review): the User-Agent is chosen once at class-definition time,
    # so every request in this process shares the same UA — confirm whether
    # per-request rotation was intended.
    headers = {
        "User-Agent": random.choice(USER_AGENT_LIST),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': 'prov=cn020; city=020; weather_city=gd_gz; region_ip=113.66.104.148; region_ver=1.30; ifengRotator_AP544=0; ifengRotator_Ap1527=0; ifengRotator_Ap1139=0; userid=1543800110855_aen3ig5973; ifengWindowCookieNamenews=2; ifengRotator_ArpAdPro_1019=0; ifengRotator_iis3_c=3; ifengRotator_AP573=0; ifengRotator_AP6443=0; ifengRotator_iis3=12; ifengRotator_AP940=0',
        'Host': 'news.ifeng.com',
        # HTTP header values must be strings, not ints.
        'Upgrade-Insecure-Requests': '1',
    }

    def start_requests(self):
        """Endlessly yield requests to a random ifeng channel.

        Replaces the ``news`` subdomain of each start URL with a random
        channel name on every iteration; ``dont_filter=True`` allows
        revisiting the same page.
        """
        channels = ['news', 'v', 'zhibo', 'finance', 'ent', 'fashion', 'auto', 'house', 'tech', 'book', 'games', 'culture']
        while True:
            time.sleep(1)  # crude throttle between generated requests
            for url in self.start_urls:
                spider_url = url.replace('news', random.choice(channels))
                print('-----当前搜索的url为：', spider_url, '---------------')
                try:
                    yield scrapy.Request(spider_url, callback=self.parse,
                                         headers=self.headers, dont_filter=True)
                except Exception as e:
                    # A throw from the consumer ends this batch of requests.
                    print(e)
                    break

    def parse(self, response):
        """Extract ad items from *response* and follow in-page links.

        Yields ``IFengSpiderItem`` objects (ad image, ad URL, resolved
        redirect URL) and new ``scrapy.Request`` objects for every
        plausible ``<a href>`` on the page.
        """
        try:
            # Ad containers are identified by their characteristic inline style.
            result_div = response.xpath('.//div[re:match(@style, "^clear:both;position:relative;margin:0 auto;*")]')
            result_next = response.xpath('.//a/@href').extract()

            for result in result_div:
                infos = result.xpath('.//a')
                # The second <a> inside the container carries the image and
                # link.  Guard with > 1: the original `> 0` check raised
                # IndexError on infos[1] for single-anchor containers, which
                # aborted the whole parse via the outer except.
                if len(infos) > 1:
                    info = infos[1]
                    item = IFengSpiderItem()
                    item['ad_img'] = info.xpath('.//img/@src').extract()[0]
                    item['ad_url'] = info.xpath('./@href').extract()[0]
                    req = urllib.request.Request(item['ad_url'], None, {"User-Agent": random.choice(USER_AGENT_LIST),
                                                                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                                                                        'Accept-Encoding': 'gzip, deflate, br',
                                                                        'Accept-Language': 'zh-CN,zh;q=0.9',
                                                                        'Cache-Control': 'max-age=0',
                                                                        'Connection': 'keep-alive'
                                                                        })
                    # NOTE(review): this blocking urlopen stalls Scrapy's
                    # event loop; consider yielding a scrapy.Request and
                    # reading the redirect chain instead.
                    item['ad_real_url'] = urllib.request.urlopen(req).geturl()
                    yield item

            for next_url in result_next:
                # Skip in-page anchors and javascript: pseudo-links.
                if '#' in next_url or 'java' in next_url:
                    continue
                if len(next_url) > 7:
                    # Normalize scheme-relative / https links to plain http.
                    yield scrapy.Request(
                        url=('http:' + next_url.replace('http:', '').replace('https:', '')).replace('///', '//'),
                        callback=self.parse,
                        dont_filter=True
                    )
        except Exception as e:
            print('----------------终止解析------------------', e)