# -*- coding: utf-8 -*-
# @Time    : 2018/11/29 11:39
# @Author  : zjj
# @Email   : 1933860854@qq.com
# @File    : baiDuMainSpider.py
# @Software: PyCharm
import scrapy
import re
import time
import random
from iQiYiSpider.items import BaiDuSpiderItem
from iQiYiSpider.util import keywordUtil, cityUtil
from iQiYiSpider.userAgent import USER_AGENT_LIST
'''
    爬取百度广告 — crawls advertisement entries from Baidu search result pages.
'''
class BaiDuScrapySpider(scrapy.Spider):
    """Continuously crawl Baidu search-result pages for advertisement entries.

    A random "<city><keyword>" query seeds the first search.  Ad result
    blocks on each page become ``BaiDuSpiderItem`` objects; when an ad links
    to an advertiser credential page, a second-level request fills in the
    detail fields via :meth:`parseDetails`.  After the results offset
    reaches 20 a fresh random query is started, so the spider never stops.
    """
    name = 'BaiDuSpider'
    allowed_domains = ['www.baidu.com']
    # Seed query: random city + random keyword; spaces stripped for the URL.
    spider_keyword = random.choice(cityUtil()) + random.choice(keywordUtil())
    start_urls = ['http://www.baidu.com/s?ie=utf-8&tn=baidu&wd=' + spider_keyword.replace(' ', '')]

    def start_requests(self):
        """Yield the seed search request forever with a rotating User-Agent.

        The infinite loop keeps re-queueing the start URL so the crawl never
        runs dry; ``dont_filter=True`` bypasses Scrapy's duplicate filter,
        which would otherwise drop the repeated requests.
        """
        while True:
            for url in self.start_urls:
                headers = {
                    # Rotate the UA per request to reduce blocking risk.
                    "User-Agent": random.choice(USER_AGENT_LIST),
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Accept-Language': 'zh-CN,zh;q=0.9',
                    'Cache-Control': 'max-age=0',
                    'Connection': 'keep-alive',
                    # NOTE(review): hard-coded session cookie — presumably
                    # copied from a browser; it will expire. Confirm it is
                    # still required.
                    'Cookie': 'BAIDUID=0A60D643AE075F529985162093AA2B82:FG=1; BIDUPSID=0A60D643AE075F529985162093AA2B82; PSTM=1543559037; H_PS_PSSID=1428_21086_27914_27542; BD_UPN=12314753; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BDSFRCVID=NekOJeC62GIhetO90QEKhhE_VgOs-7cTH6aoJwuRQFdrjjxq3AY7EG0PqM8g0Ku-O3M3ogKK3gOTH4DF_2uxOjjg8UtVJeC6EG0P3J; H_BDCLCKID_SF=JJADVCIMf-3bfTrnM-r_-DuJ-N_tqI62aKDs2J5oBhcqEIL4QUrsbJ0geH3I0xn32gTd-J3v-5CbHUbSj4QoyR-00RQlKnjB2jbDohjwJh5nhMJlXj7JDMP0qtPeW6Jy523iob3vQpPMshQ3DRoWXPIqbN7P-p5Z5mAqKl0MLIOkbRO4-TFWD5cL3D; H_PS_645EC=17b63aRaCp3%2BbrrPlxpAsL7MuR5vzb%2Faqvyto9e9FSN8D4KdmlEC9AzCnBU; delPer=0; BD_CK_SAM=1; PSINO=6; BDSVRTM=180',
                    'Host': 'www.baidu.com',
                    # HTTP header values are strings; the original int 1
                    # relied on implicit conversion.
                    'Upgrade-Insecure-Requests': '1',
                }
                print('-----当前搜索的url为：', url)
                try:
                    yield scrapy.Request(url, callback=self.parse, headers=headers, dont_filter=True)
                except Exception as e:
                    print(e)
                    break

    def parse(self, response):
        """Parse one result page: extract ad blocks, then follow pagination.

        Ad containers are the elements whose ``id`` is exactly four digits
        (Baidu's marker for promoted results).  Items with a detail link are
        completed asynchronously in :meth:`parseDetails`; the rest are
        yielded immediately with their detail fields set to ``None``.
        """
        try:
            print('-----------开始解析-----------------')

            # Raw string: \d in a plain literal is an invalid escape
            # sequence (SyntaxWarning on Python >= 3.12).
            result_divs = response.xpath(r'//*[re:match(@id, "^\d{4}$")]')
            next_page = response.xpath('//div[@id="page"]/a/@href').extract()
            for result_div in result_divs:
                baidu_ad = BaiDuSpiderItem()
                url_date = result_div.xpath('.//div/a/span/text()').extract()
                # Guard each index separately: the original checked only
                # len > 0 and then read url_date[1], raising IndexError
                # whenever just the URL span was present.
                baidu_ad['company_url'] = url_date[0] if len(url_date) > 0 else None
                baidu_ad['company_date'] = url_date[1] if len(url_date) > 1 else None
                titles = result_div.xpath('.//div/a/font/text()').extract()
                # join() replaces the quadratic += concatenation loop.
                baidu_ad['company_title'] = ''.join(titles) if titles else None
                second_url = result_div.xpath('.//div/span/a/@href').extract()
                if second_url:
                    print('----需要二级爬取的url：', second_url)
                    yield scrapy.Request(
                        url=second_url[0].replace('https://', 'http://'),
                        callback=self.parseDetails,
                        encoding='utf-8',
                        meta={"baidu_ad": baidu_ad},
                        dont_filter=True
                    )
                else:
                    # No detail page: emit the item with empty detail fields
                    # (company_url is deliberately cleared, as before).
                    for field in ('company_type', 'company_url', 'company_name',
                                  'company_record', 'company_bussines'):
                        baidu_ad[field] = None
                    yield baidu_ad
            if len(next_page) > 1:
                last_href = next_page[-1]
                next_page_url = 'http://www.baidu.com' + last_href
                # Results offset is the second query parameter of the
                # "next" link, e.g. ...&pn=10&... — TODO confirm this layout
                # is stable; the split-based parse assumes it.
                pn = last_href.split('&')[1].split('=')[1]
                if int(pn) < 20:
                    print('-----------------开始爬取下一页(', pn, ')：', next_page_url)
                    yield scrapy.Request(
                        url=next_page_url,
                        callback=self.parse,
                        dont_filter=True
                    )
                else:
                    # Past two result pages: restart with a new random query.
                    start_new_url = ('http://www.baidu.com/s?ie=utf-8&tn=baidu&wd='
                                     + random.choice(cityUtil()) + random.choice(keywordUtil()))
                    print('----开始新的爬取url：', start_new_url)
                    yield scrapy.Request(
                        url=start_new_url,
                        callback=self.parse,
                        dont_filter=True
                    )

        except Exception as e:
            print('终止解析=============================================', e)

    def parseDetails(self, response):
        """Fill advertiser detail fields from the Baidu credential page.

        Walks the rows of the second "content-table" element and maps each
        row label (``th`` text) onto the matching item field; fields with no
        matching row stay ``None``.  Always yields the (possibly partially
        filled) item.
        """
        baidu_ad = response.meta['baidu_ad']
        # Reset every detail field up front so missing rows end up as None
        # (replaces the two duplicated None-assignment blocks).
        for field in ('company_type', 'company_url', 'company_name',
                      'company_record', 'company_bussines'):
            baidu_ad[field] = None
        details_info = response.xpath('//*[re:match(@class, "^content-table$")]')
        company_name = response.xpath('//*[re:match(@class, "^c-gap-bottom$")]/text()').extract()
        if company_name:
            baidu_ad['company_name'] = company_name[0]
        if len(details_info) > 1:
            for tr in details_info[1].xpath('.//tr'):
                try:
                    th_texts = tr.xpath('.//th/text()').extract()
                    if not th_texts:
                        continue
                    # Extract and normalise once, instead of re-running the
                    # same xpath/extract for every label comparison.
                    label = th_texts[0].replace('\n', '').replace(' ', '')
                    td_texts = tr.xpath('.//td/text()').extract()
                    td_value = td_texts[0].replace('\n', '').replace(' ', '') if td_texts else None
                    # Labels are mutually exclusive, so elif preserves the
                    # original chain-of-ifs behavior.
                    if label == '机构类型：' and td_value is not None:
                        baidu_ad['company_type'] = td_value
                    elif label == '网站地址：':
                        hrefs = tr.xpath('.//td/a/@href').extract()
                        if hrefs:
                            baidu_ad['company_url'] = hrefs[0].replace('\n', '').replace(' ', '')
                    elif label == '备案编号：' and td_value is not None:
                        baidu_ad['company_record'] = td_value
                    elif label == '经营范围：':
                        div_texts = tr.xpath('.//td/div/text()').extract()
                        if div_texts:
                            baidu_ad['company_bussines'] = div_texts[0].replace('\n', '').replace(' ', '')
                    elif label == '网站名称：' and td_value is not None and baidu_ad['company_name'] is None:
                        # Fallback name only when the header gave none.
                        baidu_ad['company_name'] = td_value
                except Exception as e:
                    print('-----------------终止-----------------', e)
        yield baidu_ad

if __name__ == '__main__':
    # Convenience entry point: launch this spider through the Scrapy CLI.
    from scrapy import cmdline
    cmdline.execute(['scrapy', 'crawl', 'BaiDuSpider'])


