# -*- coding: utf-8 -*-
# @Time    : 2019/4/10 11:08
# @Author  : zjj
# @Email   : 1933860854@qq.com
# @File    : baiDuMobileMainSpider.py
# @Software: PyCharm
import random
import re
import time
import urllib
import urllib.request  # needed explicitly: `import urllib` alone does not load the submodule
from urllib.parse import quote

import scrapy

from iQiYiSpider.items import baiDuMobileItem
from iQiYiSpider.userAgent import USER_AGENT_LIST
from iQiYiSpider.util import keywordUtil, cityUtil, domainUtil, companyUtil
class baiDuMobileMainSpider(scrapy.Spider):
    """Crawl Baidu mobile search results and extract company names/URLs.

    Flow: seed one random "<city><keyword>" query, parse each results page
    for ad/organic result blocks, resolve Baidu redirect links to the real
    landing URL, heuristically pull a company name out of the landing page,
    then endlessly re-seed the crawl with fresh random keywords.
    """
    name = 'baidu_mobile_company'
    allowed_domains = ['m.baidu.com']
    # Initial query: random city + random keyword, spaces stripped.
    keyword = (random.choice(cityUtil())+random.choice(keywordUtil())).replace(" ", "")
    start_urls = ['http://m.baidu.com/s?word='+quote(keyword)]
    headers = {
        'Accept': 'application/raw',
        # 'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cookie': 'BAIDUID=4DF92DDB137385DE9A7344D7AAD77D32:FG=1; BIDUPSID=4DF92DDB137385DE9A7344D7AAD77D32; PSTM=1554859253; delPer=0; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; rsv_i=dfb8c6DWt7mV9P7fv8YmzZVMoeZBzZiF5kRinfgmdMoXVausk%2F7LBV8PmnHMlV174Lxe1Fj2CjxkDVNKuK0Str4hiI9Pzks; FEED_SIDS=400753_0410_10; plus_lsv=393c3756be30db54; plus_cv=1::m:49a3f4a6; SE_LAUNCH=5%3A25914425_0%3A25914425; BDICON=10123156; H_WISE_SIDS=130591_126887_129632_129657_120173_118888_118876_118852_118835_118788_129565_107313_129944_129751_130156_130121_122034_130222_117328_130347_117429_129651_130690_129008_130320_128967_129620_129838_130988_129901_129646_124030_130716_110085_127969_123289_130758_130051_127316_128600_128201_100457; lsv=globalTjs_3aec804-wwwTcss_fcf1ef3-wwwTcss_fcf1ef3-wwwBcss_020c45a-framejs_b657f9d-atomentryjs_689bd71-globalBjs_69f906c-sugjs_2c6c63d-wwwjs_d544348; MSA_WH=1920_937; MSA_PBT=148; MSA_ZOOM=1000; shifen[106292006695_42891]=1554866167; BCLID=8641068183616601460; BDSFRCVID=Qa8OJeC62AnBh4b9GJHNhhofLeK8gM7TH6M7Z9gna2tcekzMNvgIEG0Pef8g0KubhykWogKK3gOTH4DF_2uxOjjg8UtVJeC6EG0P3J; H_BDCLCKID_SF=tJPHoKPXtID3q5rlMtQoq4D_MfOtetJyaR3I3CnvWJ5TMCoqhx5sj-j-QP67Wtv3aKcr0lky5hCbShPCXjJAet-H-H5J5-QWMejwLJ3Y3l02Vb5Ie-t2ynLVMhrWL-RMW20e0h7mWIbUsxA45J7cM4IseboJLfT-0bc4KKJxthF0HPonHjK2D5Jb3j; wpr=0; PSINO=6; ysm=8441|8441; BDSVRBFE=Go; wise_tj_cl=i@0|v@1|sInfo@1920_1080_1920_1040_1920_370|fInfo@0_0_0_0|dpr@1; COOKIE_SESSION=1194_0_0_2_1_t1_9_1_5_0_0_1_1_1554866835%7C2%230_0_0_0_0_0_0_0_1554865641%7C1',
        'Host': 'm.baidu.com',
        'Proxy-Connection': 'keep-alive',
        'User-Agent': random.choice(USER_AGENT_LIST),
        'X-Requested-With': 'XMLHttpRequest',
    }

    # Kept for backward compatibility; note its "t" timestamp is frozen at
    # import time. Runtime requests use _build_payload() for a fresh payload.
    body = {"platform": "wise", "ms": "1", "IsAble": "1", "rset": "rcmd", "word": keyword, "qid": "7892380415749188182", "rq": keyword, "from": "0", "baiduid": "4DF92DDB137385DE9A7344D7AAD77D32:FG=1", "tn": "", "clientWidth": "1903", "t": str(int(time.time())), "r": "6565"}

    @staticmethod
    def _build_payload(keyword):
        """Serialize the wise-search request body for *keyword*.

        Returns 'data=<url-quoted dict>' — the same shape as the class-level
        ``body`` attribute, but with a fresh timestamp.
        """
        body_dict = {"platform": "wise", "ms": "1", "IsAble": "1", "rset": "rcmd",
                     "word": keyword, "qid": "7892380415749188182", "rq": keyword,
                     "from": "0", "baiduid": "4DF92DDB137385DE9A7344D7AAD77D32:FG=1",
                     "tn": "", "clientWidth": "1903", "t": str(int(time.time())),
                     "r": "6565"}
        # str(dict) yields single quotes and spaces; normalise to the compact
        # double-quoted form the endpoint expects before URL-quoting.
        return 'data='+(quote(str(body_dict).replace(" ", "").replace("'", '"')))

    def start_requests(self):
        """Yield the single seed request.

        BUG FIX: this used to be a `while True` loop yielding the same URL
        with dont_filter=False — every request after the first was dropped by
        the dupe filter, so the loop only spun forever. One seed is enough:
        parse() always schedules a follow-up query with a fresh keyword.
        """
        yield scrapy.Request(self.start_urls[0], headers=self.headers,
                             body=self._build_payload(self.keyword),
                             callback=self.parse, dont_filter=False)

    def parse(self, response):
        """Parse one results page.

        Emits one item per result block that yields a plausible company name,
        follows pagination while the offset is < 30, and always queues a new
        random-keyword search (also on error, so the crawl never stalls).
        """
        try:
            print('-----------开始解析-----------------')
            # Organic results carry a 5-digit @tplid; ads use class ec_ad_results.
            result_divs = response.xpath(r'//*[re:match(@tplid, "^\d{5}$")] | //*[re:match(@class, "^ec_ad_results$")]')
            next_page = response.xpath('//div[@id="page-controller"]/div/a/@href').extract()
            for result_div in result_divs:
                item = baiDuMobileItem()
                item['company_title'] = None
                item['company_name'] = None
                item['company_url'] = None
                item['company_domain'] = None

                company_title = result_div.xpath('.//div/a/h3[@cid="103"]')
                company_url = result_div.xpath('.//div/div/a[@cid="105"]/@href').extract()
                title_url = result_div.xpath('.//div/div/a[@class="c-blocka ec_title"]/@href').extract()
                domain = result_div.xpath('.//div/a/span[@class="c-showurl"]/text()').extract()

                if domain:
                    item['company_domain'] = domain[0]
                # Prefer the cid=105 redirect link; fall back to the title link.
                redirect_url = company_url[0] if company_url else (title_url[0] if title_url else None)
                if redirect_url:
                    item['company_url'] = self.get_real_url(redirect_url)
                if company_title:
                    item['company_title'] = company_title[0].xpath('string(.)').extract()[0]

                # get_real_url may return None; len > 6 weeds out junk hrefs.
                if item['company_url'] and len(item['company_url']) > 6:
                    item['company_name'] = self.get_company_name(item['company_url'])
                    if item['company_name']:
                        # Drop results whose URL sits on a blacklisted domain.
                        if not any(blocked in item['company_url'] for blocked in domainUtil()):
                            yield item
                    # yield scrapy.Request(item['company_url'], headers=self.headers, callback=self.parseDetail, meta={"mobile_item": item}, dont_filter=True)

            if next_page:
                # href looks like ".../s?pn=10&...": pull the pn offset.
                pn = next_page[0].split('?')[1].split('&')[0].split('=')[1]
                if int(pn) < 30:
                    print('-------爬取下一页: ' + next_page[0])
                    # NOTE(review): the payload still carries the original seed
                    # keyword, not the keyword this page belongs to — confirm
                    # whether the endpoint actually reads it.
                    yield scrapy.Request(next_page[0], headers=self.headers,
                                         body=self._build_payload(self.keyword),
                                         dont_filter=False, callback=self.parse)

            # Always re-seed with a brand-new random query.
            keyword_two = random.choice(cityUtil())+random.choice(keywordUtil())
            print('keyword_two: ' + keyword_two)
            yield scrapy.Request('http://m.baidu.com/s?word='+quote(keyword_two),
                                 headers=self.headers,
                                 body=self._build_payload(keyword_two),
                                 callback=self.parse, dont_filter=False)
        except Exception as e:
            # Best-effort crawler: log and keep going with a fresh keyword.
            print(e)
            keyword_except = random.choice(cityUtil())+random.choice(keywordUtil())
            print('keyword_except: ' + keyword_except)
            yield scrapy.Request('http://m.baidu.com/s?word='+quote(keyword_except),
                                 headers=self.headers,
                                 body=self._build_payload(keyword_except),
                                 callback=self.parse, dont_filter=False)

    def parseDetail(self, response):
        """Fill company_name from the landing page <title>.

        Currently unscheduled (the request that targets it is commented out
        in parse()); kept for when that path is re-enabled.
        """
        mobile_item = response.meta['mobile_item']
        company_name = response.xpath('.//head/title/text()').extract()
        if company_name:
            mobile_item['company_name'] = company_name[0]
        yield mobile_item

    def get_real_url(self, url):
        """Resolve a Baidu redirect *url* to the real landing URL.

        The redirect page embeds the target inside a JS call such as
        window.location.replace("http://..."); we grab the first
        parenthesised argument (non-greedy). Returns *url* unchanged when no
        parenthesised value is found, or None on any fetch/decode error.
        """
        try:
            req = urllib.request.Request(url, None, self.headers)
            response = urllib.request.urlopen(req)
            html = response.read().decode("utf-8", "replace")
            result = re.findall(r'[(](.*?)[)]', html, re.S)  # minimal match
            if result:
                return result[0].strip('"')
            return url
        except Exception as e:
            print('get_real_url: ' + str(e))
            print('get_real_url:' + url)
            return None

    def get_company_name(self, url):
        """Fetch *url* and heuristically extract a Chinese company name.

        A candidate is any tag-text fragment that ends in '公司', is 11-19
        chars long, contains no '*', no digit, no blacklisted word from
        companyUtil(), and mentions a known city. Returns the first match,
        or None when nothing qualifies or the fetch fails.
        """
        try:
            header = {'User-Agent': random.choice(USER_AGENT_LIST)}
            req = urllib.request.Request(url, None, header)
            response = urllib.request.urlopen(req)
            html = response.read().decode("utf-8", "replace")
            result = re.findall(r'[>](.*?)[<]', html, re.S)  # text between tags, minimal match
            if not result:
                return None
            company_filter = companyUtil()
            list_city = cityUtil()
            # Split on common separators/punctuation (the stray '|'s make '|'
            # a separator too — preserved from the original).
            split_pattern = r'[?|&|\_|\-|\r|\n|：|，|"|,|©|:|。|.|\t|;|、|；|@|\s]'
            for fragment in result:
                for s in re.split(split_pattern, fragment):
                    if not (('公司' in s) and s.endswith('公司')
                            and 10 < len(s) < 20 and ('*' not in s)):
                        continue
                    # BUG FIX: the original iterated the filter words with a
                    # continue/else pair that never actually excluded a
                    # candidate — a blacklisted name slipped through as soon
                    # as any OTHER filter word was absent.
                    if any(word in s for word in company_filter):
                        continue
                    if re.search(r'[0-9]', s):
                        continue
                    for city in list_city:
                        if city in s:
                            return s
            return None
        except Exception as e:
            print('get_company_name: ' + str(e))
            print('get_company_name: ' + url)
            return None





if __name__ == '__main__':
    # Convenience entry point: running this module directly is equivalent to
    # invoking `scrapy crawl baidu_mobile_company` from the project root.
    from scrapy import cmdline
    cmdline.execute("scrapy crawl baidu_mobile_company".split())

