from urllib.parse import quote

import scrapy
from scrapy.loader import ItemLoader

from week1.items import QCCItem


# Crawl company information from qcc.com (Qichacha).
class Day02Spider(scrapy.Spider):
    """Search qcc.com for each configured company name and follow the
    first search hit through to the company's detail page.

    The session cookie in ``self.headers`` is copied from a logged-in
    browser session and will expire; refresh it when requests start
    being redirected to a login/captcha page.
    """

    name = 'day02'
    custom_settings = {
        'DOWNLOAD_DELAY': 0.01,    # polite delay between downloads
        'COOKIES_ENABLED': False,  # cookie is sent manually via self.headers
    }

    def __init__(self):
        super().__init__()
        # Company names to search for on qcc.com.
        self.kw_set = ['武汉唯理科技有限公司']
        # BUG FIX: the header names and values previously contained stray
        # spaces (e.g. 'accept - encoding', 'zh - CN, zh;q = 0.9'), which
        # made them invalid HTTP headers that the server ignores/rejects.
        self.headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
                      'image/avif,image/webp,image/apng,*/*;q=0.8,'
                      'application/signed-exchange;v=b3;q=0.9',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cache-control': 'max-age=0',
            'referer': 'https://www.qcc.com/',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389'
                          '.82 Safari/537.36',
            'cookie': 'QCCSESSID=kosmk1bje3i0hslga5vgj1n2n6; UM_distinctid=1784ea7a5904fc-0a8316f0ab9e23-57452418-1'
                      'fa400-1784ea7a5916eb; zg_did=%7B%22did%22%3A%20%221784ea7a6c5325-0c42034bc91065-57452418-1fa400'
                      '-1784ea7a6c6797%22%7D; hasShow=1; _uab_collina=161622731344513476220866; zg_5068e513cb8449879f83'
                      'e2a7142b20a6=%7B%22sid%22%3A%201616227335101%2C%22updated%22%3A%201616227463762%2C%22info%22%3A'
                      '%201616227335104%2C%22superProperty%22%3A%20%22%7B%5C%22%E5%BA%94%E7%94%A8%E5%90%8D%E7%A7%B0%5C'
                      '%22%3A%20%5C%22%E6%8B%9B%E6%8A%95%E6%A0%87WEB%E7%AB%AF%5C%22%7D%22%2C%22platform%22%3A%20%22%7B'
                      '%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22www.qcc.com%22%7D; CNZZDATA'
                      '1254842228=397388839-1616222751-null%7C1616228151; acw_tc=74d35ccf16162297529485507e9ab1d6ddd1'
                      'c516d15534097aadafac81; zg_de1d1a35bfa24ce29bbf2c7eb17e6c4f=%7B%22sid%22%3A%201616227313353%2'
                      'C%22updated%22%3A%201616229861850%2C%22info%22%3A%201616227313360%2C%22superProperty%22%3A%20%'
                      '22%7B%5C%22%E5%BA%94%E7%94%A8%E5%90%8D%E7%A7%B0%5C%22%3A%20%5C%22%E4%BC%81%E6%9F%A5%E6%9F%A5%E7%'
                      'BD%91%E7%AB%99%5C%22%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C'
                      '%22referrerDomain%22%3A%20%22diag.qichacha.com%22%2C%22cuid%22%3A%20%226775ff76b3c6cd2c470e912c'
                      '9a555f94%22%7D'
        }

    def start_requests(self):
        """Yield one search request per company keyword."""
        for company in self.kw_set:
            try:
                self.logger.info('Start crawling info for "%s".', company)
                # BUG FIX: URL-encode the (non-ASCII) company name so the
                # query string is a valid URL; raw concatenation produced
                # an unencoded Chinese query.
                start_url = 'https://www.qcc.com/web/search?key=' + quote(company)
                yield scrapy.Request(
                    url=start_url,
                    callback=self.parse,
                    meta={'keyword': company},   # carry keyword to parse()
                    headers=self.headers,
                    dont_filter=True,
                )
            except Exception:
                # Log the full traceback instead of silently swallowing it.
                self.logger.exception('Failed to request page for "%s".', company)

    def parse(self, response):
        """Follow the first search result to the company detail page."""
        keyword = response.meta['keyword']
        href = response.xpath('//div[@class="maininfo"]/a/@href').extract_first()
        if not href:
            # Layout change, empty result set, or anti-bot page: a None
            # href previously crashed scrapy.Request(url=None).
            self.logger.warning('No search result found for "%s".', keyword)
            return
        yield scrapy.Request(
            url=response.urljoin(href),  # tolerate relative hrefs
            callback=self.detail_parse,
            meta={'keyword': keyword},
            headers=self.headers,
            dont_filter=True,
        )

    def detail_parse(self, response):
        """Populate a QCCItem from the company detail page.

        Field extraction is not implemented yet; this only sets up the
        loader.
        """
        # BUG FIX: ItemLoader needs an item *instance* (the class object
        # was being passed), and a response to build selectors from.
        loader = ItemLoader(item=QCCItem(), response=response)
        # TODO: add loader.add_xpath(...) calls for each QCCItem field
        # and yield loader.load_item().











