"""
Python爬虫架构主要由五个部分组成，分别是调度器、URL管理器，网页下载器，网页解析器，应用程序
调度器：相当于整个爬虫的CPU，负责协调URL管理器、下载器、解析器之间的工作
URL管理器：管理待爬取和已爬取的URL集合，防止重复抓取和循环抓取
网页下载器：根据URL下载网页内容并转为字符串（如urllib、requests）
网页解析器：解析网页字符串，提取有价值的数据和新的URL
应用程序：对从网页中提取出的有用数据加以组织和利用的部分

Scrapy爬虫框架：Scrapy，Python开发的一个快速、高层次的屏幕抓取和web抓取框架，用于抓取web站点并从页面中提取结构化的数据。
Scrapy用途广泛，可以用于数据挖掘、监测和自动化测试。
Scrapy吸引人的地方在于它是一个框架，任何人都可以根据需求方便的修改。
"""
import scrapy
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings


class SkuItem(scrapy.Item):
    """Container for one scraped product (SKU)."""

    # Product title text, whitespace-stripped by the spider.
    name = scrapy.Field()
    # Displayed price with the leading currency symbol removed.
    price = scrapy.Field()
    # List of gallery image URLs, rewritten by the spider to a larger size.
    image_urls = scrapy.Field()


class GomeSpider(Spider):
    """Spider for gome.com.cn keyword-search result pages.

    Requests the search-list JSON API for pages 1..39 of a keyword.
    Run as ``scrapy crawl gome -a keyword=...``; defaults to "手机".
    """

    name = 'gome'

    # Search-list API endpoint; returns JSON for one keyword/page pair.
    page_api = "https://search.gome.com.cn/search?question={keyword}&page={page}&type=json&aCnt=0"
    # start_urls = ['https://www.amazon.cn/s?k=%E9%9B%80%E5%B7%A2&__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&ref=nb_sb_noss_1']

    # Request headers captured from a browser session.  The HTTP/2
    # pseudo-headers of the original (":authority", ":method", ":path",
    # ":scheme") were removed: they are not real header fields -- the HTTP
    # stack derives them from the request URL -- and the original ":path"
    # was hard-coded to a different keyword than the one actually requested.
    # NOTE(review): "referer" is still hard-coded to the 洗衣机 search page
    # regardless of the active keyword -- confirm whether the API cares.
    LIST_HEADERS = {
        "accept": "application/json, text/javascript, */*; q=0.01",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9",
        "referer": "https://search.gome.com.cn/search?question=%E6%B4%97%E8%A1%A3%E6%9C%BA&searchType=goods&search_mode=normal&reWrite=true",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
        "x-requested-with": "XMLHttpRequest"
    }

    # Session cookies captured from the same browser session; presumably
    # required so the search API treats the request as an established
    # session -- values will expire and need refreshing.
    LIST_COOKIES = {
        "uid": "CjozJ1zQ9cyQNb0NCwLBAg==",
        "cartnum": "0_0-1_0",
        "compare": "",
        "atgregion": "11010200%7C%E5%8C%97%E4%BA%AC%E5%8C%97%E4%BA%AC%E5%B8%82%E6%9C%9D%E9%98%B3%E5%8C%BA%E6%9C%9D%E5%A4%96%E8%A1%97%E9%81%93%7C11010000%7C11000000%7C110102002",
        "s_cc": "true",
        "gpv_p22": "no%20value",
        "DSESSIONID": "54340c2ec771458eb4e34c77d7f92376",
        "_idusin": "80076334886",
        "s_ev13": "%5B%5B'sem_baidu_cpc_yx_pc21_%25u901A%25u7528%25u8BCD-%25u5730%25u57DF-%25u5168%25u56FD_%25u5730%25u57DF-%25u4E3B%25u8BCD1_%25u6E56%25u5357%25u8D2D%25u7269%25u7F51%25u7AD9'%2C'1557198315573'%5D%2C%5B'sem_baidu_pinpai_yx_pc_bt'%2C'1557458156393'%5D%5D",
        "route": "2dbe4187f565101d7a8af62de1b9dab0",
        "gradeId": "-1",
        "proid120517atg": "%5B%229134520865-1123460337%22%2C%229140129042-1130652885%22%2C%22A0006359011-pop8010769376%22%2C%22A0006520651-pop8012473876%22%2C%22A0006520647-pop8012473849%22%2C%22A0006520639-pop8012473803%22%5D",
        "sensorsdata2015jssdkcross": "%7B%22distinct_id%22%3A%2216a9040961e4cb-0207626da1e784-58422116-2073600-16a9040961f372%22%2C%22%24device_id%22%3A%2216a9040961e4cb-0207626da1e784-58422116-2073600-16a9040961f372%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22_latest_cmpid%22%3A%22sem_baidu_pinpai_yx_pc_bt%22%7D%7D",
        "gpv_pn": "no%20value",
        "_index_ad": "1",
        "s_getNewRepeat": "1557459323852-Repeat",
        "s_sq": "gome-prd%3D%2526pid%253Dhttps%25253A%25252F%25252Fsearch.gome.com.cn%25252Fsearch%25253Fquestion%25253D%252525E6%25252589%2525258B%252525E6%2525259C%252525BA%252526searchType%25253Dgoods%252526search_mode%25253Dnormal%252526reWrite%25253Dtrue%2526oid%253Djavascript%25253Avoid(0)%2526ot%253DA",
        "s_ppv": "-%2C14%2C14%2C949"
    }

    def __init__(self, keyword=None, **kwargs):
        """Store the search keyword; fall back to "手机" when none is given.

        Bug fix: the original only assigned ``self.keyword`` when *keyword*
        was falsy, so a caller-supplied keyword was silently ignored and the
        attribute was missing entirely, crashing ``start_requests``.
        """
        super().__init__(**kwargs)
        self.keyword = keyword or "手机"

    def start_requests(self):
        """Yield one list-page request per result page (1..39).

        Headers and cookies are loop-invariant, so they are taken from the
        class-level constants instead of being rebuilt every iteration.
        """
        for page in range(1, 40):
            yield scrapy.Request(
                url=self.page_api.format(keyword=self.keyword, page=page),
                headers=self.LIST_HEADERS,
                cookies=self.LIST_COOKIES,
                callback=self.parse_list,
            )

    def parse_list(self, response):
        """Debug-print the list-page URL and the total page count node."""
        print('parse_list', response.url)
        print('page_total', response.xpath('//*[@id="min-pager-number"]/text()').extract_first())

    def parse_detail(self, response):
        """Build a :class:`SkuItem` from a product detail page.

        NOTE(review): these element ids ("productTitle",
        "priceblock_ourprice", "altImages") match Amazon product pages, not
        gome -- likely leftover from the commented-out Amazon start_urls;
        confirm against the pages this callback actually receives.
        """
        item = SkuItem()
        # Guard against missing nodes: extract_first() returns None and the
        # original chained .strip()/.lstrip() would raise AttributeError.
        name = response.xpath('//span[@id="productTitle"]/text()').extract_first()
        item['name'] = name.strip() if name else None
        price = response.xpath('//span[@id="priceblock_ourprice"]/text()').extract_first()
        item['price'] = price.lstrip('￥') if price else None
        image_urls = response.xpath('//*[@id="altImages"]//img/@src').extract()
        # Preserves original behavior: the last thumbnail is skipped and the
        # trailing 28 chars (size suffix) are replaced by a larger variant.
        item["image_urls"] = [url[:-28] + 'SX600.jpg' for url in image_urls[:-1]]
        yield item


if __name__ == "__main__":
    # Load the project's scrapy settings, force cookie handling on so the
    # hard-coded session cookies are actually sent, then run the spider.
    project_settings = get_project_settings()
    project_settings.set("COOKIES_ENABLED", True)
    process = CrawlerProcess(settings=project_settings)
    process.crawl(GomeSpider)
    process.start()
