# -*- coding: utf-8 -*-
import json
import re
import scrapy

from scrapy.crawler import CrawlerProcess
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, Compose, Join, Identity
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.project import get_project_settings

from common_spider.utils import get_config


class SkuItem(scrapy.Item):
    """Container for one scraped SKU (product) record.

    The field set is fixed for now; every site config maps its extraction
    rules onto these field names.
    """
    id = scrapy.Field()  # raw product id; later prefixed with the config name

    url = scrapy.Field()  # detail-page URL the item was scraped from

    website = scrapy.Field()  # site name, taken from the config
    keyword = scrapy.Field()  # search keyword used for this crawl
    trans_key = scrapy.Field()  # keyword alias / translation

    name = scrapy.Field(
        # the selector may match several whitespace-padded fragments:
        # join them into one string, then strip surrounding whitespace
        output_processor=Compose(Join(), lambda s: s.strip())
    )
    price = scrapy.Field()
    image_urls = scrapy.Field(
        # keep the full list; it is consumed by a downstream pipeline
        output_processor=Identity()
    )


class SkuLoader(ItemLoader):
    """ItemLoader whose default output is the first non-null value a
    selector returned.

    Fields needing different handling (``name``, ``image_urls``) override
    this via their own ``output_processor`` on the item field.
    """
    default_output_processor = TakeFirst()


class CommonSpider(CrawlSpider):
    """Config-driven crawler: extraction, pagination, price and variant
    rules all come from a per-site config loaded via ``get_config``.

    NOTE(review): several config values are executed with ``eval`` — the
    configs must come from a trusted source only.
    """
    name = 'common'

    # replaced per run before crawling starts (see the __main__ block)
    custom_settings = {}

    def __init__(self, config_name, keyword=None, trans_key=None, *args, **kwargs):
        """
        Initialise configuration and crawl rules.

        :param config_name: name of the config file to load
        :param keyword: search keyword; falls back to the config default
        :param trans_key: keyword alias; falls back to the config default
        """
        self.spider_name = config_name

        self.config = get_config(config_name)

        self.website = self.config['website']

        # resolve keyword / trans_key, falling back to config defaults
        if keyword is None:
            self.keyword = self.config['default_keyword']
        else:
            self.keyword = keyword
        if trans_key is None:
            self.trans_key = self.config['default_trans_key']
        else:
            self.trans_key = trans_key
        # TODO: if the start page exposes no next-page link, the next-page
        # URLs must be supplied explicitly (see start_requests/parse_page).

        # BUGFIX: format with the resolved self.keyword, not the raw
        # ``keyword`` argument, which is None when the default is used
        # (the old code also assigned start_urls twice).
        self.start_urls = [x.format(keyword=self.keyword) for x in self.config['start_urls']]
        print('start_urls', self.start_urls)

        self.rule_list = self.config['rules']
        # ``rules`` matches every URL on each crawled page that fits the
        # configured patterns; suited to whole-site crawling.
        self.rules = (
            # extract detail-page URLs from listing pages
            Rule(LinkExtractor(allow=self.rule_list['details']['allow'],
                               restrict_xpaths=self.rule_list['details']['restrict_xpath']),
                 callback='parse_item'),)
        self.is_next_url_existed = self.rule_list['next']['is_next_url_existed']
        print('is_next_url_existed', self.is_next_url_existed)

        if self.is_next_url_existed:
            # the listing pages expose a next-page link: follow it
            print('next rule: ', self.rule_list['next']['allow'], self.rule_list['next']['restrict_xpath'])
            self.rules += (
                Rule(LinkExtractor(allow=self.rule_list['next']['allow'],
                                   restrict_xpaths=self.rule_list['next']['restrict_xpath'],),
                     # callback=self.parse_next  # enable to debug this rule
                     ),)
        else:
            # No next-page URL in the search results: the config must supply
            # a paging endpoint and its increment rule; the URL template must
            # contain both the keyword and the page parameter.
            self.page_url = self.rule_list['next']['page_url']
            self.page_kwargs = self.rule_list['next']['page_kwargs']

            # TODO: listing pages often lazy-load half their data, but the
            # crawl rules cannot be made generic yet. Candidate fixes:
            # 1. accept custom code snippets, 2. JS rendering.
            # self.page_latter_url = self.config['page_latter_url']['url']

        # must run AFTER self.rules is assigned so CrawlSpider compiles them
        super(CommonSpider, self).__init__(*args, **kwargs)

    def parse_next(self, response):
        """Debug helper: verify the next-page rule still works.

        To use it, set ``callback=self.parse_next`` on the next-page Rule
        above.
        """
        print(self.is_next_url_existed, "下一页", response.url)

    def start_requests(self):
        """Issue the initial requests.

        When a next-page link exists the normal CrawlSpider rule flow
        handles pagination; otherwise parse_page builds every listing-page
        URL up front from the total page count.
        """
        if self.is_next_url_existed:
            for url in self.start_urls:
                yield scrapy.Request(url=url, )
        else:
            # no next-page URL: fetch the first page to read the total page
            # count, then construct and request all listing pages ourselves
            for url in self.start_urls:
                yield scrapy.Request(url=url, callback=self.parse_page)

    def parse_page(self, response):
        """
        Used when the site exposes no next-page URL: read the total page
        count from the first results page, then construct and request every
        listing-page URL directly.
        """
        self.logger.info('parse_start url: ' + response.url)

        # total page count, computed by a config-supplied expression
        # NOTE(review): eval of config-provided code — trusted configs only
        page_total = eval(self.rule_list['next']['page_total'])
        print('page_total:', page_total)

        for page in range(1, int(page_total) + 1):
            # the page-increment rule is an eval'd config expression
            page_url = self.page_url.format(keyword=self.keyword, page=eval(self.page_kwargs['page']))
            # TODO: honour per-config request options (method, referer,
            # headers, cookies, ...) instead of the hard-coded set below
            if self.rule_list['next'].get('headers'):
                # hard-coded gome.com.cn AJAX headers, used only when the
                # config flags that custom headers are required
                headers = {
                    ":authority": "search.gome.com.cn",
                    ":method": "GET",
                    ":path": "/search?search_mode=normal&reWrite=true&question=%E6%B4%97%E8%A1%A3%E6%9C%BA&searchType=goods&&page={}&type=json&aCnt=0&reWrite=true".format(page),
                    ":scheme": "https",
                    "accept": "application/json, text/javascript, */*; q=0.01",
                    "accept-encoding": "gzip, deflate, br",
                    "accept-language": "zh-CN,zh;q=0.9",
                    "cookie": "uid=CjozJ1zQ9cyQNb0NCwLBAg==; cartnum=0_0-1_0; s_ev13=%5B%5B'sem_baidu_cpc_yx_pc21_%25u901A%25u7528%25u8BCD-%25u5730%25u57DF-%25u5168%25u56FD_%25u5730%25u57DF-%25u4E3B%25u8BCD1_%25u6E56%25u5357%25u8D2D%25u7269%25u7F51%25u7AD9'%2C'1557198315573'%5D%5D; compare=; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2216a9040961e4cb-0207626da1e784-58422116-2073600-16a9040961f372%22%2C%22%24device_id%22%3A%2216a9040961e4cb-0207626da1e784-58422116-2073600-16a9040961f372%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22_latest_cmpid%22%3A%22sem_baidu_cpc_yx_pc21_%E9%80%9A%E7%94%A8%E8%AF%8D-%E5%9C%B0%E5%9F%9F-%E5%85%A8%E5%9B%BD_%E5%9C%B0%E5%9F%9F-%E4%B8%BB%E8%AF%8D1_%E6%B9%96%E5%8D%97%E8%B4%AD%E7%89%A9%E7%BD%91%E7%AB%99%22%7D%7D; proid120517atg=%5B%229140129042-1130652885%22%2C%22A0006359011-pop8010769376%22%2C%22A0006520651-pop8012473876%22%2C%22A0006520647-pop8012473849%22%2C%22A0006520639-pop8012473803%22%5D; s_cc=true; gpv_p22=no%20value; atgregion=11010200%7C%E5%8C%97%E4%BA%AC%E5%8C%97%E4%BA%AC%E5%B8%82%E6%9C%9D%E9%98%B3%E5%8C%BA%E6%9C%9D%E5%A4%96%E8%A1%97%E9%81%93%7C11010000%7C11000000%7C110102002; DSESSIONID=c197b79bdef947d0a8f986568b0c164f; _idusin=80082039617; route=bbc92a9674ecf2078e553831d27ce2ad; _index_ad=0; gradeId=-1; gpv_pn=no%20value; s_getNewRepeat=1557392037597-Repeat; s_sq=gome-prd%3D%2526pid%253Dhttps%25253A%25252F%25252Fsearch.gome.com.cn%25252Fsearch%25253Fquestion%25253D%252525E6%252525B4%25252597%252525E8%252525A1%252525A3%252525E6%2525259C%252525BA%252526searchType%25253Dgoods%252526search_mode%25253Dnormal%252526reWrite%25253Dtrue%2526oid%253Djavascript%25253Avoid(0)%2526ot%253DA; s_ppv=-%2C20%2C20%2C1506",
                    "referer": "https://search.gome.com.cn/search?question=%E6%B4%97%E8%A1%A3%E6%9C%BA&searchType=goods&search_mode=normal&reWrite=true",
                    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
                    "x-requested-with": "XMLHttpRequest"
                }
                print('headers', headers)
                yield scrapy.Request(url=page_url, headers=headers)
            else:
                yield scrapy.Request(url=page_url)
            # TODO: the ~30 lazily-loaded items per listing page should also
            # be requested here, but their request/parse rules differ

    def parse_item(self, response):
        """Extract one SKU from a detail page, driven by the 'item' section
        of the config; optionally follow the price API and variant pages.
        """
        self.logger.info(response.url)

        # fixed item schema; the config decides how each field is filled
        item_loader = SkuLoader(item=SkuItem(), response=response)
        item_loader.add_value('url', response.url)
        for key, value in self.config.get('item').items():
            if value.get('method') == 'xpath':
                item_loader.add_xpath(key, value['value'])
            elif value.get('method') == 'value':
                item_loader.add_value(key, value['value'])
            elif value.get('method') == 'eval':
                # NOTE(review): eval of config-provided code — trusted configs only
                item_loader.add_value(key, eval(value['value']))
            elif value.get('method') == 'css':
                item_loader.add_css(key, value['value'])
        # prefix the raw id with the config name so ids are unique per site
        item_loader.replace_value('id', self.spider_name + "_" + str(item_loader.load_item()['id']))

        # ============ optionally rewrite the image URLs ====================
        # BUGFIX: use {} fallbacks so a config without an 'image_urls' entry
        # no longer crashes with AttributeError on the chained .get() calls
        image_format = self.config.get('item', {}).get('image_urls', {}).get('format')
        if image_format:
            # the format rule is an eval'd expression over each image_url
            images_temp = [eval(image_format) for image_url in
                           item_loader.load_item()['image_urls']]
            # replace image_urls with the rewritten list
            item_loader.replace_value('image_urls', images_temp)

        # ==== call the price API when configured, else emit the item now ===
        if self.rule_list.get('price'):
            # the already-extracted id (or the url) feeds the price request
            # NOTE(review): Suning needs two price parameters rendered by JS
            # on the page — dynamic loading is still an open problem here
            price_url = self.rule_list['price']['url']
            # dynamic price-request parameters, eval'd from the config
            price_kwargs = self.rule_list['price']['kwargs']
            new_kwargs = {}
            for k in price_kwargs:
                new_kwargs[k] = eval(price_kwargs[k])
            print("进入价格接口", price_url.format(**new_kwargs))
            yield scrapy.Request(url=price_url.format(**new_kwargs), callback=self.parse_price,
                                 meta={'item_loader': item_loader})
        else:
            # TODO: only emit the field when image_urls is non-empty
            # if item_loader.get_value('image_urls'):
            item_loader.add_value('price', '')
            yield item_loader.load_item()

        # ============ crawl the other variants (specs) =====================
        if self.rule_list.get('type'):
            # blanket exception handling: the eval'd config code may raise
            try:
                # 1. collect the list of variant ids
                # for multi-parameter URLs there is only one slot — callers
                # may pack two parameters into a single value
                if self.rule_list['type']['method'] == "xpath":
                    type_id_list = response.xpath(self.rule_list['type']['rule']).extract()
                elif self.rule_list['type']['method'] == "eval":
                    type_id_list = eval(self.rule_list['type']['rule'])
                else:
                    type_id_list = []
                print("提取的type_id", type_id_list)
                # 2. build each variant URL from the configured template;
                #    the id may be reformatted by the eval'd kwargs rules
                for type_id in type_id_list:
                    type_kwargs = self.rule_list['type']['kwargs']
                    type_kwargs_temp = {}
                    for k in type_kwargs:
                        type_kwargs_temp[k] = eval(type_kwargs[k])
                    print("进入规格页", type_id, self.rule_list['type']['url'].format(**type_kwargs_temp))
                    yield scrapy.Request(url=self.rule_list['type']['url'].format(**type_kwargs_temp),
                                         callback=self.parse_item)
            except Exception as e:
                self.logger.error(str(e))

    def parse_price(self, response):
        """
        Parse the price from a dedicated price-API response and emit the
        completed item.

        :param response: price API response; the partially filled loader
            travels in ``response.meta['item_loader']``
        :return: yields the finished SkuItem
        """
        item_loader = response.meta['item_loader']

        # extract the price with the eval'd config rule
        # NOTE(review): eval of config-provided code — trusted configs only
        price = eval(self.rule_list['price']['price_rule'])
        item_loader.add_value('price', price)

        # emit the item even when no price could be extracted
        yield item_loader.load_item()


if __name__ == "__main__":
    # Run one configured spider directly (outside `scrapy crawl`) for testing.
    config_name = "jd"

    project_settings = get_project_settings()

    # shared project-wide configuration
    process = CrawlerProcess(project_settings)
    # spider-specific configuration, injected via custom_settings
    CommonSpider.custom_settings = get_config(config_name).get('settings')

    process.crawl(CommonSpider, config_name=config_name, keyword="笔记本", trans_key='notebook')
    process.start()
