import ast
import json
import re

import scrapy

from tutorial.items import CogikSpiderItem


class Products_Spider(scrapy.Spider):
    """Crawl jd.com category listing pages and extract filter attributes.

    For each category URL read from ``../../data/products_url`` the spider
    yields one :class:`CogikSpiderItem` whose ``attr`` field holds the
    serialized filter attributes.  When the page itself exposes no brand
    filter, a follow-up ajax request (``parse1``) fetches the brand list.
    """

    name = "products_more"
    allowed_domains = ["list.jd.com"]

    # Read all category URLs once at class-definition time.  ``with``
    # guarantees the handle is closed (the original leaked it), and
    # ``strip()`` drops the trailing newline that would otherwise be
    # percent-encoded into the request URL as ``%0A`` (see the old
    # "米面杂粮" special-case note — that was caused by the stray newline).
    with open('../../data/products_url', 'r') as _url_file:
        start_urls = [line.strip() for line in _url_file if line.strip()]
    # start_urls = [
    #     "https://list.jd.com/list.html?tid=1001743%0A",
    # ]

    def _extract_selector_attrs(self, response):
        """Collect {'name', 'value_name'} dicts from every ``.sl-wrap``
        filter row in the page's selector area (everything but the brand)."""
        attrs = []
        for selector in response.xpath("//div[@class='selector']"):
            for wrap in selector.css('.sl-wrap'):
                key = wrap.css('.sl-key span').xpath('text()').extract()[0]
                values = wrap.css(
                    '.sl-v-list ul li a').xpath('text()').extract()
                # Rows without any value links carry no usable attribute.
                if values:
                    attrs.append({'name': key, 'value_name': values})
        return attrs

    def _extract_other_exts(self, body):
        """Parse the ``other_exts`` JS variable embedded in *body* into a
        list of {'name', 'value_name'} dicts (value_name split on ';')."""
        start = body.find('other_exts')
        end = body.find('var pay_after')
        fragment = body[start:end]
        # A fragment this short holds no attribute data at all.
        if len(fragment) <= 35:
            return []
        attrs = []
        # Grab the {...} payload and drop the list/quote wrapper that
        # ``str(re.findall(...))`` adds around it.
        other_attr = str(re.findall(r'{.*}', fragment))[3:-3]
        for chunk in other_attr.split('},{'):
            parts = chunk.split(',')
            # Keep only the first (name) and last (value_name) fields.
            literal = '{' + parts[0] + ',' + parts[-1] + '}'
            # literal_eval parses the same dict literal that eval() did but
            # cannot execute code smuggled into the scraped page.
            entry = ast.literal_eval(literal)
            attrs.append({'name': entry['name'],
                          'value_name': entry['value_name'].split(';')})
        return attrs

    def parse(self, response):
        """Parse one category listing page.

        Yields the finished item directly when the page already contains a
        brand ("品牌") filter; otherwise follows the ajax URL rebuilt from
        the embedded ``baseURL`` so :meth:`parse1` can add the brand list.
        """
        item = CogikSpiderItem()

        # The category title doubles as item id and label.
        products_category = response.css(
            '.s-title h3 b').xpath('text()').extract()[0]
        item['item_id'] = products_category
        item['label'] = products_category

        result = self._extract_selector_attrs(response)

        body = response.body_as_unicode()
        result.extend(self._extract_other_exts(body))

        # Serialize, dropping spaces and full-width colons from the data.
        result_str = str(result).replace(' ', '').replace('：', '')
        item['attr'] = result_str

        if "'name':'品牌'" in result_str:
            yield item
        else:
            # No brand filter on the page: rebuild the ajax URL from the
            # embedded ``baseURL`` variable and fetch the brand list.
            left = body.find('baseURL')
            right = body.find('var params')
            baseUrl = str(
                re.findall(r"(?<=\').*?(?=\')", body[left:right])[0])
            url = response.url.split('?')[0] + '?' + baseUrl + '&md=1'
            yield scrapy.Request(url=url, callback=self.parse1,
                                 meta={'item': item})

    def parse1(self, response):
        """Parse the ajax brand-list response and attach the brand names to
        the item carried in ``response.meta``."""
        item = response.meta['item']
        body = response.body_as_unicode()
        # Replace JS ``null`` with the sentinel 1234 so the payload parses
        # as a Python literal; 1234 then means "no brand data".
        body = body.replace('null', '1234')
        # literal_eval instead of eval(): the body is untrusted remote input.
        data = ast.literal_eval(body)
        if data['brands'] != 1234:
            item['brand'] = {
                'name': 'brand',
                'value_name': [brand['name'] for brand in data['brands']],
            }
        yield item
