import scrapy
from scrapy.http import JsonRequest
from scrapy import cmdline
from lxml import etree


class WinShopSpider(scrapy.Spider):
    """Crawl brand listings from winshangdata.com.

    Flow:
      1. ``start_requests`` POSTs the paginated brand-list API.
      2. ``parse`` reads the JSON list and schedules one detail-page
         request per brand.
      3. ``parse_detail`` scrapes summary fields from the detail page
         and yields a flat dict item.
    """

    name = "WinShop"

    # allowed_domains = ["www.winshangdata.com"]
    # start_urls = ["https://www.winshangdata.com"]

    # Pagination parameters as class attributes so a subclass (or a quick
    # edit in one place) can change the crawl size without touching logic.
    # Defaults reproduce the original behavior: pages 1..85, 60 per page.
    list_api_url = 'http://www.winshangdata.com/wsapi/brand/getBigdataList3_4'
    max_pages = 85
    page_size = 60

    def start_requests(self):
        """Yield one JSON POST per listing page of the brand API."""
        # Form/JSON payload expected by the API; empty strings mean
        # "no filter" for the corresponding facet (presumably — the
        # exact semantics are defined server-side).
        for page in range(1, self.max_pages + 1):
            payload = {
                "isHaveLink": "",
                "isTuozhan": "",
                "isXxPp": "",
                "kdfs": "",
                "key": "",
                "orderBy": "1",
                "pageNum": page,
                "pageSize": self.page_size,
                "pid": "",
                "qy_p": "",
                "qy_r": "",
                "xqMj": "",
                "ytlb1": "",
                "ytlb2": ""
            }
            # dont_filter defaults to False, so the redundant kwarg from the
            # original version was dropped; dedup behavior is unchanged
            # (POST fingerprints include the body, so pages are distinct).
            yield JsonRequest(url=self.list_api_url, data=payload, callback=self.parse)

    def parse(self, response, **kwargs):
        """Schedule a detail-page request for every brand in the JSON list.

        Guards against malformed/error responses: a missing ``data`` or
        ``list`` key now yields nothing instead of raising ``KeyError``
        and aborting the whole page.
        """
        result = response.json()
        for res in (result.get('data') or {}).get('list') or []:
            yield scrapy.Request(
                f"http://www.winshangdata.com/brandDetail?brandId={res['brandId']}",
                callback=self.parse_detail,
                cb_kwargs={"brand_name": res['brandName']},
            )

    @staticmethod
    def _first_text(nodes, index, expr):
        """Return the first text match of *expr* under ``nodes[index]``.

        Returns ``None`` when the node or the text is missing, so a
        detail page with an unexpected layout produces a partial item
        instead of an ``IndexError``.
        """
        if index >= len(nodes):
            return None
        texts = nodes[index].xpath(expr)
        return texts[0] if texts else None

    @classmethod
    def parse_detail(cls, response, brand_name):
        """Extract summary fields from a brand detail page.

        :param response: the detail-page HTML response.
        :param brand_name: brand name carried over from the listing API.
        :return: yields one dict item per page.
        """
        html = etree.HTML(response.text)
        li_list = html.xpath("//ul[@class='detail-option border-b']/li")
        item = {'brand_name': brand_name}
        # Field name -> position in the option <li> list. Index 1 is
        # intentionally skipped, matching the original extraction.
        positions = {
            'create_date': 0,
            'shop_type': 2,
            'cooperate': 3,
            'area': 4,
        }
        for field, idx in positions.items():
            item[field] = cls._first_text(li_list, idx, ".//span[last()]/text()")

        yield item


if __name__ == '__main__':
    # Launch this spider via the Scrapy CLI when run as a script.
    cmdline.execute(['scrapy', 'crawl', 'WinShop'])
