#!/usr/bin/python
# -*- coding: UTF-8 -*-
# __author__ = 'chenbinghui'

import scrapy
import logging
from turorial.items import TmallItem


class TmallSpider(scrapy.Spider):
    """Crawl Tmall search-result list pages, then follow each product link
    and scrape shop/company details into a ``TmallItem``.

    Flow: ``start_requests`` emits one request per list page ->
    ``parse`` extracts price/name/url per product and follows the url ->
    ``parse_detail`` fills in the shop and company fields and yields the item.
    """

    name = "Tmall"
    allowed_domains = ["tmall.com"]
    start_urls = [
        'https://list.tmall.com/search_product.htm?q=jackjones%D0%AC&type=p&xl=jackjones_10&from=.list.pc_1_suggest',
    ]
    # Browser-like headers so requests are less likely to be rejected as a bot.
    post_headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36",
        "Referer": "http://www.tmall.com",
    }
    # Number of list pages processed so far (shared class-wide counter).
    count = 0

    def start_requests(self):
        """Return one request per result-list page.

        Pages are offset by 60 items each (`s=` query parameter); pages 0
        and 1 are fetched. Session cookies are hard-coded captures —
        NOTE(review): these will expire and likely need refreshing.
        """
        cookies = {
            'miid': '1279809970704864021',
            'thw': 'cn',
            't': '7349beda1fac2771e1b07173a388c1a7',
            'cookie2': '169e58df275871365bf763a04f83945d',
            '_tb_token_': 'f5836335bbbed',
            'l': 'As7Ol7pcpNOglmJtnYezXP/Fnq6RuZJB',
            'isg': 'AuTkU7_eYUo5n5WHgkykUP1IteI6RAjnXtEpK_4Ehq96qYZzJ431dp1BH7ZL',
            'cna': 'xxqjEU4BaTMCAXLV6R/2cfxq',
            'sca': '49d5174e',
            'atpsida': 'b8147f8d3acd3709988ab26d_1495089785_1',
            'aimx': 'xxqjEYvEdQcCAXLV6R9iOoQn_1495089785',
            'cad': 'k95WugY3Sgew+2KIuDSUxTOnySH07xok1SSfrDICn3k=0001',
            'cap': '41cf',
            '_med': 'dw:1366&dh:768&pw:1366&ph:768&ist:0',
            'res': 'scroll%3A1349*6611-client%3A1349*637-offset%3A1349*6611-screen%3A1366*768',
            'pnm_cku822': '043UW5TcyMNYQwiAiwQRHhBfEF8QXtHcklnMWc%3D%7CUm5Ockt%2FR3pPe0F5QndJdCI%3D%7CU2xMHDJ7G2AHYg8hAS8XIgwsAl4%2FWTVSLFZ4Lng%3D%7CVGhXd1llXGhQbVhsVm5VYF5jVGlLcEx2SHxBf0F0QH5AekF%2FQG44%7CVWldfS0RMQ01DDQUKBMzHWxSPAIrFioSKhI4Az0YLlV7LXs%3D%7CVmhIGCUFOBgkGiMXNww3CzcXKxUuFTUPNAEhHSMYIwM5BjNlMw%3D%3D%7CV25Tbk5zU2xMcEl1VWtTaUlwJg%3D%3D',
            'cq': 'ccp%3D1'
        }
        # One request per page; page i starts at item offset i * 60.
        return [
            scrapy.Request(
                "https://list.tmall.com/search_product.htm?spm=a220m.1000858.0.0.wH40GN&s=" + str(
                    i * 60) + "&q=%C4%D0%D7%B0&sort=d&style=g&from=nanzhuang..pc_1_suggest&suggest=0_1&type=pc#J_Filter",
                cookies=cookies, headers=self.post_headers)
            for i in range(0, 2)  # pages 0 and 1
        ]

    def parse(self, response):
        """Parse a search-result list page.

        Yields a detail-page request per product, carrying the partially
        filled item in ``meta['item']``.
        """
        TmallSpider.count += 1
        divs = response.xpath('//div[@id="J_ItemList"]/div/div')
        if not divs:
            logging.info("list page error---%s" % response.url)
            return
        for div in divs:
            item = TmallItem()
            item['GOODS_PRICE'] = div.xpath('p[@class="productPrice"]/em/@title').extract_first()
            item['GOODS_NAME'] = div.xpath('p[@class="productTitle"]/a/@title').extract_first()
            pre_goods_url = div.xpath('p[@class="productTitle"]/a/@href').extract_first()
            # BUG FIX: extract_first() may return None; the original raised
            # TypeError on `"https:" in None`. Skip products with no link.
            if not pre_goods_url:
                logging.info("missing goods url on list page--%s" % response.url)
                continue
            item['GOODS_URL'] = pre_goods_url if "https:" in pre_goods_url else ('https:' + pre_goods_url)
            yield scrapy.Request(url=item['GOODS_URL'], meta={'item': item},
                                 callback=self.parse_detail, dont_filter=True)

    def parse_detail(self, response):
        """Parse a product detail page and yield the completed item."""
        div = response.xpath('//div[@class="extend"]/ul')
        if not div:
            logging.info("Detail page error--%s" % response.url)
            return
        item = response.meta['item']
        div = div[0]
        item['SHOP_NAME'] = div.xpath('li[1]/div/a/text()').extract_first()
        item['SHOP_URL'] = div.xpath('li[1]/div/a/@href').extract_first()
        # BUG FIX: extract_first() may return None; guard before .strip()
        # instead of raising AttributeError when the field is absent.
        company_name = div.xpath('li[3]/div/text()').extract_first()
        item['COMPANY_NAME'] = company_name.strip() if company_name else company_name
        company_address = div.xpath('li[4]/div/text()').extract_first()
        item['COMPANY_ADDRESS'] = company_address.strip() if company_address else company_address
        yield item