import re
import scrapy
import logging
logger = logging.getLogger(__name__)


class TmallSpider(scrapy.Spider):
    """Spider that scrapes product listings from Tmall search-result pages.

    Yields one dict per product with SKU id, title, price, shop, image URL,
    sales volume, review count and the product-detail href.
    """

    name = 'tmall'
    allowed_domains = ['tmall.com']
    start_urls = ['https://list.tmall.com/search_product.htm?q=耳机']
    # Running counter of products emitted across all parsed pages
    # (shared class attribute; fine for a single-spider run).
    num = 0

    def parse(self, response):
        """Parse one search-result page and yield a dict per product card.

        :param response: Scrapy Response for a Tmall search-result page.
        :returns: generator of per-product dicts.
        """
        # Captured from a logged-in browser session (packet capture).
        cookies_str = '_med=dw:1920&dh:1080&pw:1920&ph:1080&ist:0; cq=ccp%3D0; dnk=%5Cu6842%5Cu8BB8%5Cu65D7; tracknick=%5Cu6842%5Cu8BB8%5Cu65D7; lid=%E6%A1%82%E8%AE%B8%E6%97%97; _l_g_=Ug%3D%3D; unb=829246918; lgc=%5Cu6842%5Cu8BB8%5Cu65D7; cookie1=VAYoGvrusFdJalqNaOd2XRghljWVFtjjehRkehJJ%2FIA%3D; login=true; cookie17=W8t9qqtSEWee; _nk_=%5Cu6842%5Cu8BB8%5Cu65D7; sg=%E6%97%978f; cna=xlzwF2rdRSQCAXeLxlevYVE/; xlly_s=1; uc1=cookie21=VFC%2FuZ9ajC0X15Rzt0LhxQ%3D%3D&pas=0&cookie16=UtASsssmPlP%2Ff1IHDsDaPRu%2BPw%3D%3D&cookie14=Uoe0aDTXCwbQtA%3D%3D&existShop=false&cookie15=VT5L2FSpMGV7TQ%3D%3D; uc3=lg2=U%2BGCWk%2F75gdr5Q%3D%3D&id2=W8t9qqtSEWee&nk2=2n90EzDH&vt3=F8dCufwryGtkkuXiKN0%3D; uc4=nk4=0%402EV%2FUImmg3jM%2BM5FfHGzj88%3D&id4=0%40WeijjRIynLgXQ0YJdYC7UKrfmyU%3D; cookie2=147c5d4ca98d0d2c891c99997541151e; sgcookie=E100TmTH223bMeG4HNp5YxuKp9oRuXWgznTU%2Bix1NAMS0AH5aFD5pkCgEBnob9gKGb0ZcxO8TNf3rZRz5VHgIeg1ng%3D%3D; t=4c5414ea0f90c7a61a70daccfee02d68; csg=e45f4c2d; _tb_token_=f5ee55385773e; enc=vgxMar2d7iNPcstw6rgQfaOX%2B%2B9w%2Fd6msL1Px7lKf8ToSnZ5KoNUfFc35dDm3J6BdAGAhAbxBiMt3tiwYEn7lQ%3D%3D; pnm_cku822=098%23E1hvGQvUvbpvUvCkvvvvvjiWP259ljtPRFqZsjYHPmPWzjlPR2SZzjiERsdZzjnhi9hvCvvv9UUgvpvhvvvvvvgCvvpvvPMMuvhvmvvv9buyAwm4kvhvC99vvOCgLd9Cvm9vvvvvphvvvvvvvu3vpvAevvm2phCvhRvvvUnvphvppvvv96CvpCCvmvhvLvsS59vjn%2B1lYE7rejZIeExr1EAKfvDr1RCl5FGDN%2BLWafmAdcHmaNoxfX9wjLKxfwLZd3ODN%2BLZaNpM%2B3%2Busj7QD40OwAq6D7zhVTgvvpvVvvpvvhCv; res=scroll%3A1088*7100-client%3A1088*937-offset%3A1088*7100-screen%3A1920*1080; _uab_collina=160585538515478398119644; isg=BE5OFZsEnQ48mikP6Xy8iJoTnyQQzxLJaIqiNniXutEM2-414F9i2fSZEwe3WArh; l=eBx4E_ZnOpkl13HyBOfanurza77OSIRYYuPzaNbMiOCPOw1B5e0lWZ7kKYL6C3GVh6zBR3ujXzbpBeYBqQAonxvttBALurkmn; tfstk=cuqOBpmoX6fGQZDLUPQHcheVr_5lw3fxOdGDDT1TXM-7_X1c8uqRqn7shCndp'
        # Bug fix: split on the FIRST '=' only. Several cookie values contain
        # '=' themselves (e.g. 'uc1=cookie21=...'), so a plain split('=')[1]
        # would silently truncate them.
        cookies_dict = {}
        for pair in cookies_str.split('; '):
            key, _, value = pair.partition('=')
            cookies_dict[key] = value

        def first_text(sel, xpath):
            # First XPath match, stripped; '' when the node is absent
            # (the original crashed with AttributeError on a missing node).
            value = sel.xpath(xpath).extract_first()
            return value.strip() if value else ''

        # tr_list = response.xpath('//div[@class="product  "]')
        tr_list = response.xpath('//div[@class="view  "]/div')
        for tr in tr_list:
            self.num += 1
            meta_dict = {
                'name': self.name,
                'SKU_ID': first_text(tr, './@data-id'),
                'title': first_text(tr, './/p[@class="productTitle"]/a/@title'),
                'price': first_text(tr, './/p[@class="productPrice"]/em/@title'),
                'shop': first_text(tr, './/div[@class="productShop"]/a/text()'),
                'volume': first_text(tr, './/p[@class="productStatus"]/span[1]/em/text()'),
                'review': first_text(tr, './/p[@class="productStatus"]/span[2]/a/text()'),
            }
            # The image may be lazy-loaded, in which case the real URL lives
            # in @data-ks-lazyload instead of @src.
            img = tr.xpath('.//div[@class="productImg-wrap"]/a/img/@src').extract_first()
            if not img:
                img = tr.xpath('.//div[@class="productImg-wrap"]/a/img/@data-ks-lazyload').extract_first()
            meta_dict['img_url'] = ('https:' + img) if img else ""
            meta_dict['href'] = tr.xpath('.//div[@class="productImg-wrap"]/a/@href').extract_first()
            # Guard against a missing href (the original raised TypeError on
            # 'https:' + None).
            detail_url = ('https:' + meta_dict['href']) if meta_dict['href'] else ''
            # Lazy %-style args instead of str.format: formatting only happens
            # if the record is actually emitted.
            logger.info("%s: %s", self.num, meta_dict['title'])
            # yield scrapy.Request(detail_url, callback=self.parse_detail, meta=meta_dict)
            yield meta_dict
        # Pagination (disabled):
        # next_url = response.xpath('//a[text()="下一页>>"]/@href').extract_first()
        # if next_url != 'javascript:;' and next_url:
        #     next_url = 'https://list.tmall.com/search_product.htm' + next_url
        #     logger.info("next_url: %s", next_url)
        #     yield scrapy.Request(next_url, callback=self.parse, cookies=cookies_dict)

    # def parse_login(self, response):
    #     ret = re.findall(r"noobpythoner|NoobPythoner", response.text)
    #     print(ret)

    def parse_detail(self, response):
        """Extract the product video id from a detail page and return the item.

        :param response: Scrapy Response for a product-detail page, carrying
            the listing dict in ``response.meta``.
        :returns: the meta dict (unmodified apart from Scrapy's own keys).
        """
        item = response.meta
        match = re.search(r'"imgVedioID":"(\d+)"', response.text)
        img_video_id = match.group(1) if match else ""
        # Bug fix: the original called logger.info("imgVedioID: ", id), passing
        # the value as a %-format argument with no placeholder, which raises a
        # logging formatting error instead of printing the id.
        logger.info("imgVedioID: %s", img_video_id)
        return item

