# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy_splash import SplashRequest
from urllib.parse import quote
from tbsplash.items import TbsplashItem

# Lua script executed by Splash's 'execute' endpoint.
# It loads the search URL, waits for the page, injects JS that types the
# requested page number into the pager input and clicks the "go" button,
# then scrolls to the bottom so lazy-loaded product cards render before
# the final HTML snapshot is returned to the spider.
script = """
function main(splash, args)
  splash.resource_timeout = 30
  splash:set_viewport_size(1024, 5000)
  splash.images_enabled = false
  assert(splash:go(args.url))
  assert(splash:wait(args.wait))
  js = string.format("document.querySelector('#spulist-pager div.form > input').value=%d;document.querySelector('#spulist-pager div.form > span.btn').click()", args.page)
  splash:evaljs(js)
  assert(splash:wait(args.wait))
  splash.scroll_position = {y=5000} 
  splash:wait(7)
  return splash:html()
end
"""


class TaobaoSpider(scrapy.Spider):
    """Crawl Taobao search results rendered through Splash.

    For every keyword in the ``KEY_WORDS`` setting, pages
    ``1..MAX_PAGE_NUM`` are requested via the module-level Lua ``script``
    and every product card in the rendered HTML is parsed into a
    :class:`TbsplashItem`.
    """

    name = 'taobao'
    allowed_domains = ['www.taobao.com']
    start_url = 'https://s.taobao.com/search?q='

    def start_requests(self):
        """Yield one SplashRequest per (keyword, page) combination.

        The page number is handed to the Lua script through ``args`` so the
        script can drive the on-page pager (the URL itself is identical for
        every page of one keyword).
        """
        for keyword in self.settings.get('KEY_WORDS'):
            for page in range(1, self.settings.get('MAX_PAGE_NUM') + 1):
                url = self.start_url + quote(keyword)
                yield SplashRequest(
                    url,
                    callback=self.parse,
                    endpoint='execute',
                    args={'lua_source': script, 'page': page, 'wait': 10},
                )

    @staticmethod
    def _to_number(text, converter):
        """Convert *text* with *converter* (``int``/``float``).

        Returns the converted number, or the raw value unchanged when the
        text is missing or not parseable — the pipeline then receives the
        original string (or ``None``) instead of the spider crashing.
        """
        if text is None:
            return None
        try:
            return converter(text)
        except (TypeError, ValueError):
            return text

    def parse(self, response):
        """Extract one item per product card from the rendered page.

        Every field falls back to the raw scraped text when it does not
        match the expected format, so a single malformed card can no
        longer abort parsing of the whole page (the original code called
        ``.group(1)`` on a possibly-``None`` match object).
        """
        item = TbsplashItem()

        div_list_1 = response.xpath("//div[@class='grid-container row']/div[@class='grid-item col']")
        div_list_2 = response.xpath("//div[@class='grid-container row']/div[@class='blank-row col']//div[@class='grid-item col']")

        for div in (div_list_1 + div_list_2):
            # Product title
            item['title'] = div.xpath(".//a[@class='product-title']/@title").extract_first()

            # Price — float() also accepts decimals like "12.5", which the
            # old isnumeric() check wrongly left as strings.
            price = div.xpath(".//div[@class='col']//strong/text()").extract_first()
            item['price'] = self._to_number(price, float)

            # Weekly sales count
            week_sale = div.xpath(".//span[@class='week-sale']/span/text()").extract_first()
            item['sale'] = self._to_number(week_sale, int)

            # Comment count: "点评N条" -> N, "暂无点评" -> "Null",
            # anything else is kept verbatim for inspection downstream.
            comment = "".join(div.xpath(".//div[@class='info-row comment-row']//text()").extract()).replace('\n', '').strip()
            if re.match(r"暂无点评", comment):
                comment = "Null"
            else:
                match = re.match(r"点评(\d+)条", comment.replace(" ", ""))
                if match:
                    comment = int(match.group(1))
            item['comment'] = comment

            # Number of sellers: "共有N商家在售" -> N; keep the raw text
            # when the element is missing or the pattern does not match.
            retailer = div.xpath(".//div[@class='info-row seller']/a/text()").extract_first()
            match = re.match(r"共有(\d+)商家在售", retailer) if retailer else None
            item['seller'] = int(match.group(1)) if match else retailer

            # Thumbnail URL
            item['img'] = div.xpath(".//img/@src").extract_first()

            yield item
