import re
import scrapy
import urllib.parse
import time
from scrapy.http import Request
from bs4 import BeautifulSoup
from dirbot.items import kaolaItem
from dirbot.mysqlPipelines.sql import Sql


class kaolaSpider(scrapy.Spider):
    """Search kaola.com for ``keyWord`` and scrape every product listing.

    Flow: ``start_requests`` issues one search request; ``parse`` collects the
    URL of every result page (following the pagination links when present);
    ``getProductInfo`` extracts one record per goods ``<li>`` and inserts it
    into MySQL via the ``Sql`` helper.
    """
    name = "kaola"
    # allowd_domains=["www.kaola.com"]
    base_url = "https://www.kaola.com/"
    # NOTE(review): "bash_url"/"bashurl" look like typos of "base_url"/a suffix
    # name, but they are public class attributes, so the names are kept.
    bash_url = base_url + "search.html?zn=top&key="
    bashurl = "&searchRefer=searchbutton&timestamp="
    keyWord = "奶粉"  # search keyword

    # Matches both the plain and promotional goods wrappers
    # ("goodswrap" / "goodswrap promotion"); compiled once for all items.
    _GOODSWRAP_RE = re.compile(r"goodswrap(\s\w+)?")

    def _build_search_url(self):
        """Build the search URL for ``keyWord`` with a millisecond timestamp."""
        quoted = urllib.parse.quote_plus(self.keyWord.encode("utf8"))
        timestamp = str(int(round(time.time() * 1000)))
        return self.bash_url + quoted + self.bashurl + timestamp

    def start_requests(self):
        # demo url  https://www.kaola.com/search.html?zn=top&key=%25E7%25BE%258E%25E6%25B1%2581%25E6%25BA%2590&searchRefer=searchbutton&oldQuery=%25E7%25BE%258E%25E6%25B1%2581%25E6%25BA%2590&timestamp=1518059417070
        yield Request(self._build_search_url(), self.parse)

    def parse(self, response):
        """Schedule a ``getProductInfo`` request for every result page."""
        soup = BeautifulSoup(response.text, 'lxml')
        result = (soup.find('div', attrs={'class': 'bodybox'})
                  .find('div', attrs={'id': 'searchbox'})
                  .find('div', attrs={'class': 'resultwrap'}))
        pager = result.find('div', attrs={'class': 'splitPages'})
        if pager is not None:
            # More than one page of results: follow every pagination link.
            url_list = [link.attrs['href']
                        for link in pager.find_all('a', attrs={'class': None})]
        else:
            # Single page of results: re-request the search URL itself.
            url_list = [self._build_search_url()]
        for url in url_list:
            yield Request(url, callback=self.getProductInfo)

    def getProductInfo(self, response):
        """Extract one ``kaolaItem`` per goods ``<li>`` and insert it into MySQL.

        A malformed listing is logged and skipped (best-effort) so one bad
        entry does not abort the whole page.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        goods = (soup.find('div', attrs={'class': 'bodybox'})
                 .find('div', attrs={'id': 'searchbox'})
                 .find('div', attrs={'class': 'resultwrap'})
                 .find('div', attrs={'id': 'searchresult'})
                 .find('ul')
                 .find_all('li', attrs={'class': 'goods'}))
        for one in goods:
            item = kaolaItem()
            try:
                wrap = one.find('div', attrs={'class': self._GOODSWRAP_RE})
                desc = wrap.find('div', attrs={'class': 'desc clearfix'})
                title_link = desc.find('div', attrs={'class': 'titlewrap'}).find('a')
                item['name'] = title_link.attrs['title']
                item['url'] = self.base_url + title_link.attrs['href']
                item['price'] = desc.find('p', attrs={'class': 'price'}).find('span').text
                goods_info = desc.find('p', attrs={'class': 'goodsinfo clearfix'})
                item['area'] = goods_info.find('span', attrs={'class': 'proPlace ellipsis'}).text
                item['commentCount'] = goods_info.find('a').text
                tag_info = desc.find('p', attrs={'class': 'saelsinfo'})
                # find_all() always returns a list (possibly empty), so no
                # second None check is needed.
                if tag_info is not None:
                    item['tag'] = self.get_tag_str(tag_info.find_all('span'))
                else:
                    item['tag'] = ""
                item['picUrl'] = "https:" + (wrap.find('a')
                                             .find('div', attrs={'class': 'img'})
                                             .find('img').attrs['src'])
                Sql.insertKaolaRecoed(item["name"], item["url"], item["picUrl"],
                                      item["price"], "", item['tag'],
                                      item["commentCount"])
            except Exception as e:
                # Best-effort scraping: log the error and the offending <li>.
                print(e)
                print(one)

    def get_tag_str(self, tags):
        """Concatenate the text of each tag element, prefixing each with a
        single space (kept identical to the historical format stored in the
        DB, including the leading space)."""
        return "".join(" " + tag.text for tag in tags)