import requests
import xlwt

from threading import Thread
from queue import Queue

from lxml import etree
from retrying import retry


class Spider(object):
    """Multi-threaded eBay scraper for "Car sticker" listings.

    Pipeline:
        get_url()      seeds listing-page URLs into ``url_queue``
        get_sku_url()  workers extract per-item URLs into ``data_url_queue``
        get_data()     workers scrape item details into ``data_queue``
        save_data()    drains ``data_queue`` and writes an .xls workbook
    """

    def __init__(self):
        # Listing-page URL template; ``{}`` is the page number (_pgn).
        self.base_url = "https://www.ebay.com/sch/i.html?_from=R40&_nkw=Car+sticker&_sacat=0&LH_TitleDesc=0&_fsrp=1&LH_TitleDesc=0&rt=nc&_ipg=25&rt=nc&rt=nc&rt=nc&rt=nc&_pgn={}&rt=nc"
        self.headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
        }
        self.url_queue = Queue()        # listing pages waiting to be crawled
        self.data_url_queue = Queue()   # individual item URLs waiting to be scraped
        self.data_queue = Queue()       # scraped rows (lists of 9 fields)
        self.save_data_list = []        # rows drained from data_queue for writing

    def get_url(self):
        """Seed ``url_queue`` with listing pages 1..999."""
        for page in range(1, 1000):
            self.url_queue.put(self.base_url.format(page))

    @staticmethod
    def _first(values):
        """Return the first XPath match, or None when nothing matched.

        Fixes the original ``item.append(x[0] if x else item.append(None))``
        pattern, which appended None twice on a miss (the inner append ran
        AND its None return value was appended), misaligning the row.
        """
        return values[0] if values else None

    def _spec(self, html, label):
        """Return the product-spec value for *label* (e.g. ``"Brand:"``), or None.

        Uses normalize-space() instead of matching the label's exact embedded
        tab/newline whitespace, which the original hard-coded and which broke
        as soon as eBay's markup whitespace changed.  ``label`` is passed as
        an XPath variable so no string interpolation/escaping is needed.
        """
        values = html.xpath(
            '//tr/td[@class="attrLabels"][normalize-space(text())=$label]'
            '/following-sibling::td//span/text()',
            label=label,
        )
        return self._first(values)

    def get_sku_url(self):
        """Worker loop: fetch a listing page and enqueue every item URL on it."""
        while True:
            url = self.url_queue.get()
            try:
                response = requests.get(url, headers=self.headers, timeout=30)
                print(url)
                html = etree.HTML(response.content)
                url_list = html.xpath('//ul[@class="srp-results srp-list clearfix"]/li[@class="s-item   "]//div[@class="s-item__info clearfix"]/a/@href')
                for a in url_list:
                    self.data_url_queue.put(a)
            except requests.RequestException as exc:
                # Best-effort crawl: log and move on rather than kill the worker.
                print("request failed:", url, exc)
            finally:
                # Always acknowledge the item so url_queue.join() can return
                # even when fetching/parsing fails (the original skipped this
                # on error and deadlocked run()).
                self.url_queue.task_done()

    def get_data(self):
        """Worker loop: fetch one item page, scrape its details, enqueue the row.

        Row layout matches the header row written by save_data():
        [name, price, sales, brand, size, type, placement, material, item type]
        """
        while True:
            url = self.data_url_queue.get()
            try:
                print(url)
                response = requests.get(url, headers=self.headers, timeout=30)
                html = etree.HTML(response.content)
                item = [
                    self._first(html.xpath('//h1/text()')),
                    self._first(html.xpath('//span[@class="notranslate"]/text()')),
                    self._first(html.xpath('//a[@class="vi-txt-underline"]/text()')),
                    # NOTE: the original had the Brand/Size XPaths swapped
                    # ("size" read the Brand: label and vice versa) — fixed.
                    self._spec(html, "Brand:"),
                    self._spec(html, "Size:"),
                    self._spec(html, "Type:"),
                    self._spec(html, "Sticker Placement:"),
                    self._spec(html, "Material Type:"),
                    self._spec(html, "Item Type:"),
                ]
                print(item)
                self.data_queue.put(item)
            except requests.RequestException as exc:
                print("request failed:", url, exc)
            finally:
                self.data_url_queue.task_done()

    def save_data(self):
        """Drain ``data_queue`` into ``save_data_list`` and write data2.xls.

        The original wrote every field of every record down a diagonal
        (row index == column index) and never moved rows from the queue to
        the list at all; each record now occupies its own row.
        """
        while not self.data_queue.empty():
            self.save_data_list.append(self.data_queue.get())
            self.data_queue.task_done()
        # utf-8 (not ascii) — the sheet name and headers are Chinese.
        workbook = xlwt.Workbook(encoding='utf-8')
        sheet1 = workbook.add_sheet("车膜")
        row0 = ["产品名", "价格", "销量", "产家", "尺寸", "类型", "位置", "材料", "产品类型"]
        for col, title in enumerate(row0):
            sheet1.write(0, col, title)
        for row, data in enumerate(self.save_data_list, start=1):
            for col, value in enumerate(data):
                sheet1.write(row, col, value)
            print("完成一条")
        workbook.save('data2.xls')

    def run(self):
        """Start the worker threads, wait for the crawl, then save results."""
        self.get_url()
        t_list = []
        for _ in range(10):
            t_list.append(Thread(target=self.get_sku_url))
        for _ in range(15):
            t_list.append(Thread(target=self.get_data))
        for t in t_list:
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()
        # Wait until every listing page and every item URL is processed.
        self.url_queue.join()
        self.data_url_queue.join()
        # Save AFTER the crawl finishes — the original called save_data()
        # before even starting the threads, always producing an empty file.
        self.save_data()
        print("执行完成！！！！")


if __name__ == '__main__':
    # Script entry point: build the spider and run the full crawl pipeline.
    Spider().run()
