# -*- coding: utf-8 -*-
"""
Created on 2021-09-17 16:01:50
---------
@summary:
---------
@author: xubin
"""
import feapder
from feapder.utils.tools import get_current_date
from feapder.utils.tools import get_md5
from items import novoprotein_task_info_item
from items.NovoproteinProduct_item import NovoproteinproductItem
from my_tool.snowflake_id import IdWorker

# Module-wide snowflake ID generator shared by all spider threads; used in
# four_parse to assign unique item ids.
# NOTE(review): argument semantics come from my_tool.snowflake_id.IdWorker —
# presumably (datacenter_id, worker_id, sequence); confirm against that module.
worker = IdWorker(1, 2, 0)


class NovoproteinSpider(feapder.AirSpider):
    """Crawl product listings from novoprotein.com.cn.

    Flow: start_requests -> parse (category links) -> second_parse
    (pagination discovery) -> four_parse (product rows -> items).
    three_parse is an alternative listing parser that is currently not
    wired in as a callback by any other method in this file.
    """

    # Suffix used by every listing URL; removed to build paginated URLs.
    _HTML_SUFFIX = '.html'

    def start_requests(self):
        # Entry point: the product catalogue index page.
        yield feapder.Request("https://www.novoprotein.com.cn/Cn/product/index/catid/6.html")

    def parse(self, request, response):
        """Extract category links from sidebar groups li[6] and li[7]."""
        href_list = response.xpath(
            '/html/body/div[4]/div[4]/div[1]/ul/li[6]//@href | /html/body/div[4]/div[4]/div[1]/ul/li[7]//@href').getall()
        for url in href_list:
            # Only follow absolute https links; skip anchors / relative paths.
            if url.startswith('https'):
                yield feapder.Request(url=url, callback=self.second_parse)

    @classmethod
    def _strip_html_suffix(cls, url):
        """Remove a trailing '.html' from *url* (proper suffix removal).

        BUG FIX: the original used url.strip('.html'), which strips the
        *character set* {. h t m l} from BOTH ends — it also chopped the
        leading 'htt' off 'https://…', which was then patched back with
        'htt' + …. This helper removes only the literal suffix.
        """
        if url.endswith(cls._HTML_SUFFIX):
            return url[:-len(cls._HTML_SUFFIX)]
        return url

    def second_parse(self, request, response):
        """Find the last-page link and queue every paginated listing page."""
        last_href = response.xpath('//a[@class="end"]/@href').get()
        if last_href is None:
            # Single-page category: no "end" pagination link — nothing to queue.
            # (The original hid this case behind a bare `except: pass`.)
            return
        # Last path segment looks like "<n>.html"; take the numeric part.
        page_num = last_href.rsplit('/', 1)[-1].split('.', 1)[0]
        if not page_num.isdigit():
            # Unexpected pagination href format; skip rather than crash
            # (preserves the original best-effort behaviour, but narrowly).
            return
        base = self._strip_html_suffix(response.url)
        for p in range(1, int(page_num) + 1):
            yield feapder.Request(url=f'{base}/p/{p}.html', callback=self.four_parse)

    def three_parse(self, request, response):
        """Parse a name/url listing and follow each product link."""
        # getall() returns hrefs and texts interleaved: [href0, text0, href1, text1, …]
        product_info = response.xpath('//ul[@class="selist"]/li/a/@href | //ul[@class="selist"]/li/a/text()').getall()

        data = {}
        # Pair each href with the text node that follows it; the -1 bound
        # guards against a trailing unpaired entry (original raised IndexError).
        for index in range(0, len(product_info) - 1, 2):
            data[product_info[index + 1]] = product_info[index]
        for name, url in data.items():
            item = novoprotein_task_info_item.NovoproteinTaskInfoItem()
            item.id = get_md5(name)
            item.name = name
            item.url = url
            # yield item
            yield feapder.Request(url=url, callback=self.four_parse)

    def four_parse(self, request, response):
        """Parse the product table of a listing page and yield product items."""
        res_list = response.xpath('//div[@class="loction"]//text()').getall()
        # Breadcrumb path: drop whitespace and literal '&nbsp;'/'\xa0' padding.
        # BUG FIX: the original `strip('&nbsp&nbsp')` stripped the character
        # set {& n b s p} and so ate real trailing letters (e.g. the final
        # 's'/'n' of a category name); replace() removes only the entity.
        path = ''.join(
            fragment.replace('&nbsp;', '').replace('\xa0', '').strip()
            for fragment in res_list if fragment.strip()
        )
        print(path)
        th_text = response.xpath('//th/text()').getall()
        print(th_text)
        nodes = response.xpath('//table//tr')
        for node in nodes:
            # Cell order per row: [detail_href, accession, ?, name, pack, price]
            pro_info = node.xpath('.//td//text() | .//td//@href').getall()
            # ROBUSTNESS FIX: the original tested only `!= 0` and then indexed
            # up to [5]/th_text[3], crashing on short header/body rows.
            if len(pro_info) >= 6 and len(th_text) >= 4:
                data = {
                    th_text[0]: pro_info[1],
                    th_text[1]: pro_info[3],
                    th_text[2]: pro_info[4],
                    # Strip surrounding whitespace and both CJK/ASCII yen signs.
                    th_text[3]: pro_info[5].strip().strip('￥¥ '),
                    'detail_url': pro_info[0],
                    'path': path
                }

                print(data)
                item = NovoproteinproductItem()
                item.id = worker.get_id()
                item.type = data['path']
                item.name = data[th_text[1]]
                item.accession = data[th_text[0]]
                item.pack = data[th_text[2]]
                item.detail_url = data['detail_url']
                item.price = data[th_text[3]]
                item.create_by = 'xubin@%'
                item.update_by = 'xubin@%'
                item.create_on = get_current_date()
                item.update_on = get_current_date()
                item.batch_id = 4538223704501391361

                yield item


if __name__ == "__main__":
    # Run the spider standalone with 4 download threads.
    NovoproteinSpider(thread_count=4).start()
