# -*- coding: utf-8 -*-
import time
import requests
import re
import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.inner_utils.standardize_field_utils import check_city_field, check_time_field
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.tiejiawang.items import TieJiaWangItem
from commonresources.spiders.basespider import BaseSpider


class TieJiaWangSpider(BaseSpider):
    """Spider for the product catalog of cehome.com (铁甲网, construction machinery).

    Crawl chain:
        parse             -> first-level category links on the start page
        parse_clearfix    -> second-level category links
        parse_clearfix_1  -> append the default list-filter suffix
        parse_page        -> fan out over every result page of a category
        pares_clearfix_2  -> product detail links on one list page
        start_param_parse -> cover images + parameter/picture tab URLs
        param_parse       -> specification table
        pic_parse         -> gallery images, assemble and emit the item
    """

    name = "TieJiaWang"
    name_zh = "铁甲网"
    province = "北京"
    allowed_domains = ['product.cehome.com']
    start_urls = ["https://product.cehome.com/wajueji/"]

    # Top-level category (zero_class) -> the first-class names it contains.
    # Used by pic_parse to back-fill item['zero_class'].
    # NOTE: the original literal was missing a comma ('挖掘机''挖掘装载机'
    # fused into one string), so excavators never matched; fixed here.
    CATEGORY_MAP = {
        '挖掘机械': ['挖掘机', '挖掘装载机', '抓木机', '抓料机', '装载机', '推土机',
                     '平地机', '非公路自卸车', '滑移装载机', '铲运机'],

        '起重机械': ['起重机', '施工升降机'],

        '筑养路机械': ['沥青摊铺机', '铣刨机', '稳定土拌合机', '沥青搅拌站', '水泥摊铺机',
                       '稳定土拌合站', '路面再生设备', '稀浆封层车', '同步封层车', '路面养护车',
                       '安全缓冲车', '沥青路面修补车', '除雪机械', '沥青洒布车', '碎石撒布机'],

        '压实机械': ['压路机', '振动夯'],

        '混凝土机械': ['泵车', '搅拌运输车', '拖泵', '混凝土搅拌站', '混凝土湿喷机', '车载泵'],

        '高空作业机械': ['高空作业机械'],

        '桩工机械': ['打桩机', '旋挖钻', '水平定向钻', '压桩机', '强夯机', '锚杆钻机',
                     '非开挖机械', '切削钻机', '长螺旋钻机', '连续墙抓斗'],

        '矿山机械': ['潜孔钻机', '爆破孔钻机', '掘进钻车', '空气压缩机', '移动破碎站',
                     '锚杆台车', '锚索台车', '水井钻机', '掘进机', '扒装机', '凿岩机',
                     '岩芯钻机', '凿岩台车', '天井钻机', '矿用洒水车', '堆取料机',
                     '装船机', '输送机', '采掘机'],

        '工业运输车辆': ['平板运输车', '工程自卸车', '叉车', '叉装车', '伸缩臂叉车',
                         '多功能工具车', '粉粒物料运输半罐车'],

        '市政工程机械': ['市政环卫机械'],

        '附件工装': ['破碎锤', '铲斗', '抓木器', '抓钳器', '松土器', '快速连接器',
                     '加长臂拆除臂', '钻具', '智能设备'],

        '配件': ['发动机', '配件', '润滑油'],

        '特殊机械': ['铁路枕木铺设机', '拆炉机'],
    }

    def __init__(self, full_dose=True):
        super(TieJiaWangSpider, self).__init__(full_dose)
        # Item-field -> Chinese display-name mapping consumed downstream.
        self.convert_dict = {
            'zero_class': '品类一',
            'first_class': '品类二',
            'second_class': '品类三',
            'name': '设备名称',
            'param': '数据组',
            'pic': '图片组',
            'home_page': '封面图组',
        }

    @property
    def fake_headers(self):
        """Desktop-browser headers attached to every category/list request."""
        return {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/88.0.4324.182 Safari/537.36 Edg/88.0.705.81 "
        }

    def parse(self, response):
        """Yield one request per first-level category on the start page."""
        for a in response.xpath("//div[@class='category']//ul[@class='clearfix']/li/h2/a"):
            meta = {
                'origin_url': a.xpath("./@href").extract()[0],
                'announcement_title': a.xpath("./@title").extract()[0],
            }
            yield scrapy.Request(url=meta['origin_url'], callback=self.parse_clearfix,
                                 headers=self.fake_headers, meta=meta)

    def parse_clearfix(self, response):
        """Yield one request per second-level category.

        A fresh meta dict is built for every link: the original reused a
        single dict across iterations, so requests already scheduled saw
        their meta mutated by later loop passes.
        """
        comm_name = response.meta['announcement_title']
        for a in response.xpath('//span[@id="secondCategory"]/h3/a'):
            meta = {
                'comm_name': comm_name,
                'origin_url': a.xpath("./@href").extract()[0],
                'announcement_title': a.xpath("./text()").extract()[0],
            }
            yield scrapy.Request(url=meta['origin_url'], callback=self.parse_clearfix_1,
                                 headers=self.fake_headers, meta=meta)

    def parse_clearfix_1(self, response):
        """Append the default list filter ('ss_0_0/') and move on to pagination."""
        meta = {
            'origin_url': response.url + 'ss_0_0/',
            'announcement_title': response.meta['announcement_title'],
            'comm_name': response.meta['comm_name'],
        }
        yield scrapy.Request(url=meta['origin_url'], callback=self.parse_page,
                             headers=self.fake_headers, meta=meta)

    def parse_page(self, response):
        """Fan out over every result page of the current category.

        The page counter sits in the second span of div.digg as '共N页';
        when it is absent the category has a single page.
        """
        pager = response.xpath('//div[@class="digg"]//span[2]/text()')
        if not pager:
            page_urls = [response.url]
        else:
            page_count = int(pager[0].root.replace('共', '').replace('页', ''))
            base = response.url.split("/ss_0_0/")[0]
            page_urls = [f"{base}/{page}_15/ss_0_0/" for page in range(1, page_count + 1)]
        for url in page_urls:
            meta = {
                'origin_url': url,
                'announcement_title': response.meta['announcement_title'],
                'comm_name': response.meta['comm_name'],
            }
            yield scrapy.Request(url=url, callback=self.pares_clearfix_2,
                                 headers=self.fake_headers, meta=meta)

    def pares_clearfix_2(self, response):
        """Yield one request per product on a list page.

        (Method name keeps its historical 'pares' typo — it is wired as a
        callback and renaming would be an interface change.)
        """
        for h6 in response.xpath("//div[@class='listResult']//h6"):
            meta = {
                'com_type': response.meta['announcement_title'],
                'comm_name': response.meta['comm_name'],
                'origin_url': h6.xpath("./a/@href").extract()[0],
                'com_title': h6.xpath("./a/@title").extract()[0],
            }
            yield scrapy.Request(url=meta['origin_url'], callback=self.start_param_parse,
                                 headers=self.fake_headers, meta=meta)

    def start_param_parse(self, response):
        """Collect cover images and follow the product's parameter tab.

        In the detail-page nav the parameter tab is item 2 and the picture
        tab is item 4; the picture URL rides along in meta and is requested
        by param_parse afterwards.
        """
        home_page = [a.xpath("./img/@src")[0].root
                     for a in response.xpath('//div [@class="imgBox"]/div/a')]
        meta = {
            'com_type': response.meta['com_type'],
            'comm_name': response.meta['comm_name'],
            'origin_url': response.meta['origin_url'],
            'com_title': response.meta['com_title'],
            # downstream expects the literal marker string when no cover exists
            'home_page': home_page if home_page else '无图',
            'pic_url': response.xpath('//div[@class="artNav"]/ul/li[4]/a/@href')[0].root,
        }
        param_url = response.xpath('//div[@class="artNav"]/ul/li[2]/a/@href')[0].root
        yield scrapy.Request(url=param_url, callback=self.param_parse, meta=meta)

    def param_parse(self, response):
        """Parse the specification table and chain to the picture page.

        Result shape, stored in meta['param']:
            [{section: [{'属性名称:<name>': ['属性值:<v>', '属性单位:<u>']}, ...]}, ...]
        or the marker string '无参数' when no table exists.

        Unlike the original this does not `del` the header row from the
        selector list while still iterating over it (that mutation could
        raise IndexError on the next section comparison).
        """
        meta = response.meta
        sections = response.xpath("//div[@class='paramCon']/div[@class='sideBar']/a/text()").extract()
        param_groups = []
        for ul in response.xpath("//div[@class='paramCon']/ul"):
            rows = ul.xpath("./li")
            header = rows[0].xpath('./strong/text()').extract()[0]
            for section in sections:
                if header != section:
                    continue
                entries = []
                for li in rows[1:]:
                    name = '属性名称:' + li.xpath('./span/text()').extract()[0]
                    unit = re.findall(r'[(](.*?)[)]', name)  # unit is given in parentheses
                    if unit:
                        unit = unit[0]
                        # strip the bracketed unit suffix from the attribute name
                        name = re.sub(u"\\(.*?\\)|\\{.*?\\}|\\[.*?\\]|\\<.*?\\>", "", name)
                    else:
                        unit = ''
                    value = '属性值:' + li.xpath('./label/text()').extract()[0]
                    entries.append({name: [value, '属性单位:' + unit]})
                param_groups.append({section: entries})
        meta['param'] = param_groups if param_groups else '无参数'
        yield scrapy.Request(url=meta['pic_url'], callback=self.pic_parse, meta=meta)

    def pic_parse(self, response):
        """Collect gallery images and emit the finished TieJiaWangItem."""
        meta = response.meta
        pics = [li.xpath("./a/img/@src")[0].root
                for li in response.xpath("//div[@class='pic_list']/ul[@class='clearfix']/li")]
        item = TieJiaWangItem()
        item['origin_url'] = meta['origin_url']
        item['first_class'] = meta['comm_name']
        item['second_class'] = meta['com_type']
        item['name'] = meta['com_title']
        item['param'] = meta['param']
        item['pic'] = pics if pics else '无图'  # marker string for an empty gallery
        item['home_page'] = meta['home_page']
        item['source_type'] = '铁甲网'
        item["is_parsed"] = 0
        # Back-fill the top-level category. The original only did this on the
        # with-pictures branch, leaving zero_class unset for items without a
        # gallery; it is now applied on both paths.
        for zero_class, members in self.CATEGORY_MAP.items():
            if item['first_class'] in members:
                item['zero_class'] = zero_class
                break
        yield item
