import json
import re
import time
from datetime import datetime
from hashlib import sha1
from urllib import parse
from urllib.parse import unquote

import scrapy
from huobiao.tools.c_format import winTheBidding

# Search keywords (lab / medical-equipment terms) used to build the
# accurate-search URLs; one crawl request is issued per entry, so the list
# is kept free of exact duplicates (a trailing duplicate '核酸提取仪' was
# removed — it only produced a redundant request).
KEYS = [
    '医疗设备', '净化空调', '二氧化碳培养箱', '洁净工作台', '灭菌器', '冷藏箱',
    '药品阴凉柜', '经皮黄疸仪', '酶标仪', '免疫分析系统', 'pcr柜', '药品稳定性试验箱',
    '负压隔离舱', '层面光照培养箱',
    'PCR仪', '臭氧消毒柜', '生物安全柜', '冻干机', '核酸采样亭', '恒温水箱', '生化分析仪', '液氮罐', '旋涡混合器', '冷冻离心机',
    '霉菌培养箱', '酶免工作站', '分子杂交仪', '手术室多功能恒温箱', '微生物培养箱', '恒温恒湿称重箱',
    '医用冷藏箱', '安全存储柜', '医用冷藏柜', '核酸提取试剂', '方舱实验室', '生物安全转运箱', '安全柜', '组合式空调', '微孔板孵育器', '干培两用箱',
    '包埋机', '酸碱柜', '台式恒温摇床', '采样工作站', '医用低温保存箱', '离心机', '血液冷藏箱', '脱水机纯水机',
    '赛默飞仪器', '真空干燥箱', '病理取材台', '黄疸治疗仪', '移液器', '恒温恒湿培养箱', '核酸提取仪', '电热恒温培养箱',
    '赛默飞耗材', '恒温金属浴', '恒温振荡培养箱', '配药柜', '病毒采样管', '经皮黄疸治疗仪', '生化培养箱', '血浆融化箱', '过滤器',
    # NOTE: 'pcr仪' differs from 'PCR仪' only in letter case; both are kept
    # because the search URLs they produce are distinct.
    '标本柜', '冷链产品', '电热鼓风干燥箱', '培养箱', '净化工程', '消毒机器人', '立式蒸汽灭菌器', '洁净台', '黄疸仪', 'pcr仪',
]




def strtostamp(t):
    """Convert a 'YYYY-MM-DD' date string to an integer Unix timestamp.

    The value comes from scraped HTML text nodes, so surrounding
    whitespace is tolerated (it would otherwise make ``strptime`` raise
    ``ValueError``).

    NOTE(review): the parsed datetime is naive, so the timestamp is
    interpreted in the machine's local timezone — presumably intended,
    but confirm if the pipeline expects UTC.
    """
    parsed = datetime.strptime(t.strip(), '%Y-%m-%d')
    return int(parsed.timestamp())


class HbSpider(scrapy.Spider):
    """Crawl huobiao.cn tender-search results for the keywords in KEYS.

    Flow: log in (``start_requests``) -> build one accurate-search URL per
    keyword (``parse``) -> walk the paginated result list (``parse_list``)
    -> extract winning-bidder company names from each detail page
    (``parse_content``) and yield one item per company.
    """

    name = 'hb'

    def start_requests(self):
        """Log in to huobiao.cn.

        The session cookie is retained by Scrapy's cookie middleware for
        all subsequent requests, so the returned token is never needed.
        """
        url = 'http://www.huobiao.cn/api/hbuser/login'
        # SECURITY: credentials are hard-coded; move them to Scrapy
        # settings or environment variables before sharing this code.
        payload = {
            'phone': '15665780252',
            'password': 'gxcgq123',
        }
        # The endpoint expects a JSON body, so use a plain Request with an
        # explicit Content-Type header (FormRequest with a raw ``body``
        # would not set the JSON content type).
        yield scrapy.Request(
            url=url,
            method='POST',
            body=json.dumps(payload).encode('utf-8'),
            headers={'Content-Type': 'application/json'},
            callback=self.parse,
            dont_filter=True,
        )

    def parse(self, response, **kwargs):
        """After login, fan out one accurate-search request per keyword."""
        # token = response.json()['data']['token']  # token is unused
        for key in KEYS:
            yield scrapy.Request(
                url=f'http://www.huobiao.cn/bid/type_108/word_{key}/search_accurate/',
                callback=self.parse_list,
            )

    def parse_list(self, response):
        """Parse one page of search results and follow each detail link.

        Also follows the trailing "下一页" (next page) pagination link.
        """
        # Recover the search keyword from the URL; the two URL layouts
        # (winbid listing vs. bid search) embed it differently.
        decoded_url = unquote(response.url)
        try:
            search_key = re.findall(r'word_(.+?)\s*中标公告', decoded_url)[0]
        except IndexError:
            search_key = re.findall(r'word_(.+?)/', decoded_url)[0]

        for li in response.xpath('//div[@class="list"]/a'):
            # Notice title (may be split across several text nodes).
            title = ''.join(li.xpath('./div[@class="item-title"]/span[@class="big"]//text()').getall())
            # Absolute detail-page link.
            href = parse.urljoin(response.url, li.xpath('./@href').get())
            item = {
                'id': '',  # filled in parse_content (sha1 of enname)
                'href': href,
                'title': title,
                'pubdate': strtostamp(li.xpath('./div[@class="item-property"]/div[@class="time"]//text()').get()),
                'enhref': '',
                'enname': '',
                'legalperson': '',
                'updatetime': int(time.time()),
                'search_key': search_key,
            }
            yield scrapy.Request(
                url=href,
                callback=self.parse_content,
                meta={'item': item},
            )

        # Pagination: the last link in the pager reads "下一页" when there
        # is a next page.
        if response.xpath('//div[@class="pages"]/a[last()]/text()').get() == '下一页':
            next_url = parse.urljoin(
                response.url,
                response.xpath('//div[@class="pages"]/a[last()]/@href').get(),
            )
            # NOTE(review): time.sleep blocks the whole Twisted reactor;
            # prefer the DOWNLOAD_DELAY setting. Kept to preserve the
            # current throttling behavior.
            time.sleep(1)
            yield scrapy.Request(
                url=next_url,
                callback=self.parse_list,
                meta={'item': {'search_key': search_key}},
            )

    def parse_content(self, response):
        """Extract winning-bidder names and yield one item per company."""
        item = response.meta['item']
        ennames = winTheBidding(response.text)

        # (An empty ennames list simply yields nothing.)
        for enname in ennames:
            # "abbrev(full name)" -> keep only the full name when the
            # abbreviation is contained in it.
            if '(' in enname and ')' in enname:
                pairs = re.findall(r'([\u4e00-\u9fa5]*)\(([\u4e00-\u9fa5]*)\)', enname)
                # Guard: parentheses may enclose non-CJK text, in which
                # case the pattern finds nothing (previously IndexError).
                if pairs:
                    abridge, full_name = pairs[0]
                    if abridge in full_name:
                        enname = full_name
            item['enname'] = enname
            # NOTE(review): the id hashes only the company name, so the
            # same company winning different tenders produces colliding
            # ids; include href in the hash if per-notice uniqueness is
            # required.
            item['id'] = sha1(enname.encode()).hexdigest()
            yield item
