import time

import scrapy
from spidertools.utils.time_utils import get_current_date

from spidertools import selenium_init
from spidertools.common_pipeline.base_item import convert_dict
from commonresources.spider_items.jiangsu.huaian.items import HuaiAnGongGongZiYuanJiaoYiZhongXinItem


class HuaiAnGongGongZiYuanJiaoYiZhongXinSpider(scrapy.Spider):
    """
    Listing spider for 淮安公共资源交易中心 (Huai'an Public Resource Trading Center).
    http://ggzy.huaian.gov.cn/epointweb/

    Drives a Selenium browser through the paginated search results inside the
    "searchList" iframe, yields one Scrapy request per announcement row, and
    emits a final item for each detail page in ``handle_next_html``.
    """
    name = 'HuaiAnGongGongZiYuanJiaoYiZhongXin'
    name_zh = '淮安公共资源交易中心'
    province = "江苏"
    city = '淮安'
    allowed_domains = ['ggzy.huaian.gov.cn']
    start_urls = ['http://ggzy.huaian.gov.cn/EpointWeb/showinfo/jyxxsearch.aspx']

    def __init__(self, full_dose=False):
        """
        :param full_dose: crawl the full history when true; otherwise stop
            paginating once a record older than today is seen. Scrapy passes
            ``-a`` arguments as strings, so string forms such as "false"/"0"
            must not be treated as truthy.
        """
        self.browser_cookie = {}
        self.convert_dict = convert_dict
        # Normalize: accept a real bool or the string forms Scrapy's -a yields.
        # Previously `-a full_dose=false` produced the truthy string "false".
        if isinstance(full_dose, str):
            self.full_dose = full_dose.strip().lower() in ('true', '1', 'yes')
        else:
            self.full_dose = bool(full_dose)
        super().__init__()

    def parse(self, response):
        """Walk the paginated listing with Selenium and yield detail requests."""
        if response.status != 200:
            raise Exception("网页请求失败！")  # TODO: raise a project-specific exception class

        d = selenium_init.get_browser()
        try:
            d.get("http://ggzy.huaian.gov.cn/EpointWeb/showinfo/jyxxsearch.aspx")
            d.maximize_window()
            time.sleep(3)  # let the page and its result iframe finish loading
            iframe_element = d.find_element_by_id("searchList")
            d.switch_to.frame(iframe_element)

            # Scroll to the bottom so the pager element ("huifont") is rendered.
            d.execute_script("var q=document.documentElement.scrollTop=10000")
            pager_text = d.find_element_by_class_name("huifont").text
            # Pager text looks like "current/total"; page 1 is already shown,
            # so `page` counts the pages we still scrape in the loop below.
            page = int(pager_text.split('/')[-1]) - 1
            stop = False
            i = 0
            while i < page and not stop:
                rows = d.find_elements_by_xpath('//tr[@class="ewb-trade-tr"]')
                for row in rows:
                    tds = row.find_elements_by_class_name("ewb-trade-td")
                    item = dict()
                    item['announcement_title'] = tds[1].text
                    item['origin_url'] = tds[1].find_element_by_tag_name('a').get_attribute("href")
                    # Some rows lack the area column (only 3 cells) — assumed
                    # from the len(tds) == 4 guard; verify against live markup.
                    item['project_area'] = tds[2].text if len(tds) == 4 else ''
                    item['release_time'] = tds[-1].text
                    # Incremental mode: once a record older than today appears,
                    # finish yielding this page but do not paginate further.
                    if item['release_time'] != get_current_date() and not self.full_dose:
                        stop = True
                    yield scrapy.Request(url=item['origin_url'], meta=item,
                                         cookies=self.browser_cookie,
                                         callback=self.handle_next_html,
                                         dont_filter=True)
                i += 1
                # Don't click "next" when we have nothing left to scrape
                # (the original code performed one wasted click here).
                if stop or i >= page:
                    break
                try:
                    d.find_element_by_xpath("//*[contains(text(),'下页 >')]").click()
                    time.sleep(2)  # wait for the next page to render
                except Exception as e:
                    self.logger.warning("pagination stopped early: %s", e)
                    break
        finally:
            # Always release the browser, even if scraping raised mid-way
            # (the original leaked the driver on any exception).
            d.quit()

    def handle_next_html(self, response):
        """Combine the detail page HTML with the listing-row metadata into an item."""
        item = HuaiAnGongGongZiYuanJiaoYiZhongXinItem()
        item['project_area'] = response.meta['project_area']
        item['release_time'] = response.meta['release_time']
        item['origin_url'] = response.meta['origin_url']
        item['announcement_title'] = response.meta['announcement_title']
        item['html'] = response.text
        item['source_type'] = self.name_zh
        item['is_parsed'] = 0
        item['province'] = self.province
        item['city'] = self.city
        yield item
