# -*- coding: utf-8 -*-
import re
import json
import scrapy
from spidertools.utils.time_utils import get_current_date
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.tianjin.items import TianJinShiGongGongZiYuanJiaoYiWangItem
from commonresources.spiders.basespider import BaseSpider
from pprint import pprint
import requests
import datetime
from commonresources.inner_utils.selenium_utils import SeleniumUtils
import time


class TianJinShiGongGongZiYuanJiaoYiWangSpider(BaseSpider):
    """Spider for the Tianjin public resource trading site (天津市公共资源交易网).

    Drives a Selenium-backed browser through the paginated announcement
    listings, extracts title / URL / release time for every row, and yields
    one ``scrapy.Request`` per announcement so the detail page can be parsed
    by ``parse_item_new`` (defined elsewhere in the project).
    """

    name = 'TianJinShiGongGongZiYuanJiaoYiWang'
    name_zh = "天津市公共资源交易网"
    province = "天津"
    # allowed_domains = ['ggzyjy.sc.gov.cn']

    start_urls = ['http://ggzy.zwfwb.tj.gov.cn/queryContent_2-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=83&beginTime=&endTime=']

    headers = {

        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                      " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
    }

    # Number of listing pages crawled on an incremental (non-full-dose) run.
    # The original code eval'd the string "i<=2" *before* incrementing i,
    # which executes exactly 3 iterations — kept identical here.
    INCREMENTAL_PAGE_LIMIT = 3

    def __init__(self, full_dose=True):
        """:param full_dose: True = crawl every listing page; False = only the
        first ``INCREMENTAL_PAGE_LIMIT`` pages (incremental update)."""
        super(TianJinShiGongGongZiYuanJiaoYiWangSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict

    def parse(self, response):
        # (announcement type label, listing-page URL) pairs to crawl.
        its = [
            ["招标公告", "http://ggzy.zwfwb.tj.gov.cn/queryContent-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=81&beginTime=&endTime="  ],
            # ["中标公告",  "http://ggzy.zwfwb.tj.gov.cn/queryContent-jyxx.jspx?title=&inDates=&ext=&ext1=&origin=&channelId=83&beginTime=&endTime=" ],
            # ["中标候选人公示", "http://www.shcpe.cn/jyfw/xxfw/u1ai18.html", ],
        ]
        for announcement_type, listing_url in its:
            _selenium = SeleniumUtils(url=listing_url,  # ip_proxy=True,
                                      headless=False,
                                      slide_bottom=10000,
                                      max_window=False, )

            # None means "no page limit" (full dose); otherwise stop after
            # page_limit listing pages.  This replaces the original
            # eval(stop_condition) construct with an explicit check.
            page_limit = None if self.full_dose else self.INCREMENTAL_PAGE_LIMIT
            page, on_first_page = 0, True
            # NOTE(review): the original exit test on selector.attrib is
            # commented out below, so a full-dose run has no in-loop exit —
            # confirm handle_next_page() terminates the crawl at the last page.
            while page_limit is None or page < page_limit:
                page += 1
                if on_first_page:
                    selector = _selenium.get_scrapy_selector()  # Selector for the first listing page
                    on_first_page = False
                else:
                    # Click "next page" and get a Selector for the new page.
                    selector = _selenium.handle_next_page('//ul[@class="pages-list"]/li[10]/a')

                # if not selector.attrib:  # original break-on-missing-page check, disabled upstream
                #     break
                # Walk every announcement row, build an item dict, and hand the
                # detail URL to parse_item_new.
                for row_index, row in enumerate(selector.xpath('//ul[@class="article-list2"]/li')):

                    url_xpath = '//ul[@class="article-list2"]/li[' + str(row_index + 1) + ']'
                    origin_url = _selenium.get_current_url(url_xpath, need_kill=True)

                    link = row.xpath('./div/a')[0]
                    title = link.xpath('string(.)').extract()[0].replace(' ', '').replace('\n', '').replace('\t', '')
                    release_time = row.xpath('./div/div/text()')[0].extract().replace(' ', '').replace('\n', '')

                    item = {'item': TianJinShiGongGongZiYuanJiaoYiWangItem(),
                            'announcement_type': announcement_type,
                            'origin_url': origin_url,
                            'announcement_title': title,
                            'release_time': release_time}
                    # NOTE(review): class-level self.headers is defined but an
                    # empty dict is sent here — confirm whether that is intended.
                    yield scrapy.Request(url=item['origin_url'],
                                         headers={},
                                         meta=item,
                                         callback=self.parse_item_new,
                                         )
                    time.sleep(2)  # throttle detail-page requests
