import time
import re
import scrapy
from spidertools.utils.time_utils import get_current_date
from commonresources.inner_utils.selenium_utils import SeleniumUtils
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.beijing.items import BeiJingShiJianSheGongChengXinXiWangItem
from commonresources.spiders.basespider import BaseSpider


class ShangHaiJianSheGongChengJiaoYiFuWuZhongXin(BaseSpider):
    """Spider for 上海建设工程交易服务中心 (Shanghai Construction Engineering
    Trading Service Center).

    Walks the announcement listing pages with a Selenium-driven browser,
    extracts (type, url, title, release time) per row and yields one
    ``scrapy.Request`` per announcement, handled by ``parse_item_new``
    (presumably provided by ``BaseSpider`` — not visible in this file).
    """

    name = "ShangHaiJianSheGongChengJiaoYiFuWuZhongXin"
    name_zh = "上海建设工程交易服务中心"
    province = "上海"
    # Placeholder start URL: the real listing pages are fetched via Selenium
    # inside parse(), not through Scrapy's downloader.
    start_urls = ["https://www.baidu.com/"]

    def __init__(self, full_dose=False):
        # full_dose=True crawls every listing page; False only the first few.
        super(ShangHaiJianSheGongChengJiaoYiFuWuZhongXin, self).__init__(full_dose)
        self.convert_dict = convert_dict

    def parse(self, response):
        """Dispatch each configured listing to its dedicated page walker.

        BUG FIX: the per-listing processing used to sit *outside* the
        ``for`` loop, so only the last entry of the listing table was ever
        processed, and one Selenium session leaked per earlier entry.
        Dispatch and session cleanup now run once per listing.

        NOTE(review): the item class used below is the *Beijing* item
        (BeiJingShiJianSheGongChengXinXiWangItem) although this spider
        targets Shanghai — looks like a copy/paste leftover; confirm
        against the downstream pipeline before changing it.
        """
        listings = [
            ["招标公告",  "https://ciac.zjw.sh.gov.cn/NetInterBidweb/GKTB/SgfbZbxx.aspx", ],
            # ["中标公告",  "https://ciac.zjw.sh.gov.cn/XmZtbbaWeb/gsqk/ZbjgGkList.aspx", ],
            # ["中标候选人公示", "http://www.shcpe.cn/jyfw/xxfw/u1ai18.html", ],
        ]
        for announcement_type, listing_url in listings:
            selenium = SeleniumUtils(url=listing_url,  # ip_proxy=True,
                                     # headless=False,
                                     slide_bottom=10000,
                                     max_window=False, )
            if announcement_type == '招标公告':
                yield from self._crawl_tender_list(selenium, announcement_type)
            elif announcement_type == '中标公告':
                yield from self._crawl_award_list(selenium, announcement_type)
            time.sleep(1)
            selenium.close()

    def _crawl_tender_list(self, selenium, announcement_type):
        """Walk the 招标公告 (tender announcement) listing pages.

        Yields one scrapy.Request per announcement row. In incremental mode
        only the first pages are visited; in full mode paging continues until
        the "next page" selector comes back empty.
        """
        # Constrain the listing to [2021-01-01, today] and submit the form.
        input_xpath_dict = {
            '//*[@id="txt_beginTime"]': '2021-01-01',
            '//*[@id="txt_endTime"]': get_current_date()
        }
        click_xpath = '//*[@id="Btn_submit"]'
        selenium.input(input_xpath_dict=input_xpath_dict, click_xpath=click_xpath, need_click=True)

        page_no = 0
        on_first_page = True
        # BUG FIX: this used to be `while eval(stop_condition)` with a string
        # condition built beforehand — eval on a constructed string is slow
        # and dangerous; a plain boolean expresses the same loop exactly
        # (incremental mode passes the check at page_no 0, 1 and 2).
        while self.full_dose or page_no <= 2:
            page_no += 1
            if on_first_page:
                selector = selenium.get_scrapy_selector()  # selector for the current page
                on_first_page = False
            else:
                time.sleep(5)
                selector = selenium.handle_next_page('//*[@id="nextPages"]')  # click "next", reselect
                time.sleep(5)
            if not selector.attrib:  # empty selector -> no further pages
                break
            for row in selector.xpath("//*[@id='form1']/div[3]/table/tbody/tr/td/table[3]/tbody/tr"):
                if '获取文件开始日期' in row.xpath('string(.)').extract()[0]:
                    continue  # header row
                # The row link's onclick carries the announcement id in its
                # first quoted argument; rebuild the detail-page URL from it.
                onclick = row.xpath('./td[2]/a/@onclick')[0].extract()
                number = re.findall(r"'(.*?)',", onclick)[0]
                origin_url = 'https://ciac.zjw.sh.gov.cn/NetInterBidweb/GKTB/DefaultV2019.aspx?gkzbXh=' + str(number)
                title = row.xpath('./td[2]/a/span/text()')[0].extract().replace(' ', '').replace('\n', '')
                release_time = row.xpath('./td[3]/a/text()')[0].extract().replace('/', '-').replace(' ', '').replace('\n', '')
                item = {'item': BeiJingShiJianSheGongChengXinXiWangItem(),
                        'announcement_type': announcement_type,
                        'origin_url': origin_url,
                        'announcement_title': title,
                        'release_time': release_time}
                yield scrapy.Request(url=item['origin_url'],
                                     headers={},
                                     meta=item,
                                     callback=self.parse_item_new,
                                     )
                time.sleep(5)  # throttle detail-page requests

    def _crawl_award_list(self, selenium, announcement_type):
        """Walk the 中标公告 (award announcement) listing pages.

        Currently unreachable — the corresponding entry in parse() is
        commented out — but kept so it can be re-enabled.

        WARNING(review): in full-dose mode the outer loop never terminates
        (the exit flag is only cleared in incremental mode); the original
        code behaved the same way — fix before re-enabling.
        """
        # Constrain the listing to [2021-01-01, today] and submit the form.
        input_xpath_dict = {
            '//*[@id="txtZbrqBegin"]': '2021-01-01',
            '//*[@id="txtZbrqEnd"]': get_current_date()
        }
        click_xpath = '//*[@id="btnSearch"]'
        selenium.input(input_xpath_dict=input_xpath_dict, click_xpath=click_xpath, need_click=True)

        inner_pages = 10  # "small" pager clicks per outer page group
        if not self.full_dose:
            inner_pages = 6
        group = 0  # outer page-group counter
        selector = selenium.get_scrapy_selector()  # first full listing page
        keep_going = True
        while keep_going:
            for step in range(inner_pages):
                # The first group's pager starts one cell earlier than later
                # groups (later groups gain an extra leading pager cell).
                cell_offset = 2 if group == 0 else 3
                for row_idx, row in enumerate(selector.xpath('//*[@id="gvZbjgGkList"]/tbody/tr')):
                    row_text = row.xpath('string(.)').extract()[0]
                    if '项目名称' in row_text or '...' in row_text:
                        continue  # header row or pager row
                    url_xpath = '//*[@id="gvZbjgGkList_lbXmmc_' + str(row_idx - 1) + '"]'
                    origin_url = selenium.get_current_url(url_xpath)
                    selenium.back()
                    print(row_text)
                    title = row.xpath('./td[2]/a/text()')[0].extract().replace(' ', '').replace('\n', '')
                    release_time = row.xpath('./td[3]/text()')[0].extract().replace('年', '-').replace('月', '-').replace('日', '').replace(' ', '').replace('\n', '')
                    item = {'item': BeiJingShiJianSheGongChengXinXiWangItem(),
                            'announcement_type': announcement_type,
                            'origin_url': origin_url,
                            'announcement_title': title,
                            'release_time': release_time}
                    yield scrapy.Request(url=item['origin_url'],
                                         headers={},
                                         meta=item,
                                         callback=self.parse_item_new,
                                         )
                    time.sleep(5)
                new_page_xpath = '//*[@id="gvZbjgGkList"]/tbody/tr[14]/td/table/tbody/tr/td' + f'[{cell_offset + step}]/a'
                time.sleep(5)
                selector = selenium.handle_next_page(new_page_xpath)
            group += 1
            if inner_pages == 6:
                keep_going = False
