# -*- coding: UTF-8 -*-
"""
@author:zhangxing
@file:beijingjianshegongchengxinxiwang.py
"""

import time

import scrapy

from commonresources.inner_utils.selenium_utils import SeleniumUtils
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.beijing.items import BeiJingShiJianSheGongChengXinXiWangItem
from commonresources.spiders.basespider import BaseSpider


class BeiJingShiJianSheGongChengXinXiWangSpider(BaseSpider):
    """Spider for 北京市建设工程信息网 (Beijing Construction Engineering Information Network).

    Home page: http://www.bcactc.com/

    Listing pages crawled (driven through Selenium inside :meth:`parse`):
        Tender announcements (招标公告):
            landscaping:          http://www.bcactc.com/home/gcxx/now_ylzbgg.aspx
            construction:         http://www.bcactc.com/home/gcxx/now_sgzbgg.aspx
            materials/equipment:  http://www.bcactc.com/home/gcxx/now_clsbzbgg.aspx
        Award announcements (中标公告):
            construction:         http://www.bcactc.com/home/gcxx/now_zbjggs.aspx?type=sg
            landscaping:          http://www.bcactc.com/home/gcxx/now_zbjggs.aspx?type=yl
            materials/equipment:  http://www.bcactc.com/home/gcxx/now_zbjggs.aspx?type=clsb
    """
    name = "BeiJingShiJianSheGongChengXinXiWang"
    name_zh = "北京市建设工程信息网"
    province = "北京"
    # Placeholder start URL: the real listing pages are fetched through
    # Selenium in parse(); this request only bootstraps the callback.
    start_urls = ["https://www.baidu.com/"]

    def __init__(self, full_dose=False):
        """Initialize the spider.

        :param full_dose: when truthy, paginate every listing to the end;
            otherwise only the first three pages of each listing are crawled
            (incremental update).
        """
        super(BeiJingShiJianSheGongChengXinXiWangSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict

    def parse(self, response):
        """Walk every listing page with Selenium and yield one detail-page
        Request per announcement row; ``parse_item_new`` handles the detail.

        :param response: bootstrap response (ignored — listings are fetched
            by the embedded Selenium browser, not by scrapy).
        """
        # (announcement_type, project_type, listing URL) for each section.
        listings = [
            ["招标公告", "施工", "http://www.bcactc.com/home/gcxx/now_sgzbgg.aspx", ],
            ["招标公告", "材料设备", "http://www.bcactc.com/home/gcxx/now_clsbzbgg.aspx", ],
            ["中标公告", "施工", "http://www.bcactc.com/home/gcxx/now_zbjggs.aspx?type=sg", ],
            ["中标公告", "材料设备", "http://www.bcactc.com/home/gcxx/now_zbjggs.aspx?type=clsb", ],
            ["中标公告", "园林", "http://www.bcactc.com/home/gcxx/now_zbjggs.aspx?type=yl", ],
            ["招标公告", "园林", "http://www.bcactc.com/home/gcxx/now_ylzbgg.aspx", ],
        ]
        for announcement_type, project_type, list_url in listings:
            _selenium = SeleniumUtils(url=list_url,  # ip_proxy=True,
                                      # headless=False,
                                      slide_bottom=10000,
                                      max_window=True, )
            try:
                # Incremental runs visit at most 3 pages per listing; full
                # runs paginate until the page no longer renders rows.
                # (Direct condition replaces the former eval()'d string —
                # eval on built strings is an avoidable security smell.)
                max_pages = None if self.full_dose else 3
                page = 0
                while max_pages is None or page < max_pages:
                    page += 1
                    if page == 1:
                        selector = _selenium.get_scrapy_selector()
                    else:
                        # Click the pager's "next" button and re-parse.
                        selector = _selenium.handle_next_page('//input[@name="PagerControl1:_ctl2"]')
                    if not selector.attrib:
                        break  # page failed to render or pager is exhausted
                    for row in selector.xpath("//tr[contains(@class,'gridview')]"):
                        item = {
                            'item': BeiJingShiJianSheGongChengXinXiWangItem(),
                            'announcement_type': announcement_type,
                            'project_type': project_type,
                            'origin_url': "http://www.bcactc.com/home/gcxx/"
                                          + row.xpath('./td/a/@href').extract_first(),
                            'announcement_title': row.xpath('./td/a/text()').extract_first(),
                            # Keep only the leading date portion of the cell text.
                            'release_time': row.xpath(
                                './td[@class="gridview_RowTD"][last()]/text()').extract_first()[:10],
                            'project_num': row.xpath(
                                './td[@class ="gridview_FirstField"]/text()').extract_first(),
                        }
                        if not item['project_num']:
                            # Some listings lack the dedicated first-field cell;
                            # fall back to the first plain row cell.
                            item['project_num'] = row.xpath(
                                './td[@class="gridview_RowTD"][1]/text()').extract_first()
                        yield scrapy.Request(url=item['origin_url'],
                                             meta=item,
                                             callback=self.parse_item_new,
                                             )
            finally:
                # Pace requests and always release the browser, even if
                # row parsing raised mid-listing (the original leaked the
                # Selenium session on any exception).
                time.sleep(1)
                _selenium.close()
