import re
import scrapy
import time
from spidertools.utils.time_utils import get_current_date
from commonresources.spider_items.shanghai.items import ShangHaiShiGongGongZiYuanJiaoYiZhongXinItem
from commonresources.spiders.basespider import BaseSpider
from scrapy.selector import Selector
import requests
import datetime



class ShangHaiShiGongGongZiYuanJiaoYiZhongXin(BaseSpider):
    """上海市公共资源交易中心 (Shanghai Public Resource Trading Center) spider.

    Posts to the site's listing endpoint (queryContent-jyxx.jspx), walks the
    paginated result list, and yields one detail-page request per announcement
    row.  When ``full_dose`` is False, only rows published yesterday are
    followed (incremental crawl); the first row with any other date sets the
    ``need_break`` flag and stops pagination after the current page.
    """

    name = 'ShangHaiShiGongGongZiYuanJiaoYiZhongXin'
    name_zh = "上海市公共资源交易中心"
    province = "上海"

    def __init__(self, full_dose=True):
        # full_dose=True: crawl the whole history; False: yesterday only.
        super(ShangHaiShiGongGongZiYuanJiaoYiZhongXin, self).__init__(full_dose)

    def start_requests(self):
        """Kick off one listing POST per enabled announcement type."""
        # Site channelId for each announcement-type label.
        type_dict = {'招标公告': '29', '资格预审公告': '30',
                     '中标候选人公告': '32', '中标公告': '33'}
        # Only 中标公告 is currently enabled; add more keys here to widen scope.
        type_list = ['中标公告']

        # https://www.shggzy.com/queryContent_<n>-jyxx.jspx handles paging.
        for project_type in type_list:
            type_number = type_dict[project_type]
            yield scrapy.FormRequest(
                url='https://www.shggzy.com/queryContent-jyxx.jspx',
                meta={
                    'project_type': project_type,
                    'type_number': type_number,
                    'need_break': False,
                    'page': 1,
                },
                callback=self.first_parse,
                formdata={
                    'title': '',
                    'channelId': type_number,
                    'origin': '',
                    'inDates': '4000',  # look-back window accepted by the site
                    'ext': '',
                },
            )

    def first_parse(self, response):
        """Parse one listing page: yield a detail request per row, then page on.

        Reads ``project_type`` / ``type_number`` / ``need_break`` / ``page``
        from ``response.meta`` (set by start_requests or by the previous page's
        request).  Yields ``scrapy.Request`` objects for detail pages (handled
        by ``parse_item_new``) and one ``FormRequest`` for the next page.
        """
        # NOTE(review): hard-coded to 2, so only listing page 1 is ever
        # parsed (page 2 is requested but discarded by the guard below).
        # Looks like a debug leftover -- confirm before raising the limit.
        page_number = 2
        page = response.meta['page']

        # Guard clause: stop when the incremental cut-off was hit or the
        # page cap is reached.
        if response.meta['need_break'] or page >= page_number:
            return

        row_list = Selector(text=response.text).xpath(
            "//div[@class='gui-title-bottom']/ul/li")
        # Hoisted out of the loop: yesterday's date, used as the incremental
        # cut-off when full_dose is False.
        yesterday = (datetime.datetime.now()
                     + datetime.timedelta(days=-1)).strftime('%Y-%m-%d')

        for li in row_list:
            # The row's detail URL is embedded in its onclick="...('URL')".
            onclick = li.xpath('./@onclick')[0].extract()
            url = re.findall(r"'(.*?)'", onclick)[0]
            title = (li.xpath('./span[@class="cs-span2"]/text()')[0].extract()
                     .replace('\n', '').replace('\t', '').replace(' ', ''))
            release_time = li.xpath('./span[4]/text()')[0].extract()  # 公告发布时间

            item = {
                'announcement_type': response.meta['project_type'],  # 公告类型
                'announcement_title': title,                         # 标题
                'release_time': release_time,                        # 发布时间
                'origin_url': url,                                   # 网站地址
                'item': ShangHaiShiGongGongZiYuanJiaoYiZhongXinItem(),
            }

            if not self.full_dose and release_time != yesterday:
                # Incremental mode: a row that is not from yesterday marks the
                # cut-off; finish this page but do not request the next one.
                response.meta['need_break'] = True
            else:
                yield scrapy.Request(url=item['origin_url'],
                                     callback=self.parse_item_new,
                                     meta=item)
                # NOTE(review): time.sleep blocks the Twisted reactor; the
                # DOWNLOAD_DELAY setting is the non-blocking way to throttle.
                time.sleep(2)

        if not response.meta['need_break']:
            print('这是第' + str(page) + '页')
            page += 1
            time.sleep(3)
            yield scrapy.FormRequest(
                url='https://www.shggzy.com/queryContent_' + str(page) + '-jyxx.jspx',
                meta={
                    'project_type': response.meta['project_type'],
                    'type_number': response.meta['type_number'],
                    'need_break': response.meta['need_break'],
                    'page': page,
                },
                callback=self.first_parse,
                formdata={
                    'title': '',
                    'channelId': response.meta['type_number'],
                    'origin': '',
                    'inDates': '4000',
                    'ext': '',
                },
            )






