import requests
import scrapy
from spidertools.utils.time_utils import get_current_date
import json
from commonresources.inner_utils.standardize_field_utils import check_city_field
from commonresources.spider_items.beijing.items import BeiJingShiGongGongZiYuanZongHeJiaoYiXiTongItem
from commonresources.spiders.basespider import BaseSpider
import time
from scrapy.selector import Selector
import re


class BeiJingShiGongGongZiYuanZongHeJiaoYiXiTong(BaseSpider):
    """Spider for 北京市公共资源综合交易系统 (Beijing public-resource trading system).

    Pages through two JSON list endpoints — tender notices (招标公告) and
    award notices (中标公告) — fetches each notice's detail page and yields
    one item per announcement.  In incremental mode (``full_dose`` false)
    crawling stops at the first notice whose release date is not today;
    presumably the lists are ordered newest-first — TODO confirm.
    """

    name = 'BeiJingShiGongGongZiYuanZongHeJiaoYiXiTong'
    name_zh = "北京市公共资源综合交易系统"
    province = "北京"

    # Browser-like headers for the blocking `requests` calls below.
    # Fixed: the original used the key 'USER_AGENT', which is not a valid
    # HTTP header name, so no User-Agent was actually presented to the site.
    HEADERS = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1"
    }

    def __init__(self, full_dose=False):
        """full_dose: crawl everything when true, only today's notices when false."""
        super(BeiJingShiGongGongZiYuanZongHeJiaoYiXiTong, self).__init__(full_dose)

    def start_requests(self):
        """Issue the first list-page POST for each announcement type."""
        common_form = {
            'defined_operations_': '',
            'nocheck_operations_': '',
            'gridSearch': 'false',
            # Cache-busting timestamp copied verbatim from the site's own requests.
            'nd': '1614923242222',
            'PAGESIZE': '10',
            'PAGE': '1',
            'sortField': '',
            'sortDirection': 'asc',
        }
        # (list URL, page limit, announcement type, filter_params_ value)
        targets = [
            ('https://www.bjggzyzhjy.cn/G2/public-notice!noticeList.do?',
             10, '招标公告',
             'bidNoticeId,packageId,projectId,enrollId,reviewWay,noticePublishWay,'
             'tenderNoticeNo,tenderCategory,enrollEntId,bidSectionNameAndCode,'
             'packageName,rowNum,uniformProjectCode,systemType,projectName,'
             'projectType,applyTimeStart,applyTimeEnd'),
            ('https://www.bjggzyzhjy.cn/G2/result-notice!resultNoticeList.do?',
             300, '中标公告',
             'resultLetterId,resultLetterGatherId,projectId,projectName,packageId,'
             'tenderCategory,tenderNoticeNo,enterpriseId,rowNum,systemType,'
             'bidSectionNameAndCode,uniformProjectCode,packageName,projectType'),
        ]
        for url, page_limit, type_name, filter_params in targets:
            # Each request gets its own dict: first_parse mutates 'PAGE' in place.
            form_data = dict(common_form, filter_params_=filter_params)
            yield scrapy.FormRequest(
                url=url,
                formdata=form_data,
                meta={
                    'need_break': False,
                    'type': url,
                    'page_number': page_limit,
                    'type_name': type_name,
                    'x_data': form_data,
                },
                callback=self.first_parse,
            )

    def first_parse(self, response):
        """Walk the JSON list pages for one announcement type, yielding items.

        NOTE(review): list pages and detail pages are fetched with blocking
        ``requests`` calls (bypassing Scrapy's scheduler and throttling);
        kept as in the original to preserve crawl behaviour.
        """
        if response.meta['need_break']:
            return
        list_url = response.meta['type']
        form_data = response.meta['x_data']
        type_name = response.meta['type_name']
        # NOTE(review): `page < limit` never fetches the final page
        # (pages 1..limit-1) — preserved from the original; confirm intent.
        page_limit = int(response.meta['page_number'])
        page = 1
        while page < page_limit:
            form_data['PAGE'] = str(page)
            reply = requests.post(url=list_url, headers=self.HEADERS, data=form_data)
            print('######################' + str(page) + '页')
            stop = False
            for message in json.loads(reply.text)['data']:
                item = self._build_item(message, type_name)
                # Incremental mode: stop at the first notice not published today.
                if not self.full_dose and item['release_time'] != get_current_date():
                    stop = True
                    break
                yield item
            if stop:
                break
            page += 1

    def _build_item(self, message, type_name):
        """Build one item from a JSON list entry, fetching its detail-page HTML."""
        item = BeiJingShiGongGongZiYuanZongHeJiaoYiXiTongItem()
        item['announcement_title'] = message['projectName']  # 公告标题
        item['announcement_type'] = type_name  # 公告类型
        item['source_type'] = '北京市公共资源综合交易系统'  # 属于哪个网站的信息
        item['province'] = '北京'  # 所属省份
        item['project_type'] = message['tenderCategory']['desc']  # 工程类型
        item['project_name'] = message['projectName']  # 项目名称
        item['is_parsed'] = 0
        if type_name == '招标公告':
            # NOTE(review): bidNoticeId is filled with projectId, exactly as the
            # original code did — confirm the endpoint really expects that.
            item['origin_url'] = (
                'https://www.bjggzyzhjy.cn/G2/pubnotice/kb-enroll!previewNotice.do?flag=toLogin&viewFlag=false&'
                + 'projectId=' + message['projectId']
                + '&bidNoticeId=' + message['projectId']
            )
            item['html'] = requests.get(url=item['origin_url'], headers=self.HEADERS).text
            # Keep only the date part of "YYYY-MM-DD hh:mm:ss".
            item['release_time'] = message['applyTimeStart'].split(" ", 1)[0]
        else:  # 中标公告: the publish date only exists inside the detail HTML.
            item['origin_url'] = (
                'https://www.bjggzyzhjy.cn/G2/pubnotice/jt-bid-result-notice!preView.do?'
                + 'projectId=' + message['projectId']
                + '&bidPackageArrStr=' + message['packageId']
                + '&resultLetterGatherId=' + message['resultLetterGatherId']
                + '&SID=' + message['resultLetterGatherId']
            )
            item['html'] = requests.get(url=item['origin_url'], headers=self.HEADERS).text
            item['release_time'] = self._extract_result_release_time(item['html'])
        return item

    @staticmethod
    def _extract_result_release_time(html):
        """Extract the publish date from an award-notice detail page.

        Returns the fallback marker 'no time come out' when the date cannot
        be found or parsed.  (Fixes a bug in the original, which left the
        field unset — later raising KeyError — when no matching table row
        existed in the page.)
        """
        date = 'no time come out'
        try:
            for tr in Selector(text=html).xpath('//tr'):
                tr_text = tr.xpath('string(.)').extract()[0]
                if '中标结果公示发布日期' in tr_text:
                    # The date appears as the 10th space-separated token of the
                    # first <td><span> cell — TODO confirm layout stability.
                    date = tr.xpath('./td/span')[0].xpath('string(.)').extract()[0].split(' ')[9]
        except Exception:
            # Best-effort: a malformed detail page must not abort the crawl.
            date = 'no time come out'
        return date





