from commonresources.spiders.basespider import BaseSpider
from scrapy.http import Request,FormRequest
from scrapy.selector import Selector
import re
import time
import json
from commonresources.spider_items.base_item import BaseItem

class SuZhouShiJianZhuShiChangZongHeChaXunPingTaiSpider(BaseSpider):
    """Spider for 苏州市建筑市场综合查询平台 (Suzhou Construction Market
    Comprehensive Query Platform), http://221.224.132.154/zhcx/.

    Each entry in ``start_urls`` maps an announcement-type label (appended
    to item titles) to the JSON list endpoint for that category. Listing
    pages are fetched as POST form requests and paginated in ``parse_list``.
    """
    name = 'SuZhouShiJianZhuShiChangZongHeChaXunPingTai'
    name_zh = '苏州市建筑市场综合查询平台'
    province = "江苏"
    city = '苏州'
    allowed_domains = ['221.224.132.154']
    # NOTE: commented-out categories are deliberately disabled endpoints.
    start_urls = {
                    #"项目登记":'http://221.224.132.154:9093/zhcx/project/info/getlxdj',
                    # "质量监督":"http://221.224.132.154:9093/zhcx/project/info/getgcjbqk",
                    # "安全监督":"http://221.224.132.154:9093/zhcx/project/info/getaqjdlist",
                    '招标信息': "http://221.224.132.154:9093/zhcx/project/info/getgtxmztb",
                    '中标信息': 'http://221.224.132.154:9093/zhcx/project/info/getgtxmzhongbiao',
                    '合同信息': 'http://221.224.132.154:9093/zhcx/project/info/gethtba',
                    "合同变更备案": "http://221.224.132.154:9093/zhcx/project/info/gethtbabg",
                    "施工许可": "http://221.224.132.154:9093/zhcx/project/info/getsgxk",
                    "竣工备案": "http://221.224.132.154:9093/zhcx/project/info/getjgba",
    }

    # The endpoints use different JSON schemas; these are the candidate
    # field names, probed in priority order, for a record's date and
    # project-name fields.
    _DATE_FIELDS = ('noticeFromDate', 'shrDate', 'barq', 'tgdate', 'fzrq',
                    'acceptDate')
    _TITLE_FIELDS = ('xmmc', 'subProjectName', 'gcmc', 'gcname')

    def __init__(self, full_dose=False):
        super(SuZhouShiJianZhuShiChangZongHeChaXunPingTaiSpider, self).__init__(full_dose)

    def start_requests(self):
        """Issue one POST request per announcement category, page 1."""
        fake_head = self.get_fake_head()
        for announce_type, url in self.start_urls.items():
            yield FormRequest(
                url=url,
                callback=self.parse_list,
                method='post',
                formdata=self.get_post_body(),
                headers=fake_head,
                meta={"current_page": 1, 'announce_type': announce_type},
            )

    def parse_list(self, response):
        """Parse one JSON page of listing records and paginate.

        Yields one ``BaseItem`` per record, then requests the next page
        unless ``check_if_need_break`` signals that the remaining records
        are older than the last crawl (incremental mode).
        """
        current_page = int(response.meta['current_page'])
        announce_type = response.meta['announce_type']
        # Total page count is learned from the first response and threaded
        # through meta so subsequent pages do not re-read it.
        total_page = response.meta.get('total_page', -1)

        need_break = False
        json_data = json.loads(response.text)
        if json_data and 'page' in json_data:
            if total_page == -1:
                total_page = int(json_data['page']['totalPage'])

            for info in json_data['page']['list']:
                item = BaseItem()

                # First date-like field present wins; only the date part
                # (before the space) is kept.
                release_time = next(
                    (info[key].split(" ")[0]
                     for key in self._DATE_FIELDS if key in info),
                    None,
                )
                if release_time is not None:
                    item['release_time'] = release_time
                    # Stop paginating once records predate the last crawl.
                    # (The original read item['release_time'] unconditionally
                    # and raised KeyError when no date field was present;
                    # such records are now yielded without a break check.)
                    need_break = self.check_if_need_break(release_time)
                    if need_break:
                        break

                title = next(
                    (info[key] for key in self._TITLE_FIELDS if key in info),
                    None,
                )
                if title is not None:
                    item['announcement_title'] = title + announce_type

                item['source_type'] = self.name_zh
                item['province'] = self.province
                item['city'] = self.city
                item['is_parsed'] = 0
                # Store the raw JSON record; a downstream stage parses it.
                item['html'] = info
                item['origin_url'] = response.url

                yield item

        if not need_break and current_page < total_page:
            yield FormRequest(
                url=response.url,
                callback=self.parse_list,
                method='post',
                formdata=self.get_post_body(page=current_page + 1),
                headers=self.get_fake_head(),
                meta={
                    "current_page": current_page + 1,
                    'announce_type': announce_type,
                    "total_page": total_page,
                },
            )

    def get_fake_head(self):
        """Return browser-like request headers for the list endpoints."""
        header = {
            "Connection": "keep-alive",
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Referer": "http://221.224.132.154/",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9"
        }
        return header

    def get_post_body(self, page=None):
        """Build the form body for a list request.

        Args:
            page: 1-based page number; ``None`` means the first page.

        Returns:
            dict[str, str]: form fields, all values as strings.
        """
        post_dict = {
            "projectname": "",
            "finishnum": "",
            "qualitynum": "",
            "startdate": "",
            "enddate": "",
            "limit": "10",
        }
        # Cache-busting timestamp in milliseconds, matching the site's JS.
        post_dict['t'] = str(int(time.time() * 1000))
        post_dict['page'] = "1" if page is None else str(page)

        return post_dict




