import feapder
from feapder.utils.log import log

from tools import get_crawlFlag, parse_money_toFloat, do_old, do_new, is_valid_time, db

# Total number of search results reported by the site. Initialized to 0
# (which makes start_requests a no-op) and overwritten by the probe
# request in __main__ before the spider starts.
total_amount = 0
# Tag stamped onto every record produced by this crawl run (from tools).
CRAWL_FLAG = get_crawlFlag()
# Destination table name used by the do_old/do_new persistence helpers.
tb_name = "gdgpo"

def init_data_from_web(data_obj, item):
    """Populate *data_obj* from a raw web-listing dict *item*.

    Copies each listing field onto the matching attribute, stamps the
    current crawl flag, and normalizes the budget string to a float.
    """
    data_obj.crawl_flag = CRAWL_FLAG
    # attribute name on data_obj -> key in the web item payload
    attr_to_key = {
        "title": "title",
        "page_url": "url",
        "project_manager": "agentManageName",
        "project_id": "openTenderCode",
        "purchase_id": "planCodes",
        "region": "regionName",
        "purchaser": "purchaser",
        "purchase_type": "catalogueNameList",
        "publish_time_str": "noticeTime",
        "publish_time": "noticeTime",  # same raw value feeds both fields
        "agency": "agency",
    }
    for attr, key in attr_to_key.items():
        setattr(data_obj, attr, item[key])
    # Budget arrives as a money string and must be parsed to a float.
    data_obj.budget = parse_money_toFloat(item["budget"])


def init_data_from_db(data_obj, item):
    """Populate *data_obj* from a previously stored DB row *item*.

    Column names map one-to-one onto attributes (except ``url`` ->
    ``page_url``); the current crawl flag is re-stamped.
    """
    data_obj.crawl_flag = CRAWL_FLAG
    data_obj.page_url = item["url"]
    # Remaining columns share their name with the target attribute.
    for field in ("title", "project_manager", "project_id", "purchase_id",
                  "region", "budget", "purchaser", "purchase_type",
                  "publish_time_str", "publish_time", "agency"):
        setattr(data_obj, field, item[field])


class SpiderTest(feapder.AirSpider):
    """Crawls the Guangdong government procurement site (gdgpo) for the
    keyword "非开挖" and routes award ("中标") notices to detail parsing."""

    def start_requests(self):
        """Yield one POST request per search-result page.

        Relies on the module-level ``total_amount`` set by the probe
        request in ``__main__``. The site hard-codes 8 results per page.
        """
        if total_amount is None or total_amount <= 0:
            return
        # Ceiling division: a trailing partial page still needs a request.
        # (The previous floor division total_amount // 8 silently dropped
        # the last 1-7 results.)
        page_amount = (total_amount + 7) // 8
        for idx in range(page_amount):
            data = {"key": "非开挖",
                    "templetPath": "searchList.html",
                    "siteid": "cd64e06a-21a7-4620-aebc-0576bab7e07a",
                    "pageNum": idx,
                    "page": idx + 1}
            yield feapder.Request(
                url="https://gdgpo.czt.gd.gov.cn/freecms/templetPro.do",
                data=data, method="POST", query_keyword="非开挖")

    def parse(self, request, response):
        """Extract (title, url) pairs from a result page and follow award
        notices whose title passes the ``is_valid_time`` filter."""
        url_list = response.xpath('//ul[@class="searchListContentList"]/li/a/@href').extract()
        if not url_list:
            return
        txt = response.xpath('//ul[@class="searchListContentList"]/li/a/span').xpath('string(.)').extract()
        # Each result <a> carries two <span>s (title text + date text);
        # join them pairwise. Zipping the even/odd slices ignores a
        # dangling unpaired element instead of raising IndexError as the
        # previous txt[i + 1] indexing did on odd-length input.
        title_list = [head + "&" + tail for head, tail in zip(txt[::2], txt[1::2])]
        title_url_dict = dict(zip(title_list, url_list))
        log.info(title_url_dict)
        for title, url in title_url_dict.items():
            # Only award notices ("中标") inside the valid time window.
            if title.find('中标') >= 0 and is_valid_time(title):
                yield feapder.Request(url, callback=self.parse_detail, title=title, render_time=2,
                                      query_keyword="非开挖")

    def parse_detail(self, request, response):
        """Dispatch a detail page to the legacy-site or current-site handler."""
        url = str(request.url)
        if '/oldweb/' in url:  # idiomatic membership test over __contains__
            do_old(request=request, response=response, db=db, tb_name=tb_name, crawl_flag=CRAWL_FLAG)
        else:
            do_new(request=request, response=response, db=db, tb_name=tb_name, crawl_flag=CRAWL_FLAG, isFirst=True)


if __name__ == "__main__":
    # Probe request: fetch the first result page once, solely to read the
    # total result count that start_requests needs for page enumeration.
    init_data = {"key": "非开挖",
                 "templetPath": "searchList.html",
                 "siteid": "cd64e06a-21a7-4620-aebc-0576bab7e07a",
                 "pageNum": 0,
                 "page": 1}
    res = feapder.Request(url="https://gdgpo.czt.gd.gov.cn/freecms/templetPro.do", data=init_data, retry_times=3)
    nodes = res.get_response() \
        .xpath("/html/body/section/section/section/div[2]/div/div[3]/form/b[1]") \
        .xpath('string(.)').extract()
    # extract() returns a list; a missing node yields [] rather than None,
    # so guard on emptiness. The previous .extract()[0] raised IndexError
    # on a miss, which made its "is not None" check unreachable dead code.
    if nodes:
        total_amount = int(nodes[0])
    SpiderTest().start()
