import feapder
from bs4 import BeautifulSoup
from feapder.utils.log import log

import tools
from items.ggzy_item import GgzyItem

# List-page URL template for ggzy.gz.gov.cn; ${type}, ${page} and ${word}
# are substituted by build_tasks().
base_url = 'http://ggzy.gz.gov.cn/${type}/index_${page}.jhtml?word=${word}&supervision=&projectType='

# Site channel slug -> human-readable channel name (used as task 'type').
type_dict = {
    'jyywjsgcszgczbxx': '市政工程',
    'jyywjsgcslswzbxx': '水利水务',
    'jyywjsgcyllyzbxx': '林业园林',
    'jyywjsgcxxjyfblcfbgg': '发包'
}
# UTF-8 percent-encoded search keyword -> human-readable keyword
# (used as task 'keyword').
keyword_dict = {
    '%E6%8E%92%E6%B0%B4': '排水',
    '%E9%9B%A8%E6%B0%B4': '雨水',
    # NOTE(review): the trailing '&' in this key looks accidental — it ends
    # up inside the 'word=' query value; confirm against the live site.
    '%E6%8E%92%E6%B1%A1&': '排污',
    '%E7%AE%A1%E9%81%93': '管道',
    '%E7%AE%A1%E7%BD%91': '管网',
    '%E7%8E%AF%E5%A2%83%E6%B2%BB%E7%90%86': '环境治理',
    '%E9%9D%9E%E5%BC%80%E6%8C%96': '非开挖',
    '%E6%8E%92%E6%B6%9D': '排涝',
    '%E7%8E%AF%E5%A2%83%E6%95%B4%E6%B2%BB': '环境整治'
}

# str.format-style template for the '发包' channel.
# NOTE(review): appears unused in this module — base_url already covers the
# 'jyywjsgcxxjyfblcfbgg' channel via type_dict; confirm before removing.
fabao_search_url = 'http://ggzy.gz.gov.cn/jyywjsgcxxjyfblcfbgg/index_{page}.jhtml?word={word}&supervision=&projectType='
# Crawl tasks accumulated by build_tasks() and consumed by start_requests().
task_list = []
# Target database table name for inserts/updates.
tb_name = 'ggzy'


def handle_title(s: str) -> str:
    """Strip the first and last '&'-delimited fields from a raw row title.

    Row titles are produced by ``get_text(separator='&')``, so they look
    like ``"<index>&<title part>&...&<publish date>"``.  This drops the
    leading field (row index) and the trailing field (date), then removes
    any remaining separators.

    Returns '' for None/empty input.  Input without any '&' is returned
    unchanged (the previous version chopped the last character here,
    because ``rfind`` returned -1 and ``s[:-1]`` was taken).
    """
    if not s:
        return ''
    if '&' not in s:
        # Nothing to strip — fixes the silent last-character truncation.
        return s
    s = s[s.find('&') + 1:]
    s = s[:s.rfind('&')]
    return s.replace('&', '')


def build_tasks():
    """Populate the module-level ``task_list`` with one task per result page.

    For every (channel, keyword) combination the total page count is probed
    first via :func:`get_page_amount`; combinations with no results (count
    <= 0) are skipped.  Returns the (shared, mutated) task list.
    """
    for type_key, type_desc in type_dict.items():
        typed_url = base_url.replace("${type}", type_key)
        for word_key, word_desc in keyword_dict.items():
            worded_url = typed_url.replace("${word}", word_key)
            total_pages = get_page_amount(worded_url, word_key)
            if total_pages <= 0:
                continue
            task_list.extend(
                {
                    'url': worded_url.replace("${page}", str(page_no)),
                    'type': type_desc,
                    'keyword': word_desc
                }
                for page_no in range(1, total_pages + 1)
            )
    log.debug("tasks built over ===> " + str(len(task_list)))
    return task_list


def get_page_amount(task_url: str, keyword: str):
    """Fetch page 1 of *task_url* and parse the total page count.

    The pager element's text looks like ``".../<total>页..."``; the value
    between '/' and '页' is extracted.  Returns the page count, or -1 when
    the pager is missing or the extracted text is not a plain digit string.
    """
    probe_url = task_url.replace("${page}", "1").replace("${word}", keyword)
    response = feapder.Request(url=probe_url, retry_times=3).get_response()
    pager_texts = response.xpath('//*[@id="tab1"]/div[2]/div[3]/div').xpath('string(.)').extract()
    if not pager_texts:
        return -1
    pager: str = pager_texts[0]
    # Keep the substring between '/' and '页' — that is the total page count.
    pager = pager[:pager.find('页')]
    total = pager[pager.find('/') + 1:]
    log.info("get_page_amount : " + probe_url + " ===> " + str(total))
    return int(total) if total.isdigit() else -1


class SpiderTest(feapder.AirSpider):
    """Crawler for the Guangzhou public-resource trading site (ggzy.gz.gov.cn).

    On start-up it builds the full task list (one request per result page
    for every channel/keyword combination), follows rows whose title
    matches a keyword to their detail pages, and upserts each record into
    the ``ggzy`` table.
    """

    def __init__(self):
        super().__init__()
        # Fills the module-level task_list consumed by start_requests().
        build_tasks()

    def start_requests(self):
        """Yield one list-page request per prepared task."""
        for task in task_list:
            yield feapder.Request(task['url'], query_type=task['type'] + ":" + task['keyword'])

    def parse(self, request, response):
        """Parse a result-list page; follow rows whose title matches a keyword."""
        bs: BeautifulSoup = response.bs4()
        tbody = bs.find(id='tab1').find(name='tbody')
        tb_url_list = [str(a.get('href')) for a in tbody.find_all(name='a')]
        tb_data_list = []
        for row in tbody.find_all(name='tr'):
            # Join the row's cells with '&' and strip all whitespace so the
            # title can later be split on '&' (see handle_title / the
            # rfind('&') date extraction in the detail handler).
            txt = row.get_text(strip=True, separator='&').replace(' ', '') \
                .replace('\r', '').replace('\n', '').replace('\t', '')
            tb_data_list.append(txt)
        # NOTE(review): zip pairs rows with links positionally and the dict
        # drops duplicate titles — assumes one <a> per <tr> and unique
        # titles on a page; verify against the site markup.
        tb_dict = dict(zip(tb_data_list, tb_url_list))
        for title, url in tb_dict.items():
            match_res = tools.is_title_match_keyword(title)
            if tools.is_valid_time(title) and match_res != '':
                log.warning('parse_detail ： ' + str(url))
                yield feapder.Request(url, callback=self.parse_detail, title=title,
                                      query_keyword=match_res, query_type=request.query_type)  # parse_detail handles the response

    def parse_detail(self, request, response):
        """Parse a detail page and insert/update its record in the DB.

        Dispatches on the URL: contract-award ('发包') pages carry
        attachments and are keyed by title; bid-result pages carry a
        key/value table and are keyed by bid_id.
        """
        response.encoding_errors = "ignore"
        url: str = request.url
        data_obj = GgzyItem()
        data_obj.query_keyword = request.query_keyword
        data_obj.type = request.query_type
        data_obj.crawl_flag = tools.get_crawlFlag()
        data_obj.page_url = url
        data_obj.title = handle_title(request.title)

        bs: BeautifulSoup = response.bs4()
        if '/jyywjsgcxxjyfblcfbgg/' in url:
            self._handle_fabao_detail(request, bs, data_obj)
        else:
            self._handle_bid_detail(bs, data_obj)

    def _handle_fabao_detail(self, request, bs, data_obj):
        """Handle a contract-award page: collect attachments, upsert by title."""
        target_info = {}
        for attachment in bs.find(name='div', attrs={"class": "attachments"}).find_all(name='a'):
            name = attachment.get_text(strip=True)
            href: str = attachment.get('href')
            if not name:
                continue
            if href is None or href.strip() == '':
                continue
            target_info[name] = href
        data_obj.target_info = target_info
        data_obj.project_name = data_obj.title
        # Publish date is the last '&'-separated field of the raw row title.
        data_obj.publish_time = request.title[request.title.rfind('&') + 1:]

        # NOTE(review): SQL below is built by string formatting from scraped
        # page content — SQL-injection risk; if tools.db supports
        # parameterized queries, prefer those.
        existing = tools.db.find(
            sql="select * from {a} where title = '{b}'".format(a=tb_name, b=data_obj.title),
            to_json=True)
        if existing:
            self._update(data_obj, "title='" + data_obj.title + "'")
        else:
            self._insert(data_obj)

    def _handle_bid_detail(self, bs, data_obj):
        """Handle a bid-result page: parse its key/value table, upsert by bid_id."""
        td_list = bs.find_all(name='td')
        tb_dict = {}
        # Cells come in (label, value) pairs; the len-1 bound guards against
        # a trailing unpaired cell (the old code raised IndexError there).
        for idx in range(0, len(td_list) - 1, 2):
            k = td_list[idx].get_text(strip=True)
            v = td_list[idx + 1].get_text(strip=True)
            if not k or not v:
                continue
            tb_dict[k] = v
        log.warning(tb_dict)

        # Skip voided notices.
        if '是否作废' in tb_dict and '是' in tb_dict['是否作废']:
            return

        data_obj.project_name = tools.get_val_from_dict(tb_dict, '项目名称')
        data_obj.requester = tools.get_val_from_dict(tb_dict, '招标单位')
        data_obj.supplier_name = tools.get_val_from_dict(tb_dict, '中标单位')
        data_obj.project_id = tools.get_val_from_dict(tb_dict, '项目编号')
        data_obj.agency = tools.get_val_from_dict(tb_dict, '招标代理')
        data_obj.deal_amount = self._get_deal_amount(tb_dict)
        data_obj.project_manager = tools.get_val_from_dict(tb_dict, '项目负责人')
        data_obj.bid_id = tools.get_val_from_dict(tb_dict, '中标通知书编号')
        # publish_time and publish_time_str intentionally carry the same raw
        # value (as in the original); downstream may normalize one of them.
        data_obj.publish_time = tools.get_val_from_dict(tb_dict, '中标通知书发放时间')
        data_obj.publish_time_str = data_obj.publish_time

        # NOTE(review): string-built SQL from scraped content — injection
        # risk; prefer parameterized queries if tools.db supports them.
        existing = tools.db.find(
            sql="select * from {a} where bid_id = '{b}'".format(a=tb_name, b=data_obj.bid_id),
            to_json=True)
        if existing:
            prev_time = tools.get_val_from_dict(existing, 'publish_time')
            # Only overwrite when the stored record has no time or is older
            # (string comparison — assumes a sortable date format).
            if prev_time == '' or data_obj.publish_time > prev_time:
                self._update(data_obj, "bid_id='" + data_obj.bid_id + "'")
        else:
            self._insert(data_obj)

    @staticmethod
    def _get_deal_amount(tb_dict):
        """Return the first available deal-amount field as 'label:value', else None."""
        for label in ('中标总价(万元)', '中标综合单价(元)', '中标下浮率(%)'):
            val = tools.get_val_from_dict(tb_dict, label)
            if val != '':
                return label + ':' + val
        return None

    @staticmethod
    def _insert(data_obj):
        """Insert a new row, logging success/failure."""
        if tools.db.add_smart(table=tb_name, data=data_obj.to_dict):
            log.debug("add success :" + str(data_obj.to_dict))
        else:
            log.warning("add fail :" + str(data_obj.to_dict))

    @staticmethod
    def _update(data_obj, condition):
        """Update the row matching *condition*, logging success/failure."""
        if tools.db.update_smart(table=tb_name, data=data_obj.to_UpdateItem().to_dict,
                                 condition=condition):
            log.debug("update success :" + str(data_obj.to_dict))
        else:
            log.warning("update fail :" + str(data_obj.to_dict))


if __name__ == "__main__":
    # Entry point; AirSpider accepts thread_count for parallel crawling,
    # e.g. SpiderTest(thread_count=15).start()
    SpiderTest().start()
