import re
import sys
from datetime import datetime

from bs4 import BeautifulSoup
from feapder.db.mysqldb import MysqlDB
from feapder.utils.log import log
from items.gdgpo_item import GdgpoItem

# Module-level MySQL handle — presumably reads connection settings from the
# feapder config; verify against the project's settings module.
db = MysqlDB()
# Keywords matched against notice titles by is_title_match_keyword()
# (drainage / sewage / pipeline / environmental-remediation terms).
keyword_list = ['排水', '排污', '雨水', '管道', '管网', '环境治理', '非开挖', '排涝', '环境整治']


def init_data_from_db(data_obj, item, crawl_flag):
    """Copy the persisted row fields from *item* (a dict) onto *data_obj*.

    Raises KeyError if any expected column is missing from *item*.
    """
    data_obj.crawl_flag = crawl_flag
    for field in (
        "title", "page_url", "project_manager", "project_id",
        "purchase_id", "region", "budget", "purchaser",
        "purchase_type", "publish_time_str", "publish_time", "agency",
    ):
        setattr(data_obj, field, item[field])


def is_title_match_keyword(s: str, keywords=None):
    """Return the keywords that appear in *s*, joined by '&'.

    Args:
        s: the title text to scan; None or '' short-circuits to ''.
        keywords: optional iterable of keywords to look for; defaults to
            the module-level ``keyword_list`` (backward compatible).

    Returns:
        '' when nothing matches, otherwise e.g. '排水&管网' (keywords in
        their original list order).
    """
    if not s:
        return ''
    if keywords is None:
        keywords = keyword_list
    # join() replaces the original manual '&' concatenation plus
    # trailing-'&' strip, and `k in s` replaces s.__contains__(k).
    return '&'.join(k for k in keywords if k in s)


def get_val_from_dict(target: dict, key):
    """Safe dict lookup.

    Returns:
        target[key] when present, '' when the key is missing, and None
        when *target* is None or empty (this None-vs-'' asymmetry is
        preserved from the original contract for existing callers).
    """
    if not target:
        return None
    # dict.get replaces the original target.__contains__(key) branch.
    return target.get(key, '')

def parse_money_toFloat(s: str):
    """Parse a money string such as '1,142,592.45元' into a float.

    Returns:
        -1.0 for None/empty input, None when the string contains no
        digits (both preserved from the original contract), otherwise
        the parsed float.
    """
    if s is None or s == '':
        return -1.0

    # BUG FIX: the original did `if s.rfind('元'): s = s[:len(s) - 2]`.
    # rfind() returns -1 when '元' is absent (truthy!), and the slice
    # always removed TWO characters, silently dropping the last digit
    # ('1,142,592.45元' -> 1142592.4). Strip the single suffix char only.
    s = s.strip()
    if s.endswith('元'):
        s = s[:-1]
    res = ''
    count_dot = 0
    for c in s:
        if '0' <= c <= '9' or c == '.':
            res += c
            if c == '.':
                count_dot += 1
    # Some source pages render every digit-group separator as a dot;
    # if more than one dot survived, treat them all as separators.
    if count_dot > 1:
        res = res.replace(".", "")
    log.debug("parse_money_toFloat {a} ===> {b} ".format(a=s, b=res))
    if res == '':
        return None
    return float(res)


def get_crawlFlag():
    """Derive the crawl flag from the running script's file name.

    e.g. ``python spiders/gdgpo_new.py`` -> ``gdgpo_new``.  Splits on
    both '/' and '\\' so the flag is also correct when sys.argv[0]
    contains Windows path separators (the original handled only '/').
    """
    script_name = re.split(r"[\\/]", sys.argv[0])[-1]
    return script_name.split(".")[0]


def getExtractItem(item):
    """Return the first extracted string, trimmed, with any leading
    'label：' prefix removed; None when *item* is None or empty."""
    if not item:
        return None
    text = str(item[0]).strip()
    # Drop everything up to and including the first full-width colon.
    _, sep, tail = text.partition("：")
    return tail if sep else text


def get_bs_text_from_id(bs: BeautifulSoup, tag_name: str, item_id: str):
    """Text of the first <tag_name id=item_id> element, with any leading
    'label：' prefix stripped.

    Returns '' when item_id is None/blank or no element matches.
    """
    if item_id is None or item_id.strip() == '':
        return ''
    # find() returns the first match (or None) — the original built a
    # full find_all() list only to index element 0.
    node = bs.find(name=tag_name, id=item_id)
    if node is None:
        return ''
    text = node.get_text()
    idx = text.find("：")
    if idx != -1:
        text = text[idx + 1:]
    return text


def getBsItem(item):
    """get_text() of a bs4 element with any leading 'label：' prefix
    removed; None when *item* is None."""
    if item is not None:
        res = item.get_text()
        idx = res.find("：")
        if idx != -1:
            # Reuse idx — the original called res.find("：") a second
            # time here, ignoring the value it had just computed.
            res = res[idx + 1:]
        return res


def is_valid_time(time: str, year: int = 2021):
    """True when the date after the last '&' in *time* falls in *year*.

    Accepts 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS' after the separator.

    Returns False for None/''/missing '&' and for unparseable dates.
    BUG FIX: the original caught RuntimeError, but datetime.strptime
    raises ValueError on a bad date — malformed input used to crash.
    The target year is now a parameter (default 2021, backward
    compatible) instead of a hard-coded constant.
    """
    if time is None or time == '' or time.rfind('&') < 0:
        return False
    try:
        time = time[time.rfind('&') + 1:]
        # Presence of ':' distinguishes the datetime form from date-only.
        fmt = '%Y-%m-%d %H:%M:%S' if ':' in time else '%Y-%m-%d'
        res = datetime.strptime(time, fmt).date().year == year
        log.debug("is_valid_time {} ====>  {}".format(time, str(res)))
        return res
    except ValueError:
        return False


def do_new(request, response, db: MysqlDB, tb_name: str,
           crawl_flag: str, isFirst: bool):
    """
    Crawl a new-style result detail page.

    isFirst=True: scrape all notice fields from the page and insert a
    fresh row into *tb_name*; isFirst=False: load the existing row by
    request.purchase_id and update it with the award details (supplier
    name/address, deal amount, project name).
    """
    response.encoding_errors = "ignore"
    # the detail-page url
    url = request.url
    # title carried over on the request from the list-page spider
    title = request.title
    log.debug("$$$ do_new:{} $$$".format(str(isFirst)))
    bs = response.bs4()
    if isFirst:
        data_obj = GdgpoItem(crawl_flag=crawl_flag)
        # TODO  fill in the non-oldweb (isFirst=True) fields, i.e. the
        #       ones init_data_from_db copies -> filled in, pending review
        tag_name_span = 'span'
        # Each field lives in a <span> with a site-specific id.
        publish_org = get_bs_text_from_id(bs=bs, tag_name=tag_name_span, item_id='f_issuing_agency')
        publish_time = get_bs_text_from_id(bs, tag_name_span, 'f_noticeTime')
        publish_time_str = get_bs_text_from_id(bs, tag_name_span, 'f_noticeTime')
        purchase_id = get_bs_text_from_id(bs, tag_name_span, 'f_openTenderCode')
        budget = get_bs_text_from_id(bs, tag_name_span, 'f_budget')
        purchase_type = get_bs_text_from_id(bs, tag_name_span, 'f_catalogueNameList')
        agency = get_bs_text_from_id(bs, tag_name_span, 'f_agency')
        project_manager = get_bs_text_from_id(bs, tag_name_span, 'f_agentManageName')

        title = get_bs_text_from_id(bs, 'p', 'info-title-especially')
        page_url = url

        data_obj.page_url = page_url
        data_obj.title = title
        data_obj.budget = budget
        data_obj.publish_org = publish_org
        data_obj.publish_time = publish_time
        data_obj.publish_time_str = publish_time_str
        data_obj.purchase_id = purchase_id
        data_obj.purchase_type = purchase_type
        data_obj.agency = agency
        data_obj.project_manager = project_manager

    else:
        purchase_id = request.purchase_id
        # NOTE(review): purchase_id is interpolated straight into the SQL
        # string (and into the update condition below) — prefer a
        # parameterized query if MysqlDB supports one.
        data_obj_json = db.find(sql="select * from {a} where purchase_id = '{b}'".format(a=tb_name, b=purchase_id),
                                to_json=True)[0]
        data_obj = GdgpoItem(crawl_flag=crawl_flag)
        init_data_from_db(data_obj, data_obj_json, crawl_flag)

    log.debug(data_obj)
    log.info("req url: " + url)
    log.info("req title: " + title)

    # First data row (tr[1]) of the award table:
    # td[0]=supplier, td[1]=address, td[2]=deal amount.
    supplier_name = bs.find("table").find_all("tr")[1].find_all("td")[0].get_text().strip()
    supplier_address = bs.find("table").find_all("tr")[1].find_all("td")[1].get_text().strip()
    deal_amount = bs.find("table").find_all("tr")[1].find_all("td")[2].get_text().strip()
    deal_amount = parse_money_toFloat(deal_amount)
    log.info("供应商(中标单位): " + supplier_name)
    log.info("供应商地址: " + supplier_address)
    log.info("中标金额: " + str(deal_amount))
    data_obj.supplier_name = supplier_name
    data_obj.supplier_address = supplier_address
    data_obj.deal_amount = deal_amount

    project_name = getBsItem(bs.find("h4", string=re.compile("项目名称")))
    data_obj.project_name = project_name
    query_keyword = request.query_keyword
    data_obj.query_keyword = query_keyword
    # if project_name:
    #     project_name = project_name.get_text()
    #     log.debug("project_name=" + str(project_name))
    #     project_name = project_name[project_name.find("：") + 1:]
    #     log.info("项目名称: " + project_name)
    #     data_obj.project_name = project_name

    if isFirst:
        add_res = db.add_smart(table=tb_name, data=data_obj.to_dict)
        if add_res:
            log.info("#########First insert over#########")
        else:
            log.info("!!!!!!!!!First insert fail !!!!!!!!")
    else:
        update_res = db.update_smart(table=tb_name, data=data_obj.to_UpdateItem().to_dict,
                                     condition="purchase_id='" + data_obj.purchase_id + "'")
        if update_res:
            log.info("#########update over#########")
        else:
            log.info("!!!!!!!!! update fail !!!!!!!!")
    log.debug(data_obj.to_dict)
    log.info("=======================")


# TODO: old-page markup may change; when time permits, switch to locating
#       elements by keyword via bs instead of positional xpath.
# Old pages turn out never to be from 2021, so this can stay as-is.

def do_old(request, response, db: MysqlDB, tb_name: str, crawl_flag: str):
    """
    Parse an old-style detail page and insert a row into *tb_name*.
    """
    # the detail-page url
    url = request.url
    # title carried over on the request from the list-page spider
    title = request.title
    # parse the main body
    # content = response.xpath(
    #     'string(//div[@class="content"])'
    # ).extract_first()  # the string() expression takes a tag's text, including child-tag text

    # print("content", content)
    bs: BeautifulSoup = response.bs4()
    supplier_div = bs.find(id="generalArticleEditForm00080101_bidOrgDetailTDP")
    if supplier_div is not None:
        supplier_div = supplier_div.get_text()
    # Positional xpath extraction of the notice fields (brittle — see the
    # TODO above this function).
    agency = response.xpath("/html/body/div[1]/div[2]/div[3]/p[2]/span[1]").xpath('string(.)').extract()
    requester = response.xpath("/html/body/div[1]/div[2]/div[3]/p[2]/span[2]").xpath('string(.)').extract()
    budget = response.xpath("/html/body/div[1]/div[2]/div[3]/p[5]/span").xpath('string(.)').extract()
    project_name = response.xpath("/html/body/div[1]/div[2]/div[3]/p[4]/span").xpath('string(.)').extract()
    purchase_id = response.xpath("/html/body/div[1]/div[2]/div[3]/p[3]/span").xpath('string(.)').extract()
    purchase_method = response.xpath("/html/body/div[1]/div[2]/div[3]/p[6]/span").xpath('string(.)').extract()
    publish_time = response.xpath("/html/body/div[1]/div[2]/div[2]/p/span[3]").xpath('string(.)').extract()
    deal_amount = response.xpath("/html/body/div[1]/div[2]/div[2]/p/span[6]").xpath('string(.)').extract()
    purchase_type = response.xpath("/html/body/div[1]/div[2]/div[2]/p/span[5]").xpath('string(.)').extract()
    publish_org = response.xpath("/html/body/div[1]/div[2]/div[2]/p/span[2]").xpath('string(.)').extract()
    project_manager = response.xpath("/html/body/div[1]/div[2]/div[2]/p/span[8]").xpath('string(.)').extract()

    agency = getExtractItem(agency)
    requester = getExtractItem(requester)
    budget = getExtractItem(budget)
    project_name = getExtractItem(project_name)
    purchase_id = getExtractItem(purchase_id)
    purchase_method = getExtractItem(purchase_method)
    publish_time = getExtractItem(publish_time)
    deal_amount = getExtractItem(deal_amount)
    purchase_type = getExtractItem(purchase_type)
    publish_org = getExtractItem(publish_org)
    project_manager = getExtractItem(project_manager)

    # NOTE(review): the bs.find calls below OVERWRITE the cleaned strings
    # extracted above with bs4 Tag objects (or None when no span matches),
    # and several search patterns look copy-pasted: '预算金额' is reused
    # for deal_amount, purchase_type AND project_manager, and
    # project_manager is assigned twice. Confirm which of the two
    # extraction passes is actually intended before relying on this data.
    agency = bs.find(name='span', string=re.compile('代理机构'))
    requester = bs.find(name='span', string=re.compile('委托单位'))
    budget = bs.find(name='span', string=re.compile('预算金额'))
    project_name = bs.find(name='span', string=re.compile('项目名称'))
    publish_org = bs.find(name='span', string=re.compile('发布机构'))
    publish_time = bs.find(name='span', string=re.compile('发布时间'))
    project_manager = bs.find(name='span', string=re.compile('项目负责人'))
    deal_amount = bs.find(name='span', string=re.compile('预算金额'))
    purchase_type = bs.find(name='span', string=re.compile('预算金额'))
    project_manager = bs.find(name='span', string=re.compile('预算金额'))

    log.debug("##################")
    log.info("url: " + url)
    log.info("title: " + title)
    log.info("中标供应商: " + str(supplier_div))
    log.info("代理机构: " + str(agency))
    log.info("委托单位: " + str(requester))
    log.info("预算金额: " + str(budget))
    log.info("项目名: " + str(project_name))
    log.info("采购计划编号: " + str(purchase_id))
    log.info("采购方式: " + str(purchase_method))
    log.info("发布日期: " + str(publish_time))
    log.info("中标金额: " + str(parse_money_toFloat(deal_amount)))
    log.info("采购品目(分类): " + str(purchase_type))
    log.info("发布机构: " + str(publish_org))
    log.info("项目负责人: " + str(project_manager))

    data_obj = GdgpoItem(crawl_flag=crawl_flag)
    data_obj.page_url = url
    data_obj.title = title
    data_obj.budget = parse_money_toFloat(budget)
    data_obj.purchase_method = purchase_method
    data_obj.project_name = project_name
    data_obj.purchase_id = purchase_id
    data_obj.publish_time = publish_time
    data_obj.deal_amount = parse_money_toFloat(deal_amount)
    data_obj.purchase_type = purchase_type
    data_obj.publish_org = publish_org
    data_obj.project_manager = project_manager

    # check_data = db.find(sql="select * from gdgpo where project_id = '" + data_obj.project_id + "'",
    #                      to_json=True)
    # if not check_data:
    op_res = db.add_smart(table=tb_name, data=data_obj.to_dict)
    if op_res:
        log.info("#####insert over######")
    else:
        log.info("######insert fail#####")
    log.debug("##################")


# if __name__ == '__main__':
#     print(parse_money_toFloat("1,142,592.45元"))
