# -*- coding: UTF-8 -*-
import Levenshtein
from dao.mysql import Mysql
from bs4 import BeautifulSoup
from tool.StringTools import StringTools


def hint(html_database):
    """Fuzzy-match the page text against the keyword table and mark the row.

    Status codes written to raw_source: 1 = hit, 2 = no hit,
    3 = hit but another row with the same URL was already processed.
    """
    mysql = Mysql()
    # Load the raw keyword list.
    SQL_keyword = "SELECT keyword FROM `crawler`.`keyword`"
    keywords_raw = mysql.getAll(SQL_keyword)

    html_raw = html_database['origin_html'].decode("utf8")
    record_id = html_database['id']
    html = BeautifulSoup(html_raw, "lxml")
    # Segment the page text and collect fuzzy keyword hits.
    hints = []
    raw = StringTools.removeAll_symbol(html.get_text())  # strip symbol tokens
    pass_word = ["检测", "测试", "检验", "元素", "像素", "因素", "要素", "溶解", "溶性"]
    seg_list = StringTools.jieba(raw, pass_word)
    for keyword in keywords_raw:
        keyword = keyword['keyword'].decode("utf8")
        for seg in seg_list:
            distance = Levenshtein.distance(seg, keyword)
            if distance < 3:
                if seg in hints:
                    print(seg, "已有")
                else:
                    hints.append(seg)
                    print("当前页面id=", record_id, seg, keyword, "相似度：", distance)
                    hints.append("<" + seg + ">" + keyword + "[" + str(distance) + "]")

    # Update the database row for the current record.
    if hints:
        # Hit: check whether the same URL was already processed (status = 1).
        origin_url = html_database['origin_url'].decode("utf8")
        SQL_duplication = "SELECT id FROM `crawler`.`raw_source` WHERE origin_url = %s and status=1"
        duplication = mysql.getAll(SQL_duplication, [origin_url])
        if duplication:
            # BUG FIX: the original code reused `id` as the loop variable
            # here, so the UPDATE below targeted the last duplicate row
            # instead of the current record, leaving the current record
            # unprocessed forever.  The hint text is now also passed as a
            # parameter instead of being concatenated into the SQL string.
            id_list = ",".join(str(row['id']) for row in duplication)
            SQL_ut = "UPDATE `crawler`.`raw_source` SET `status` = 3,`hint` = %s WHERE `id` = %s"
            val = ["已经处理过:" + id_list, record_id]
        else:
            SQL_ut = "UPDATE `crawler`.`raw_source` SET `status` = 1, `hint` = %s WHERE `id` = %s"
            val = (''.join(hints), record_id)
    else:
        SQL_ut = "UPDATE `crawler`.`raw_source` SET `status` = 2 WHERE `id` = %s"
        val = [record_id]
    print(SQL_ut, val)
    mysql.update(SQL_ut, val)
    mysql.dispose(1)


def hint2(html_database):
    """Scan a page for known product names and record any matches.

    Marks the raw_source row status 5 (product hit, names joined by '-')
    or 4 (no product found); only rows currently at status 1 are updated.
    """
    mysql = Mysql()
    SQL_keyword = "SELECT product FROM product"
    site = html_database['origin_url'].decode("utf8")
    products = mysql.getAll(SQL_keyword)
    page_text = BeautifulSoup(html_database['origin_html'].decode("utf8"), "lxml").getText()
    # Collect every product name that appears verbatim in the page text.
    matched = set()
    for row in products:
        name = row['product'].decode("utf8")
        if name in page_text:
            matched.add(name)
    if matched:
        SQL_ut = "UPDATE `crawler`.`raw_source` SET `status` = 5,`hint_product`= %s WHERE `origin_url` = %s AND `status` = 1"
        val = ["-".join(matched), site]
        print(SQL_ut)
        mysql.update(SQL_ut, val)
    else:
        SQL_ut = "UPDATE `crawler`.`raw_source` SET `status` = 4 WHERE `origin_url` = %s AND `status` = 1"
        val = [site]
        print("无产品：", SQL_ut)
        mysql.update(SQL_ut, val)
    mysql.dispose(1)


# Detailed analysis of a page after a keyword hit.
def classify_site(html_database):
    """Classify a hit announcement page and persist its extracted fields.

    Standardizes the announcement type against the `standardization` table,
    extracts labeled fields from the page text into the `bidding` table, and
    finally marks the raw_source row as processed (status = 3).
    """
    mysql = Mysql()
    # Original site url, raw html and the previously stored hint text.
    site = html_database['origin_url'].decode("utf8")
    html = html_database['origin_html'].decode("utf8")
    hint_text = html_database['hint'].decode("utf8")
    # Resolve the crawling platform name for this url.
    SQL_platfrom = "SELECT site.`name` AS platform FROM site INNER JOIN raw_source ON raw_source.site_id = site.id WHERE raw_source.origin_url=%s"
    platform = mysql.getOne(SQL_platfrom, [site])["platform"].decode("utf8")
    # Seed the output record.
    record = {'origin_site': site, 'result_site': site, 'change_site': site, "hint_site": hint_text, "platform": platform}
    soup = BeautifulSoup(html, "lxml")
    title = soup.title.getText()
    # Flatten the page to plain text for segmentation / field extraction.
    page_text = StringTools.remove_symbol(soup.get_text("|"))
    print(page_text)
    # Announcement-type standardization rows, lowest weight first.
    SQL_queryType = "SELECT origin,standard,other FROM standardization WHERE scope ='公告' ORDER BY weight ASC"
    Types = mysql.getAll(SQL_queryType)
    try:
        origin_info = html_database['origin_info'].decode("utf8")
    except (KeyError, AttributeError):
        # No pre-extracted info: segment the whole page text instead.
        pass_word = ["公告"]  # skip the generic word for "announcement"
        seg_list = StringTools.jieba(page_text, pass_word)
    else:
        # origin_info carries the type after a fixed 5-character prefix.
        seg_list = [origin_info[5:]]
    # best = (matched segment, standardization row, edit distance).
    # Distances below 3 are acceptable, so 3 means "no match yet".
    best = ("", "", 3)
    for Type in Types:
        originType = Type['origin'].decode("utf8")
        for seg in seg_list:
            distance = Levenshtein.distance(seg, originType)
            if distance < 3:
                print(seg, originType, distance)
                if best[2] >= distance:
                    best = (seg, Type, distance)
    try:
        record_type = best[1]['other'].decode("utf8")
    except Exception:
        # best[1] is still the "" placeholder: nothing matched close enough.
        print("无法处理----------------------------------------------------------", title, site)
        return
    record['type'] = record_type
    # Field name -> '#'-separated label variants to look for in the text.
    # NOTE: the original literal listed 'result_price' twice; only the later
    # value ('中标金额#成交金额') ever took effect, so the duplicate was removed.
    filters = {'number': '编号', 'account': '采购单位名称#采购单位#招标人:#采购人：#采购人:', 'product': '产品', 'address_county': '地址',
               'due_date': '截止时间#结束时间',
               'release_date': '公告时间#公示时间#时间', 'agency': '代理机构名称#代理机构',
               'result_date': '公告时间#公示时间#时间', 'result_product': '中标产品', 'winner': '中标供应商',
               'result_price': '中标金额#成交金额',
               'change_date': '公告时间#公示时间#时间', 'change_product': '变更产品', 'change_account': '招标人#中标方'}
    # Extract every configured field from the page text.
    for ft in filters:
        data = getKeyValue(page_text, filters[ft])
        if ft == "address_county":
            # Map the raw address onto standardized province/city names.
            SQL_queryP = "SELECT origin,standard FROM `crawler`.`standardization` WHERE scope = '省市'"
            provinces = mysql.getAll(SQL_queryP)
            for province in provinces:
                origin = province['origin'].decode("utf8")
                if data.find(str(origin)) >= 0:
                    record["address_province"] = province['standard'].decode("utf8")
                    if len(origin) > 2:  # place names longer than 2 chars are cities
                        record["address_city"] = origin
        # Classify the purchasing institution type.
        if ft == "account":
            if data != "" and data != "--":
                record["institute_type"] = StringTools.institute_type(data)
        record[ft] = data

    print(record)
    # Project-number duplicate detection is currently disabled: every page is
    # treated as a first record going into a fresh `bidding` row.
    row_id = 0
    record['is_first'] = 1
    # Columns shared by every announcement type.
    dataTitles = ['type', 'number', 'is_first', "hint_site", "platform"]
    print(record)
    print(best[1])
    if "起始记录" == record_type:
        # Initial announcement: keep both the original and standardized type.
        record['origin_type'] = best[1]['origin'].decode("utf8")
        record['standard_type'] = best[1]['standard'].decode("utf8")
        dataTitles = dataTitles + ['account', 'agency', 'address_province', 'address_city', 'address_county',
                                   'release_date', 'due_date', 'change_date', 'origin_type', 'standard_type',
                                   "institute_type"]
        if row_id == 0:
            SQL_save = "INSERT INTO bidding (`origin_site`) VALUES (%s)"
            print(SQL_save)
            row_id = mysql.insertOne(SQL_save, [site])
        else:
            record["origin_site"] = site
            dataTitles = dataTitles + ["origin_site"]
        table_update(dataTitles, record, row_id, mysql)
    elif "过程记录" == record_type:
        # Change announcement.
        record['change_origin_type'] = best[1]['origin'].decode("utf8")
        dataTitles = dataTitles + ['change_origin_type', "change_date", 'change_account']
        if row_id == 0:
            SQL_save = "INSERT INTO bidding (`change_site`) VALUES (%s)"
            print(SQL_save)
            row_id = mysql.insertOne(SQL_save, [site])
        else:
            record["change_site"] = site
            dataTitles = dataTitles + ["change_site"]
        table_update(dataTitles, record, row_id, mysql)
    elif "结果记录" == record_type:
        # Result announcement.
        record['result_origin_type'] = best[1]['origin'].decode("utf8")
        record['result_standard_type'] = best[1]['standard'].decode("utf8")

        # NOTE(review): 'province' is never set on record (only
        # 'address_province' is), so table_update always skips it — probably
        # a typo; left unchanged to preserve behavior.
        dataTitles = dataTitles + ['winner', 'result_date', 'province', "result_price",
                                   'result_origin_type', 'result_standard_type']
        if row_id == 0:
            SQL_save = "INSERT INTO bidding (`result_site`) VALUES (%s)"
            print(SQL_save)
            row_id = mysql.insertOne(SQL_save, [site])
        else:
            record["result_site"] = site
            dataTitles = dataTitles + ["result_site"]
        table_update(dataTitles, record, row_id, mysql)
    else:
        print(3)

    # Mark the raw_source row as fully processed.
    SQL_ut = "UPDATE `crawler`.`raw_source` SET `status` = 3 WHERE `origin_url` = %s"
    mysql.update(SQL_ut, [site])
    mysql.dispose(1)


def getKeyValue(text, keyword):
    """Extract the value following the first matching label in *text*.

    *keyword* is a '#'-separated list of label variants; the first variant
    found in *text* wins.  Returns "--" when no variant is found.

    NOTE(review): StringTools.find_excluding is assumed to return the label's
    start index (non-positive when absent / to be skipped); a label at index
    0 is treated as "not found", matching the original behavior — confirm.
    """
    # The original code re-initialized `data` to "--" on every iteration and
    # then tested `if "--" == data`, which was therefore always true; the
    # dead check is removed and the default hoisted out of the loop.
    data = "--"
    for kw in keyword.split("#"):
        index_start = StringTools.find_excluding(text, kw)  # skips "详见"-style references
        if index_start > 0:
            # Move past the label, skip separator chars, then read the value.
            index_start = index_start + len(kw)
            index_start = StringTools.skip(index_start, text)
            end = StringTools.end(index_start, text)
            return text[index_start:end]
    return data


def table_update(dataTitles, record, row_id, mysql):
    """Write each field listed in *dataTitles* from *record* into the
    `bidding` row identified by *row_id*; fields missing from *record*
    are skipped with a notice.

    SECURITY FIX: the original concatenated scraped field values directly
    into the SQL string (a value containing a quote would break the query
    or inject SQL).  Values are now passed as query parameters.  Column
    names come from the hard-coded title lists in classify_site, so
    interpolating them into the statement is safe.
    """
    for column in dataTitles:
        if column not in record:
            print("放弃采集当前字段：", column)
            continue
        SQL_update = "UPDATE `crawler`.`bidding` SET `" + column + "` = %s WHERE `id` = %s"
        print(SQL_update)
        mysql.update(SQL_update, [record[column], row_id])


# Status codes: 1 = hit, 2 = not wanted, 3 = processed, 5 = hit again, 4 = missed
while True:
    db = Mysql()
    # Pull the next batch of unprocessed hit rows.
    SQL_site = "SELECT origin_html,origin_url,origin_info,hint FROM raw_source WHERE `status`=1 LIMIT 0,100"
    batch = db.getAll(SQL_site)
    if not batch:
        break
    for row in batch:
        classify_site(row)
    db.dispose(1)

# while (1):
#     mysql = Mysql()
#     # 获得原始数据
#     SQL_site = "SELECT origin_html,origin_url,origin_info,hint FROM raw_source WHERE `status`=1 LIMIT 0,10"
#     # SQL_site = "SELECT origin_html,origin_url,origin_info,hint FROM raw_source WHERE id=6807"
#     raws = mysql.getAll(SQL_site)
#     if not raws:
#         break
#     for raw in raws:
#         hint2(raw)
#     mysql.dispose(1)

# 第一步，命中目标，并根据是否有处理过做标记
# 获得原始数据
# while (1):
#     mysql = Mysql()
#     SQL_site = "SELECT id,origin_html,origin_url,origin_info FROM raw_source WHERE status = 0 LIMIT 0,1000"
#     raws = mysql.getAll(SQL_site)
#     if not raws:
#         break
#     for raw in raws:
#         hint(raw)
#     mysql.dispose(1)
