import json
import pandas as pd
import xlwt as xlwt
import re
import cpca
import os
import time
import requests
from datetime import datetime
from Util import format_content, parse_salary_info, base_api, remove_phone_and_address
import Util

# Backend gateway root; used by request_filter_keys() and request_tags().
base_url = "https://gateway.gyhapp.com"


# Work-type recognition (new version).
# The backend AI endpoint decides whether the text contains a known work type;
# records with no recognized type are dropped by the caller.  This greatly
# improves precision at the cost of data volume — the backend should keep
# adding uncommon/special work types over time to reduce the loss rate.
def get_work_type_by_desc(detail):
    """Return the list of work types recognized in `detail`, or [] on any failure."""
    try:
        payload = json.dumps({'information': detail})
        response = requests.post(
            'https://gateway.gongyouhui.com/admin/ai/workRecognize',
            headers={"content-type": "application/json"},
            data=payload)
        result = json.loads(response.text)
        if not result:
            print('工种判断接口数据异常')
            return []
        return result.get("data") or []
    except Exception as e1:
        print('get_work_type_by_desc_ai 方法 接口请求异常', e1)
        return []


def get_location(address_text, default_pc=""):
    """Geocode a free-text address via the backend service (backed by AMap).

    :param address_text: raw address text; may contain HTML tags and noise
    :param default_pc: fallback "province+city" string used when neither the
        remote service nor the local cpca parser can extract a location
    :return: dict with keys longitude/latitude/province/city/county/detail,
        or None when the address cannot be resolved (callers drop the record)
    """
    try:
        clean = re.compile('<.*?>')
        address_text = re.sub(clean, '', str(address_text))
        # Strip newlines, tabs and square brackets.
        address_text = re.sub(r'[\n\r\t\[\]]', '', address_text)
        address_text = re.sub('地址：', '', address_text)

        # The AMap reverse-geocoding backend rejects inputs longer than 127
        # characters, so for long strings pre-extract province+city locally
        # with the cpca library and geocode that shorter text instead.
        if len(address_text) > 126:
            temp = cpca.transform([address_text])
            temp_province = temp.iloc[0, 0]
            temp_city = temp.iloc[0, 1]

            if temp_province is not None:
                address_text = temp_province + (temp_city or "")

        url_for_location = 'https://gyh.work/third/getLonAndLatD'
        resp = requests.get(url_for_location,
                            params={"address": address_text})

        json_data = json.loads(resp.text)
        # Remote parse failed ('00000' is the success code) -> retry after
        # extracting province/city locally.
        if json_data is None or json_data.get('status') == 500 or json_data.get('code') is None or json_data.get(
                'code') != '00000':
            try:
                # Second attempt: local extraction with cpca.
                temp = cpca.transform([address_text])
                temp_province = temp.iloc[0, 0]
                temp_city = temp.iloc[0, 1]

                # cpca cannot resolve the abbreviation "内蒙"; expand it to
                # "内蒙古" and retry once.
                if temp_province is None and "内蒙" in address_text and "内蒙古" not in address_text:
                    address_text = re.sub('内蒙', '内蒙古', address_text)
                    temp = cpca.transform([address_text])
                    temp_province = temp.iloc[0, 0]
                    temp_city = temp.iloc[0, 1]

                if temp_province is not None:
                    # Geocode the cpca-extracted province+city string.
                    address_text = temp_province + (temp_city or "")
                    resp = requests.get(url_for_location,
                                        params={"address": address_text})
                    json_data = json.loads(resp.text)
                else:
                    if len(default_pc) == 0:
                        # Give up on this record: no location extractable.
                        return None
                    else:
                        # Last resort: geocode the caller-supplied default.
                        address_text = default_pc
                        resp = requests.get(url_for_location,
                                            params={"address": address_text})
                        json_data = json.loads(resp.text)
            except Exception as e4:
                print('[放弃本条数据]API解析出错', address_text, e4)
                return None

        json_data = json_data.get('data')
        loc_pro = json_data.get("province")
        loc_city = json_data.get("city")

        # County-level city.
        loc_county = json_data.get("county")
        loc_detail = json_data.get("address")
        loc_longitude = json_data.get('longitude')
        loc_latitude = json_data.get('latitude')

        # Missing province or zero coordinates -> treat the record as invalid
        # (mirrors the server-side rule; the crawler filters up front).
        if loc_pro is None or loc_longitude is None or loc_latitude is None or loc_longitude == 0 or loc_latitude == 0:
            print('解析后发现经纬度、省份存在空值', loc_pro, loc_detail, loc_longitude, loc_latitude, ' [原值:',
                  address_text,
                  ']\n')
            return None

        # Municipalities report province == city; use the county as the city.
        if loc_pro == loc_city and loc_county is not None:
            loc_city = loc_county

        if loc_city is None or "[]" in loc_city:
            loc_city = ''

        if loc_detail is None or len(loc_detail) < 2:
            loc_detail = loc_pro + (loc_city or "")

        if loc_county is None or "[]" in loc_county:
            loc_county = ''

        if loc_detail is None or len(loc_detail) < 2:
            print('无法解析/生成详细地址')
            return None

        return_data = {"longitude": loc_longitude, "latitude": loc_latitude, "province": loc_pro, "city": loc_city,
                       "county": loc_county, "detail": loc_detail}

        return return_data
    except Exception as ee:
        print("\napi解析地址经纬度出现异常:", ' 地址->', address_text, '  异常信息->', ee)
        return None


# Detect whether the content contains any moderation keywords (content that
# matches must go through manual review).
def check_contain_by_keys(content):
    """Return the matched keywords as [{'id', 'name'}, ...], or "" when none match."""
    try:
        keyword_list = None
        # Prefer the local cache when it is fresh (less than one hour old).
        if os.path.exists('keys.json'):
            with open('keys.json', 'r') as f:
                cached = json.load(f)
            if time.time() - cached['timestamp'] < 3600:
                keyword_list = cached['keys']
        if keyword_list is None:
            # Cache missing or stale: refresh from the backend.
            keyword_list = request_filter_keys()

        hits = []
        for keyword in keyword_list:
            name = keyword.get('name').replace('\n', '')
            if '&' in name:
                # "a&b" means every sub-keyword must be present.
                matched = all(part in content for part in name.split('&'))
            else:
                matched = name in content
            if matched:
                hits.append({'id': keyword.get('id'), 'name': keyword.get('name')})

        # Callers stringify the result, so an empty match list becomes "".
        return hits if hits else ""
    except Exception as e1:
        print('关键词解析异常：', e1)
        request_filter_keys()
        return ""


# Load the moderation keyword list from the backend and refresh keys.json.
def request_filter_keys():
    """Fetch the keyword list, clean it, cache it, and return it.

    :return: list of {'id': ..., 'name': ...} dicts with CR/LF/tab stripped
        from names — the same data that is written to the keys.json cache.
    """
    global base_url
    keys = requests.post(base_url + '/entry/dictionaryOperativeWord/getOperativeWordList')
    print('结果：', keys.text)
    data = json.loads(keys.text)
    tag_keys = data.get('data') or []

    # Keep only id/name and strip CR/LF/tab characters from names so the
    # in-memory list matches what check_contain_by_keys expects.
    processed_tag_keys = []
    for item in tag_keys:
        tid = item.get('id') or ''
        name = item.get('name') or ''
        if name:
            name = name.replace('\n', '').replace('\r', '').replace('\t', '')
        processed_tag_keys.append({'id': tid, 'name': name})

    # Persist the cleaned list with a timestamp (cache TTL is one hour).
    with open('keys.json', 'w') as f:
        json.dump({'timestamp': time.time(), 'keys': processed_tag_keys}, f)
    # BUGFIX: previously returned the raw `tag_keys`, which disagreed with the
    # processed list written to the cache (and with request_tags' behavior).
    return processed_tag_keys


# Find which dictionary tags appear in the content.
def find_tag(content):
    """Return [{'code': ..., 'content': [matched labels]}, ...]; [] on error."""
    try:
        # Load the tag rules: fresh cache -> use it; stale, empty, or missing
        # cache -> re-request from the backend.
        if os.path.exists('tags.json'):
            with open('tags.json', 'r') as f:
                cached = json.load(f)
            if time.time() - cached['timestamp'] < 3600:
                tag_rules = cached['tags']
            else:
                tag_rules = request_tags()
            if len(tag_rules) == 0:
                tag_rules = request_tags()
        else:
            tag_rules = request_tags()

        result = []
        seen_codes = set()  # guard against emitting the same code twice
        for rule in tag_rules:
            hits = [lbl for lbl in rule['dictionaryLabel'] if lbl in content]
            if not hits or rule['code'] in seen_codes:
                continue
            if len(hits) > 1:
                # Composite labels subsume their parts: "五险一金" beats
                # "五险"/"一金", "交社保" beats "社保", etc.
                if "五险一金" in hits:
                    hits = [x for x in hits if x != "五险" and x != "一金"]

                if "交社保" in hits:
                    hits = [x for x in hits if x != "社保"]

                if "包吃包住" in hits or "包吃住" in hits:
                    hits = [x for x in hits if x != "包吃" and x != "包住"]

                if "包吃包住" in hits:
                    hits = [x for x in hits if x != "包吃住"]

                # Negated phrases in the content cancel the matching labels.
                if "不包吃" in content:
                    hits = [x for x in hits if "包吃" not in x]

                if "不包住" in content:
                    hits = [x for x in hits if "包住" not in x]

                if "不包吃包住" in content or "不包吃不包住" in content or "不包吃住" in content:
                    hits = [x for x in hits if "包住" not in x and "包吃" not in x]

                if "无年终奖" in content:
                    hits = [x for x in hits if x != "年终奖"]

                if "无五险一金" in content:
                    hits = [x for x in hits if "五险" not in x and "一金" not in x]

            result.append({'code': rule['code'], 'content': hits})
            seen_codes.add(rule['code'])
        return result
    except Exception as e1:
        print('标签解析异常：', e1)
        return []


# Submit one record; the data goes straight into the database.
def upload_work_data(platform, job_name, job_type, job_content, job_linkman, job_phone, job_province, job_city,
                     job_longitude, job_latitude,
                     job_address, job_wages, job_unit, item=None):
    """Validate, clean and upload one job record to the backend.

    :return: "OK" on success, otherwise a short message describing why the
        record was rejected or why the upload failed.
    """
    if item is None:
        item = {}
    global line_no
    global curr_platform
    global last_platform

    # BUGFIX: these module-level counters are never defined at module scope,
    # so the very first call raised NameError.  Create them lazily.
    if 'line_no' not in globals():
        line_no = 1
    if 'last_platform' not in globals():
        last_platform = None

    curr_platform = platform
    return_message = "OK"

    # Reset the per-platform line counter whenever the source platform changes.
    if last_platform != curr_platform:
        line_no = 1
        last_platform = curr_platform

    # Default contact person when none was supplied.
    job_linkman = job_linkman if job_linkman is not None and job_linkman != '' else "先生"

    if isinstance(job_name, list) and len(job_name) > 0:
        job_name = job_name[0]

    # --- validation: drop records with missing essential fields ---
    if job_province is None or len(job_province) < 2:
        print('\n------------ 省市信息未能获取到，', job_province, job_city)
        return "省市信息未能获取到"

    if job_phone is None or len(job_phone) != 11:
        print('\n------------ 电话为空或非手机号码，', job_phone)
        return "电话为空或非手机号码"

    if job_content is None or len(job_content) < 2 or job_name is None or len(job_name) < 1:
        print('\n------------ 标题内容为空，', job_name)
        return "标题内容为空"

    if job_address is None or len(job_address) < 2:
        print('\n------------ 地址信息为空，', job_address)
        return "地址信息为空"

    # Strip HTML tags, collapse runs of spaces, and trim leading whitespace.
    job_name = re.sub(' {2,}', ' ', job_name)
    if job_content is None or len(job_content) == 0:
        job_content = job_name
    else:
        job_content = str(job_content)
        job_content = re.sub('<.*?>', '', job_content)
        job_content = re.sub(' {2,}', ' ', job_content)
        job_content = job_content.lstrip()

    job_content = job_content.lstrip()
    job_content = format_content(job_content)
    job_name = job_name.lstrip()

    tags = find_tag(job_name + job_content)
    keyword = check_contain_by_keys(job_name + job_content)

    # Source channel: 1 = WeChat, 2 = other web platforms, 3 = 去施工.
    source = 2
    if platform == '去施工':
        source = 3

    # Strip decorative emoji markers / spaces from the start of title/content.
    emojis = ['[太阳]', '[月亮]', '[庆祝]', '[烟花]', '[鲜花]', '[红包]', ' ']
    while job_name.startswith(tuple(emojis)):
        for emoji in emojis:
            if job_name.startswith(emoji):
                job_name = job_name.replace(emoji, '', 1)

    while job_content.startswith(tuple(emojis)):
        for emoji in emojis:
            if job_content.startswith(emoji):
                job_content = job_content.replace(emoji, '', 1)

    job_salary = parse_salary_info(job_content)
    job_content = remove_phone_and_address(job_content)

    if job_type is None or len(job_type) == 0:
        print("工种为空，但是继续上传", job_type)

    # BUGFIX: the old check `len(str(min)) > 0` treated None as the 4-char
    # string "None", so missing salaries were uploaded as None instead of
    # falling back to "面议".
    min_wage = job_salary.get('min')
    parameter = [{
        "minWage": min_wage or "",
        "maxWage": job_salary.get('max') or "",
        "wage": min_wage if min_wage not in (None, "") else "面议",
        "unit": job_salary.get('unit') or "",
        "keyword": str(keyword),
        "title": str(job_name),
        "labelCustom": tags,
        "workerType": job_type or [],
        "information": job_content,
        "contactPerson": job_linkman,
        "contactNumber": job_phone,
        "provinceName": job_province,
        "cityName": job_city,
        "longitude": job_longitude,
        "latitude": job_latitude,
        "address": job_address,
        "requestBody": str(item),
        "source": source,
        "platform": platform
    }]

    res = requests.post(base_api, headers={"content-type": "application/json"},
                        data=json.dumps(parameter))
    print('上传结果：', res.text, '  参数:', parameter)
    # "00000" is the backend's success code.
    if "00000" not in res.text:
        return_message = "接口上传报错[" + str(res.text) + "]"
        print('接口上传报错：', res.text)
        return return_message

    if return_message == "OK":
        print('\n')
        print('--------------------------  ', platform, '第 ', line_no, ' 条数据  --------------------------')
        print('已入库-->', '标题:', job_name, ' 薪资:', job_wages, job_unit, ' 接口返回:', res.text, ' 原始：', item)
        line_no += 1
    else:
        print('未入库！丢弃本条数据 原因:', return_message)

    return return_message


def request_tags():
    """Fetch the tag dictionary from the backend and refresh tags.json.

    :return: list of {'code', 'name', 'dictionaryLabel'} dicts with CR/LF/tab
        stripped from names; [] on any request/parse error.
    """
    try:
        global base_url
        keys = requests.get(base_url + '/entry/dictionaryLabel/getDictionaryLabel')
        print('请求接口获取tags', keys.text)
        data = json.loads(keys.text)
        # BUGFIX: guard against a missing/None "data" field in the response.
        tag_keys = data.get('data') or []

        # Keep only code/name/dictionaryLabel; strip CR/LF/tab from names.
        processed_tag_keys = []
        for item in tag_keys:
            tid = item.get('code') or ''
            name = item.get('name') or ''
            works = item.get('dictionaryLabel') or []
            if name:
                name = name.replace('\n', '').replace('\r', '').replace('\t', '')
            processed_tag_keys.append({'code': tid, 'name': name, 'dictionaryLabel': works})

        # Persist the cleaned list with a timestamp (cache TTL is one hour).
        with open('tags.json', 'w') as f:
            json.dump({'timestamp': time.time(), 'tags': processed_tag_keys}, f)
        return processed_tag_keys
    except Exception as e1:
        print('getDictionaryLabel接口请求出错', e1)
        # BUGFIX: previously fell through returning None, which made callers'
        # len() checks raise TypeError; return an empty list instead.
        return []


# Posting title: the name of the first recognized work type.
def title_one(data):
    """Return the 'name' field of the first entry in `data`."""
    first_entry = data[0]
    return first_entry["name"]


def new_table():
    """One-shot batch cleaner: read the raw Excel export, clean each row
    (work type, location, keywords, tags) and write the result to a new
    spreadsheet.

    NOTE(review): input and output paths are hard-coded; expected input
    columns are 1=description, 2=phone, 3=contact, 4=address, 5=wage —
    confirm against the actual export before reuse.
    """
    excel = xlwt.Workbook(encoding='utf-8')
    # Create the output sheet.
    sheet = excel.add_sheet('微信公众号', cell_overwrite_ok=True)
    # Header row.
    title = ['招工标题', '工种', '工种标签', '招工信息(描述)', '发布时间', '联系人', '联系电话', '省份', '市', '县',
             '工作地址', '经度', '纬度', '工资', '工资单位', '备注', '包含关键词', '标签', '最小工资', '最大工资']
    i = 0
    for data in title:
        sheet.write(0, i, data)
        i = i + 1
    # Read the raw Excel export (hard-coded local path).
    book = pd.read_excel(r'E:\飞书下载文件\20240115-原数据.xlsx', sheet_name=0)
    index = 0
    for i, row in book.iterrows():
        # NOTE(review): comparing against the string "NaN" only filters cells
        # that literally contain the text "NaN", not real NaN floats — confirm
        # that is the export's convention.
        if row[1] != "NaN":
            if row[4] != "NaN":
                # Keep only rows whose phone column is at least 11 chars long.
                if len(str(row[2])) >= 11:
                    # Resolve column 4 (address) into coordinates + region names.
                    Provinc_citi_counti = get_location(row[4])
                    if Provinc_citi_counti == None:
                        continue
                    # Stamp each cleaned row with today's date (YYYY-MM-DD).
                    now = datetime.now()
                    formatted_date = now.strftime("%Y-%m-%d")
                    time_data = (f"{formatted_date}")
                    # Recognize work types from the description (column 1).
                    work = get_work_type_by_desc(row[1])
                    # Moderation keywords contained in the description.
                    keywords = check_contain_by_keys(row[1])
                    keywords_s = str(keywords)
                    # Dictionary tags contained in the description.
                    label = find_tag(row[1])
                    label_s = str(label)
                    # Stringified work-type list, used both as a cell value and
                    # as the "no work type recognized" sentinel below.
                    a = str(work)
                    # Drop the row entirely when no work type was recognized.
                    if not a == "[]":
                        index = index + 1
                        # Title = first recognized work type's name.
                        sheet.write(index, 0, title_one(work))
                        sheet.write(index, 1, a)
                        # Phone.
                        sheet.write(index, 6, row[2])
                        # Job description.
                        sheet.write(index, 3, row[1])
                        # Work address.
                        sheet.write(index, 10, row[4])
                        # Contact person.
                        sheet.write(index, 5, row[3])
                        # Longitude.
                        sheet.write(index, 11, Provinc_citi_counti["longitude"])
                        # Latitude.
                        sheet.write(index, 12, Provinc_citi_counti["latitude"])
                        # Province.
                        sheet.write(index, 7, Provinc_citi_counti["province"])
                        # City.
                        sheet.write(index, 8, Provinc_citi_counti["city"])
                        # County.
                        sheet.write(index, 9, Provinc_citi_counti["county"])
                        # Keywords.
                        sheet.write(index, 16, keywords_s)
                        # Tags.
                        sheet.write(index, 17, label_s)
                        # Wage.
                        sheet.write(index, 13, row[5])
                        # Date.
                        sheet.write(index, 4, time_data)

    # Save the cleaned spreadsheet (hard-coded output name).
    excel.save('20240115-清洗数据.xlsx')
    # Util.upload_work_data('longitude', 'latitude', 'province', 'city', 'county', 'detail')


# Run the batch clean only when executed as a script, not when this module is
# imported (the unguarded call triggered file/network I/O on import).
if __name__ == "__main__":
    new_table()
