#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
import inspect
import re
import time

from bs4 import BeautifulSoup

from common import hlog
from data_handle.util import classification_level
from send_info import send_to_api
from data_handle.platform_handle import PlatformDataClean
from service.clean_error import save_html_log


class ZhipinDetailClean(PlatformDataClean):
    """Cleaner for BOSS直聘 (zhipin.com) job-detail HTML pages.

    ``load_dict`` is the raw crawler message.  It must contain
    "htmlString" (the page, base64 encoded) and "url"; it may also carry
    "platform" and "spiderUuid".  ``data_clean`` parses the page and
    forwards the cleaned record via ``send_to_api``.
    """

    def __init__(self, load_dict):
        super().__init__()
        # Raw crawler message; see class docstring for expected keys.
        self.load_dict = load_dict

    def data_clean(self):
        """Extract all job fields from the HTML and push them to the API.

        Any exception raised while parsing is caught and the offending
        HTML is persisted with ``save_html_log`` so the failure can be
        replayed later.  Returns ``None`` in all cases.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)
        hlog.info("开始清洗数据")

        html = ""
        try:
            # Decode inside the try block so a malformed base64 payload
            # is also routed to the error handler instead of escaping
            # this method uncaught (it previously sat before the try).
            html = base64.b64decode(self.load_dict["htmlString"].encode("utf-8")).decode("utf-8")
            soup = BeautifulSoup(html, "lxml")

            # Position title; an empty result usually means the page was
            # served by the anti-crawler system.
            position = self.get_position(soup)
            if not position:
                # exit_func is emitted by the finally clause; calling it
                # here as well (as the old code did) logged the exit twice.
                return

            # Salary text, min/max (monthly, in K) and salary months per year.
            salary, salary_min, salary_max, several = self.get_salary(soup)

            # City, experience range, education, job kind.
            city, exp, exp_min, exp_max, edu, kind, edu_level = self.get_city_exp_edu_kind(soup)

            # Position tags are not present on the detail page.
            tag_list = []

            # Benefits / perks.
            advantage = self.get_advantage(soup)

            # Free-text job description.
            pos_desc = self.get_pos_desc(soup)

            # Recruiter name and title.
            hr_name, hr_position = self.get_hr(soup)

            company_name = self.get_company_name(soup)

            # Work address.
            location = self.get_location(soup)

            # Company sidebar: funding stage, size, home page, publish
            # time and industry tags.
            stage, scale, scale_left, scale_right, company_main_page, publish_time, company_nature = \
                self.get_company_info(soup)

            # Source bookkeeping.  The position id is the path segment
            # between "detail/" and ".html"; the dot is escaped so that
            # e.g. "...detailXhtml" cannot match by accident.
            src_name = self.load_dict.get("platform")
            src_url = self.load_dict.get("url")
            src_pos_id = re.search(r"detail/(.*?)\.html", self.load_dict["url"]).group(1).strip()

            spider_uuid = self.load_dict.get("spiderUuid")

            # Payload keys follow the API's camelCase schema.
            send_data = {
                'srcName': src_name,
                'srcUrl': src_url,
                'srcPosId': src_pos_id,
                'position': position,
                'salary': salary,
                'salaryMin': salary_min,
                'salaryMax': salary_max,
                'several': several,
                'city': city,
                'address': location,
                'exp': exp,
                'expMin': exp_min,
                'expMax': exp_max,
                'edu': edu,
                'eduLevel': edu_level,
                'tagList': tag_list,
                'advantage': advantage,
                'posDesc': pos_desc,
                'hrName': hr_name,
                'hrPosition': hr_position,
                'companyName': company_name,
                'companyNature': company_nature,
                'stage': stage,
                'scale': scale,
                'scaleLeft': scale_left,
                'scaleRight': scale_right,
                'companyMainPage': company_main_page,
                'publishTime': publish_time,
                'spiderUuid': spider_uuid,
                'kind': kind,
            }

            hlog.info("数据清洗成功，准备发送到API接口")
            hlog.info("清洗成功的html的URL：" + src_url)

            send_to_api("send_pos_detail", "ZhiPin", send_data)

        except Exception as e:
            try:
                platform = self.load_dict.get("platform")
                spider_uuid = self.load_dict.get("spiderUuid")
                url = self.load_dict.get("url")
                save_html_log(html, platform, spider_uuid, url, e)
            except Exception as exc:
                # str() the dict values: they may be None (e.g. when run
                # from the debug __main__ block, which sets no
                # spiderUuid) and "+" with None would raise here.
                hlog.error("保存boss直聘清洗失败的html出错：" + str(exc))
                hlog.error("保存boss直聘清洗出错的spiderUuid: " + str(self.load_dict.get("spiderUuid")))
                hlog.error("保存boss直聘清洗出错的html的url: " + str(self.load_dict.get("url")))
        finally:
            hlog.exit_func(func_name)

    @staticmethod
    def get_position(soup):
        """Return the position title, or "" when it cannot be found.

        A missing ``.info-primary .name h1`` node is treated as a likely
        anti-crawler page and logged for manual inspection.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        position_list = soup.select(".info-primary .name h1")
        position = ""

        if position_list:
            is_position = re.search(".*?>(.*?)</.*?", str(position_list[0]))

            if is_position:
                position = is_position.group(1).strip()
        else:
            # Dump the whole page so the on-call engineer can inspect it.
            hlog.error(str(soup))
            hlog.error('BOSS直聘爬虫：职位详情分析失败，是否被反爬机制识别了？请人工确认')

        hlog.var('position', position)
        hlog.exit_func(func_name)
        return position

    @staticmethod
    def get_salary(soup):
        """Return ``(salary_text, salary_min, salary_max, several)``.

        Daily wages (text containing "天") are converted to an
        approximate monthly figure in K assuming 22 working days.  A
        third number in the text is the salary-months count (e.g.
        "15薪"); otherwise ``several`` defaults to 12.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        salary_list = soup.select(".info-primary .name .salary")
        is_salary = re.search(".*?>(.*?)</.*?", str(salary_list[0]))

        salary, salary_min, salary_max, several = 0, 0, 0, 12
        if is_salary:
            salary = is_salary.group(1).strip()
            salary_find = re.findall(r"\d+", salary)
            nums = [int(n) for n in salary_find]
            if re.findall("[天]", salary):
                # Daily wage: 22 working days per month, expressed in K.
                nums = [n * 22 // 1000 for n in nums]

            if len(nums) >= 2:
                salary_min, salary_max = nums[0], nums[1]
            elif len(nums) == 1:
                # Single figure (e.g. "10K以上"): use it for both bounds
                # instead of raising IndexError as the old code did.
                salary_min = salary_max = nums[0]

            if len(salary_find) == 3:
                several = int(salary_find[2])

        hlog.exit_func(func_name)

        return salary, salary_min, salary_max, several

    @staticmethod
    def get_city_exp_edu_kind(soup):
        """Return ``(city, exp, exp_min, exp_max, edu, kind, edu_level)``.

        Internship/daily-wage postings ("周"/"月"/"天" markers) are
        labelled "应届生" with a 0-0 experience range.  ``kind`` is
        always ``None`` here (not derivable from this page).
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        is_salary = re.findall("[天]", str(soup.select(".info-primary .name .salary")))
        is_practice = re.findall("[周月]", str(soup.select(".info-primary p")[0]))
        city_exp_edu = re.findall(".*?>(.*?)<.*?", str(soup.select(".info-primary p")[0]), re.S)

        city, exp, edu = "", "", ""
        exp_min, exp_max = 0, 0
        if is_salary or is_practice:
            city = city_exp_edu[0].strip()
            exp = "应届生"
            edu = city_exp_edu[-1].strip()
        elif city_exp_edu:
            city = city_exp_edu[0].strip()
            # Guard the middle element: pages with a short info line no
            # longer raise IndexError here.
            exp = city_exp_edu[2].strip() if len(city_exp_edu) > 2 else ""
            edu = city_exp_edu[-1].strip()

            exp_find = re.findall(r"\d+", exp)
            if len(exp_find) == 2:
                exp_min = int(exp_find[0])
                exp_max = int(exp_find[1])
            elif len(exp_find) == 1:
                # A lone number >= 5 means "N年以上" (lower bound only);
                # a smaller lone number means "N年以内" (upper bound).
                if int(exp_find[0]) >= 5:
                    exp_min = int(exp_find[0])
                else:
                    exp_max = int(exp_find[0])

        # Job nature is not exposed on this page.
        kind = None

        edu_level = classification_level(edu)

        hlog.exit_func(func_name)

        return city, exp, exp_min, exp_max, edu, kind, edu_level

    @staticmethod
    def get_advantage(soup):
        """Return the deduplicated list of benefit tags (may be empty)."""
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        advantage = []
        # set() removes duplicated <span> tags before text extraction.
        advantage_list = list(set(soup.select(".tag-container .tag-more .job-tags span")))
        for adv_b in advantage_list:
            adv_result = re.search(".*?>(.*?)</.*?", str(adv_b))

            if adv_result:
                advantage.append(adv_result.group(1))

        hlog.exit_func(func_name)

        return advantage

    @staticmethod
    def get_pos_desc(soup):
        """Return the job description text, "" when absent.

        ``<br/>`` becomes a newline and ``&amp;`` is unescaped.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        pos_describ_s = soup.select(".text")
        is_pos_desc_str = re.search('.*?text">(.*?)</div.*?', str(pos_describ_s[0]), re.S)
        if is_pos_desc_str:
            pos_desc_str = is_pos_desc_str.group(1).strip()
            pos_desc = pos_desc_str.replace("<br/>", "\n").replace("&amp;", "&")
        else:
            pos_desc = ""

        hlog.exit_func(func_name)

        return pos_desc

    @staticmethod
    def get_hr(soup):
        """Return ``(hr_name, hr_position)``, each "" when not found."""
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        is_hr_name = re.search(".*?>(.*?)<i.*?", str(soup.select(".detail-op .name")[0]))
        hr_name = is_hr_name.group(1).strip() if is_hr_name else ""

        is_hr_position = re.search(".*?>(.*?)<em.*?", str(soup.select(".detail-op .gray")[0]))
        hr_position = is_hr_position.group(1).strip() if is_hr_position else ""

        hlog.exit_func(func_name)

        return hr_name, hr_position

    @staticmethod
    def get_company_name(soup):
        """Return the company name (second ``.company-info`` anchor), or ""."""
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        is_company_name = re.search(".*?>(.*?)</.*?", str(soup.select(".company-info > a")[1]), re.S)
        company_name = is_company_name.group(1).strip() if is_company_name else ""

        hlog.exit_func(func_name)

        return company_name

    @staticmethod
    def get_location(soup):
        """Return the work address, or "" when not found."""
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        work_place = re.search(".*?>(.*?)<.*?", str(soup.select(".job-location .location-address")[0]))
        location = work_place.group(1).strip() if work_place else ""

        hlog.exit_func(func_name)

        return location

    @staticmethod
    def get_company_info(soup):
        """Parse the company sidebar.

        Returns ``(stage, scale, scale_left, scale_right,
        company_main_page, publish_time, company_nature)``.  ``stage``,
        ``scale`` and ``company_main_page`` are ``None`` when the
        corresponding icon block is missing; ``publish_time`` falls back
        to today's date; ``company_nature`` is a list of industry words.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        info_str = str(soup.select(".sider-company"))

        stage, scale, company_main_page = None, None, None
        company_nature = []

        # Funding / development stage.
        is_stage = re.search('.*?icon-stage.*?/i>(.*?)</.*?', info_str)
        if is_stage:
            stage = is_stage.group(1).strip()

        # Head-count range, e.g. "100-499人".
        is_scale = re.search('.*?icon-scale.*?/i>(.*?)</.*?', info_str)
        if is_scale:
            scale = is_scale.group(1).strip()

        scale_left, scale_right = 0, 0
        # Guard on scale: the old code ran re.findall on None (TypeError)
        # whenever the icon-scale block was absent from the sidebar.
        if scale:
            scale_find = re.findall(r"\d+", scale)
            if len(scale_find) == 1:
                scale_left = int(scale_find[0])
            elif len(scale_find) >= 2:
                scale_left = int(scale_find[0])
                scale_right = int(scale_find[1])

        # Industry tags, split into individual words.
        is_nature = re.search('.*?icon-industry.*?/i>.*?">(.*?)</a.*?', info_str, re.S)
        if is_nature:
            nature_pattern = re.compile(r"\w+")
            nature_result = re.findall(nature_pattern, is_nature.group(1).strip())
            for result in nature_result:
                company_nature.append(result.strip())

        # Company home page.
        is_main_page = re.search('.*?icon-net.*?/i>(.*?)</.*?', info_str)
        if is_main_page:
            company_main_page = is_main_page.group(1).strip()

        # Publish time; default to today when the page omits it.
        is_publish_time = re.search(".*?更新于：(.*?)</p", info_str)
        if is_publish_time:
            publish_time = is_publish_time.group(1).strip()
        else:
            publish_time = time.strftime("%Y-%m-%d", time.localtime())

        hlog.exit_func(func_name)
        return stage, scale, scale_left, scale_right, company_main_page, publish_time, company_nature


if __name__ == "__main__":
    # 修复bug的时候用
    with open("this.html", "r", encoding="utf-8") as f:
        htmlString = f.read()
    l_dict = {"htmlString": htmlString,
              "url": "https://www.zhipin.com/job_detail/cf3840b9e6239c560nZ439-0GVI~.html?ka=search_list_1",
              "platform": "boss直聘"}
    zhipin = ZhipinDetailClean(l_dict)
    zhipin.data_clean()
