#!/usr/bin/python
# coding=utf-8
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import re, json, random, time
from urllib import parse
from spider.job_categories import job_categories
import os

# Debug switch: when truthy, page_parser() dumps the prettified HTML of every
# parsed page to stdout.  Set to 0 to silence the dumps.
_USE_DEBUG = 1

class Job51Parser:
    """Parse a zhipin.com (BOSS直聘) job-search result page into job dicts.

    NOTE(review): despite the class name, every selector and URL here targets
    zhipin.com markup, not 51job.com.  Parsing happens eagerly in __init__;
    results accumulate in ``self.job_info`` (one dict per job card).
    """

    def __init__(self, html: str):
        # Accumulated job records across all pages fed to page_parser().
        self.job_info = []
        self.page_parser(html)

    def put_info(self) -> str:
        """Return all collected job records as one newline-terminated string."""
        # ''.join avoids the quadratic cost of repeated string '+='.
        return ''.join(str(job) + '\n' for job in self.job_info)

    @staticmethod
    def get_associate_job(associate_list: list, soup: 'BeautifulSoup'):
        """Append the page's "related search" job names to *associate_list*.

        Mutates *associate_list* in place; duplicates are skipped.
        """
        for job in soup.select('.related-search-list li'):
            if job.text not in associate_list:
                associate_list.append(job.text)

    def page_parser(self, html: str):
        """Parse one result page and extend ``self.job_info`` with its jobs."""
        self.soup = BeautifulSoup(html, 'lxml')

        if _USE_DEBUG:
            print(self.soup.prettify())

        page_jobs = []
        self.update_work_info(page_jobs, self.soup)
        self.job_info += page_jobs

    def create_url(self, job: str, city: str = '上海'):
        """Build a zhipin.com search URL for *job* in *city*.

        Falls back to the nationwide site code when the city is unknown or
        city_fresh() has not populated ``self.city_dict`` yet.
        """
        # getattr guard: city_dict only exists after city_fresh() has run.
        city_dict = getattr(self, 'city_dict', {})
        city_code = city_dict.get(city, 100010000)
        return f'https://www.zhipin.com/web/geek/job?city={city_code}&query={self.cn2urlcode(job)}'

    def city_fresh(self):
        """Refresh the city-name -> city-code mapping from zhipin's city API.

        Populates ``self.city_dict`` and ``self.local_city``.
        NOTE(review): relies on ``self.driver`` (a selenium-style driver with
        get_url()/page_source) being attached externally -- it is never
        created inside this class; confirm against the caller.
        """
        url = 'https://www.zhipin.com/wapi/zpCommon/data/city.json'
        self.driver.get_url(url)
        self.soup = BeautifulSoup(self.driver.page_source, 'lxml')

        element = self.soup.find('body')
        city_dict = json.loads(element.text)

        self.city_dict = {}
        self.current_city = []
        # Initialized up front so the print below cannot raise AttributeError
        # when the API does not answer 'Success'.
        self.local_city = {}
        if city_dict['message'] == 'Success':
            province_list = city_dict['zpData']['cityList']
            local_city = city_dict['zpData']['locationCity']

            for province in province_list:
                for city in province['subLevelModelList']:
                    self.city_dict[city['name']] = city['code']

            self.local_city = {local_city['name']: local_city['code']}

        print(self.local_city)

    @staticmethod
    def cn2urlcode(ch_str: str, encoding: str = 'utf-8'):
        """Percent-encode a (typically Chinese) string for use in a URL."""
        return parse.quote(ch_str.encode(encoding=encoding))

    @staticmethod
    def get_city_info(soup: 'BeautifulSoup'):
        """Read the city drop-down list: return (current_city, {name: code}).

        Each <li> carries its code in the 'ka' attribute as 'sel-city-<code>';
        the currently selected entry additionally carries a CSS class.
        """
        city_dict = {}
        current_city = ''

        html_city_list = soup.find('ul', class_='dropdown-city-list')

        for city in html_city_list:
            # Skip NavigableString children (whitespace between <li> tags),
            # which have no .attrs and would raise AttributeError.
            if not hasattr(city, 'attrs'):
                continue
            if 'ka' in city.attrs:
                city_code = str(city.attrs['ka']).replace('sel-city-', '')
                city_dict[city.text] = city_code
            if 'class' in city.attrs and len(city.attrs['class']):
                current_city = city.text

        return current_city, city_dict

    @staticmethod
    def update_work_info(work_list, soup: 'BeautifulSoup'):
        """Extract every job card on the page into dicts appended to *work_list*.

        Each dict carries the job name, parsed salary range, raw salary text,
        location, job/company tags, HR info and job/company links.
        Mutates and returns *work_list*.
        """
        def _tag_texts(container):
            # Flatten the <li> texts of a tag <ul> into one token list.
            tags = []
            for tag in container.find_all('li'):
                tags += Job51Parser.text_split(tag.text)
            return tags

        def _salary_range(salary: str):
            # e.g. '15-30K·13薪' -> [15, 30].  int(float(...)) because the
            # pattern also matches decimals like '7.5', which int() rejects.
            return [int(float(v)) for v in re.findall(r'\d+\.?\d*', salary)]

        def _get_job_info(work):
            salary_text = str(work.select('.salary')[0].text)
            info = {'岗位': work.select('.job-name')[0].text,
                    '薪酬': _salary_range(salary_text),
                    '薪酬描述': salary_text,
                    '工作地点': Job51Parser.text_split(work.select('.job-area')[0].text)}
            info.update({'岗位Tag': _tag_texts(work.find_all('ul', class_='tag-list')[0])})
            return info

        def _get_company_info(work):
            info = {'公司名称': Job51Parser.text_split(work.select('.company-name')[0].text),
                    '公司特点': Job51Parser.text_split(work.select('.info-desc')[0].text)}
            info.update({'公司Tag': _tag_texts(work.find_all('ul', class_='company-tag-list')[0])})
            info.update({'工作地点': Job51Parser.text_split(work.select('.job-area')[0].text)})
            return info

        def _get_company_hr(work):
            html_info = work.find_all('div', class_='info-public')[0]
            hr_name = str(html_info.text)
            hr_part = str(html_info.find('em').text)
            # The <em> holds the HR's role; stripping it leaves the name.
            return {'HR': [hr_name.replace(hr_part, ''), hr_part]}

        def _get_job_url(work):
            link = work.find('a', class_='job-card-left')
            if link is not None and 'href' in link.attrs:
                return {'岗位链接': 'https://www.zhipin.com' + link.attrs['href']}
            return {'岗位链接': ''}

        def _get_company_url(work):
            link = work.select('.company-name')[0].find('a')
            # None guard: a card without a company <a> must not crash.
            if link is not None and 'href' in link.attrs:
                return {'公司链接': 'https://www.zhipin.com' + link.attrs['href']}
            return {'公司链接': ''}

        extractors = [_get_job_info, _get_company_info, _get_company_hr,
                      _get_job_url, _get_company_url]
        for work in soup.select('.job-list-box .job-card-wrapper'):
            info = {'序号': len(work_list) + 1}
            for extract in extractors:
                info.update(extract(work))

            print(info)
            work_list.append(info)

        return work_list

    @staticmethod
    def text_split(in_text) -> list:
        """Split *in_text* on common separators, dropping empty fragments."""
        separators = [';', '\n', ' ', '\\', '/', '·']
        reg_str = str(in_text)

        # Normalize every separator to '\n', then split once.
        for sep in separators:
            reg_str = reg_str.replace(sep, '\n')

        return [text for text in reg_str.split('\n') if text]

if __name__ == '__main__':
    # Standalone experiment: parse a saved 51job search-result page from the
    # user's desktop and print the extracted job records.
    workPath = os.path.join(os.path.expanduser("~"), 'Desktop', '51job.html')
    # encoding specified explicitly: the saved page is UTF-8, and the platform
    # default (e.g. GBK on Chinese Windows) would mangle it.
    with open(workPath, 'r', encoding='utf-8') as f:
        html = f.read()

    soup = BeautifulSoup(html, 'lxml')
    print(soup.prettify())

    job_info = []

    # Locate the next-page button (None-safe: a single-page result set has no
    # 'btn-next' button, which previously crashed with AttributeError).
    btn = soup.find('button', class_='btn-next')
    if btn is not None:
        btn = btn.find('i', class_='el-icon el-icon-arrow-right')
    print(btn)

    # Locate the job cards.
    job_list = soup.find('div', class_='j_joblist')
    job_list = job_list.find_all('div', class_='e sensors_exposure')

    # Extraction spec per field: (title, result type, tag name, CSS class,
    # match offset).  offset == -1 means "collect every match".
    # Renamed from 'filter' to avoid shadowing the builtin.
    field_specs = [['岗位', str, 'span', 'jname at', 0],
                   ['薪酬', str, 'span', 'sal', 0],
                   ['薪酬描述', str, 'span', 'sal', 0],
                   ['公司名称', str, 'a', 'cname at', 0],
                   ['公司Tag', list, 'p', 'dc at', 0],
                   ['公司Tag', list, 'p', 'int at', 0]]

    for item in job_list:
        info = {'序号': len(job_info) + 1}
        # Tuple unpacking instead of positional val[0]..val[4] indexing.
        for title, result_type, block, class_str, offset in field_specs:
            result = result_type()

            find_list = item.find_all(block, class_=class_str)
            if offset == -1:
                for find in find_list:
                    if isinstance(result, str):
                        result += find.text
                    else:
                        result += [find.text]
            elif offset < len(find_list):
                if isinstance(result, str):
                    result += find_list[offset].text
                else:
                    result += [find_list[offset].text]

            print(f'{title}:{result}')
            # Repeated titles (e.g. the two 公司Tag rows) accumulate.
            if title in info:
                info[title] += result
            else:
                info.update({title: result})

        # Work location plus the first pair of job tags from the '.d.at' span.
        find_list = item.find_all('span', class_='d at')[0].find_all('span')
        info.update({'工作地点': find_list[0].text, '岗位Tag': [find_list[2].text, find_list[4].text]})

        # Full job-tag list from the '.tags' paragraph (replaces the pair above).
        find_list = item.find_all('p', class_='tags')[0].find_all('span')
        info.update({'岗位Tag': [find.text for find in find_list]})
        info.update({'HR': []})

        # Job detail link.
        url = item.find('a', class_='el')
        if url is not None and 'href' in url.attrs:
            info.update({'岗位链接': url.attrs['href']})
        else:
            info.update({'岗位链接': ''})

        # Company link.
        url = item.find('a', class_='cname at')
        if url is not None and 'href' in url.attrs:
            info.update({'公司链接': url.attrs['href']})
        else:
            info.update({'公司链接': ''})

        print(f'{info}\n')
        job_info.append(info)

    print(job_info)
