# coding=utf8
from config import *

import random
import time
import json
import os
import csv
import requests
import re
from lxml import etree
import urllib.parse


def encode_job_list_condition(url: str, condition):
    """
    Build the job-list request URL by appending encoded query conditions.

    :param url: base URL without a query string
    :param condition: dict of optional search filters:
        city - work city (defaults to '全国' / nationwide)
        px   - sort order: default / hot / new
        gm   - company size: 1 = fewer than 15 people, 2 = 15-50, etc.;
               multi-select is joined like '_1_2'
        jd   - funding stage
        gx   - job nature: full-time / part-time / internship
        gj   - work experience (isSchoolJob 1 = fresh graduate)
        xl   - education level
        hy   - industry
    :return: url + '?' + the encoded query string
    """
    # Parameters that are always present, with their defaults.
    parts = [
        'city=' + url_encode(condition.get('city', '全国')),
        'px=' + url_encode(condition.get('px', 'default')),
    ]

    # Optional filters, added only when present in the condition dict.
    # BUG FIX: the 'gj' parameter previously lacked its leading '&'
    # separator, so it was glued onto the preceding parameter's value
    # and both filters were silently corrupted.
    for key in ('gm', 'jd', 'gx', 'gj', 'xl', 'hy'):
        if key in condition:
            parts.append(key + '=' + url_encode(condition[key]))

    parts.append('needAddtionalResult=false')

    return url + '?' + '&'.join(parts)


def get_user_agent() -> str:
    """
    Pick a random User-Agent string from the configured pool.

    :return: one entry of USER_AGENT, chosen uniformly at random
    """
    pool = USER_AGENT
    return pool[random.randrange(len(pool))]


def url_encode(url: str):
    """
    Percent-encode a string for safe use inside a URL query component.

    :param url: raw text (may contain non-ASCII characters)
    :return: the percent-encoded string
    """
    encoder = urllib.parse.quote
    return encoder(url)


def headers():
    """
    Return a fresh copy of the default request headers with a
    randomized User-Agent, so each request looks slightly different.
    """
    return {**DEFAULT_HEADERS, 'User-Agent': get_user_agent()}


def get_cookies(url: str):
    """
    Perform a GET request and return the cookies set by the response.

    Redirects are deliberately not followed so the cookies come from
    the initial response rather than a redirect target.

    :param url: URL to request
    :return: the response's cookie jar
    """
    resp = requests.get(url,
                        headers=headers(),
                        allow_redirects=False,
                        verify=VERIFY_SSL)
    return resp.cookies


def get_job_list_cookies(search_kd: str, condition):
    """
    Fetch the cookies needed to query the job-list endpoint.

    :param condition: search filter dict
    :param search_kd: search keyword; must be non-empty
    :return: cookie jar from the job-list page
    :raises Exception: when the keyword is missing or empty
    """
    if not search_kd:
        raise Exception('查询条件中的搜索关键字不能为空')

    return get_cookies(get_job_list_cookies_url(search_kd, condition))


def get_job_list_cookies_url(search_kd: str, condition):
    """
    Build the job-list page URL for *search_kd* with all query
    conditions applied (also used as the cookie/referer source).
    """
    return encode_job_list_condition(
        'https://www.lagou.com/jobs/list_{}'.format(search_kd),
        condition,
    )


def get_job_detail_cookies(search_kd: str, condition):
    """
    Fetch cookies for job-detail requests.

    The detail pages accept the same cookies as the list page, so this
    is a straight delegation to get_job_list_cookies.

    :param condition: search filter dict
    :param search_kd: search keyword
    :return: cookie jar usable for detail-page requests
    """
    return get_job_list_cookies(search_kd, condition)


def save_csv(jobs):
    """
    Append one page of job records to the CSV output file.

    The file is created with a header row on first use; later calls
    append data rows only.

    :param jobs: list of job dicts produced by get_job_base_info
    """
    if not jobs:
        return

    rows = [
        [
            job['position_id'],
            job['position_name'],
            job['education'],
            job['work_year'],
            job['salary'],
            job['industry_field'],
            job['city'],
            job['district'],
            job['job_desc'],
        ]
        for job in jobs
    ]

    file_name = FILE_NAME
    is_new_file = not os.path.exists(file_name)
    if is_new_file:
        print("create file: " + file_name)

    # Mode 'a' both creates a missing file and appends to an existing
    # one.  The context manager guarantees the handle is closed even if
    # writing raises (the original leaked the handle on write errors),
    # and a single csv.writer replaces the two the original created.
    with open(file_name, 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        if is_new_file:
            writer.writerow(['职位ID', '职位名称', '学历要求', '工作经验',
                             '薪资', '行业', '城市', '城区', '职位描述'])
        writer.writerows(rows)


def get_job_position_page(search_kd: str, page_no: int, cookies, condition):
    """
    POST the positionAjax endpoint for one page of job listings.

    :param search_kd: search keyword
    :param page_no: page number, 1-based
    :param cookies: cookie jar from get_job_list_cookies
    :param condition: search filter dict
    :return: the requests.Response of the AJAX call
    """
    payload = {
        'first': 'true' if page_no == 1 else 'false',
        'pn': page_no,
        'kd': search_kd,
    }

    target_url = encode_job_list_condition(
        "https://www.lagou.com/jobs/positionAjax.json", condition)

    # The endpoint validates the referer; it must match the list page URL.
    request_headers = headers()
    request_headers['referer'] = get_job_list_cookies_url(search_kd, condition)

    return requests.post(
        target_url,
        data=payload,
        cookies=cookies,
        headers=request_headers,
        timeout=3,
        verify=VERIFY_SSL,
    )


def get_page_info(search_kd: str, condition):
    """
    Query page 1 to learn the paging parameters of the result set.

    :param search_kd: search keyword
    :param condition: search filter dict
    :return: dict with 'page_size', 'total_count' and 'total_page'
    """
    cookies = get_job_list_cookies(search_kd, condition)
    time.sleep(1)
    response = get_job_position_page(search_kd, 1, cookies, condition)

    content = json.loads(response.text)['content']
    page_size = content['pageSize']
    total_count = content['positionResult']['totalCount']

    # Ceiling division: a partial last page still counts as a page.
    full_pages, remainder = divmod(total_count, page_size)
    return {
        'page_size': page_size,
        'total_count': total_count,
        'total_page': full_pages + 1 if remainder else full_pages,
    }


def parse_jd(html) -> str:
    """
    Extract and normalize the job-description text from a detail page.

    :param html: raw HTML of a job detail page
    :return: the description as one whitespace-normalized string
             (empty string when the description node is absent)
    """
    doc = etree.HTML(html)
    path = '//dl[@id="job_detail" and @class="job_detail"]/dd[@class="job_bt"]/div[@class="job-detail"]//text()'
    fragments = doc.xpath(path)

    # Strip each fragment, replace non-breaking spaces, and collapse
    # runs of whitespace into single spaces before concatenating.
    return ''.join(
        re.sub(r"\s+", u" ", fragment.strip().replace(u'\xa0', u' '))
        for fragment in fragments
    )


def get_job_base_info(jobs: list, page_no: int, search_kd: str, condition):
    """
    Fetch one page of the position list and append the base fields of
    every position to *jobs* in place ('job_desc' is left as None and
    filled in later by get_job_details).

    :param jobs: list to append job dicts to (mutated)
    :param page_no: page number, 1-based
    :param search_kd: search keyword
    :param condition: search filter dict
    """
    cookies = get_job_list_cookies(search_kd, condition)
    response = get_job_position_page(search_kd, page_no, cookies, condition)

    content = json.loads(response.text)['content']
    show_id = content['showId']

    for position in content['positionResult']['result']:
        jobs.append({
            "position_id": position['positionId'],
            "position_name": position['positionName'],
            "education": position['education'],
            "work_year": position['workYear'],
            "salary": position['salary'],
            "industry_field": position['industryField'],
            "city": position['city'],
            "district": position['district'],
            "show_id": show_id,
            "job_desc": None,
        })


def get_job_details(jobs: list, search_kd: str, condition):
    """
    Fetch the detail page of every job in *jobs* and fill in each
    job's 'job_desc' field in place.

    :param jobs: job dicts from get_job_base_info (must carry
        'position_id' and 'show_id')
    :param search_kd: search keyword, used when refreshing cookies
    :param condition: search filter dict
    """
    cookies = get_job_detail_cookies(search_kd, condition)

    # NOTE(review): a fixed browser-like header set is used here instead
    # of headers() — presumably the detail pages are stricter about the
    # User-Agent; confirm whether the randomized headers would also work.
    _headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7"
    }

    cookie_use_times = 0
    for index, job in enumerate(jobs):

        position_id = job['position_id']
        show_id = job['show_id']
        print("===> 开始获取职位ID: [{}] 的职位描述信息".format(job['position_id']))

        jd_url = 'https://www.lagou.com/jobs/{}.html?show={}'.format(position_id, show_id)

        # allow_redirects=False so that an anti-crawler 302 (redirect to
        # the login page) can be detected and handled below.
        response = requests.get(jd_url, cookies=cookies, headers=_headers, allow_redirects=False, verify=VERIFY_SSL)

        if response.status_code == 302:
            print("warning: 302, url: {}".format(jd_url))
            # Back off for a few seconds, refresh the cookies and retry
            # the same URL once.
            time.sleep(random.randint(5, 8))
            print("重新获取cookie")
            cookies = get_job_detail_cookies(search_kd, condition)
            response = requests.get(jd_url, cookies=cookies, headers=_headers, allow_redirects=False, verify=VERIFY_SSL)

        response.encoding = 'utf-8'
        desc = parse_jd(response.text)
        if desc is None or len(desc) <= 0:
            print("warning: 没有找到 职位ID: [{}] 职位描述信息内容!".format(position_id))
        else:
            print("职位ID: [{}] 的职位描述信息内容 : {}".format(position_id, desc))
        job['job_desc'] = desc
        print("<=== 完成获取职位ID: [{}] 的职位描述信息".format(position_id))

        # Originally the cookie was refreshed after every two uses to
        # avoid the 302 redirect to the login page; kept for reference.
        # cookie_use_times += 1
        # if cookie_use_times == 2:
        #     print("重新获取cookie")
        #     cookies = get_job_detail_cookies(search_kd, condition)
        #     cookie_use_times = 0

        # Random delay between detail requests (skipped after the last job).
        if not index == len(jobs) - 1:
            time.sleep(random.randint(2, 5))


class LagouSpider(object):
    """Spider that crawls Lagou job listings for a keyword and saves them to CSV."""

    def run(self, search_kd: str, condition):
        """
        Run the spider: discover paging info, then crawl, enrich and
        persist every result page.

        :param search_kd: search keyword; must be non-empty
        :param condition: search filter dict (see encode_job_list_condition)
        :raises Exception: when search_kd is missing or empty
        """
        if search_kd is None or len(search_kd) <= 0:
            raise Exception('查询条件中的搜索关键字不能为空')

        # Discover paging information first.
        print("获取分页信息.")
        page_info = get_page_info(search_kd, condition)

        # BUG FIX: this guard previously ran AFTER page_info was
        # dereferenced for logging, so a None result crashed before the
        # check could fire — it must come before any access.
        if page_info is None or page_info['total_count'] == 0:
            print("没有找到分页信息, search_kd: [{}]".format(search_kd))
            return

        print('获取到分页信息, page_size:{}, total_count:{}, total_page:{}'
              .format(page_info['page_size'], page_info['total_count'], page_info['total_page']))

        print('开始采集数据.')

        # Pause before hitting the paging endpoint (anti-crawler courtesy).
        time.sleep(5)

        start_page_no = 1
        last_page_no = page_info['total_page']

        # Crawl page by page: base info -> detail descriptions -> persist.
        for page_no in range(start_page_no, last_page_no + 1):
            jobs = []  # job dicts collected for this page

            print('获取第 [{}] 页职位分页数据.'.format(page_no))

            # Base fields from the list endpoint.
            get_job_base_info(jobs, page_no, search_kd, condition)

            # Back off before fetching each job's detail page.
            time.sleep(random.randint(5, 8))

            get_job_details(jobs, search_kd, condition)

            save_csv(jobs)

            print('完成第 [{}] 页数据的处理.'.format(page_no))

        print("完成...")
