#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/11/11 11:39
# @Author  : lizhen
# @Site    : 
# @File    : 001.python_yz.py
# @Software: PyCharm
import configparser
import os
import random
import sys
import time
import traceback
from threading import Thread

import pymysql
import requests
import urllib3
from lxml import etree

from Utils.ExtractInfo import extract_info
from Utils.Log import mylog
from Utils.PrepareInfo import prepare_info
from Utils.Time import mytime


class BossJob:
    """Crawler for Boss Zhipin (zhipin.com) job listings.

    For a given city and search keyword it walks the paginated list pages,
    parses every matching job card, fetches each job's detail page, and
    persists the merged record into a MySQL table named after the keyword.
    A background thread keeps the proxy pool fresh while crawling.
    """

    # city display name -> site city code used in the listing URL
    city_scity = {'扬州': {'scity': '101190600'}, '南京': {'scity': '101190100'},
                  '上海': {'scity': '101020100'}, '杭州': {'scity': '101210100'},
                  '苏州': {'scity': '101190400'}, }
    # search keyword -> site category code used in the listing URL
    subject_code = {
        'python': '100109',
    }
    origin_path = os.getcwd()
    # NOTE(review): assumes a Windows-style working directory ('\\' separators)
    # and derives the sibling Utils directory two levels up — confirm on other OSes.
    new_path = '/'.join(origin_path.split('\\')[:-2]) + '/Utils'
    mylog.basicConfig('debug')
    # list-page URL template; emptied out in run() once the last page is reached
    first_url = 'https://www.zhipin.com/c{scity}-p{code}/?page={page}&ka=page-{page}'
    # path of an ini file holding DB credentials, supplied via the environment
    user_pswd_cfg = os.environ['user_pswd']
    cf = configparser.ConfigParser()
    cf.read(user_pswd_cfg, encoding='utf-8')
    mysql_user = cf.get('MySQL', 'MySQL_user')
    mysql_pswd = cf.get('MySQL', 'MySQL_pswd')
    config = {
        'host': 'localhost',
        'user': mysql_user,
        'password': mysql_pswd,
        'database': 'boss',
        'port': 3306,
        'charset': 'utf8',
    }
    # cached column names of the target table; filled lazily on first insert
    table_fields = []

    def update_proxies(self):
        """Background task: refresh the proxy pool every three minutes, forever."""
        os.chdir(self.new_path)
        while True:
            time.sleep(3 * 60)
            prepare_info.save_proxies()

    def get_init_params(self, location, page, keyword, flag=True):
        """Pick a fresh user-agent/proxy pair for the next request.

        :param location: city display name (key of ``city_scity``)
        :param page: page number substituted into the list URL
        :param keyword: search keyword (key of ``subject_code``)
        :param flag: True on the first call only — also builds ``first_url``
                     and caches ``url_prefix`` (scheme+host) for relative links
        :return: (user_agent headers dict, proxies dict) as produced by extract_info
        """
        os.chdir(self.new_path)
        user_agent = extract_info.get_useragent()
        extract_info.ua_default = user_agent
        proxy = extract_info.get_proxy()
        if flag:
            self.first_url = self.first_url.format(scity=self.city_scity[location]['scity'],
                                                   code=self.subject_code[keyword], page=page)
            self.url_prefix = '/'.join(self.first_url.split('?')[0].split('/')[:-2])
        return user_agent, proxy

    def parse_time(self, _time):
        """Normalize a relative publish-time string to an absolute Chinese date.

        '' stays empty; 'HH:MM' means today; '昨天' (yesterday) maps to the
        previous day; a bare month/day gets the current year, or the previous
        year when that date would otherwise lie in the future.
        """
        if '' == _time:
            _time = ''
        elif ':' in _time:
            _time = '{date} {time}'.format(date=mytime.get_ymd_zh(), time=_time)
        elif '昨天' == _time:
            _time = mytime.get_ymd_before_zh(-1)
        else:
            now_time = mytime.get_ymd_zh()
            new_time = mytime.get_y_zh() + _time
            if new_time > now_time:
                # month/day later than today: the post must be from last year
                _time = str(int(mytime.get_y_zh()[:-1]) - 1) + '年' + _time
            else:
                _time = mytime.get_y_zh() + _time
        return _time

    def parse_listpage(self, node, job_info):
        """Extract the list-page fields of one job card into *job_info*.

        Each xpath is evaluated once and cached locally (the original code
        evaluated every expression twice). Fields that may be absent fall
        back to ''; experience/education/employer-title are assumed present.
        """
        titles = node.xpath('.//div[@class="job-title"]/text()')
        salaries = node.xpath('.//span[@class="red"]/text()')
        primary = node.xpath('./div[1]/p[1]/text()')          # city / experience / education
        com_names = node.xpath('./div[2]//a/text()')
        com_info = node.xpath('./div[2]//p/text()')           # type [/ finance] / size
        employers = node.xpath('./div[3]//h3[@class="name"]/text()')
        times = node.xpath('./div[3]//p/text()')

        job_info['job-title'] = titles[0] if titles else ''
        job_info['job-salary'] = salaries[0] if salaries else ''
        job_info['job-city'] = primary[0].strip() if primary else ''
        job_info['job-experience'] = primary[1].strip()
        job_info['job-education'] = primary[2].strip()
        job_info['com-name'] = com_names[0] if com_names else ''
        job_info['com-type'] = com_info[0].strip() if com_info else ''
        # the finance stage only appears when the <p> holds three text chunks
        job_info['com-finance'] = com_info[1].strip() if len(com_info) == 3 else ''
        job_info['com-size'] = com_info[2].strip() if len(com_info) == 3 else com_info[1].strip()
        job_info['employer'] = employers[0].strip() if employers else ''
        job_info['employer-title'] = employers[1].strip()
        # drop the three-character "posted at" prefix before parsing the date
        _time = times[0].strip()[3:] if times else ''
        job_info['job-date'] = self.parse_time(_time)

    def parse_detail(self, node, job_info):
        """Extract one detail-page section into *job_info*.

        Dispatches on the section's <h3> heading text; unknown headings and
        the competitiveness-analysis section are ignored.
        """
        name = node.xpath('./h3/text()')[0]
        if name == '职位描述':  # job description
            job_info['job-detail'] = ';'.join([item.strip('\n').strip() for item in node.xpath('./div/text()')])
        elif name == '团队介绍':  # team introduction
            job_info['team-detail'] = ','.join(node.xpath('.//span/text()'))
        elif name == '公司介绍':  # company introduction
            job_info['com-detail'] = ';'.join([item.strip('\n').strip() for item in node.xpath('./div/text()')])
        elif name == '竞争力分析':  # competitiveness analysis — not stored
            pass
        elif name == '工商信息':  # business-registration information
            reg_names = node.xpath('./div[@class="name"]/text()')
            job_info['register-name'] = reg_names[0] if reg_names else ''
            # list items in fixed order: owner / finance / date / company / status
            reg_keys = ('register-owner', 'register-finance', 'register-date',
                        'register-com', 'register-status')
            for idx, key in enumerate(reg_keys, start=1):
                item = node.xpath('./div[2]/li[{idx}]/text()'.format(idx=idx))
                job_info[key] = item[0].strip() if item else ''
        elif name == '工作地址':  # work address
            # NOTE(review): no guard here — assumes the address node always exists
            job_info['job-location'] = node.xpath('.//div[@class="location-address"]/text()')[0].strip()

    def save_to_mysql(self, job_info, keyword):
        """Insert one job record into the MySQL table named after *keyword*.

        The table's column list is queried once and cached in
        ``self.table_fields``; missing fields are stored as ''.  On any
        database error the transaction is rolled back, the traceback is
        logged, and the process exits with status 1.
        """
        conn = None
        csr = None
        try:
            conn = pymysql.connect(**self.config)
            csr = conn.cursor()
            if not self.table_fields:
                # lazily fetch and cache the target table's column names;
                # values are bound as parameters instead of string-formatted
                select_sql = ('select COLUMN_NAME from information_schema.columns '
                              'where table_name=%s and TABLE_SCHEMA=%s')
                sel_result = csr.execute(select_sql, (keyword, self.config['database']))
                mylog.setLog('debug', 'select {sel_result} field(s)'.format(sel_result=sel_result))
                self.table_fields.extend(field[0] for field in csr.fetchall())
            # BUG FIX: the table name was hard-coded to 'python', breaking any
            # other keyword.  Identifiers cannot be bound as parameters, but
            # *keyword* is restricted to subject_code keys upstream, so
            # formatting it in is safe here.
            insert_sql = 'INSERT INTO {table} values({placeholder}%s)'.format(
                table=keyword, placeholder='%s, ' * (len(self.table_fields) - 1))
            params = [job_info.get(field, '') for field in self.table_fields]
            ins_result = csr.execute(insert_sql, params)
            mylog.setLog('debug', 'insert {ins_result} record(s)'.format(ins_result=ins_result))
            conn.commit()
        except Exception:
            # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
            if conn:
                conn.rollback()
            mylog.setLog('error', traceback.format_exc())
            sys.exit(1)
        finally:
            if csr:
                csr.close()
            if conn:
                conn.close()

    def run(self, location='上海', start_page=1, end_page=1, keyword='python', flag=True):
        """Crawl list pages from *start_page* until *end_page* (or the last
        page when *end_page* is falsy), parse each job located in *location*,
        fetch its detail page, and persist the merged record to MySQL.

        :param location: city display name (key of ``city_scity``)
        :param start_page: first list page to request
        :param end_page: inclusive upper page bound; None/0 means "until the end"
        :param keyword: search keyword (key of ``subject_code``)
        :param flag: kept for interface compatibility; unused here
        """
        if end_page and start_page > end_page:
            mylog.setLog('error', '页数设置错误！')
            return
        # Refresh the proxy pool in the background.  BUG FIX: daemon=True —
        # the worker loops forever, and a non-daemon thread kept the process
        # alive despite the sys.exit(0) below.
        thread = Thread(target=self.update_proxies, daemon=True)
        thread.start()
        job_list = []
        while self.first_url != '':
            try:
                # request the list page with a fresh user-agent/proxy pair
                user_agent, proxy = self.get_init_params(location, start_page, keyword)

                mylog.setLog('debug', 'listpage_first: {listpage_first}'.format(listpage_first=self.first_url))
                listpage_resp = requests.get(self.first_url, headers=user_agent, proxies=proxy)
                listpage_html = etree.HTML(listpage_resp.text)
                listpage_cur = listpage_html.xpath('//*[@ka="page-cur"]/text()')[0]

                mylog.setLog('debug', 'listpage_cur: {listpage_cur}'.format(listpage_cur=listpage_cur))

                listpage_next_url = self.url_prefix + listpage_html.xpath('//*[@ka="page-next"]/@href')[0]
                listpage_next_flag = listpage_html.xpath('//*[@class="next disabled"]')
                nodes = listpage_html.xpath('//*[@id="main"]/div/div[2]/ul/li/div')
                for node in nodes:
                    # skip postings whose city does not match the requested location
                    if node.xpath('./div[1]/p[1]/text()')[0].strip().split(' ')[0] != location:
                        continue
                    job_info = {}
                    self.parse_listpage(node, job_info)
                    detail_url = self.url_prefix + node.xpath('./div[1]/h3/a/@href')[0]

                    mylog.setLog('debug', 'detail_url: {detail_url}'.format(detail_url=detail_url))

                    user_agent, proxy = self.get_init_params(location, start_page, keyword, flag=False)
                    detail_resp = requests.get(detail_url, headers=user_agent, proxies=proxy)
                    detail_html = etree.HTML(detail_resp.text)
                    # last five sidebar sections are boilerplate; renamed the
                    # loop variable so it no longer shadows the outer `node`
                    detail_nodes = detail_html.xpath('//*[@id="main"]/div[3]/div/div[2]/div[3]/div')[:-5]
                    for detail_node in detail_nodes:
                        self.parse_detail(detail_node, job_info)
                    job_list.append(job_info)
                    self.save_to_mysql(job_info, keyword)
                    # polite random delay between detail requests
                    time.sleep(random.randint(1, 5))
            except (ConnectionRefusedError, urllib3.exceptions.NewConnectionError,
                    urllib3.exceptions.MaxRetryError, requests.exceptions.ProxyError, TimeoutError,
                    IndexError):
                # transient network/proxy failure: log, back off, retry same page
                mylog.setLog('error', traceback.format_exc())
                time.sleep(30)
                continue
            # BUG FIX: listpage_cur is page text (str); compare numerically —
            # the original `listpage_cur >= end_page` raised TypeError on Py3
            if end_page and int(listpage_cur) >= end_page:
                break
            # stop once the "next" button is disabled (last page reached)
            self.first_url = listpage_next_url if len(listpage_next_flag) == 0 else ''
        mylog.setLog('debug', 'task completed!')
        sys.exit(0)


def main():
    """Entry point: crawl every available page with the default city/keyword."""
    crawler = BossJob()
    crawler.run(end_page=None)


# Script entry point — run the crawler only when executed directly.
if __name__ == '__main__':
    main()
