#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: zoushunli
# @author_email: 409358374@qq.com
import os

import requests
import random
import bs4
from multiprocessing import Pool
import scrapy
import re
import sqlite3
import xlwt
from urllib import parse

class MyJobSearch(object):
    """Scrape 51job.com search results and job-detail pages.

    Collects per-job records (name, company, salary, location, ...) and can
    persist them to an .xls workbook; SQLite persistence is stubbed out.
    """

    def __init__(self):
        super(MyJobSearch, self).__init__()
        # A couple of desktop UA strings; one is picked at random per instance
        # to look slightly less bot-like.
        self.user_agent = [
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0']
        self.headers = {'User-Agent': random.choice(self.user_agent)}
        # Frequently used callables bound once (kept from the original design).
        self.re_compile = re.compile
        self.requests_get = requests.get
        self.bs4_BeautifulSoup = bs4.BeautifulSoup
        self.xlwt_Workbook = xlwt.Workbook
        self.sqlite3_connect = sqlite3.connect
        self._name = self.__class__.__name__

    def get_html_text(self, url):
        """Fetch *url* and return the decoded HTML text, or '' on any failure.

        Fixes vs. original: the bare ``r.status_code`` expression had no
        effect — ``raise_for_status()`` now turns HTTP errors into failures;
        the ``return`` was moved out of ``finally``, which used to silently
        swallow every exception; and ``r.text`` is returned so the
        ``apparent_encoding`` assignment actually takes effect (``r.content``
        is raw bytes and ignores ``r.encoding``).
        """
        try:
            r = self.requests_get(url, headers=self.headers, timeout=10)
            r.raise_for_status()  # was: bare `r.status_code` (no-op)
            r.encoding = r.apparent_encoding
            return r.text
        except Exception as e:
            print('Exception:-->', e)
            print("产生异常")
            return ''

    def prase_job_link_html(self, html_text, company_link_list):
        """Parse a search-result page; append one dict per job row.

        (Name keeps the original 'prase' typo for caller compatibility.)

        :param html_text: HTML of a 51job search-result listing page.
        :param company_link_list: output list; each appended dict has keys
            job_name / job_link / company_name / company_link.
        """
        soup = self.bs4_BeautifulSoup(html_text, 'html.parser')
        # Each "div.el" row holds one posting: t1 = job anchor, t2 = company
        # anchor. (The unused t3/t4/t5 selects from the original were dropped.)
        job_anchors = soup.select("div.el > p.t1 > span > a")
        company_anchors = soup.select("div.el > span.t2 > a")
        # zip() keeps job/company pairs aligned even if one list is shorter.
        for job_a, company_a in zip(job_anchors, company_anchors):
            company_link_list.append({
                'job_link': job_a['href'],
                'job_name': job_a['title'],
                'company_link': company_a['href'],
                'company_name': company_a['title'],
            })

    def get_job_info_data(self, company_link_data, job_info_list):
        """Fetch every job-detail page in *company_link_data* and parse each
        one into *job_info_list* via get_job_html_data."""
        for link_data in company_link_data:
            html = self.get_html_text(link_data['job_link'])
            self.get_job_html_data(html, link_data, job_info_list)

    def get_job_html_data(self, html_text, job_link, job_info_list):
        """Parse one job-detail page into a flat dict and append it.

        Fix vs. original: every ``select(...)[0]`` was unguarded and raised
        IndexError whenever the page layout differed (e.g. an error page from
        get_html_text); such pages are now skipped with a message.

        :param html_text: HTML of the detail page ('' when the fetch failed).
        :param job_link: the link dict this page was fetched from (stored
            verbatim under the 'job_link' key, as in the original).
        :param job_info_list: output list of per-job record dicts.
        """
        soup = self.bs4_BeautifulSoup(html_text, 'html.parser')
        title_nodes = soup.select("div.in > div.cn > h1")
        company_nodes = soup.select("div.in > div.cn > p.cname > a.catn")
        msg_nodes = soup.select("div.in > div.cn > p.msg.ltype")
        salary_nodes = soup.select("div.in > div.cn > strong")
        job_msg_nodes = soup.select("div.bmsg.job_msg.inbox")
        if not (title_nodes and company_nodes and msg_nodes
                and salary_nodes and job_msg_nodes):
            print('skip: unexpected page layout for', job_link)
            return
        # The msg/ltype title is a '|'-separated summary, e.g.
        # "上海 | 3-4年经验 | 本科 | 招1人 | 05-20发布".
        msg_ltype = [x.strip() for x in msg_nodes[0]['title'].split('|')]
        # NOTE(review): the exp/education positions assume a 5-part string;
        # middle fields shift when a segment is absent — confirm on real pages.
        info_data = {
            'job_name': title_nodes[0]['title'],
            'company_name': company_nodes[0]['title'],
            'salary': salary_nodes[0].text,
            'number': msg_ltype[-2],
            'education': msg_ltype[2] if len(msg_ltype) > 2 else '',
            'exp': msg_ltype[1] if len(msg_ltype) > 1 else '',
            'address': msg_ltype[0],
            'datatime': msg_ltype[-1],
            'job_link': job_link,
            'company_link': company_nodes[0]['href'],
            'job_msg': job_msg_nodes[0].text,
        }
        job_info_list.append(info_data)

    def save_html_data_2_xlwt(self, data_list, path=None):
        """Write *data_list* (dicts from get_job_html_data) to an .xls file.

        Bug fixed: the original immediately overwrote its header tuple with
        column keys ('welfare', 'msg_job', 'label', ...) that
        get_job_html_data never produces, so every data-row write raised
        KeyError.  Columns now mirror the keys actually collected, and a
        missing key degrades to '' instead of raising.

        :param data_list: list of record dicts.
        :param path: optional output path; defaults to '<ClassName>.xls' in
            the working directory. The sheet is named after the file stem.
        """
        if path:
            name = path.rsplit('/', 1)[-1].split('.')[0]
        else:
            name = self._name
        work_book = self.xlwt_Workbook(encoding="utf-8", style_compression=0)
        work_sheet = work_book.add_sheet(name, cell_overwrite_ok=True)
        # Column order mirrors the dict built in get_job_html_data.
        columns = ('job_name', 'company_name', 'salary', 'number', 'education',
                   'exp', 'address', 'datatime', 'job_link', 'company_link',
                   'job_msg')
        for col, key in enumerate(columns):
            work_sheet.write(0, col, key)
        for row, record in enumerate(data_list, start=1):
            for col, key in enumerate(columns):
                work_sheet.write(row, col, record.get(key, ''))
        work_book.save(path if path else name + '.xls')

    def create_table(self, table_name, db_path):
        # TODO: create the SQLite table (not implemented yet).
        pass

    def insert_data(self, data_list, db_path):
        # TODO: insert record rows into SQLite (not implemented yet).
        pass

    def save_html_data_2_sqlite(self, table_name, data_list, db_path):
        # TODO: persist via create_table + insert_data (not implemented yet).
        pass

    def download_img(self, data_list):
        # TODO: download company images/logos (not implemented yet).
        pass


def main():
    """Crawl 5 pages of 51job search results for one keyword and collect the
    parsed job-detail records.

    Bugs fixed vs. original:
    - ``company_link_list`` and ``job_info_list`` were used without ever
      being defined, so the first loop iteration crashed with NameError.
    - each iteration passed the whole accumulated link list to
      ``get_job_info_data``, re-fetching every earlier page's jobs; detail
      pages are now fetched once per page via a per-page list.
    - the SQLite ``db_path`` mistakenly carried an ``.xls`` extension.
    """
    # SQLite database path (only used by the still-disabled persistence call).
    db_path = r'E:\scripts\development_package\Crawler\database/MyJobSearch.db'.replace('\\', '/')
    table_name = 'MyJobSearch'
    # 51job expects the search keyword URL-quoted twice.
    keyword = parse.quote(parse.quote('渲染师'))
    # Area codes: 020000 = Shanghai, 030200 = Guangzhou
    # ('%252C' is a double-encoded comma separating them).
    address = r'020000%252C030200'
    # Publish-time filter codes understood by the site.
    datatime = {'all': '99', '24hours': '0', '3day': '1', '1week': '2', '1month': '3'}
    # Salary-range filter codes.
    money = {'all': '99', '2<': '01', '2-3': '02', '3-4.5': '03', '4.5-6': '04', '6-8': '05', '0.8-1w': '06',
             '1-1.5w': '07', '1.5-2w': '08', '2-3w': '09'}
    demo = MyJobSearch()
    company_link_list = []  # all link dicts across pages (fix: was undefined)
    job_info_list = []      # all parsed detail records (fix: was undefined)
    for page_num in range(1, 6):
        url = (f'https://search.51job.com/list/{address},000000,0000,00,'
               f'{datatime["all"]},{money["all"]},{keyword},2,{page_num}.html')
        html = demo.get_html_text(url)
        # Parse this page's links into a fresh list so details are fetched
        # exactly once per posting.
        page_links = []
        demo.prase_job_link_html(html, page_links)
        company_link_list.extend(page_links)
        demo.get_job_info_data(page_links, job_info_list)
    # Persistence steps remain disabled, as in the original:
    # demo.save_html_data_2_xlwt(job_info_list)
    # demo.save_html_data_2_sqlite(table_name, job_info_list, db_path)

if __name__ == "__main__":
    main()
