#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author = 'zoushunli'
# @author_email = '409358374@qq.com'
import os
from urllib import parse

from lxml import etree
import requests
import random
import bs4
from multiprocessing import Pool
import scrapy
import re
import sqlite3
import xlwt




class MyJobSearch(object):
    """Scraper for 51job.com search results.

    Fetches search-result pages, follows each job's detail link, parses both
    with lxml XPath, and persists the collected rows to an .xls workbook
    (xlwt) and/or a SQLite database.
    """

    def __init__(self):
        super(MyJobSearch, self).__init__()
        # Rotate between two desktop User-Agent strings to look less bot-like.
        self.user_agent = [
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0']
        self.headers = {'User-Agent': random.choice(self.user_agent)}
        # Bind frequently used callables once so later calls avoid repeated
        # module attribute lookups.
        self.re_compile = re.compile
        self.requests_get = requests.get
        self.bs4_BeautifulSoup = bs4.BeautifulSoup
        self.xlwt_Workbook = xlwt.Workbook
        self.sqlite3_connect = sqlite3.connect
        self.selector = etree.HTML
        self._name = self.__class__.__name__

    def get_html_text(self, url):
        """Fetch *url* and return the raw response body (bytes).

        Returns '' (falsy) on any network/HTTP error so callers can continue
        on a best-effort basis, matching the original contract.
        """
        html = ''
        try:
            r = self.requests_get(url, headers=self.headers, timeout=10)
            # Was a no-op `r.status_code`; actually fail on HTTP 4xx/5xx now.
            r.raise_for_status()
            # NOTE: `.content` is bytes, so setting r.encoding (as the old
            # code did) had no effect and only triggered slow charset
            # detection; lxml's etree.HTML accepts bytes directly.
            html = r.content
        except Exception as e:
            print('Exception:-->', e)
            print("产生异常")
        return html

    def get_links(self, html_text, link_list):
        """Append the detail-page URL of every job on a result page to
        *link_list* (mutated in place) and return it."""
        # Each job row lives in a div.el under the #resultList container.
        xpath_el = '//*[@id="resultList"]/div[@class="el"]'
        selector = self.selector(html_text)
        for eldiv_el in selector.xpath(xpath_el):
            link_list.append(eldiv_el.xpath('p/span/a/@href')[0])
        return link_list

    def prase_html_data(self, html_text, data_info):
        """Parse one search-result page, fetch each job's detail page, and
        append one dict per job to *data_info* (mutated in place)."""
        xpath_el = '//*[@id="resultList"]/div[@class="el"]'
        selector = self.selector(html_text)
        for num, eldiv_el in enumerate(selector.xpath(xpath_el), start=1):
            salary = eldiv_el.xpath('span[3]/text()')
            record = {
                'job_name': eldiv_el.xpath('p/span/a/@title')[0],
                'company_name': eldiv_el.xpath('span[1]/a/@title')[0],
                'job_link': eldiv_el.xpath('p/span/a/@href')[0],
                'company_link': eldiv_el.xpath('span[1]/a/@href')[0],
                'address': eldiv_el.xpath('span[2]/text()')[0],
                # Salary is missing for some postings; fall back to ''.
                'salary': salary[0] if salary else '',
                'datatime': eldiv_el.xpath('span[4]/text()')[0],
            }
            job_html_text = self.get_html_text(record['job_link'])
            self.prase_job_html_data(job_html_text, record)
            data_info.append(record)
            # Progress indicator: row number within the current page.
            print(num)

    def prase_job_html_data(self, job_html_text, data_list):
        """Parse a job detail page and add the welfare/msg_job/label/
        requirement keys to *data_list* (a per-job dict, mutated in place)."""
        # Absolute XPaths copied from 51job's detail-page layout; brittle by
        # nature, hence the fallbacks and empty-result guards below.
        xp_ltype = '/html/body/div[3]/div[2]/div[2]/div/div[1]/p[2]/@title'
        xp_welfare = '/html/body/div[3]/div[2]/div[2]/div/div[1]/div/div/span/text()'
        xp_job = '/html/body/div[3]/div[2]/div[3]/div[1]/div/p/text()'
        xp_job1 = '/html/body/div[3]/div[2]/div[3]/div[1]/div/text()'
        xp_job2 = '/html/body/div[3]/div[2]/div[3]/div[1]/div/p[1]/span/text()'
        xp_label = '/html/body/div[3]/div[2]/div[3]/div[2]/div/p/text()'

        selector = self.selector(job_html_text)
        welfare = selector.xpath(xp_welfare)
        # Three alternative locations for the job description text.
        msg_job = (selector.xpath(xp_job)
                   or selector.xpath(xp_job1)
                   or selector.xpath(xp_job2))
        label = selector.xpath(xp_label)
        # Guard: the old code indexed [0] unconditionally and raised
        # IndexError whenever the page layout differed.
        ltype_hits = selector.xpath(xp_ltype)
        msg_ltype = ltype_hits[0] if ltype_hits else ''
        # The @title attribute looks like "city | experience | degree | ...";
        # split on '|', trim, and drop fragments made up only of '发布'.
        parts = [x.strip() for x in msg_ltype.split('|') if x.replace('发布', '')]

        data_list['welfare'] = welfare[0] if welfare else ''
        # Was IndexError when all three description XPaths missed.
        data_list['msg_job'] = msg_job[0] if msg_job else ''
        data_list['label'] = label[0] if label else ''
        data_list['requirement'] = ';'.join(parts)

    def save_html_data_2_xlwt(self, data_list, path=None):
        """Write *data_list* (list of row dicts) to an .xls workbook.

        path: target file; when None, the sheet and file are named after the
        class and saved to the working directory.
        """
        if path:
            name = path.rsplit('/', 1)[-1].split('.')[0]
        else:
            name = self._name
        work_book = self.xlwt_Workbook(encoding="utf-8", style_compression=0)
        work_sheet = work_book.add_sheet(name, cell_overwrite_ok=True)
        # Column order; keys match the dicts built in prase_html_data.
        # (The old code also defined a Chinese header tuple that was dead —
        # it was immediately overwritten and never written out.)
        columns = ('job_name', 'company_name', 'salary', 'address',
                   'job_link', 'company_link', 'datatime', 'welfare',
                   'msg_job', 'label', 'requirement')
        for col, title in enumerate(columns):
            work_sheet.write(0, col, title)
        for row, record in enumerate(data_list, start=1):
            for col, key in enumerate(columns):
                work_sheet.write(row, col, record[key])
        work_book.save(path if path else name + '.xls')

    def create_table(self, table_name, db_path):
        """Create *table_name* in the SQLite database at *db_path*.

        Raises sqlite3.OperationalError if the table already exists.
        """
        sql = '''
            create table %s
            (id INTEGER PRIMARY KEY AUTOINCREMENT,
            job_name text,
            company_name text,
            salary text,
            address text,
            job_link text,
            company_link text,
            datatime text,
            welfare text,
            msg_job text,
            label text,
            requirement text);
            ''' % table_name
        conn = self.sqlite3_connect(db_path)
        print('Opened database successfully')
        try:
            conn.execute(sql)
            conn.commit()
        finally:
            # Close even when CREATE fails (e.g. table exists).
            conn.close()

    def insert_data(self, data_list, table_name, db_path):
        """Insert every row dict of *data_list* into *table_name*.

        Uses parameterized SQL: the old code concatenated double-quoted
        values into the statement, which broke on embedded quote characters,
        was injection-prone, and mutated the caller's dicts.
        """
        columns = ('job_name', 'company_name', 'salary', 'address',
                   'job_link', 'company_link', 'datatime', 'welfare',
                   'msg_job', 'label', 'requirement')
        # Table/column names cannot be bound as parameters; values are bound.
        sql = 'insert into %s (%s) values (%s)' % (
            table_name, ', '.join(columns), ', '.join('?' * len(columns)))
        conn = self.sqlite3_connect(db_path)
        print('Opened database successfully')
        try:
            cur = conn.cursor()
            cur.executemany(sql, [tuple(d[c] for c in columns) for d in data_list])
            conn.commit()
            cur.close()
        finally:
            conn.close()
        print('成功插入')

    def save_html_data_2_sqlite(self, table_name, data_list, db_path):
        """Ensure *table_name* exists, then insert all rows of *data_list*."""
        try:
            self.create_table(table_name, db_path)
        except sqlite3.OperationalError:
            # Table already exists — fine, just insert. (Was a bare except
            # that silently swallowed every error.)
            pass
        self.insert_data(data_list, table_name, db_path)

def main():
    """Crawl up to 5 result pages of 51job for one keyword and persist the
    collected rows to an .xls workbook and a SQLite database."""
    # Output locations (normalize Windows backslashes once, up front).
    db_path = r'E:\scripts\development_package\Crawler\database/MyJobSearch.db'.replace('\\', '/')
    xls_path = r'E:\scripts\development_package\Crawler\database/MyJobSearch.xls'.replace('\\', '/')
    table_name = 'MyJobSearch'
    # 51job expects the keyword double-percent-encoded inside the URL path.
    keyword = parse.quote(parse.quote('渲染师'))
    # Region codes: Shanghai + Guangzhou (already double-encoded).
    address = r'020000%252C030200'
    # Posting-age filter codes accepted by 51job.
    datatime = {'all': '99', '24hours': '0', '3day': '1', '1week': '2', '1month': '3'}
    # Monthly-salary filter codes (k CNY unless suffixed with w = 10k).
    money = {'all': '99', '2<': '01', '2-3': '02', '3-4.5': '03', '4.5-6': '04', '6-8': '05', '0.8-1w': '06',
             '1-1.5w': '07', '1.5-2w': '08', '2-3w': '09'}
    demo = MyJobSearch()
    data_list = list()
    for page in range(1, 6):
        url = (f'https://search.51job.com/list/{address},000000,0000,00,'
               f'{datatime["all"]},{money["all"]},{keyword},2,{page}.html')
        html = demo.get_html_text(url)
        link_list = demo.get_links(html, [])
        # An empty page means we ran past the last page of results.
        if not link_list:
            print('第 %d 页没有了, 已结束 ' % page)
            break
        print('正在爬第 %d 页, 共有 %d 条' % (page, len(link_list)))
        demo.prase_html_data(html, data_list)

    demo.save_html_data_2_xlwt(data_list, xls_path)
    print('save xls over')
    demo.save_html_data_2_sqlite(table_name, data_list, db_path)
    print('save sqlite over')


if __name__ == "__main__":
    main()

        




