# -*- coding: UTF8 -*-
# Author: rwq
import collections
import json
import xlsxwriter as xlsxwriter
from utils import get_header
import requests
import queue
import threading
import os
import csv
import re
import pymysql
import matplotlib.pyplot as plt
from openpyxl import load_workbook

def formatyue(salary_text):
    """Convert a yearly salary range '低-高万/年' to a monthly one '万/月'.

    Example: "12-24万/年" -> "1.0-2.0万/月".  Strings that do not contain
    '万/年', or that carry a single value instead of a low-high range, are
    returned unchanged (the original raised IndexError on the latter).

    :param salary_text: raw salary string scraped from 51job
    :return: salary string normalised to 万/月 where possible
    """
    yearly = re.findall(r'(.*?)万/年', salary_text)
    if not yearly:
        return salary_text
    bounds = re.findall(r'(.*?)-(.*)', yearly[0])
    if not bounds:
        # Single-value salary such as "15万/年": leave it alone.
        return salary_text
    low = float(bounds[0][0]) / 12
    high = float(bounds[0][1]) / 12
    return "{}-{}万/月".format(round(low, 1), round(high, 1))
def formatqian(salary_text):
    """Convert a salary range given in '千/月' to '万/月' (divide by 10).

    Example: "8-10千/月" -> "0.8-1.0万/月".  Strings without '千/月', or
    without a low-high range, are returned unchanged (the original raised
    IndexError on a single-value salary).

    :param salary_text: raw salary string scraped from 51job
    :return: salary string normalised to 万/月 where possible
    """
    qian = re.findall(r'(.*?)千/月', salary_text)
    if not qian:
        return salary_text
    bounds = re.findall(r'(.*?)-(.*)', qian[0])
    if not bounds:
        # Single-value salary such as "9千/月": leave it alone.
        return salary_text
    low = float(bounds[0][0]) / 10
    high = float(bounds[0][1]) / 10
    return "{}-{}万/月".format(round(low, 1), round(high, 1))

def fomatpjun(salary_text):
    """Reduce a '低-高万/月' salary range to its average as a plain number.

    Example: "1.0-2.0万/月" -> "1.5".  A single value ("2万/月") averages
    to itself; strings without '万/月' are returned unchanged.  The
    original indexed the regex result without any guard and raised
    IndexError on every non-matching string (e.g. "150元/天", ""), which
    silently dropped those records in the caller's bare except.

    :param salary_text: salary string, ideally already normalised to 万/月
    :return: average salary (万/月) as a string, or the input unchanged
    """
    monthly = re.findall(r'(.*?)万/月', salary_text)
    if not monthly:
        return salary_text
    bounds = re.findall(r'(.*?)-(.*)', monthly[0])
    if not bounds:
        # Single-value salary: the average is the value itself.
        return "{}".format(round(float(monthly[0]), 1))
    mean = (float(bounds[0][0]) + float(bounds[0][1])) / 2
    return "{}".format(round(mean, 1))

class zhaopin(object):
    """Scraper for 51job (前程无忧) listings.

    Crawls every result page for ``keyword`` in ``city`` with a small
    thread pool, normalises advertised salaries to 万/月, and persists the
    jobs to MySQL, a CSV file and an xlsx workbook (including a salary
    frequency table in columns L/M that showpicuure() plots later).
    """

    def __init__(self, keyword, city='成都', thread=10, path=None):
        """
        :param keyword: search keyword, e.g. 'python'
        :param city: city name, used to look up 51job's numeric area code
        :param thread: number of crawler threads
        :param path: output directory; defaults to the cwd at call time
        """
        self.keyword = keyword
        self.city = city
        self.thread = thread
        self.csv_header = ['职位名称', '薪资（万/月）', '公司名称', '工作地点', '发布时间', '公司链接', '公司类型', '公司行业']
        self.baseurl = 'https://search.51job.com/list/'
        self.header = get_header()
        # Resolve the default here, not in the signature: a default of
        # os.getcwd() is evaluated once at import time, not per call.
        self.path = os.getcwd() if path is None else path
        self.pagequeue = queue.Queue()  # page URLs still to crawl
        self.jobqueue = queue.Queue()   # scraped job dicts

    def _get_city_code(self):
        """Return 51job's 6-character area code for ``self.city``.

        Downloads the site's area-code JS table and slices the code found
        9..3 characters before the city name.  NOTE(review): this
        offset-based slice is fragile — it depends on the exact layout of
        area_array_c.js; confirm against the live file.
        """
        url = 'https://js.51jobcdn.com/in/js/2016/layer/area_array_c.js'
        text = requests.get(url, headers=self.header).text
        pos = text.find(self.city)
        return text[pos - 9:pos - 3]

    def _get_max_page(self):
        """Read "total_page" from the first result page and enqueue the URL
        of every result page onto ``self.pagequeue``."""
        city_code = self._get_city_code()
        url = self.baseurl + '{},000000,0000,00,9,99,{},2,1.html'.format(city_code, self.keyword)
        req = requests.get(url=url, headers=self.header)
        req.encoding = 'gbk'
        response = req.content.decode('gbk')
        max_page = int(re.findall(r'"total_page":"(.*?)"', response)[0])
        for page in range(1, max_page + 1):
            page_url = self.baseurl + '{},000000,0000,00,9,99,{},2,{}.html'.format(city_code, self.keyword, page)
            self.pagequeue.put(page_url)

    def Spider(self):
        """Worker: drain the page queue, extract each job's fields from the
        page's embedded JSON with regexes, and push one dict per job onto
        ``self.jobqueue``."""
        while not self.pagequeue.empty():
            url = self.pagequeue.get()
            print('正在爬取：{}'.format(url))
            req = requests.get(url, headers=get_header())
            req.encoding = 'gbk'
            response = req.content.decode('gbk')
            # Parse each field list once per page. The original re-ran
            # every findall inside the per-job loop (O(jobs * page-size)).
            job_name = re.findall(r'"job_title":"(.*?)"', response)
            company_name = re.findall(r'"company_name":"(.*?)"', response)
            salary = re.findall(r'"providesalary_text":"(.*?)"', response)
            work_area = re.findall(r'"workarea_text":"(.*?)"', response)
            time = re.findall(r'"issuedate":"(.*?)"', response)
            company_type = re.findall(r'"companytype_text":"(.*?)"', response)
            company_hangye = re.findall(r'"companyind_text":"(.*?)"', response)
            detail = re.findall(r'"job_href":"(.*?)"', response)
            # Visit every job on the page. The original hard-coded
            # range(1, 12), which skipped the first job and capped at 11.
            for i in range(len(job_name)):
                try:
                    raw_salary = salary[i].replace('\\/', '/')
                    if not raw_salary:
                        # No published salary: skip the record, as the
                        # original effectively did via an exception.
                        continue
                    # Normalise 万/年 and 千/月 to 万/月, then average.
                    money = fomatpjun(formatqian(formatyue(raw_salary)))
                    data = {
                        "职位名称": job_name[i],
                        "薪资（万/月）": money,
                        "公司名称": company_name[i],
                        "工作地点": work_area[i],
                        "发布时间": time[i],
                        "公司链接": detail[i].replace('\\/', '/'),
                        "公司类型": company_type[i],
                        "公司行业": company_hangye[i].replace('\\/', '/'),
                    }
                    self.jobqueue.put(data)
                except Exception:
                    # The field lists can be ragged; drop only the broken
                    # record instead of aborting the page.
                    continue

    def run(self):
        """Crawl all pages with ``self.thread`` workers, then persist the
        results to CSV, an xlsx workbook and the MySQL table jobinfo."""
        self._get_max_page()
        con = pymysql.connect(host='localhost', port=3306, user='root', password='0', database='qcwy', charset='utf8')
        cursor = con.cursor()
        # Write next to the other outputs instead of a hard-coded absolute
        # user path; showpicuure() reads 'python.xlsx' from the cwd.
        workbook = xlsxwriter.Workbook('python.xlsx')
        worksheet = workbook.add_worksheet()
        # Parameterised INSERT — the original interpolated scraped text
        # straight into the SQL string (injection / quoting failures).
        insert_sql = ("INSERT INTO `qcwy`.`jobinfo`(`id`, `job_name`, `salary`, `company_name`, `work_area`, "
                      "`time`, `detail`, `company_type`, `company_hangye`) "
                      "VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s)")
        thread_list = [threading.Thread(target=self.Spider) for _ in range(self.thread)]
        for t in thread_list:
            t.daemon = True  # Thread.setDaemon() is deprecated
            t.start()
        for t in thread_list:
            t.join()
        if os.path.exists(self.path):
            self.path = os.path.join(self.path, 'save_list')
            # The original crashed on open() if save_list did not exist.
            os.makedirs(self.path, exist_ok=True)
            data_list = []
            while not self.jobqueue.empty():
                data_list.append(self.jobqueue.get())
            with open(os.path.join(self.path, 'info——job——{}——zone{}.csv'.format(self.keyword, self.city)), 'w',
                      newline='', encoding='utf-8') as f:
                f_csv = csv.DictWriter(f, self.csv_header)
                f_csv.writeheader()
                f_csv.writerows(data_list)
            # Sheet header row: A..H mirror the CSV columns, L/M hold the
            # salary frequency table consumed by showpicuure().
            for col, title in zip('ABCDEFGH', self.csv_header):
                worksheet.write('{}1'.format(col), title)
            worksheet.write("L1", "工资")
            worksheet.write("M1", "次数")
            salarylist = []
            # Start at sheet row 2; the original iterated from
            # data_list[1] and silently dropped the first record from the
            # sheet, the DB and the salary statistics.
            for row, item in enumerate(data_list, start=2):
                values = [item[key] for key in self.csv_header]
                salarylist.append(item['薪资（万/月）'])
                try:
                    for col, value in zip('ABCDEFGH', values):
                        worksheet.write('{}{}'.format(col, row), value)
                except Exception:
                    pass  # best effort: a bad cell must not kill the run
                try:
                    cursor.execute(insert_sql, tuple(values))
                    con.commit()
                except Exception:
                    con.rollback()
            # Salary frequency table, sorted by the salary *string*
            # (lexicographic order, unchanged from the original).
            counts = collections.Counter(salarylist)
            for row, money in enumerate(sorted(counts), start=2):
                worksheet.write("L{}".format(row), money)
                worksheet.write("M{}".format(row), counts[money])
        workbook.close()
        cursor.close()
        # close the connection
        con.close()


def showpicuure():
    """Read the salary frequency table (columns L/M) from python.xlsx and
    render it as a bar chart, saved to salary.png and shown on screen."""
    salarymoneylist = []
    salarycountlist = []
    # Read the workbook written by zhaopin.run().
    wb = load_workbook('python.xlsx')
    ws = wb.active
    # Column L: salary labels, sorted low to high; drop the header cell.
    for cell in ws['L']:
        if isinstance(cell.value, str):
            salarymoneylist.append(cell.value)
    salarymoneylist.remove("工资")
    # Column M: number of postings per salary (header is a str, skipped).
    for cell in ws['M']:
        if isinstance(cell.value, int):
            salarycountlist.append(cell.value)
    # Plot.
    plt.style.use('ggplot')
    customers_index = range(len(salarymoneylist))
    # Wide, low figure so the many salary labels stay readable.
    fig = plt.figure(figsize=(15, 3), dpi=100)
    ax1 = fig.add_subplot(1, 1, 1)
    ax1.bar(customers_index, salarycountlist, align='center', color='darkblue')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.yaxis.set_ticks_position('left')
    plt.xticks(customers_index, salarymoneylist, rotation=0, fontsize='small')
    # The x axis carries the salaries and the y axis the counts — the
    # original had the two labels swapped.
    plt.xlabel('salary')
    plt.ylabel('nums')
    # The original title contained an unfilled '{}' placeholder.
    plt.title('Salary—Statistics')
    # Save *this* figure. The original called plt.figure(dpi=80) first,
    # which made a fresh empty figure current, so salary.png was blank.
    fig.savefig('salary.png', bbox_inches='tight')
    plt.show()



def check():
    """Dump every jobinfo row, sorted by salary descending, to gongsi.json
    keyed by the row id.

    NOTE(review): `salary` is stored as text, so ORDER BY sorts it
    lexicographically ("9.0" > "10.0"); confirm whether that is intended.
    """
    con = pymysql.connect(host='localhost', port=3306, user='root', password='0', database='qcwy', charset='utf8')
    cursor = con.cursor()
    sql = "SELECT * FROM `jobinfo` order by salary desc;"
    cursor.execute(sql)
    results = cursor.fetchall()
    cursor.close()
    con.close()
    # Field names in the table's column order after `id`. The original
    # zipped six misordered names against row[2:], so every value landed
    # under the wrong key (e.g. the salary was stored as 'job_name').
    col_names = ['job_name', 'salary', 'company_name', 'work_area', 'time',
                 'detail', 'company_type', 'company_hangye']
    strs = {row[0]: dict(zip(col_names, row[1:])) for row in results}
    # ensure_ascii=False keeps the Chinese text readable in the file.
    result = json.dumps(strs, ensure_ascii=False)
    filename = 'gongsi.json'
    with open(filename, 'w', encoding='utf-8') as file_obj:
        file_obj.write(result)



if __name__ == '__main__':
    keyword = input("请输入关键词")
    city = input("请输入城市")
    # Crawl 51job and persist the results (CSV / xlsx / MySQL).
    spider = zhaopin(keyword=keyword, city=city)
    spider.run()
    # Export the database rows to JSON, ordered by salary.
    check()
    # Read the workbook back and plot the salary distribution.
    showpicuure()







