#!/usr/bin/python
# -*- encoding: utf-8 -*-
'''
@File    :   
@Time    :   // ::
@Author  :   Malygos 
@Version :   1.0
@Contact :   751756061@qq.com
@WebSite :   https://gitee.com/malygos_l/python_homework
'''
# Start typing your code from here
'''在51job网站上，爬取2020年发布的Python开发工程师的职位的薪酬，计算北京地区该职位的平均薪酬'''

import requests
from lxml import etree
from format_data import format_salary
from import_data import *
from multiprocessing import Pool


# 爬虫函数
# Scraper worker: fetch one result page and persist its rows.
def spider(i):
    """Scrape page *i* of the 51job search results for Python developer jobs.

    Fetches the page, extracts five fields per listing (job name, company,
    location, salary, posting date) and writes each record to the database
    via ``write``.

    Args:
        i: 1-based page number substituted into the search URL.

    Raises:
        requests.HTTPError: if the server returns a non-2xx status.
    """
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36'
    }

    # Search URL; the format() slot is the page number controlling pagination.
    url = "https://search.51job.com/list/000000,000000,0000,00,9,99,python%25E5%25BC%2580%25E5%258F%2591%25E5%25B7%25A5%25E7%25A8%258B%25E5%25B8%2588,2,{0}.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=".format(
        i)
    malygos_data = requests.get(url, headers=headers)
    # Fail fast on HTTP errors before spending time decoding/parsing the body.
    malygos_data.raise_for_status()
    malygos_data.encoding = malygos_data.apparent_encoding

    # Extract the five fields: job name, company, place, salary, post date.
    analys = etree.HTML(malygos_data.text)
    row_xpath = '//*[@id="resultList"]/div[@class="el"]'  # one div per listing
    jobnames = analys.xpath(row_xpath + '/p/span/a/text()')
    companys = analys.xpath(row_xpath + '/span[1]/a/text()')
    places = analys.xpath(row_xpath + '/span[2]/text()')
    salarys = analys.xpath(row_xpath + '/span[3]/text()')
    times = analys.xpath(row_xpath + '/span[4]/text()')

    for jobname, company, place, salary, time in zip(
            jobnames,
            companys,
            places,
            salarys,
            times,
    ):
        # Strip surrounding whitespace on every field (the original only
        # stripped the job name, leaving ragged text in the database).
        spider_data = {
            "Jobname": jobname.strip(),
            "Company": company.strip(),
            "Place": place.strip(),
            "salary": salary.strip(),
            "time": time.strip()
        }
        write(spider_data)      # persist one record to the database


# 计算北京地区平均薪资
# Compute and print the average salary for the Beijing area.
def caculate_avg():
    """Query Beijing-based positions and print their average monthly salary.

    Reads (place, salary) rows via SQLAlchemy, normalizes each salary string
    with ``format_salary`` and prints the mean in units of 10,000 CNY/month.
    Returns early (with a notice) instead of dividing by zero when no
    Beijing rows exist in the database.
    """
    # Select Beijing rows only; place values start with "北京".
    salarys1 = session.query(User.place, User.salary).filter(
        User.place.like("北京%")).order_by(User.salary).all()

    # Guard: the original crashed with ZeroDivisionError on an empty result.
    if not salarys1:
        print("数据库中没有北京地区的职位数据\n")
        return

    # Rows are tuples, so index 1 is the raw salary text; format_salary
    # converts it to a numeric monthly value.
    sum_salary = sum(format_salary(row[1]) for row in salarys1)
    avg = sum_salary / len(salarys1) / 10000    # mean, in units of 10,000 CNY
    print("北京地区的python开发工程师的平均薪酬是%.1f万元/月\n" % avg)   # report


# 主函数
# Program entry point.
def main():
    """Crawl all 38 result pages in parallel, then report the Beijing average."""
    # Fan the 38 pages out over a pool of 6 worker processes.
    pool = Pool(6)
    for page in range(1, 39):
        pool.apply_async(spider, (page, ))
    pool.close()
    pool.join()
    print("\n爬取数据写入数据库成功！\n")

    # Compute and print the average Beijing salary from the stored rows.
    caculate_avg()


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
