import requests
import time
import json
import pymysql
import xlwt
import random

#爬取拉勾网数据分析岗位的信息.
#岗位名称(positionName),公司名称(companyFullName),公司大小(companySize),学历(education),公司是否上市融资(financeStage)
#发表时间(lastLogin),公司地址(positionAddress),岗位职责(positionDetail),岗位标签(positionLables),薪资(salary)
# Module-level MySQL connection shared by get_info().
# NOTE(review): credentials are hard-coded in source — move to env vars/config.
coon = pymysql.connect(user='root', password='33570638', charset='utf8', host='localhost',
                       db='pythondata', port=3306)  # connect to the local 'pythondata' database
cursor = coon.cursor()  # cursor used by get_info() for the INSERT statements
def get_info(url, formData):
    """Fetch one page of Lagou job postings and persist them to MySQL.

    POSTs ``formData`` to the positionAjax endpoint through a warmed-up
    session (Lagou requires the cookies set by a prior GET), inserts each
    posting into the ``lagoushujufenxi`` table via the module-level
    ``cursor``/``coon``, and returns the page's postings.

    Args:
        url: the positionAjax.json endpoint URL.
        formData: dict with 'first', 'pn' (page number), 'kd' (keyword), 'city'.

    Returns:
        list of per-job field lists, one entry per posting on the page.
    """
    MY_USER_AGENT = [
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)"
    ]
    # BUG FIX: the original built a dict literal with five identical 'https'
    # keys, so only the last proxy was ever used.  Keep a pool and pick one
    # at random per call so requests are actually spread across the proxies.
    PROXY_POOL = [
        'https://182.38.232.79:4210',
        'https://49.83.210.250:4232',
        'https://121.226.187.59:4245',
        'https://49.82.174.178:4247',
        'https://125.106.138.71:4245',
    ]
    proxies = {'https': random.choice(PROXY_POOL)}
    headers = {
        'User-Agent': random.choice(MY_USER_AGENT),
        'referer': 'https://www.lagou.com/wn/jobs?px=new&pn=1&cl=false&fromSearch=true&labelWords=sug&suginput=%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90&kd=%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90&city=%E5%B9%BF%E5%B7%9E',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'
    }
    time.sleep(5)  # throttle to reduce the chance of tripping anti-scraping
    sss = requests.session()  # session so cookies persist between GET and POST
    sss.headers.update(headers)
    # Warm-up GET: Lagou rejects the Ajax POST unless the session already
    # carries the cookies this search page sets.
    sss.get('https://www.lagou.com/wn/jobs?cl=false&fromSearch=true&labelWords=sug&suginput=%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90&kd=%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90&city=%E5%B9%BF%E5%B7%9E&pn=1',
            headers=headers)
    # Submit the search form through the warmed-up session.
    res = sss.post(url=url, headers=headers, data=formData, proxies=proxies)
    result = res.json()
    info = result['content']['positionResult']['result']  # list of job dicts
    print(len(info))
    info_list = []  # postings collected from this page
    for job in info:
        information = [
            job['positionName'],
            job['companyFullName'],
            job['companySize'],
            job['education'],
            job['financeStage'],
            job['lastLogin'],
            job['positionAddress'],
            job['positionDetail'],
            job['positionLables'],
            job['salary'],
        ]
        # Parameterized INSERT (column names 'postionDetail'/'postionLables'
        # match the existing table schema, typos included).
        cursor.execute("insert into lagoushujufenxi(positionName,companyFullName,companySize,education,"
                       "financeStage,lastLogin,positionAddress,postionDetail,postionLables,salary)"
                       "values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(job['positionName'],job['companyFullName'],job['companySize'],
                       job['education'],job['financeStage'],job['lastLogin'],job['positionAddress'],job['positionDetail'],
                       str(job['positionLables']),job['salary'] ))
        coon.commit()  # commit each row so a later failure loses at most one page
        info_list.append(information)
    # BUG FIX: the return was indented inside the loop, so only the first
    # posting of every page was ever returned (and only one row inserted
    # before returning on pages with >=1 job).  Return after the full loop.
    return info_list

#爬取页数
if __name__ == '__main__':
    page = int(input("请输入要爬取的总页数:   "))  # number of result pages to scrape
    info_result = []  # all scraped postings, headed by the column-title row
    title = ['岗位名称', '公司名称', '公司大小', '学历', '公司是否上市融资', '发表时间', '公司地址', '岗位职责', '岗位标签', '薪资']
    info_result.append(title)
    url = 'https://www.lagou.com/jobs/v2/positionAjax.json'  # Ajax endpoint (loop-invariant, hoisted)
    for num in range(1, page + 1):  # one request per result page
        time.sleep(5)  # pause between pages to stay under rate limits
        print("正在爬取第{}页".format(num))
        formData = {  # search form submitted for this page
            'first': 'true',
            'pn': num,
            'kd': '数据分析',
            'city': '广州'
        }
        try:
            info = get_info(url, formData)
            info_result.append(info)
        except Exception as msg:
            # BUG FIX: the original '爬取第%页出现问题' % (num) is an invalid
            # printf format ('%页') and raised ValueError inside this handler,
            # crashing the crawl instead of skipping the bad page.  Also report
            # the caught exception so failures are diagnosable.
            print('爬取第{}页出现问题: {}'.format(num, msg))
    # TODO(review): the title comment promises an Excel export (xlwt is
    # imported) but no export code is present here — confirm against the
    # rest of the file before relying on info_result being written anywhere.
