# a = input("请输入第一个数：")
# b = input("请输入第二个数：")
# sum = a+b
# print(a,'+',b,'=',sum)
# class Person:
#     def sayHello(self):
#         print('Hello,how are you?')
# P =Person()
# P.sayHello()
# import  random
# def exchange(a,i,j):
#     temp=a[i]
#     a[i]=a[j]
#     a[j]=temp
#     def shuffle(a):
#         n=len(a)
#         for i in range(n):
#             r=random.randrange(i,n)
#             exchange(a,i,r)
#             t=[2,3,4,5,6,7]
#             shuffle(t)
#             print(t)
# import csv
# import json
# import re
# import pprint
# import requests
# f = open('python招聘数据1.csv', mode='a', encoding='utf-8', newline='')
# csv_writer = csv.DictWriter(f, fieldnames=[
#     '标题',
#     '公司名字',
#     '城市',
#     '薪资',
#     '招聘信息',
#     '公司属性',
#     '公司规模',
#     '企业性质',
#     '招聘发布日期',
#     '公司详情页',
#     '招聘详情页'])
# csv_writer.writeheader()
# for page in range(1, 11):
#     url = f'https://search.51job.com/list/010000%252C020000%252C030200%252C040000%252C090200,000000,0000,00,9,99,python,2,{page}.html'
#     headers = {
#         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'}
#     response = requests.get(url=url, headers=headers)
#     html_data = re.findall('window.__SEARCH_RESULT__ = (.*?)</script>', response.text, re.S)[0]
#     json_data = json.loads(html_data)
#     for index in json_data['engine_jds']:
#         # pprint.pprint(index)
#         dit = {
#             '标题': index['job_name'],
#             '公司名字': index['company_name'],
#             '城市': index['workarea_text'],
#             '薪资': index['providesalary_text'],
#             '招聘信息': '|'.join(index['attribute_text']),
#             '公司属性': index['companyind_text'],
#             '公司规模': index['companysize_text'],
#             '企业性质': index['companytype_text'],
#             '招聘发布日期': index['issuedate'],
#             '公司详情页': index['company_href'],
#             '招聘详情页': index['job_href'],}
#         csv_writer.writerow(dit)
#         print(dit)

# import  random
# def exchange(a,i,j):
#     temp=a[i]
#     a[i]=a[j]
#     a[j]=temp
#     def shuffle(a):
#         n=len(a)
#         for i in range(n):
#             r=random.randrange(i,n)
#             exchange(a,i,r)
#             t=[2,3,4,5,6,7]
#             shuffle(t)
#             print(t)


# import random
# def exchange(a,i,j):
#     temp=a[i]
#     a[i]=a[j]
#     a[j]=temp
# def shuffle(a):
#     n=len(a)
#     for i in range(n):
#         r=random.randrange(i,n)
#         exchange(a,i,r)
# t=[2,3,4,5,6,7]
# shuffle(t)
# print(t)

# def my_sum4(a,b,*c,**d):
#     total=a+b
#     for n in  c:
#         total=total+n
#     for key in d:
#         total=total+d[key]
#     return  total
# print(my_sum4(1,2))
# print(my_sum4(1,2,3,4,5))
# print(my_sum4(1,2,3,4,5,male=6,female=7))

# def is_prime(n):
#     if n<2:
#         return  False
#     i=2
#     while i * i<=n:
#         if n%i==0:
#             return  False
#         i+=1
#         return  FutureWarning
#     for i in range(100):
#         if is_prime(i):
#             print(i,end='')


import csv#吴冠霖
import json
import re
import pprint
import requests
# Scrape Python job listings from 51job search-result pages and append one
# CSV row per listing. NOTE(review): depends on the third-party `requests`
# package imported at the top of the file.
# `with` guarantees the CSV file is flushed and closed even if a request
# or a missing JSON key raises mid-run (the original leaked the handle).
with open('python招聘数据4.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_writer = csv.DictWriter(f, fieldnames=[
        '标题',
        '公司名字',
        '城市',
        '薪资',
        '招聘信息',
        '公司属性',
        '公司规模',
        '企业性质',
        '招聘发布日期',
        '公司详情页',
        '招聘详情页'])
    # Bug fix: in append mode the original wrote the header on every run,
    # producing duplicate header rows. Only write it when the file is empty.
    if f.tell() == 0:
        csv_writer.writeheader()
    # The listing data is embedded in the page as a JSON blob assigned to
    # window.__SEARCH_RESULT__. Raw string + escaped dots (the original's
    # bare '.' matched any character), compiled once outside the loop.
    result_pattern = re.compile(r'window\.__SEARCH_RESULT__ = (.*?)</script>', re.S)
    # Request headers are loop-invariant; build them once.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'}
    for page in range(1, 20):
        # Page number is the only varying component of the search URL.
        url = f'https://search.51job.com/list/040000%252c200200%252c230300%252c070200%252c070300,000000,4500,00,9,99,python,2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
        response = requests.get(url=url, headers=headers)
        # [0] assumes the blob is present; an anti-bot page would raise
        # IndexError here — acceptable fail-fast for this script.
        html_data = result_pattern.findall(response.text)[0]
        json_data = json.loads(html_data)
        # 'engine_jds' is the list of job postings in the embedded JSON.
        for index in json_data['engine_jds']:
            dit = {
                '标题': index['job_name'],
                '公司名字': index['company_name'],
                '城市': index['workarea_text'],
                '薪资': index['providesalary_text'],
                '招聘信息': '|'.join(index['attribute_text']),
                '公司属性': index['companyind_text'],
                '公司规模': index['companysize_text'],
                '企业性质': index['companytype_text'],
                '招聘发布日期': index['issuedate'],
                '公司详情页': index['company_href'],
                '招聘详情页': index['job_href'],
            }
            csv_writer.writerow(dit)
            print(dit)
