import string
# Import the full urllib.request module (a module, not a class)
import urllib.request
# Import only the quote() helper from urllib.parse
from urllib.parse import quote

import pymysql
from bs4 import BeautifulSoup

def nonspace(s):
    """Strip leading/trailing whitespace and delete all inner spaces/newlines.

    Used to normalize scraped text (salary strings, company names, …) so the
    parsing below never sees stray whitespace.

    Bug fix: the original called .replace("", "") — a no-op, since the
    pattern was an empty string; per the call-site comments the intent was
    to remove inner spaces, i.e. .replace(" ", "").
    """
    return s.strip().replace(" ", "").replace("\n", "")

# City name -> zhaopin.com city code (the value of the "jl" query parameter).
code = {
    '北京': '530',
    '上海': '538',
    '广州': '763',
    '深圳': '765',
    '天津': '531',
    '武汉': '736',
    '西安': '854',
    '成都': '801',
    '沈阳': '599',
    '南京': '635',
    '杭州': '653',
    '苏州': '639',
    '重庆': '551',
    '长沙': '749',
    '厦门': '682',
    '南昌': '691',
}

city = input("请输入城市：")
# Fail fast with a readable message instead of an uninformative KeyError
# when the entered city is not in the mapping.
if city not in code:
    raise SystemExit("不支持的城市: " + city + "（可选: " + "、".join(code) + "）")
city = code[city]

# NOTE(review): "type" shadows the builtin, but later top-level code in this
# file reads it, so the name is kept for compatibility.
type = input("请输入岗位：")

# 路径拼接
url = "https://sou.zhaopin.com/?jl="+city+"&kw="+type+"&p=1"
#如果url有空格 需要用%20来替代空格
#url有中文需要用quote转义

#对输入的中文进行处理
url = quote(url,safe=string.printable)
res = urllib.request.urlopen(url)
#print(url)

#read()取出网页源代码(byte类型数据)
#tong'guodecode()utf8形式

content = res.read().decode()
#print(content)

# 通过BeaUtifulSoup对象解析得到页面内容
# 使用lxml解析器
soup = BeautifulSoup(content, "lxml")
#print(soup)

# 通过select()方法通过类名获取数据
jobitem = soup.select(".joblist-box__item")
#print(jobitem)

# Collected job dicts, one per posting.
job_list = []
for job in jobitem:
    # get_text() concatenates all text inside the selected node.
    job_name = job.select(".jobinfo__name")[0].get_text()

    # Normalize the salary string, then convert it to a monthly CNY figure
    # using the lower bound of the advertised range.
    job_sal = nonspace(job.select(".jobinfo__salary")[0].get_text())
    if '天' in job_sal:
        # Daily wage such as "150-200元/天": lower bound * 30 days.
        job_sal = int(job_sal.split("-")[0]) * 30
    elif '千' in job_sal:
        # e.g. "7千-9千" -> 7000.0. Robustness fix: float() instead of
        # int() so values like "7.5千" no longer raise ValueError.
        job_sal = float(job_sal.split("千")[0]) * 1000
    elif '万' in job_sal:
        # e.g. "1.5万-2万" -> 15000.0.
        job_sal = float(job_sal.split("万")[0]) * 10000
    else:
        # Unparseable salary (e.g. "面议") -> default placeholder.
        job_sal = 5000

    company_name = nonspace(job.select(".companyinfo__name")[0].get_text())

    # The three .jobinfo__other-info-item nodes are, in order:
    # place, work experience, education requirement.
    job_msg = job.select(".jobinfo__other-info-item")

    # Keep only the city before the '·' separator ("北京·朝阳区" -> "北京").
    # Bug fix: the original sliced the normalized string with an index
    # computed on the *un-normalized* string minus one, which dropped a
    # character — and mangled strings with no '·' (find() returns -1).
    job_place = nonspace(job_msg[0].get_text()).split('·')[0]

    job_exp = nonspace(job_msg[1].get_text())
    job_education = nonspace(job_msg[2].get_text())

    job_list.append({
        "job_name": job_name,
        "job_sal": job_sal,
        "company_name": company_name,
        "job_place": job_place,
        "job_exp": job_exp,
        "job_education": job_education,
    })


#创建数据库连接
db = pymysql.connect(
     host='localhost',
     user='root',
     passwd='123456',
     charset='utf8'
 )
#获取操作游标
cursor = db.cursor()
#指定连接的数据库
cursor.execute("use crawlerdemo")
for job in job_list:
     sql = (("insert into job values (null,'%s','%s','%s','%s','%s','%s')") %
            (job["job_name"],job["job_sal"],job["company_name"],job["job_place"],job["job_exp"],job["job_education"]))
     try:
         cursor.execute(sql)
     except Exception as e:
         print(e)
         print("插入数据失败")
         # 事务回滚
         db.rollback()

#事务提交
db.commit()

for job in job_list:
    print(job)









