# coding=utf-8
import urllib.request
from bs4 import BeautifulSoup
from urllib.parse import quote
import csv
import pymysql
import sys
import time

# Database configuration
# NOTE(review): credentials are hard-coded in source; consider loading them
# from environment variables or a config file instead.
host = '127.0.0.1' # database host (localhost)
user = 'root' # username
password = '123456' # password
database = 'recruitment_crawler' # database name
charset = 'utf8' # connection character set

# Open the database connection at import time. `conn` and `cursor` are
# module-level handles shared by Main.index() and closed in the __main__ block.
conn = pymysql.connect(host=host, user=user, password=password
                           , database=database, charset=charset)
# DictCursor returns rows as dicts; writes below only use execute/commit.
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)

class Main:
	"""Crawl job listings from liepin.com search results and persist them
	into the `crawler_job` MySQL table via the module-level connection."""

	@staticmethod
	def _parse_salary(job_salary):
		"""Parse a raw salary string such as '10k-15k·13薪'.

		Returns a ``(min_wage, max_wage, final_wage)`` tuple where the wages
		are floats (yuan per month) when a range is present, otherwise the
		literal '面议' (negotiable); ``final_wage`` is the year-end bonus
		suffix (e.g. '13薪') or '' when absent.
		"""
		min_wage = "面议"
		max_wage = "面议"
		final_wage = ""
		# A '·' separates the monthly range from the year-end bonus part.
		if "·" in job_salary:
			job_salary, final_wage = job_salary.split("·", 1)
		if "-" in job_salary:
			low, high = job_salary.split("-", 1)
			# Strip the 'k' suffix from BOTH bounds: the original code only
			# stripped it from the upper bound and crashed on '10k-15k'.
			min_wage = float(low.strip().strip("k")) * 1000
			max_wage = float(high.strip().strip("k")) * 1000
		return min_wage, max_wage, final_wage

	def index(self, keyboard, i):
		"""Fetch result page `i` for search keyword `keyboard` and insert
		every job listing found into the database.

		Returns None. Silently returns early when the result container is
		not present in the fetched page (e.g. blocked or empty page).
		"""
		# Build the search URL; the keyword must be percent-encoded.
		link = "https://www.liepin.com/zhaopin/?key=" + quote(keyboard) + "&currentPage=" + str(i)
		print(link)
		# Spoof a browser User-Agent so the site serves the normal HTML page.
		headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:47.0) Gecko/20100101 Firefox/47.0"}
		req = urllib.request.Request(link, headers=headers)
		response = urllib.request.urlopen(req)
		# Decode explicitly as UTF-8 to avoid mojibake.
		html = response.read().decode("utf-8")
		soup = BeautifulSoup(html, 'html.parser')
		# All job listings live inside this container div.
		sojob_result = soup.find("div", class_='left-list-box')
		if sojob_result is None:
			return

		items = sojob_result.find_all("li")
		sql = '''insert into crawler_job (
		job_name,requirement,job_city,job_place,job_experience,job_education,
		min_wage,max_wage,final_wage,company_name,company_industry,number_of_company_personnel
		)
		values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'''
		for item in items:
			title_box = item.find("div", class_='job-title-box')
			if title_box is None:
				# Not a job card (e.g. an ad or separator) — skip it.
				continue

			# Job title.
			job_name = title_box.find("div", class_='ellipsis-1').get_text().strip()

			# Requirement tags joined with ',' (same separator as before).
			labels_tags = item.find_all("span", class_='labels-tag')
			requirement = ",".join(tag.get_text().strip() for tag in labels_tags)

			# Location text is "<city>-<district>"; when there is no '-',
			# city and place end up identical (split()[0] == split()[-1]).
			work_city = title_box.find("span", class_='ellipsis-1').get_text().strip().split("-")
			job_city = work_city[0]
			job_place = work_city[-1]

			# The first two tags are experience and education; guard against
			# listings with fewer tags (original code raised IndexError).
			job_experience = labels_tags[0].get_text().strip() if len(labels_tags) > 0 else ""
			job_education = labels_tags[1].get_text().strip() if len(labels_tags) > 1 else ""

			# Salary: a missing element is treated as "negotiable".
			salary_tag = item.find("span", class_='job-salary')
			salary_text = salary_tag.get_text().strip() if salary_tag is not None else ""
			min_wage, max_wage, final_wage = self._parse_salary(salary_text)

			# Company name.
			company_name = item.find("span", class_='company-name').get_text().strip()

			# Company industry / headcount come from the tags box when present.
			company_industry = ""
			number_of_company_personnel = ""
			tags_box = item.find("div", class_='company-tags-box ellipsis-1')
			if tags_box is not None:
				company_tags = tags_box.find_all("span")
				company_industry = company_tags[0].get_text().strip()
				number_of_company_personnel = company_tags[-1].get_text().strip()

			job = (
				job_name,
				requirement,
				job_city,
				job_place,
				job_experience,
				job_education,
				min_wage,
				max_wage,
				final_wage,
				company_name,
				company_industry,
				number_of_company_personnel,
			)
			# Parameterized insert — values are escaped by the driver.
			cursor.execute(sql, job)
			conn.commit()

if __name__ == "__main__":
	# Usage: python <script> <keyword> <page_count>
	keyboard = sys.argv[1]
	pages = sys.argv[2]
	crawler = Main()
	try:
		# liepin pages are addressed via a 0-indexed currentPage parameter.
		for page in range(int(pages)):
			crawler.index(keyboard, page)
			# Throttle requests to be polite to the server (`time` was
			# imported but previously unused).
			time.sleep(1)
	finally:
		# Always release DB resources, even if a page fails mid-crawl
		# (the original code leaked the connection on any exception).
		cursor.close()
		conn.close()