import random
# from pyquery import PyQuery as pq  #本次采用pyquery和re解析数据
import re
import time

import requests  # 请求数据
from bs4 import BeautifulSoup

#
# url = "https://search.51job.com/list/040000,000000,0000,00,9,99,Java,2,1.html"
# head = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# request = urllib.request.Request(url, headers=head)
# time.sleep(1.2)
# html = ""
# try:
#     respone = urllib.request.urlopen(request)
#     html = respone.read().decode("gbk")
#     # print(html)
# except urllib.error.URLError as e:
#     if hasattr(e, "code"):
#         print(e.code)
#     if hasattr(e, "reason"):
#         print(e.reason)
#
# # html = open(r"E:/myPython/pac/51job/searchPage.html", "rb")
# soup = BeautifulSoup(html, "html.parser")  # 解析HTML文件
# datalist = []
#
# # str1 = r'''script src='''
# for item in soup.find_all("script", type="text/javascript", ):
#     item = str(item)
#     if item.find(str1) >= 0:
#         continue
#     data = item
#
# # print(data)
#
# findHref = re.compile(r'"job_href":"(.*?)","job_name"')
# hrefs = re.findall(findHref, data)
#
# # print(hrefs)
# hrefslist = []
# for i in hrefs:
#     ii=re.sub(r'\\',"",i)
#     # havep = i.replace(r"\\","")
#     hrefslist.append(ii)
# print(hrefslist)
# # print(type(hrefslist[0]))
# #
# # uu = []
# # url = [r'https:\\/\\/jobs.51job.com\\/shenzhen-nsq\\/130536283.html?s=sou_sou_soulb&t=1',r'https:\\/\\/jobs.51job.com\\/shenzhen-nsq\\/130536283.html?s=sou_sou_soulb&t=1']
# # for i in url :
# #     havep = i.replace(r"\\","")
# #     uu.append(havep)
# #
# # print(uu)
#
# # data.append(link)
#
#
# # for item in soup.find_all('div', class_="item"):  # 根据div及class查找
# #     # print(item)
# #     data = []  # 保存一部电影所有信息
# #     item = str(item)
# #
# #     link = re.findall(findLink, item)[0]
# #     data.append(link)
# #
# #     datalist.append(data)
# #     print(datalist)
# # for link in eldiv:
# # link["href"]
# # link["title"]
# '''
#

# Load a locally saved 51job job-detail page and parse it with BeautifulSoup.
# Using a context manager guarantees the file handle is closed even if
# parsing raises (the original bare open() leaked the handle).
# NOTE(review): path is absolute ("/51job/..."); presumably meant to be a
# project-relative test fixture — confirm before running elsewhere.
with open(r"/51job/detailedPage.html", "rb") as html:
    soup = BeautifulSoup(html, "html.parser")  # parse the HTML file
# datalist = []
# demo = [{"keyword": "java", "title": "11418R-Java开发（资管中心)",
#          "salary": "1.5-2万/月", "companyName": "平安科技", "cpType": "民营公司",
#          "cpper": "50-150人", "field": "制药/生物工程", "area": "深圳-南山区",
#          "wkTime": "3-4年经验 ", "school": "本科", "need": "招1人",
#          "releaseDate": " 04-13发布", "welfare": "五险一金绩效奖金带薪年假节日福利定期体检",
#          "wkName": "PHP开发工程师", "contactWay": "上班地址：桃源街道3806房",
#          "jobInfo": "开发、调试和..", "detailUrl": "https://jobs.51job.com/shenzhen-nsq/130961179.html?s=sou_sou_soulb&t=0"},
#         {},
#         {}]
#
# detailUrl = "url"
# # # 头部信息
# headInfo = soup.find("div", class_="cn")
# #j
# # title = headInfo.find("h1k").text
# title = headInfo.find("h1")  # fixed typo: "h1k" is not a valid tag, the page uses <h1>
# print(title)
#
#
# salary = headInfo.find("strong").text
# salary = "未注明" if len(salary) == 0 else salary
# # print(salary)
#
#
# # fcompany=re.compile(r'title=.*>(.*)<em ')
# companyName = headInfo.find("p", class_="cname").find("a", class_="catn")["title"]
# # print(company)
#
# msg = headInfo.find("p", class_="msg ltype").text
# # print(msg)
#
# msgList = msg.split("  |  ")
# area = msgList[0]
# wkTime = msgList[1]
# school = msgList[2]
# need = msgList[3]
# releaseDate = msgList[4]
# # print(loct,wkTime,school,need,releaseDate)
#
# # 福利
# jtag = headInfo.find("div", class_="jtag").find("div", class_="t1")
# welfare = [item.text for item in jtag.find_all("span", class_="sp4")]
# # print(welfare)
#
#
# # 右侧信息
# comtags = soup.find("div", class_="com_tag").find_all("p", class_="at")
# tags = [str(tag["title"]) for tag in comtags]
#
# # 民营公司
# cpType = tags[0]
# # print(cpType)
# # 1000-5000人
# cpper = tags[1]
# # print(cpper)
# field = tags[2]
# # print(field)
#
#
# # 左侧主要信息
# mainInfo = soup.find("div", class_="tCompany_main")
#
# # msg_inbox=mainInfo.find("div", class_="bmsg job_msg inbox").text
# # print(msg_inbox)
# # "wkName": "PHP开发工程师", "contactWay": "上班地址：桃源街道3806房",
# # "jobInfo": "开发、调试和..", "detailUrl": "https://jobs.51job.com/shenzhen-nsq/130961179.html?s=sou_sou_soulb&t=0"}
# # jobinfo = re.findall(re.compile("(\d、.*；)",re.S), msg_inbox)[0].strip()
# keywd, wkName = "Not specified", "Not specified"
# jobinfo = ""
# for i in mainInfo.find("div", class_="bmsg job_msg inbox").find_all("p"):
#     i = i.text
#     if "职能类别：" in i:
#         wkName = i[5:]
#     if "关键字：" in i:
#         keywd = i[4:]
#     else:
#         jobinfo += (i +"\n")
#
# print(jobinfo)
# # jobinfo = re.findall(re.compile("(\d、.*；)"), msg_inbox)
# # print(msg_inbox)
#
# # wkName=re.findall(re.compile("职能类别：(.*)"), msg_inbox)[0].strip()
# # print(wkName)
# #
# # findKeywd=re.search("关键字：(.*)", msg_inbox)
# # keywd="Not specified" if findKeywd==None else findKeywd.group(1)
# # print(keywd)
#
# contactWay = mainInfo.find("div", class_="bmsg inbox").find("p", class_="fp").text
# # print(contactWay)

# pool = [[1,2,3,4],[6,7,8,9],[12,13,14,15]]
#
# for i in pool:
#     for j in i:
#         print(j)
#
# data = {}
# data["dd"] = 3
# print(data)


# html = getAPage(url, "gbk")
# html = open(r"E:/myPython/pac/51job/testFile/detailedPage.html", "rb")
# soup = bs4html(html)
#
# # 51job发布链接
# detailUrl = url
# # 头部信息
# headInfo = soup.find("div", class_="cn")
#
# # 招聘职位
# title = headInfo.find("h1").text
#
# # 薪资
# salary = headInfo.find("strong").text
# salary = "未注明" if len(salary) == 0 else salary
#
# # 公司名
# companyName = headInfo.find("p", class_="cname").find("a", class_="catn")["title"]
#
# # 招聘信息标签
# msg = headInfo.find("p", class_="msg ltype").text
#
# try:
#     msgList = msg.split("  |  ")
#     # 地区
#     area = msgList[0]
#     # 工作经验时间
#     wkTime = msgList[1]
#     # 学历
#     school = msgList[2]
#     # 招聘人数
#     need = msgList[3]
#     # 招聘发布时间
#     releaseDate = msgList[4]
# except Exception as e:
#     print("招聘信息标签这里出现问题...,系统报错：", e)
#     # Reset the fields only on failure. (A `finally` clause here would run
#     # unconditionally and clobber successfully parsed values.)
#     area, wkTime, school, need, releaseDate = "", "", "", "", ""
# # 福利
# jtag = headInfo.find("div", class_="jtag").find("div", class_="t1")
# welfare = [item.text for item in jtag.find_all("span", class_="sp4")]
#
# # 右侧信息
# comtags = soup.find("div", class_="com_tag").find_all("p", class_="at")
# tags = [str(tag["title"]) for tag in comtags]
# # 公司类型
# cpType = tags[0]
# # 公司人数规模
# cpper = tags[1]
# # 公司从事领域
# field = tags[2]
#
# # 左侧主要信息
# mainInfo = soup.find("div", class_="tCompany_main")
# msg_inbox = mainInfo.find("div", class_="bmsg job_msg inbox").text
#
# # 工作职能类别,工作关键字
# wkName, keywd = "Not specified", "Not specified"
# # 职位信息
# jobInfo = ""
# for p in mainInfo.find("div", class_="bmsg job_msg inbox").find_all("p"):
#     p = p.text
#     if "职能类别：" in p:
#         wkName = p[5:]
#     if "关键字：" in p:
#         keywd = p[4:]
#     else:
#         jobInfo += (p + "\n")
#
# # 工作联系方式
# contactWay = mainInfo.find("div", class_="bmsg inbox").find("p", class_="fp").text

# data = {"searchWord": searchWord, "job_name": title,
#         "providesalary": salary, "company_name": companyName, "companytype": cpType,
#         "companysize": cpper, "companyind": field, "workarea": area,
#         "wkTime": wkTime, "school": school, "need": need,
#         "releaseDate": releaseDate, "jobwelf": welfare,
#         "wkName": wkName, "contactWay": contactWay,
#         "jobInfo": jobInfo, "detailUrl": detailUrl}