from base64 import encode
import os
import time
import sys
from tkinter import mainloop
from tkinter.constants import E
from typing import Text
from bs4.element import AttributeValueWithCharsetSubstitution, Script
from pymongo import database
import requests
from bs4 import BeautifulSoup 
import re
import json
from selenium import webdriver
from selenium.webdriver.chrome.webdriver import WebDriver
import tkinter.messagebox

sys.path.append(r"D:\\pwn\\python")
from database import*
from hcity import*









class selenium(object):
	"""Automates the 51job.com login page through a Chrome WebDriver.

	NOTE(review): the class name shadows the `selenium` package name; it is
	kept because external callers reference it, but renaming is advisable.
	NOTE(review): `find_element_by_id` / `executable_path` are Selenium 3
	APIs that were removed in Selenium 4 — confirm the installed version
	before upgrading.
	"""

	def Driver(self):
		"""Start Chrome and open the 51job login page.

		Side effects: sets self.option and self.driver; performs network I/O.
		"""
		self.option = webdriver.ChromeOptions()
		# Keep the browser window open after the script finishes.
		self.option.add_experimental_option("detach", True)
		# Hide the "Chrome is being controlled by automated software" banner.
		self.option.add_experimental_option('excludeSwitches', ['enable-automation'])
		self.driver = webdriver.Chrome(executable_path='C://Users//13972//AppData//Local//Programs//Python//Python39//Scripts//chromedriver.exe', options=self.option)
		self.driver.get(
			'https://login.51job.com/login.php?loginway=0&isjump=0&lang=c&from_domain=i&url=')
		time.sleep(2)  # crude wait for the page to render

	def User_login(self, username='1397239425@qq.com', password='dd137772'):
		"""Fill in and submit the login form on the page opened by Driver().

		Args:
			username: account name typed into the login box. SECURITY: the
				default is the credential hard-coded in the original source —
				pass your own value instead of committing secrets.
			password: account password; same caveat as above.

		Exceptions from the element lookups are caught and printed so a
		missing element does not abort the whole run.
		"""
		self.user_name_input_name = 'loginname'
		self.password_input_name = 'password'
		try:
			self.driver.find_element_by_id(self.user_name_input_name).send_keys(username)
			self.driver.find_element_by_id(self.password_input_name).send_keys(password)
			time.sleep(1)
			self.driver.find_element_by_css_selector(
				'#login_btn').click()
		except Exception as e:
			print(e)
		finally:
			time.sleep(1)

	

def get_data():
	"""Leftover BeautifulSoup scratchpad; intentionally a no-op.

	The original body consisted entirely of commented-out parsing
	experiments (find/find_all, parent/sibling navigation, <script> JSON
	extraction, a json.dumps dump to data.json) plus a throwaway
	`a = 1` — which was also the file's only space-indented statement in
	an otherwise tab-indented module. It is kept as a documented stub so
	the commented-out call in `__main__` keeps working if re-enabled.

	Returns:
		None, always.
	"""
	# NOTE(review): the exploratory commented-out code removed here can be
	# recovered from version control if those snippets are still wanted.
	return None
		

	
class spride(object):
	"""Crawls 51job per-city listing pages and collects job postings.

	Typical flow: read_city() loads the URL list from hcity.json into
	self.city2, text() scrapes every URL into self.result_all, and
	write_json() appends the result to data.json.
	"""

	def text(self):
		"""Fetch every URL in self.city2 and scrape its job listings.

		Requires read_city() to have populated self.city2 first.
		Side effects: sets self.result_all (list of dicts, one per
		posting), self.count, and self.soup (last parsed page);
		prints progress; performs network I/O.
		"""
		self.result_all = []
		self.count = 0
		header = {
			'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',
		}
		for url in self.city2:
			# NOTE(review): timeout is in *seconds*, so 3000 is ~50 minutes;
			# 30 was probably intended — kept as-is to preserve behavior.
			html = requests.get(url, headers=header, timeout=3000)
			# The pages are served with a latin-1 charset header but the
			# bytes are actually GBK, hence the re-encode round trip.
			self.soup = BeautifulSoup(html.text.encode('iso-8859-1').decode('gbk'), 'html.parser')
			listing = self.soup.find('div', class_='detlist gbox')
			# Bug fix: the original checked `is None` only *inside* the loop
			# over listing's children — after findAll() had already been
			# called — so a missing container crashed before the guard ran.
			if listing is None:
				return
			entries = listing.findAll('div', class_='e')
			result = []
			for entry in entries:  # renamed from `list`, which shadowed the builtin
				one = {}
				one['招聘'] = entry.find('span', class_='title').find('a').get_text().strip()
				one['公司'] = entry.find('a', attrs={'class': 'name'}).get_text().strip()
				one['地点'] = entry.find('span', attrs={'class': 'location name'}).get_text().strip()
				one['发布时间'] = entry.find('span', attrs={'class': 'time'}).get_text().strip()
				result.append(one)
			print(result)
			self.result_all += result
			self.count += 1
			print('当前爬取的网页地址是：'+url)
			print('目前是第'+str(self.count)+'条数据'+',还差'+str(len(self.city2)-self.count)+'条数据即可完成！')

	def write_json(self):
		"""Append self.result_all to data.json as pretty-printed JSON.

		Errors are caught and printed rather than propagated (best-effort
		persistence, matching the original behavior).
		"""
		try:
			with open('data.json', 'a+', encoding='utf-8') as file:
				file.write(json.dumps(self.result_all, indent=3, ensure_ascii=False))
			print('写入成功！！！')
		except Exception as e:
			print(e)
		finally:
			print('数据已全部写入！！！')

	def read_city(self):
		"""Load hcity.json and flatten every record's values into self.city2.

		hcity.json is expected to hold a list of dicts; presumably each
		maps a city name to its listing URL — confirm against hcity.py.
		"""
		# Bug fix: the original left the file handle open; `with` closes it.
		with open('hcity.json', 'r', encoding='utf-8') as file:
			data = json.load(file)
		self.city2 = []
		for record in data:
			self.city2 += list(record.values())
		print(self.city2)

			


if __name__ == '__main__':
	# Entry point: load the per-city URL list, then scrape each listing
	# page. The commented-out calls below are alternate pipelines toggled
	# by hand during development (HTML parsing scratchpad, city-list
	# generation, JSON->Mongo import, selenium-driven login).
	# get_data()
	# login()
	# text()
	# hcity = Hcity()
	# hcity.find_data()
	# hcity.write_json()
	run = spride()
	run.read_city()  # populate run.city2 from hcity.json
	run.text()       # scrape every URL in run.city2 (network I/O)
	# run.write_json()
	# j2m = JsonToMongo()
	# j2m.write_database()
	# login = selenium()
	# login.Driver()
	# login.User_login()