#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################
#	Fetch 99114 Yellow Pages (shop.99114.com) listing data
#######################

import sys
import time
import urllib
import re
from bs4 import BeautifulSoup
import io
import os
dirs = os.path.abspath(os.path.dirname(__file__)+"/../Config")
os.sys.path.append(dirs)   #将上上级目录加载到python的环境变量中
# os.sys.path.append("D:/job/crawler/Config")
from config import conn,driver,logpath
from selenium.webdriver.common.keys import Keys  #selenium 输入需要引入keys包

driver.implicitly_wait(60)
#获取详请页面内容 处理
def dealData(driver,province,city,i,pages):
	"""Scrape the enterprise list for one city and store it in the DB.

	Parses the page currently loaded in `driver`, inserts every company
	name into the `enterpriselist` table, then clicks "下一页 »" to advance,
	repeating until page `pages` is reached; the window is closed at the end.

	Iterative rewrite of the original tail recursion: recursing both for
	pagination and for the retry-on-error path could exhaust Python's
	recursion limit on long listings.

	Args:
		driver:   Selenium WebDriver with a list page already loaded.
		province: province name, stored verbatim.
		city:     city name, stored verbatim.
		i:        1-based number of the currently loaded page.
		pages:    last page number to crawl for this city.

	Relies on the module-level `cur` cursor and `conn` connection.
	"""
	while True:
		print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'Open the %s %s %s page' % (province,city,i))
		try:
			soup = BeautifulSoup(driver.page_source,"html.parser")
			enterpriselist = soup.find("ul",class_="cony_div").find_all("a")
			counties = soup.find("div",class_="local_text").find("h1").text.strip()

			# Parameterized insert: the original interpolated values straight
			# into the SQL string, so a company name containing a quote broke
			# the statement (and was an SQL-injection vector).  %s placeholders
			# assume a MySQL-style driver (pymysql/MySQLdb) behind `conn` --
			# TODO confirm against Config/config.
			rows = [(province, city, a.text.strip(), counties, i) for a in enterpriselist]
			sql = "insert into enterpriselist (province,city,ename,counties,page) values (%s,%s,%s,%s,%s)"
			try:
				cur.executemany(sql, rows)
				conn.commit()
			except Exception as e:
				# Best effort: a failed batch is logged but does not stop the crawl.
				print("Failed to add data...", e)
			if i < pages:
				# Advance to the next list page and process it on the next pass.
				i = i+1
				driver.find_element_by_link_text('下一页 »').click()
			else:
				driver.close()
				return
		except Exception as e:
			# Transient parse/render failure (page not ready yet): retry the
			# same page, as the original recursive call did.  Deliberately
			# unbounded to match prior behavior; the driver's implicit wait
			# keeps this from spinning hot.
			continue
	

print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'Start crawling the data. . .')

cur = conn.cursor()
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Open your browser. . .")


#打开登陆页面
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Open the index page. . .")
soup = driver.get("http://shop.99114.com/")
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Open the province page. . .")
soup = BeautifulSoup(driver.page_source,"html.parser")
trlist = soup.find_all("tr")
#企业列表
####为了未知错误之后重新手动开始###############
if "startpage" in sys.argv:				
	startlist = sys.argv[1].split("-")
	provincelist = []
	for tr in trlist:
		provincelist.append(tr.find_all("td")[0].text.strip().replace(" ",""))
	#删除之前的
	startprovince = provincelist.index(startlist[0])
	trlist = trlist[startprovince:]
###############
for tr in trlist:
	tdlist = tr.find_all("td")
	province = tdlist[0].text.strip()
	alist = tdlist[1].find_all("a")
	
	####为了未知错误之后重新手动开始
	if "startpage" in sys.argv:
		startlist = sys.argv[1].split("-")
		if province == startlist[0]:			
			citylist = []
			for a in alist:
				citylist.append(a.text.strip())
			#删除之前的
			startcity = citylist.index(startlist[1])
			alist = alist[startcity:]
	########################
	for a in alist:
		i = 1
		handles = driver.window_handles # 获取当前窗口句柄集合（列表类型）
		driver.switch_to_window(handles[-1])
		city = a.text.strip()		
		####为了未知错误之后重新手动开始
		if "startpage" in sys.argv:
			startlist = sys.argv[1].split("-")
			if city == startlist[1]:
				i = int(startlist[2])
				hreflist = a.get('href').split("_")
				hreflist[1] = startlist[2]
				js='window.open("%s")' % "_".join(hreflist)
			else:
				js='window.open("%s")' % a.get('href')
		else:
			js='window.open("%s")' % a.get('href')
		########################
		driver.execute_script(js)
		handles = driver.window_handles # 获取当前窗口句柄集合（列表类型）
		driver.switch_to_window(handles[-1])
		
		soup = BeautifulSoup(driver.page_source,"html.parser")
		try:
			pages = soup.find("div",class_="pagination").find_all("a")[-2].text.strip()
			pages = int(pages)-1
		except Exception as e:
			try:
				if soup.find("div",class_="pagination").find("span",class_="disabled").text.strip() == "下一页 »":
					pages = soup.find("div",class_="pagination").find("span",class_="current").text.strip()
					pages = int(pages)
			except Exception as e:
				pages = 1
		
		dealData(driver,province,city,i,pages)
		
driver.quit()