#!/usr/bin/env python
# -*- coding: utf-8 -*-

#######################
# Million-enterprise list crawler
# 2016-07-27
#######################
import sys
import time
import urllib
import urllib.request
import re
from bs4 import BeautifulSoup
import io
import os
# Make the sibling ../Config directory importable so the shared project
# configuration (DB connection, selenium webdriver, proxy settings) loads below.
dirs = os.path.abspath(os.path.dirname(__file__)+"/../Config")
os.sys.path.append(dirs)   # add the Config directory (two levels up) to sys.path
# os.sys.path.append("D:/job/crawler/Config")
from config import conn,webdriver,logpath,Proxy,ProxyType
from selenium.webdriver.common.keys import Keys  # selenium input requires the Keys package
sys.setrecursionlimit(1000000)  # raise the limit: getUrl() below retries by recursing on failure

## multiprocessing support
from multiprocessing import Pool
import random

import socket
ip = socket.gethostbyname(socket.gethostname()) 
# Tag identifying this crawler run/operator — presumably logged elsewhere; TODO confirm
QQ = "JLP"
cur = conn.cursor()  # shared DB cursor used by get_enterprise_data()
iplist = []          # proxy IP pool (unused in this file as shown)
getrandomiptime = 0  # timestamp of last proxy refresh (unused in this file as shown)
def getUrl(url, retries=5):
	"""Fetch *url* through the hard-coded HTTP proxy and return the page
	parsed as a BeautifulSoup tree.

	Sleeps 10 seconds before every attempt (simple rate limiting) and
	retries up to *retries* times on any network error.  Raises the last
	exception once the retry budget is exhausted — the original recursed
	on every failure with no bound, which is why the module raises
	sys.setrecursionlimit so high.
	"""
	last_error = None
	for _ in range(retries):
		time.sleep(10)  # throttle: pause before each request
		try:
			user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
			headers = {'User-Agent': user_agent}
			req = urllib.request.Request(url, headers=headers)
			# Route through a fixed HTTP proxy.  Build a local opener instead
			# of install_opener() so the process-wide urllib state is untouched.
			proxyip = "202.106.16.36:3128"
			proxy_support = urllib.request.ProxyHandler({'http': "http://"+str(proxyip)})
			opener = urllib.request.build_opener(proxy_support)
			response = opener.open(req, timeout=10)
			proxypage = response.read()
			# the site serves GBK-encoded pages; let bs4 transcode
			return BeautifulSoup(proxypage, "html.parser", from_encoding="gbk")
		except Exception as e:
			last_error = e  # remember and retry
	raise last_error

def get_enterprise_data(name):
	"""Crawl www.88152.com's province-by-province enterprise directory and
	insert every enterprise name not already present into the
	`enterpriselist` table (columns: ename, province).

	*name* is unused; it is kept so the existing call signature is
	unchanged.  Restart support: invoking the script as
	``... startpage <province>-<page>`` (province/page in sys.argv[2])
	skips every province before <province> and resumes it at <page>.
	"""
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'start crawler')
	indexcontent = getUrl("http://www.88152.com/page/")
	atags = indexcontent.find("div",class_="dirlist").find_all("div")
	# Manual restart after an unexpected failure: drop provinces already done.
	if "startpage" in sys.argv:
		startlist = sys.argv[2].split("-")
		provincelist = [at.text.strip().replace(" ","") for at in atags]
		atags = atags[provincelist.index(startlist[0]):]
	for atag in atags:
		a = atag.find("a").get("href")
		province = atag.text.strip()
		pagecontent = getUrl(a)
		# second-to-last pager link holds the total page count
		totalpage = pagecontent.find("div",id="pageLink").find_all("a")[-2].text.strip()
		pagestart = 1
		# restart support: resume the named province at the saved page number
		if "startpage" in sys.argv:
			startlist = sys.argv[2].split("-")
			if province == startlist[0]:
				pagestart = int(startlist[1])
		for i in range(int(pagestart-1), int(totalpage)):
			print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'Open the %s province, %d page' % (a,i+1))
			pagecontent = getUrl(a+"list"+str(i+1)+".html")
			enterpriselist = pagecontent.find_all("div",class_="company")
			rows = []
			for enterprisetag in enterpriselist:
				ename = enterprisetag.text.strip()
				# Parameterized query — the original interpolated ename
				# straight into the SQL string (injection / quoting bug:
				# any name containing a quote broke the statement).
				cur.execute("select * from enterpriselist where ename like %s",
					("%"+ename+"%",))
				if cur.fetchone() is None:
					rows.append((ename, province))
			if rows:
				try:
					cur.executemany(
						"insert into enterpriselist (ename,province) values (%s,%s)",
						rows)
					conn.commit()
				except Exception as e:
					print("Failed to add data...")

if __name__ == "__main__":
	get_enterprise_data(0)
