#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#######################
# Crawl corporate ownership (equity) information from the Sina Finance site
#######################
import io
import json
import os
import re
import sys
import time
import urllib
import urllib.request  # bare "import urllib" does not guarantee the request submodule

from bs4 import BeautifulSoup

dirs = os.path.abspath(os.path.dirname(__file__)+"/../Config")
os.sys.path.append(dirs)   # add the ../Config directory to the module search path
from config import conn,driver,logpath
def _ts():
	"""Current local time formatted for log lines."""
	return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

print(_ts(), 'Began to crawl data...')

# Database cursor shared by the crawl functions below.
cur = conn.cursor()
print(_ts(), "open Browser ...")
# driver = webdriver.Firefox()
# driver = webdriver.PhantomJS(executable_path='D:/phantomjs198/phantomjs.exe')
# Give the (config-provided) browser a moment to finish starting up.
time.sleep(5)

#获取远程URL链接
#获取远程URL链接 (fetch a remote URL)
def GetUrlData(url):
	"""Fetch *url* and return the raw response body as bytes.

	Args:
		url: any URL scheme urllib supports (http, https, file, ...).

	Returns:
		The undecoded response body (callers pick the charset, e.g. gbk).

	Raises:
		urllib.error.URLError: on network / protocol failure.
	"""
	# Context manager closes the response even on error; the original
	# version leaked the underlying socket.
	with urllib.request.urlopen(url) as resp:
		return resp.read()

#递归获取页面内容 如果页面载入失败 重新加载
#递归获取页面内容 如果页面载入失败 重新加载 (recursively load the page, retrying on failure)
def DriverGet(url,tableid,eid,enterprise,times=1):
	"""Open *url* in a new browser tab and return the parsed page.

	Success is defined as the presence of a <table id=*tableid*> in the
	rendered DOM.  Retries up to 5 times; on final failure the enterprise
	is recorded in ``crawlerfalselog`` and the whole process exits.

	Args:
		url: page to load.
		tableid: id of the table whose presence marks a successful load.
		eid: enterprise id, logged on failure.
		enterprise: enterprise name, logged on failure.
		times: current attempt number (internal; starts at 1).

	Returns:
		BeautifulSoup document containing the requested table.
	"""
	# Open the page in a fresh tab and focus it (newest handle is last).
	# NOTE(review): tabs are never closed on success, so they accumulate
	# for the lifetime of the run -- acceptable for a one-company run.
	driver.execute_script('window.open("%s")' % url)
	handles = driver.window_handles
	# switch_to.window replaces switch_to_window, which was removed in
	# Selenium 4 (the property form works in Selenium 2/3 as well).
	driver.switch_to.window(handles[-1])
	driver.implicitly_wait(10)
	time.sleep(5)  # give dynamically rendered content time to appear
	soup = BeautifulSoup(driver.page_source,"html.parser")
	if soup.find("table",id=tableid):
		return soup
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Reload the page failed to open the page The ",times,"time...")
	if times < 5:
		return DriverGet(url,tableid,eid,enterprise,times+1)
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"The current enterprise information fetching failure...")
	# 保存抓取失败企业 -- record the failed enterprise.  Parameterized to
	# avoid SQL injection / breakage on quotes in the company name.
	try:
		cur.execute(
			"insert into crawlerfalselog (eid,ename,url,type) values (%s,%s,%s,3)",
			(eid, enterprise, url))
		conn.commit()
	except Exception as e:
		print("Data add failure ", e)
	driver.close()
	driver.quit()
	sys.exit()

#获取详请页面内容 处理
#获取详请页面内容 处理 (scrape the detail pages and store the result)
def dealData(searchname):
	"""Crawl Sina Finance for one company and upsert its ownership record.

	Resolves the Sina stock code for *searchname*, scrapes four pages
	(company profile, circulating-shareholder structure, executive list,
	chairman resume) and ``replace``s the row in ``enterpriseownerinfo``.

	Args:
		searchname: exact enterprise name as stored in ``enterprisefinance``.
	"""
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"For sina company code")
	# Sina's suggest service answers "var ...=name,type,code,...;" in GBK;
	# field 2 of the comma list is the stock code.
	url = "http://suggest3.sinajs.cn/suggest/type=&key=%s" % urllib.request.quote(searchname)
	geturlData = GetUrlData(url).decode("gbk")
	enterprisecode = geturlData.split("=")[1].split(",")[2]

	# 查询公司对应ID -- look up the internal enterprise id.
	# Parameterized to avoid SQL injection via the company name.
	result = None
	try:
		cur.execute("select eid from enterprisefinance where ename = %s", (searchname,))
		result = cur.fetchone()
	except Exception as e:
		print("Data query failure ", e)
	if result is None:
		# Unknown enterprise: nothing to attach crawled data to (the
		# original code crashed here with a NameError/TypeError).
		print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"No enterprise id found, skip:",searchname)
		return
	eid = result[0]

	############################################################ company profile
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Open the company profile page")
	soup = DriverGet("http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/%s.phtml" % (enterprisecode),"comInfo1",eid=eid,enterprise=searchname)
	gsjj = {}
	for gsjj_tr in soup.find("table",id="comInfo1").find_all("tr"):
		gsjj_tdlist = gsjj_tr.find_all("td")
		if len(gsjj_tdlist) >= 2:
			# Profile rows are label/value pairs: td[0] label, td[1] value.
			gsjj[gsjj_tdlist[0].text.strip()] = gsjj_tdlist[1].text.strip()

	############################################################ ownership structure
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Open the page ownership structure")
	soup = DriverGet("http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CirculateStockHolder/stockid/%s/displaytype/30.phtml" % (enterprisecode),"CirculateShareholderTable",eid=eid,enterprise=searchname)
	gqjg_trlist = soup.find("table",id="CirculateShareholderTable").find("tbody").find_all("tr")
	gqjg = {}
	datalist = []
	# enumerate replaces list.index (O(n^2) and wrong on duplicate rows).
	for rowno, gqjg_tr in enumerate(gqjg_trlist):
		if gqjg_tr.text == "":
			break  # a blank row marks the end of the current period's table
		gqjg_tdlist = gqjg_tr.find_all("td")
		if len(gqjg_tdlist) < 2:
			continue
		if rowno > 1:
			# Shareholder rows: td[1..4] = name, amount, ratio, share type
			# (td[0] is the rank) -- TODO confirm against the live layout.
			datalist.append([gqjg_tdlist[i].text.strip() for i in range(1, 5)])
		else:
			# The first two rows are label/value headers (e.g. 截止日期).
			gqjg[gqjg_tdlist[0].text.strip()] = gqjg_tdlist[1].text.strip()
	# Stored through a parameterized query below, so the manual backslash
	# doubling the interpolated SQL needed is no longer required.
	gqjg["StockholderStructure"] = json.dumps(datalist)

	############################################################ company executives
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Open the list company executives")
	soup = DriverGet("http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpManager/stockid/%s.phtml" % (enterprisecode),"comInfo1",eid=eid,enterprise=searchname)
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Access to the chairman's name")
	# NOTE(review): assumes the row containing the 董事长 (chairman) label
	# has the chairman's name in its first <td> -- verify against the page.
	kzrname = soup.find("td",text="董事长").parent.find("td").text.strip()

	############################################################ chairman resume
	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Open the chairman resume page")
	soup = DriverGet("http://vip.stock.finance.sina.com.cn/corp/view/vCI_CorpManagerInfo.php?stockid=%s&Name=%s" % (enterprisecode,urllib.request.quote(kzrname.encode('gb2312'))),"Table1",eid=eid,enterprise=searchname)
	# Row 2 of the resume table holds the label/value pair (e.g. 简 历).
	kzrjl_tdlist = soup.find("table",id="Table1").find_all("tr")[2].find_all("td")
	kzrjl = {kzrjl_tdlist[0].text.strip(): kzrjl_tdlist[1].text.strip()}

	print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Company ownership structure data warehousing...")
	# Parameterized upsert (the original interpolated scraped text straight
	# into the SQL string -- injection-prone and broken by quotes).
	sqlgd = ("replace into enterpriseownerinfo (`eid`,`ename`,`cutoffdate`,`OfficeAddress`,"
		"`ChairmanSecretary`,`ChairmanSecretaryPhone`,`StockholderStructure`,`chairman`,"
		"`chairmanresume`) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)")
	params = (eid, searchname, gqjg['截止日期'], gsjj['办公地址：'], gsjj['董事会秘书：'],
		gsjj['董秘电话：'], gqjg['StockholderStructure'], kzrname, kzrjl["简 历"])
	try:
		cur.execute(sqlgd, params)
		conn.commit()
	except Exception as e:
		print("Data add failure ", e)

print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),"Start to get the company data...")
# 企业列表 -- enterprise name from the command line.  A literal "php"
# argument means the name arrived URL-encoded (invoked from a PHP front end).
if len(sys.argv) < 2:
	# The original indexed sys.argv[1] unconditionally and crashed.
	print("Usage: python ownerinfo.py <enterprise-name> [php]")
	driver.quit()
	sys.exit(1)
if "php" in sys.argv:
	enterprise = urllib.request.unquote(sys.argv[1])
else:
	enterprise = sys.argv[1]
try:
	dealData(enterprise)
finally:
	# Always release the browser, even when the crawl raises.
	driver.close()
	driver.quit()
