# -*- coding: utf-8 -*-
#######################
# Fetch corporate financial-report information from the Sina website
#######################
import sys
import os
import time
dirs = os.path.abspath(os.path.dirname(__file__)+"/../Config")
os.sys.path.append(dirs)   # add the sibling ../Config directory to Python's module search path
from config import pythoncmd,conn


cur = conn.cursor()
# Fetch the search names of all enabled enterprises (status = 1) that
# actually have a search name recorded.
sqls = "select searchname from enterpriseinfo where status = 1 and searchname is not null"
cur.execute(sqls)
# List of enterprise names to crawl, one per matching row.
enterprise_list = [row[0] for row in cur.fetchall()]

# Bug fix: the original compared the list against None (`enterprise_list == None`),
# which is never true for a list built above, so the fallback was dead code.
# Use truthiness so an empty query result falls back to the default name.
if not enterprise_list:
	enterprise_list = ["拓尔思"]
	
def startcrawler(enterprise_list):
	"""Launch the per-enterprise crawler script once for each name.

	Each name in *enterprise_list* is passed to enterprise.py as a single
	command-line argument. A 10-second pause between launches throttles
	requests against the target site.
	"""
	# Local imports keep the file-level import block untouched.
	import shlex
	import subprocess

	filepath = sys.path[0]
	for enterprise in enterprise_list:
		# Security fix: the original used os.system with string concatenation,
		# which breaks on names containing spaces or shell metacharacters and
		# is open to shell injection (the names come from a database). Running
		# an argument list with no shell avoids both problems. shlex.split
		# keeps behavior if pythoncmd carries extra flags (e.g. "python3 -u").
		cmd = shlex.split(pythoncmd) + [filepath + "/enterprise.py", enterprise]
		subprocess.run(cmd)
		time.sleep(10)

startcrawler(enterprise_list)