from SeleniumCrawler import LoginInfo, SeleniumCrawler

import sys, os
import signal
import traceback
import socket


# Global crawler object... kept global so the interrupt-signal handler
# can reach it for cleanup.
gCrawler = None
# Likewise, a global record of the log currently in use
gLogFile = ""
# Log file name
gLogFileName = "crawler.log"
# Whether to show the browser UI
gIsShowBrowser = False
# Global data; currently a string
# NOTE(review): LoadDataToFile() actually stores an int here — confirm the intended type.
gData = ""



def LoadDataToFile():
	""" Load persisted program data from the local ".data" file.

	Reads the first line of the file, converts it to int, and stores it
	in the global gData.  The ".data" file must exist and contain at
	least a "0" on its first line.

	NOTE(review): despite the name ("...ToFile"), this reads FROM the
	file; the name is kept for backward compatibility with callers.

	Returns:
		int: the value read into gData.
	"""
	global gData

	fileNameData = ".data"
	# Context manager guarantees the handle is closed even if reading
	# or the int() conversion below raises.
	with open(fileNameData, "r") as inputstream:
		read_lines = inputstream.readlines()

	gData = int(read_lines[0])
	return gData


def SaveDataToFile():
	""" Persist the global gData to the local ".data" file.

	Overwrites any existing file content with str(gData).
	"""
	global gData

	fileNameData = ".data"
	# Mode "w" truncates the existing file in one step, replacing the
	# old remove-then-open("a") dance, which was racy and left no file
	# at all if the write failed after the remove.  The context manager
	# also guarantees the handle is closed on error.
	with open(fileNameData, "w") as outputstream:
		outputstream.write(str(gData))


def PrintException(Exception, e):
	""" Print diagnostic details for a caught exception.

	Args:
		Exception: the exception class (or anything printable) supplied
			by the caller.  NOTE(review): this parameter shadows the
			builtin Exception; the name is kept in the signature for
			backward compatibility with existing callers.
		e: the exception instance that was caught.
	"""
	print("str(Exception):\t" + str(Exception))
	print("str(e):\t\t" + str(e))
	print("repr(e):\t", repr(e))
	print("traceback.format_exc():\n" + traceback.format_exc())

	# Compare the exception's type name instead of its repr(): the old
	# repr(e) == "NoSuchElementException()" check failed whenever the
	# exception carried a message, e.g. NoSuchElementException("msg").
	if type(e).__name__ == "NoSuchElementException":
		print("The cause of the problem may be the wait time is too short.")
		print("Please recall Method: WebCrawlerTool.SetWaitTime(), and set longer wait time.")


def OnExit():
	""" Program-shutdown handler: persist state and stop the crawler.

	Safe to call when no crawler was ever created (does nothing then).
	"""
	global gCrawler, gLogFile
	# Identity check "is not None" instead of "!= None" (PEP 8).
	if gCrawler is not None:
		if gLogFile is not None and gLogFile != "":
			# TODO(review): placeholder — log-file finalisation was
			# never implemented here.
			pass

		# Record the current download total before shutting down.
		SaveDataToFile()
		gCrawler.Quit()


# Interrupt-signal handler, registered via signal.signal() in ExecCrawer.
def exit(signum, frame):
	""" Clean up on SIGINT, then terminate the process.

	NOTE(review): this shadows the builtin exit(); kept because
	ExecCrawer registers this exact name as the SIGINT handler.
	"""
	caught_sigint = signum == signal.SIGINT
	if caught_sigint:
		print("\nCatch Signal SIGINT(2) !")
		OnExit()

	sys.exit()


def WaitForInput(str_your_tips):
	""" Block until the user types a continue/exit command.

	Shows str_your_tips plus instructions, then loops reading stdin:
	"c" returns to the caller, "e" shuts the crawler down and exits
	the process, anything else re-prompts.
	"""
	prompt = "\n" + str_your_tips + "\nPress Key \"c\" to continue, or press \"e\" to exit.\n"
	while True:
		answer = input(prompt)
		if answer == "e":
			OnExit()
			sys.exit()
		if answer == "c":
			return
		print("Invalid Key, Please Re Operation !\n")


def ExecCrawer():
	""" Drive the whole crawl: signal hook, login info, crawler run, cleanup. """
	global gCrawler, gLogFileName, gIsShowBrowser

	# Route Ctrl-C (SIGINT) through our cleanup-aware handler.
	signal.signal(signal.SIGINT, exit)

	# Credentials and element locators for the login page.
	login_fields = {
		"loginUrl": "www.example.com",
		"elemUN": "Username",
		"elemPWD": "Password",
		"elemSUBMIT": "btnSubmit",
		"userName": "Jack",
		"passWord": "sayHelloToLucy",
	}
	login_info = LoginInfo()
	for attr, value in login_fields.items():
		setattr(login_info, attr, value)

	# Create the crawler tool and keep a global handle for the
	# signal handler's cleanup path.
	crawler = SeleniumCrawler()
	if crawler is None:
		sys.exit()

	gCrawler = crawler
	crawler.SetLogFileName(gLogFileName)
	crawler.Exec(login_info, LogicMain, False, True)

	# Unified end-of-run cleanup.
	OnExit()


def LogicMain(crawler: SeleniumCrawler):
	""" The real crawl logic entry point, invoked by SeleniumCrawler.Exec().

	Args:
		crawler: the SeleniumCrawler instance driving the session.

	Exits the process if no crawler instance was supplied.
	"""
	# Bug fix: the docstring used to sit AFTER the global statement, so
	# Python treated it as a plain expression and LogicMain.__doc__ was
	# None.  It must be the first statement of the function.
	global gLogFileName

	if crawler is None:
		sys.exit()

	'''
	# Open the page you want to analyse, and get back the page source
	# string (page_src) for the next processing step.
	openUrl = ""
	page_src = crawler.Open_ByBrowser(openUrl, 3)
	'''

	return


# Program entry point
if __name__ == "__main__":
	ExecCrawer()
 