#-*- coding: utf8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from urllib.parse import quote, unquote
from bs4 import BeautifulSoup
import argparse
import time
import json

# Tier-1 (top-level) industry category labels. The list index of a label is
# used as its numeric category id (see ask_industry_cls: yiji_ind.index(...)).
yiji_ind = ["医疗","母婴","政务","游戏","旅游出行","音乐","历史军事","新闻","生活和情感","影视动漫","书籍文档","体育","金融","汽车","房产装修","招聘","民生","教育培训","商品","生活服务","生产制造","企业服务","社交网络平台","工具服务及在线查询","信息技术","人物","色情","门户","需求不明","软件","网络服务","通信"]
# Tier-2 (sub-category) industry labels; again the list index doubles as the
# numeric id written into the output record.
erji_ind = ["疾病知识","医疗器械","养生知识","整形美容","医疗服务","母婴医疗","药品保健品","医疗其它","母婴用品","母婴育儿","公益性组织","政务寻址","政策法规","市政建设","网游","单机游戏","手机游戏","页游","游戏平台","小游戏","棋牌游戏","游戏其它","国家","省份","城市","景点","旅游其它","乘坐火车","乘坐飞机","乘坐汽车","跨城出行","在线旅游","旅行社","酒店","歌曲","音乐泛需求","广播电台","音乐app","活动演出","音频","历史军事","新闻","情感问答","两性知识","动物","植物","爱宠","生活常识","偏僻技能","时尚","民俗文化","电视剧","电影","综艺","动漫","影视动漫泛需求","网络短视频","影视寻址&app","文档资料","小说","漫画","图书","报刊杂志","赛事","球队","对阵","寻址词","电竞体育","体育人物","体育其它","p2p平台","保险业","信托业","外汇类","平台中介","租赁业","证券业","金融其它","金融征信","银行业","融资贷款","新车","二手车","用车服务","汽车其它","出行用车","车主","代步车","新房","二手房","租房","精准楼盘","房价","房产知识","房产寻址&app","装修建材","装潢设计","家具家居","装修其它","寻址&app","全职","兼职","实习","校招","招聘会","公司","天气","饮食菜谱","命理","万年历","K12","学历教育","语言培训","留学","职业培训","教育综合","兴趣培训","IT","成绩和学籍查询","教育其它","早教","3C","食品饮料","化妆品","家居生活","眼镜服装配饰","珠宝饰品","鞋靴箱包","玩具文化用品","运动户外","虚拟商品","网络购物其它","网购寻址&app","酒类","生鲜水果","综合平台","彩票","团购","位置查询","餐饮","便民服务","休闲娱乐","摄影婚庆","心理援助","快递运输","市内出行","交通设施","公交查询","地铁查询","其它生活服务","机械设备","农林牧渔","化工及能源","电子电工","其它生产制造","招商休闲娱乐","招商其它","招商家居建材","招商教育培训","招商服务类","招商服装鞋帽","招商生活用品","招商礼品饰品","招商综合","招商美容化妆","招商餐饮酒店","代理代办","出国移民","咨询调查","商务服务其它","安全安保","广告包装印刷","拍卖","检修认证","法律服务","即时通讯","社区或问答平台","SNS","实用查询","搜索服务","主题桌面优化","浏览器","网址导航","其它工具服务及在线查询","信息技术","娱乐人物","领域内知名人物","普通人名","虚拟人物","人物泛需求","成人色情","门户寻址","需求不明","商用软件","民用软件","软件平台","云服务","域名空间","系统集成","网站建设","网络营销","通信及网络设备","虚拟运营商","电信运营商"]
# Mutable output record template (reused across queries). ask_industry_cls
# overwrites Q_query_basic (the query text), Q_qc_1 (tier-1 index) and
# Q_qc_2 (tier-2 index); the values below are placeholders from a sample run.
template = {"query_domain": {"Q_query_basic": "严", "tag_qid": "8493624043781520330", "Q_qc_2": "186", "Q_qc_1": "25"}}

def config(argv=None):
	"""Parse command-line options for the scraper.

	Args:
		argv: optional list of argument strings; defaults to sys.argv[1:]
			(passing an explicit list makes the parser unit-testable).

	Returns:
		argparse.Namespace with:
			query_file: path to a newline-separated query file, or None to
				use the built-in test queries.
			retry_time: maximum number of retry passes over failed queries.
			load_times: comma-separated per-pass page-load waits in seconds.
	"""
	parser = argparse.ArgumentParser()
	parser.add_argument("--query_file", type=str, required=False, default=None)
	parser.add_argument("--retry_time", type=int, default=5)
	parser.add_argument("--load_times", type=str, default="10")
	return parser.parse_args(argv)


# Create a Chrome browser driver (the original comment incorrectly said
# Firefox; the code has always used Chrome).
driver = webdriver.Chrome()

# Open the debug page once with a throwaway query so the user can log in.
query_origi = "baidu123"
query_ascii = quote(query_origi)  # percent-encoded query for the URL
driver.get("https://debug.baidu-int.com/g?env=hnb.wisedebug.baidu.com&ie=utf-8&info=2&pd=wise&tn=iphone&word={}".format(query_ascii))

# Wait for a manual login; the session cookie is then kept by the driver
# for every later request. (Redundant second `import time` removed — it is
# already imported at the top of the file.)
print("wait for login")
time.sleep(15)


def parse_div(html_doc):
	"""Parse a score card's innerHTML into a {label: score-string} dict.

	The markup is expected to contain <span> elements alternating
	label / score. A trailing unpaired span is ignored instead of
	raising IndexError (the original comprehension read spans[i+1]
	without checking that it exists).

	Args:
		html_doc: HTML fragment (innerHTML of the score <div>).

	Returns:
		dict mapping label text to score text, both stripped.
	"""
	soup = BeautifulSoup(html_doc, 'lxml')
	spans = soup.find_all('span')
	# Only iterate over complete (label, score) pairs.
	paired = len(spans) - (len(spans) % 2)
	return {spans[i].text.strip(): spans[i + 1].text.strip() for i in range(0, paired, 2)}


def get_max_class(dict_res):
	"""Return the key with the largest float value, or None for an empty dict.

	Values are strings parsed with float(); on ties the first key in
	iteration order wins. The previous version used a -10000 sentinel,
	which silently dropped any key whose score was <= -10000; -inf has
	no such blind spot.

	Args:
		dict_res: mapping of class label -> numeric score as a string.

	Returns:
		The winning label, or None when dict_res is empty.

	Raises:
		ValueError: if any value cannot be parsed as a float.
	"""
	fin_key = None
	max_val = float("-inf")
	for k, v in dict_res.items():
		v = float(v)
		if v > max_val:
			fin_key = k
			max_val = v
	return fin_key


def my_find_element(find_by, match_str, out_time=3, post_wait=0):
	"""Best-effort element lookup with an explicit wait.

	Args:
		find_by: a selenium By.* locator strategy.
		match_str: the locator string (e.g. a CSS selector).
		out_time: seconds WebDriverWait polls before giving up.
		post_wait: extra seconds to sleep after the element is found.

	Returns:
		The located WebElement, or None when the element did not appear
		in time — callers rely on None to fall back to other selectors.

	Note: the original body mixed tabs and spaces; indentation is
	normalized to the file's tab convention.
	"""
	try:
		element = WebDriverWait(driver, out_time).until(
			EC.presence_of_element_located((find_by, match_str))
		)
		if post_wait > 0:
			time.sleep(post_wait)
		return element
	except Exception:
		# Deliberate best-effort: swallow lookup failures (mostly
		# TimeoutException) so the caller can probe alternative selectors.
		return None


def ask_industry_cls(query_origi, load_time=10):
	"""Query the debug page and print the industry classification record.

	Loads the debug page for `query_origi`, scrapes the tier-1 and tier-2
	classification score cards, picks the highest-scoring class from each,
	fills `template` with the query text and the class indices, and prints
	it as JSON.

	Args:
		query_origi: the raw (un-encoded) query string.
		load_time: seconds to wait for the page to render before scraping.

	Returns:
		True on success, False when the cards were not found or any step
		raised (the caller retries failed queries).
	"""
	try:
		query_ascii = quote(query_origi)
		driver.get("https://debug.baidu-int.com/g?env=hnb.wisedebug.baidu.com&ie=utf-8&info=2&pd=wise&tn=iphone&word={}".format(query_ascii))
		# Give the page time to render before scraping.
		time.sleep(load_time)

		# The classification widget lives inside the 'power' iframe.
		driver.switch_to.frame('power')

		# The card's position varies between renders: it may be the 4th,
		# 5th or 6th child. Probe the known slots in likelihood order
		# instead of duplicating three near-identical selectors per card.
		selector_tpl = ("#__BVID__146 > div > div > div:nth-child({slot}) > "
			"div.card-body.m-0.p-0 > div:nth-child({row}) > "
			"div.box-right.row.m-0.p-0.layout-wrap.col > div")

		def _find_card(row):
			# row 2 = tier-1 score card, row 3 = tier-2 score card.
			for slot in (5, 4, 6):
				el = my_find_element(By.CSS_SELECTOR, selector_tpl.format(slot=slot, row=row))
				if el is not None:
					return el
			return None

		yiji_div = _find_card(2)
		erji_div = _find_card(3)
		if erji_div is None or yiji_div is None:
			return False

		yiji = get_max_class(parse_div(yiji_div.get_attribute('innerHTML')))
		erji = get_max_class(parse_div(erji_div.get_attribute('innerHTML')))

		# Map labels to their numeric ids (list position).
		yiji = yiji_ind.index(yiji)
		erji = erji_ind.index(erji)

		# BUG FIX: this previously assigned the *global* loop variable
		# `query` instead of the function argument `query_origi`.
		template["query_domain"]["Q_query_basic"] = query_origi
		template["query_domain"]["Q_qc_1"] = str(yiji)
		template["query_domain"]["Q_qc_2"] = str(erji)
		print(json.dumps(template, ensure_ascii=False))

		return True
	except Exception as e:
		print("Exception {} when process query {}".format(str(e), query_origi))
		return False


args = config()

# Per-pass page-load waits: "--load_times 10,20" gives pass 0 a 10 s wait
# and pass 1 a 20 s wait. The list is clipped to retry_time entries and,
# if shorter, padded by repeating its last value.
load_times = [int(x) for x in args.load_times.split(",")]
load_times = load_times[:args.retry_time]
if len(load_times) < args.retry_time:
	load_times = load_times + [load_times[-1]] * (args.retry_time - len(load_times))

if args.query_file is not None:
	with open(args.query_file, "r") as f:
		query_list = f.read().strip().split("\n")
else:
	# Built-in test queries.
	query_list = ["杨幂的身高是多少", "姚明的身高是多少"]
total_n = len(query_list)

query_list_failed = []

# BUG FIX: try_id is read after the loop; initialize it so the summary line
# does not raise NameError when retry_time is 0.
try_id = -1
for try_id, load_time in zip(range(args.retry_time), load_times):
	print("=== TRY_ID : {} (load_time={}) ===".format(try_id, load_time))
	print("len(query_list) : {}".format(len(query_list)))
	for qidx, query in enumerate(query_list):
		print("process {}-th query : {}".format(qidx+1, query))

		is_success = ask_industry_cls(query, load_time=load_time)
		if not is_success:
			query_list_failed.append(query)

	if len(query_list_failed) == 0:
		query_list = []
		break

	# Only failed queries are carried into the next retry pass.
	query_list = query_list_failed
	query_list_failed = []

failed_n = len(query_list)
print("{}/{} still failed after {} turn try".format(failed_n, total_n, try_id + 1))

if failed_n > 0:
	print("=== FAILED QUERY ===")
	print("\t".join(query_list))
