from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from urllib.parse import quote, unquote
from bs4 import BeautifulSoup
import argparse
import time

# import logging
# logging.basicConfig(level=logging.DEBUG,
# 	format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')


def config():
	"""Parse command-line options for the scraping run.

	Options:
		--query_file: path to a file with one query per line (optional).
		--retry_time: number of retry rounds for failed queries.
		--load_times: comma-separated page-load waits (seconds), one per round.
	"""
	ap = argparse.ArgumentParser()
	for flag, kwargs in (
		("--query_file", dict(type=str, required=False, default=None)),
		("--retry_time", dict(type=int, default=5)),
		("--load_times", dict(type=str, default="10")),
	):
		ap.add_argument(flag, **kwargs)
	return ap.parse_args()


def preprocess_table(html_table):
	"""Wrap a bare table-interior fragment in <table> tags so it parses as a table."""
	return "<table>\n{}\n</table>".format(html_table)


def parse_table_with_thead_tbody(html_table):
	"""Extract header names and row cells from an HTML <table> string.

	Args:
		html_table: full ``<table>...</table>`` markup (see preprocess_table).

	Returns:
		(headers, table_data): header texts from the <th> cells under
		<thead>, and a list of rows, each a list of <td> texts under <tbody>.
		Either part is empty when the corresponding section is absent.

	Note: raises AttributeError when no <table> element is found; the
	caller's except block treats that as a failed query.
	"""
	soup = BeautifulSoup(html_table, 'html.parser')
	table = soup.find('table')

	# Header cells: every <th> anywhere under <thead>.
	headers = []
	thead = table.find('thead')
	if thead:
		headers = [th.get_text(strip=True) for th in thead.find_all('th')]

	# Body rows: one list of cell strings per <tr> under <tbody>.
	table_data = []
	tbody = table.find('tbody')
	if tbody:
		table_data = [
			[cell.get_text(strip=True) for cell in row.find_all('td')]
			for row in tbody.find_all('tr')
		]

	return headers, table_data


def format_table_out(headers, table_data):
	"""Render a parsed table as tab-separated text.

	Args:
		headers: list of column-name strings (first output line).
		table_data: list of rows, each a list of cell strings.

	Returns:
		One line per row, cells joined by tabs; every line — including the
		last — ends with a newline.
	"""
	# Join once instead of repeated string += (quadratic on large tables).
	lines = ["\t".join(headers)]
	lines.extend("\t".join(row) for row in table_data)
	return "\n".join(lines) + "\n"


# Launch a Chrome browser session (the original comment said Firefox, but the
# code has always used Chrome). This global driver is shared by all helpers.
driver = webdriver.Chrome()

# Open the debug page once with a throwaway query so the user can log in
# manually; the driver keeps the session cookies for every later query.
query_origi = "baidu123"
query_ascii = quote(query_origi)  # percent-encode the query for the URL
driver.get("https://debug.baidu-int.com/g?env=hnb.wisedebug.baidu.com&ie=utf-8&info=2&pd=wise&tn=iphone&word={}".format(query_ascii))

# Wait for the manual login to complete (time already imported at file top;
# the redundant local `import time` was removed).
print("wait for login")
time.sleep(15)


def my_find_element(find_by, match_str, out_time=20, post_wait=0):
	"""Wait for an element to be present in the DOM and return it.

	Args:
		find_by: a selenium ``By`` locator strategy.
		match_str: the locator string for that strategy.
		out_time: WebDriverWait timeout in seconds.
		post_wait: optional extra sleep after the element is found,
			letting page scripts settle before the caller interacts with it.

	Returns:
		The located WebElement, or None on timeout / any other error
		(callers run inside a try block, so the subsequent AttributeError
		on None is reported as a failed query).
	"""
	try:
		element = WebDriverWait(driver, out_time).until(
			EC.presence_of_element_located((find_by, match_str))
		)

		if post_wait > 0:
			time.sleep(post_wait)

		return element
	except Exception as e:
		# Include the exception so timeouts vs. other failures are diagnosable.
		print("ERROR OCCURED when find element by {} \"{}\" : {}".format(str(find_by), match_str, str(e)))
		return None


def ask_table_info(query_origi, load_time=10):
	"""Fetch the debug page for ``query_origi`` and dump its merge-result table.

	Drives the already-logged-in global ``driver`` through the debug UI:
	opens the query URL, expands the JSON tree down to merge_result, renders
	it as a table, and writes it as TSV to outputs/<query>.txt
	(assumes the outputs/ directory already exists — TODO confirm).

	Returns True on success, False on any exception (printed to stdout).
	"""
	try:
		# Open the page requesting query_origi.
		query_ascii = quote(query_origi)  # percent-encoded query string
		driver.get("https://debug.baidu-int.com/g?env=hnb.wisedebug.baidu.com&ie=utf-8&info=2&pd=wise&tn=iphone&word={}".format(query_ascii))
		# Give the page load_time seconds to load (default 10).
		time.sleep(load_time)

		# Switch into the iframe that hosts the debug tree.
		driver.switch_to.frame('power')

		# "Debug tree (beta)" button. NOTE(review): the #__BVID__167 id looks
		# auto-generated and may be brittle across page versions — verify.
		debug_button = my_find_element(By.CSS_SELECTOR, "#__BVID__167___BV_tab_button__")
		debug_button.click()

		# Expand the "discoveryarc" node.
		darc_span = my_find_element(By.CSS_SELECTOR, '.json-key.discoveryarc')
		driver.execute_script("arguments[0].scrollIntoView();", darc_span)
		darc_span.click()

		# Expand the "multi_queue_dump" node.
		mtq_dump_span = my_find_element(By.CSS_SELECTOR, "span[class='json-key discoveryarc.multi_queue_dump']")
		driver.execute_script("arguments[0].scrollIntoView();", mtq_dump_span)
		mtq_dump_span.click()

		# Expand the "Level4_ac_merge_bc" node.
		l4_merge_span = my_find_element(By.CSS_SELECTOR, "span[class='json-key discoveryarc.multi_queue_dump.Level4_ac_merge_bc']")
		driver.execute_script("arguments[0].scrollIntoView();", l4_merge_span)
		l4_merge_span.click()

		# Click the "merge_result" leaf (dots escaped for the CSS id selector).
		mr_span = my_find_element(By.CSS_SELECTOR, "#discoveryarc\.multi_queue_dump\.Level4_ac_merge_bc\.merge_result")
		mr_span.click()

		# "Auto parse" button renders the selected node as an HTML table.
		auto_parse_button = my_find_element(By.CSS_SELECTOR, "button[class='table-databutton']")
		auto_parse_button.click()
		# Wait for the request to finish and the table to render.
		time.sleep(3)

		# Grab the rendered table and persist it as TSV.
		table = my_find_element(By.CSS_SELECTOR, "#tableDiv")
		table_html = table.get_attribute('innerHTML')
		headers, table_data = parse_table_with_thead_tbody(preprocess_table(table_html))
		with open("outputs/{}.txt".format(query_origi), "w") as wf:
			wf.write(format_table_out(headers, table_data))
		return True
	except Exception as e:
		print("Exception {} when process query {}".format(str(e), query_origi))
		return False


args = config()

# One page-load budget (seconds) per retry round, from --load_times; pad
# with the last value so every round has a budget.
load_times = [int(x) for x in args.load_times.split(",")]
load_times = load_times[:args.retry_time]
if len(load_times) < args.retry_time:
	load_times = load_times + [load_times[-1]] * (args.retry_time - len(load_times))

# Queries come from --query_file (one per line) or a built-in sample pair.
if args.query_file is not None:
	with open(args.query_file, "r") as f:
		query_list = f.read().strip().split("\n")
else:
	# Sample test queries.
	query_list = ["杨幂的身高是多少", "姚明的身高是多少"]
total_n = len(query_list)

query_list_failed = []

# try_id is referenced after the loop; initialize it so --retry_time 0
# cannot raise NameError on the summary line.
try_id = -1
for try_id, load_time in zip(range(args.retry_time), load_times):
	print("=== TRY_ID : {} (load_time={}) ===".format(try_id, load_time))
	print("len(query_list) : {}".format(len(query_list)))
	for qidx, query in enumerate(query_list):
		print("process {}-th query : {}".format(qidx+1, query))

		is_success = ask_table_info(query, load_time=load_time)
		if not is_success:
			query_list_failed.append(query)

	if len(query_list_failed) == 0:
		query_list = []
		break

	# Next round retries only the queries that failed this round.
	query_list = query_list_failed
	query_list_failed = []

failed_n = len(query_list)
print("{}/{} still failed after {} turn try".format(failed_n, total_n, try_id + 1))

if failed_n > 0:
	print("=== FAILED QUERY ===")
	print("\t".join(query_list))
