import pandas as pd
import argparse
from bs4 import BeautifulSoup
from selenium import webdriver
from time import sleep

# Command-line options. Help strings are user-facing (Chinese) and kept verbatim.
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--url", type=str, default="https://jwgl.whu.edu.cn", help="先登录教务管理系统并打开成绩页面，复制该页面的url")
# Raw string: in a normal literal "\D" is an invalid escape sequence
# (SyntaxWarning on modern CPython, a future SyntaxError). The default
# value itself is unchanged.
parser.add_argument("--driver-path", type=str, default=r"E:\Driver\geckodriver.exe", help="gechodriver.exe的路径，对chromedriver不兼容")
parser.add_argument("--log-path", type=str, default="gecko.log", help="gecko的日志路径")
parser.add_argument("--username", type=str, default="", help="统一登录平台的账号")
# The only required option; everything else has a usable default.
parser.add_argument("--password", type=str, required=True, help="统一登录平台的密码")
args = parser.parse_args()

def login(username, password):
    """Log into the unified authentication portal via the global ``browser``.

    Args:
        username: account for the unified login platform.
        password: password for the unified login platform.

    Side effects: drives the module-level Selenium ``browser``; uses fixed
    ``sleep`` calls (no explicit waits) to let pages load.
    """
    # Click the "unified identity authentication" login entry.
    browser.find_element_by_id("tysfyzdl").click()
    sleep(2)
    # Fill in the student id and password, then submit.
    # NOTE(review): the result of this lookup is unused — presumably it only
    # checks that the login tab has rendered; confirm whether it can be removed.
    browser.find_element_by_class_name("auth_tab_content_item")
    browser.find_element_by_xpath("//*[@id=\"username\"]").send_keys(username)
    browser.find_element_by_xpath("//*[@id=\"password\"]").send_keys(password)
    # Absolute XPath to the submit button — brittle if the page layout changes.
    browser.find_element_by_xpath("/html/body/div/div[2]/div[3]/div[2]/div[4]/div[2]/form/p[2]/button").click()
    sleep(3)

# Select the academic year and the term on the grade-query page.
def change_term(year, term):
    """Pick entries from the two Chosen-style dropdowns on the query form.

    Args:
        year: index into the module-level ``yearsFirefox`` list.
        term: index into the module-level ``termsFirefox`` list.

    Side effects: drives the module-level Selenium ``browser``.
    """
    # Locate the academic-year dropdown ("xnm" — year selector).
    drop_down = browser.find_element_by_id("xnm_chosen")
    # Open the dropdown.
    drop_down.click()
    # Click the <li> option whose 1-based position comes from yearsFirefox.
    year_drop_xpath = "/html/body/div[1]/div/div/form/div/div[1]/div/div/div/div/ul/li[{}]".format(yearsFirefox[year])
    drop_down.find_element_by_xpath(year_drop_xpath).click()

    # Locate the term dropdown ("xqm" — term selector).
    drop_down = browser.find_element_by_id("xqm_chosen")
    # Open the dropdown.
    drop_down.click()
    # Click the <li> option whose 1-based position comes from termsFirefox.
    term_drop_xpath = "/html/body/div[1]/div/div/form/div/div[2]/div/div/div/div/ul/li[{}]".format(termsFirefox[term])
    drop_down.find_element_by_xpath(term_drop_xpath).click()

def read_columns(page_soup=None):
    """Extract the visible column headers of the jqGrid grade table.

    Args:
        page_soup: parsed page (BeautifulSoup-like object). Defaults to the
            module-level ``soup`` set by the main script, which keeps the
            original zero-argument call sites working.

    Returns:
        list[str]: text of every header cell that is not hidden.
    """
    if page_soup is None:
        page_soup = soup  # module-level global assigned in __main__
    rowheader = page_soup.find(role="rowheader")
    # jqGrid marks hidden header cells with this exact inline style.
    no_display = rowheader.find_all(style="width: 150px; display: none;")
    return [cell.text for cell in rowheader.children if cell not in no_display]

def read_table(writer=None, sheet_name=None):
    """Scrape the currently loaded grade table and append it as one sheet.

    Reads the module-level ``soup`` (set in ``__main__``), collects the text
    of every visible cell in each data row, and writes the resulting frame to
    *writer* under *sheet_name*. The workbook is flushed to disk when the
    caller finally closes the writer.

    Args:
        writer: an open ``pandas.ExcelWriter``.
        sheet_name: name of the sheet to create.
    """
    table = []
    grid_table = soup.find(class_="ui-jqgrid-btable")
    # jqGrid hides bookkeeping cells with this exact inline style.
    hidden_cells = grid_table.find_all(style="display:none;")
    for row in grid_table.find_all(role="row", tabindex="-1"):
        table.append([cell.text
                      for cell in row.find_all(role="gridcell")
                      if cell not in hidden_cells])
    columns = read_columns()
    try:
        # ``encoding=`` and the per-sheet ``writer.save()`` were dropped: both
        # are deprecated and removed in pandas >= 2.0; the caller's single
        # final save/close writes the workbook once instead.
        pd.DataFrame(table, columns=columns).to_excel(
            excel_writer=writer, sheet_name=sheet_name, index=False, header=True)
    except ValueError:
        # Raised e.g. when a row's width does not match the header count.
        print("Error while reading", sheet_name)

# Chinese numerals used to build sheet names such as "大一第一学期".
ChineseNum = ['一', '二', '三']
# Dropdown option positions differ between the Chrome- and Firefox-rendered
# pages, hence two variants. NOTE(review): only the *Firefox lists are used
# anywhere in this script; confirm the *Chrome lists are still needed.
yearsChrome = ['2019', '2020']
yearsFirefox = ['7', '6'] # <li> positions for 2020-2021, 2019-2020
termsChrome = ['3', '12', '16']
termsFirefox = ['2', '3', '4'] # <li> positions for terms 1, 2, 3


if __name__ == "__main__":
    # Start Firefox via geckodriver (Selenium 3 style keyword arguments).
    browser = webdriver.Firefox(executable_path=args.driver_path, service_log_path=args.log_path)

    try:
        # Open the login page of the grade system.
        browser.get(args.url)
        sleep(1)

        login(args.username, args.password)

        writer = pd.ExcelWriter("demo.xlsx")
        for year in range(len(yearsFirefox)):
            for term in range(len(termsFirefox)):
                change_term(year, term)
                # Click the query button.
                browser.find_element_by_xpath("//*[@id=\"search_go\"]").click()
                # The captcha must be solved by hand: poll until its dialog
                # is no longer displayed.
                captcha_xpath = "/html/body/div[1]/div/div/div[2]"
                captcha = browser.find_element_by_xpath(captcha_xpath)
                while captcha.is_displayed():
                    sleep(1)
                sleep(1)
                # Parse the rendered page; read_table reads this global soup.
                print("Start reading table")
                soup = BeautifulSoup(browser.page_source, 'lxml')
                read_table(writer, "大{}第{}学期".format(ChineseNum[year], ChineseNum[term]))
        # close() flushes and writes the workbook; the extra save() call was
        # redundant and is removed in pandas >= 2.0.
        writer.close()
    finally:
        # quit() (not close()) ends the whole geckodriver session rather than
        # just the current window, so no orphan driver process is left behind
        # even when a step above raises.
        browser.quit()