import asyncio
from pyppeteer import launch
from pyppeteer_stealth import stealth  # 反爬
from bs4 import BeautifulSoup
import time

# Configuration: the wjx.cn survey URL and the path to the Chrome executable.
config = {
    'wjx_url': 'https://www.wjx.cn/vj/QSpKVGa.aspx',
    'chrome_path': 'C:\\Users\\jiangshan\\AppData\\Local\\Google\\Chrome\\Application\\chrome.exe'
}

# Keyword-triggered answers: if a question title contains a key below,
# the corresponding value is used as the answer for that question.
ans_dict = {
    '学号': '2019211402',
    '姓名': '姜山',
    '性别': '男',
    '学院': '计算机学院',
    '班级': '2019211318',
    '专业': '数据科学与大数据技术',
    '联系方式': '18462063668',
    '手机': '18462063668',
    '电话': '18462063668',
    'QQ': '1185944304',
    '微信': 'buptsg2020'
}


def get_answer(res):
    """Extract one answer per survey question from the page HTML.

    Args:
        res: Raw HTML text of the survey page.

    Returns:
        A list of answer strings, one entry for EVERY question title found
        (elements with class ``div_title_question``), in document order.
        Questions whose title contains a key of ``ans_dict`` get that
        key's value; unmatched questions get '' so the list stays aligned
        with the ``#q1..#qN`` input ids the caller types into.
    """
    soup = BeautifulSoup(res, "html.parser")
    # find_all is the modern name; findAll is a deprecated alias.
    questions = soup.find_all(attrs={"class": "div_title_question"})
    ans = []
    for question in questions:
        ques_str = question.text
        print('Q:', ques_str)
        for key in ans_dict:
            if key in ques_str:
                ans.append(ans_dict[key])
                print('A:', ans_dict[key])
                break
        else:
            # No keyword matched: keep a placeholder so later answers are
            # not shifted into the wrong question's input field.
            ans.append('')
    return ans


async def run():
    """Open the survey page, auto-fill the answers, and submit the form."""
    wjx_url = config['wjx_url']
    driver = await launch({
        # Path to the local Chrome executable.
        'executablePath': config['chrome_path'],
        # Pyppeteer is headless by default; show the window instead.
        'headless': False,
        # Fixed window size so the whole page is rendered.
        'args': ['--no-sandbox', '--window-size=1024,768']
    })
    page = await driver.newPage()
    await page.setViewport({'width': 1024, 'height': 768})
    # Apply stealth patches (anti-bot evasion) before navigating.
    await stealth(page)
    await page.goto(wjx_url)
    # Use asyncio.sleep, not time.sleep: time.sleep would block the whole
    # event loop (and any pending browser I/O) inside this coroutine.
    await asyncio.sleep(1)
    page_text = await page.content()
    page_answer = get_answer(page_text)

    # Survey input fields have ids #q1, #q2, ... in document order.
    for index, answer in enumerate(page_answer, start=1):
        await page.type('#q' + str(index), answer)

    await page.click('#submit_button')


if __name__ == '__main__':
    # Drive the coroutine to completion on the default event loop.
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(run())
