import logging

import requests
from bs4 import BeautifulSoup

# html解析工具
from api.OpenAccountApi import OpenAccountApi
from api.RegLoginApi import RegLoginApi


def html_util(response):
    """Extract the third-party form target URL and form fields from a response.

    The response body is expected to be JSON shaped like
    ``{"description": {"form": "<form ...>...</form>"}}`` where the value is an
    HTML fragment containing one ``<form>`` with hidden ``<input>`` fields.

    :param response: a ``requests.Response`` whose JSON payload carries the form HTML
    :return: a two-element list ``[url, data_dict]`` — the form's ``action`` URL
             and a dict of ``input`` name/value pairs to post as the request body
    """
    # 1. Pull the HTML form fragment out of the JSON response.
    data = response.json().get("description").get("form")
    # Log the raw third-party form HTML for debugging.
    logging.info(f"获取的第三方接口的数据为{data}")
    # 2. Parse the fragment with BeautifulSoup.
    soup = BeautifulSoup(data, "html.parser")
    # 3. The form's "action" attribute is the URL the next request must target.
    url = soup.form.get("action")

    # 4. Collect every <input> tag's name/value pair as the request body.
    #    (The original code had two discarded .get() calls here — removed.)
    data_dict = {
        sample_input.get("name"): sample_input.get("value")
        for sample_input in soup.find_all("input")
    }

    # Log the extracted URL and body for traceability.
    logging.info(f"提取的第三方请求数据为{[url, data_dict]}")
    # 5. Return both pieces together so callers can forward the request.
    return [url, data_dict]


if __name__ == '__main__':
    # Smoke-test driver: log in, request the account-opening form,
    # then replay it against the third-party endpoint.
    ses = requests.session()
    reg_login_api = RegLoginApi(ses)
    reg_login_api.user_login("18373444511", "11111MM")
    open_account_api = OpenAccountApi(ses)
    resp = open_account_api.open_account()
    # Parse the response ONCE and unpack both values
    # (the original called html_util(resp) twice, doing the work twice).
    url, form_dict = html_util(resp)
    print(url)
    print(form_dict)
    resp = open_account_api.third_open_account(url, form_dict)
