import json 
import os
from config import DIR_PATH
import logging.handlers  # importing the submodule explicitly so logging.handlers resolves
from bs4 import BeautifulSoup
def read_json(filename, key):
    """Read ``DIR_PATH/data/<filename>`` (UTF-8 JSON) and return parameter rows.

    Each element stored under *key* is expected to be a dict; the first
    value of every dict is dropped and the remaining values are returned
    as a tuple (suitable for parameterized tests).

    :param filename: name of the JSON file inside the ``data`` directory.
    :param key: top-level key whose value is a list of dicts.
    :return: list of tuples, one per dict; ``[]`` when *key* is absent.
    """
    rows = []
    # os.path.join handles separators portably instead of manual os.sep glue.
    file_path = os.path.join(DIR_PATH, "data", filename)
    with open(file_path, encoding="utf-8") as f:
        # .get(key) is None when the key is missing; fall back to [] so the
        # caller gets an empty parameter list instead of a TypeError.
        for record in json.load(f).get(key) or []:
            # dicts preserve insertion order (Py3.7+); [1:] drops the first
            # field (e.g. a case id/description) and keeps the rest.
            rows.append(tuple(record.values())[1:])
    return rows

class GetLog:
    """Lazily builds and caches a single configured logger (log wrapper)."""

    # Cached logger instance; populated by the first get_log() call.
    log = None

    @classmethod
    def get_log(cls):
        """Return the shared root logger, configuring it on first use only.

        Bug fixed: the original reset ``cls.log = None`` and then tested a
        fresh local variable on every call, so each call attached another
        handler and every message was written in duplicate. Checking the
        class-level cache makes configuration run exactly once.
        """
        if cls.log is None:
            # 1. Get the root logger and set its threshold.
            cls.log = logging.getLogger()
            cls.log.setLevel(logging.INFO)
            # 2. Handler: rotate the file at midnight, keep 3 backups.
            file_path = DIR_PATH + os.sep + "log" + os.sep + "p2p.log"
            tf = logging.handlers.TimedRotatingFileHandler(filename=file_path,
                                                           when="midnight",
                                                           interval=1,
                                                           backupCount=3,
                                                           encoding="UTF-8")
            # 3. Formatter: timestamp level [file(function:line)] - message.
            fmt = "%(asctime)s %(levelname)s [%(filename)s(%(funcName)s:%(lineno)d)] - %(message)s"
            fm = logging.Formatter(fmt)
            # 4. Attach the formatter to the handler.
            tf.setFormatter(fm)
            # 5. Attach the handler to the logger.
            cls.log.addHandler(tf)
        return cls.log
# 提取html元素
def parser_html(result):
    #1.提取html
    html = result.json().get("description").get("form")
    #2.获取bs对象
    bs = BeautifulSoup(html, "html.parser")
    #3.提取url
    url = bs.form.get("action")
    print("提取url:", url)
    data = {}
    #4.查找所有input标签    
    for input in bs.find_all("input"):
        data[input.get("name")] = input.get("value")
    return url, data
    #5.遍历，并进行组装
    
    

if __name__ == "__main__":
    # Smoke test: load the registration fixture, then emit one INFO record
    # to confirm the logger is wired up correctly.
    read_json("register_login.json", "img_code")
    logger = GetLog.get_log()
    logger.info("信息级别测试")