import json
import logging
import logging.handlers
import os

from bs4 import BeautifulSoup
from config import DIR_PATH

import pymysql


# Assertion helper: validates status code, JSON description, and body text.
def common_assert(response, status_code=None, expect_json=None, expect_text=None):
    """Run the configured assertions against *response*.

    Each check runs only when its expected value is truthy, so callers can
    assert any combination of status code, JSON description substring, and
    raw-text substring.
    """
    # Status-code check (skipped when no expectation is given).
    if status_code:
        actual_code = response.status_code
        assert status_code == actual_code, "状态码断言错误, 预期状态码: {} != 实际状态码: {}".format(status_code, actual_code)
    # JSON "description" substring check.
    if expect_json:
        description = response.json().get("description")
        assert expect_json in description, "json断言错误, 实际结果: {} 不包含 预期结果: {}".format(description, expect_json)
    # Raw response-text substring check.
    if expect_text:
        body = response.text
        assert expect_text in body, "text断言错误, 实际结果: {} 不包含 预期结果: {}".format(body, expect_text)


# JSON data-file reader for parametrized test cases.
def read_json(file_name, key):
    """Load ``data/<file_name>`` and return the entries stored under *key*.

    Each entry (a dict) is converted to a tuple of its values with the
    first value — the case description — dropped.

    :param file_name: file name inside the project's ``data`` directory
    :param key: top-level key in the JSON document holding a list of dicts
    :return: list of tuples, one per entry
    """
    # Build the full path portably (was manual os.sep concatenation with a
    # stray trailing semicolon).
    file_path = os.path.join(DIR_PATH, "data", file_name)
    with open(file_path, "r", encoding="utf-8") as f:
        # Slice off the leading description value of every entry.
        return [tuple(data.values())[1:] for data in json.load(f).get(key)]


# Logging helper: lazily builds and caches a single shared logger.
class Getlog:
    # Cached logger instance shared by every caller.
    log = None

    @classmethod
    def get_log(cls):
        """Return the shared logger, configuring it on first use.

        The logger writes INFO-level records to ``log/p2p.log``, rotating
        the file at midnight and keeping three old copies.
        """
        # Guard clause: hand back the cached logger when already built.
        if cls.log is not None:
            return cls.log
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        # TimedRotatingFileHandler: write to a file, splitting it by time.
        log_file = DIR_PATH + os.sep + "log" + os.sep + "p2p.log"
        handler = logging.handlers.TimedRotatingFileHandler(
            filename=log_file,
            when="midnight",
            interval=1,
            backupCount=3,
            encoding="utf-8",
        )
        # Attach the record format, then wire the handler into the logger.
        handler.setFormatter(logging.Formatter(
            "%(asctime)s %(levelname)s [%(filename)s(%(funcName)s:%(lineno)d)] - %(message)s"
        ))
        logger.addHandler(handler)
        cls.log = logger
        return cls.log

# HTML parsing helper for third-party account-opening responses.
def html_parser(result):
    """Extract the form action URL and input fields from a response.

    :param result: HTTP response whose JSON ``description.form`` field
        holds an HTML fragment containing a ``<form>``
    :return: tuple ``(url, data)`` — the form's ``action`` attribute and a
        dict mapping each ``<input>`` name to its value
    """
    Getlog.get_log().info("开始解析结果中的html")
    # 1. Pull the HTML fragment out of the JSON body.
    html = result.json().get("description").get("form")
    # 2. Build a BeautifulSoup tree over it.
    bs = BeautifulSoup(html, "html.parser")
    # 3. The form's action attribute is the submission URL.
    url = bs.form.get("action")
    # 4. Collect every <input> name/value pair (loop variable renamed — the
    #    original shadowed the builtin `input`).
    data = {tag.get("name"): tag.get("value") for tag in bs.find_all("input")}
    return url, data

# Database helper: run a single SQL statement and return its result.
def connect_mysql(sql):
    """Execute *sql* against the czbk_member database.

    :param sql: SQL statement to run
    :return: all rows (tuple of tuples) for a SELECT statement, the
        affected row count for any other statement, or ``None`` when
        execution fails
    """
    connect = None
    cursor = None
    try:
        # 1. Open the connection.
        # NOTE(review): credentials are hard-coded test-environment values;
        # consider moving them into config alongside DIR_PATH.
        connect = pymysql.connect(host="121.43.169.97",
                                  user="student",
                                  password="P2P_student_2025",
                                  database="czbk_member",
                                  port=3306, charset="utf8", autocommit=True)
        # 2. Create the cursor.
        cursor = connect.cursor()
        # 3. Execute the statement (the return value of execute() was
        #    previously stored in an unused local).
        cursor.execute(sql)
        # 4. Choose what to return based on the statement kind.
        if sql.lower().split()[0] == "select":
            # SELECT: hand back every fetched row.
            return cursor.fetchall()
        # Anything else: hand back the affected row count.
        return cursor.rowcount
    except Exception as e:
        print("出错了", e)
        # Bug fix: the original called rollback() unconditionally, which
        # raised AttributeError on None when pymysql.connect() itself failed.
        if connect:
            connect.rollback()
    finally:
        # 5./6. Release the cursor and the connection in reverse order.
        if cursor:
            cursor.close()
        if connect:
            connect.close()

# Clear test data (the SQL statements are placeholders to be filled in).
def clear_data():
    """Run each cleanup SQL statement in turn via connect_mysql."""
    for cleanup_sql in ("", "", "", ""):
        connect_mysql(cleanup_sql)




if __name__ == '__main__':
    # Ad-hoc smoke check: fetch member info for a known phone number.
    query = "select info.* from mb_member as mb inner join mb_member_info  as info on info.member_id = mb.id where mb.phone in ('1360001111');"
    print(connect_mysql(query))
