import datetime
import hashlib
import json
import time
from functools import wraps
from urllib.parse import urljoin
from urllib.parse import urlparse
from urllib.parse import urlunparse
from posixpath import normpath
import bson
import os
from loguru import logger
import string
import random
import requests

# 用来处理时间类型不能序列化的问题
from Api.retry_model import MyRetry


class DateEncoder(json.JSONEncoder):
    """JSON encoder that also serializes datetimes and BSON ObjectIds."""

    def default(self, obj):
        # Timestamps become "YYYY-MM-DD HH:MM:SS" strings.
        if isinstance(obj, datetime.datetime):
            return obj.strftime("%Y-%m-%d %H:%M:%S")
        # Mongo ObjectIds become their hex string form.
        if isinstance(obj, bson.ObjectId):
            return str(obj)
        # Anything else falls through to the base class (raises TypeError).
        return super().default(obj)


def create_md5(md_str):
    """Return the hexadecimal MD5 digest of a UTF-8 encoded string.

    :param md_str: input string to hash
    :return: 32-character lowercase hex digest
    """
    return hashlib.md5(md_str.encode("utf-8")).hexdigest()


def check_dir(path):
    """Ensure the directory *path* exists, creating it (with parents) if missing.

    Creation failures are logged, never raised.
    :param path: directory path to check/create
    """
    if os.path.exists(path):
        return
    logger.info(f"{path}文件夹不存在,即将进行创建")
    try:
        os.makedirs(path)
        logger.success(f"{path}文件夹创建成功")
    except Exception as e:
        logger.error(f"{path}文件夹创建异常, e:{e}")


def delete_file(file_path):
    """Best-effort file removal: filesystem errors are printed, never raised.

    :param file_path: path of the file to delete
    """
    try:
        os.remove(file_path)
    except OSError as e:
        # Narrowed from a bare ``except Exception`` so programming errors
        # (e.g. a non-path argument) still surface; a missing file or a
        # permission problem is just reported and ignored, as before.
        print(e)


# General-purpose URL completion helper.
def url_replenish(url, base):
    """Resolve a possibly-relative URL against *base* and normalize its path.

    :param url: relative or absolute URL; falsy input yields ''
    :param base: base URL used to resolve relative references
    :return: absolute URL with a normalized path (query/fragment preserved)
    """
    if not url:
        return ''
    absolute = urljoin(base, url)
    parts = urlparse(absolute)
    # Fix: normpath('') returns '.', which previously produced URLs like
    # "http://host/." for path-less inputs; keep an empty path empty.
    path = normpath(parts.path) if parts.path else ''
    return urlunparse((parts.scheme, parts.netloc, path, parts.params, parts.query, parts.fragment))


def make_password():
    """Generate a 13-character password: 6 random letters then 7 random digits.

    NOTE(review): ``random`` is not cryptographically secure — switch to the
    ``secrets`` module if these passwords protect anything sensitive.

    :return: password string matching [A-Za-z]{6}[0-9]{7}
    """
    # random.choices accepts a string population directly; the original
    # wrapped it in pointless list comprehensions.
    letters = ''.join(random.choices(string.ascii_letters, k=6))
    digits = ''.join(random.choices(string.digits, k=7))
    return letters + digits


@MyRetry(times=3, return_msg="获取代理为空！")
def get_cookies_data(Type="cnki", num=1):
    """Fetch a batch of cookies from the cookie-pool service.

    :param Type: source site name: cnki, wanfang, cma
    :param num: number of cookie entries to request
    :return: the service's "data" payload, or {} when it is empty
    """
    url = f"http://cookie.dic.cool/get?num={num}&token=%qq123456..&name={Type}"
    payload = requests.get(url, timeout=4).json()
    cookies = payload["data"]
    if cookies:
        return cookies
    logger.error(f"{Type}Cookies 为空！")
    return {}


def timeit(func):
    """Decorator: log how long the wrapped function takes to run.

    :param func: function to wrap
    :return: wrapped function that logs elapsed time and forwards the result
    """

    @wraps(func)
    def inner(*args, **kwargs):
        start = time.time()
        ret = func(*args, **kwargs)
        elapsed = time.time() - start
        if elapsed < 60:
            logger.debug(f'函数 {func.__name__} 花费时间：{round(elapsed, 2)}秒')
        else:
            # Fix: the original bound the minutes to a local named ``min``,
            # shadowing the builtin inside this scope.
            minutes, seconds = divmod(elapsed, 60)
            logger.debug(f'函数 {func.__name__} 花费时间: {round(minutes)}分\t{round(seconds, 2)}秒')
        return ret

    return inner


def unique_dicts_by_key(dicts, key):
    """Deduplicate a list of dicts by the value under *key*.

    The first occurrence of each value wins; original order is preserved.
    :param dicts: list of dicts, e.g. [{}, {}, {}]
    :param key: dict key whose value identifies duplicates
    :return: new list with later duplicates dropped
    """
    seen_values = set()
    result = []
    for item in dicts:
        value = item[key]
        if value in seen_values:
            continue
        seen_values.add(value)
        result.append(item)
    return result


def find_files_in_directory(directory, file_extension=None, max_count=None) -> list:
    """Recursively collect file paths under *directory*.

    :param directory: root directory to search
    :param file_extension: extension filter such as ".txt" or ".pdf";
        None keeps every file
    :param max_count: stop after this many matches; None collects all
    :return: list of matching file paths
    """
    matches = []
    # Hoist the lower-cased suffix out of the loop.
    suffix = file_extension.lower() if file_extension is not None else None
    for root, _dirs, names in os.walk(directory):
        for name in names:
            # Apply the optional case-insensitive extension filter.
            if suffix is not None and not name.lower().endswith(suffix):
                continue
            matches.append(os.path.join(root, name))
            # Honour the optional cap on the number of results.
            if max_count is not None and len(matches) >= max_count:
                return matches
    return matches
