import base64
import concurrent.futures
import datetime
import hashlib
import io
import json
import multiprocessing
import re
import shutil
import tarfile
import urllib
import uuid
import csv
import pandas as pd
import pytz
import time
from functools import wraps
from urllib.parse import urljoin
from urllib.parse import urlparse
from urllib.parse import urlunparse
from PIL import Image, ImageChops
from posixpath import normpath
from typing import List, Dict, Union
import bson
import os
from loguru import logger
import string
import random
import requests
import chardet
from tqdm import tqdm
import hashlib

import pandas as pd


def read_excel_to_dict(filepath, sheet_name='Sheet1', read_all_sheets=False):
    """
    Read an Excel file and return its contents as records.

    :param filepath: str, path to the Excel file.
    :param sheet_name: str, worksheet to read; defaults to 'Sheet1'.
    :param read_all_sheets: bool, when True read every worksheet.
    :return: for a single sheet, a list of row dicts; for all sheets, a dict
             mapping sheet name -> list of row dicts; None when reading fails.
    """
    try:
        if read_all_sheets:
            frames = pd.read_excel(filepath, sheet_name=None)
            return {
                name: frame.fillna('').to_dict(orient='records')
                for name, frame in frames.items()
            }
        frame = pd.read_excel(filepath, sheet_name=sheet_name)
        return frame.fillna('').to_dict(orient='records')
    except Exception as e:
        print(f"读取 Excel 文件出错: {e}")
        return None
def get_file_hash(file_path):
    """Return the SHA-256 hex digest of the file at *file_path*.

    The file is consumed in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.sha256()
    with open(file_path, "rb") as stream:
        chunk = stream.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = stream.read(4096)
    return digest.hexdigest()


def extract_tar(key, target_path, pdf_path):
    """
    Extract a tar archive into a per-archive subdirectory of *pdf_path*.

    :param key: archive identifier; only its basename is used to locate the
                tar file inside *target_path*.
    :param target_path: directory containing the tar file.
    :param pdf_path: root directory under which a folder named after the
                     archive (basename without extension) is created.
    :return: True on success, False when extraction fails.
    """
    try:
        key = target_path + "/" + key.split('/')[-1]
        save_path = os.path.join(pdf_path, key.split('/')[-1].split('.')[0])
        os.makedirs(save_path, exist_ok=True)
        # Open the archive once: getmembers() gives both the member list and
        # the total count (the previous version opened the file twice).
        with tarfile.open(key, 'r') as tar:
            members = tar.getmembers()
            progress_bar = tqdm(members, total=len(members), desc=f'Extracting {key}')
            # Iterating the tqdm wrapper advances the bar automatically; the
            # previous explicit update(1) inside the loop double-counted
            # progress, so the bar ran at twice the real rate.
            for member in progress_bar:
                tar.extract(member, path=save_path)
                progress_bar.set_postfix({'extracted': os.path.basename(member.name)})
        return True
    except Exception as e:
        logger.exception(f"{key} -- {e}")
        return False


def parse_time_dong_eight(time_str):
    # Convert a "YYYY-MM-DD HH:MM:SS" string into a datetime shifted by the
    # Asia/Shanghai ("East Eight", UTC+8) zone offset.
    from datetime import datetime as dt, timedelta
    time_format = "%Y-%m-%d %H:%M:%S"
    time_zone = pytz.timezone('Asia/Shanghai')
    # NOTE(review): replace(tzinfo=<pytz zone>) attaches the zone's LMT
    # offset (+08:06 for Asia/Shanghai), not +08:00 — pytz requires
    # localize() for the standard offset. The offset re-parsed from the
    # stringified datetime below therefore may be 8h06m, so the result can
    # be 6 minutes off a plain +8h shift — confirm this is intended.
    time_str = str(dt.strptime(time_str, time_format).replace(tzinfo=time_zone))

    # Split the stringified aware datetime ("... HH:MM:SS+HH:MM") into the
    # naive part and the zone-offset suffix; rebinds `dt` from the class to
    # the parsed datetime instance.
    time_part, offset_str = time_str.rsplit('+', 1)
    dt = dt.strptime(time_part, "%Y-%m-%d %H:%M:%S")
    offset_hours, offset_minutes = map(int, offset_str.split(':'))

    # Build a timedelta from the parsed offset components.
    offset = datetime.timedelta(hours=offset_hours, minutes=offset_minutes)

    # Shift the naive datetime by the offset. Because the string was split
    # on '+', offset_hours is always >= 0 here and only the first branch
    # can run.
    if offset_hours >= 0:
        dt += offset
    else:
        dt -= offset

    return dt


# from public_method.Retry_model import MyRetry


class DateEncoder(json.JSONEncoder):
    """JSON encoder that handles datetimes and BSON ObjectIds.

    datetime.datetime values are rendered as "YYYY-MM-DD HH:MM:SS";
    bson.ObjectId values via str(); anything else falls back to the
    default JSONEncoder behaviour (which raises TypeError).
    """

    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.strftime("%Y-%m-%d %H:%M:%S")
        if isinstance(obj, bson.ObjectId):
            return str(obj)
        return super().default(obj)


def create_md5(md_str):
    """Return the hex MD5 digest of *md_str*, encoded as UTF-8."""
    return hashlib.md5(md_str.encode('utf-8')).hexdigest()


def check_dir(path):
    """Create *path* (and any missing parents) when it does not exist yet,
    logging the outcome; existing paths are left untouched."""
    if os.path.exists(path):
        return
    logger.info(f"{path}文件夹不存在,即将进行创建")
    try:
        os.makedirs(path)
    except Exception as e:
        logger.error(f"{path}文件夹创建异常, e:{e}")
    else:
        logger.success(f"{path}文件夹创建成功")


def delete_file(file_path):
    """Best-effort removal of *file_path*; failures are printed, never raised."""
    try:
        os.remove(file_path)
    except Exception as e:
        print(e)


# Compiled once at import time: the pattern never changes and is_html_complex
# may be called per document, so recompiling on every call (as before) was
# wasted work.
_HTML_TAG_PATTERN = re.compile(
    r'<!DOCTYPE html>|<html[^>]*>|<body[^>]*>|<head[^>]*>|<title[^>]*>'
    r'|<div[^>]*>|<span[^>]*>|<a[^>]*>|<img[^>]*>|<h[1-6][^>]*>|<p[^>]*>',
    re.IGNORECASE,
)


def is_html_complex(text):
    """
    Heuristically decide whether a string looks like HTML.

    :param text: the string to test
    :return: True when a common HTML tag is present, False otherwise
    """
    return bool(_HTML_TAG_PATTERN.search(text))


def url_replenish(url, base):
    """Resolve *url* against *base* and normalize the path component.

    Returns '' for a falsy url.
    """
    if not url:
        return ''
    parsed = urlparse(urljoin(base, url))
    clean_path = normpath(parsed[2])
    return urlunparse((parsed.scheme, parsed.netloc, clean_path,
                       parsed.params, parsed.query, parsed.fragment))


def make_password():
    """Return a 13-character pseudo-random string: 6 letters then 7 digits."""
    letters = ''.join(random.choices(string.ascii_letters, k=6))
    digits = ''.join(random.choices(string.digits, k=7))
    return letters + digits


# @MyRetry(times=3, return_obj={}, return_msg="获取代理为空！")
def get_cookies_data(Type="cnki", num=1):
    """
    Fetch a batch of cookies from the cookie-pool service.
    :param Type: pool name: cnki, wanfang, cma
    :param num: number of cookies to request
    :return: the service's "data" payload, or {} when it is empty
    """
    url = f"http://cookie.dic.cool/get?num={num}&token=%qq123456..&name={Type}"
    payload = requests.get(url, timeout=4).json()
    cookies = payload["data"]
    if not cookies:
        logger.error(f"{Type}Cookies 为空！")
        return {}
    return cookies


def timeit(func):
    """
    Decorator: log how long the wrapped function takes to run.
    :param func: the function to time
    :return: the wrapped function
    """

    @wraps(func)
    def inner(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        if elapsed < 60:
            logger.debug(f'函数 {func.__name__} 花费时间：{round(elapsed, 2)}秒')
        else:
            minutes, seconds = divmod(elapsed, 60)
            logger.debug(f'函数 {func.__name__} 花费时间: {round(minutes)}分\t{round(seconds, 2)}秒')
        return result

    return inner


def unique_dicts_by_key(dicts, key):
    """
    De-duplicate a list of dicts by the value stored under *key*.

    The first dict seen for each distinct value wins; input order is kept.
    :param dicts: list of dicts, e.g. [{}, {}, {}]
    :param key: the key whose value identifies duplicates
    :return: a new list with one dict per distinct key value
    """
    seen_values = set()
    result = []
    for item in dicts:
        value = item[key]
        if value in seen_values:
            continue
        seen_values.add(value)
        result.append(item)
    return result


def delete_empty_folders(folder_path):
    """Remove every empty directory beneath *folder_path*, bottom-up, then
    remove *folder_path* itself if it ended up empty."""
    try:
        if not (os.path.exists(folder_path) and os.path.isdir(folder_path)):
            logger.debug(f"路径 '{folder_path}' 不存在或者不是一个目录")
            return
        # Bottom-up walk so children are pruned before their parents are
        # checked for emptiness.
        for root, dirs, _files in os.walk(folder_path, topdown=False):
            for name in dirs:
                candidate = os.path.join(root, name)
                if not os.listdir(candidate):
                    os.rmdir(candidate)
        if not os.listdir(folder_path):
            os.rmdir(folder_path)
    except Exception as e:
        logger.error(e)


def copy_and_rename_file(source_file, destination_dir, new_filename):
    """
    Copy a file into a directory under a new name.

    The file is copied directly to its final name. The previous
    copy-then-rename implementation briefly wrote the copy under the
    source's basename, which could clobber an unrelated file in
    *destination_dir* that happened to share that name.

    Args:
        source_file (str): path of the file to copy
        destination_dir (str): target directory
        new_filename (str): name the copy should have
    """
    try:
        destination_file = os.path.join(destination_dir, new_filename)
        shutil.copy(source_file, destination_file)
        print(f"文件已复制并重命名为: {destination_file}")
    except Exception as e:
        print(f"Error 复制和重命名文件: {e}")


def get_common_keys(dict1, dict2):
    """
    Return the keys present in both dictionaries.

    Parameters:
    dict1 -- first dictionary
    dict2 -- second dictionary

    Returns:
    a set containing the shared keys
    """
    # dict views support set intersection directly and yield a plain set.
    return dict1.keys() & dict2.keys()


def merge_dicts(dict1, dict2):
    """
    Merge two dicts, preferring non-empty values.

    For each key present in either dict: when one side's value is falsy and
    the other's is truthy, the truthy value wins; otherwise dict1's value is
    used, falling back to dict2's only when dict1's is None.

    Parameters:
    dict1 -- first dictionary (takes precedence on ties)
    dict2 -- second dictionary

    Returns:
    a new merged dictionary
    """
    merged = {}
    for key in dict1.keys() | dict2.keys():
        first, second = dict1.get(key), dict2.get(key)
        if not first and second:
            merged[key] = second
        elif first and not second:
            merged[key] = first
        else:
            merged[key] = first if first is not None else second
    return merged


def write_to_csv(file_path: str, data: Union[List[List], List[Dict]], headers: List[str] = None, append: bool = False):
    """
    Write rows to a CSV file.

    Parameters:
    - file_path: path of the CSV file
    - data: either a list of row lists or a list of row dicts
    - headers: column headers; for dict rows they default to the first row's keys
    - append: append ('a') to an existing file instead of overwriting ('w')

    Fix over the previous version: the empty-data check now happens BEFORE
    the file is opened, so calling with no data in overwrite mode no longer
    truncates an existing file as a side effect.

    Examples:
    write_to_csv('data.csv', [['Name', 'Age'], ['Alice', 30], ['Bob', 25]])
    write_to_csv('data.csv', [{'Name': 'Alice', 'Age': 30}, {'Name': 'Bob', 'Age': 25}], append=True)
    """
    if not data:
        print("数据为空，无法写入CSV文件。")
        return

    mode = 'a' if append else 'w'
    with open(file_path, mode, newline='', encoding='utf-8') as csv_file:
        if isinstance(data[0], dict):
            # Dict rows: DictWriter, auto-deriving headers when absent.
            if headers is None:
                headers = list(data[0].keys())
            writer = csv.DictWriter(csv_file, fieldnames=headers)

            # Emit the header only for a fresh or still-empty file.
            if not append or csv_file.tell() == 0:
                writer.writeheader()
            writer.writerows(data)
        else:
            # List rows: plain writer; header only if explicitly supplied.
            writer = csv.writer(csv_file)

            if headers:
                writer.writerow(headers)
            writer.writerows(data)

    print(f"数据已成功写入 {file_path}")


def find_files_in_directory(directory, file_extension=None, max_count=None) -> list:
    """
    Recursively collect file paths under *directory*.
    :param directory: root directory to search
    :param file_extension: optional extension filter such as ".txt" (case-insensitive)
    :param max_count: stop after collecting this many files; None means unlimited
    :return: list of matching file paths
    """
    # Hoist the lowercased suffix out of the loop; None disables filtering.
    suffix = file_extension.lower() if file_extension is not None else None
    matches = []

    for root, _dirs, names in os.walk(directory):
        for name in names:
            if suffix is not None and not name.lower().endswith(suffix):
                continue
            matches.append(os.path.join(root, name))
            if max_count is not None and len(matches) >= max_count:
                return matches

    return matches


def change_file_extension(src_path, new_extension, is_recursive=False, overwrite=False):
    """
    Rename a file, or every file in a directory, to a new extension.

    :param src_path: source file or directory path
    :param new_extension: target extension, with or without the leading dot
    :param is_recursive: when src_path is a directory, also process subdirectories
    :param overwrite: rename even when the target file already exists
    """
    if not os.path.exists(src_path):
        print(f"路径不存在: {src_path}")
        return False

    if not new_extension.startswith('.'):
        new_extension = f'.{new_extension}'

    def rename_one(file_path):
        # Swap the extension while keeping directory and stem unchanged.
        dir_name, base_name = os.path.split(file_path)
        stem, _ = os.path.splitext(base_name)
        new_file_path = os.path.join(dir_name, f"{stem}{new_extension}")

        if os.path.exists(new_file_path) and not overwrite:
            print(f"目标文件已存在且未设置覆盖: {new_file_path}")
            return

        os.rename(file_path, new_file_path)
        print(f"已重命名: {file_path} -> {new_file_path}")

    if os.path.isfile(src_path):
        rename_one(src_path)
    elif os.path.isdir(src_path):
        for root, _, names in os.walk(src_path):
            for name in names:
                rename_one(os.path.join(root, name))
            if not is_recursive:
                # Non-recursive: only the top level of the directory.
                break
    else:
        print(f"无效路径: {src_path}")


class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)


def timestamp_to_datetime():
    """Return the current local time formatted as "YYYY-MM-DD HH:MM:SS"."""
    from datetime import datetime
    now_seconds = int(time.time())
    return datetime.fromtimestamp(now_seconds).strftime("%Y-%m-%d %H:%M:%S")


def count_subdirectories(folder_path):
    """
    Count the immediate subdirectories of a directory.
    Args:
        folder_path: directory to inspect

    Returns: number of direct child directories, or 0 on error

    """
    try:
        # os.scandir is a context manager; closing it promptly releases the
        # OS directory handle (the previous version leaked it until GC).
        with os.scandir(folder_path) as entries:
            return sum(1 for entry in entries if entry.is_dir())
    except Exception as e:
        print(f"Error: {str(e)}")
        return 0


def get_all_subdirectories(folder_path):
    """
    Collect every subdirectory under *folder_path*, including nested ones.

    Parameters:
    - folder_path: root directory

    Returns:
    - subdirectories: list of all subdirectory paths, each parent listed
      immediately before its own children (pre-order)
    """
    subdirectories = []

    def recursive_scan(current_folder):
        # Close the scandir handle deterministically instead of leaking it
        # to the garbage collector as the previous version did.
        with os.scandir(current_folder) as entries:
            children = [entry.path for entry in entries if entry.is_dir()]
        for child in children:
            subdirectories.append(child)
            recursive_scan(child)

    recursive_scan(folder_path)

    return subdirectories


def get_vector_from_file_helper(file, cnki_id):
    """
    Scan one vector file for a JSON line matching *cnki_id* and return its
    "title_vector_quantity" value, or None when the file has no match.

    Sets the module-global ``found_result`` flag to True on a hit so the
    dispatching thread pool (get_vector_from_file) can stop early.
    """
    global found_result
    with open(file, "r") as f:
        # Cheap pre-check: skip files that do not mention the id at all.
        if not re.search(cnki_id, f.read()):
            return None
        # The whole file was just consumed, so rewind before the line-by-line
        # parse. The previous version skipped this, so readline() only ever
        # returned '' at EOF, json.loads('') raised, and `continue` spun the
        # while-True loop forever.
        f.seek(0)
        for raw_line in f:
            try:
                record = json.loads(raw_line)
            except json.JSONDecodeError:
                continue
            # NOTE(review): keeps the original match condition — a record
            # matches when it maps the id onto itself ({cnki_id: cnki_id}).
            # Confirm that is really the file format and not a typo for a
            # fixed key name such as "cnki_id".
            if record.get(cnki_id) == cnki_id:
                found_result = True  # signal other workers to stop
                return record.get("title_vector_quantity")
    return None


def get_vector_from_file(cnki_id, vector_files, max_workers=20) -> dict:
    """
    Search vector files in parallel for the vector stored under *cnki_id*.
    :param cnki_id: article_url_md5 from the Cnki article table
    :param vector_files: list of candidate vector file paths
    :param max_workers: maximum number of worker threads
    :return: the vector found, or {} when no file contains it
    """
    global found_result  # shared early-stop flag, written by the helper
    found_result = False

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {
            executor.submit(get_vector_from_file_helper, file, cnki_id): file
            for file in vector_files
        }
        for future in concurrent.futures.as_completed(futures):
            result = future.result()
            if result is not None:
                # First hit wins: cancel whatever has not started yet and
                # return the value. (The previous version checked the global
                # flag BEFORE reading the future and could break out
                # returning {} even though a worker had already found the
                # vector.)
                for pending in futures:
                    pending.cancel()
                return result

    return {}


def get_cpu_count():
    """
    Return the number of CPU cores.
    Returns: os.cpu_count() when available, otherwise the value reported by
    the multiprocessing module.
    """
    # os.cpu_count() returns None (never 0) when undetermined, so `or`
    # selects the multiprocessing fallback exactly in that case.
    return os.cpu_count() or multiprocessing.cpu_count()


def base64_to_image(img_base64):
    """Decode a base64 string and return the result as a PIL Image."""
    raw_bytes = base64.b64decode(img_base64)
    buffer = io.BytesIO(raw_bytes)
    return Image.open(buffer)


def get_img_base64(single_image_path):
    """Read a file and return its contents base64-encoded as a str."""
    with open(single_image_path, 'rb') as fp:
        raw = fp.read()
    return base64.b64encode(raw).decode()


def monitor_execution_time(timeout):
    """
    Decorator factory that monitors a function's runtime: the wrapped call
    returns the function's result only when execution took LONGER than
    *timeout* seconds, and None otherwise.
    Args:
        timeout: threshold in seconds

    Returns:
        the decorator

    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            started = time.time()
            result = func(*args, **kwargs)
            elapsed = time.time() - started
            # NOTE(review): result is kept only for SLOW calls — the inverse
            # of a typical timeout guard; confirm this is intended.
            return result if elapsed > timeout else None

        return wrapper

    return decorator


def read_excel_to_list(file_path, sheet_name):
    """
    Read one worksheet of an Excel file into a list of row dicts.

    Parameters:
    - file_path: path of the Excel file
    - sheet_name: worksheet name

    Returns:
    a list of dicts, one per row, or None when reading fails.
    """
    try:
        frame = pd.read_excel(file_path, sheet_name=sheet_name)
        return frame.to_dict(orient='records')
    except Exception:
        return None


def save_base64_as_image(encoded_string, output_path):
    """
    Decode a base64 image payload (optionally a full data-URI) and write it
    to *output_path*. Errors are printed, never raised.

    :param encoded_string: base64 data, with or without a
        "data:image/...;base64," prefix
    :param output_path: destination file path; missing parent directories
        are created
    """
    try:
        # Strip a data-URI prefix such as "data:image/png;base64,".
        if encoded_string.startswith('data:image'):
            base64_str_start = encoded_string.find('base64,') + 7
            encoded_string = encoded_string[base64_str_start:]
        # Undo URL percent-encoding before base64-decoding.
        standard_b64 = urllib.parse.unquote(encoded_string)
        decoded_bytes = base64.b64decode(standard_b64)

        # Only create a parent directory when the path actually has one:
        # os.makedirs('') raises FileNotFoundError for bare filenames, which
        # made the previous version silently fail for paths like "img.png".
        parent = os.path.dirname(output_path)
        if parent:
            os.makedirs(parent, exist_ok=True)

        with open(output_path, 'wb') as file_to_save:
            file_to_save.write(decoded_bytes)
    except Exception as e:
        print(e)


def decode_image(src):
    """
    Decode a base64 data-URI image and save it to a uniquely named local file.
    :param src: image data-URI, e.g. "data:image/gif;base64,R0lGOD..."
    :return: str, the filename the image was written to
    :raises Exception: when src is not a parseable data-URI
    """
    # 1. Pull the extension and the base64 payload out of the data-URI.
    match = re.search("data:image/(?P<ext>.*?);base64,(?P<data>.*)", src, re.DOTALL)
    if not match:
        raise Exception("Do not parse!")
    ext = match.group("ext")
    data = match.group("data")

    # 2. Undo percent-encoding, then base64-decode (URL-safe alphabet).
    standard_b64 = urllib.parse.unquote(data)
    img = base64.urlsafe_b64decode(standard_b64)

    # 3. Write the binary payload under a random UUID filename.
    filename = "{}.{}".format(uuid.uuid4(), ext)
    with open(filename, "wb") as f:
        f.write(img)

    return filename


def judgment_coding(content):
    """
    Detect the character encoding of a byte string.
    Args:
        content: raw bytes to inspect

    Returns:
        the encoding name reported by chardet (may be None)

    """
    return chardet.detect(content)["encoding"]


# Collapse a list of years into range strings, e.g. [2000, 2001, 2003] -> "2000-2001;2003".
def change_year_range(year_list):
    """Group consecutive years into "start-end" ranges joined by ';'.

    The input list is sorted in place. When the final range ends at the
    current year, the year is stripped, leaving an open-ended "YYYY-" form.
    """
    year_list.sort()
    grouped = []
    current_run = []
    for position, year in enumerate(year_list):
        # A gap in the sequence closes the running group.
        if position and int(year) != int(year_list[position - 1]) + 1:
            grouped.append(current_run)
            current_run = []
        current_run.append(year)
    if current_run:
        grouped.append(current_run)

    pieces = [
        run[0] if len(run) == 1 else f'{run[0]}-{run[-1]}'
        for run in grouped
    ]
    pieces = [str(piece) for piece in pieces]

    now_year = str(datetime.datetime.now().year)
    result_str = ';'.join(pieces)
    # An interval reaching the current year is rendered open-ended.
    if result_str.endswith(f'-{now_year}'):
        result_str = result_str.replace(now_year, '')
    return result_str


import numpy as np
from pyautogui import *
from paddleocr import PaddleOCR, draw_ocr


def ocr_img_text(path="", saveimg=False, printResult=True) -> tuple:
    """
    Run OCR over an image, or over a fresh screenshot when no path is given.
    Args:
        path: image file path; "" means capture the screen instead
        saveimg: when True, render the detections onto an annotated image file
        printResult: when True, collect the recognized boxes and texts into
            the returned list (despite the name, nothing is printed)

    Returns: (result_list, img_name) — the detections and the filename the
        annotated image is saved under when saveimg is True

    """
    if path == "":
        # No path supplied: OCR a screenshot of the current screen.
        image = np.array(screenshot())
    else:
        image = np.array(Image.open(path).convert('RGB'))

    ocr = PaddleOCR(use_angle_cls=True, lang="ch")
    raw = ocr.ocr(image, cls=True)

    result_list = []
    if printResult is True:
        for line in raw:
            for detection in line:
                box = detection[0]
                text = detection[1][0]
                result_list.append(
                    {"coordinates": list(map(tuple, box)), "text": text}
                )

    # Timestamp-based name for the (optional) annotated output image.
    img_name = "ImgTextOCR-img-" + str(int(time.time())) + ".jpg"
    if saveimg is True:
        boxes = [d[0] for line in raw for d in line]
        txts = [d[1][0] for line in raw for d in line]
        scores = [d[1][1] for line in raw for d in line]
        annotated = Image.fromarray(draw_ocr(image, boxes, txts, scores))
        annotated.save(img_name)

    return result_list, img_name


# Compiled once at import time — is_reference is typically called per text
# line, and recompiling eleven patterns on every call was wasted work.
_REFERENCE_PATTERNS = [
    re.compile(p, re.IGNORECASE)
    for p in (
        r'\[\d+\]',  # [1], [2], ...
        r'\(\d{4}\)',  # (1999), (2020), ...
        r'\d{4}',  # 1999, 2020, ...
        r'\b(?:Vol\.|Volume|Vol|No\.|Number|Issue|pp\.|pages|卷|期|页)\b',  # volume/issue/page markers
        r'\b(?:doi|DOI|arXiv|ISBN|ISSN|DOI|期刊)\b',  # publication identifiers
        r'\b(?:ed\.|eds\.|editor|editors|编|主编)\b',  # editor markers
        r'\b(?:In:|Proceedings of|Conference on|Journal of|Workshop on|会议|论文集|期刊)\b',  # venue markers
        r'\b(?:Retrieved from|Available at|取自|可在)\b',  # retrieval notes
        r'\d{4}年',  # 1999年, 2020年, ...
        r'\b(?:第\d+卷|第\d+期|第\d+版|第\d+页)\b',  # Chinese volume/issue/page
        r'\b(?:作者|出版|出版社|编辑|论文集|期刊|会议)\b',  # Chinese publishing terms
    )
]


def is_reference(text: str) -> bool:
    """Heuristically decide whether *text* looks like a bibliographic reference.

    Returns True as soon as any of the precompiled citation patterns matches.
    """
    return any(pattern.search(text) for pattern in _REFERENCE_PATTERNS)