import hashlib
import logging
import multiprocessing
import os
import sqlite3
import string
import sys
import time
from logging import Logger
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Lock, JoinableQueue
from typing import List, Tuple

import SQL
import useRustTar

# ---- Shared module-level state; the pool, locks and dict are created in __main__ ----
worker: multiprocessing.Pool  # process pool running tarHandler workers
mDict: dict  # Manager dict shared across processes ("quit" flag, current disk "id")
durLock: multiprocessing.RLock  # guards persistence into the local backup.db
tarLock: multiprocessing.RLock  # guards persistence of the per-disk tar databases
handler = logging.FileHandler('output.log', encoding='utf-8')
handler.setFormatter(logging.Formatter(
    fmt="%(asctime)s | %(levelname)s | %(name)s | %(filename)s:%(lineno)d | %(funcName)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"
))
logging.basicConfig(handlers=[handler])
dll: str = "Lib/rust_tar_library.dll"  # path to the Rust tar-extraction DLL used by useRustTar
priority: str = "STMT"  # entries containing this marker are mirrored to output.ini
idFile: str = "..key"  # per-drive marker file whose first line is the disk id
queue: 'JoinableQueue[tuple[str,str]]'  # work queue of (tar path, tar hash) pairs
cpuCount: int = 4  # number of worker processes
cachedLimit: int = 65536  # SQLite cached_statements limit
durDuration: int = 60 * 2  # interval (seconds) between worker persistence flushes
cachedTar: 'list[tuple[str,str]]' = []  # (path, short-hash) of .tar.gz files found during a scan


def initLocalDB() -> Tuple[sqlite3.Cursor, sqlite3.Connection]:
    """Open the on-disk backup.db and make sure the disk/file tables exist.

    Returns:
        (cursor, connection) for the local SQLite database.
    """
    connection = sqlite3.connect(
        "backup.db",
        cached_statements=cachedLimit,
        check_same_thread=False,
        isolation_level='DEFERRED',
    )
    cursor = connection.cursor()
    # create-if-missing DDL for both tables, committed before returning
    for ddl in (SQL.createDisk, SQL.createFile):
        cursor.execute(ddl)
    connection.commit()
    return cursor, connection


def getDir(path: str, id: str):
    """Walk *path* recursively and collect one metadata row per regular file.

    Args:
        path: root directory to scan.
        id: disk id stored with every row.

    Returns:
        list of tuples ``(path-without-drive-prefix, basename, False,
        atime "YYYYmmddHHMMSS", size, None, id)`` matching the ``file`` table;
        the ``None`` is the tar_hash placeholder filled in after extraction.
        ``.tar.gz`` archives are additionally appended to the module-level
        ``cachedTar`` list as (path, short-hash) for later queue dedup.
    """
    res = []
    log = logging.getLogger(__name__)
    for root, dirs, files in os.walk(path):
        for name in files:
            loc = os.path.join(root, name)
            try:
                atime = time.strftime(
                    "%Y%m%d%H%M%S", time.localtime(os.path.getatime(loc))
                )
                size = os.path.getsize(loc)
                if name.endswith(".tar.gz"):
                    # remember the archive so cachedTar2Queue can check whether
                    # it was already extracted (hash() is the module-level digest)
                    cachedTar.append((loc, hash(loc, 6)))
                # tar_hash stays None here until the archive is extracted
                res.append((loc.split(":")[-1], loc.split("/")[-1], False, atime, size, None, id))
            except Exception as e:
                # best-effort scan: a file may vanish or be unreadable mid-walk.
                # Fix: log the skip instead of the original silent `pass`.
                log.warning("skipping unreadable entry %s: %s", loc, e)
    return res


def cachedTar2Queue(lcur, cachedTar):
    """Enqueue every cached .tar.gz archive that has not been extracted yet.

    Collects the tar_hash values already recorded in the local ``file`` table
    for the cached paths, then puts every (path, hash) pair whose hash is not
    among them onto the shared work queue. The cache list is cleared afterwards.

    Args:
        lcur: cursor on the local backup.db.
        cachedTar: list of (absolute path, short hash) pairs built by getDir.

    Exits the process on any SQLite error to avoid polluting the database.
    """
    # Fix: idiomatic emptiness guard (was `if not len(cachedTar) == 0:`);
    # the check cannot raise, so it sits outside the try block.
    if not cachedTar:
        return
    try:
        locs = [p.split(":")[-1] for p, _ in cachedTar]
        existing_hashes = set()
        # SQLite caps the number of bound host parameters (historically 999),
        # so the IN (...) lookup is chunked.
        for i in range(0, len(locs), 999):
            chunk = locs[i:i + 999]
            placeholders = ','.join('?' * len(chunk))
            query = f"SELECT tar_hash FROM file WHERE path IN ({placeholders})"
            rows = lcur.execute(query, chunk).fetchall()
            existing_hashes.update(row[0] for row in rows)

        for loc, tar_hash in cachedTar:
            if tar_hash not in existing_hashes:
                logger.debug(loc + "入队")
                queue.put(item=(loc, tar_hash))
        logger.info("入队" + str(queue.qsize()) + "个包")
        cachedTar.clear()

    except sqlite3.Error as e:
        logger.error(f"数据库查询失败: {e}")
        sys.exit(-1)



def insertFileBatch(cur: sqlite3.Cursor, file_data: list):
    """Bulk-insert scanned file rows into the local SQLite database.

    Args:
        cur: SQLite cursor on the local database.
        file_data: list of tuples, one per row, covering every column of
            the ``file`` table.
    """
    # Serialize against the periodic persistence flush done by the workers.
    with durLock:
        cur.executemany(SQL.insertFileAll, file_data)


def hash(input_string, l: int):
    """Map *input_string* to a short alphanumeric digest of length *l*.

    SHA-256 is taken over the UTF-8 encoding of the input; each successive
    pair of hex digits selects one character from [A-Za-z0-9] by modular
    indexing. NOTE: this deliberately shadows the builtin hash() here.
    """
    digest = hashlib.sha256(input_string.encode()).hexdigest()
    charset = string.ascii_letters + string.digits
    # two hex digits -> one byte -> one charset character, repeated l times
    return "".join(
        charset[int(digest[2 * i: 2 * i + 2], 16) % len(charset)]
        for i in range(l)
    )


def tarHandler(mDict: dict, queue: multiprocessing.JoinableQueue, durLock: multiprocessing.RLock,
               tarLock: multiprocessing.RLock) -> None:
    """Worker-process loop: pull (tar path, tar hash) jobs off the queue,
    extract each archive via the Rust DLL, and record the contained filenames.

    Runs until mDict["quit"] is set and the queue is empty. Results accumulate
    in an in-memory SQLite database and are flushed to disk through durDB()
    every durDuration seconds, plus once more on shutdown.

    Args:
        mDict: shared Manager dict ("quit" flag, current disk "id").
        queue: shared JoinableQueue of (path, hash) jobs.
        durLock: lock guarding backup.db persistence.
        tarLock: lock guarding the per-disk tar database persistence.
    """
    from queue import Empty
    # one logger per worker process, named after the pid
    logger = logging.getLogger(str(os.getpid()))
    cur, conn = initMDB()
    __timer = int(time.time())
    while True:
        x: tuple[str, str]
        # exit only once the main process signalled quit AND no work remains
        if mDict.get("quit") and queue.empty():
            durDB(cur, conn, durLock, tarLock, logger, mDict=mDict)
            logger.info("拆包进程退出")
            break
        try:
            x = queue.get(block=True, timeout=30)
        except Empty:
            logger.debug("未获取到包，队列长度" + str(queue.qsize()))
            continue
        (loc, tar_hash) = x
        logger.info("开始拆包" + loc + " " + tar_hash)
        filenames = useRustTar.tar_rust(loc, dll)
        if filenames is None:
            # None signals an empty archive (behavior of useRustTar.tar_rust —
            # TODO confirm against that module)
            logger.info(tar_hash + "拆包结束，空包")
        else:
            data = []
            for f in filenames:
                t = (f, f.split("/")[-1], tar_hash)
                # entries matching the priority marker are also mirrored to output.ini
                if priority in f:
                    with open("output.ini", "a+") as file:
                        file.write(str(t))
                        file.write("\n")
                data.append(t)
            logger.info(tar_hash + "拆包结束，总数：" + str(len(data)) + " 大小:" +
                        str(round(os.path.getsize(loc) / 1024 / 1024, 2)) + "mb")
            cur.executemany(SQL.insertTarAll, (data))
        # mark the archive as extracted by storing its tar_hash on the file row
        cur.execute(SQL.insertFilePathTarHash, (loc.split(':')[-1], tar_hash))
        if int(time.time()) - __timer > durDuration:
            # periodic flush: persist, then restart with a fresh in-memory DB
            durDB(cur, conn, durLock, tarLock, logger, mDict=mDict)
            cur.close()
            conn.close()
            cur, conn = initMDB()
            __timer = int(time.time())
        queue.task_done()
        # explicitly drop the job tuple
        del x


def diskInsert(id: str, cur: sqlite3.Cursor) -> None:
    """Insert a new disk row with the current timestamp and initial progress 0."""
    created = time.strftime("%Y%m%d%H%M%S", time.localtime())
    cur.execute(SQL.insertDiskIdCreateTimeProgress0, (id, created))


def updateDiskProgress(lcur: sqlite3.Cursor, status: int, id: str) -> None:
    """Set the progress column for disk *id*.

    Per the main flow: 1 while scanning, 2 once the disk is fully processed.

    Fix: `id` was annotated with the `string` module instead of the `str` type.
    """
    lcur.execute(SQL.updateDiskProgressById, (status, id,))


def updateDiskEndTime(lcur: sqlite3.Cursor, id: str) -> None:
    """Stamp disk *id* with the current local time as its end time.

    Fix: `id` was annotated with the `string` module instead of the `str` type.
    """
    lcur.execute(SQL.updateDiskEndTimeById, (time.strftime("%Y%m%d%H%M%S", time.localtime()), id,))


def initMDB() -> Tuple[sqlite3.Cursor, sqlite3.Connection]:
    """Create an in-memory SQLite database with the disk/file/tar tables.

    The connection is put into autocommit mode (isolation_level=None) so the
    worker loop never holds an open transaction between flushes.

    Returns:
        (cursor, connection) for the in-memory database.
    """
    connection = sqlite3.connect(":memory:", cached_statements=cachedLimit)
    connection.isolation_level = None  # autocommit
    cursor = connection.cursor()
    # create-if-missing DDL for all three tables
    for ddl in (SQL.createDisk, SQL.createFile, SQL.createTar):
        cursor.execute(ddl)
    return cursor, connection


def durDB(mcur: sqlite3.Cursor, mconn: sqlite3.Connection, durLock: Lock, tarLock: Lock, logger: Logger,
          mDict: dict = None) -> None:
    """Persist a worker's in-memory database to disk.

    Under durLock, file rows whose tar_hash is still NULL are inserted into
    backup.db and rows whose tar_hash has been filled in are updated there.
    If mDict carries a disk "id", the extracted tar entries are additionally
    written, under tarLock, to the per-disk database tar/<id>.db.

    All parameters except mDict are required.

    Args:
        mcur: in-memory database cursor (required)
        mconn: in-memory database connection (required)
            NOTE(review): mconn is accepted but never used in this body.
        durLock: lock guarding backup.db (required)
        tarLock: lock guarding the per-disk tar databases (required)
        logger: logger (required)
        mDict: shared config dict (optional, defaults to None)
    """
    logger.debug("开始持久化，尝试获取durLock")
    with durLock:
        with sqlite3.connect("backup.db") as dc:
            logger.debug("成功获取durLock")
            dcur = dc.cursor()
            # insert brand-new rows (archives not yet extracted)
            dcur.executemany(SQL.insertFileAll, mcur.execute(SQL.selectFileAllByTarHashIsNull).fetchall(), )
            # update rows whose tar_hash has since been filled in
            data = mcur.execute(SQL.selectFileTarHashPathByTarHashNoNull).fetchall()
            if len(data) > 0:
                dcur.executemany(SQL.updateFileTarHashByPath, (data))
            dcur.close()
            dc.commit()
    if mDict is not None:
        # id is None once the disk has been fully scanned -> nothing tar-side to flush
        if mDict.get("id") is None:
            return
        logger.debug("开始持久化，尝试获取tarLock")
        with tarLock:
            logger.debug("成功获取tarLock")
            with sqlite3.connect("tar/" + mDict["id"] + ".db") as tc:
                tcur = tc.cursor()
                tcur.execute(SQL.createTar)
                tcur.executemany(SQL.insertTarAll, mcur.execute(SQL.selectTarAll).fetchall(), )
                tcur.close()
                tc.commit()
    return


def getDrives() -> List[str]:
    r"""Return the Windows drive roots (A:\ through Z:\) that currently exist."""
    candidates = (chr(code) + ':\\' for code in range(ord('A'), ord('Z') + 1))
    return [root for root in candidates if os.path.exists(root)]


def workerInit(logQueue) -> None:
    """Pool initializer: route this worker's log records onto the shared queue."""
    pid_logger = logging.getLogger(str(os.getpid()))
    pid_logger.setLevel(logging.INFO)
    pid_logger.addHandler(QueueHandler(logQueue))
    pid_logger.info("进程" + str(os.getpid()) + "启动")

def tarErrorCallback(logger, x, queue):
    """Pool error callback: log the worker's exception, then mark the failed
    job done so the queue's join() accounting still balances."""
    message = "子进程报错" + str(x)
    logger.error(message)
    queue.task_done()

if __name__ == "__main__":
    logger = logging.getLogger("root")
    logQueue = multiprocessing.Manager().Queue()
    logger.setLevel(logging.INFO)
    logger.addHandler(QueueHandler(logQueue))
    logger.info("当前工作目录：" + os.getcwd())
    queue = multiprocessing.Manager().JoinableQueue()
    listener = QueueListener(logQueue, logging.StreamHandler())
    listener.start()

    disks: list = []

    disks: List[str]
    cpuCount = (
        multiprocessing.cpu_count()
        if cpuCount > multiprocessing.cpu_count()
        else cpuCount
    )

    durLock = multiprocessing.Manager().RLock()
    tarLock = multiprocessing.Manager().RLock()
    mDict = multiprocessing.Manager().dict()
    mDict.setdefault("quit", False)
    worker = multiprocessing.Pool(processes=cpuCount, initializer=workerInit, initargs=(logQueue,))
    for i in range(cpuCount):
        worker.apply_async(func=tarHandler, args=(mDict, queue, durLock, tarLock),
                           error_callback=lambda x: tarErrorCallback(logger, x, queue))

    # 判断DLL文件是否存在，不存在直接退出不要污染数据库
    if not os.path.exists(dll):
        logger.error(dll + "不存在")
        sys.exit(1)

    for d in getDrives():
        # 根据是否存在idFile来判断要不要扫
        if os.path.exists(d + idFile):
            disks.append(d)
        else:
            logger.warning(d + "缺少" + idFile)

    for disk in disks:
        disk: string
        path: str = disk
        if not os.path.exists(path):
            raise KeyError(path + "不存在")
        if not os.path.isdir(path):
            raise KeyError(path + "不是文件夹")
        if not os.path.exists("tar/"):
            os.mkdir("tar/")
        # 从idFile获取id
        id: string
        with open(disk + idFile, "r+") as f:
            id = f.readline()
        if id is None:
            logger.error(disk + idFile + "缺少id")
            continue

        logger.info("开始处理" + disk)
        # 硬盘状态处理
        with durLock:
            lcur, lconn = initLocalDB()
            if lcur.execute(SQL.selectDiskAllById, (id,)).fetchone() is None:
                diskInsert(id, lcur)
            # 2代表扫过
            if int(lcur.execute(SQL.selectDiskProgressById, (id,)).fetchone()[0]) == 2:
                continue
            updateDiskProgress(lcur, 1, id)
            mDict["id"] = id
            lconn.commit()

            # 扫描硬盘目录树
            res = getDir(path, id)
            logger.info(disk + "扫描进内存完毕")
            # 将扫描出来的全盘目录入本地库
            insertFileBatch(lcur, res)
            lconn.commit()

            # 将包入队，准备拆包
            cachedTar2Queue(lcur, cachedTar)

            lcur.close()
            lconn.close()

        logger.info("当前盘" + id + "入库完毕，开始拆包...")
        # 等待当前盘的所有包拆包完成，防止漏包
        queue.join()
        logger.info(disk + "拆包扫描完成")
        with durLock:
            lcur, lconn = initLocalDB()
            updateDiskProgress(lcur, 2, id)
            updateDiskEndTime(lcur, id)
            lconn.commit()
            lcur.close()
            lconn.close()

    mDict["quit"] = True
    worker.close()
    worker.join()
    listener.stop()
    sys.exit(0)
