# -*- coding: utf-8 -*-
# standard
import os
import sys
import shutil
import math
import threading
import _thread
import subprocess
import io
import re
import shutil
from enum import Enum
from queue import Queue
from queue import Empty
from queue import Full
from random import choice

# third
import pexpect
import paramiko
from tqdm import tqdm


# local
_P_PATH =  os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if _P_PATH not in sys.path:
    sys.path.append(_P_PATH)
from db.sqlalchemy_dbc import *


"""
@Title:   
@File: copy_models.py
@Author: walle 2023年10月27日 13时31分14秒
@Version: 1.0.0
@Desc: 
本脚本只能在 hk02 linux 执行
"""
TEST_MODE = True
if TEST_MODE:
    USER = "spider"  # file-server ssh account
    PW = "jingying"  # file-server ssh password
    HOST = "47.242.28.132"  # i.e. hk01
    DEST_DIR = "/mnt/projects/fake_AVSpeech_raw/"   # fake copy destination directory on hk01
    MAX_WORKERS = 1  # maximum number of copy threads
else:
    # NOTE(review): this raise makes the production settings below unreachable —
    # presumably a deliberate guard until production is ready; confirm before removing
    raise RuntimeError("生产环境尚未就绪")
    USER = "root"  # file-server ssh account
    PW = "Default01!"  # file-server ssh password
    HOST = "101.132.77.17"  # file-server ssh address (the other machine; latest, per Yu Yunjie)
    DEST_DIR = "/mnt/aiface/data/Wav2Lip/raw/AVSpeech_raw/"   # destination directory, preferably ending with "/" (dev-stage test directory)
    MAX_WORKERS = 6  # maximum number of copy threads

PORT = 22  # file-server ssh port
MIN_VIDEO_SIZE = 1024 ** 2 # minimum file size in bytes (1 MiB); smaller files are treated as broken copies
SOURCE_DIR = "/mnt/projects/AVSpeech/"  # source video directory
TEMP_BACKUP_DIR =  "/mnt/projects/temp_backup/"  # temporary backup directory for videos
BACKUP_DIR =  "/mnt/projects/video_backup/"  # full backup directory for videos
# todo: temporary settings for a local VM when developing at home
# USER = "walle"
# PW = "123456"
# HOST = "192.168.101.9"
# DEST_DIR = "/home/walle/work/fake_AVSpeech_raw/"   # fake copy destination directory on the target server
# SOURCE_DIR = "/home/walle/work/AVSpeech/"  # source video directory
# TEMP_BACKUP_DIR =  "/home/walle/work/temp_backup"  # temporary backup directory for videos
# BACKUP_DIR =  "/home/walle/work/video_backup"  # full backup directory for videos

# colours used for the tqdm progress bars (picked at random per bar)
COLORS = [
        '#FF0000', # red
        '#00FF00',  # green
        '#0000FF', # blue
        '#FFFF00',  # yellow
        '#00FFFF',  # cyan
        '#FFFFFF',  # white
        '#808080',  # grey
        '#A9A9A9',  # dark grey
        '#D3D3D3',  # light grey
        '#FF00FF',  # magenta
        "#A52A2A",  # brown
        "#FFC0CB",  # pink
        "#800080",  # purple
        ]
LENGTH = len(COLORS)
SCP_TASKS = Queue(10)  # queue of folder-level copy tasks
DB_LOCK = threading.RLock()  # lock serialising database access across threads
if not os.path.exists(TEMP_BACKUP_DIR):
    os.makedirs(TEMP_BACKUP_DIR)
if not os.path.exists(BACKUP_DIR):
    os.makedirs(BACKUP_DIR)


class MediaFile(BaseModel):
    """A single video file, located either on this machine or on the remote file server."""
    name: str = Field(..., description="文件名称")
    path: str = Field(..., description="文件路径")
    size: int = Field(..., description="文件大小")
    is_remote: bool = Field(..., description="是不是远程目录")


class MediaFolderDiff(BaseModel):
    """
    Difference between a remote folder and its local counterpart:
    which local files already exist remotely and which are still missing.
    """
    name: str = Field(..., description="文件夹名称")
    exist_files: List[MediaFile] = Field([], description="存在于远程机器上对应目录的文件")
    not_exist_files: List[MediaFile] = Field([], description="不存在于远程机器上对应目录的文件（但存在于本地目录）")


class MediaFolder(BaseModel):
    """A folder of video files, either local or remote, with a diff helper."""
    name: str = Field(..., description="文件夹名称")
    path: str = Field(..., description="文件夹路径")
    is_remote: bool = Field(..., description="是不是远程目录")
    temp_backup_path: str = Field(None, description="临时备份目录，只有本地文件夹才有的字段")
    files: List[MediaFile] = Field([], description="文件")

    def compare(self, remote_folder: "MediaFolder") -> MediaFolderDiff:
        """
        Diff this (local) folder against its remote counterpart.

        A local file counts as "existing" remotely only when a remote file with
        the same name AND the same size is present; a size mismatch is treated
        as a partial copy and the file is re-queued.

        :param remote_folder: the remote folder with the same name
        :return: MediaFolderDiff listing matched and missing files
        :raises ValueError: if self is remote, remote_folder is not remote,
            or the folder names differ
        """
        if self.is_remote:
            raise ValueError(f"不能以远程目录为基准进行比较: path = {self.path}")
        if not remote_folder.is_remote:
            raise ValueError(f"remote_folder 必须是一个远程目录: path = {remote_folder.path}")
        if self.name != remote_folder.name:
            raise ValueError(f"不同目录不能进行比较: {self.name} ！= {remote_folder.name}")
        remote_by_name = {f.name: f for f in remote_folder.files}
        exist_files = []
        not_exist_files = []
        for local_file in tqdm(self.files, colour=choice(COLORS), desc="比较远程目录"):
            remote_file = remote_by_name.get(local_file.name)
            if remote_file is None:
                not_exist_files.append(local_file)
            elif remote_file.size == local_file.size:
                exist_files.append(remote_file)
            else:
                logger.warning(f"{self.name} 目录下的文件已经拷贝了一部分：{local_file.size} > {remote_file.size}")
                not_exist_files.append(local_file)
        return MediaFolderDiff(name=self.name,
                               exist_files=exist_files,
                               not_exist_files=not_exist_files)
        


def format_remote_file_info(file_lines: List[str]) -> Tuple[dict, dict]:
    """
    Parse `ls -l` output lines into file-name -> size (bytes) mappings.

    :param file_lines: lines as produced by `ls -l <dir> | grep "^-"`
    :return: (good_files, bad_files) — files at or above MIN_VIDEO_SIZE and
        files below it (candidates for re-copy); keys are file names,
        values are sizes in bytes
    """
    good_files = {}
    bad_files = {}
    for line in file_lines:
        # ls -l columns: perms links owner group size month day time name.
        # BUG FIX: maxsplit=8 keeps file names containing spaces intact in the
        # last field (the old split-and-filter truncated them at the first space).
        words = line.split(None, 8)
        if len(words) < 9:
            if line != "":
                logger.warning(f"无效的文件信息读取行")
            continue
        try:
            size = int(words[4])
        except ValueError:  # narrowed from bare Exception: int() on a str only raises ValueError
            logger.error(f"无效的文件尺寸: {words[4]} words={words}")
            continue
        file_name = words[8]
        # todo: the current scheme uses the full file name as the key
        # key = file_name.rsplit(".", 1)[0]
        key = file_name
        if size < MIN_VIDEO_SIZE:
            logger.warning(f"远程服务器上{file_name}文件尺寸只有{size} 字节，需要重新下载/拷贝")
            bad_files[key] = size
        else:
            good_files[key] = size
    return good_files, bad_files


def view_remote_files(dir_path: str = None, host: str = None, port: int = None, user: str = None, password: str = None) -> Tuple[dict, dict]:
    """
    List the regular files of a remote directory over SSH.

    Every None parameter falls back to the module-level defaults
    (DEST_DIR / HOST / PORT / USER / PW).

    :return: (good_files, bad_files) as produced by format_remote_file_info —
        file name -> size in bytes
    """
    if dir_path is None:
        dir_path = DEST_DIR
    if host is None:
        host = HOST
    if port is None:
        port = PORT
    if user is None:
        user = USER
    if password is None:
        password = PW
    # `grep "^-"` keeps only regular files (drops dirs, links, totals line)
    command = f'ls -l {dir_path} | grep "^-"'
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=host, port=port, username=user, password=password)
    try:
        _stdin, stdout, _stderr = ssh.exec_command(command)
        output = stdout.read().decode()
    finally:
        # always release the connection, even if reading fails
        ssh.close()
    return format_remote_file_info(output.split("\n"))


def move_files_to_temp_backup_dir(folder_name: str, file_names: List[str]):
    """
    Move the given files of a source sub-folder into the temporary backup folder.

    Files already present at the destination with an identical size are skipped;
    a destination file with a different size is renamed with a timestamped
    ``.bak`` suffix before the move.

    :param folder_name: sub-folder name under SOURCE_DIR
    :param file_names: names of the files to move
    """
    temp_backup_folder_path = os.path.join(TEMP_BACKUP_DIR, folder_name)
    if not os.path.exists(temp_backup_folder_path):
        os.makedirs(temp_backup_folder_path)
    num = 0
    for file_name in tqdm(file_names, colour=choice(COLORS), desc="正在移动恢复拷贝的目录中的已拷贝的文件"):
        source = os.path.join(SOURCE_DIR, folder_name, file_name)
        destination = os.path.join(temp_backup_folder_path, file_name)
        if os.path.exists(destination):
            if os.path.getsize(destination) == os.path.getsize(source):
                continue  # same size: already backed up, nothing to do
            # different size: rename the stale destination before moving.
            # BUG FIX: was `strtime`, which raised AttributeError whenever this
            # branch was hit — correct method is strftime.
            dst = destination + f".{datetime.now().strftime('%Y%m%d%H%M%S')}.bak"
            shutil.move(src=destination, dst=dst)
        shutil.move(src=source, dst=destination)
        num += 1
    logger.debug(f"目录 {folder_name} 下已有 {num} 个文件移动到临时备份目录")


def get_remote_folder_obj(folder_name: str) -> MediaFolder:
    """
    Build a MediaFolder describing the remote copy of *folder_name*.

    Only the "good" files (>= MIN_VIDEO_SIZE) reported by view_remote_files
    are included; the result's file list is sorted by name.
    """
    folder_name = folder_name.strip("/")
    folder_path = DEST_DIR + folder_name + "/"
    good_files = view_remote_files(dir_path=folder_path)[0]
    media_files = [
        MediaFile(name=file_name,
                  size=file_size,
                  is_remote=True,
                  path=folder_path + file_name)
        for file_name, file_size in good_files.items()
    ]
    media_files.sort(key=lambda f: f.name)
    return MediaFolder(name=folder_name, path=folder_path, is_remote=True, files=media_files)


def list_local_folder_mp4(dir_path: str) -> List[dict]:
    """
    Return info dicts for every valid mp4 directly inside *dir_path*.

    This is specialised to the local layout: SOURCE_DIR -> sub-folder -> videos;
    *dir_path* is one such sub-folder. Files smaller than MIN_VIDEO_SIZE are
    logged and skipped (likely interrupted copies).

    :param dir_path: path of the sub-folder to scan
    :return: list of {"name", "folder", "size", "file_path"} dicts
    """
    files = []
    logger.debug(f"开始检查 {dir_path} 目录下的mp4文件")
    folder = os.path.basename(dir_path)
    for file_name in tqdm(os.listdir(dir_path), desc="文件解析进度", colour=COLORS[1]):
        file_path = os.path.join(dir_path, file_name)
        if file_name.endswith(".mp4") and os.path.isfile(file_path):
            size = os.path.getsize(file_path)
            if size < MIN_VIDEO_SIZE:
                # BUG FIX: message interpolated file_name twice; the first slot is the folder
                logger.warning(f"本地 {folder} 目录下的文件 {file_name} 尺寸只有{size} 字节，可能是拷贝终端所致，忽略")
            else:
                files.append({
                    "name": file_name,
                    "folder": folder,
                    "size": size,
                    "file_path": file_path,
                })
        else:
            logger.warning(f"无效的目录或者文件: {file_path}")
    logger.debug(f"目录 {dir_path} 发现 {len(files)} 个有效文件")
    return files


def get_locale_folder_obj(folder_name: str) -> MediaFolder:
    """
    Build a MediaFolder for a local sub-folder, merging in any files that were
    already moved to its temporary backup directory. The combined file list is
    sorted by name.

    :param folder_name: sub-folder name under SOURCE_DIR
    """
    folder_name = folder_name.strip("/")
    folder_path = os.path.join(SOURCE_DIR, folder_name)
    files = list_local_folder_mp4(dir_path=folder_path)

    files = [MediaFile(name=x['name'],
                       path=x['file_path'],
                       size=x['size'],
                       is_remote=False) for x in files]
    folder = MediaFolder(name=folder_name, path=folder_path, is_remote=False, files=files)
    # also include files already moved to the temporary backup directory
    temp_backup_path = os.path.join(TEMP_BACKUP_DIR, folder_name)
    if os.path.exists(temp_backup_path):
        folder.temp_backup_path = temp_backup_path
        # BUG FIX: scan the temp backup directory (the original re-scanned
        # folder_path, duplicating every source file in the listing)
        files2 = list_local_folder_mp4(dir_path=temp_backup_path)
        files2 = [MediaFile(name=x['name'],
                            path=x['file_path'],
                            size=x['size'],
                            is_remote=False) for x in files2]
        folder.files.extend(files2)
    folder.files.sort(key=lambda x: x.name)
    return folder


class CopyStatus(Enum):
    """Lifecycle states of an hour-folder copy task."""

    ERROR = -1  # copy failed, or the post-copy check did not pass
    STANDBY = 0  # not copied yet, waiting
    COPYING = 1  # copy in progress
    COMPLETED = 2  # copy finished


class HourFolderOrm(Base, OrmTools):
    """
    One source sub-folder, named YYYYMMDDHH, tracked as a folder-level copy task.
    """
    __tablename__ = "hour_folder"
    name = Column(String(64), primary_key=True, doc="文件夹名称")
    path = Column(String(256), primary_key=True, doc="文件夹路径")
    # CopyStatus value, denormalised here to keep task queries cheap
    status = Column(Integer, default=CopyStatus.STANDBY.value, nullable=True, doc="拷贝状态？ 优化查询的字段")
    error = Column(String(512), default="", nullable=True, doc="拷贝错误的原因")
    copy_begin = Column(DateTime, nullable=True, default=datetime.now, doc="开始拷贝时间")
    copy_end = Column(DateTime, nullable=True, default=None, doc="拷贝结束时间")
    create_time = Column(DateTime, nullable=True, default=datetime.now, doc="创建时间")



class MediaFileOrm(Base, OrmTools):
    """
    ORM row for one local media file, plus the class-level copy workflow:
    scanning the source directory, queueing folder tasks, running scp,
    and verifying/persisting the results.
    """
    __tablename__ = "media_file"
    # the same file name may appear under several folders when a copy is resumed later
    id = Column(Integer, primary_key=True, doc="由于可能存在拷贝到一半过一段时间继续拷贝的问题，所以同名的文件可能出现在不同的目录下")
    name = Column(String(128), index=True, doc="文件名")
    folder = Column(String(64),  doc="目录，含义上是外键")
    size = Column(Integer, default=0, nullable=True, doc="文件尺寸")
    file_path = Column(String(256), doc="本地文件路径")
    progress = Column(String(64), nullable=True, default="", doc="文件夹 100% 表示拷贝完成")
    create_time = Column(DateTime, nullable=True, default=datetime.now, doc="创建时间")

    @classmethod
    def get_copied_from_db(cls) -> dict:
        """
        Return {file_name: 1} for every file whose copy progress is 100%.
        """
        db_session = new_db_session()
        a_map = {x.name: 1 for x in db_session.query(cls.name).where(cls.progress == "100%")}
        db_session.close()
        return a_map

    @classmethod
    def get_not_copy(cls) -> dict:
        """
        Return {file_name: size} for every file not yet fully copied.
        """
        db_session = new_db_session()
        a_map = {x.name: x.size for x in db_session.query(cls.name, cls.size).where(cls.progress != "100%")}
        db_session.close()
        return a_map

    @classmethod
    def scan_source_dir(cls, folder_name: str = None, offset: int = 5):
        """
        Scan the source directory for new hour folders and register them in the DB.

        Should run at each full hour + 1 hour + `offset` minutes, so files still
        being written across the hour boundary get time to finish.

        :param folder_name: scan only this folder (forced) instead of all of SOURCE_DIR
        :param offset: grace minutes added to the one-hour threshold
        """
        if folder_name is not None:
            folder_names, force = [folder_name], True
        else:
            folder_names, force = os.listdir(SOURCE_DIR), False
        folder_names.sort()
        db_session = new_db_session()
        scaned_folders = [x.name for x in db_session.query(HourFolderOrm.name)]
        db_session.close()
        threashold = (60 + offset) * 60  # seconds a folder must age before it is scanned
        now = datetime.now()
        for folder_name in tqdm(folder_names, colour=choice(COLORS), desc="正在扫描源目录"):
            matched = re.search(r"(\d{4})(\d{2})(\d{2})(\d{2})", folder_name)
            if matched is None:
                # BUG FIX: skip entries that are not YYYYMMDDHH folders instead of
                # crashing with AttributeError on .groups()
                logger.warning(f"忽略无法解析的目录名: {folder_name}")
                continue
            year, month, day, hour = matched.groups()
            folder_time = datetime(year=int(year), month=int(month), day=int(day), hour=int(hour), minute=0, second=0)
            if (seconds := (now - folder_time).total_seconds()) < threashold:
                logger.debug(f"目录 {folder_name} 距离扫描时间还剩 {threashold - seconds} 秒")
                continue
            if folder_name not in scaned_folders or force:
                dir_path = os.path.join(SOURCE_DIR, folder_name)
                file_kws = list_local_folder_mp4(dir_path=dir_path)
                if len(file_kws) == 0:  # empty folder — nothing to register
                    continue
                with DB_LOCK:  # `with` guarantees the lock is released on any exception
                    db_session = new_db_session()
                    exist_files: Dict[str, int] = {x.name: x.id for x in db_session.query(cls.name, cls.id).filter(cls.folder == folder_name)}
                    # folder record
                    if db_session.query(HourFolderOrm).filter(HourFolderOrm.name == folder_name).first() is None:
                        db_session.add(HourFolderOrm(name=folder_name, path=dir_path,
                                                     status=CopyStatus.STANDBY.value))
                    else:
                        logger.debug(f"文件夹对象 {folder_name} 已存在")
                    # file records: update sizes of known files, insert new ones
                    num1, num2 = 0, 0
                    for kw in file_kws:
                        if kw['name'] in exist_files:
                            num1 += 1
                            db_session.query(cls).filter(cls.id == exist_files[kw['name']]).update({"size": kw['size']})
                        else:
                            num2 += 1  # BUG FIX: was `num2 += 2`, inflating the "added" count
                            db_session.add(cls(**kw))
                    db_session.commit()
                    db_session.close()
                    logger.debug(f"目录 {folder_name} 添加了 {num2} 个文件记录。 修改了 {num1} 个文件记录")
            else:  # already scanned — skip
                pass

    @classmethod
    def scan_source_dir_forever(cls):
        """
        Scan the source directory in an endless loop (meant for a daemon thread).
        """
        while True:
            now = datetime.now()
            min_offset = 5  # minutes of grace for files still being written across the hour boundary
            if min_offset < now.minute <= 10:  # scan once inside each hour's window
                cls.scan_source_dir(offset=min_offset)
                sleep(3000)  # sleep 50 minutes
            else:
                sleep(60)

    @classmethod
    def check_result(cls, folder_name: str) -> Resp:
        """
        Verify the outcome of an scp copy for one folder:
        * diff the remote folder against the local one
        * on success: rsync the source into BACKUP_DIR (TEST_MODE) or delete it
        * persist the folder status and per-file progress to the database
        :return: Resp whose message is empty on success
        """
        resp = Resp()
        remote_folder: MediaFolder = get_remote_folder_obj(folder_name)
        locale_folder: MediaFolder = get_locale_folder_obj(folder_name)
        diff = None
        try:
            diff = locale_folder.compare(remote_folder)
        except Exception as e:
            logger.exception(e)
            resp.message = f"{datetime.now()} 比较文件夹时出错，: {e}"
        # NOTE(review): Resp (from db.sqlalchemy_dbc) is assumed to be falsy once
        # .message is set — confirm against its definition.
        # The original wrapped everything below in `finally` with a `return`
        # inside it, which silently swallowed non-Exception errors; since the
        # except clause above catches Exception, running the code sequentially
        # is equivalent and no longer masks KeyboardInterrupt/SystemExit.
        if resp:
            if len(diff.not_exist_files) == 0 and len(diff.exist_files) == len(locale_folder.files):
                pass  # copy succeeded
            else:
                resp.message = f"拷贝结果检查没有通过： len(diff.not_exist_files) = {len(diff.not_exist_files)}, len(diff.exist_files) = {len(diff.exist_files)}, len(locale_folder.files) = {len(locale_folder.files)}"
        if resp:
            error = ""
            status = CopyStatus.COMPLETED.value
            if TEST_MODE:  # test mode backs the folder up with rsync instead of deleting it outright
                src = locale_folder.path
                src = src if src.endswith("/") else src + "/"
                dst = os.path.join(BACKUP_DIR, os.path.basename(src))
                args = ["rsync", "-r", src, dst]  # argv list avoids shell quoting issues
                process = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       text=True, encoding="utf-8")
                outs, errs = process.communicate()
                if process.returncode == 0 and outs == '' and errs == "":
                    logger.debug(f"rsync拷贝成功 {src} 已复制至 {dst}，状态码: {process.returncode}, outs: {outs}, errs: {errs}")
                else:
                    logger.error(f"rsync拷贝 {src} 至 {dst} 可能已经失败，状态码: {process.returncode}, outs: {outs}, errs: {errs}")
            else:
                shutil.rmtree(locale_folder.path)
                logger.debug(f"scp拷贝成功 {locale_folder.path} 已删除")
        else:  # keep the folder untouched on failure
            error = resp.message
            status = CopyStatus.ERROR.value
        # persist the outcome
        update = {"error": error, "copy_end": datetime.now(), "status": status}
        remote_files_map = {x.name: x.size for x in remote_folder.files}
        num = 0
        with DB_LOCK:
            db_session = new_db_session()
            db_session.query(HourFolderOrm).filter(HourFolderOrm.name == locale_folder.name).update(update)  # folder status
            for x in db_session.query(cls).filter(cls.folder == folder_name):
                if x.name in remote_files_map and x.size == remote_files_map[x.name]:
                    x.progress = "100%"
                    num += 1
            db_session.commit()
            db_session.close()
        if resp:
            logger.debug(f"远程服务器上 {folder_name} 下的 {num} 个文件写入成功")
            # BUG FIX: guard the removal — in non-test mode the source was already
            # deleted above, and a second rmtree raised FileNotFoundError
            if os.path.exists(locale_folder.path):
                shutil.rmtree(locale_folder.path)  # remove the source folder
            if locale_folder.temp_backup_path:
                shutil.rmtree(locale_folder.temp_backup_path)  # remove the temporary backup folder
        else:
            logger.error(f"拷贝出现错误。远程服务器上 {folder_name} 下的 {num} 个文件已写入")
        return resp

    @classmethod
    def load_tasks_from_db(cls, limit: int = 10, is_first: bool = False) -> int:
        """
        Load folder-level tasks from the database into the work queue.

        Tasks are folders (hour_folder table), not individual files.
        :param limit: maximum folders per status to load
        :param is_first: on the first load after startup, interrupted (COPYING)
            folders are loaded first so they get resumed
        :return: number of tasks actually queued
        """
        with DB_LOCK:
            db_session = new_db_session()
            resume_count = 0
            if is_first:  # first load after startup: resume interrupted folders first
                folders = db_session.query(HourFolderOrm).filter(HourFolderOrm.status == CopyStatus.COPYING.value).order_by(HourFolderOrm.name).limit(limit).all()
                resume_count = len(folders)
            else:
                folders = []
            folders.extend(db_session.query(HourFolderOrm).filter(HourFolderOrm.status == CopyStatus.STANDBY.value).order_by(HourFolderOrm.name).limit(limit).all())
            db_session.close()
        num = 0
        for x in tqdm(folders, colour=choice(COLORS), desc="增在初始化工作列队"):
            task = {"folder_name": x.name,
                    # BUG FIX: is_resume marks previously interrupted (COPYING)
                    # folders; the original tested STANDBY, inverting the flag
                    "is_resume": x.status == CopyStatus.COPYING.value,
                    }
            try:
                # BUG FIX: a blocking put() never raises Full and would deadlock
                # the single consumer thread once the queue filled up
                SCP_TASKS.put_nowait(task)
                num += 1
            except Full:
                break
        logger.debug(f"向工作队列放入了 {num} 个任务，其中 {resume_count} 个任务之前启动过")
        return num

    @classmethod
    def do_scp_tasks(cls):
        """
        Main worker loop: pull tasks from the queue and copy them. Blocks forever.
        """
        # background daemon thread keeps scanning the source directory
        t = threading.Thread(target=cls.scan_source_dir_forever, daemon=True)
        t.start()
        while 1:
            task = None
            try:
                task = SCP_TASKS.get(timeout=5)
            except Empty:
                num = cls.load_tasks_from_db()
                if num == 0:
                    logger.warning(f"数据库已无数据，阻塞等待任务")
                    task = SCP_TASKS.get()
                    logger.warning(f"已被任务唤醒")
                else:
                    logger.debug(f"从数据加载了 {num} 个任务")
                    continue

            if task:
                cls.do_one_task(task)

    @classmethod
    def do_one_task(cls, task: dict):
        """
        Copy one folder to the remote server and verify the result.

        For resumed tasks the DB records are first compared against the remote
        folder: files already fully present remotely are marked 100% and moved
        to the temporary backup directory; the rest are re-copied.

        :param task: {"folder_name": str, "is_resume": bool}
        """
        folder_name = task['folder_name']
        copied_file_names = []  # files already fully present on the remote side
        need_copy_file_names = []  # files still to be copied
        if task['is_resume']:  # previously interrupted task
            remote_folder = get_remote_folder_obj(folder_name)
            remote_files_map = {x.name: x.size for x in remote_folder.files}
            with DB_LOCK:
                db_session = new_db_session()
                files = db_session.query(cls).filter(cls.folder == folder_name).order_by(cls.name)
                # a file counts as copied when it exists remotely with the same size
                for x in files:
                    if x.name in remote_files_map and x.size == remote_files_map[x.name]:
                        # BUG FIX: was `x.progress == "100%"` — a no-op comparison;
                        # the commit below shows an assignment was intended
                        x.progress = "100%"
                        copied_file_names.append(x.name)
                    else:
                        need_copy_file_names.append(x.name)
                if len(copied_file_names) > 0:
                    db_session.commit()
                db_session.close()
            if len(copied_file_names) > 0:
                move_files_to_temp_backup_dir(folder_name=folder_name, file_names=copied_file_names)
        # run scp, then verify and persist the result
        execute_scp_command(folder_name=folder_name, need_copy_file_names=need_copy_file_names)
        cls.check_result(folder_name=folder_name)

    @classmethod
    def init_database_and_tasks(cls):
        """
        Initialise both tables and seed the work queue.
        """
        cls.scan_source_dir()  # scan local files into the database
        cls.load_tasks_from_db(is_first=True)  # seed the work queue



##################### ssh / scp helpers below #####################
 


def format_pexpect_output(output: bytes) -> dict:
    """
    Parse pexpect's buffered scp output into per-file progress.

    :param output: raw bytes captured between two expect() calls
    :return: mapping of mp4 file name -> latest progress string seen (e.g. "88%")
    """
    result = {}
    for raw_line in output.decode().split("\r\n"):
        line = raw_line.strip()
        if not line:
            continue
        # each segment is "name.mp4 ... NN%"; later segments for the same
        # file overwrite earlier ones, so the freshest percentage wins
        for segment in re.findall(r'([\w-]+\.mp4.+?\%)', line):
            matches = re.findall(r'([\w-]+\.mp4)\D+(.+?\%)', segment)
            if matches:
                file_name, percent = matches[0]
                result[file_name] = percent
    return result


def calculate_progress_value(need_copy_file_names: List[str], scp_output_lines: dict) -> int:
    """
    Count files that just reached 100% and drop them from the pending list.

    :param need_copy_file_names: pending file names; completed ones are
        removed from this list in place
    :param scp_output_lines: mapping of file name -> progress string ("42%")
    :return: number of files that completed in this batch
    """
    completed = 0
    for file_name, progress_str in scp_output_lines.items():
        # names absent from the pending list were already copied — ignore them
        if file_name in need_copy_file_names and progress_str == "100%":
            need_copy_file_names.remove(file_name)
            completed += 1
    return completed


def execute_scp_command(folder_name: str = "2023122812", need_copy_file_names: List[str] = None):
    """
    Push one source folder to the remote server with `scp -r`, answering the
    interactive prompts (host-key confirmation, password) via pexpect, and
    driving a tqdm progress bar from scp's per-file output.

    :param folder_name: name of the sub-folder under SOURCE_DIR to copy
    :param need_copy_file_names: file names inside the folder that still need
        copying; used to compute the progress value (mutated in place by
        calculate_progress_value as files reach 100%)
    """
    need_copy_file_names = [] if need_copy_file_names is None else need_copy_file_names
    user = USER
    pw = PW
    host = HOST
    dest = DEST_DIR
    cmd = f"scp -r {SOURCE_DIR}{folder_name}/ {user}@{host}:{dest}"
    # cmd = f"scp -r /mnt/projects/file_copy_by_folder/models/ {user}@{host}:{dest}"
    # indexes into `expects`: 0=EOF (done), 1=TIMEOUT (poll output),
    # 2/3=password prompts, 4=host-key confirmation
    expects = [pexpect.EOF, pexpect.TIMEOUT, 'password:', f"{user}@{host}'s password:", "Are you sure you want to continue connecting (yes/no/[fingerprint])?"]
    # progress bar total = number of entries in the folder being copied
    total = len(os.listdir(os.path.join(SOURCE_DIR, folder_name)))
    # with tqdm(total=total, colour="red", unit_scale=True, unit_divisor=1024, unit="B") as progress: 
    with tqdm(total=total, colour="white", unit_scale=True, unit_divisor=1, unit="个") as progress: 
        p = pexpect.spawn(cmd, timeout=10)
        status = p.expect(expects, timeout=10)
        while 1:
            if status == 4: # "Are you sure you want to continue connecting (yes/no/[fingerprint])?" — accept the host key
                p.sendline("yes")
            elif status == 3:  # full "user@host's password:" prompt — send the password
                p.sendline(f'{pw}')
            elif status == 0:  # EOF: scp exited
                logger.debug("拷贝完成")
                break
            elif status == 1:  # TIMEOUT: parse whatever output has accumulated for per-file progress
                output = p.before
                # print(output)
                progress_dict = format_pexpect_output(output)
                for file_name, progress_str in progress_dict.items():
                    if file_name in need_copy_file_names:
                        logger.debug(f"{file_name} SCP进度 {progress_str}")
                    else:
                        pass  # ignore output for files that were already copied
                value = calculate_progress_value(need_copy_file_names=need_copy_file_names, scp_output_lines=progress_dict)
                progress.update(value)
            else:
                # status 2 (bare 'password:' fallback) — presumably never hit when
                # the full prompt at index 3 matches first; dump for diagnosis
                print(status, p.before)
            status = p.expect(expects, timeout=1)
    return



if __name__ == '__main__':
    # sample scp progress output captured from pexpect, kept for manually
    # exercising format_pexpect_output
    a = b' \r\n\rQnG4EvXcAvc.mp4                                 0%    0     0.0KB/s   --:-- ETA\rQnG4EvXcAvc.mp4                                14%   19MB  18.8MB/s   00:05 ETA\rQnG4EvXcAvc.mp4                                23%   30MB  18.1MB/s   00:05 ETA\rQnG4EvXcAvc.mp4                                32%   42MB  17.5MB/s   00:05 ETA\rQnG4EvXcAvc.mp4                                41%   54MB  17.0MB/s   00:04 ETA\rQnG4EvXcAvc.mp4                                50%   67MB  16.5MB/s   00:03 ETA\rQnG4EvXcAvc.mp4                                59%   79MB  16.1MB/s   00:03 ETA\rQnG4EvXcAvc.mp4                                69%   91MB  15.7MB/s   00:02 ETA\rQnG4EvXcAvc.mp4                                78%  104MB  15.4MB/s   00:01 ETA\rQnG4EvXcAvc.mp4                                88%  116MB  15.1MB/s   00:01 ETA'
    # format_pexpect_output(a)
    execute_scp_command()  # NOTE: runs a real scp against the configured host
    pass