import os
import re
import sqlite3
import hashlib
import shutil
import datetime
import cv2
from PIL import Image
import exifread
import ffmpeg
import json
from flask import Flask, request, jsonify, render_template, Response, stream_with_context, send_file, abort
import threading
import logging
import io
import fnmatch
from queue import Queue
from threading import Thread, Lock

# pip install flask pillow exifread opencv-python ffmpeg-python

app = Flask(__name__)
# SQLite database file holding the photo index (created by init_db()).
DATABASE = 'photo_organizer.db'
# Media extensions the scanner will index (images + videos).
ALLOWED_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.mp4', '.mov', '.avi', '.mkv', '.flv', '.3gp'}
# Subset treated as images; any other allowed extension is treated as video.
IMAGES = ['.jpg', '.jpeg', '.png', '.gif', '.bmp']

# Database initialization
def init_db():
    """Create the `photos` table in the SQLite database if it is missing."""
    db = sqlite3.connect(DATABASE)
    db.execute('''CREATE  TABLE IF NOT EXISTS photos
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,
                  filename TEXT,
                  filepath TEXT UNIQUE,
                  hash_value TEXT,
                  quality TEXT,
                  create_time TEXT,
                  file_size INTEGER,
                  file_type TEXT,
                  added_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP)''')
    db.commit()
    db.close()

# Compute a file's hash value
def calculate_hash(filepath, block_size=65536):
    """Return the MD5 hex digest of the file at *filepath*, read in chunks
    of *block_size* bytes so large files never load fully into memory."""
    digest = hashlib.md5()
    with open(filepath, 'rb') as fh:
        for chunk in iter(lambda: fh.read(block_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# Extract a timestamp from a filename
def extract_time_from_filename(filename):
    """Try to parse a capture date out of *filename*.

    Patterns tried in order:
      1. YYYY-MM-DD / YYYY_MM_DD (separators optional)
      2. any run of 8 digits, read as YYYYMMDD
      3. DD-MM-YYYY / DD_MM_YYYY (separators optional)

    :return: datetime.datetime on success, None when no pattern yields a
             valid calendar date.
    """
    patterns = [
        r'(\d{4})[_\-]?(\d{2})[_\-]?(\d{2})',  # YYYY-MM-DD or YYYY_MM_DD etc.
        r'(\d{8})',                            # YYYYMMDD
        r'(\d{2})[_\-]?(\d{2})[_\-]?(\d{4})'   # DD-MM-YYYY or DD_MM_YYYY etc.
    ]

    for pattern in patterns:
        match = re.search(pattern, filename)
        if match:
            # Only date construction/parsing can fail here (bad month/day),
            # so catch ValueError instead of the original bare `except:`.
            try:
                if len(match.group(0)) == 8:  # contiguous digits: YYYYMMDD
                    return datetime.datetime.strptime(match.group(0), '%Y%m%d')
                elif len(match.groups()) == 3:
                    # Try YYYY-MM-DD ordering first...
                    try:
                        return datetime.datetime(int(match.group(1)), int(match.group(2)), int(match.group(3)))
                    except ValueError:
                        pass
                    # ...then DD-MM-YYYY ordering.
                    try:
                        return datetime.datetime(int(match.group(3)), int(match.group(2)), int(match.group(1)))
                    except ValueError:
                        pass
            except ValueError:
                continue
    return None

# Extract a timestamp from file metadata
def extract_time_from_metadata(filepath, file_type):
    """Read a creation timestamp from embedded metadata.

    Images: EXIF 'DateTimeOriginal' (preferred) or 'Image DateTime' via
    exifread.  Videos: the format-level 'creation_time' tag via ffmpeg.probe
    (fractional seconds stripped).

    :return: datetime.datetime, or None when absent or unreadable.
    """
    try:
        if file_type == 'image':
            with open(filepath, 'rb') as fh:
                tags = exifread.process_file(fh)
            for key in ('EXIF DateTimeOriginal', 'Image DateTime'):
                if key in tags:
                    return datetime.datetime.strptime(str(tags[key]), '%Y:%m:%d %H:%M:%S')
        elif file_type == 'video':
            info = ffmpeg.probe(filepath)
            fmt_tags = info.get('format', {}).get('tags', {})
            if 'creation_time' in fmt_tags:
                stamp = fmt_tags['creation_time'].split('.')[0]
                return datetime.datetime.strptime(stamp, '%Y-%m-%dT%H:%M:%S')
    except Exception:
        return None
    return None

def path_in_desired(path):
    """Return a truthy re.Match when *path* already lies inside the organized
    YYYY/YYYY_M directory layout (e.g. '2020/2020_5/...').

    Generalized to accept both '/' and '\\' as the separator so Windows-style
    paths are recognized too (the original class `[\\/]` only matched '/').
    """
    return re.search(r'\d{4}[\\/]\d{4}_\d{1,2}', path)

# Assess image quality
def assess_image_quality(filepath):
    """Heuristically grade an image as '高' (high), '中' (medium) or '低' (low).

    '低': unreadable/corrupt, or (near-)black frame (mean brightness < 30).
    '中': filename date disagrees with the filesystem ctime by more than one
         day, or the file is not yet under the organized YYYY/YYYY_M layout.
    '高': everything else.
    """
    try:
        # NOTE(review): the resize_img() result is discarded — this call only
        # acts as a "can PIL decode this?" probe; a failure lands in except.
        resize_img(filepath, '100')
        # Try to decode the file with OpenCV.
        img = cv2.imread(filepath) 
        if img is None:
            return '低'
        
        # Detect fully/mostly black frames via mean grayscale brightness.
        gray = cv2.cvtColor(img,  cv2.COLOR_BGR2GRAY)
        avg_brightness = gray.mean() 
        
        if avg_brightness < 10:  # fully black image
            return '低'
        elif avg_brightness < 30:  # mostly black image
            return '低'
        
        # Compare the date embedded in the filename against the filesystem
        # timestamp (getctime semantics are platform-dependent: change time
        # on Unix, creation time on Windows).
        file_ctime = datetime.datetime.fromtimestamp(os.path.getctime(filepath)) 
        filename_time = extract_time_from_filename(os.path.basename(filepath)) 
        
        if filename_time and abs((filename_time - file_ctime).days) > 1:
            return '中'
        if not path_in_desired(filepath):
            return '中'
        return '高'
    except:
        # Any failure (decode or I/O error) is treated as low quality.
        logging.exception(filepath)
        return '低'

# Get the file creation time (priority: filename > metadata > filesystem)
def get_creation_time(filepath, file_type):
    """Return the file's creation time as 'YYYY-MM-DD HH:MM:SS'.

    Priority: date parsed from the filename > embedded metadata (EXIF /
    video tags) > filesystem ctime.

    Fixes the original double space in the strftime formats
    ('%Y-%m-%d  %H:%M:%S'); the strptime calls elsewhere treat format
    whitespace as \\s+, so both old and new stored values still parse.
    """
    # 1. From the filename
    filename_time = extract_time_from_filename(os.path.basename(filepath))
    if filename_time:
        return filename_time.strftime('%Y-%m-%d %H:%M:%S')

    # 2. From embedded metadata
    metadata_time = extract_time_from_metadata(filepath, file_type)
    if metadata_time:
        return metadata_time.strftime('%Y-%m-%d %H:%M:%S')

    # 3. Fall back to the filesystem timestamp
    ctime = os.path.getctime(filepath)
    return datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S')

def compile_exclude_patterns(exclude_dirs_str):
    """Turn a comma-separated exclude string into fnmatch pattern strings.

    Backslashes are normalized to '/'.  Entries without a wildcard get '/*'
    appended (just '*' when they already end in '/') so that whole directory
    subtrees match.

    :param exclude_dirs_str: comma-separated patterns, may be empty/None
    :return: list of normalized patterns
    """
    if not exclude_dirs_str:
        return []

    result = []
    for raw in exclude_dirs_str.split(','):
        entry = raw.strip()
        if not entry:
            continue
        # Normalize Windows separators.
        entry = entry.replace('\\', '/')
        # Auto-append a wildcard when the pattern has none.
        if '*' not in entry:
            entry += '*' if entry.endswith('/') else '/*'
        result.append(entry)
    return result

def match_pattern(path, exclude_patterns):
    """Return True when *path* matches any pattern in *exclude_patterns*.

    Each pattern is tried against the full path (separators normalized to
    '/'), then against the basename, then against the path with a trailing
    '/'.  A hit is logged at INFO level.

    :param path: path to test
    :param exclude_patterns: list of fnmatch patterns (may be empty)
    """
    if not exclude_patterns:
        return False

    candidate = path.replace(os.sep, '/')
    probes = (candidate, os.path.basename(candidate), candidate + '/')

    for pattern in exclude_patterns:
        if any(fnmatch.fnmatch(probe, pattern) for probe in probes):
            logging.info(f'{path} exclude by {pattern}')
            return True
    return False

# Per-thread SQLite connection cache: sqlite3 connections must be used only
# by the thread that created them, so each thread id maps to its own handle.
conn_pool = {}
def get_conn(close=False):
    """Return (lazily creating) the calling thread's SQLite connection.

    New connections use autocommit (isolation_level=None) and WAL journaling
    so multiple worker threads can read/write concurrently.

    :param close: when True, close and drop this thread's connection from the
                  pool; the (now closed) connection object is still returned,
                  so callers passing True should ignore the return value.
    """
    tid = threading.get_ident()
    if tid not in conn_pool or not conn_pool[tid]:
        conn = sqlite3.connect(DATABASE, isolation_level=None)
        conn.execute("PRAGMA journal_mode=WAL")
        conn_pool[tid] = conn
    conn = conn_pool[tid]
    if close:
        conn.close()
        del conn_pool[tid]
    return conn

def process_file(filepath):
    """Index one media file: hash it, grade it, and insert a DB row.

    Skips files whose exact path is already indexed.  Runs on a consumer
    thread and uses that thread's pooled connection (see get_conn()).
    """
    # Skip if this path is already in the database.
    conn = get_conn()
    cursor = conn.cursor() 
    cursor.execute("SELECT  id FROM photos WHERE filepath = ?", (filepath,))
    if cursor.fetchone(): 
        return  # already indexed, skip

    # Basic file facts.
    filename = os.path.basename(filepath) 
    file_size = os.path.getsize(filepath) 
    ext = os.path.splitext(filepath)[1].lower() 
    file_type = 'image' if ext in IMAGES else 'video'
    
    # Content hash (expensive: full read of the file).
    hash_value = calculate_hash(filepath)
    
    # Quality grade (expensive: image decode); videos default to '高'.
    quality = assess_image_quality(filepath) if file_type == 'image' else '高'
    
    # Creation time (filename > metadata > filesystem).
    create_time = get_creation_time(filepath, file_type)
    
    # Persist the record.
    cursor.execute('''INSERT  INTO photos (filename, filepath, hash_value, quality, create_time, file_size, file_type)
                     VALUES (?, ?, ?, ?, ?, ?, ?)''',
                  (filename, filepath, hash_value, quality, create_time, file_size, file_type))
    conn.commit() 

# Scan a directory tree and index it into the database
def scan_directory(input_dir, num_consumers=4, exclude_dirs_str=''):
    """
    Scan *input_dir* for media files with a producer/consumer thread pool.

    Fire-and-forget: the work runs on daemon threads and this function
    returns immediately; callers poll the module-level progress_tracker for
    completion.  Returns False if a tracked task is already running, True
    once the threads are launched.

    :param input_dir: root directory to walk
    :param num_consumers: number of consumer (indexing) threads
    :param exclude_dirs_str: comma-separated directory patterns to skip
    """
    global progress_tracker
    if progress_tracker.get('status') == 'running':
        return False
    
    # Reset the shared progress state.
    with progress_tracker['lock']:
        progress_tracker.update({ 
            'current': 0,
            'total': 1,
            'status': 'running',
            'message': '开始扫描文件...'
        })
    
    # Bounded queue so a huge tree cannot exhaust memory.
    file_queue = Queue(maxsize=5000)
    total_files = 0
    file_count_lock = Lock()  # guards total_files
    processed_count = 0
    processed_count_lock = Lock()
    # Compile the exclusion patterns once, up front.
    exclude_patterns = compile_exclude_patterns(exclude_dirs_str)
    
    # --------------------------
    # Producer: walk the tree and enqueue matching file paths
    # --------------------------
    def producer():
        nonlocal total_files
        try:
            for root, dirs, files in os.walk(input_dir): 
                # Skip excluded directories.
                if match_pattern(root, exclude_patterns):
                    continue
                for file in files:
                    ext = os.path.splitext(file)[1].lower() 
                    if ext in ALLOWED_EXTENSIONS:
                        filepath = os.path.join(root,  file)
                        # Per-file exclusion check.
                        if match_pattern(filepath, exclude_patterns):
                            continue
                        file_queue.put(filepath)   # hand off to a consumer
                        
                        with file_count_lock:
                            total_files += 1
                            # Publish the running total.
                            with progress_tracker['lock']:
                                progress_tracker['total'] = total_files
        finally:
            # One sentinel per consumer signals end-of-work.
            for _ in range(num_consumers):
                file_queue.put(None) 
    
    # --------------------------
    # Consumer: index files pulled from the queue
    # --------------------------
    def consumer():
        nonlocal processed_count,processed_count_lock
        
        try:
            while True:
                filepath = file_queue.get() 
                
                # Sentinel: producer is done — release this thread's pooled
                # DB connection and exit.
                if filepath is None:
                    file_queue.task_done()
                    get_conn(True)
                    break
                
                try:
                    # Index one file (hash + quality grade + insert).
                    process_file(filepath)
                    with processed_count_lock:
                        processed_count += 1
                    
                    # Publish progress ('current' is a percentage here).
                    with progress_tracker['lock']:
                        if progress_tracker['total'] > 0:
                            progress_tracker['current'] = int((processed_count / progress_tracker['total']) * 100)
                        progress_tracker['message'] = f'已处理: {os.path.basename(filepath)}' 
                
                except Exception as e:
                    logging.exception(filepath)
                    print(f"处理文件 {filepath} 时出错: {str(e)}")
                finally:
                    file_queue.task_done() 
        
        finally:
            # Mark completion once this consumer drains its queue share.
            with progress_tracker['lock']:
                progress_tracker['status'] = 'completed' if progress_tracker['current'] >= 100 else 'running'
                progress_tracker['message'] = f'扫描完成，共处理 {total_files} 个媒体文件'
    
    # --------------------------
    # Launch the workers (deliberately no join(): progress is observed
    # through progress_tracker instead)
    # --------------------------
    # Consumers first, so the bounded queue never deadlocks the producer.
    consumers = []
    for _ in range(num_consumers):
        t = Thread(target=consumer, daemon=True)
        t.start() 
        consumers.append(t) 
    
    # Then the producer.
    producer_thread = Thread(target=producer, daemon=True)
    producer_thread.start() 
    
    return True

# Organize files into the output directory
def organize_files(output_dir, includes):
    """Move every indexed file into output_dir/YYYY/YYYYMM/ by create_time.

    Name collisions get a _1/_2/... suffix; the DB row is updated to the new
    path after each successful move.  Returns False when another tracked
    task is already running, True on completion.

    :param output_dir: destination root directory
    :param includes: comma-separated include patterns (empty = move all);
                     compiled via compile_exclude_patterns but used as a
                     whitelist here.
    """
    global progress_tracker
    if progress_tracker.get('status', 'idle') == 'running':
        return False
    # NOTE(review): unlike scan_directory, updates here do not take
    # progress_tracker['lock'] — readers only copy values, so it is
    # tolerated, but it is inconsistent; consider locking.
    progress_tracker.update({
        'current': 0,
        'total': 1,
        'status': 'running',
        'message': '开始整理文件...'
    })
    
    conn = sqlite3.connect(DATABASE) 
    c = conn.cursor() 
    include_patterns = compile_exclude_patterns(includes)
    
    # The total row count drives the progress percentage.
    c.execute("SELECT  COUNT(*) FROM photos")
    total_files = c.fetchone()[0] 
    progress_tracker['total'] = total_files if total_files > 0 else 1
    
    # Load every indexed file.
    c.execute("SELECT  id, filepath, create_time FROM photos")
    photos = c.fetchall() 
    processed_files = 0
    
    for photo in photos:
        photo_id, filepath, create_time = photo
        try:
            # Progress advances even for rows the include filter skips.
            processed_files += 1
            progress = int((processed_files / progress_tracker['total']) * 100)
            progress_tracker['current'] = progress
            progress_tracker['message'] = f'正在移动: {os.path.basename(filepath)}' 
            if includes and not match_pattern(filepath, include_patterns):
                continue
            
            # Parse the stored creation time (strptime treats whitespace in
            # the format as \s+, so double-spaced stored values also parse).
            dt = datetime.datetime.strptime(create_time,  '%Y-%m-%d %H:%M:%S')
            year = dt.strftime('%Y') 
            month = dt.strftime('%m') 
            
            # Destination: output_dir/YYYY/YYYYMM
            dest_dir = os.path.join(output_dir,  year, f"{year}{month}")
            os.makedirs(dest_dir,  exist_ok=True)
            
            # Target path.
            filename = os.path.basename(filepath) 
            dest_path = os.path.join(dest_dir,  filename)
            
            # On collision, append a numeric suffix until the name is free.
            counter = 1
            while os.path.exists(dest_path): 
                name, ext = os.path.splitext(filename) 
                dest_path = os.path.join(dest_dir,  f"{name}_{counter}{ext}")
                counter += 1
            
            # Move the file.
            shutil.move(filepath,  dest_path)
            
            # Record the new location.
            c.execute("UPDATE  photos SET filepath = ? WHERE id = ?", (dest_path, photo_id))
            conn.commit() 
        except Exception as e:
            logging.exception(filepath)
            error_msg = f"移动文件失败: {filepath}, 错误: {str(e)}"
            progress_tracker['message'] = error_msg
            print(error_msg)
    
    conn.close() 
    
    # Final status.
    progress_tracker['current'] = 100
    progress_tracker['status'] = 'completed'
    progress_tracker['message'] = f'整理完成，共处理 {processed_files} 个文件'
    return True

# Fetch groups of duplicate files
def get_duplicates(duplicate_type, limit=12):
    """Return up to *limit* groups of suspected duplicate files.

    duplicate_type 'hash' groups by exact content hash; anything else groups
    by filename stem (text before the first dot).  Paths containing
    '/photo/' are excluded from consideration.  Within each group, the file
    ranked best by find_best_file() is left unselected; the rest are
    pre-selected (for deletion in the UI).

    NOTE(review): GROUP_CONCAT joins with ',' and the results are split on
    ',' below — a path or name that itself contains a comma would corrupt
    the per-group lists.
    """
    conn = sqlite3.connect(DATABASE) 
    c = conn.cursor() 
    
    if duplicate_type == 'hash':
        # Group by content hash.
        c.execute('''SELECT  hash_value, GROUP_CONCAT(id) as ids, GROUP_CONCAT(filepath) as paths, 
                      GROUP_CONCAT(quality) as qualities, GROUP_CONCAT(file_type) as file_types 
                      FROM (
                            SELECT *
                            FROM photos
                            WHERE filepath NOT LIKE '%/photo/%'
                        ) AS t 
                      GROUP BY hash_value HAVING COUNT(*) > 1 LIMIT ?''', (limit,))
    else:
        # Group by filename stem.  SQLite's SUBSTR with a 0 start yields
        # INSTR-1 characters, i.e. the name up to (not including) the first
        # dot; names without any dot all map to '' and group together.
        c.execute('''SELECT  
                      SUBSTR(filename, 0, INSTR(filename, '.')), 
                      GROUP_CONCAT(id) as ids, 
                      GROUP_CONCAT(filepath) as paths,
                      GROUP_CONCAT(quality) as qualities,
                      GROUP_CONCAT(file_type) as file_types
                      FROM (
                            SELECT *
                            FROM photos
                            WHERE filepath NOT LIKE '%/photo/%'
                        ) AS t 
                      GROUP BY SUBSTR(filename, 0, INSTR(filename, '.')) 
                      HAVING COUNT(*) > 1 LIMIT ?''', (limit,))
    
    duplicates = []
    quality_number = ['低', '中', '高']  # quality ranking, worst first
    for row in c.fetchall(): 
        if duplicate_type == 'hash':
            hash_val, ids, paths, qualities, file_types = row
            group_name = f"哈希组: {hash_val[:8]}"
        else:
            name, ids, paths, qualities, file_types = row
            group_name = f"文件名组: {name}"
            
        id_list = ids.split(',') 
        path_list = paths.split(',') 
        quality_list = qualities.split(',') 
        file_type_list = file_types.split(',') 
        
        # The highest-quality file stays unselected; others are selected.
        best_index = find_best_file(quality_number, path_list, quality_list)
        
        files = []
        for i, (file_id, path, quality, file_type) in enumerate(zip(id_list, path_list, quality_list, file_type_list)):
            files.append({ 
                'id': file_id,
                'path': path,
                'quality': quality,
                'selected': i != best_index,
                'file_type': file_type  # lets the UI render image vs video
            })
        
        duplicates.append({
            'group_name': group_name,
            'files': files
        })
    
    conn.close() 
    return duplicates

def find_best_file(quality_number, path_list, quality_list):
    """Pick the index of the file to KEEP in a duplicate group.

    Score = position of the quality label in *quality_number* (低=0, 中=1,
    高=2), demoted by one when the file is not already under the organized
    YYYY/YYYY_M layout.  Ties are broken by shorter basename, then shorter
    directory path (shorter names are assumed to be originals rather than
    copies).

    :param quality_number: ordered quality labels, worst first
    :param path_list: file paths in the group
    :param quality_list: quality label per path (same order)
    :return: index into path_list of the best file
    """
    best_index = 0
    best_quality = quality_number.index('低')
    for i, quality in enumerate(quality_list):
        number = quality_number.index(quality)
        path = path_list[i]
        # Demote files that are not yet in the organized directory layout.
        if number and not path_in_desired(path):
            number -= 1
        if number > best_quality:
            best_index = i
            best_quality = number
        elif number == best_quality:
            # Tie-break 1: prefer the shorter basename...
            best_path = path_list[best_index]
            name = os.path.basename(path)
            best_name = os.path.basename(best_path)
            if len(name) < len(best_name):
                best_index = i
                best_quality = number
                continue
            if len(name) > len(best_name):
                continue
            # ...tie-break 2: then the shorter directory path.
            name = os.path.dirname(path)
            best_name = os.path.dirname(best_path)
            if len(name) < len(best_name):
                best_index = i
                best_quality = number
    return best_index

# Delete the selected files
def delete_selected(ids):
    """Delete the given photo ids from disk and from the database.

    Refuses to start while another tracked task is running (returns False).
    Rows are removed from the DB even when the on-disk delete failed, so the
    index never points at files the user asked to remove.

    :param ids: list of photo row ids
    :return: number of matching rows fetched for deletion (0 for empty ids)
    """
    global progress_tracker
    if progress_tracker.get('status', 'idle') == 'running':
        return False
    if not ids:
        # Guard: an empty selection would otherwise build invalid SQL
        # ("... WHERE id IN ()") and raise OperationalError.
        progress_tracker.update({
            'current': 100,
            'total': 1,
            'status': 'completed',
            'message': '删除完成，共处理 0 个文件'
        })
        return 0
    progress_tracker.update({
        'current': 0,
        'total': len(ids),
        'status': 'running',
        'message': '开始删除文件...'
    })

    conn = sqlite3.connect(DATABASE)
    c = conn.cursor()

    # Fetch the file paths for the selected ids (parameterized IN list).
    placeholders = ','.join('?' * len(ids))
    c.execute("SELECT  filepath FROM photos WHERE id IN ({})".format(placeholders), ids)
    files_to_delete = [row[0] for row in c.fetchall()]

    # Remove the files from disk, publishing progress as we go.
    for i, filepath in enumerate(files_to_delete):
        try:
            if os.path.exists(filepath):
                os.remove(filepath)
                progress_tracker['current'] = int(((i + 1) / len(files_to_delete)) * 100)
                progress_tracker['message'] = f'正在删除: {os.path.basename(filepath)}'
        except Exception as e:
            logging.exception(filepath)
            error_msg = f"删除失败: {filepath}, 错误: {str(e)}"
            progress_tracker['message'] = error_msg
            print(error_msg)

    # Drop the rows from the index.
    c.execute("DELETE  FROM photos WHERE id IN ({})".format(placeholders), ids)
    conn.commit()
    conn.close()

    # Final status.  (The original ended with a duplicated, unreachable
    # `return` statement — removed.)
    progress_tracker['current'] = 100
    progress_tracker['status'] = 'completed'
    progress_tracker['message'] = f'删除完成，共处理 {len(files_to_delete)} 个文件'
    return len(files_to_delete)

def resize_img(path, width):
    """Scale the image at *path* down to *width* pixels wide, keeping the
    aspect ratio.

    :param path: image file path
    :param width: target width; falsy values ('' / None / 0) skip resizing
    :return: a readable BytesIO holding the re-encoded image, or *path*
             unchanged when *width* is falsy
    """
    if not width:
        return path
    out = io.BytesIO()  # in-memory result buffer
    with Image.open(path) as im:
        target_w = int(width)
        target_h = int(im.size[1] * target_w / im.size[0])  # keep aspect
        im.thumbnail((target_w, target_h), Image.LANCZOS)   # high-quality scale
        im.save(out, format=im.format if im.format else 'JPEG')
    out.seek(0)  # rewind so callers can read from the start
    return out

def preview(video: str, width: int = 100) -> io.BytesIO:
    """Grab the first frame of *video*, scaled to *width* px wide (height
    auto), encoded as JPEG.

    :return: an in-memory JPEG as io.BytesIO (the original annotation of
             Image.Image was wrong — nothing here builds a PIL image).
    """
    out, _ = (
        ffmpeg.input(video, ss=0)
              .filter('scale', width, -1)
              .output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
              .run(capture_stdout=True, quiet=True)
    )
    return io.BytesIO(out)

# Flask routes
@app.route('/') 
def index():
    """Serve the single-page UI by reading index.html from disk."""
    with open('index.html',  'r', encoding='utf-8') as fh:
        html = fh.read()
    return html

@app.route('/api/scan',  methods=['POST'])
def api_scan():
    """Kick off a background directory scan; rejected while one is running."""
    payload = request.json
    exclude_dirs = payload.get('exclude_dirs',  '')
    thread_count = int(payload.get('threads',  '4'))
    started = scan_directory(payload['input_dir'], thread_count, exclude_dirs)
    if started:
        return jsonify({'status': 'success', 'message': '任务已启动'})
    return jsonify({'status': 'error', 'message': '任务启动失败'})

@app.route('/api/organize',  methods=['POST'])
def api_organize():
    """Start organizing files into the output directory on a worker thread."""
    payload = request.json
    worker = Thread(target=organize_files, daemon=True,
                    args=(payload['output_dir'], payload.get('includes',  '')))
    worker.start()
    return get_progress()

@app.route('/api/duplicates',  methods=['GET'])
def api_duplicates():
    """List duplicate groups, grouped by content hash or filename stem."""
    dup_type = request.args.get('type',  'hash')
    limit = int(request.args.get('limit', 12))
    return jsonify(get_duplicates(dup_type, limit))

@app.route('/api/delete',  methods=['POST'])
def api_delete():
    """Delete the selected photo ids from disk and the index."""
    deleted = delete_selected(request.json['ids'])
    return jsonify({'status': 'success', 'count': deleted})
    
# Global progress-tracking state
# Global progress state shared between Flask request threads and background
# worker threads.  'lock' guards updates (the scan workers take it; other
# writers and readers currently do not).
progress_tracker = {
    'current': 0,      # percent complete (0-100)
    'total': 100,      # total units of work for the current task
    'status': 'idle',  # idle, running, completed, error
    'message': '',     # human-readable progress message
    'lock': Lock()
}
@app.route('/api/progress') 
def get_progress():
    """Return the current progress state as JSON, minus the Lock object
    (which is not JSON-serializable)."""
    snapshot = {k: v for k, v in progress_tracker.items() if k != 'lock'}
    return jsonify(snapshot)

@app.route('/api/media/<int:media_id>') 
def get_media(media_id):
    """Serve a media file by database id.

    Images are sent directly (optionally resized in memory via ?width=N).
    Videos stream from disk in 1 MB chunks, or return a single scaled JPEG
    first-frame preview when ?width=N is given.
    """
    conn = sqlite3.connect(DATABASE)
    c = conn.cursor()
    c.execute("SELECT  filepath, file_type FROM photos WHERE id = ?", (media_id,))
    result = c.fetchone()
    conn.close()

    if not result or not result[0]:
        abort(404, description="媒体文件不存在")

    filepath, file_type = result

    # Map the file extension to a MIME type for the response.
    ext = os.path.splitext(filepath)[1].lower()
    mime_types = {
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.png': 'image/png',
        '.gif': 'image/gif',
        '.bmp': 'image/bmp',
        '.mp4': 'video/mp4',
        '.mov': 'video/quicktime',
        '.avi': 'video/x-msvideo',
        '.mkv': 'video/x-matroska',
        '.flv': 'video/x-flv',
        '.3gp': 'video/3gpp'  # was missing although .3gp is an allowed extension
    }
    mime_type = mime_types.get(ext, 'application/octet-stream')
    width = request.args.get('width', '')

    try:
        if file_type == 'image':
            # Image: as-is, or resized in memory when width is given.
            return send_file(
                resize_img(filepath, width),
                mimetype=mime_type,
                as_attachment=False
            )
        elif file_type == 'video':
            if width:
                # Thumbnail request: one scaled first-frame JPEG.
                # (Bug fix: the old code used mime_types.get('jpeg', ...) — a
                # key that never existed, so it only worked via the default.)
                return send_file(
                    preview(filepath, int(width)),
                    mimetype='image/jpeg',
                    as_attachment=False
                )

            # Full video: stream from disk in 1 MB chunks.
            def generate():
                with open(filepath, 'rb') as f:
                    while True:
                        data = f.read(1024 * 1024)
                        if not data:
                            break
                        yield data

            return Response(
                stream_with_context(generate()),
                mimetype=mime_type,
                headers={"Content-Length": str(os.path.getsize(filepath))}
            )
        else:
            abort(400, description="不支持的文件类型")
    except Exception as e:
        # NOTE: this also converts the abort(400) HTTPException above into a
        # 500, matching the original structure.
        logging.exception(filepath)
        abort(500, description=f"无法加载媒体文件: {str(e)}")

def init_log():
    """Configure the root logger to append INFO-level records to run.log.

    The thread id is included in the format because the scanner runs several
    worker threads.  (Removed: unused logCount/logsize locals and the dead,
    commented-out RotatingFileHandler line.)
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    handler = logging.FileHandler('run.log', 'a', 'utf8')
    handler.setFormatter(logging.Formatter(
        '%(asctime)s T%(thread)5d %(levelname)-8s %(message)s'))
    logger.addHandler(handler)

def test():
    """Ad-hoc debug helper: checks whether one hard-coded path is indexed.

    Not wired to any route.  filename/file_size/ext/file_type are computed
    but unused — kept for manual experimentation only.
    """
    conn = sqlite3.connect(DATABASE, check_same_thread=False) 
    cursor = conn.cursor() 
    filepath = '/mnt/2t/照片/2006/2006_12/DSC00933.JPG'
    filename = os.path.basename(filepath) 
    file_size = 126
    ext = os.path.splitext(filepath)[1].lower() 
    file_type = 'image' if ext in IMAGES else 'video'
    
    # Check whether the file is already present in the database.
    cursor.execute("SELECT  id FROM photos WHERE filepath = ?", (filepath,))
    if cursor.fetchone(): 
        return  # already indexed, nothing to do


if __name__ == '__main__':
    # get_duplicates('hash')
    init_log()
    init_db()
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader and
    # 0.0.0.0 binds to all interfaces — restrict for anything beyond LAN use.
    app.run(host='0.0.0.0', debug=True) 
