import os
import json
import datetime
import mysql.connector
from mysql.connector import Error
from bs4 import BeautifulSoup
import config
import threading
import time

# 数据库连接配置
def get_db_connection():
    """Open a new MySQL connection.

    Connection parameters come from the DB_HOST / DB_USER / DB_PASSWORD /
    DB_NAME environment variables; missing *or empty* values fall back to
    the defaults (hence `or`, not `os.environ.get(key, default)`).

    Returns:
        A live mysql.connector connection, or None when connecting fails.
    """
    env = os.environ.get
    params = {
        'host': env('DB_HOST') or 'mysql',
        'user': env('DB_USER') or 'root',
        'password': env('DB_PASSWORD') or 'password',
        'database': env('DB_NAME') or 'web_images',
    }
    try:
        return mysql.connector.connect(**params)
    except Error as exc:
        print(f"数据库连接错误: {exc}")
        return None

# 初始化数据库表
def init_database():
    """Create the html_information table if it does not already exist.

    Opens a fresh connection via get_db_connection(); if no connection can
    be established the function returns silently (get_db_connection has
    already logged the error).
    """
    connection = get_db_connection()
    if not connection:
        return
    cursor = None
    try:
        cursor = connection.cursor()
        # 创建表 — filename carries a UNIQUE key so repeated scans upsert
        # (see the ON DUPLICATE KEY UPDATE inserts elsewhere in this module)
        create_table_query = """
        CREATE TABLE IF NOT EXISTS html_information (
            id INT AUTO_INCREMENT PRIMARY KEY,
            filename VARCHAR(255) NOT NULL,
            title VARCHAR(255) NOT NULL,
            img_name VARCHAR(255) NOT NULL,
            last_commit_time DATETIME NOT NULL,
            img_mod_times TEXT,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
            UNIQUE KEY unique_filename (filename)
        )
        """
        cursor.execute(create_table_query)
        connection.commit()
        print("数据库表初始化成功")
    except Error as e:
        print(f"初始化数据库错误: {e}")
    finally:
        # BUGFIX: the original leaked the cursor on the error path and only
        # closed the connection conditionally; release both unconditionally.
        if cursor is not None:
            cursor.close()
        if connection.is_connected():
            connection.close()

# 获取HTML文件列表
def get_html_files(dir_path):
    """Recursively collect the paths of all ``*.html`` files under *dir_path*.

    :param dir_path: root directory to walk
    :return: list of full file paths, in os.walk traversal order
    """
    return [
        os.path.join(parent, name)
        for parent, _subdirs, names in os.walk(dir_path)
        for name in names
        if name.endswith('.html')
    ]

# 后台异步保存HTML信息的函数

def _save_one_html(cursor, file):
    """Parse a single HTML file and upsert its metadata through *cursor*.

    Reads at most the first 8000 characters, extracts the <title> and up to
    5 <img> tags, records mtimes for up to 3 referenced images that exist
    on disk, and issues an INSERT ... ON DUPLICATE KEY UPDATE (rows are
    keyed by the UNIQUE filename column).

    :param cursor: open MySQL cursor (the caller owns commit/close)
    :param file: absolute path of the HTML file
    :return: True if the row was written, False on any per-file failure
    """
    try:
        # 提取文件名和标题标签内容
        try:
            with open(file, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read(8000)
            soup = BeautifulSoup(content, 'html.parser')
            # BUGFIX: soup.title.string is None for an empty <title>; the
            # old code called .strip() on it, raised, and skipped the file.
            if soup.title and soup.title.string:
                title = soup.title.string.strip()
            else:
                title = '无标题'
            img_tags = soup.find_all('img', limit=5)
            img_name = [os.path.basename(tag['src']) for tag in img_tags if 'src' in tag.attrs]
            print(f"    ✓ 成功读取文件内容，发现 {len(img_tags)} 个图片标签")
        except Exception as e:
            print(f"[读取错误] 文件 {os.path.basename(file)} 读取失败: {str(e)}")
            return False

        # 处理文件名，确保正确的路径格式 (map disk path -> public URL)
        filename_relative = file.replace(config.PROJECT_PATH, '').strip('/\\')
        filename = config.WEB_PAGE_PREFIX.rstrip('/') + '/' + filename_relative

        # 使用当前时间作为最后提交时间
        last_commit_time = datetime.datetime.now()

        # 获取图片文件的修改时间（类似redisApi的实现，只处理前3张图片）
        img_mod_times = []
        valid_images = 0
        for tag in img_tags[:3]:
            if 'src' not in tag.attrs:
                continue
            img_path = os.path.join(os.path.dirname(file), tag['src'])
            if os.path.exists(img_path):
                try:
                    img_mod_time = datetime.datetime.fromtimestamp(os.path.getmtime(img_path)).strftime("%Y-%m-%d %H:%M:%S")
                    img_mod_times.append(img_mod_time)
                    valid_images += 1
                    print(f"    ✓ 找到有效图片: {os.path.basename(img_path)}, 修改时间: {img_mod_time}")
                except Exception as e:
                    print(f"    ✗ 无法获取图片修改时间: {str(e)}")
            else:
                print(f"    ✗ 图片文件不存在: {img_path}")
        print(f"    共找到 {valid_images} 个有效图片文件")

        # 将数据存入数据库
        insert_query = """
        INSERT INTO html_information (filename, title, img_name, last_commit_time, img_mod_times)
        VALUES (%s, %s, %s, %s, %s)
        ON DUPLICATE KEY UPDATE
            title = VALUES(title),
            last_commit_time = VALUES(last_commit_time),
            img_mod_times = VALUES(img_mod_times),
            updated_at = CURRENT_TIMESTAMP
        """
        data = (filename, title, json.dumps(img_name), last_commit_time, json.dumps(img_mod_times))
        cursor.execute(insert_query, data)
        print(f"    ✓ 成功将文件数据保存到数据库")
        return True
    except Exception as e:
        # MySQL errors raised by cursor.execute land here as well, matching
        # the old per-file `except Exception` handler.
        print(f"[处理异常] 文件 {os.path.basename(file)} 处理失败: {str(e)}")
        return False


def _save_html_async(html_files, start_index, batch_size=10):
    """
    异步保存HTML信息到数据库 (runs on a background daemon thread).

    Files from start_index onward are written in batches; each batch gets
    its own connection and a single commit.

    :param html_files: HTML文件列表 (full list of HTML file paths)
    :param start_index: 开始处理的索引 (index to resume from)
    :param batch_size: 每批处理的文件数量 (files per transaction)
    """
    total_files = len(html_files)
    remaining_files = total_files - start_index
    print("=" * 60)
    print(f"开始后台异步处理剩余的 {remaining_files} 个文件")
    print(f"总文件数: {total_files}, 已处理前 {start_index} 个文件")
    print("=" * 60)
    # BUGFIX: guard against division by zero in the progress percentages
    if remaining_files <= 0:
        return

    processed_count = 0  # 已处理的文件数量
    success_count = 0    # 成功处理的文件数量
    error_count = 0      # 处理失败的文件数量
    start_time = time.time()
    total_batches = (remaining_files + batch_size - 1) // batch_size  # 总批次数
    max_conn_retries = 3  # connection attempts per batch before giving up

    for i in range(start_index, total_files, batch_size):
        batch_files = html_files[i:i + batch_size]
        # BUGFIX: number batches relative to start_index — the old
        # `i // batch_size + 1` printed "批次 6/..." for the very first batch
        # when start_index=50 and batch_size=10.
        batch_num = (i - start_index) // batch_size + 1

        print(f"\n[批次 {batch_num}/{total_batches}] 开始处理 {len(batch_files)} 个文件")
        print(f"当前进度: {processed_count}/{remaining_files} ({processed_count/remaining_files*100:.1f}%)")

        # BUGFIX: retry the connection a bounded number of times instead of
        # `continue`, which silently dropped the whole batch; the old retry
        # block was dead code (mutating the `for` variable has no effect).
        connection = None
        for _attempt in range(max_conn_retries):
            connection = get_db_connection()
            if connection:
                break
            print("[数据库错误] 无法连接到数据库，将在5秒后重试...")
            time.sleep(5)
        if not connection:
            # Count the batch as failed rather than pretending it never existed.
            processed_count += len(batch_files)
            error_count += len(batch_files)
            continue

        batch_start = time.time()
        try:
            cursor = connection.cursor()
            batch_success = 0
            batch_error = 0

            for j, file in enumerate(batch_files):
                file_index = i + j  # 文件在总列表中的索引
                file_progress = (file_index - start_index + 1) / remaining_files * 100
                print(f"  正在处理文件 {file_index+1}/{total_files} ({file_progress:.1f}%): {os.path.basename(file)}")
                if _save_one_html(cursor, file):
                    batch_success += 1
                else:
                    batch_error += 1
                processed_count += 1

            connection.commit()
            cursor.close()
            success_count += batch_success
            error_count += batch_error

            # BUGFIX: measure per-batch time — the old code measured from
            # the overall start_time but labeled it as the batch's duration.
            batch_time = time.time() - batch_start
            print(f"\n[批次 {batch_num}/{total_batches}] 处理完成")
            print(f"  批次统计: 成功 {batch_success}, 失败 {batch_error}, 总耗时: {batch_time:.2f}秒")
            print(f"  累计统计: 成功 {success_count}, 失败 {error_count}, 完成度: {processed_count/remaining_files*100:.1f}%")

            # 如果不是最后一批，添加适当延迟
            if i + batch_size < total_files:
                print(f"  准备处理下一批...")
                time.sleep(1)  # 短暂暂停，避免资源占用过高
        except Error as e:
            print(f"[数据库异常] 批次 {batch_num} 数据库操作失败: {str(e)}")
            if connection.is_connected():
                connection.rollback()
        finally:
            # Always release the batch's connection.
            if connection.is_connected():
                connection.close()

    # 计算总处理时间
    total_time = time.time() - start_time
    print("\n" + "=" * 60)
    print(f"所有后台异步处理完成")
    print(f"处理统计: 总文件数 {remaining_files}, 成功 {success_count}, 失败 {error_count}")
    if total_time > 0:  # guard against a zero-duration run
        print(f"总耗时: {total_time:.2f}秒, 平均速度: {success_count/total_time:.2f} 个文件/秒")
    print(f"完成度: 100%")
    print("=" * 60)

# 保存HTML信息到数据库
def saveHtmlInformation():
    """Scan config.PROJECT_PATH for HTML files and store their metadata.

    The first 50 files are processed synchronously so the app has data at
    startup; the remainder is handed to a daemon thread running
    _save_html_async. When config.PROJECT_PATH does not exist, falls back
    to the Docker mount path '/data' (mutating config.PROJECT_PATH), or
    skips processing entirely if that is also missing.
    """
    # 初始化数据库
    init_database()

    print(f"PROJECT_PATH: {config.PROJECT_PATH}")

    # 检查目录是否存在
    if not os.path.exists(config.PROJECT_PATH):
        print(f"警告: 目录 {config.PROJECT_PATH} 不存在")
        # 使用默认的Docker挂载路径
        docker_project_path = '/data'
        if os.path.exists(docker_project_path):
            print(f"使用Docker挂载路径: {docker_project_path}")
            config.PROJECT_PATH = docker_project_path
        else:
            print(f"错误: Docker挂载路径 {docker_project_path} 也不存在")
            print("跳过HTML文件处理")
            return

    # 遍历指定路径中的所有HTML文件
    print("开始扫描HTML文件...")
    html_files = get_html_files(config.PROJECT_PATH)
    print(f"找到 {len(html_files)} 个HTML文件")

    # 先处理前50个文件，这些将立即保存
    INITIAL_FILES_TO_PROCESS = 50
    initial_files = html_files[:INITIAL_FILES_TO_PROCESS]
    remaining_files = html_files[INITIAL_FILES_TO_PROCESS:]
    print(f"立即处理前 {len(initial_files)} 个文件")
    print(f"剩余 {len(remaining_files)} 个文件将在后台异步处理")

    # 立即处理前50个文件
    connection = get_db_connection()
    if connection:
        cursor = None
        try:
            cursor = connection.cursor()

            for i, file in enumerate(initial_files):
                print(f"处理文件 {i+1}/{len(initial_files)}: {os.path.basename(file)}")

                try:
                    # 提取文件名和标题标签内容
                    try:
                        with open(file, 'r', encoding='utf-8', errors='ignore') as f:
                            content = f.read(8000)
                        soup = BeautifulSoup(content, 'html.parser')
                        # BUGFIX: soup.title.string is None for an empty
                        # <title>; the old .strip() on it raised an
                        # AttributeError and the file was skipped.
                        if soup.title and soup.title.string:
                            title = soup.title.string.strip()
                        else:
                            title = ''
                        img_tags = soup.find_all('img', limit=5)
                        img_name = [os.path.basename(tag['src']) for tag in img_tags if 'src' in tag.attrs]
                    except Exception as e:
                        print(f"读取文件错误: {e}")
                        continue

                    # 处理文件名，确保正确的路径格式
                    filename_relative = file.replace(config.PROJECT_PATH, '').strip('/\\')
                    filename = config.WEB_PAGE_PREFIX.rstrip('/') + '/' + filename_relative

                    # 使用当前时间作为最后提交时间
                    last_commit_time = datetime.datetime.now()

                    # 获取图片文件的修改时间（类似redisApi的实现）
                    img_mod_times = []
                    for tag in img_tags[:3]:  # 只处理前3张图片
                        if 'src' in tag.attrs:
                            img_path = os.path.join(os.path.dirname(file), tag['src'])
                            if os.path.exists(img_path):
                                try:
                                    img_mod_times.append(
                                        datetime.datetime.fromtimestamp(os.path.getmtime(img_path)).strftime("%Y-%m-%d %H:%M:%S")
                                    )
                                except Exception:
                                    pass  # best-effort: a missing mtime is not fatal

                    # 将数据存入数据库 (upsert keyed on the UNIQUE filename column)
                    insert_query = """
                    INSERT INTO html_information (filename, title, img_name, last_commit_time, img_mod_times)
                    VALUES (%s, %s, %s, %s, %s)
                    ON DUPLICATE KEY UPDATE
                        title = VALUES(title),
                        last_commit_time = VALUES(last_commit_time),
                        img_mod_times = VALUES(img_mod_times),
                        updated_at = CURRENT_TIMESTAMP
                    """

                    data = (filename, title, json.dumps(img_name), last_commit_time, json.dumps(img_mod_times))
                    cursor.execute(insert_query, data)

                    # 每处理5个文件就提交一次
                    if (i + 1) % 5 == 0:
                        connection.commit()
                        print(f"已提交 {i+1} 个文件的数据")

                except Exception as e:
                    print(f"处理文件异常: {e}")
                    continue

            connection.commit()
            print(f"初始HTML信息保存成功，共处理 {len(initial_files)} 个文件")
        except Error as e:
            print(f"保存HTML信息错误: {e}")
            if connection.is_connected():
                connection.rollback()
        finally:
            # BUGFIX: always release the cursor/connection (the old code
            # leaked the cursor on the error path).
            if cursor is not None:
                cursor.close()
            if connection.is_connected():
                connection.close()

    # 如果有剩余文件，启动后台线程处理
    if remaining_files:
        print("启动后台线程处理剩余文件...")
        # 创建并启动后台线程 (daemon=True: don't block interpreter shutdown)
        background_thread = threading.Thread(
            target=_save_html_async,
            args=(html_files, INITIAL_FILES_TO_PROCESS),
            daemon=True
        )
        background_thread.start()
        print("后台线程已启动，应用可以继续启动")
    else:
        print("没有剩余文件需要后台处理")

# 获取HTML信息
def getHtmlInformation():
    """Fetch every row from html_information, newest commit first.

    JSON-encoded columns (img_name, img_mod_times) are decoded to lists;
    values that fail to decode are left as stored. Returns [] when the
    database is unreachable or the query fails.
    """
    connection = get_db_connection()
    if not connection:
        return []

    try:
        cursor = connection.cursor(dictionary=True)
        query = "SELECT * FROM html_information ORDER BY last_commit_time DESC"
        cursor.execute(query)
        rows = cursor.fetchall()
        cursor.close()
        connection.close()

        # 处理JSON字符串 — decode the serialized list columns in place
        for row in rows:
            try:
                row['img_name'] = json.loads(row['img_name']) if row['img_name'] else []
                row['img_mod_times'] = json.loads(row['img_mod_times']) if row['img_mod_times'] else []
            except json.JSONDecodeError:
                pass

        return rows
    except Error as e:
        print(f"获取HTML信息错误: {e}")
        if connection.is_connected():
            connection.close()
        return []

# 添加HTML信息
def addHtmlInformation(webPageFileName, title):
    """Insert or update the html_information row for one generated page.

    Looks for an existing row whose img_name references
    "<webPageFileName>.png"; updates it if found, otherwise inserts a new
    row. The page URL is built from config.WEB_PAGE_PREFIX and
    config.HTML_FOLDER.

    :param webPageFileName: page base name without extension
    :param title: page title to store
    :return: the freshly written row as a dict (JSON columns decoded),
             or None on failure
    """
    connection = get_db_connection()
    if not connection:
        return None
    cursor = None
    try:
        cursor = connection.cursor(dictionary=True)

        # 检查是否已存在 (matched by the page's screenshot file name)
        check_query = "SELECT * FROM html_information WHERE img_name LIKE %s"
        cursor.execute(check_query, (f"%{webPageFileName}.png%",))
        existing_data = cursor.fetchone()

        current_time = datetime.datetime.now()
        page_url = config.WEB_PAGE_PREFIX + '/' + config.HTML_FOLDER + webPageFileName + '.html'

        if existing_data:
            # 更新现有数据
            update_query = """
            UPDATE html_information
            SET filename = %s, title = %s, last_commit_time = %s, img_mod_times = %s
            WHERE id = %s
            """
            data = (
                page_url,
                title,
                current_time,
                json.dumps([current_time.strftime('%Y-%m-%d %H:%M:%S')]),
                existing_data['id']
            )
            cursor.execute(update_query, data)
            row_id = existing_data['id']
        else:
            # 添加新数据
            insert_query = """
            INSERT INTO html_information (filename, title, img_name, last_commit_time, img_mod_times)
            VALUES (%s, %s, %s, %s, %s)
            """
            data = (
                page_url,
                title,
                json.dumps([webPageFileName + '.png']),
                current_time,
                json.dumps([current_time.strftime('%Y-%m-%d %H:%M:%S')])
            )
            cursor.execute(insert_query, data)
            row_id = cursor.lastrowid

        connection.commit()

        # BUGFIX: fetch the row we just wrote by id. The old code re-queried
        # ORDER BY last_commit_time DESC LIMIT 1, which can return a
        # different row when timestamps collide or a concurrent writer wins.
        cursor.execute("SELECT * FROM html_information WHERE id = %s", (row_id,))
        new_data = cursor.fetchone()

        if new_data:
            try:
                new_data['img_name'] = json.loads(new_data['img_name']) if new_data['img_name'] else []
                new_data['img_mod_times'] = json.loads(new_data['img_mod_times']) if new_data['img_mod_times'] else []
            except json.JSONDecodeError:
                pass

        return new_data
    except Error as e:
        print(f"添加HTML信息错误: {e}")
        if connection.is_connected():
            connection.rollback()
        return None
    finally:
        # Always release the cursor/connection, on success and failure alike.
        if cursor is not None:
            cursor.close()
        if connection.is_connected():
            connection.close()