#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
二维码解码器模块
从图片中提取二维码并解码为文件数据
"""

import os
import struct
import zlib
from typing import Any, Dict, List, Optional, Tuple

from PIL import Image
import qrcode
from pyzbar import pyzbar


class QRDecoder:
    """Decode files that were split across multiple QR-code images.

    Counterpart of the QR encoder: each QR code carries one chunk of a file
    in the binary layout parsed by :meth:`_parse_qr_data`.  The decoder scans
    a directory of images, groups chunks by filename, and reassembles (and,
    when flagged, zlib-decompresses) the original file.
    """

    def __init__(self):
        # Bit in the flags byte marking a zlib-compressed payload.
        self.COMPRESSION_FLAG = 0x01
        # Cache of decoded QR payloads (reserved; not read by current methods).
        self.qr_data_cache = {}

    def scan_qr_codes(self, image_dir: str, progress_callback=None, logger=None) -> Dict[str, Dict[int, Tuple[int, int, bytes, bool]]]:
        """Scan every image in *image_dir* and collect QR chunk data.

        Stops early as soon as every chunk of one file has been found; in
        that case only that file's chunks are returned.

        Args:
            image_dir: Directory containing the QR-code images (non-recursive).
            progress_callback: Optional callable receiving a percentage (0-100).
            logger: Optional logger exposing info/success/warning methods.

        Returns:
            {filename: {index: (total, original_size, chunk_data, use_compression)}}
        """
        qr_data_map: Dict[str, Dict[int, Tuple[int, int, bytes, bool]]] = {}

        # Collect candidate image files by extension.
        image_files = [
            os.path.join(image_dir, name)
            for name in os.listdir(image_dir)
            if name.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif', '.tiff'))
            and os.path.isfile(os.path.join(image_dir, name))
        ]
        total_files = len(image_files)

        for i, image_path in enumerate(image_files):
            try:
                # Use a context manager so the image handle is not leaked.
                with Image.open(image_path) as image:
                    decoded_objects = pyzbar.decode(image)

                for obj in decoded_objects:
                    if obj.type != 'QRCODE':
                        continue
                    try:
                        # zbar re-encodes byte-mode QR payloads as UTF-8; the
                        # decode-UTF-8 / encode-latin-1 roundtrip recovers the
                        # original raw bytes.  NOTE(review): surrogateescape
                        # characters cannot be encoded to latin-1, so payloads
                        # with invalid UTF-8 still raise and are skipped below.
                        qr_data = obj.data.decode('utf-8', errors='surrogateescape').encode('latin-1')

                        parsed_data = self._parse_qr_data(qr_data)
                        if not parsed_data:
                            continue
                        filename, index, total, original_size, chunk_data, use_compression = parsed_data

                        # Group the chunk under its filename (with compression info).
                        qr_data_map.setdefault(filename, {})[index] = (
                            total, original_size, chunk_data, use_compression
                        )
                        if logger:
                            logger.success(f"[扫描] 解析成功: {filename} [{index+1}/{total}] (压缩: {use_compression}) - 片段 {index} 已找到")

                        # Early exit once all chunks of one file are present.
                        if len(qr_data_map[filename]) == total:
                            if logger:
                                logger.success(f"[提前结束] 文件 '{filename}' 的所有二维码已找到，停止扫描")
                            return {filename: qr_data_map[filename]}
                    except Exception as e:
                        # Unparseable QR code: log and skip it.
                        if logger:
                            logger.warning(f"二维码解析失败: {str(e)}")

                if progress_callback and total_files > 0:
                    progress_callback(((i + 1) / total_files) * 100)

            except Exception as e:
                # Unreadable image: log and skip it.
                if logger:
                    logger.warning(f"图片 {os.path.basename(image_path)}: 处理失败 - {str(e)}")
                continue

        return qr_data_map

    def _parse_qr_data(self, qr_data: bytes) -> Optional[Tuple[str, int, int, int, bytes, bool]]:
        """Parse one QR payload into its header fields and chunk data.

        Layout (integers little-endian):
            [flags:1][filename_len:1][filename][index:2][total:2][original_size:4][chunk]

        Args:
            qr_data: Raw QR payload bytes.

        Returns:
            (filename, index, total, original_size, chunk_data, use_compression),
            or None when the payload is malformed.
        """
        try:
            # Smallest valid payload: full header with a 1-byte filename.
            if len(qr_data) < 11:
                return None

            flags = qr_data[0]
            filename_length = qr_data[1]
            # A single byte cannot exceed 255, so only zero is invalid.
            if filename_length == 0:
                return None

            header_length = 2 + filename_length + 8
            if len(qr_data) < header_length:
                return None

            try:
                filename = qr_data[2:2 + filename_length].decode('utf-8')
            except UnicodeDecodeError:
                return None

            # index (2B), total (2B), original_size (4B) follow the filename.
            index, total, original_size = struct.unpack_from('<HHI', qr_data, 2 + filename_length)

            # The chunk index must lie inside a non-empty chunk count.
            if total == 0 or index >= total:
                return None

            use_compression = bool(flags & self.COMPRESSION_FLAG)
            return filename, index, total, original_size, qr_data[header_length:], use_compression

        except (struct.error, UnicodeDecodeError, IndexError):
            return None

    def _validate_data_map(self, data_map: Dict[int, Tuple[int, int, bytes, bool]]) -> Optional[Tuple[int, int, bool]]:
        """Verify that every chunk of *data_map* agrees on its metadata.

        Args:
            data_map: {index: (total, original_size, chunk_data, use_compression)}

        Returns:
            (total, original_size, use_compression) when consistent, else None.
        """
        expected_total = None
        original_size = None
        use_compression = None
        for index, (total, orig_size, _chunk, compressed) in data_map.items():
            if expected_total is None:
                expected_total, original_size, use_compression = total, orig_size, compressed
            elif (total, orig_size, compressed) != (expected_total, original_size, use_compression):
                return None  # metadata differs between chunks
            if index < 0 or index >= total:
                return None  # chunk index out of range
        if expected_total is None:
            return None
        return expected_total, original_size, use_compression

    def _finalize_data(self, all_data: bytearray, original_size: int, use_compression: bool, logger=None) -> Optional[bytes]:
        """Decompress (when flagged) and size-check the concatenated chunks.

        Returns:
            The final file bytes, or None when decompression or the size
            check fails.
        """
        if use_compression:
            if logger:
                logger.info(f"[解压] 正在解压数据: {len(all_data)} → {original_size} 字节")
            try:
                decompressed_data = zlib.decompress(bytes(all_data))
            except zlib.error as e:
                if logger:
                    logger.error(f"[解压] 解压失败: {e}")
                return None
            # The decompressed size must match the size recorded at encode time.
            if len(decompressed_data) != original_size:
                if logger:
                    logger.warning(f"[解压] 解压后大小不匹配: 期望 {original_size}, 实际 {len(decompressed_data)}")
                return None
            if logger:
                logger.success(f"[解压] 解压成功: {len(all_data)} → {len(decompressed_data)} 字节")
            return decompressed_data

        # Uncompressed: the concatenated size must match directly.
        if len(all_data) != original_size:
            if logger:
                logger.warning(f"[验证] 数据大小不匹配: 期望 {original_size}, 实际 {len(all_data)}")
            return None
        if logger:
            logger.success(f"[验证] 数据验证成功: {len(all_data)} 字节")
        return bytes(all_data)

    def reconstruct_file(self, qr_data_map: Dict[str, Dict[int, Tuple[int, int, bytes, bool]]], output_dir: str, logger=None) -> List[str]:
        """Reassemble files from grouped QR chunk data (compression-aware).

        Args:
            qr_data_map: {filename: {index: (total, original_size, chunk, compressed)}}
            output_dir: Directory the reconstructed files are written to
                (created if missing).
            logger: Optional logger.

        Returns:
            Paths of the files that were successfully written.
        """
        reconstructed_files: List[str] = []
        os.makedirs(output_dir, exist_ok=True)

        for filename, data_map in qr_data_map.items():
            try:
                if not data_map:
                    continue

                meta = self._validate_data_map(data_map)
                if meta is None:
                    continue
                expected_total, original_size, use_compression = meta

                # Reject metadata outside the on-wire field ranges.
                if not (0 < expected_total <= 65535):
                    continue
                if not (0 < original_size <= 0xFFFFFFFF):
                    continue

                if logger:
                    logger.info(f"[重构] 开始重构文件: {filename} (共 {expected_total} 个片段)")

                # Concatenate the chunks in index order, tracking gaps.
                all_data = bytearray()
                has_missing = False
                for i in range(expected_total):
                    entry = data_map.get(i)
                    if entry is None:
                        has_missing = True
                        if logger:
                            logger.warning(f"[重构] 片段 {i+1} 缺失")
                        continue
                    all_data.extend(entry[2])
                    if logger:
                        logger.success(f"[重构] 片段 {i+1}/{expected_total} 已合并")

                if has_missing:
                    continue
                # Chunk count must match the declared total exactly.
                if len(data_map) != expected_total:
                    continue

                final_data = self._finalize_data(all_data, original_size, use_compression, logger)
                if final_data is None:
                    continue

                # The filename came from untrusted QR data: strip any path
                # components so it cannot escape output_dir (path traversal).
                safe_name = os.path.basename(filename)
                output_path = os.path.join(output_dir, safe_name)
                with open(output_path, 'wb') as f:
                    f.write(final_data)

                reconstructed_files.append(output_path)

                if use_compression:
                    compression_info = f"(压缩文件，原始大小: {original_size} 字节)"
                else:
                    compression_info = f"(未压缩，大小: {len(final_data)} 字节)"
                if logger:
                    logger.success(f"[重构] 文件保存成功: {filename} {compression_info} -> {output_path}")

            except Exception:
                # Best effort: a failure on one file must not stop the rest.
                continue

        # Summary statistics.
        if reconstructed_files:
            if logger:
                logger.success(f"🎉 [统计] 成功重构 {len(reconstructed_files)} 个文件")
                for file_path in reconstructed_files:
                    file_size = os.path.getsize(file_path)
                    logger.info(f"[统计] 文件: {os.path.basename(file_path)} - {file_size} 字节")
        elif logger:
            logger.warning("[统计] 没有成功重构的文件")

        return reconstructed_files

    def get_reconstruction_info(self, qr_data_map: Dict[str, Dict[int, Tuple[int, int, bytes, bool]]]) -> Dict[str, Dict[str, Any]]:
        """Summarize, per file, how complete the scanned chunk set is.

        Args:
            qr_data_map: QR chunk data grouped by filename.

        Returns:
            {filename: {"total_qr_count", "available_qr_count",
                        "missing_indices", "original_size",
                        "use_compression", "can_reconstruct", "complete"}}
            (the "complete" key is omitted for inconsistent entries, matching
            the historical shape).
        """
        reconstruction_info: Dict[str, Dict[str, Any]] = {}

        for filename, data_map in qr_data_map.items():
            if not data_map:
                continue

            meta = self._validate_data_map(data_map)
            if meta is None:
                # Inconsistent metadata: report the file as unreconstructable.
                reconstruction_info[filename] = {
                    "total_qr_count": 0,
                    "available_qr_count": 0,
                    "missing_indices": [],
                    "original_size": 0,
                    "use_compression": False,
                    "can_reconstruct": False,
                }
                continue

            expected_total, original_size, use_compression = meta
            missing_indices = [i for i in range(expected_total) if i not in data_map]
            can_reconstruct = not missing_indices

            reconstruction_info[filename] = {
                "total_qr_count": expected_total,
                "available_qr_count": expected_total - len(missing_indices),
                "missing_indices": missing_indices,
                "original_size": original_size,
                "use_compression": use_compression,
                "can_reconstruct": can_reconstruct,
                "complete": can_reconstruct,
            }

        return reconstruction_info