import os
import shutil
from collections import defaultdict
from PIL import Image
import imagehash
import hashlib
from datetime import datetime
from image_processor import ImageProcessor

class MediaStats:
    """Track statistics for image and video files and organize them on disk.

    Files are registered via :meth:`add_image` / :meth:`add_video`, which
    record per-extension and per-directory counts plus a content fingerprint.
    The fingerprints drive duplicate detection in :meth:`organize_files`,
    which keeps one copy per fingerprint, parks duplicates in ``temp/``,
    routes EXIF-less images to ``web/``, and files everything else into
    ``base_dir/YYYY/MM/DD`` directories.
    """

    # Maximum number of bytes read from a video when fingerprinting it
    # (kept small on purpose for performance).
    _VIDEO_HASH_READ_SIZE = 102400

    def __init__(self, base_dir):
        """Initialize counters and ensure the temp/web work directories exist.

        Args:
            base_dir: Root directory under which ``temp/``, ``web/`` and the
                year/month/day folders are created.
        """
        self.base_dir = base_dir
        self.total_image_count = 0
        self.total_video_count = 0
        self.format_counts = {}        # extension (lowercase) -> file count
        self.directory_counts = {}     # directory path -> file count
        self.image_hashes = defaultdict(list)  # fingerprint -> [image paths]
        self.video_hashes = defaultdict(list)  # fingerprint -> [video paths]
        self.temp_dir = os.path.join(base_dir, 'temp')
        self.web_dir = os.path.join(base_dir, 'web')

        # exist_ok avoids the race between an existence check and creation.
        for dir_path in (self.temp_dir, self.web_dir):
            os.makedirs(dir_path, exist_ok=True)

    def _get_date_from_path(self, file_path):
        """Extract a date from an ``IMG_YYYYMMDD_*`` / ``VID_YYYYMMDD_*`` name.

        Falls back to the file's modification time when the filename does not
        follow that pattern.
        """
        filename = os.path.basename(file_path)
        try:
            date_str = filename.split('_')[1]
            return datetime.strptime(date_str, '%Y%m%d')
        except (IndexError, ValueError):
            # Name does not match the pattern -- use the mtime instead.
            mtime = os.path.getmtime(file_path)
            return datetime.fromtimestamp(mtime)

    def _create_date_directory(self, date):
        """Create (if needed) and return the ``base_dir/YYYY/MM/DD`` directory."""
        day_dir = os.path.join(
            self.base_dir, str(date.year), f"{date.month:02d}", f"{date.day:02d}"
        )
        # makedirs creates the year/month parents in the same call.
        os.makedirs(day_dir, exist_ok=True)
        return day_dir

    def _classify_image(self, file_path, dated_files, web_files):
        """Append *file_path* to *dated_files* if it has EXIF data, else *web_files*."""
        processor = ImageProcessor(file_path)
        processor.process()
        if processor.has_exif_info():
            dated_files.append(file_path)
        else:
            web_files.append(file_path)

    def _move_duplicates_to_temp(self, paths):
        """Announce and move each of *paths* into the temp directory."""
        print("移动以下文件到temp目录:")
        for path in paths:
            print(f"  - {path}")
            new_path = os.path.join(self.temp_dir, os.path.basename(path))
            self._move_file(path, new_path)

    def organize_files(self):
        """Deduplicate registered files and move them to their target folders.

        For each fingerprint group the lexicographically first path is kept
        (usually the shortest/simplest one) and the rest are moved to temp/.
        Kept images without EXIF data go to web/; every other kept file lands
        in a date directory derived from its name (or mtime).
        """
        print("\n=== 开始整理文件 ===")

        dated_files = []  # files destined for the date directories
        web_files = []    # EXIF-less images destined for web/

        # Images: keep one file per fingerprint, routed by EXIF presence.
        for paths in self.image_hashes.values():
            if len(paths) > 1:
                sorted_paths = sorted(paths)
                keep_file = sorted_paths[0]
                self._classify_image(keep_file, dated_files, web_files)
                print(f"\n发现重复图片，保留: {keep_file}")
                self._move_duplicates_to_temp(sorted_paths[1:])
            else:
                self._classify_image(paths[0], dated_files, web_files)

        # Videos: same dedup policy, no EXIF routing.
        for paths in self.video_hashes.values():
            if len(paths) > 1:
                sorted_paths = sorted(paths)
                keep_file = sorted_paths[0]
                dated_files.append(keep_file)
                print(f"\n发现重复视频，保留: {keep_file}")
                self._move_duplicates_to_temp(sorted_paths[1:])
            else:
                dated_files.extend(paths)

        # Move EXIF-less images into web/.
        print("\n=== 开始移动无EXIF信息的图片到web目录 ===")
        for file_path in web_files:
            if os.path.exists(file_path):
                new_path = os.path.join(self.web_dir, os.path.basename(file_path))
                self._move_file(file_path, new_path)

        # Move the remaining files into their date directories.
        print("\n=== 开始移动文件到日期目录 ===")
        for file_path in dated_files:
            if os.path.exists(file_path):  # an earlier pass may have moved it
                date = self._get_date_from_path(file_path)
                target_dir = self._create_date_directory(date)
                new_path = os.path.join(target_dir, os.path.basename(file_path))
                self._move_file(file_path, new_path)

    def _move_file(self, src_path, dst_path):
        """Move *src_path* to *dst_path*, appending ``_1``, ``_2``, ... on collision."""
        try:
            base, ext = os.path.splitext(dst_path)
            counter = 1
            while os.path.exists(dst_path):
                dst_path = f"{base}_{counter}{ext}"
                counter += 1

            shutil.move(src_path, dst_path)
            print(f"移动文件: {src_path} -> {dst_path}")
        except Exception as e:
            # Best effort: report the failure and continue with other files.
            print(f"移动文件失败 {src_path}: {str(e)}")

    def add_image(self, image_path):
        """Record statistics and a perceptual fingerprint for one image."""
        self.total_image_count += 1
        self._count_format(image_path)
        self._count_directory(image_path)
        self._calculate_image_hash(image_path)

    def add_video(self, video_path):
        """Record statistics and a content fingerprint for one video."""
        self.total_video_count += 1
        self._count_format(video_path)
        self._count_directory(video_path)
        self._calculate_video_hash(video_path)

    def _calculate_video_hash(self, video_path):
        """Fingerprint a video from its size plus (at most) its first 100KB.

        Only a prefix of the file is read to keep registration fast; the
        size is mixed in so same-prefix files of different length differ.
        """
        try:
            file_size = os.path.getsize(video_path)

            # Read the whole file when it is smaller than the cap.
            read_size = min(self._VIDEO_HASH_READ_SIZE, file_size)
            with open(video_path, 'rb') as f:
                content = f.read(read_size)

            # Hash the raw bytes directly rather than an f-string repr of them.
            hasher = hashlib.md5(str(file_size).encode())
            hasher.update(content)
            self.video_hashes[hasher.hexdigest()].append(video_path)

        except Exception as e:
            print(f"计算视频哈希出错 {video_path}: {str(e)}")

    def _count_format(self, image_path):
        """Count the file's extension (lower-cased)."""
        ext = os.path.splitext(image_path)[1].lower()
        self.format_counts[ext] = self.format_counts.get(ext, 0) + 1

    def _count_directory(self, image_path):
        """Count the file's containing directory."""
        directory = os.path.dirname(image_path)
        self.directory_counts[directory] = self.directory_counts.get(directory, 0) + 1

    def _calculate_image_hash(self, image_path):
        """Fingerprint an image by combining several perceptual hashes.

        average/difference/perceptual hashes (hash_size=16 for finer
        granularity) plus the pixel dimensions are combined so that only
        visually identical, same-sized images collide.
        """
        try:
            with Image.open(image_path) as img:
                # Convert to RGB to sidestep mode-specific decoding issues.
                if img.mode != 'RGB':
                    img = img.convert('RGB')

                avg_hash = str(imagehash.average_hash(img, hash_size=16))
                dhash = str(imagehash.dhash(img, hash_size=16))
                phash = str(imagehash.phash(img, hash_size=16))

                # Image dimensions act as an extra discriminator.
                width, height = img.size
                combined_hash = f"{avg_hash}_{dhash}_{phash}_{width}x{height}"
                hash_value = hashlib.md5(combined_hash.encode()).hexdigest()

                self.image_hashes[hash_value].append(image_path)
        except Exception as e:
            print(f"计算图片哈希出错 {image_path}: {str(e)}")

    def print_stats(self):
        """Print the collected statistics and return them as a string.

        Returns:
            The full statistics text (also printed), for display in a GUI.
        """
        parts = ["\n========== 媒体文件统计信息 ==========\n"]
        parts.append(f"总图片数量: {self.total_image_count}\n")
        parts.append(f"总视频数量: {self.total_video_count}\n")

        # Per-extension counts.
        parts.append("\n各格式文件数量:\n")
        for ext, count in self.format_counts.items():
            parts.append(f"{ext}: {count} 个\n")

        # Per-directory counts.
        parts.append("\n各目录文件数量:\n")
        for directory, count in self.directory_counts.items():
            parts.append(f"{directory}: {count} 个\n")

        # Duplicate detection results.
        parts.append("\n相同文件检测结果:\n")
        duplicate_found = False

        for paths in self.image_hashes.values():
            if len(paths) > 1:
                duplicate_found = True
                parts.append(f"\n发现 {len(paths)} 张相同的图片:\n")
                parts.extend(f"  - {path}\n" for path in paths)

        for paths in self.video_hashes.values():
            if len(paths) > 1:
                duplicate_found = True
                parts.append(f"\n发现 {len(paths)} 个相同的视频:\n")
                parts.extend(f"  - {path}\n" for path in paths)

        if not duplicate_found:
            parts.append("未发现相同的文件\n")

        stats_text = "".join(parts)
        print(stats_text)
        return stats_text

    def _print_format_stats(self):
        """Print the per-extension file counts."""
        print("\n各格式文件数量:")
        for ext, count in self.format_counts.items():
            print(f"{ext}: {count} 个")

    def _print_directory_stats(self):
        """Print the per-directory file counts."""
        print("\n各目录文件数量:")
        for directory, count in self.directory_counts.items():
            print(f"{directory}: {count} 个")

    def _print_duplicate_stats(self):
        """Print every group of files that share a fingerprint."""
        print("\n相同文件检测结果:")

        duplicate_found = False
        for paths in self.image_hashes.values():
            if len(paths) > 1:
                duplicate_found = True
                print(f"\n发现 {len(paths)} 张相同的图片:")
                for path in paths:
                    print(f"  - {path}")

        for paths in self.video_hashes.values():
            if len(paths) > 1:
                duplicate_found = True
                print(f"\n发现 {len(paths)} 个相同的视频:")
                for path in paths:
                    print(f"  - {path}")

        if not duplicate_found:
            print("未发现相同的文件")