import argparse
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
from pathlib import Path
from typing import Dict, List, Union
import json
from torch.utils.data import DataLoader
from torchvision.models import ResNet50_Weights
from utils.data_loader import CoinDataset, custom_collate_fn
from database.json_database import JsonCoinDatabase
from database.milvus_database import MilvusCoinDatabase
import concurrent.futures
from typing import Tuple
import math
import logging
from datetime import datetime
from logging.handlers import RotatingFileHandler

# Logging configuration: one log file named after the current date,
# rotated once it reaches 10 MB, keeping at most 5 rotated backups.
# DEBUG level captures everything the worker threads emit.
log_file = f"{datetime.now().strftime('%Y-%m-%d')}.log"
handler = RotatingFileHandler(log_file, maxBytes=10*1024*1024, backupCount=5)  # 10 MB
logging.basicConfig(level=logging.DEBUG, handlers=[handler], format='%(asctime)s - %(levelname)s - %(message)s')

class CoinFeatureExtractor:
    """Extract L2-normalized feature vectors from coin images using a
    pretrained ResNet-50 backbone with its classification head removed."""

    def __init__(self):
        # Load a pretrained ResNet-50 and drop the final fully-connected
        # layer so the forward pass yields the pooled feature map.
        self.model = models.resnet50(weights=ResNet50_Weights.DEFAULT)
        self.model = torch.nn.Sequential(*list(self.model.children())[:-1])
        self.model.eval()

        # Use the GPU when one is available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = self.model.to(self.device)

        # Standard ImageNet preprocessing, matching the pretrained weights.
        self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]
            )
        ])

    @torch.no_grad()
    def extract_features(self, image):
        """Return a unit-length 1-D numpy feature vector for *image*.

        Args:
            image: a PIL image (preprocessed here), or a tensor that is
                assumed to be already preprocessed (CHW) — only the batch
                dimension is added in that case.

        Returns:
            numpy.ndarray: L2-normalized feature vector (returned
            un-normalized only in the degenerate all-zero case).
        """
        if isinstance(image, Image.Image):
            image_tensor = self.transform(image).unsqueeze(0)
        else:
            # Tensor input: just add the batch dimension.
            image_tensor = image.unsqueeze(0)

        image_tensor = image_tensor.to(self.device)
        features = self.model(image_tensor)
        features = features.squeeze().cpu().numpy()

        # L2-normalize so downstream cosine similarity reduces to a dot
        # product. Guard against a zero vector, which would otherwise
        # produce NaNs from a division by zero.
        norm = np.linalg.norm(features)
        if norm > 0:
            features = features / norm
        return features

class CoinRecognitionSystem:
    """Coin recognition facade: pairs a feature extractor with a pluggable
    similarity database backend (JSON file or Milvus)."""

    def __init__(self, database_type="milvus"):
        """Create the extractor and the requested database backend.

        Args:
            database_type: 'json' or 'milvus'.

        Raises:
            ValueError: if database_type is not one of the supported values.
        """
        self.feature_extractor = CoinFeatureExtractor()
        if database_type == "json":
            self.database = JsonCoinDatabase()
        elif database_type == "milvus":
            self.database = MilvusCoinDatabase()
            # Make sure the Milvus collection exists before any insert/search.
            self.database.ensure_collection()
        else:
            raise ValueError(f"Unsupported database type: {database_type}")

    def add_reference_coin(self, image_path: Path, coin_id: str) -> bool:
        """Extract features from the image at *image_path* and store them
        under *coin_id*. Returns True on success, False on any failure."""
        try:
            image = Image.open(image_path).convert('RGB')
            features = self.feature_extractor.extract_features(image)
            self.database.add_coin(coin_id, features, image_path)
            return True
        except Exception as e:
            # Failures are errors, not informational events — log at ERROR
            # (the original logged at INFO, hiding them in verbose logs).
            logging.error(f"添加参考硬币失败: {e}")
            return False

    def recognize_coin(self, front_image_path: Path, back_image_path: Path, top_k: int = 5):
        """Find the top_k coins most similar to a front/back image pair.

        Returns:
            A list of match records from the database backend, or an empty
            list when no match is found or an error occurs.
        """
        try:
            # Load both sides and extract one feature vector each.
            front_image = Image.open(front_image_path).convert('RGB')
            back_image = Image.open(back_image_path).convert('RGB')

            front_features = self.feature_extractor.extract_features(front_image)
            back_features = self.feature_extractor.extract_features(back_image)

            similar_coins = self.database.find_similar_coins(front_features, back_features, top_k)

            if not similar_coins:
                logging.info("No similar coins found")
                return []

            return similar_coins

        except Exception as e:
            # logging.exception records the message at ERROR level together
            # with the full traceback in the configured log file (the
            # original logged at INFO and printed the traceback to stderr).
            logging.exception(f"识别失败: {e}")
            return []

def add_training_data(system: CoinRecognitionSystem, image_path: Path, coin_id: str, annotations: dict):
    """Add one reference coin to the system and persist its annotations.

    The annotation dict is written to data/reference_coins/<coin_id>.json
    only when the coin was successfully added to the database.

    Args:
        system: the recognition system receiving the reference coin.
        image_path: path of the reference image.
        coin_id: unique identifier for the coin.
        annotations: annotation payload to store alongside the coin.
    """
    if system.add_reference_coin(image_path, coin_id):
        annotation_path = Path("data/reference_coins") / f"{coin_id}.json"
        # Create the target directory if needed — the original crashed with
        # FileNotFoundError on a fresh checkout where it did not exist yet.
        annotation_path.parent.mkdir(parents=True, exist_ok=True)
        with open(annotation_path, 'w', encoding='utf-8') as f:
            json.dump(annotations, f)
        logging.info(f"成功添加硬币: {coin_id}")
    else:
        logging.error(f"添加硬币失败: {coin_id}")

class ProcessTracker:
    """Persist the set of coin IDs that have already been processed,
    backed by a JSON file so a run can resume where it left off."""

    def __init__(self, record_file: str = "processed_records.json"):
        self.record_file = Path(record_file)
        self.processed_ids = self.load_records()

    def load_records(self) -> set:
        """Read the saved ID set from disk; empty set when no file exists."""
        if not self.record_file.exists():
            return set()
        with open(self.record_file, 'r') as fh:
            return set(json.load(fh))

    def save_records(self):
        """Write the current ID set back to disk as a JSON list."""
        with open(self.record_file, 'w') as fh:
            json.dump(list(self.processed_ids), fh)

    def add_record(self, coin_id: str):
        """Mark *coin_id* as processed (in memory; call save_records to persist)."""
        self.processed_ids.add(coin_id)

    def is_processed(self, coin_id: str) -> bool:
        """Return True when *coin_id* has already been processed."""
        return coin_id in self.processed_ids

    def get_stats(self) -> dict:
        """Return a summary dict with the count and list of processed IDs."""
        return {
            'total_processed': len(self.processed_ids),
            'processed_ids': list(self.processed_ids),
        }

def calculate_thread_range(total_items: int, num_threads: int, thread_id: int) -> Tuple[int, int]:
    """Compute the half-open [start, end) slice of items for one thread.

    Items are split into ceil(total_items / num_threads)-sized chunks.
    Both bounds are clamped to total_items so a surplus thread gets an
    empty range (start == end) rather than an inverted one — the original
    could return start > end (e.g. 5 items over 4 threads gave (6, 5)
    for the last thread).

    Args:
        total_items: total number of items to distribute.
        num_threads: number of worker threads.
        thread_id: zero-based index of this thread.

    Returns:
        (start, end) indices of this thread's slice.
    """
    items_per_thread = math.ceil(total_items / num_threads)
    start = min(thread_id * items_per_thread, total_items)
    end = min(start + items_per_thread, total_items)
    return start, end

def train_thread(start_offset: int, max_offset: int, args, thread_id: int):
    """Worker for one training thread: stream coins in [start_offset, max_offset)
    from the remote API, extract front/back features, and store them in the
    database, checkpointing progress to per-thread files.

    Returns:
        (thread_id, stats) where stats is ProcessTracker.get_stats().
    """
    logging.info(f"Thread {thread_id} starting: offset {start_offset} to {max_offset}")
    
    # Each thread builds its own system (model + DB handle) and its own
    # progress file, so threads share no mutable state.
    system = CoinRecognitionSystem(database_type=args.database)
    tracker = ProcessTracker(f"processed_records_thread_{thread_id}.json")
    
    coin_dataset = CoinDataset(
        api_url=args.api_url,
        title=args.title,
        page_size=10,
        offset=start_offset,
        max_offset=max_offset
    )
    
    data_loader = DataLoader(
        coin_dataset, 
        batch_size=4, 
        shuffle=False,
        collate_fn=custom_collate_fn
    )

    # Resume support: start the count from previously recorded items.
    # NOTE(review): if the loader yields fewer countable items than
    # (max_offset - start_offset), this while-loop re-iterates the loader
    # forever — confirm CoinDataset always covers the full range.
    processed_count = len(tracker.processed_ids)
    while processed_count < (max_offset - start_offset):
        for front_images, back_images, annotations in data_loader:
            # Skip batches the collate function could not assemble, but still
            # count one item so the outer loop can make progress.
            if front_images is None or back_images is None or annotations is None or len(annotations) == 0:
                processed_count += 1
                continue

            for idx, (front_image, back_image) in enumerate(zip(front_images, back_images)):
                coin_id = annotations[idx]['id']
                
                # Already recorded (e.g. from a previous run) — skip.
                # NOTE(review): skipped items do not increment processed_count
                # here, relying on the initial len(tracker.processed_ids).
                if tracker.is_processed(coin_id):
                    logging.info(f"已处理硬币: {coin_id}")
                    continue

                # One feature vector per coin side.
                front_features = system.feature_extractor.extract_features(front_image)
                back_features = system.feature_extractor.extract_features(back_image)
                # Cached-image paths mirror the layout used elsewhere; the file
                # name is the last URL segment of each annotation image field.
                coin_dir = Path(f"cached_images/{coin_id}")
                front_path = coin_dir / annotations[idx]['frontImage'].split('/')[-1]
                back_path = coin_dir / annotations[idx]['backImage'].split('/')[-1]

                system.database.add_coin(
                    coin_id=coin_id,
                    front_features=front_features,
                    back_features=back_features,
                    front_path=front_path,
                    back_path=back_path,
                    annotations=annotations[idx]
                )
                
                tracker.add_record(coin_id)
                processed_count += 1
                logging.info(f"Thread {thread_id}: Saved {coin_id} ({processed_count}/{max_offset-start_offset})")
            
            # Periodic checkpoint roughly every 10 items so a crash loses
            # little work.
            if processed_count % 10 == 0:
                tracker.save_records()
                system.database.save_to_file(f'coin_database_thread_{thread_id}.json')

    # Final flush of the progress record and per-thread database snapshot.
    tracker.save_records()
    system.database.save_to_file(f'coin_database_thread_{thread_id}.json')
    return thread_id, tracker.get_stats()

def _run_training(args):
    """Fan the requested offset range out over worker threads, then merge
    their progress records and delete the per-thread record files."""
    total_items = args.limit - args.offset

    with concurrent.futures.ThreadPoolExecutor(max_workers=args.threads) as executor:
        futures = []
        for thread_id in range(args.threads):
            start, end = calculate_thread_range(total_items, args.threads, thread_id)
            # Shift the per-thread slice by the global offset.
            start += args.offset
            end += args.offset
            futures.append(executor.submit(train_thread, start, end, args, thread_id))

        # Collect each thread's (thread_id, stats) as it finishes.
        results = []
        for future in concurrent.futures.as_completed(futures):
            thread_id, stats = future.result()
            results.append((thread_id, stats))
            logging.info(f"Thread {thread_id} completed with {stats['total_processed']} items processed")

    # Merge every thread's progress record into the global record file.
    logging.info("Merging results from all threads...")
    final_tracker = ProcessTracker()
    for thread_id, _ in results:
        thread_tracker = ProcessTracker(f"processed_records_thread_{thread_id}.json")
        final_tracker.processed_ids.update(thread_tracker.processed_ids)
    final_tracker.save_records()

    # Clean up the now-merged per-thread record files.
    for thread_id, _ in results:
        Path(f'processed_records_thread_{thread_id}.json').unlink(missing_ok=True)

    logging.info("Training completed. Total processed: %d", len(final_tracker.processed_ids))


def _run_testing(system):
    """Run recognition on every coin directory in the local image cache and
    log the top matches for each."""
    system.database.load_from_file('coin_database.json')

    cache_dir = Path("cached_images")
    if not cache_dir.exists():
        logging.warning("缓存目录 %s 不存在", cache_dir)
        return

    for coin_dir in cache_dir.iterdir():
        if not coin_dir.is_dir():
            continue

        try:
            # Each coin directory is expected to hold one front and one back
            # JPEG; skip directories missing either side.
            front_images = list(coin_dir.glob("*front*.jpg"))
            back_images = list(coin_dir.glob("*back*.jpg"))
            if not front_images or not back_images:
                continue

            logging.info("正在识别硬币: %s", coin_dir.name)
            results = system.recognize_coin(front_images[0], back_images[0])
            logging.info("识别结果:")
            for idx, result in enumerate(results, 1):
                logging.info("%d. 硬币ID: %s", idx, result['coin_id'])
                logging.info("   正面相似度: %.4f", result['front_similarity'])
                logging.info("   背面相似度: %.4f", result['back_similarity'])
                logging.info("   综合相似度: %.4f", result['combined_similarity'])
                logging.info("   正面图片: %s", result['front_image_path'])
                logging.info("   背面图片: %s", result['back_image_path'])
                if 'annotations' in result:
                    logging.info("   标注信息: %s", result['annotations'])
        except Exception as e:
            logging.error("处理硬币 %s 时出错: %s", coin_dir.name, e)
            continue


def _run_check(args):
    """Cross-check the processed-record file against the database contents."""
    tracker = ProcessTracker()
    stats = tracker.get_stats()
    logging.info("已处理总数: %d", stats['total_processed'])

    # Honor --database here: the original rebuilt CoinRecognitionSystem()
    # with the hard-coded default backend, ignoring the user's choice.
    system = CoinRecognitionSystem(database_type=args.database)
    try:
        system.database.load_from_file('coin_database.json')
        logging.info("数据库中的硬币数量: %d", len(system.database.features_dict))

        # Report IDs present on one side but missing on the other.
        db_ids = set(system.database.features_dict.keys())
        processed_ids = tracker.processed_ids

        missing_in_db = processed_ids - db_ids
        missing_in_records = db_ids - processed_ids

        if missing_in_db:
            logging.warning("在处理记录中存在但数据库中缺失的ID: %s", missing_in_db)
        if missing_in_records:
            logging.warning("在数据库中存在但处理记录中缺失的ID: %s", missing_in_records)

    except Exception as e:
        logging.error("加载数据库时出错: %s", e)


def main():
    """CLI entry point: parse arguments and dispatch to train/test/check."""
    parser = argparse.ArgumentParser(description="硬币识别系统")
    parser.add_argument('--mode', type=str, choices=['train', 'test', 'check'], required=True, 
                       help='运行模式: train, test 或 check')
    parser.add_argument('--api_url', type=str, default='http://154.91.32.174:80/api/coinListPage', help='获取硬币数据的 API URL')
    parser.add_argument('--title', type=str, default='yuan coin', help='请求的标题')
    parser.add_argument('--offset', type=int, default=0, help='请求的偏移量')
    parser.add_argument('--limit', type=int, default=217073, help='请求的限制数量')
    parser.add_argument('--database', type=str, choices=['json', 'milvus'], default='milvus',
                       help='数据库类型: json 或 milvus')
    parser.add_argument('--threads', type=int, default=4, help='训练时使用的线程数')
    args = parser.parse_args()

    if args.mode == 'train':
        # Worker threads each build their own system; no shared one needed
        # (the original constructed an unused extra system here and again
        # after training, loading the model needlessly each time).
        _run_training(args)
    elif args.mode == 'test':
        # Build the model + database only for the mode that uses them.
        _run_testing(CoinRecognitionSystem(database_type=args.database))
    elif args.mode == 'check':
        _run_check(args)

# Script entry point.
if __name__ == "__main__":
    main()
    
    

# Example usage:
# python main.py --mode train --threads 4 --limit 1000 --offset 0 --database milvus