import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from torch import nn
from torchvision.models import resnet50, vgg16
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
import os
import pandas as pd
from tqdm import tqdm
import matplotlib
import concurrent.futures
from functools import partial
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# Device configuration: prefer CUDA when available, fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Inference-time transform shared by both CNN extractors: resize to the
# 224x224 input the torchvision backbones expect, then normalize with the
# standard ImageNet channel mean/std.
val_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])


class FeatureExtractor:
    """Abstract interface for image feature extraction and comparison.

    Subclasses provide both methods; invoking them on the base class
    raises NotImplementedError.
    """

    def extract_features(self, image):
        """Return a subclass-defined feature representation of *image*."""
        raise NotImplementedError

    def compute_similarity(self, features1, features2):
        """Return a similarity score between two feature representations."""
        raise NotImplementedError


class SIFTExtractor(FeatureExtractor):
    """SIFT local-feature extractor with brute-force descriptor matching."""

    def __init__(self, n_features=1000):
        self.sift = cv2.SIFT_create(nfeatures=n_features)
        # crossCheck=True keeps only mutually-best matches, so no ratio test
        # is applied afterwards.
        self.bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)

    def extract_features(self, image):
        """Return SIFT descriptors for a BGR or grayscale image, or None.

        Generalized from the original, which rejected 2-D (grayscale)
        arrays: single-channel input is now used directly.
        """
        if image is None:
            return None
        if image.ndim == 3:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        elif image.ndim == 2:
            gray = image  # already single-channel
        else:
            return None
        _, descriptors = self.sift.detectAndCompute(gray, None)
        return descriptors

    def compute_similarity(self, desc1, desc2):
        """Map descriptor-match quality into [0, 1]; 0.0 if no matches."""
        if desc1 is None or desc2 is None or len(desc1) == 0 or len(desc2) == 0:
            return 0.0
        matches = self.bf.match(desc1, desc2)
        if not matches:
            return 0.0
        # Bug fix: BFMatcher.match returns matches ordered by query keypoint
        # index, not by quality, so slicing the first 50 averaged an
        # arbitrary subset.  Sort by distance so the 50 *best* matches are
        # averaged, as the slice clearly intended.
        distances = sorted(m.distance for m in matches)
        avg_distance = np.mean(distances[:50])
        # 300.0 is an empirical upper bound on SIFT L2 distance for a
        # "no match"; distances at or above it map to similarity 0.
        similarity = 1.0 - min(1.0, avg_distance / 300.0)
        return max(0.0, similarity)


class CNNExtractor(FeatureExtractor):
    """Deep-feature extractor backed by a pretrained torchvision CNN."""

    def __init__(self, model_name='resnet50'):
        self.device = device
        self.model_name = model_name
        self.model = self._load_model(model_name).to(self.device)
        self.model.eval()

    def _load_model(self, model_name):
        # resnet50: drop the final FC layer, keeping the pooled 2048-d vector.
        # vgg16: use only the convolutional stack (512x7x7 = 25088 values).
        if model_name == 'resnet50':
            backbone = resnet50(pretrained=True)
            return nn.Sequential(*list(backbone.children())[:-1])
        if model_name == 'vgg16':
            return vgg16(pretrained=True).features
        raise ValueError(f"未知模型: {model_name}")

    def extract_features(self, image):
        """Return a flat feature vector for a BGR image; zeros on failure."""
        try:
            rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            batch = val_transform(Image.fromarray(rgb)).unsqueeze(0).to(self.device)
            with torch.no_grad():
                activations = self.model(batch)
            return activations.cpu().numpy().flatten()
        except Exception as e:
            # Best-effort fallback: a zero vector of the backbone's output
            # size keeps downstream similarity code running.
            print(f"CNN特征提取错误 ({self.model_name}): {str(e)}")
            return np.zeros(2048 if self.model_name == 'resnet50' else 25088)

    def compute_similarity(self, features1, features2):
        """Cosine similarity of two feature vectors; 0.0 if either is None."""
        if features1 is None or features2 is None:
            return 0.0
        return cosine_similarity([features1], [features2])[0][0]


class FrogColorAnalyzer:
    """Estimates the fraction of an image that is green (HSV thresholding)."""

    def __init__(self):
        # Green hue band in OpenCV's HSV space (H ranges over [0, 179]).
        self.lower_green = np.array([25, 40, 40])
        self.upper_green = np.array([100, 255, 255])
        # 5x5 kernel used to clean up the binary green mask.
        self.kernel = np.ones((5, 5), np.uint8)

    def process_image(self, img):
        """Preprocess: flatten alpha, suppress highlights, boost saturation."""
        # Robustness fix: a 2-D grayscale array would crash on img.shape[2];
        # promote it to BGR first.
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.shape[2] == 4:
            # Black out (near-)transparent pixels before color analysis.
            alpha = img[:, :, 3]
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
            mask = alpha > 200
            img = cv2.bitwise_and(img, img, mask=mask.astype(np.uint8))

        # Neutralize over-exposed regions (L > 220) in LAB space.
        # NOTE(review): only the a-channel is reset to neutral (128); the
        # b-channel is left untouched — confirm whether both chroma channels
        # should be neutralized.
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        l_channel = lab[:, :, 0]
        overexposed = l_channel > 220
        lab[overexposed, 1] = 128
        img = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

        # Saturation boost (x1.3, clipped) makes the green band easier to
        # threshold in calculate_green_ratio.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hsv[:, :, 1] = np.clip(hsv[:, :, 1] * 1.3, 0, 255)
        return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    def calculate_green_ratio(self, img):
        """Return the fraction (0.0-1.0) of pixels classified as green."""
        if img is None or img.size == 0:
            return 0.0

        processed_img = self.process_image(img)

        # Threshold the preprocessed image in HSV.
        hsv = cv2.cvtColor(processed_img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, self.lower_green, self.upper_green)

        # Close small holes, then remove speckle noise.
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, self.kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)

        green_pixels = np.count_nonzero(mask)
        total_pixels = mask.shape[0] * mask.shape[1]
        return green_pixels / total_pixels


def safe_read_image(path):
    """Read an image as 3-channel BGR; return a blank image if unreadable.

    Near-transparent pixels (alpha <= 200) are blacked out and grayscale
    files are promoted to BGR, so callers always get an HxWx3 uint8 array.
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img is None:
        print(f"警告: 无法读取图像 {path}, 使用空白图像替代")
        return np.zeros((224, 224, 3), dtype=np.uint8)

    # Bug fix: grayscale files load as 2-D arrays, so the original
    # img.shape[2] check raised IndexError before the GRAY2BGR branch could
    # ever run.  Test the number of dimensions first.
    if img.ndim == 2:
        return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    if img.shape[2] == 4:
        # Flatten the alpha channel: keep opaque pixels, zero the rest.
        alpha = img[:, :, 3]
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        mask = alpha > 200
        img = cv2.bitwise_and(img, img, mask=mask.astype(np.uint8))
    return img


def calculate_fused_similarity(img1_path, img2_path, extractors, discriminative_power=None):
    """Fuse SIFT/ResNet50/VGG16 similarities into one 0-100 score.

    A cheap color pre-filter runs first: if the two images' green-pixel
    ratios differ by more than 8 percentage points, feature extraction is
    skipped and a fixed low score (10.0) is recorded for every method.

    Args:
        img1_path, img2_path: image file paths.
        extractors: dict with keys 'sift', 'resnet50' and 'vgg16'.
        discriminative_power: optional per-method weights; normalized to sum
            to 1.  Defaults to sift=0.20, resnet50=0.50, vgg16=0.30.

    Returns:
        dict with 'final', 'color_diff', 'color_ratio1', 'color_ratio2' and
        one 0-100 similarity entry per method.
    """
    color_analyzer = FrogColorAnalyzer()

    img1 = safe_read_image(img1_path)
    img2 = safe_read_image(img2_path)

    # Green-ratio difference in percentage points.
    green_ratio1 = color_analyzer.calculate_green_ratio(img1)
    green_ratio2 = color_analyzer.calculate_green_ratio(img2)
    green_diff = abs(green_ratio1 - green_ratio2) * 100

    # Color-difference threshold (8 percentage points): short-circuit.
    if green_diff > 8:
        results = {
            'final': 10.0,
            'color_diff': green_diff,
            'color_ratio1': green_ratio1 * 100,
            'color_ratio2': green_ratio2 * 100
        }
        for method in extractors.keys():
            results[method] = 10.0
        print(f"颜色差异过大({green_diff:.2f}%)，跳过特征提取")
        return results

    # Extract features for both images with every configured method.
    features = {}
    for name, extractor in extractors.items():
        features[f"{name}_1"] = extractor.extract_features(img1)
        features[f"{name}_2"] = extractor.extract_features(img2)

    # CNN cosine similarities live in [-1, 1]; map them to [0, 1].
    similarities = {
        'sift': extractors['sift'].compute_similarity(features['sift_1'], features['sift_2']),
        'resnet50': max(0.0, min(1.0, (extractors['resnet50'].compute_similarity(
            features['resnet50_1'], features['resnet50_2']) + 1) / 2)),
        'vgg16': max(0.0, min(1.0, (extractors['vgg16'].compute_similarity(
            features['vgg16_1'], features['vgg16_2']) + 1) / 2))
    }

    # Fix: the original built the default weight dict via `or` and then
    # immediately rebuilt (normalized) it whenever discriminative_power was
    # truthy — compute the weights exactly once.
    if discriminative_power:
        total_power = sum(discriminative_power.values())
        weights = {k: v / total_power for k, v in discriminative_power.items()}
    else:
        weights = {
            'sift': 0.20,
            'resnet50': 0.50,
            'vgg16': 0.30
        }

    # Weighted fusion, clamped to [0, 1] and expressed as a percentage.
    final_similarity = sum(weights[method] * similarities[method] for method in similarities)
    final_similarity = max(0.0, min(1.0, final_similarity)) * 100

    results = {
        'final': final_similarity,
        'color_diff': green_diff,
        'color_ratio1': green_ratio1 * 100,
        'color_ratio2': green_ratio2 * 100
    }
    for method in similarities:
        results[method] = similarities[method] * 100
    return results


def query_single_image(query_image_path, target_dir, output_excel, extractors, discriminative_power=None,
                       max_workers=4):
    """Match one query image against every image in *target_dir* (threaded).

    Args:
        query_image_path: path to the query image; its basename must look
            like "<frog>-<id>-<shot>.<ext>" so the frog ID can be parsed.
        target_dir: directory of candidate images (.jpg/.jpeg/.png).
        output_excel: path the per-pair results are written to (Excel).
        extractors: dict of FeatureExtractor instances keyed by method name.
        discriminative_power: optional per-method weights, forwarded to
            calculate_fused_similarity.
        max_workers: thread-pool size.

    Returns:
        pandas.DataFrame with one row per target image.
        NOTE(review): rows are appended in task-completion order
        (as_completed), so the Excel row order is nondeterministic.
    """
    query_image_name = os.path.basename(query_image_path)
    # Frog ID = first two dash-separated fields of the file name.
    query_frog_id = '-'.join(query_image_name.split('-')[:2])

    # Numeric sort on the three "<a>-<b>-<c>" name fields.  NOTE(review):
    # a file name not matching that pattern raises ValueError here.
    target_files = sorted(
        [f for f in os.listdir(target_dir) if f.lower().endswith(('.jpg', '.jpeg', '.png'))],
        key=lambda x: [int(part) for part in x.split('-')[:2] + [x.split('-')[2].split('.')[0]]]
    )

    print(f"开始处理查询图像: {query_image_name}")
    print(f"目标文件夹中有 {len(target_files)} 张待匹配图像")
    print(f"使用 {max_workers} 个线程并行处理...")

    results = []
    # Shared across worker threads without a lock: the check-then-set below
    # can race, but under the CPython GIL the worst case is a duplicate
    # similarity computation, never a corrupted dict.  Each (query, target)
    # pair is distinct here, so the cache mainly guards accidental repeats.
    similarity_cache = {}

    # Worker run in the thread pool, one invocation per target file.
    def process_target_file(target_file):
        target_path = os.path.join(target_dir, target_file)
        # Order-independent cache key for the image pair.
        key = tuple(sorted([query_image_name, target_file]))

        if key not in similarity_cache:
            try:
                sim_results = calculate_fused_similarity(
                    query_image_path, target_path, extractors, discriminative_power)
                similarity_cache[key] = sim_results
            except Exception as e:
                # Best-effort: record zero scores so one bad pair does not
                # abort the whole run.
                print(f"计算 {query_image_name} 和 {target_file} 相似度时出错: {str(e)}")
                sim_results = {'final': 0}
                for method in extractors.keys():
                    sim_results[method] = 0
                similarity_cache[key] = sim_results

        target_frog_id = '-'.join(target_file.split('-')[:2])
        result_row = {
            '查询图像': query_image_name,
            '目标图像': target_file,
            '查询青蛙ID': query_frog_id,
            '目标青蛙ID': target_frog_id,
            '同一青蛙': "是" if query_frog_id == target_frog_id else "否",
            '最终相似度': similarity_cache[key]['final'],
            '颜色差异': similarity_cache[key].get('color_diff', 0),
            '查询图像绿色比例': similarity_cache[key].get('color_ratio1', 0),
            '目标图像绿色比例': similarity_cache[key].get('color_ratio2', 0)
        }
        for method in extractors.keys():
            result_row[f'{method}_相似度'] = similarity_cache[key].get(method, 0)
        return result_row

    # Fan the per-target work out over the thread pool.
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit every target up front.
        futures = [executor.submit(process_target_file, target_file) for target_file in target_files]

        # Collect results as they finish, updating the progress bar each time.
        with tqdm(total=len(target_files), desc="匹配进度") as pbar:
            for future in concurrent.futures.as_completed(futures):
                try:
                    result = future.result()
                    results.append(result)
                except Exception as e:
                    print(f"处理时发生错误: {str(e)}")
                finally:
                    pbar.update(1)

    # Persist the full result table to Excel.
    df = pd.DataFrame(results)
    df.to_excel(output_excel, index=False)
    print(f"结果已保存到: {output_excel}")
    return df


def analyze_query_top_similarity(df, report_dir, query_image_name):
    """Report the top-3 most similar images, one per distinct frog ID.

    Writes an Excel sheet and a plain-text report into *report_dir*.

    Args:
        df: result DataFrame from query_single_image.
        report_dir: output directory (created if missing).
        query_image_name: basename of the query image.

    Returns:
        (accuracy, top_df) — accuracy is 100 if any of the top-3 rows is
        the same frog as the query, else 0.
    """
    os.makedirs(report_dir, exist_ok=True)
    sorted_df = df.sort_values(by='最终相似度', ascending=False)
    query_frog_id = '-'.join(query_image_name.split('-')[:2])

    # Keep only the best-scoring row per frog ID, up to three distinct frogs.
    top3 = []
    seen_frogs = set()
    for _, row in sorted_df.iterrows():
        if len(top3) >= 3:
            break
        target_frog_id = row['目标青蛙ID']
        if target_frog_id not in seen_frogs:
            top3.append(row)
            seen_frogs.add(target_frog_id)

    top_results = []
    # (The loop above already caps top3 at 3 entries, so no re-slice needed.)
    for i, row in enumerate(top3, 1):
        top_results.append({
            '排名': i,
            '目标图像': row['目标图像'],
            '目标青蛙ID': row['目标青蛙ID'],
            '同一青蛙': row['同一青蛙'],
            '最终相似度': row['最终相似度'],
            'resnet50_相似度': row.get('resnet50_相似度', 0),
            'vgg16_相似度': row.get('vgg16_相似度', 0),
            'sift_相似度': row.get('sift_相似度', 0)
        })

    has_same_frog = any(row['同一青蛙'] == "是" for row in top_results)
    accuracy = 100 if has_same_frog else 0

    # Persist the top-3 table.
    top_df = pd.DataFrame(top_results)
    top_excel_path = os.path.join(report_dir, f"top3_{os.path.splitext(query_image_name)[0]}.xlsx")
    top_df.to_excel(top_excel_path, index=False)

    # Build the human-readable report.
    report = f"查询图像: {query_image_name}\n查询青蛙ID: {query_frog_id}\n\nTop3匹配结果:\n"
    for i, row in enumerate(top_results, 1):
        report += (f"{i}. 目标图像: {row['目标图像']} (青蛙ID: {row['目标青蛙ID']}, "
                   f"同一青蛙: {row['同一青蛙']}, 相似度: {row['最终相似度']:.2f}%)\n")
    report += f"\nTop3中包含同一青蛙: {'是' if has_same_frog else '否'}\n准确率: {accuracy:.2f}%\n"

    report_path = os.path.join(report_dir, f"top3_report_{os.path.splitext(query_image_name)[0]}.txt")
    # Bug fix: the report contains Chinese text, so writing with the
    # platform default codec could raise UnicodeEncodeError (or garble the
    # file) on non-CJK locales; force UTF-8.
    with open(report_path, 'w', encoding='utf-8') as f:
        f.write(report)
    print(f"Top3分析报告已保存到: {report_path}")
    return accuracy, top_df


if __name__ == '__main__':
    # Build the three feature extractors once and share them across the run.
    feature_extractors = {
        'sift': SIFTExtractor(n_features=1000),
        'resnet50': CNNExtractor(model_name='resnet50'),
        'vgg16': CNNExtractor(model_name='vgg16')
    }

    # I/O locations (hard-coded for this experiment).
    query_path = 'D:/Frog/testFrog2/1-4-1.jpg'
    gallery_dir = 'D:/Frog/BigFrog'
    excel_out = 'D:/Frog/single_query_results.xlsx'
    report_out_dir = 'D:/Frog/single_query_report'

    # Run the threaded query; max_workers can be tuned to the CPU core count.
    print("\n开始多线程图像查询...")
    match_df = query_single_image(
        query_path,
        gallery_dir,
        excel_out,
        feature_extractors,
        max_workers=4
    )

    # Analyze the top-3 matches for the query image.
    query_name = os.path.basename(query_path)
    print(f"\n分析查询结果: {query_name}")
    top3_accuracy, top3_frame = analyze_query_top_similarity(
        match_df,
        report_out_dir,
        query_name
    )

    # Print the run summary.
    separator = "=" * 50
    print("\n" + separator)
    print("多线程图像查询完成! 结果摘要:")
    print(f"Top3准确率: {top3_accuracy:.2f}%")
    print(separator)