from torch.utils.data import DataLoader
from torchvision.transforms import Resize, CenterCrop, ToTensor, Normalize, Compose
from PIL import Image
try:
    from torchvision.transforms import InterpolationMode
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC
from datalist import Market1501, collate_fn
from tqdm import tqdm
import torch
from colorama import Fore
import numpy as np
import pickle
import os
from sklearn.metrics import precision_recall_curve, auc
from sklearn.metrics.pairwise import cosine_similarity
from torchvision import transforms
import importlib
from sklearn.ensemble import RandomForestClassifier  # 导入随机森林分类器

def cosine_similarity_matrix(matrix1, matrix2):
    """Pairwise cosine similarity between the rows of two 2-D arrays.

    Pure-NumPy implementation (removes the sklearn runtime dependency for
    this helper). Zero-norm rows get similarity 0, matching sklearn's
    ``cosine_similarity``, whose normalizer treats a zero norm as 1.

    Args:
        matrix1: array-like of shape (m, d).
        matrix2: array-like of shape (n, d).

    Returns:
        np.ndarray of shape (m, n) with the cosine similarity of every
        row of ``matrix1`` against every row of ``matrix2``.
    """
    a = np.asarray(matrix1, dtype=np.float64)
    b = np.asarray(matrix2, dtype=np.float64)
    a_norm = np.linalg.norm(a, axis=1, keepdims=True)
    b_norm = np.linalg.norm(b, axis=1, keepdims=True)
    # Avoid division by zero; mirrors sklearn's zero-vector handling.
    a_norm[a_norm == 0] = 1.0
    b_norm[b_norm == 0] = 1.0
    return (a / a_norm) @ (b / b_norm).T


class Eval:
    """Market-1501 re-identification evaluation driver.

    Extracts deep features with ``model``, fits a random-forest classifier
    on the training features, then scores query/gallery matches by whether
    the forest assigns both images the same identity.

    Expected call order: ``train_random_forest()`` -> ``run()`` -> ``evaluate()``.
    """

    def __init__(self, model, train_dataset, test_dataset):
        """Move the model to the best available device and build loaders.

        Args:
            model: feature extractor; ``model(images)`` must return a
                (batch, feature_dim) tensor.
            train_dataset: split used to fit the random forest.
            test_dataset: split exposing ``query_data`` and ``gallery_data``.
        """
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
        self.model = model.to(self.device)
        self.train_dataloader = DataLoader(train_dataset, 1, shuffle=False, collate_fn=collate_fn, **kwargs)
        self.test_dataloader = DataLoader(test_dataset, 1, shuffle=False, collate_fn=collate_fn, **kwargs)
        self.model.eval()
        self.image_features = {}    # image basename -> feature vector (np.ndarray)
        self.image_person_ids = {}  # image basename -> ground-truth person id
        # Random-forest identity classifier with 100 trees.
        self.random_forest = RandomForestClassifier(n_estimators=100)

    @torch.no_grad()
    def train_random_forest(self):
        """Fit the random forest on (feature, person_id) pairs from the train split."""
        pbar = tqdm(self.train_dataloader, desc='Training Random Forest:', bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
        all_features = []
        all_labels = []
        for images, person_ids, camera_ids, video_ids, filenames in pbar:
            images = images.to(self.device)
            features = self.model(images).cpu().numpy()
            # One row per image; labels are the ground-truth person ids.
            # (The per-item filename bookkeeping of the original was dead code.)
            all_features.extend(features)
            all_labels.extend(person_ids.numpy())

        # Train the random forest on the stacked feature matrix.
        self.random_forest.fit(np.array(all_features), np.array(all_labels))

    @torch.no_grad()
    def run(self):
        """Extract features for every test image and cache them to disk.

        Writes ``./image_features.pkl`` and ``./image_person_ids.pkl``,
        both keyed by image basename.
        """
        pbar = tqdm(self.test_dataloader, desc='Test:', bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
        for images, person_ids, camera_ids, video_ids, filenames in pbar:
            images = images.to(self.device)
            features = self.model(images).cpu().numpy()
            for feature, filename, person_id in zip(features, filenames, person_ids.numpy()):
                filename = os.path.basename(filename)
                self.image_features[filename] = feature
                self.image_person_ids[filename] = person_id
        with open('./image_features.pkl', 'wb') as f:
            pickle.dump(self.image_features, f, protocol=pickle.HIGHEST_PROTOCOL)
        with open('./image_person_ids.pkl', 'wb') as f:
            pickle.dump(self.image_person_ids, f, protocol=pickle.HIGHEST_PROTOCOL)

    def evaluate(self):
        """Compute Rank-1 / Rank-5 / mAP from the cached features.

        Query-gallery similarity is binary: 1 when the random forest predicts
        the same identity for both images, else 0.  Requires
        ``train_random_forest()`` and ``run()`` to have been called first.
        """
        # Use context managers so the pickle file handles are closed
        # (the original leaked them via pickle.load(open(...))).
        with open('./image_features.pkl', 'rb') as f:
            self.image_features = pickle.load(f)
        with open('./image_person_ids.pkl', 'rb') as f:
            self.image_person_ids = pickle.load(f)
        query_dataset = self.test_dataloader.dataset.query_data
        gallery_dataset = self.test_dataloader.dataset.gallery_data
        query_features, query_indexs = [], []
        gallery_features, gallery_indexs = [], []

        for q_data in query_dataset:
            filename = os.path.basename(q_data['image_path'])
            query_features.append(self.image_features[filename])
            query_indexs.append(self.image_person_ids[filename])

        for g_data in gallery_dataset:
            filename = os.path.basename(g_data['image_path'])
            gallery_features.append(self.image_features[filename])
            gallery_indexs.append(self.image_person_ids[filename])

        query_features = np.array(query_features)
        gallery_features = np.array(gallery_features)

        print("Query count:", query_features.shape[0])
        print("Gallery count:", gallery_features.shape[0])

        # Predict identities with the fitted random forest.
        query_predictions = self.random_forest.predict(query_features)
        gallery_predictions = self.random_forest.predict(gallery_features)
        gallery_indexs_arr = np.array(gallery_indexs)

        rank1_count, rank5_count = 0, 0
        aps = []

        for i in tqdm(range(len(query_predictions))):
            query_pid = query_indexs[i]
            # Binary similarity, vectorized (replaces the per-item Python loop).
            sim_scores = (gallery_predictions == query_predictions[i]).astype(int)
            sorted_indices = np.argsort(sim_scores)[::-1]

            top5_pids = gallery_indexs_arr[sorted_indices[:5]]
            if top5_pids[0] == query_pid:
                rank1_count += 1
            if query_pid in top5_pids:
                rank5_count += 1

            y_true = (gallery_indexs_arr[sorted_indices] == query_pid).astype(int)
            # Robustness: a query with no true gallery match makes
            # precision_recall_curve/auc degenerate (nan / ValueError);
            # count its AP as 0 instead of poisoning the mean.
            if not y_true.any():
                aps.append(0.0)
                continue
            y_scores = sim_scores[sorted_indices]
            precision, recall, _ = precision_recall_curve(y_true, y_scores)
            aps.append(auc(recall, precision))

        rank1_accuracy = rank1_count / len(query_indexs)
        rank5_accuracy = rank5_count / len(query_indexs)
        mAP = np.mean(aps)

        print(f"Rank-1 Accuracy: {rank1_accuracy * 100:.2f}%")
        print(f"Rank-5 Accuracy: {rank5_accuracy * 100:.2f}%")
        print(f"mAP: {mAP * 100:.2f}%")

if __name__ == "__main__":

    from lth_backup import TransformFeatureExtractor

    # Feature extractor backed by CLIP-ReID ViT-B/16 weights.
    reid_model = TransformFeatureExtractor(
        weight_path=r"F:\temp2\TEMP\temp_tra\weights\Market1501_clipreid_ViT-B-16_60.pth"
    )

    # CLIP-style preprocessing at the 256x128 ReID resolution.
    transform_pipeline = Compose([
        Resize((256, 128), interpolation=BICUBIC),
        CenterCrop((256, 128)),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073),
                  (0.26862954, 0.26130258, 0.27577711)),
    ])

    dataset_root = r'F:\floatingAI\reid\Market-1501-v15.09.15'

    # The two splits differ only in `mode`; share the rest of the kwargs.
    split_kwargs = dict(
        pre_process=transform_pipeline,
        data_dir=dataset_root,
        extra_annotations=None,
        color_space='RGB',
    )
    training_split = Market1501(mode='train', **split_kwargs)
    testing_split = Market1501(mode='test', **split_kwargs)

    evaluator = Eval(reid_model, training_split, testing_split)
    evaluator.train_random_forest()  # fit the random forest on train features
    evaluator.run()
    evaluator.evaluate()