import os
import sys
import subprocess
import requests
import re
from pathlib import Path
import numpy as np
from torchvision.io import read_video
import av
from config import __version__, TEXT_TYPES, IMAGE_TYPES,VIDEO_TYPES
import torch
from PIL import Image
from torchvision.transforms import functional as F  
from torchvision.transforms import ToPILImage  

# Filter a list of directories down to those that are not subdirectories of any other entry; returns a set.
def filter_subdirectories(directories):
    """Return the entries of *directories* that are not nested under any other entry.

    Args:
        directories: iterable of directory path strings.

    Returns:
        set[str]: the original path strings (unmodified) that are top-level
        with respect to the rest of the input.

    Raises:
        ValueError: propagated from os.path.commonpath when mixing absolute
            and relative paths (same as the original behavior).
    """
    # os.path.commonpath returns a *normalized* path, so comparing it against
    # a raw input string silently fails for entries with trailing slashes or
    # "." segments. Normalize every entry once, up front.
    normed = {d: os.path.normpath(d) for d in directories}

    filtered_dirs = set()
    for dir1 in directories:
        n1 = normed[dir1]
        # dir1 is a subdirectory of dir2 iff their common path equals dir2.
        # The n1 != n2 guard keeps aliases of the same directory (e.g. "/a"
        # and "/a/") from eliminating each other.
        is_subdir = any(
            n1 != normed[dir2] and os.path.commonpath([n1, normed[dir2]]) == normed[dir2]
            for dir2 in directories
        )
        if not is_subdir:
            filtered_dirs.add(dir1)

    return filtered_dirs


def parse_data_type(suffix):
    """Map a file suffix (extension without the dot) to a coarse data type.

    Returns one of "image", "text", "video", or "N/A" for unrecognized
    suffixes, based on the type sets declared in config.
    """
    if suffix in IMAGE_TYPES:
        return "image"
    if suffix in TEXT_TYPES:
        return "text"
    if suffix in VIDEO_TYPES:
        return "video"
    return "N/A"

# Walk all files under the given path, parse each one, and return a list of records (path, suffix, data type).
def list_files(root_path):
    """Collect all supported files at or under *root_path*.

    Args:
        root_path: a file or directory path (str or Path). A single file is
            examined directly; a directory is walked recursively.

    Returns:
        list[dict]: one entry per recognized file with keys
            "path" (str), "suffix" (extension without the dot), and
            "type" (one of "image" / "text" / "video" per parse_data_type).
            Files whose suffix is unrecognized ("N/A") are skipped.
    """
    root_path = Path(root_path)
    paths = [root_path] if root_path.is_file() else root_path.rglob('*')

    results = []
    for path in paths:
        # rglob yields directories as well; without this guard a directory
        # named e.g. "photos.jpg" would be reported as an image file.
        if not path.is_file():
            continue
        suffix = path.suffix[1:]  # drop the leading dot
        data_type = parse_data_type(suffix)
        if data_type != "N/A":
            results.append({"path": str(path), "suffix": suffix, "type": data_type})

    return results


def encode_text(model, input_text):
    """Encode *input_text* with *model* and L2-normalize the embedding.

    Args:
        model: any object exposing an ``encode`` method that returns a
            numpy-compatible vector.
        input_text: the text passed straight through to ``model.encode``.

    Returns:
        The embedding divided by its Euclidean (L2) norm.
    """
    vector = model.encode(input_text)
    return vector / np.linalg.norm(vector)


def encode_image(model, input_image):
    """Encode *input_image* with *model* and L2-normalize the embedding.

    Args:
        model: any object exposing an ``encode`` method that returns a
            numpy-compatible vector.
        input_image: the image passed straight through to ``model.encode``
            (e.g. a PIL Image).

    Returns:
        The embedding divided by its Euclidean (L2) norm.
    """
    vector = model.encode(input_image)
    return vector / np.linalg.norm(vector)

def encode_video(model, input_video):
    """Encode a video into a single L2-normalized embedding vector.

    Samples one frame out of every 30 decoded frames, embeds each sampled
    frame with ``encode_image``, mean-pools the per-frame embeddings, and
    L2-normalizes the pooled vector.

    Args:
        model: encoder passed through to ``encode_image`` (must expose
            an ``encode`` method).
        input_video: path or file-like object accepted by ``av.open``.

    Returns:
        numpy.ndarray: the L2-normalized video embedding.

    Raises:
        ValueError: if the video decodes to fewer than 30 frames, so no
            frame is ever sampled. (The original code crashed inside
            ``torch.stack`` on an empty list with an opaque error.)
    """
    frame_embeddings = []
    # Use the container as a context manager so the underlying file handle
    # is always released (the original leaked it).
    with av.open(input_video) as container:
        # enumerate(..., start=1) mirrors the original pre-increment counter:
        # frames 30, 60, 90, ... are sampled.
        for frame_index, frame in enumerate(container.decode(video=0), start=1):
            if frame_index % 30 == 0:
                frame_embeddings.append(encode_image(model, frame.to_image()))

    if not frame_embeddings:
        raise ValueError(
            f"video {input_video!r} yielded fewer than 30 frames; "
            "no frames were sampled for encoding"
        )

    # Stack per-frame embeddings and aggregate with mean pooling.
    stacked = torch.stack([torch.from_numpy(e) for e in frame_embeddings], dim=0)
    video_embedding = torch.mean(stacked, dim=0)
    # L2-normalize so embeddings are comparable via dot product / cosine.
    video_embedding = torch.nn.functional.normalize(video_embedding, dim=-1, p=2)
    return video_embedding.numpy()