import torchvision.models as models
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from transformers import CLIPProcessor, CLIPModel         # 导入多模态模型

from dataloader import get_dataset, get_dataloader
from tqdm import tqdm
import re
import os


# Select the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# 创建一个分类器  先经过一个一维卷积  在经过线性分类层
# Classification head: a 1-D convolution over the feature vector, then a linear layer.
class Classifier(nn.Module):
    """Map flat feature vectors [batch_size, feat_dim] to class logits [batch_size, num_classes].

    The feature vector is treated as a single-channel 1-D sequence, convolved,
    globally average-pooled, and classified with one fully-connected layer.
    """

    def __init__(self, num_classes, num_filters=32, kernel_size=3):
        super().__init__()

        # 1-D convolution over the feature axis. padding=kernel_size//2 keeps
        # the sequence length unchanged for any odd kernel_size (the original
        # hard-coded padding=1, which is "same" padding only for kernel_size=3;
        # the default behavior is unchanged).
        self.conv1d = nn.Conv1d(in_channels=1, out_channels=num_filters,
                                kernel_size=kernel_size, padding=kernel_size // 2)
        # Activation
        self.relu = nn.ReLU()
        # Global average pooling collapses the sequence axis to length 1.
        # (Renamed from the original typo "golbal_pool"; the pooling layer has
        # no learnable parameters, so saved state_dicts are unaffected.)
        self.global_pool = nn.AdaptiveAvgPool1d(1)
        # Fully-connected classification layer
        self.fc = nn.Linear(num_filters, num_classes)

    def forward(self, x):
        """Return raw (unnormalized) logits for a batch of feature vectors."""
        # [batch_size, feat_dim] -> [batch_size, 1, feat_dim]
        x = x.unsqueeze(1)
        # [batch_size, num_filters, feat_dim]
        x = self.relu(self.conv1d(x))
        # [batch_size, num_filters, 1] -> [batch_size, num_filters]
        x = self.global_pool(x).squeeze(-1)
        # [batch_size, num_classes]
        return self.fc(x)


# 将resnet网络和多模态网络进行结合
# Combine the ResNet branch with the (precomputed) CLIP embeddings via a contrastive loss.
class Combied_res_CLIP(nn.Module):
    """NT-Xent-style contrastive loss between CLIP embeddings and ResNet features.

    forward() pulls each (CLIP, ResNet) pair of the same sample together and
    pushes all other pairings in the batch apart, returning the scalar loss.
    """

    def __init__(self, resnet, feature_dim=512, temperature=0.5):
        super().__init__()
        self.resnet = resnet
        # NOTE(review): feature_dim is accepted (and stored) for backward
        # compatibility but is never used — embeddings keep whatever width
        # the resnet branch produces.
        self.feature_dim = feature_dim
        self.temperature = temperature
        # Kept for interface compatibility; the loss below works directly on
        # the dot-product similarity matrix.
        self.cosine_similarity = nn.CosineSimilarity(dim=-1)

    def forward(self, clip_embed, res_origin):
        """Return the contrastive loss for one batch.

        clip_embed: [batch_size, dim] precomputed CLIP features.
        res_origin: raw input for self.resnet, which must produce [batch_size, dim].
        """
        res_embed = self.resnet(res_origin)

        batch_size = clip_embed.shape[0]

        # Similarity matrix over the concatenated batch.
        all_features = torch.cat([clip_embed, res_embed], dim=0)                   # [2B, dim]
        sim_matrix = torch.mm(all_features, all_features.T) / self.temperature     # [2B, 2B]

        # Mask the diagonal so no sample is scored against itself.
        mask = torch.eye(2 * batch_size, dtype=torch.bool, device=sim_matrix.device)
        sim_matrix = sim_matrix.masked_fill(mask, float("-inf"))

        # Row i < B has its positive at column i + B, and row B + j at column j.
        labels = torch.cat(
            [torch.arange(batch_size) + batch_size, torch.arange(batch_size)]
        ).to(sim_matrix.device)

        # Cross-entropy over each row selects the positive pair.
        # (The original also computed an unused per-pair cosine similarity
        # "pos_similarity"; that dead code has been removed.)
        return F.cross_entropy(sim_matrix, labels)


# 将resnet网络 分类器网络作为输入 将网络结合起来
# Chain the ResNet backbone with the classification head into one module.
class Combied_res_clas(nn.Module):
    """Pipeline module: ResNet feature extractor followed by a classifier head."""

    def __init__(self, resnet, classifier):
        super().__init__()
        self.resnet = resnet
        self.classifier = classifier

    def forward(self, x):
        """Extract features with the backbone, then classify them."""
        features = self.resnet(x)
        logits = self.classifier(features)
        return logits




# 测试  数据集
# Evaluate the combined model on the test set.
def evaluate(test_loader, combied_res_clas):
    """Compute, print, and return top-1 accuracy of the model over test_loader."""
    combied_res_clas.eval()
    correct = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            predictions = combied_res_clas(inputs).argmax(1)
            correct += (predictions == labels).sum().item()
    accuracy = correct / len(test_loader.dataset)
    print("测试准确率: ", accuracy)
    return accuracy


# 返回保存的 最近训练 的一个模型
# Return the most recently saved checkpoint from a folder.
def get_largest_pth_file(folder_path):
    """Return the path of the .pth file whose name contains the largest number.

    Checkpoint names are assumed to embed an epoch/step counter (e.g.
    "model_12.pth"); the file with the largest such number is treated as the
    most recent. Returns None when the folder does not exist, contains no
    .pth files, or none of their names contain a digit.
    """
    # Robustness fix: the original raised FileNotFoundError when the folder
    # was missing (e.g. before any checkpoint has ever been saved).
    if not os.path.isdir(folder_path):
        return None

    best_file = None
    best_number = -1
    for name in os.listdir(folder_path):
        # Only consider checkpoint files.
        if not name.endswith(".pth"):
            continue
        # The first run of digits in the name is its checkpoint number.
        match = re.search(r'(\d+)', name)
        if match:
            number = int(match.group(1))
            if number > best_number:
                best_number = number
                best_file = name

    # Full path of the winning file, or None when nothing matched.
    return os.path.join(folder_path, best_file) if best_file else None


# 保存一些训练中的信息  输入列表形式 保存为txt文档

# Save a list of training statistics to a text file, one comma-separated line.
def save_txt(save_list: list, file_name: str):
    """Overwrite file_name with the list's items, each rendered via str() and
    followed by ", ", ending with a newline."""
    line = "".join(str(item) + ", " for item in save_list)
    with open(file_name, "w") as f:
        f.write(line + "\n")




if __name__ == "__main__":
    # 测试返回路径
    path = get_largest_pth_file("./model/save")
    print(path)

    # 测试保存列表为txt
    sav_list = [1, 2, 3, 67, 89]
    save_txt(sav_list, "./model/log/train.txt")

