#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from PIL.Image import Image
from torch import nn
import torch.nn.functional as F
from transformers import CLIPProcessor, CLIPModel


class CNNMnist(nn.Module):
    """Two-stage convolutional classifier for 28x28 single-channel (MNIST) images.

    Each stage halves the spatial resolution via max-pooling, so a 28x28
    input reaches the linear head as a 7x7x32 feature map (1568 values).
    """

    def __init__(self):
        super(CNNMnist, self).__init__()
        # Stage 1: 1 -> 16 channels; 28x28 -> 14x14 after pooling.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Stage 2: 16 -> 32 channels; 14x14 -> 7x7 after pooling.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        features = self.layer2(self.layer1(x))
        flat = features.view(features.size(0), -1)
        return self.fc(flat)


class CNNCifar(nn.Module):
    """Three-convolution CIFAR-10 classifier emitting 10-way logits."""

    def __init__(self):
        super(CNNCifar, self).__init__()
        self.conv1 = nn.Conv2d(3, 128, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(128, 128, 3)
        self.conv3 = nn.Conv2d(128, 128, 3)
        # Spatial trace for a 32x32 input: 32->30->15->13->6->4,
        # leaving a 4x4x128 feature map for the linear head.
        self.fc1 = nn.Linear(128 * 4 * 4, 10)

    def forward(self, x):
        """Return logits of shape (batch, 10) for a (batch, 3, 32, 32) input."""
        h = self.pool(F.relu(self.conv1(x)))
        h = self.pool(F.relu(self.conv2(h)))
        h = F.relu(self.conv3(h))
        return self.fc1(h.view(-1, 128 * 4 * 4))


class CNNCifarClip(nn.Module):
    """CLIP-augmented CNN for CIFAR-10.

    Local conv features (128*4*4 = 2048-d) are concatenated with the 512-d
    image embedding of a frozen pretrained CLIP ViT-B/32 before the final
    linear classifier. ``config`` must expose ``.device`` for CLIP placement.
    """

    def __init__(self, config):
        super(CNNCifarClip, self).__init__()

        self.config = config

        # Local feature extractor (same topology as CNNCifar).
        self.conv1 = nn.Conv2d(3, 128, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(128, 128, 3)
        self.conv3 = nn.Conv2d(128, 128, 3)

        # Frozen pretrained CLIP backbone plus its preprocessing pipeline.
        self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(self.config.device)
        self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

        # 2048 local features + 512-d CLIP image embedding.
        self.fc1 = nn.Linear(128 * 4 * 4 + 512, 10)

    def forward(self, x):
        """Return 10-way logits fusing local conv and CLIP image features."""
        local = self.pool(F.relu(self.conv1(x)))
        local = self.pool(F.relu(self.conv2(local)))
        local = F.relu(self.conv3(local)).view(-1, 128 * 4 * 4)

        # CLIP runs frozen: no gradients flow into the pretrained backbone.
        # NOTE(review): x is handed to CLIPProcessor as-is — presumably the
        # processor's resize/normalize handles the raw batch; confirm upstream.
        with torch.no_grad():
            inputs = self.processor(text=['a airplane', 'a automobile', 'a bird', 'a cat', 'a deer', 'a dog', 'a frog', 'a horse', 'a ship', 'a truck'],
                                    images=x, return_tensors="pt", padding=True)
            clip_out = self.model(**inputs.to(self.config.device))

        fused = torch.cat((local, clip_out.image_embeds), dim=1)
        return self.fc1(fused)


class CNNCifar100(nn.Module):
    """Three-convolution classifier producing 100-way logits for CIFAR-100."""

    def __init__(self):
        super(CNNCifar100, self).__init__()
        self.conv1 = nn.Conv2d(3, 256, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(256, 256, 3)
        self.conv3 = nn.Conv2d(256, 128, 3)
        # Spatial trace for a 32x32 input: 32->30->15->13->6->4.
        self.fc1 = nn.Linear(128 * 4 * 4, 100)

    def forward(self, x):
        """Return logits of shape (batch, 100)."""
        h = self.pool(F.relu(self.conv1(x)))
        h = self.pool(F.relu(self.conv2(h)))
        h = F.relu(self.conv3(h))
        return self.fc1(h.view(-1, 128 * 4 * 4))


class CNNCifar2(nn.Module):
    """Rebuilt CIFAR-10 CNN fused with frozen CLIP image embeddings.

    Local conv features (64*4*4 = 1024-d) are concatenated with the 512-d
    CLIP ViT-B/32 image embedding, then passed through a two-layer head.
    ``config`` must expose ``.device`` for CLIP placement.
    """

    def __init__(self, config):
        super(CNNCifar2, self).__init__()

        self.config = config

        self.conv1 = nn.Conv2d(3, 32, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.conv3 = nn.Conv2d(64, 64, 3)

        # Frozen pretrained CLIP backbone plus its preprocessing pipeline.
        self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(self.config.device)
        self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

        self.fc1 = nn.Linear(64 * 4 * 4 + 512, 64)
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        """Return 10-way logits fusing local conv and CLIP image features."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = F.relu(self.conv3(out))
        # BUG FIX: flatten the conv features, not the raw input. The original
        # `x.view(-1, 64 * 4 * 4)` discarded all conv work and reshaped a
        # (B, 3, 32, 32) input into (3B, 1024), breaking the batch dimension
        # for the concatenation with the CLIP embeddings below.
        out = out.view(-1, 64 * 4 * 4)

        # CLIP runs frozen: no gradients flow into the pretrained backbone.
        with torch.no_grad():
            inputs = self.processor(text=['a airplane', 'a automobile', 'a bird', 'a cat', 'a deer', 'a dog', 'a frog', 'a horse', 'a ship', 'a truck'],
                                    images=x, return_tensors="pt", padding=True)
            inputs = inputs.to(self.config.device)
            outputs = self.model(**inputs)
        out = torch.cat((out, outputs.image_embeds), dim=1)

        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        return out

class CNNMnistClip(nn.Module):
    """MNIST classifier fusing local CNN features with frozen CLIP embeddings.

    The 7x7x32 local feature map is concatenated with the 512-d image
    embedding CLIP produces from an upsampled 3-channel copy of the digits.
    ``config`` must expose ``.device`` for CLIP placement.
    """

    def __init__(self, config):
        super(CNNMnistClip, self).__init__()
        self.config = config
        # Local extractor: same topology as CNNMnist.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )

        # Frozen pretrained CLIP backbone plus its preprocessing pipeline.
        self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(self.config.device)
        self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

        # 7*7*32 local features + 512-d CLIP image embedding.
        self.fc = nn.Linear(7 * 7 * 32 + 512, 10)

    def forward(self, x):
        """Return 10-way logits for a (batch, 1, 28, 28) input."""
        feats = self.layer2(self.layer1(x))
        feats = feats.view(feats.size(0), -1)

        # CLIP runs frozen: no gradients flow into the pretrained backbone.
        with torch.no_grad():
            # Replicate the single grey channel to 3 channels for CLIP.
            clip_x = torch.repeat_interleave(x, 3, dim=1)
            # Upsample to 224x224, CLIP's expected resolution.
            clip_x = torch.nn.functional.interpolate(clip_x, size=(224, 224), mode='bicubic')
            # Bicubic interpolation can overshoot [0, 1]; clamp before
            # converting to uint8 pixel values for the processor.
            clip_x = torch.clamp(clip_x, 0, 1)
            clip_x = (clip_x * 255).to(torch.uint8)
            inputs = self.processor(text=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],
                                    images=clip_x, return_tensors="pt", padding=True)
            outputs = self.model(**inputs.to(self.config.device))

        fused = torch.cat((feats, outputs.image_embeds), dim=1)
        return self.fc(fused)


class FedAvgCNN(nn.Module):
    """FedAvg CNN: two conv+pool stages followed by a 512-unit MLP head.

    NOTE(review): this class is immediately shadowed by an identical
    re-definition just below; one of the two copies should be removed.
    This copy originally lacked a ``forward`` — it is added here so both
    definitions behave identically.

    Args:
        in_features: number of input channels (1 for MNIST-like data).
        num_classes: number of output logits.
        dim: flattened conv-feature size fed to fc1 (1024 for 28x28 input).
    """

    def __init__(self, in_features=1, num_classes=10, dim=1024):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_features,
                        32,
                        kernel_size=5,
                        padding=0,
                        stride=1,
                        bias=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2))
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32,
                        64,
                        kernel_size=5,
                        padding=0,
                        stride=1,
                        bias=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2))
        )
        self.fc1 = nn.Sequential(
            nn.Linear(dim, 512),
            nn.ReLU(inplace=True)
        )
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        """Return logits of shape (batch, num_classes)."""
        out = self.conv1(x)
        out = self.conv2(out)
        out = torch.flatten(out, 1)
        out = self.fc1(out)
        out = self.fc(out)
        return out


class FedAvgCNN(nn.Module):
    """FedAvg CNN: two conv+pool stages, a 512-d hidden layer, linear head.

    Args:
        in_features: number of input channels (1 for MNIST-like data).
        num_classes: number of output logits.
        dim: flattened conv-feature size entering fc1 (1024 for 28x28 input).
    """

    def __init__(self, in_features=1, num_classes=10, dim=1024):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_features, 32, kernel_size=5, padding=0, stride=1, bias=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2)),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5, padding=0, stride=1, bias=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2)),
        )
        self.fc1 = nn.Sequential(
            nn.Linear(dim, 512),
            nn.ReLU(inplace=True),
        )
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        """Return logits of shape (batch, num_classes)."""
        h = self.conv2(self.conv1(x))
        h = torch.flatten(h, 1)
        return self.fc(self.fc1(h))


class FedGPMAEfficientMLP(nn.Module):
    """Learned per-user mixing of federated MLP parameters.

    Holds one (512, dim) and one (num_classes, 512) weight tensor per user.
    ``forward`` element-wise multiplies each user's weights with that user's
    supplied parameter tensors and returns the average across users.
    """

    def __init__(self, num_classes=10, dim=1600, num_users=20):
        super(FedGPMAEfficientMLP, self).__init__()
        self.num_users = num_users
        # One weight tensor per user; shapes mirror the fc1/fc parameter
        # tensors being mixed rather than conventional linear-layer weights.
        self.fc1_weights = nn.Parameter(torch.randn(num_users, 512, dim, requires_grad=True))
        self.fc_weights = nn.Parameter(torch.randn(num_users, num_classes, 512, requires_grad=True))

    def forward(self, fc1_paras, fc_paras):
        """Return the averaged, weighted (fc1, fc) parameter tensors.

        Args:
            fc1_paras: per-user tensors, indexable, each shaped (512, dim).
            fc_paras: per-user tensors, indexable, each (num_classes, 512).
        """
        device = self.fc1_weights.device
        # Accumulators match a single user's parameter shape.
        mixed_fc1 = torch.zeros_like(fc1_paras[0]).to(device)
        mixed_fc = torch.zeros_like(fc_paras[0]).to(device)

        # Accumulate each user's element-wise product, then average.
        for user in range(self.num_users):
            mixed_fc1 += self.fc1_weights[user] * fc1_paras[user]
            mixed_fc += self.fc_weights[user] * fc_paras[user]

        return mixed_fc1 / self.num_users, mixed_fc / self.num_users