import pickle
import sys

import numpy as np
import torch
from torchvision.models import resnet18,ResNet18_Weights
import torch.nn as nn
from torchvision.transforms import v2

class SiameseNetwork(nn.Module):
    """Two-branch CNN that scores whether a pair of RGB images "match".

    Each image goes through its own (independently weighted) 3-stage conv
    stack; the flattened features of both branches are concatenated and fed
    through a small MLP ending in a sigmoid, giving one probability per pair.

    NOTE(review): the head's input size (128 * 8 * 8 per branch) assumes
    64x64 inputs — each MaxPool halves the spatial size three times.
    """

    @staticmethod
    def _make_branch():
        """Build one conv feature extractor: 3 -> 32 -> 64 -> 128 channels,
        halving the spatial resolution at each of the three stages."""
        return nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )

    def __init__(self):
        super(SiameseNetwork, self).__init__()

        # Branch for the first image (weights NOT shared with branch 2).
        self.cnn1 = self._make_branch()
        # Branch for the second image.
        self.cnn2 = self._make_branch()

        # Classifier head over both branches' concatenated features.
        self.fc = nn.Sequential(
            nn.Linear(128 * 8 * 8 * 2, 512),  # 2 branches of 128*8*8 features
            nn.ReLU(),
            nn.Linear(512, 1),  # single logit for the binary decision
        )

    def forward(self, image1, image2):
        """Return a (batch, 1) tensor of match probabilities in [0, 1]."""
        flat1 = self.cnn1(image1).flatten(start_dim=1)
        flat2 = self.cnn2(image2).flatten(start_dim=1)
        logits = self.fc(torch.cat((flat1, flat2), dim=1))
        return torch.sigmoid(logits)

class SiameseResNet(nn.Module):
    """Siamese binary classifier built on a single frozen, pretrained ResNet18.

    Both images pass through the SAME backbone (shared weights); only the
    two-layer head (fc1/fc2) is trainable.
    """

    def __init__(self):
        super(SiameseResNet, self).__init__()
        # Pretrained backbone. `weights` is keyword-only in the modern
        # torchvision API, so pass it by name (positional use raises a
        # TypeError on recent versions); this also matches how
        # SiameseResNet2 calls resnet18 elsewhere in this file.
        self.resnet = resnet18(weights=ResNet18_Weights.DEFAULT)
        self.resnet.fc = nn.Identity()  # strip the ImageNet classification head

        # Freeze the backbone: only fc1/fc2 receive gradients.
        for param in self.resnet.parameters():
            param.requires_grad = False

        # Head: ResNet18 emits 512-d features; x2 because both images'
        # feature vectors are concatenated before classification.
        self.fc1 = nn.Linear(512 * 2, 128)
        self.fc2 = nn.Linear(128, 1)

    def forward_once(self, x):
        """Extract a flattened (batch, 512) feature vector for one image batch."""
        x = self.resnet(x)
        x = x.view(x.size(0), -1)
        return x

    def forward(self, input1, input2):
        """Return a (batch, 1) sigmoid match score for a pair of image batches."""
        output1 = self.forward_once(input1)
        output2 = self.forward_once(input2)

        # Concatenate both feature vectors and classify.
        concatenated = torch.cat((output1, output2), dim=1)
        output = torch.relu(self.fc1(concatenated))
        output = torch.sigmoid(self.fc2(output))
        return output
    
class SiameseResNet2(nn.Module):
    """Siamese binary classifier with TWO independent, trainable ResNet18 backbones.

    Unlike SiameseResNet, each input image has its own randomly initialized
    (weights=None), unfrozen backbone, so the branches can specialize to
    their respective camera views.
    """

    def __init__(self):
        super(SiameseResNet2, self).__init__()
        # Two separate backbones trained from scratch; classification heads
        # are replaced with Identity so each emits raw 512-d features.
        self.resnet1 = resnet18(weights=None)
        self.resnet1.fc = nn.Identity()
        self.resnet2 = resnet18(weights=None)
        self.resnet2.fc = nn.Identity()

        # Head: 512-d per backbone, concatenated -> 1024 -> 128 -> 1.
        self.fc1 = nn.Linear(512 * 2, 128)
        self.fc2 = nn.Linear(128, 1)

    @staticmethod
    def _extract(backbone, x):
        """Run one backbone and flatten its output to (batch, 512)."""
        features = backbone(x)
        return features.view(features.size(0), -1)

    def forward_once1(self, x):
        """Feature vector for the first branch (resnet1)."""
        return self._extract(self.resnet1, x)

    def forward_once2(self, x):
        """Feature vector for the second branch (resnet2)."""
        return self._extract(self.resnet2, x)

    def forward(self, input1, input2):
        """Return a (batch, 1) sigmoid match score for a pair of image batches."""
        output1 = self.forward_once1(input1)
        output2 = self.forward_once2(input2)

        # Concatenate both branches' features and classify.
        concatenated = torch.cat((output1, output2), dim=1)
        output = torch.relu(self.fc1(concatenated))
        output = torch.sigmoid(self.fc2(output))
        return output
    
def read_pickle_file(file_path):
    """Deserialize and return the contents of a pickle file.

    Returns None (after printing a diagnostic) when the file does not
    exist or any other error occurs while loading.

    NOTE(review): pickle.load can execute arbitrary code — only use this
    on trusted files.
    """
    try:
        with open(file_path, 'rb') as handle:
            return pickle.load(handle)
    except FileNotFoundError:
        print("文件不存在，请检查文件路径是否正确。")
    except Exception as error:
        print("读取文件时出现错误：", error)
    return None
    
        
class Classifier():
    """Binary success classifier over a (side, wrist) camera image pair.

    Loads a trained SiameseNetwork checkpoint and maps an observation dict
    with both camera images to a hard 0/1 prediction.
    """

    def __init__(self,
                 model_path=r"E:\study\serl_classifier\5\double_resnet_classifier_200_epochs_BCELoss()_0.0002_2024-06-27_17-04-41.pth"
                 ) -> None:
        # NOTE(review): the checkpoint filename says "double_resnet" but the
        # weights are loaded into SiameseNetwork — confirm the checkpoint
        # actually matches this architecture.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = SiameseNetwork()
        checkpoint = torch.load(model_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.model.eval()
        self.model.to(self.device)

        # uint8 CHW image -> float32 scaled to [0, 1], then ImageNet-normalized.
        self.transforms = v2.Compose([
            v2.ToDtype(torch.float32, scale=True),
            v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

    def predict(self, observation):
        """Return 1 if the image pair scores >= 0.5, else 0.

        observation: dict with observation["images"]["side"] and
        observation["images"]["wrist"] as HWC uint8 arrays (presumably
        64x64x3 to match the network's head — TODO confirm against callers).
        """
        side_image = observation["images"]['side']
        wrist_image = observation["images"]['wrist']
        # HWC -> CHW; kept as uint8 so ToDtype(scale=True) performs the /255.
        side_image = torch.tensor(np.transpose(side_image, [2, 0, 1]), dtype=torch.uint8).to(self.device)
        wrist_image = torch.tensor(np.transpose(wrist_image, [2, 0, 1]), dtype=torch.uint8).to(self.device)
        side_image = self.transforms(side_image).unsqueeze(0)
        wrist_image = self.transforms(wrist_image).unsqueeze(0)
        # Inference only: disable autograd bookkeeping (saves memory/time).
        with torch.no_grad():
            output = self.model(side_image, wrist_image).item()
        return 1 if output >= 0.5 else 0
    

if __name__ == '__main__':
    classifier = Classifier()
    # Hardware drivers are imported lazily so the module can be imported
    # (e.g. for the Classifier class) on machines without the hardware stack.
    from gym_pih.hardware.Imagingsource_for_windows import Camera as ImagingsourceCam
    from gym_pih.hardware.RealsenseCamera import Camera as RealSenseCam
    from gym_pih.hardware.LightSource import LightSource
    import cv2

    # Open both cameras and the light source.
    imagingsource_cam = ImagingsourceCam()
    realsense_cam = RealSenseCam()
    imagingsource_cam.Open()
    realsense_cam.Open()
    imagingsource_cam.SetProperty("Gain", "Value", 6.9)
    imagingsource_cam.SetProperty("Exposure", "Value", float(1 / 60))
    light_source = LightSource()
    light_source.Light_Open(6)

    # Create the preview windows ONCE, before the loop — the original code
    # re-created and re-sized them on every frame, which is wasted work.
    cv2.namedWindow("wrist", cv2.WINDOW_NORMAL)
    cv2.namedWindow("side", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("wrist", 320, 320)
    cv2.resizeWindow("side", 320, 320)

    try:
        while True:
            _, wrist_image = realsense_cam.GetImage()
            _, side_image = imagingsource_cam.GetImage()
            # Crop regions of interest; the wrist crop is already 64x64,
            # the side crop is downscaled to the classifier's input size.
            wrist_image = wrist_image[150:214, 215:279]
            side_image = side_image[60:700, 260:900]
            side_image = cv2.resize(side_image, (64, 64))
            obs = {"images": {"side": side_image, "wrist": wrist_image}}
            rew = classifier.predict(obs)
            cv2.imshow("wrist", wrist_image)
            cv2.imshow("side", side_image)
            cv2.waitKey(1)
            # Overwrite the same console line with the latest prediction.
            sys.stdout.write("\r")
            sys.stdout.write(str(rew))
            sys.stdout.flush()
    finally:
        # Always release hardware, even on Ctrl+C or an exception.
        imagingsource_cam.Close()
        realsense_cam.Close()
        light_source.Light_Close()
        light_source.port_close()