import sys
import torch.backends.cudnn as cudnn
from torch.optim import Adam
import torch.utils.data
import argparse
import l2cs1
import torchvision
from models.PSTA_8_att import PSTA
from utils import DeepSupervision, DeepSupervision_acc, accuracy_score1
from sklearn.metrics import accuracy_score
import os
import numpy as np
import attributecnn
import random
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from PIL import Image
from res2net_v1b import res2net101_v1b
#([48, 2048, 7, 7])
# Command-line options declared as (flags, kwargs) pairs and registered in a loop.
_ARG_SPECS = (
    (('--manualSeed',), dict(type=int, default=None, help='manual seed for randomness')),
    (('--niter',), dict(type=int, default=100, help='number of epochs to train for')),
    (('--lr',), dict(type=float, default=0.00001, help='learning rate')),
    (('--beta1',), dict(type=float, default=0.9, help='beta1 for adam')),
    (('--gpu_id',), dict(type=int, default=1, help='GPU ID')),
    (('--resume',), dict(type=int, default=0, help="choose a epochs to resume from (0 to train from scratch)")),
    (('--outf',), dict(default='/home/ztx/ztx/DFGaze/outf', help='folder to output model checkpoints')),
    (('--disable_random',), dict(action='store_true', default=False, help='disable randomness for routing matrix')),
    (('--dropout',), dict(type=float, default=0.05, help='dropout percentage')),
    (('--imageSize',), dict(type=int, default=224, help='the height and width of the input image')),
)

parser = argparse.ArgumentParser()
for _flags, _kwargs in _ARG_SPECS:
    parser.add_argument(*_flags, **_kwargs)
opt = parser.parse_args()
print(opt)

# Seed setup was disabled here; the same logic runs in the __main__ section below.
# if opt.manualSeed is None:
#     opt.manualSeed = random.randint(1, 10000)
# print("Random Seed: ", opt.manualSeed)
# random.seed(opt.manualSeed)
# torch.manual_seed(opt.manualSeed)
# if opt.gpu_id >= 0:
#     torch.cuda.manual_seed_all(opt.manualSeed)
#     cudnn.benchmark = True

opt.random = not opt.disable_random

class MyDatasetff(Dataset):
    """Clip dataset for FaceForensics-style frame folders.

    Walks ``root_dir``; every directory holding at least
    ``stride * clip_len`` files is treated as one video whose sorted
    ``.png`` files form the frame pool.  Label is 0 when the directory
    path contains 'fakeff', else 1.
    """

    def __init__(self, root_dir, clip_len, transforms_=None, test_sample_num=1, stride=8):
        # root_dir: top directory scanned with os.walk
        # clip_len: number of frames sampled per clip
        # transforms_: per-frame transform returning a tensor [C, H, W]
        # stride: temporal subsampling step between sampled frames
        self.root_dir = root_dir
        self.clip_len = clip_len
        self.transforms_ = transforms_
        self.test_sample_num = test_sample_num
        self.toPIL = transforms.ToPILImage()
        self.data = []
        self.stride = stride

        self.class2idx = {'fakeff': 0, 'realff': 1}
        self.class_count = [0] * 2
        self.fake_count = 0

        for base, subdirs, files in os.walk(self.root_dir):
            # NOTE(review): this length test counts *all* files in the
            # directory while only .png files enter the clip pool —
            # confirm the frame folders contain nothing else.
            if len(files) < self.stride * self.clip_len:
                continue
            video = []
            files.sort()
            for i, f in enumerate(files):
                if f.endswith('.png'):
                    video.append({'frame': os.path.join(base, f), 'index': i})
            self.data.append({
                'video': video,
                'label': 0 if 'fakeff' in base else 1,
            })

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return (clip tensor [T, C, H, W], label tensor, video-group name).

        Raises IndexError when the subsampled clip has fewer than 3 frames.
        """
        video = self.data[idx]['video']
        label = self.data[idx]['label']

        # Always start at frame 0 and take every `stride`-th frame.
        clip_start = 0
        clip = video[clip_start: clip_start + (self.clip_len * self.stride): self.stride]
        if len(clip) < 3:  # at least 3 frames are required
            raise IndexError(f"Clip at index {idx} does not contain enough frames (required: 3, found: {len(clip)})")

        # Derive a group name from a frame path: strip the file name, then
        # strip the trailing '_<suffix>' of the directory name.
        a = clip[1]['frame']
        cnames = a.rsplit('/', 1)[0]
        cname = cnames.rsplit('_', 1)[0]

        if self.transforms_:
            trans_clip = []
            for frame in clip:
                frame = Image.open(frame['frame']).convert('RGB')
                frame = self.transforms_(frame)  # tensor [C x H x W]
                trans_clip.append(frame)
            # Bug fix: the clip was previously stacked twice; stack once.
            clip = torch.stack(trans_clip)  # [T, C, H, W]
        else:
            # NOTE(review): `clip` is a list of dicts here, so torch.tensor()
            # would fail — this branch looks unused; confirm callers always
            # pass transforms_.
            clip = torch.tensor(clip)

        return clip, torch.tensor(int(label)), cname


class MyDatasetwild(Dataset):
    """Clip dataset for the 'wild' frame folders.

    Mirrors MyDatasetff, but additionally records a sub-class index taken
    from the second-to-last path component (expected 'fake' or 'real').
    Label is 0 when the directory path contains 'fake_', else 1.
    """

    def __init__(self, root_dir, clip_len, transforms_=None, test_sample_num=1, stride=8):
        # root_dir: top directory scanned with os.walk
        # clip_len: number of frames sampled per clip
        # transforms_: per-frame transform returning a tensor [C, H, W]
        # stride: temporal subsampling step between sampled frames
        self.root_dir = root_dir
        self.clip_len = clip_len
        self.transforms_ = transforms_
        self.test_sample_num = test_sample_num
        self.toPIL = transforms.ToPILImage()
        self.data = []
        self.stride = stride

        self.class2idx = {'fake': 0, 'real': 1}
        self.class_count = [0] * 2
        self.fake_count = 0

        for base, subdirs, files in os.walk(self.root_dir):
            # NOTE(review): the length test counts *all* files while only
            # .png files enter the clip pool — confirm folder contents.
            if len(files) < self.stride * self.clip_len:
                continue
            video = []
            files.sort()
            for i, f in enumerate(files):
                if f.endswith('.png'):
                    video.append({'frame': os.path.join(base, f), 'index': i})

            data = {'video': video}
            # NOTE(review): raises KeyError for any qualifying directory whose
            # parent is not named 'fake' or 'real' — confirm the layout.
            data['class'] = self.class2idx[base.split('/')[-2]]
            self.class_count[data['class']] += 1
            data['label'] = 0 if 'fake_' in base else 1

            self.data.append(data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return (clip tensor [T, C, H, W], label tensor)."""
        video = self.data[idx]['video']
        label = self.data[idx]['label']
        sub_class = self.data[idx]['class']  # read for parity; not returned

        # Always start at frame 0 and take every `stride`-th frame.
        clip_start = 0
        clip = video[clip_start: clip_start + (self.clip_len * self.stride): self.stride]

        if self.transforms_:
            trans_clip = []
            # Fix the RNG seed so the same random transformation is applied
            # to every frame of the clip.
            seed = random.random()
            for frame in clip:
                random.seed(seed)
                # Consistency fix: force RGB like MyDatasetff so grayscale or
                # RGBA frames do not break the transform stack.
                frame = Image.open(frame['frame']).convert('RGB')
                frame = self.transforms_(frame)  # tensor [C x H x W]
                trans_clip.append(frame)
            clip = torch.stack(trans_clip)  # [T, C, H, W]
        else:
            # NOTE(review): `clip` is a list of dicts here; torch.tensor()
            # would fail — this branch looks unused.
            clip = torch.tensor(clip)

        return clip, torch.tensor(int(label))



if __name__ == "__main__":

    # ----- reproducibility -----
    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    if opt.gpu_id >= 0:
        torch.cuda.manual_seed_all(opt.manualSeed)
        cudnn.benchmark = True

    # ----- training log: append when resuming, truncate otherwise -----
    # (a redundant second re-check of opt.manualSeed was removed: the seed
    # is always set above)
    mode = 'a' if opt.resume > 0 else 'w'
    text_writer = open(os.path.join(opt.outf, 'train.csv'), mode)

    # ----- gaze models: a trainable copy and a frozen reference copy -----
    model1 = l2cs1.L2CS(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 90).cuda(opt.gpu_id)
    model1.load_state_dict(
        torch.load('/home/ztx/ztx/my-vst/pretrained_models/pretrained_cnn_models/L2CSNet_gaze360.pkl'))

    model2 = l2cs1.L2CS(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 90).cuda(opt.gpu_id)
    model2.load_state_dict(
        torch.load('/home/ztx/ztx/my-vst/pretrained_models/pretrained_cnn_models/L2CSNet_gaze360.pkl'))
    for p in model2.parameters():
        p.requires_grad = False

    # Temporal aggregation head.
    capnet = PSTA(num_classes=2, seq_len=12).cuda(opt.gpu_id)

    # ----- face-attribute CNN -----
    pretrained_weights_path = '/home/ztx/ztx/my-vst/pretrained_models/pretrained_cnn_models/res34_fair_align_multi_4_20190809.pt'
    # NOTE(review): modelatt1 is pinned to GPU 4 (not opt.gpu_id), frozen, and
    # never used below — confirm whether this is intentional or leftover code.
    modelatt1 = attributecnn.ResNet34(pretrained_weights_path=pretrained_weights_path).cuda(4)
    for p in modelatt1.parameters():
        p.requires_grad = False

    # Smoke-test the trainable attribute CNN with a dummy batch.
    input_data = torch.randn(48, 3, 256, 256).cuda(opt.gpu_id)
    modelatt = attributecnn.ResNet34(pretrained_weights_path=pretrained_weights_path).cuda(opt.gpu_id)
    resnet34_features = modelatt(input_data).cuda(opt.gpu_id)
    print("ResNet34 output features size:", resnet34_features.size())

    # ----- Res2Net backbone -----
    modelcnn = res2net101_v1b()
    # Move the model to the selected GPU.
    modelcnn = modelcnn.cuda(opt.gpu_id)

    model_path = '/home/ztx/ztx/my-vst/pretrained_models/pretrained_cnn_models/premodel_res2net.pth'
    modelcnn.load_state_dict(torch.load(model_path))

    # Smoke-test the backbone with a dummy batch.
    input_data = torch.randn(48, 3, 256, 256).cuda(opt.gpu_id)
    res2net_features, res2net_fc_output = modelcnn(input_data)

    # Move each output tensor to the selected GPU.
    res2net_features = res2net_features.cuda(opt.gpu_id)
    res2net_fc_output = res2net_fc_output.cuda(opt.gpu_id)

    # Print the Res2Net101 output sizes.
    print("Res2Net101 output features size:", res2net_features.size())
    print("Res2Net101 FC output size:", res2net_fc_output.size())

    print('--------------model1--------------------')
    print(model1)
    print('--------------modelcnn--------------------')
    print(modelcnn)
    print('----------------modelatt------------------')
    print(modelatt)
    print('----------------capnet------------------')
    print(capnet)

    # ----- losses -----
    capsule_loss = torch.nn.CrossEntropyLoss().cuda(opt.gpu_id)
    l1loss = torch.nn.L1Loss().cuda(opt.gpu_id)

    xent = torch.nn.CrossEntropyLoss().cuda(opt.gpu_id)
    tent = torch.nn.CrossEntropyLoss().cuda(opt.gpu_id)

    # ----- one optimizer over all trainable sub-models -----
    lr = opt.lr
    optimizer = Adam([
        {'params': model1.parameters(), 'lr': lr},
        {'params': modelcnn.parameters(), 'lr': lr},
        {'params': modelatt.parameters(), 'lr': lr},
        {'params': capnet.parameters(), 'lr': lr},
    ], lr=opt.lr, betas=(opt.beta1, 0.999))