import torch.nn as nn
from network import resnet101 as resnet101
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import torch.tensor as tensor
from torch.autograd import Variable
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
import os
import numpy as np
# from sklearn.model_selection import KFold
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_auc_score as auc
from sklearn.metrics import accuracy_score
from PIL import Image
from tqdm import tqdm

CUDA_STATUS = False

class spatial_RNN(nn.Module):
    """LSTM head over per-frame CNN feature vectors.

    Input:  ``(batch, n_frames, 2048)`` frame features (ResNet pool output).
    Output: ``(batch, 101)`` class scores, or ``(batch, 2048)`` reconstructed
            feature vectors when ``use_fv`` is True.
    """

    def __init__(self, n_frames=10, use_fv=False):
        super(spatial_RNN, self).__init__()
        self.n_frames = n_frames
        self.use_fv = use_fv

        # Per-frame embedding 2048 -> 200 applied before the recurrence.
        self.fc1 = nn.Linear(512 * 4, 200)
        self.leakyRelu = nn.LeakyReLU(negative_slope=0.1)
        self.lstm = nn.LSTM(200, 500, batch_first=True)
        if self.use_fv:
            # Feature-vector mode: map hidden state back to the CNN feature size.
            self.fc2 = nn.Linear(500, 512 * 4)
        else:
            # Classification mode: 101 UCF-101 classes.
            self.fc2 = nn.Linear(500, 101)

    def forward(self, x):  # x: (batch, n_frames, 2048)
        x = x.type(dtype=torch.float)
        # nn.Linear broadcasts over leading dims, so the original per-frame
        # Python loop — which filled a CPU-only torch.zeros buffer and would
        # therefore fail as soon as the model/input moved to CUDA — is
        # unnecessary.
        data = self.leakyRelu(self.fc1(x))
        data, _ = self.lstm(data)
        # BUGFIX: the original returned implicitly None when use_fv=True
        # (forward fell off the end). Both modes now project the last LSTM
        # step through fc2; only fc2's output width differs between modes.
        out = self.fc2(data[:, -1, :])
        return out


class rnn_dataset(Dataset):
    """Dataset yielding ``(n_frames, 2048)`` CNN feature sequences per video.

    Each item runs ``n_frames`` evenly-spaced frames of one video through a
    pretrained spatial ResNet-101 (weights loaded from a checkpoint on disk)
    and returns the stacked feature vectors plus the integer class index.
    """

    def __init__(self, videos, root, n_frames=10, trans=None):
        self.trans = trans
        self.videos = videos
        self.root = root
        self.n_frames = n_frames
        checkpoint = torch.load("D:/ResSpatial.tar", map_location=torch.device('cpu'))
        self.cnn = resnet101(pretrained=True, channel=3, with_fc=False)
        self.cnn.load_state_dict(checkpoint['state_dict'])
        # Fixed feature extractor: freeze batch-norm/dropout behavior.
        self.cnn.eval()

        # classInd.txt lines look like "1 ApplyEyeMakeup"; keep lowercase names
        # so they match the labels parsed out of video directory names.
        self.label_idx = []
        with open('./UCF_list/classInd.txt') as f:
            for l in f:
                label = l.split()[1]
                self.label_idx.append(label.lower())

    def __len__(self):
        return len(self.videos)

    def __getitem__(self, index):
        video = self.videos[index]
        label = self.get_label(video)
        data = self.get_video_frames(label, video)
        tar = self.label_idx.index(label)
        return data, tar

    def get_label(self, video):
        # Video names look like "v_ApplyEyeMakeup_g01_c01"; field 1 is the class.
        return video.split('_')[1].lower()

    def get_video_frames(self, label, video):
        """Return (n_frames, 2048) CNN features for evenly-spaced frames of one video."""
        path = os.path.join(self.root, label, video)
        frames = os.listdir(path)
        # BUGFIX: gap was int(len(frames)/n_frames), which is 0 whenever the
        # video has fewer frames than n_frames and made range() raise
        # "range() arg 3 must not be zero"; clamp the stride to at least 1.
        gap = max(1, int(len(frames) / self.n_frames))
        data = np.zeros((self.n_frames, 512 * 4))
        n = 0
        # No autograd needed when extracting fixed features.
        with torch.no_grad():
            for i in range(0, len(frames), gap):
                if n == self.n_frames:
                    break
                img = Image.open(os.path.join(path, frames[i]))
                if self.trans:
                    img = self.trans(img)
                img = img.unsqueeze(0)
                data[n, :] = self.cnn(img).cpu().numpy()
                n += 1
        # NOTE(review): for videos shorter than n_frames the trailing rows
        # remain zero vectors — confirm this padding is intended downstream.
        return data

class rnn_dataloader():
    """Thin factory that wraps an rnn_dataset in a torch DataLoader."""

    def __init__(self, videos, root, n_framse=10, trans=None):
        # NOTE: "n_framse" keeps the original (typo'd) keyword so existing
        # keyword callers stay compatible; stored under the correct name.
        self.videos = videos
        self.trans = trans
        self.root = root
        self.n_frames = n_framse

    def get_dataloader(self, batch, shuffle=False):
        """Build the dataset and return a DataLoader over it."""
        ds = rnn_dataset(
            self.videos,
            self.root,
            self.n_frames,
            self.trans,
        )
        return DataLoader(dataset=ds, batch_size=batch, shuffle=shuffle)


import copy
def train_1epoch(model, train_loader, val_loader, criterion, optimizer, scheduler, epoch, best_model_wts):
    """Run one training epoch followed by a validation pass.

    Trains ``model`` on ``train_loader``, evaluates on ``val_loader``, steps
    ``scheduler`` on the mean validation loss, snapshots the weights when the
    validation AUC improves, and returns ``(model, best_model_wts)``.

    NOTE(review): ``best_auc`` is reset to 0.0 on every call, so the
    comparison below succeeds every epoch and the snapshot is effectively
    "latest epoch"; tracking a true best across epochs would require passing
    the best AUC in alongside ``best_model_wts``.
    """
    best_auc = 0.0

    print('- ' * 50)
    print('Epoch {}'.format(epoch + 1))
    losses = 0  # accumulated validation loss

    # One-hot targets / raw scores for micro-averaged AUC.
    tars1 = np.array([])
    pres1 = np.array([])
    tars2 = np.array([])
    pres2 = np.array([])

    # Integer targets / argmax predictions for accuracy.
    truth_val = np.array([])
    pred_val = np.array([])
    truth_train = np.array([])
    pred_train = np.array([])

    # ---- training ----
    model.train(True)
    pro = tqdm(train_loader)
    print('准备训练...')
    for batch_idx, (data, target) in enumerate(pro):
        if CUDA_STATUS:
            data = Variable(data.cuda())
            target = Variable(target.cuda())
        else:
            target = Variable(target)

        optimizer.zero_grad()
        outs = model(data)

        loss = criterion(outs, target)
        # AUC bookkeeping. `classes` is keyword-only in sklearn >= 0.24.
        tars1 = np.append(tars1, label_binarize(target.cpu().numpy(), classes=np.arange(101)))
        pres1 = np.append(pres1, outs.data.cpu().numpy())
        # Accuracy bookkeeping.
        truth_train = np.append(truth_train, target.cpu().numpy())
        # BUGFIX: torch.argmax(outs) without dim collapsed the whole batch to
        # a single scalar, so pred/truth lengths diverged and
        # accuracy_score() raised; argmax must be taken per-sample (dim=1).
        pred_train = np.append(pred_train, torch.argmax(outs, dim=1).cpu().numpy())

        loss.backward()
        optimizer.step()
    print('训练结束')

    # ---- validation ----
    print('开始验证...')
    model.train(False)

    with torch.no_grad():  # no autograd needed during evaluation
        for batch_idx, (data, target) in enumerate(val_loader):
            if CUDA_STATUS:
                data = Variable(data.cuda())
                target = Variable(target.cuda())
            else:
                target = Variable(target)

            outs = model(data)

            losses += criterion(outs, target).item()

            tars2 = np.append(tars2, label_binarize(target.cpu().numpy(), classes=np.arange(101)))
            pres2 = np.append(pres2, outs.data.cpu().numpy())
            truth_val = np.append(truth_val, target.cpu().numpy())
            # Same per-sample argmax fix as in the training loop.
            pred_val = np.append(pred_val, torch.argmax(outs, dim=1).cpu().numpy())

    losses /= len(val_loader.dataset)

    accu1 = auc(tars1, pres1, average='micro')
    accu2 = auc(tars2, pres2, average='micro')
    acc1 = accuracy_score(truth_train, pred_train)
    acc2 = accuracy_score(truth_val, pred_val)

    # BUGFIX: loss.item was printed without parentheses, emitting the bound
    # method object instead of the last training-batch loss value.
    print('train: loss: {}, auc: {}, acc: {}; val: loss: {}, auc: {}, acc: {}'.format(
        loss.item(), accu1, acc1, losses, accu2, acc2))

    scheduler.step(losses)

    if accu2 > best_auc:
        # BUGFIX: best_auc was assigned the *training* AUC (accu1) even
        # though the comparison is against the validation AUC (accu2).
        best_auc = accu2
        best_model_wts = copy.deepcopy(model.state_dict())

    model.load_state_dict(best_model_wts)
    return model, best_model_wts


def model_test(model, test_loader, criterion):
    """Evaluate ``model`` on ``test_loader``; print mean loss and micro AUC."""
    with torch.no_grad():
        model.eval()
        loss = 0
        tars = np.array([])  # one-hot ground truth
        pres = np.array([])  # raw model scores

        for data, target in test_loader:
            output = model(data)
            loss += criterion(output, target).item()

            # `classes` is keyword-only in sklearn >= 0.24.
            tars = np.append(tars, label_binarize(target.cpu().numpy(), classes=np.arange(101)))
            # BUGFIX: the raw tensor was appended; move scores to numpy
            # explicitly, consistent with the training/validation loops.
            pres = np.append(pres, output.cpu().numpy())
        loss /= len(test_loader.dataset)
        accu = auc(tars, pres, average='micro')
        print('\n test: loss: {}, auc: {}'.format(loss, accu))

import random
def build_lists(root='D:/data/UCF-101图片/UCF101', train_frac=0.7):
    """Pool every video directory name under ``root`` and split train/test.

    Each class directory under ``root`` contributes its video entries; the
    pooled list is shuffled in place and split ``train_frac`` /
    ``1 - train_frac``. ``root`` and ``train_frac`` are parameterized
    (defaults preserve the original hard-coded behavior).

    Returns:
        (train_videos, test_videos) as numpy arrays of names.
    """
    videos = []
    for c in os.listdir(root):
        videos += os.listdir(os.path.join(root, c))
    random.shuffle(videos)
    n_t = int(len(videos) * train_frac)
    return np.array(videos[:n_t]), np.array(videos[n_t:])

# if __name__=='__main__':

# ---- configuration ----
root = 'D:/data/UCF-101图片/UCF101'
trans = transforms.Compose([
                transforms.Resize([224, 224]),
                # transforms.RandomCrop(224),
                # transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                # ImageNet mean/std, matching the pretrained ResNet backbone.
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                ])


n_frames = 10
# CONSISTENCY FIX: reuse the n_frames variable instead of duplicating the
# literal 10, so changing n_frames above changes the model to match.
model = spatial_RNN(n_frames=n_frames)

if CUDA_STATUS:
    criterion = nn.CrossEntropyLoss().cuda()
else:
    criterion = nn.CrossEntropyLoss()

optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=1, verbose=True)

# ---- training ----
train_vs, test_vs = build_lists()
n_epochs = 10
# kf=KFold(n_splits=n_epochs)
best_model_wts = copy.deepcopy(model.state_dict())
# 70/30 split of the training pool into train/validation.
tv_split = int(len(train_vs) * 0.7)
train_v = train_vs[:tv_split]
val_v = train_vs[tv_split:]
train_loader = rnn_dataloader(train_v, root, n_frames, trans).get_dataloader(batch=25)
val_loader = rnn_dataloader(val_v, root, n_frames, trans).get_dataloader(batch=25)

for epoch in range(n_epochs):
    model, best_model_wts = train_1epoch(model=model, train_loader=train_loader, val_loader=val_loader,
                                         criterion=criterion, optimizer=optimizer,
                                         scheduler=scheduler, epoch=epoch, best_model_wts=best_model_wts)

print('**********' * 10)
path = './spatial_rnn.pth'
# Saves the whole module object (not just the state_dict); loading requires
# this class definition to be importable.
torch.save(model, path)
print('模型已保存')

# ---- testing ----
print('开始测试')
test_loader = rnn_dataloader(test_vs, root, n_frames, trans).get_dataloader(batch=25)
model_test(model, test_loader, criterion)



