import os
import torch

from torch import nn

# import pdb
import numpy as np
import random
# from . import networks
from .base_model import BaseModel
from .tianyu import network
from utils.util import AverageMeter,save_csv,roc_plot
from utils.util import LD_param as get_res



def one_hot(batchsize, numclass, tensor):
    """Turn a 1-D tensor of class indices into a (batchsize, numclass) one-hot float matrix.

    The result lives on the same device as *tensor*; row i has a 1 at column
    tensor[i] and 0 elsewhere.
    """
    index = tensor.long().reshape(-1, 1)
    onehot = torch.zeros(batchsize, numclass, device=tensor.device)
    return onehot.scatter_(1, index, 1)

class polarTianYuModel(BaseModel):
    """Liveness-detection model on polarization inputs (TianYu network backbone).

    Trains a classifier with an L1 loss against one-hot labels and tracks
    binary confusion counts (TP/TN/FP/FN) from which accuracy, F-score and
    APCER/NPCER/ACER are derived via ``utils.util.LD_param`` (imported as
    ``get_res``).  Label convention throughout: 1 = positive (generated/attack
    sample), 0 = negative (genuine) — see ``set_input`` and ``cal_score``.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.set_defaults(no_dropout=True)  # default CycleGAN did not use dropout
        parser.add_argument('--num_classes', type=int, default=3, help='the architecture situation of model')
        parser.add_argument('--arch_name', type=str, default='mobilenet', help='the arch of network')
        parser.add_argument('--arch_type', type=str, default='', help='the arch of network')
        # NOTE(review): argparse `type=bool` converts ANY non-empty string to True
        # ("--pretrained False" yields True). Left unchanged to preserve the CLI
        # contract; consider a str2bool helper or `action='store_true'`.
        parser.add_argument('--pretrained', type=bool, default=False, help='the arch of network')
        parser.add_argument('--input_modality', type=str, default='SD', help='the modality of input data')
        if is_train:
            parser.add_argument('--lambda_cm', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')

        return parser

    def __init__(self, opt):
        """Build the network, losses, optimizer and metric accumulators.

        Parameters:
            opt -- option namespace produced by the project's option parser
                   (must carry at least num_classes, input_modality, lr, and
                   the fields BaseModel itself consumes).
        """
        BaseModel.__init__(self, opt)
        self.model_names = ["polarLD"]

        self.netpolarLD = network.define_network(opt)

        # Confusion-matrix counters keyed by category; 'all' aggregates every
        # sample, per-category keys are added lazily at test time (cal_score).
        self.loss_names += ["Accurate", "F_score", "APCER", "NPCER", "ACER"]
        self.TP, self.TN, self.FP, self.FN = {}, {}, {}, {}
        self.TP['all'], self.TN['all'], self.FP['all'], self.FN['all'] = 0, 0, 0, 0

        if self.isTrain:
            self.loss_names += ["polarLD"]
            self.criterionCM = nn.L1Loss()
            self.criterion = nn.L1Loss()
            self.optimizer = torch.optim.Adam(self.netpolarLD.parameters(), lr=opt.lr, weight_decay=1e-3)
            self.optimizers.append(self.optimizer)
        else:
            self.set_requires_grad([self.netpolarLD], False)
            # Accumulated over the whole test epoch for ROC plotting.
            self.groud_truth = []  # (sic) name kept for external compatibility
            self.prediction = []

        # Best validation metric values seen so far (consumed by val_save).
        self.last_val = {key: 0 for key in self.loss_names}

    def set_input(self, data):
        """Unpack a dataloader batch into self.data / self.label (+ one-hot).

        Two batch layouts are supported:
          * paired:     keys `<modality>` and `G<modality>` — the two halves
                        are concatenated (labels 0 and 1 respectively) and
                        shuffled together within the batch;
          * pre-labelled: keys `<modality>` and 'label'.

        Optional keys 'cat_name' and 'category' feed the per-category metric
        bucketing in cal_score.
        """
        modality = self.opt.input_modality
        if modality in data.keys() and 'G' + modality in data.keys():
            indata = torch.cat((data[modality], data['G' + modality]))
            # 0 = genuine half, 1 = generated ('G'-prefixed) half.
            label = torch.cat((torch.zeros(data[modality].shape[0]),
                               torch.zeros(data['G' + modality].shape[0]) + 1), 0)

            # Shuffle so genuine and generated samples are interleaved.
            # (np.random.permutation replaces the old random.shuffle on an
            # np.arange array — same effect, idiomatic numpy.)
            ind = np.random.permutation(label.shape[0])

            self.data = indata[ind].to(self.device)
            self.label = label[ind]

        elif modality in data.keys() and 'label' in data.keys():
            self.data = data[modality].to(self.device)
            self.label = data['label']
        # NOTE(review): if neither key combination is present, self.data /
        # self.label from the previous batch would be silently reused.

        self.label_1hot = one_hot(self.label.shape[0], self.opt.num_classes, self.label).to(self.device)
        if 'cat_name' in data.keys():
            self.cat_name = data['cat_name']
        if 'category' in data.keys():
            self.cat = data['category']
        self.image_paths = None

    def forward(self):
        """Run the classifier; the network's output dict is kept in self.out."""
        self.out = self.netpolarLD(self.data)

    def backward(self):
        """L1 loss between predicted scores and one-hot labels; updates metrics."""
        self.loss_polarLD = self.criterion(self.out['prediction'], self.label_1hot)
        self.loss_polarLD.backward()
        self.cal_score()

    def optimize_parameters(self):
        """One optimization step: forward, zero grads, backward, update."""
        self.forward()
        self.optimizer.zero_grad()
        self.backward()
        self.optimizer.step()

    def cal_score(self):
        """Accumulate confusion counts for the current batch and refresh metrics.

        Predictions are the argmax over the class dimension of
        self.out['prediction'].  At test time the counters are additionally
        bucketed per attack category (self.cat vs. self.cat_name) and the raw
        predictions/labels are stored for ROC plotting.
        """
        ans = torch.max(self.out['prediction'], dim=1).indices.cpu()

        self.TP['all'] += torch.where((ans == 1) & (self.label == 1))[0].shape[0]
        self.TN['all'] += torch.where((ans == 0) & (self.label == 0))[0].shape[0]
        self.FP['all'] += torch.where((ans == 1) & (self.label == 0))[0].shape[0]
        self.FN['all'] += torch.where((ans == 0) & (self.label == 1))[0].shape[0]

        if not self.isTrain:
            self.prediction += ans.tolist()
            self.groud_truth += self.label.tolist()
            for key in self.cat_name.keys():
                if key not in self.TP.keys():
                    # Lazily create per-category counters on first sight.
                    self.TP[key] = 0
                    self.TN[key] = 0
                    self.FP[key] = 0
                    self.FN[key] = 0
                mask = (self.cat == self.cat_name[key])
                self.TP[key] += torch.where(mask & (ans == 1) & (self.label == 1))[0].shape[0]
                self.TN[key] += torch.where(mask & (ans == 0) & (self.label == 0))[0].shape[0]
                self.FP[key] += torch.where(mask & (ans == 1) & (self.label == 0))[0].shape[0]
                self.FN[key] += torch.where(mask & (ans == 0) & (self.label == 1))[0].shape[0]

        param = get_res(self.TP, self.TN, self.FP, self.FN, 2)
        self.loss_Accurate = param['Accurate']['all']
        self.loss_F_score = param['F_score']['all']
        self.loss_APCER = param['APCER']['all']
        self.loss_NPCER = param['NPCER']['all']
        self.loss_ACER = param['ACER']['all']

    def epoch_end(self):
        """Export per-category metrics and the ROC curve after a test epoch, then reset counters."""
        if not self.isTrain:
            param = get_res(self.TP, self.TN, self.FP, self.FN, 2)
            csvdir = os.path.join(self.opt.results_dir, self.opt.name)
            csvfile = '{}_{}_{}_{}'.format(self.opt.arch_name, self.opt.arch_type, self.opt.input_modality, self.opt.name.split('_')[-1])
            sheet_name = '{}{}'.format(self.opt.arch_name + self.opt.arch_type, self.opt.input_modality.upper())
            save_csv(param, csvdir, csvfile, sheet_name, self.loss_names, ['all'] + list(self.cat_name.keys()))

            # BUGFIX: previously called unconditionally, but groud_truth /
            # prediction only exist in test mode (see __init__), so every
            # training epoch raised AttributeError here.
            roc_plot(self.groud_truth, self.prediction, 'test.png', '/home/yalin.huang/test/result/' + self.opt.name)

        # Reset confusion counters for the next epoch.
        self.TP, self.TN, self.FP, self.FN = {}, {}, {}, {}
        self.TP['all'], self.TN['all'], self.FP['all'], self.FN['all'] = 0, 0, 0, 0

    def val_save(self):
        """Return True when current validation metrics beat the best so far.

        F-score is the primary criterion; while both the current and the best
        F-score are still zero, accuracy is used as a fallback.  Updates
        self.last_val in place when a new best is found.
        """
        if self.loss_F_score == 0 and self.last_val["F_score"] == 0:
            if self.last_val["Accurate"] < self.loss_Accurate:
                self.last_val["Accurate"] = self.loss_Accurate
                return True
        elif self.last_val["F_score"] < self.loss_F_score:
            self.last_val["F_score"] = self.loss_F_score
            return True
        return False