import time
from network import *
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from dataset import TensorDataset
class CDAN(object):
    """Conditional Domain Adversarial Network (CDAN) trainer.

    Jointly trains a shared feature extractor and a label classifier on the
    labelled source domain while aligning source/target features through a
    conditional domain discriminator.  The discriminator is conditioned on
    the classifier predictions via the outer product of features and softmax
    outputs (Long et al., "Conditional Adversarial Domain Adaptation").

    NOTE(review): the domain loss is simply *added* to the label loss and one
    optimizer updates all three networks — presumably `cdan_domain` applies a
    gradient-reversal layer internally so the feature extractor is trained
    adversarially; confirm in network.py.
    """

    def __init__(self, Xs=None, Ys=None, Xt=None, Yt=None, save=False, maxiter=1000):
        """Build networks, optimizer and data loaders.

        Args:
            Xs, Ys: source-domain samples and integer class labels (numpy arrays).
            Xt, Yt: target-domain samples and labels (numpy arrays).  Yt is used
                only for evaluation in val(), never for training.
            save:   if True, overwrite the freshly built networks with the
                pretrained checkpoints saved under model/cdan/cdan_3/.
            maxiter: number of training epochs run by fit().
        """
        torch.manual_seed(100)  # fixed seed for reproducible runs
        self.batch_size = 20
        # Lenet / classifier / cdan_domain are project networks from `network`.
        self.feature = Lenet()
        self.classifier = classifier()
        self.discriminator = cdan_domain()
        # A single Adam optimizer updates all three sub-networks together.
        self.optimizer = optim.Adam(
            [{'params': self.feature.parameters()},
             {'params': self.classifier.parameters()},
             {'params': self.discriminator.parameters()}],
            lr=0.0002, weight_decay=0.0005, betas=(0.9, 0.999))
        self.maxiter = maxiter
        # Used both for the classification loss and the 2-class domain loss.
        self.criterion = nn.CrossEntropyLoss()
        Xs = torch.from_numpy(Xs)
        Ys = torch.from_numpy(Ys)
        Xt = torch.from_numpy(Xt)
        Yt = torch.from_numpy(Yt)
        self.source_data = TensorDataset(data=Xs, label=Ys)
        self.target_data = TensorDataset(data=Xt, label=Yt)
        self.src_data_loader = torch.utils.data.DataLoader(
            self.source_data,
            shuffle=True,
            batch_size=self.batch_size,
            num_workers=4
        )
        self.tgt_data_loader = torch.utils.data.DataLoader(
            self.target_data,
            shuffle=True,
            batch_size=self.batch_size,
            num_workers=4
        )
        # Backward-compatible alias: the original attribute name was misspelled.
        self.tgt_data_laoder = self.tgt_data_loader
        if save:
            self.feature = torch.load('model/cdan/cdan_3/feature_618.pt')
            self.classifier = torch.load('model/cdan/cdan_3/classifier_618.pt')
            self.discriminator = torch.load('model/cdan/cdan_3/discriminator_618.pt')

    def fit(self):
        """Run the adversarial training loop for self.maxiter epochs.

        Per iteration: a source batch drives the classification loss; the
        concatenated source+target batch drives the domain loss through the
        conditional (feature ⊗ softmax) discriminator input.  The domain loss
        is only enabled after a 10-epoch classifier warm-up.  Losses are
        printed and logged to TensorBoard, and val() is called every epoch.
        """
        since = time.time()
        # NOTE(review): log_dir 'dann_scalar' differs from the 'cdan_scalar'
        # used in val(); kept as-is to preserve existing log locations.
        writer = SummaryWriter(log_dir='dann_scalar')
        # Pre-initialize so the post-loop logging below cannot hit a NameError
        # if a data loader happens to be empty.
        loss_label_value = 0.0
        loss_domain_value = 0.0
        for epoch in range(1, self.maxiter + 1):
            len_source = len(self.src_data_loader)
            len_target = len(self.tgt_data_loader)
            num_iter = max(len_source, len_target)
            self.feature.train(True)
            self.classifier.train(True)
            self.discriminator.train(True)
            for it in range(num_iter):
                # Restart whichever loader has been exhausted so the shorter
                # stream cycles while the longer one finishes.
                if it % len_source == 0:
                    source_iter = iter(self.src_data_loader)
                if it % len_target == 0:
                    target_iter = iter(self.tgt_data_loader)
                # BUGFIX: the builtin next() replaces the Python-2-only
                # iterator .next() method, which raises AttributeError here.
                xs, ys = next(source_iter)
                xt, _ = next(target_iter)
                self.optimizer.zero_grad()
                output_s = self.feature(xs.float())
                output_t = self.feature(xt.float())
                label_s = self.classifier(output_s)
                label_t = self.classifier(output_t)

                # Classification loss on the labelled source batch.
                err_label = self.criterion(label_s, ys)
                loss_label_value = err_label.item()

                # Domain labels: 1 = source, 0 = target.
                # BUGFIX: sized by the *actual* batch — the last batch of an
                # epoch can be smaller than self.batch_size (drop_last=False),
                # which previously caused a size mismatch in the domain loss.
                source_domain_label = torch.ones(xs.size(0), dtype=torch.long)
                target_domain_label = torch.zeros(xt.size(0), dtype=torch.long)
                domain_label = torch.cat((source_domain_label, target_domain_label), 0)
                output = torch.cat((output_s, output_t), 0)
                label_out = torch.cat((label_s, label_t), 0)
                # Conditioning: the discriminator sees the flattened outer
                # product feature ⊗ softmax(prediction).  detach() keeps the
                # domain loss from back-propagating through the classifier head
                # via the conditioning term.
                softmax_output = F.softmax(label_out, dim=1).detach()
                op_out = torch.bmm(softmax_output.unsqueeze(2), output.unsqueeze(1))
                domain_output = self.discriminator(
                    op_out.view(-1, softmax_output.size(1) * output.size(1)))
                err_domain = self.criterion(domain_output, domain_label)
                loss_domain_value = err_domain.item()
                # Warm up the classifier alone for the first 10 epochs before
                # enabling the adversarial domain loss.
                err = err_label + err_domain if epoch > 10 else err_label
                err.backward()
                self.optimizer.step()
            self.feature.train(False)
            self.classifier.train(False)
            print('time:' + str(time.time() - since))
            print('epoch:' + str(epoch) + '  label_loss:' + str(loss_label_value) + '  domain_loss:' + \
                  str(loss_domain_value) )
            writer.add_scalars('loss',
                               {'label_loss': loss_label_value, 'domain_loss': loss_domain_value}, epoch)
            self.val(epoch)
            # Snapshot windows hand-picked around previously observed best epochs.
            if (618 <= epoch < 623) or (68 <= epoch < 73):
                torch.save(self.feature, 'model/cdan/cdan_3/feature_%s.pt' % epoch)
                torch.save(self.classifier, 'model/cdan/cdan_3/classifier_%s.pt' % epoch)
                torch.save(self.discriminator, 'model/cdan/cdan_3/discriminator_%s.pt' % epoch)
        writer.close()

    def predict(self, x):
        """Return predicted class indices (LongTensor) for numpy input x."""
        x = torch.from_numpy(x).float()
        with torch.no_grad():  # inference only; skip building the autograd graph
            output = self.feature(x)
            label = self.classifier(output)
        _, predict = label.max(1)
        return predict

    def val(self, epoch):
        """Evaluate and log source/target classification accuracy at `epoch`."""
        writer = SummaryWriter(log_dir='cdan_scalar')
        src_accuracy = self._accuracy(self.src_data_loader)
        print('source accuracy: ' + str(src_accuracy))
        writer.add_scalars('accuracy', {'source_accuracy': src_accuracy}, epoch)
        tgt_accuracy = self._accuracy(self.tgt_data_loader)
        print('target accuracy:' + str(tgt_accuracy))
        writer.add_scalars('accuracy', {'target_accuracy': tgt_accuracy}, epoch)
        writer.close()

    def _accuracy(self, loader):
        """Fraction of samples in `loader` whose argmax prediction equals the label."""
        with torch.no_grad():
            outputs = []
            labels = []
            for x, y in loader:
                feats = self.feature(x.float())
                outputs.append(self.classifier(feats))
                labels.append(y)
            all_output = torch.cat(outputs, 0)
            real_label = torch.cat(labels, 0)
            _, predict = all_output.max(1)
            return torch.sum(predict == real_label).item() / real_label.size(0)

    @staticmethod
    def Entropy(input_):
        """Per-sample entropy of a batch of probability rows.

        BUGFIX: declared @staticmethod — the original lacked `self`, so an
        instance call would have passed the instance as `input_`.

        Args:
            input_: (batch, classes) tensor of probabilities.
        Returns:
            (batch,) tensor of entropies.
        """
        epsilon = 1e-5  # guards against log(0)
        entropy = -input_ * torch.log(input_ + epsilon)
        entropy = torch.sum(entropy, dim=1)
        return entropy
