from tensorboardX.writer import SummaryWriter
from config import Config
import torch
import torch.nn as nn

from loss_function.utils import filter, filter_by_tao, filter_replace, CE_from_label, filter_replace_by_tao

# For updating learning rate
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group in *optimizer*."""
    for group in optimizer.param_groups:
        group.update(lr=lr)


class Train():
    """Trainer for a student/teacher network pair under noisy labels.

    Strategies provided:

    * ``train``                  -- plain cross-entropy training of one net.
    * ``train_batch_filter``     -- per-batch loss filtering (keep the
                                    lowest-loss fraction of each batch).
    * ``train_epoch_filter``     -- per-epoch loss-threshold (``tao``)
                                    filtering computed over the whole set.
    * ``train_*_filter_replace`` -- same filtering, with the teacher net
                                    supplying replacement supervision (exact
                                    semantics live in ``loss_function.utils``).

    NOTE(review): the filter methods unpack ``(images, _, noise)`` from
    ``train_loader`` while ``train``/``eval`` unpack ``(images, labels)`` —
    presumably different Dataset wrappers are used per experiment; confirm
    against the caller.
    """

    def __init__(self, student_net, teacher_net, train_loader, val_loader, test_loader, config):
        self.student_net = student_net
        self.teacher_net = teacher_net
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader
        self.config = config

        self.criterion = nn.CrossEntropyLoss()
        # One Adam optimizer per network; both start from the same base lr.
        self.student_optimizer = torch.optim.Adam(student_net.parameters(), lr=config.learning_rate)
        self.teacher_optimizer = torch.optim.Adam(teacher_net.parameters(), lr=config.learning_rate)

    def train(self, mode):
        """Train one network with plain cross-entropy; return its test accuracy.

        ``mode == 'student'`` trains the student on ``train_loader`` for
        ``config.num_epochs``; any other mode trains the teacher on
        ``val_loader`` for ``config.num_epochs_teacher``.
        """
        model, data_loader, num_epochs, optimizer = (
            (self.student_net, self.train_loader, self.config.num_epochs, self.student_optimizer)
            if mode == 'student'
            else (self.teacher_net, self.val_loader, self.config.num_epochs_teacher, self.teacher_optimizer)
        )
        device = self.config.device
        for _ in range(num_epochs):
            model.train()
            for images, labels in data_loader:
                images = images.to(device)
                labels = labels.to(device)
                # BUG FIX: this previously always ran self.teacher_net, so in
                # 'student' mode the student never received any gradients.
                outputs = model(images)
                loss = self.criterion(outputs, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

        return self.eval(mode)

    def train_batch_filter(self):
        """Train the student, dropping the highest-loss samples of each batch.

        ``filter`` here is the project helper imported from
        ``loss_function.utils`` (it shadows the builtin); it is expected to
        keep roughly the ``(1 - noise_rate)`` lowest-loss fraction per batch.
        """
        device = self.config.device
        writer = SummaryWriter('runs/batch_filter/')
        curr_lr = self.config.learning_rate
        for epoch in range(self.config.num_epochs):
            self.student_net.train()
            avg_loss = 0
            for i, (images, _, noise) in enumerate(self.train_loader):
                images = images.to(device)
                noise = noise.to(device)
                outputs = self.student_net(images)
                loss = filter(outputs, noise, self.config.noise_rate)
                self.student_optimizer.zero_grad()
                loss.backward()
                self.student_optimizer.step()
                # Running mean of the batch losses seen so far this epoch.
                avg_loss = (avg_loss * i + loss.item()) / (i + 1)
            print("Epoch [{}/{}], Loss: {:.4f}".format(epoch+1, self.config.num_epochs, avg_loss))
            writer.add_scalar('train loss', avg_loss, global_step=epoch+1)

            # Decay learning rate every 20 epochs.
            if (epoch+1) % 20 == 0:
                curr_lr /= 3
                update_lr(self.student_optimizer, curr_lr)

            acc = self.eval('student')
            print('Accuracy of the student model on the test images: {} %'.format(acc))
            writer.add_scalar('test acc', acc / 100, global_step=epoch+1)

    def train_epoch_filter(self):
        """Train the student with a per-epoch loss threshold.

        Each epoch makes two passes over ``train_loader``: pass 1 collects
        every sample's cross-entropy loss to pick the threshold ``tao`` that
        keeps the ``(1 - noise_rate)`` lowest-loss fraction; pass 2 trains on
        the samples whose loss is below ``tao`` (via ``filter_by_tao``).
        """
        device = self.config.device
        writer = SummaryWriter('runs/epoch_filter/')
        curr_lr = self.config.learning_rate
        for epoch in range(self.config.num_epochs):
            self.student_net.train()
            avg_loss = 0
            all_losses = []
            # Pass 1: loss statistics only — no gradients needed.
            with torch.no_grad():
                for images, _, noise in self.train_loader:
                    images = images.to(device)
                    noise = noise.to(device)
                    outputs = self.student_net(images)
                    all_losses.extend(CE_from_label(outputs, noise).tolist())
            # Clamp so noise_rate == 0 doesn't index past the end.
            kth = min(int(len(all_losses) * (1 - self.config.noise_rate)), len(all_losses) - 1)
            tao = sorted(all_losses)[kth]
            # Pass 2: train on the filtered samples.
            # BUG FIX: this loop was not enumerated, so `i` leaked from pass 1
            # and stayed constant, corrupting the running-average loss.
            for i, (images, _, noise) in enumerate(self.train_loader):
                images = images.to(device)
                noise = noise.to(device)
                outputs = self.student_net(images)
                loss = filter_by_tao(outputs, noise, tao)
                self.student_optimizer.zero_grad()
                loss.backward()
                self.student_optimizer.step()
                avg_loss = (avg_loss * i + loss.item()) / (i + 1)

            print("Epoch [{}/{}], Loss: {:.4f}".format(epoch+1, self.config.num_epochs, avg_loss))
            writer.add_scalar('train loss', avg_loss, global_step=epoch+1)

            # Decay learning rate every 20 epochs.
            if (epoch+1) % 20 == 0:
                curr_lr /= 3
                update_lr(self.student_optimizer, curr_lr)

            acc = self.eval('student')
            print('Accuracy of the student model on the test images: {} %'.format(acc))
            writer.add_scalar('test acc', acc / 100, global_step=epoch+1)

    def train_batch_filter_replace(self):
        """Batch-level filtering where the teacher replaces filtered targets.

        The teacher is first pre-trained with plain cross-entropy on
        ``val_loader`` (see ``train``), then both nets are updated jointly:
        ``filter_replace`` returns one loss per network.
        """
        print('Teacher net initialization begin...')
        acc = self.train('teacher')
        print('Accuracy of teacher init model on the test images: {} %'.format(acc))

        device = self.config.device
        writer = SummaryWriter('runs/batch_filter_replace/')
        curr_lr = self.config.learning_rate
        for epoch in range(self.config.num_epochs):
            self.student_net.train()
            self.teacher_net.train()
            avg_loss = 0
            for i, (images, _, noise) in enumerate(self.train_loader):
                images = images.to(device)
                noise = noise.to(device)
                outputs1 = self.student_net(images)
                outputs2 = self.teacher_net(images)
                student_loss, teacher_loss = filter_replace(outputs1, outputs2, noise, self.config.noise_rate)
                self.student_optimizer.zero_grad()
                self.teacher_optimizer.zero_grad()
                student_loss.backward()
                teacher_loss.backward()
                self.student_optimizer.step()
                self.teacher_optimizer.step()
                # Running mean tracks the *student* loss only.
                avg_loss = (avg_loss * i + student_loss.item()) / (i + 1)
            print("Epoch [{}/{}], Loss: {:.4f}".format(epoch+1, self.config.num_epochs, avg_loss))
            writer.add_scalar('train loss', avg_loss, global_step=epoch+1)

            # Decay learning rate every 20 epochs.
            # NOTE(review): only the student's lr is decayed; the teacher keeps
            # its initial lr — confirm this asymmetry is intentional.
            if (epoch+1) % 20 == 0:
                curr_lr /= 3
                update_lr(self.student_optimizer, curr_lr)

            acc = self.eval('student')
            print('Accuracy of the student model on the test images: {} %'.format(acc))
            writer.add_scalar('test acc', acc / 100, global_step=epoch+1)

            acc = self.eval('teacher')
            print('Accuracy of the teacher model on the test images: {} %'.format(acc))
            writer.add_scalar('teacher test acc', acc / 100, global_step=epoch+1)

    def train_epoch_filter_replace(self):
        """Epoch-threshold filtering where the teacher replaces filtered targets.

        Combines the two-pass ``tao`` threshold of ``train_epoch_filter`` with
        the joint student/teacher update of ``train_batch_filter_replace``
        (via ``filter_replace_by_tao``).
        """
        print('Teacher net initialization begin...')
        acc = self.train('teacher')
        print('Accuracy of teacher init model on the test images: {} %'.format(acc))

        device = self.config.device
        writer = SummaryWriter('runs/epoch_filter_replace/')
        curr_lr = self.config.learning_rate
        for epoch in range(self.config.num_epochs):
            self.student_net.train()
            self.teacher_net.train()
            avg_loss = 0
            all_losses = []
            # Pass 1: loss statistics only — no gradients needed.
            with torch.no_grad():
                for images, _, noise in self.train_loader:
                    images = images.to(device)
                    noise = noise.to(device)
                    outputs = self.student_net(images)
                    all_losses.extend(CE_from_label(outputs, noise).tolist())
            # Clamp so noise_rate == 0 doesn't index past the end.
            kth = min(int(len(all_losses) * (1 - self.config.noise_rate)), len(all_losses) - 1)
            tao = sorted(all_losses)[kth]

            # Pass 2: joint update of both networks on the filtered samples.
            for i, (images, _, noise) in enumerate(self.train_loader):
                images = images.to(device)
                noise = noise.to(device)
                outputs1 = self.student_net(images)
                outputs2 = self.teacher_net(images)
                student_loss, teacher_loss = filter_replace_by_tao(outputs1, outputs2, noise, tao)
                self.student_optimizer.zero_grad()
                self.teacher_optimizer.zero_grad()
                student_loss.backward()
                teacher_loss.backward()
                self.student_optimizer.step()
                self.teacher_optimizer.step()
                avg_loss = (avg_loss * i + student_loss.item()) / (i + 1)
            print("Epoch [{}/{}], Loss: {:.4f}".format(epoch+1, self.config.num_epochs, avg_loss))
            # CONSISTENCY FIX: log the epoch average like every other method,
            # not just the last batch's student loss.
            writer.add_scalar('train loss', avg_loss, global_step=epoch+1)

            # Decay learning rate every 20 epochs.
            # NOTE(review): only the student's lr is decayed; the teacher keeps
            # its initial lr — confirm this asymmetry is intentional.
            if (epoch+1) % 20 == 0:
                curr_lr /= 3
                update_lr(self.student_optimizer, curr_lr)

            acc = self.eval('student')
            print('Accuracy of the student model on the test images: {} %'.format(acc))
            writer.add_scalar('test acc', acc / 100, global_step=epoch+1)

            acc = self.eval('teacher')
            print('Accuracy of the teacher model on the test images: {} %'.format(acc))
            writer.add_scalar('teacher test acc', acc / 100, global_step=epoch+1)

    def eval(self, mode):
        """Return top-1 accuracy (percent) of the chosen net on ``test_loader``.

        ``mode == 'student'`` evaluates the student; anything else the teacher.
        Leaves the model in eval() mode; training loops re-enable train().
        """
        model = self.student_net if mode == 'student' else self.teacher_net
        device = self.config.device
        model.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            for images, labels in self.test_loader:
                images = images.to(device)
                labels = labels.to(device)
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

            # (An unreachable print used to follow this return; removed.)
            return 100 * correct / total

        
