import os
import logging
from tqdm import tqdm
from sklearn.metrics import accuracy_score

import torch
import torch.nn as nn
import torch.nn.functional as F

from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

from model.model import MMModel
from model.SharedSpecificViT import SharedSpecificViT
from model.SharedSpecificBert import SharedSpecificBERT
from model.MultiModalAttentionModel import MultiModalAttentionModel
from dataloader.datasets import MMDataset
from utils.functions import collect_metrics, dict_to_str

class ClientTrainer:
    """Local trainer for one federated-learning client.

    Owns the client's model, optimizer, scheduler and dataloaders, and runs the
    local train / validation / test loops on a multimodal (text + image)
    classification model.
    """

    def __init__(self, args, model, optimizer, scheduler, client_id, data_type, logger):
        self.args = args
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler

        self.client_id = client_id
        # Modality served by this client; presumably 'text', 'image' or 'it'
        # (image+text), judging by the earlier per-modality optimizer code —
        # TODO(review): confirm against the caller.
        self.data_type = data_type

        self.logger = logger

        # Per-(dataset, client, modality, seed) checkpoint path so concurrent
        # runs do not overwrite each other's best model.
        self.best_model_save_path = os.path.join(
            args.model_save_dir,
            f'{args.dataset}-{client_id}-{data_type}-{args.seed}-best-torchattn.pth')

    def set_model(self):
        """Build a fresh model and wrap it so `self.model.module` is always valid."""
        self.model = MultiModalAttentionModel(self.args)
        self.model = self.model.to(self.args.device)
        if torch.cuda.device_count() > 1:
            self.model = nn.parallel.DistributedDataParallel(
                self.model, device_ids=[self.args.gpu], find_unused_parameters=True)
        else:
            # Plain DataParallel on a single device keeps the `.module`
            # indirection used by train()/valid()/test_epoch() working.
            self.model = nn.parallel.DataParallel(self.model)

    def load_model(self, model=None, state_dict=None):
        """Rebuild the model from either a state dict or another model's weights.

        Args:
            model: source model whose weights are copied when `state_dict` is None.
            state_dict: explicit weights; takes precedence over `model`.
        """
        self.model = MultiModalAttentionModel(self.args)
        if state_dict is not None:
            self.model.load_state_dict(state_dict)
        else:
            self.model.load_state_dict(model.state_dict())
        # Bug fix: previously `model.to(...)` re-assigned the *source* model,
        # discarding the freshly loaded weights (and crashing with
        # AttributeError when only a state_dict was given).
        self.model = self.model.to(self.args.device)
        # Bug fix: mirror set_model() — DDP only when multiple GPUs exist;
        # unconditional DDP fails on single-GPU/CPU setups without a process group.
        if torch.cuda.device_count() > 1:
            self.model = nn.parallel.DistributedDataParallel(
                self.model, device_ids=[self.args.gpu], find_unused_parameters=True)
        else:
            self.model = nn.parallel.DataParallel(self.model)

    def set_optimizer(self):
        """Optimize all model parameters jointly with Adam (lr=1e-4).

        Earlier revisions used per-module parameter groups with separate
        learning rates / weight decay; recover them from version control if
        per-group tuning is needed again.
        """
        self.optimizer = optim.Adam(self.model.parameters(), lr=1e-4)

    def set_scheduler(self):
        """Reduce the LR when the monitored (maximized) metric plateaus."""
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, "max", patience=self.args.lr_patience,
            verbose=True, factor=self.args.lr_factor)

    def set_dataloader(self, train_set, valid_set, test_set):
        """Create (train, valid, test) loaders; distributed-aware on multi-GPU.

        Stores the tuple in `self.data`. All loaders use drop_last=True, so
        metrics are computed over whole batches only.
        """
        if torch.cuda.device_count() > 1:
            train_sampler = DistributedSampler(train_set)
            train_loader = DataLoader(train_set, batch_size=self.args.batch_size,
                                      num_workers=self.args.num_workers,
                                      sampler=train_sampler, pin_memory=False, drop_last=True)

            valid_sampler = DistributedSampler(valid_set)
            valid_loader = DataLoader(valid_set, batch_size=self.args.batch_size,
                                      num_workers=self.args.num_workers,
                                      sampler=valid_sampler, pin_memory=False, drop_last=True)
        else:
            train_loader = DataLoader(train_set, batch_size=self.args.batch_size,
                                      num_workers=self.args.num_workers,
                                      shuffle=True, pin_memory=False, drop_last=True)

            valid_loader = DataLoader(valid_set, batch_size=self.args.batch_size,
                                      num_workers=self.args.num_workers,
                                      shuffle=True, pin_memory=False, drop_last=True)

        test_loader = DataLoader(test_set, batch_size=self.args.batch_size,
                                 num_workers=self.args.num_workers,
                                 shuffle=False, pin_memory=False, drop_last=True)

        self.data = train_loader, valid_loader, test_loader

    def train(self):
        """Run local training with gradient accumulation and periodic validation.

        Returns:
            The best validation results dict observed, or None if validation
            never produced an improvement (e.g. too few steps / epochs).
        """
        self.model.train()
        logger = logging.getLogger('ClientTrainer')
        best_valid = 1e-5
        best_results = None  # bug fix: was unbound when no validation pass ever ran
        nBetter = 0
        total_step = 0
        # Micro-batches folded into one optimizer step; clamp to >= 1 to avoid
        # a zero divisor when batch_gradient < batch_size.
        gradient_accumulation_steps = max(1, int(self.args.batch_gradient / self.args.batch_size))
        for epoch in range(self.args.num_epoch + 1):
            train_loader, valid_loader, test_loader = self.data
            y_pred = []
            y_true = []

            if torch.cuda.device_count() > 1:
                # Reshuffle the per-rank shard each epoch.
                train_loader.sampler.set_epoch(epoch)

            with tqdm(train_loader) as td:
                for batch_image, text_input_ids, text_token_type_ids, text_attention_mask, batch_label in td:
                    text = (text_input_ids.to(self.args.device),
                            text_token_type_ids.to(self.args.device),
                            text_attention_mask.to(self.args.device))
                    image = batch_image.to(self.args.device)
                    labels = batch_label.to(self.args.device).view(-1)
                    loss, logit_m = self.model.module(text, image, labels, False)

                    # .sum() collapses a per-replica loss vector under DataParallel.
                    loss = loss.sum()
                    loss.backward()
                    y_pred.append(logit_m.cpu())
                    y_true.append(batch_label.cpu())
                    total_step += 1

                    if total_step % gradient_accumulation_steps == 0:
                        self.optimizer.step()
                        self.optimizer.zero_grad()

                    # Validate every args.valid_step optimizer steps, but only
                    # after the warm-up period of args.min_epoch epochs.
                    if (total_step % (self.args.valid_step * gradient_accumulation_steps) == 0) and (epoch > self.args.min_epoch):
                        valid_results, best_valid, nBetter = self.valid(best_valid, nBetter, total_step, epoch)
                        if nBetter < 1:
                            logger.info("Client " + str(self.client_id) + ' ' + self.args.dataset + " Valid: " + dict_to_str(valid_results))
                            best_results = valid_results
                        if nBetter > self.args.patience:
                            # Early stopping: no improvement in `patience` checks.
                            return best_results
                logits = torch.cat(y_pred)
                tr_true = torch.cat(y_true).data.cpu().numpy()
                tr_prob = F.softmax(logits, dim=1).data.cpu().numpy()
                # Drive the plateau scheduler with training accuracy.
                tuning_metric = accuracy_score(tr_true, tr_prob.argmax(1))
                self.scheduler.step(tuning_metric)
        return best_results

    def valid(self, best_valid=None, nBetter=None, step=None, epoch=None):
        """Evaluate on the validation split; checkpoint when accuracy improves.

        Args:
            best_valid: best validation accuracy seen so far.
            nBetter: consecutive non-improving validation passes.
            step, epoch: bookkeeping recorded in the results dict.

        Returns:
            (valid_results dict, updated best_valid, updated nBetter).
        """
        self.model.eval()
        with torch.no_grad():
            train_loader, valid_loader, test_loader = self.data
            y_pred = []
            y_true = []
            with tqdm(valid_loader) as td:
                for batch_image, text_input_ids, text_token_type_ids, text_attention_mask, batch_label in td:
                    text = (text_input_ids.to(self.args.device),
                            text_token_type_ids.to(self.args.device),
                            text_attention_mask.to(self.args.device))
                    image = batch_image.to(self.args.device)
                    logit = self.model.module.infer(text, image)
                    y_pred.append(logit.cpu())
                    y_true.append(batch_label.cpu())
            logits = torch.cat(y_pred)
            te_true = torch.cat(y_true).data.cpu().numpy()
            te_prob = F.softmax(logits, dim=1).data.cpu().numpy()
            cur_valid = accuracy_score(te_true, te_prob.argmax(1))
            # Improvement requires exceeding the previous best by a small tolerance.
            isBetter = cur_valid >= (best_valid + 1e-6)
            valid_results = {"step": step, "epoch": epoch}
            valid_results.update(collect_metrics(self.args.dataset, te_true, te_prob))
            if isBetter:
                self.save_checkpoint(self.best_model_save_path)
                best_valid = cur_valid
                nBetter = 0
            else:
                nBetter += 1
        # Bug fix: restore training mode so dropout/batch-norm stay active when
        # train() resumes after this mid-epoch validation pass.
        self.model.train()
        return valid_results, best_valid, nBetter

    def save_checkpoint(self, checkpoint_path):
        """Save the unwrapped model's state dict to `checkpoint_path`."""
        # Unwrap (Distributed)DataParallel so keys carry no 'module.' prefix.
        model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
        torch.save(model_to_save.state_dict(), checkpoint_path)

    def test_epoch(self):
        """Evaluate on the test split and log the resulting metrics."""
        self.model.eval()
        logger = logging.getLogger('ClientTrainer')
        train_loader, valid_loader, test_loader = self.data
        with torch.no_grad():
            y_pred = []
            y_true = []
            with tqdm(test_loader) as td:
                for batch_image, text_input_ids, text_token_type_ids, text_attention_mask, batch_label in td:
                    # Consistency fix: use args.device like train()/valid()
                    # instead of bare .cuda(), so explicit-device runs work.
                    text = (text_input_ids.to(self.args.device),
                            text_token_type_ids.to(self.args.device),
                            text_attention_mask.to(self.args.device))
                    image = batch_image.to(self.args.device)
                    logit = self.model.module.infer(text, image)
                    y_pred.append(logit.cpu())
                    y_true.append(batch_label.cpu())
            logits = torch.cat(y_pred)
            true = torch.cat(y_true).data.cpu().numpy()
            prob = F.softmax(logits, dim=1).data.cpu().numpy()
            test_results = collect_metrics(self.args.dataset, true, prob)
            logger.info("Client " + str(self.client_id) + ' ' + self.args.dataset + " Test: " + dict_to_str(test_results))