from typing import List, Dict, Optional
from mindnlp.transformers import BertForSequenceClassification, BertTokenizer, PreTrainedModel, BertForMaskedLM, RobertaForMaskedLM, RobertaForSequenceClassification, RobertaTokenizer
from mindspore import nn
from mindspore.common import Tensor
import wandb
from mindspore import ops, Parameter, load_param_into_net, load_checkpoint
from math import ceil
import re
from copy import deepcopy
import mindspore
from mindspore.nn import MSELoss
import pickle
from utils.runcontext import Runcontext
import gc

class ModelFactory:
    """Factory for the staged RoBERTa training pipeline.

    Builds base/teacher models, derives smaller "meta" models whose layers
    are initialized from a larger base model, loads phase-specific
    checkpoints, and freezes the parameters a given phase must not update.

    Phase handling (self.args.phase), as implemented below:
      - 'DT':  masked-LM student wrapped with a teacher (ModelDistillWrapper).
      - 'FT':  load every checkpoint parameter that matches by name.
      - 'OT':  load all layers except the last module and the classifier,
               then freeze the loaded parts.
      - 'ICB': incubation — load everything except the module being replaced.
    NOTE(review): the acronym meanings are inferred from the loading logic
    here; confirm against the training scripts.

    Layer-index bookkeeping throughout relies on parameter names of the form
    "roberta.encoder.layer.<idx>." and on regexes matching a run of one or
    two digits; this assumes the layer index is the only digit run in the
    parameter name (true for RoBERTa parameter names with depth <= 99).
    """

    def __init__(self, args) -> None:
        # Parsed CLI/config namespace.  Methods below read: phase,
        # divided_depths, meta_divided_depths, model, model_path,
        # config_path, client_layer_num.
        self.args = args

    def create_base_model(self, model_name: str):
        """Create a fresh pretrained RoBERTa model and its tokenizer.

        'DT' gets a masked-LM head (distillation targets are LM outputs);
        every other phase gets a 2-label sequence classifier.  The tokenizer
        is always the stock "roberta-base" one, regardless of model_name.
        """
        if self.args.phase == 'DT':
            model = RobertaForMaskedLM.from_pretrained(model_name)
        else:
            model = RobertaForSequenceClassification.from_pretrained(model_name, num_labels=2)
        tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
        print("Tokenizer:\n", tokenizer)
        return model, tokenizer
    
    def create_incubated_base_model(self, model_name: str):
        """Assemble a full base model from per-module incubation checkpoints.

        For each module i, loads ./results/incub-meta-model-{i}.ckpt and
        copies its embedding weights (module 0 only), classifier weights
        (last module only) and the module's encoder layers into the
        corresponding layers of a fresh base model.
        """
        model = RobertaForSequenceClassification.from_pretrained(model_name, num_labels=2)
        divided_depths = self.args.divided_depths
        for i in range(len(self.args.meta_divided_depths)):
            model_load_path = f"./results/incub-meta-model-{i}.ckpt"
            config_load_path = f"./results/incub-meta-model-config-{i}.pickle"
            load_model_params = load_checkpoint(model_load_path)
            # The pickled config records the meta layout the checkpoint was trained with.
            with open(config_load_path, 'rb') as f:
                load_config = pickle.load(f)
            meta_divided_depths = load_config["meta_divided_depths"]
            # Absolute layer-index ranges of module i in the base and meta models.
            base_modular_layers = list(range(sum(divided_depths[:i]), sum(divided_depths[:i]) + divided_depths[i]))
            meta_modular_layers = list(range(sum(meta_divided_depths[:i]), sum(meta_divided_depths[:i]) + meta_divided_depths[i]))
            print("base_modular_layers: ", base_modular_layers)
            print("meta_modular_layers: ", meta_modular_layers)
            # Map each base layer of module i to the meta layer it is loaded from.
            # NOTE(review): assumes both ranges have the same length; confirm the
            # saved meta_divided_depths[i] always equals divided_depths[i] here.
            base2meta = {}
            for j in range(len(base_modular_layers)):
                base2meta[base_modular_layers[j]] = meta_modular_layers[j]
            print("base2meta loading meta into base: ", base2meta)
            model_params = model.parameters_dict()
            # Before/after prints of one LayerNorm weight act as a loading sanity check.
            print("load before in incub test: ", model.parameters_dict()[f"roberta.encoder.layer.{base_modular_layers[0]}.attention.output.LayerNorm.weight"].asnumpy()[:5])
            for name, param in model_params.items():
                if "embeddings" in name and i == 0:
                    # Embeddings only come from the first module's checkpoint.
                    model_params[name].set_data(load_model_params[name])
                elif "classifier" in name and i == len(divided_depths) - 1:
                    # Classifier head only comes from the last module's checkpoint.
                    model_params[name].set_data(load_model_params[name])
                elif "roberta.encoder.layer" in name:
                    # Split the name around the layer index: prefix + <idx> + suffix.
                    prefix, suffix = re.split(r'\d{1,2}', name)
                    layer_index = re.findall(r'\d{1,2}', name)[0]
                    layer_index = int(layer_index)
                    if layer_index not in base_modular_layers:
                        continue
                    # Rewrite the name with the meta-model layer index to look up
                    # the checkpoint parameter, then copy it into the base layer.
                    meta_key = f'{prefix}{base2meta[layer_index]}{suffix}'
                    model_params[name].set_data(load_model_params[meta_key])
            print("load after in incub test: ", model.parameters_dict()[f"roberta.encoder.layer.{base_modular_layers[0]}.attention.output.LayerNorm.weight"].asnumpy()[:5])
                    
        return model
    
    def create_integrated_model(self):
        """Lazily build one "integrated" model per module.

        For model i, module i keeps the full base depth (divided_depths[i])
        while every other module uses its compressed meta depth.  Returns a
        generator of models plus the shared tokenizer; models are built on
        demand to limit peak memory.
        """
        divided_depths = self.args.divided_depths
        original_meta_divided_depths = self.args.meta_divided_depths
        base_model, tokenizer = self.create_base_model(self.args.model)
        def generator():
            for i in range(len(divided_depths)):
                # Copy so each iteration patches its own depth list.
                meta_divided_depths = deepcopy(original_meta_divided_depths)
                meta_divided_depths[i] = divided_depths[i]
                print(f"creating the {i}th integrated model...")
                integrated_model = self.create_small_model(base_model=base_model, divided_depths=divided_depths, meta_divided_depths=meta_divided_depths, idx=i)
                print(f"finished creating the {i}th integrated model")
                yield integrated_model
        return generator(), tokenizer

    def create_small_model(self, base_model: PreTrainedModel, divided_depths: List[int], meta_divided_depths: List[int], idx: Optional[int] = None):
        """Build a compressed "meta" model initialized from base_model.

        Steps:
          1. Construct a meta model with sum(meta_divided_depths) layers
             ('DT' wraps it with a teacher for distillation).
          2. Copy evenly-spaced base layers into the meta layers
             (see evenly_init_meta_model).
          3. Overwrite parts of it from a checkpoint, depending on phase.
          4. Freeze the parameters the phase must not train.

        Args:
            base_model: full-size source model (deep-copied; caller's copy is
                not mutated).
            divided_depths: per-module layer counts of the base model.
            meta_divided_depths: per-module layer counts of the meta model.
            idx: index of the module being incubated ('ICB'/integrated models).
        """
        # init meta model structure
        # Deep copies: the teacher must stay pristine, and config below is
        # mutated in place, so the caller's base_model must not be touched.
        teacher_model = deepcopy(base_model)
        base_model = deepcopy(base_model)
        print("divided_depths: ", divided_depths)
        print("meta_divided_depths: ", meta_divided_depths)
        config = base_model.config
        # Safe to mutate: this config belongs to the local deep copy.
        config.num_hidden_layers = sum(meta_divided_depths)
        print("config: ", config.num_hidden_layers)
        print("config label: ", config.num_labels)
        if self.args.phase == 'DT':
            meta_model = ModelDistillWrapper(config=config, teacher_model=teacher_model, args=self.args)
        else:
            meta_model = RobertaForSequenceClassification(config=config)
        print("Meta model: ", meta_model)

        # init meta model parameters using beta model
        base_model_dict_params = base_model.parameters_dict()
        print("dict params: ", base_model_dict_params)
        meta_model_dict_params: Dict[str, Parameter] = meta_model.parameters_dict()
        print("before: ", meta_model.parameters_dict()["roberta.encoder.layer.0.attention.output.LayerNorm.bias"].asnumpy()[:5])
        # Mapping of selected base layer index -> meta layer index.
        layer_base2meta = self.evenly_init_meta_model(divided_depths, meta_divided_depths)
        print("layer_base2meta: ", layer_base2meta)
        for name, param in base_model_dict_params.items():
            if "roberta.encoder.layer" in name:
                prefix, suffix = re.split(r'\d{1,2}', name)
                layer_index = re.findall(r'\d{1,2}', name)[0]
                layer_index = int(layer_index)
                # Only base layers selected by the mapping seed a meta layer.
                if layer_index not in layer_base2meta.keys():
                    continue
                key = f'{prefix}{layer_base2meta[layer_index]}{suffix}'
                meta_model_dict_params[key].set_data(param)
            else:
                # Non-encoder parameters (embeddings, heads) copy over by name.
                meta_model_dict_params[name].set_data(param)
        print("after: ", meta_model.parameters_dict()["roberta.encoder.layer.0.attention.output.LayerNorm.bias"].asnumpy()[:5])

        # load checkpoint
        model_load_path = self.args.model_path
        config_load_path = self.args.config_path
        load_params_names = []
        if self.args.phase == 'FT':
            # Fine-tuning: load every checkpoint parameter that matches by name.
            load_model_params = load_checkpoint(model_load_path)
            print("load_dicts: ", load_model_params)
            with open(config_load_path, 'rb') as f:
                load_config = pickle.load(f)
            print("load_config: ", load_config)
            # NOTE(review): load_layers is computed but never used in this branch.
            load_layers = range(sum(meta_divided_depths[:-1]))
            # load all paramters
            for name, param in  meta_model.parameters_dict().items():
                # Skip teacher weights and anything absent from the checkpoint.
                if "teacher" in name or name not in load_model_params.keys():
                    continue
                meta_model_dict_params[name].set_data(load_model_params[name])
                load_params_names.append(name)
            print(f"successfully load checkpoint from {model_load_path}, load_params_names: ", load_params_names)
        elif self.args.phase == 'OT':
            load_model_params = load_checkpoint(model_load_path)
            print("load_dicts: ", load_model_params)
            with open(config_load_path, 'rb') as f:
                load_config = pickle.load(f)
            print("load_config: ", load_config)
            # Everything except the last module: those layers come from the checkpoint.
            load_layers = range(sum(meta_divided_depths[:-1]))
            print("load_layers: ", load_layers)
            # load all parameteres except the last modular and the classifier
            for name, param in  meta_model.parameters_dict().items():
                if "teacher" in name:
                    continue
                if "roberta.encoder.layer" in name:
                    prefix, suffix = re.split(r'\d{1,2}', name)
                    layer_index = re.findall(r'\d{1,2}', name)[0]
                    layer_index = int(layer_index)
                    if layer_index not in load_layers:
                        continue
                    tmp_key = name
                    meta_model_dict_params[tmp_key].set_data(load_model_params[tmp_key])
                    load_params_names.append(name)
                elif "embeddings" in name:
                    meta_model_dict_params[name].set_data(load_model_params[name])
                    load_params_names.append(name)
            print(f"successfully load checkpoint from {model_load_path}, load_params_names: ", load_params_names)
        elif self.args.phase == 'ICB':
            # Incubation: module `idx` keeps the base-initialized weights copied
            # above; every other module is restored from the distilled checkpoint.
            load_model_params = load_checkpoint(model_load_path)
            print("load_dicts: ", load_model_params)
            with open(config_load_path, 'rb') as f:
                load_config = pickle.load(f)
            print("load_config: ", load_config)
            
            load_params_names = []
            if idx == 0:
                # Embeddings are only trained alongside the first module.
                for name, param in  meta_model.parameters_dict().items():
                    if "embeddings" in name:
                        meta_model_dict_params[name].set_data(load_model_params[name])
                        load_params_names.append(name)
            if idx == len(meta_divided_depths) - 1:
                # Classifier head is only trained alongside the last module.
                for name, param in  meta_model.parameters_dict().items():
                    if "classifier" in name:
                        meta_model_dict_params[name].set_data(load_model_params[name])
                        load_params_names.append(name)
            # Map each distilled-checkpoint layer index to the layer index it
            # occupies in this integrated model (module idx is widened, so
            # later modules are shifted by the base depths).
            distilled_divided_depth = load_config["meta_divided_depths"]
            distill2meta = {}
            tmp_index = 0
            for i in range(len(divided_depths)):
                distilled_layer_index = sum(distilled_divided_depth[:i])
                for j in range(distilled_divided_depth[i]):
                    distill2meta[distilled_layer_index + j] = tmp_index + j
                tmp_index += divided_depths[i]
            print("distill2meta: ", distill2meta)
            # Checkpoint layers belonging to module idx are skipped (replaced).
            replaced_meta_layers = list(range(sum(distilled_divided_depth[:idx]), sum(distilled_divided_depth[:idx]) + distilled_divided_depth[idx]))
            print("replaced meta layers: ", replaced_meta_layers)
            # Pick the first non-replaced checkpoint layer for the sanity print below.
            print_layer = 0
            for i in range(sum(divided_depths)):
                if i not in replaced_meta_layers:
                    print_layer = i
                    break
            print("load before in incub train: ", meta_model.parameters_dict()[f"roberta.encoder.layer.{distill2meta[print_layer]}.attention.output.LayerNorm.weight"].asnumpy()[:5])
            for name, param in  load_model_params.items():
                if "roberta.encoder.layer" in name:
                    prefix, suffix = re.split(r'\d{1,2}', name)
                    layer_index = re.findall(r'\d{1,2}', name)[0]
                    layer_index = int(layer_index)
                    if layer_index in replaced_meta_layers:
                        continue
                    meta_key = f'{prefix}{distill2meta[layer_index]}{suffix}'
                    meta_model_dict_params[meta_key].set_data(load_model_params[name])
                    load_params_names.append(name)
            print("load after in incub train: ", meta_model.parameters_dict()[f"roberta.encoder.layer.{distill2meta[print_layer]}.attention.output.LayerNorm.weight"].asnumpy()[:5])

        # Freeze parameters that this phase must not update, then report them.
        self.freeze_parameters(model=meta_model, meta_divided_depths=meta_divided_depths, idx=idx)
        freezed_paramters_keys = []
        for name, param in meta_model.parameters_and_names():
            if param.requires_grad == False:
                freezed_paramters_keys.append(name)
        print("Freezed paramters: ", freezed_paramters_keys)

        # Drop the local deep copy promptly to reduce peak memory.
        del base_model
        gc.collect()

        return meta_model

    def evenly_init_meta_model(self, origin_divided_depth, meta_divided_depths):
        """Map selected base ("target") layer indices to meta layer indices.

        For each module i, picks meta_divided_depths[i] layers out of the
        module's origin_divided_depth[i] base layers, roughly evenly spaced,
        and assigns them to the module's consecutive meta layers.

        Returns:
            dict: selected base layer index -> meta layer index.
        """
        target2meta = {}
        for i in range(len(meta_divided_depths)):
            # Nominal spacing between selected base layers within module i.
            step = origin_divided_depth[i] / meta_divided_depths[i]
            step = ceil(step)
            meta_layer_index = sum(meta_divided_depths[:i])
            target_layer_index = sum(origin_divided_depth[:i])
            # flag = number of initial selections that advance by only one base
            # layer (instead of `step`) so the last pick stays inside the module;
            # disabled when step*(meta-1) already fits.  NOTE(review): verify
            # this balancing for all depth combinations.
            flag = meta_divided_depths[i] - origin_divided_depth[i] % meta_divided_depths[i] - 1
            if step * (meta_divided_depths[i] - 1) <  origin_divided_depth[i]:
                flag = 0
            for j in range(meta_divided_depths[i]):
                target2meta[target_layer_index] = j + meta_layer_index
                if flag != 0:
                    target_layer_index += 1
                    flag -= 1
                else:
                    target_layer_index += step
        return target2meta

    def freeze_parameters(self, model: PreTrainedModel, meta_divided_depths: List[int], idx: Optional[int] = None):
        """Set requires_grad=False on the parameters the phase must not train.

        'OT':  freeze all modules except the last, plus the embeddings.
        'DT':  freeze only the teacher's parameters.
        'ICB': train only module idx's layers (plus embeddings for idx==0 and
               classifier for the last module, which are simply left trainable).
        """
        if self.args.phase == "OT":
            freezed_layers = list(range(sum(meta_divided_depths[:-1])))
            print("freezed_layers: ", freezed_layers)
            for name, param in model.parameters_and_names():
                if "roberta.encoder.layer" in name:
                    prefix, suffix = re.split(r'\d{1,2}', name)
                    layer_index = re.findall(r'\d{1,2}', name)[0]
                    layer_index = int(layer_index)
                    if layer_index in freezed_layers:
                        param.requires_grad = False
                elif "embeddings" in name:
                    param.requires_grad = False
        elif self.args.phase == 'DT':
            # Only the teacher is frozen; the whole student remains trainable.
            # NOTE(review): freezed_layers is computed but not used to freeze
            # anything in this branch — confirm that is intentional.
            freezed_layers = list(range(sum(meta_divided_depths[:-1]), sum(meta_divided_depths)))
            print("freezed_layers: ", freezed_layers)
            for name, param in model.parameters_and_names():
                if "teacher" in name:
                    param.requires_grad = False
        elif self.args.phase == 'ICB':
            # Layers of the module currently being incubated stay trainable.
            replaced_meta_layers = list(range(sum(meta_divided_depths[:idx]), sum(meta_divided_depths[:idx]) + meta_divided_depths[idx]))
            for name, param in model.parameters_and_names():
                if "embeddings" in name and idx != 0:
                    param.requires_grad = False
                elif "classifier" in name and idx != len(meta_divided_depths) - 1:
                    param.requires_grad = False
                elif "roberta.encoder.layer" in name:
                    prefix, suffix = re.split(r'\d{1,2}', name)
                    layer_index = re.findall(r'\d{1,2}', name)[0]
                    layer_index = int(layer_index)
                    if layer_index not in replaced_meta_layers:
                        param.requires_grad = False
                    else:
                        param.requires_grad = True

    def create_divided_model(self):
        """Split the incubated model into a client part and a server part.

        Client model: embeddings + the first args.client_layer_num encoder
        layers.  Server model: the remaining layers + the classifier head.

        NOTE(review): the return value is (client_model, full_model,
        tokenizer) — server_model is built and loaded but not returned;
        confirm whether the second element should be server_model.
        """
        model = self.create_incubated_base_model(self.args.model)
        client_config = deepcopy(model.config)
        client_config.num_hidden_layers = self.args.client_layer_num
        client_model = RobertaForSequenceClassification(client_config)
        server_config = deepcopy(model.config)
        server_config.num_hidden_layers = model.config.num_hidden_layers - self.args.client_layer_num
        server_model = RobertaForSequenceClassification(server_config)
        
        # Copy parameters: embeddings -> client, classifier -> server, and
        # encoder layers to whichever side owns them (server indices are
        # shifted down by client_layer_num).
        original_model_params = model.parameters_dict()
        client_model_params = client_model.parameters_dict()
        server_model_params = server_model.parameters_dict()
        print("client model param before loading: ", client_model.parameters_dict()["roberta.embeddings.word_embeddings.weight"][:5])
        print("server model param before loading: ", server_model.parameters_dict()["classifier.dense.weight"][:5])
        client_layers = list(range(self.args.client_layer_num))
        print("client_layers: ", client_layers)
        client_layer_id = 0
        server_layer_id = 0
        for name, param in original_model_params.items():
            if "embeddings" in name:
                client_model_params[name].set_data(param)
            elif "classifier" in name:
                server_model_params[name].set_data(param)
            elif "roberta.encoder.layer" in name:
                    prefix, suffix = re.split(r'\d{1,2}', name)
                    layer_index = re.findall(r'\d{1,2}', name)[0]
                    layer_index = int(layer_index)
                    if layer_index in client_layers:
                        client_model_params[f'{prefix}{layer_index}{suffix}'].set_data(param)
                    else:
                        server_model_params[f'{prefix}{layer_index - self.args.client_layer_num}{suffix}'].set_data(param)
        print("client model param after loading: ", client_model.parameters_dict()["roberta.embeddings.word_embeddings.weight"][:5])
        print("server model param after loading: ", server_model.parameters_dict()["classifier.dense.weight"][:5])

        # create tokenizer
        tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

        print("client model: ", client_model)
        print("server model: ", server_model)
        
        return client_model, model, tokenizer

class ModelDistillWrapper(RobertaForMaskedLM):
    """Masked-LM student that distills hidden states and logits from a teacher.

    On every forward pass the student's hidden state at each module boundary
    (cumulative sums of args.meta_divided_depths) is regressed with MSE onto
    the teacher's hidden state at the corresponding boundary of
    args.divided_depths, and the student's logits are regressed onto the
    teacher's logits.  The weighted sum of these terms replaces the MLM loss
    in the returned output.  Per-term (unweighted) losses are logged to wandb.
    """

    def __init__(self, config, teacher_model: Optional[PreTrainedModel], args):
        """Create the student from `config` and attach the distillation teacher.

        Args:
            config: student model config (caller sets num_hidden_layers to
                sum(args.meta_divided_depths)).
            teacher_model: full-size model providing target hidden states and
                logits.  (Annotation fixed: original hinted the type as `None`.)
            args: namespace providing divided_depths and meta_divided_depths.
        """
        super().__init__(config)
        self.teacher_model = teacher_model
        self.loss_fn = MSELoss()
        self.args = args
        # One equal weight per module's hidden-state loss plus one for the
        # logit loss.  NOTE: index 0 is never read below (indices 1..n and -1
        # are used), which is harmless since all weights are equal.
        self.loss_weights = [1 / (len(self.args.divided_depths) + 1)] * (len(self.args.divided_depths) + 1)
        print("loss_weights: ", self.loss_weights)

    def construct(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Run student and teacher forwards and return the student output with
        its loss replaced by the combined distillation loss."""
        # Hidden states are required to compute the per-module losses,
        # regardless of what the caller asked for.
        output_hidden_states = True
        output = super().construct(input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, labels, output_attentions, output_hidden_states, return_dict)
        teacher_output = self.teacher_model(input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, labels, output_attentions, output_hidden_states, return_dict)
        teacher_divided_depth = self.args.divided_depths
        meta_divided_depths = self.args.meta_divided_depths
        hidden_losses = {}
        loss = 0
        for i in range(len(teacher_divided_depth)):
            # hidden_states[k] is the output of encoder layer k (index 0 being
            # the embeddings), so a cumulative depth indexes the boundary
            # after module i — presumably; TODO confirm against the model's
            # hidden_states layout.
            teacher_hidden_idx = sum(teacher_divided_depth[:i+1])
            meta_hidden_idx = sum(meta_divided_depths[:i+1])
            hidden_loss = self.loss_fn(output.hidden_states[meta_hidden_idx], teacher_output.hidden_states[teacher_hidden_idx])
            loss += hidden_loss * self.loss_weights[i + 1]

            hidden_losses[f"hidden_loss_{i+1}"] = hidden_loss.item()
        # Fix: log the raw (unweighted) logit loss, consistent with the hidden
        # losses above, and add the underscore missing from its wandb key.
        # The total training loss is unchanged.
        output_loss = self.loss_fn(output.logits, teacher_output.logits)
        loss += output_loss * self.loss_weights[-1]
        hidden_losses[f"hidden_loss_{len(teacher_divided_depth) + 1}"] = output_loss.item()
        # Replace the superclass's label-based MLM loss with the distillation loss.
        output.loss = loss
        wandb.log(hidden_losses, step=Runcontext.step)
        return output