import json
import os

import yaml
from google.protobuf.json_format import MessageToDict

from knowledge_extract.service.pb.model_server_pb2 import PredictRequest, Experiment, OptimizerConfig, ModelConfig, ActiveLearningRequest


class PbHelper:
    """Builds protobuf request messages for the model server and converts
    received messages back into plain-dict configs.

    The ``set_*`` methods cache the most recently built ``OptimizerConfig`` /
    ``ModelConfig`` on the instance; the ``create_*`` methods call them and
    then assemble the full request message.
    """

    def __init__(self):
        # Last messages built by set_optimizer_config / set_model_config /
        # create_experiment; None until the corresponding setter has run.
        self.optimizer = None
        self.model_config = None
        self.experiment = None

    def set_optimizer_config(self, optimizer_config=None):
        """Build an ``OptimizerConfig`` message and cache it on ``self.optimizer``.

        Args:
            optimizer_config: optional dict with keys ``optimizer_class`` (str)
                and ``params`` (dict). Missing keys fall back to ``AdamW`` with
                ``{"lr": 3e-5, "eps": 1e-8}``. ``params`` is JSON-serialized
                into the message (protobuf field is a string).
        """
        if optimizer_config is not None:
            assert isinstance(optimizer_config, dict)
            assert isinstance(optimizer_config["params"], dict)
        else:
            optimizer_config = {}
        optimizer_class = optimizer_config.get("optimizer_class", "AdamW")
        params = optimizer_config.get("params", {"lr": 3e-5, "eps": 1e-8})
        self.optimizer = OptimizerConfig(optimizer_class=optimizer_class, params=json.dumps(params))

    def set_model_config(self, model_config=None):
        """Build a ``ModelConfig`` message and cache it on ``self.model_config``.

        Known keys map to message fields (with defaults below); any other key
        is folded into the JSON-serialized ``other_config`` field.

        Args:
            model_config: optional dict; if it contains ``other_config``,
                that value must itself be a dict.
        """
        known_params = {"train_max_seq_length", "eval_max_seq_length", "batch_size", "other_config",
                        "num_train_epochs", "evaluate_during_training", "save_mode", "load_mode"}

        if model_config is not None:
            assert isinstance(model_config, dict)
            if "other_config" in model_config:
                assert isinstance(model_config["other_config"], dict)
        else:
            model_config = {}

        train_max_seq_length = model_config.get("train_max_seq_length", 128)
        eval_max_seq_length = model_config.get("eval_max_seq_length", 128)
        batch_size = model_config.get("batch_size", 32)
        num_train_epochs = model_config.get("num_train_epochs", 3)
        evaluate_during_training = model_config.get("evaluate_during_training", True)
        save_mode = model_config.get("save_mode", "all")
        load_mode = model_config.get("load_mode", "all")
        # Copy so that folding unknown keys below does not mutate the caller's
        # nested "other_config" dict (previously a hidden side effect).
        other_config = dict(model_config.get("other_config", {}))

        for key, value in model_config.items():
            if key not in known_params:
                other_config[key] = value

        self.model_config = ModelConfig(train_max_seq_length=train_max_seq_length,
                                        eval_max_seq_length=eval_max_seq_length,
                                        batch_size=batch_size,
                                        num_train_epochs=num_train_epochs,
                                        evaluate_during_training=evaluate_during_training,
                                        save_mode=save_mode,
                                        load_mode=load_mode,
                                        other_config=json.dumps(other_config))

    def create_experiment(self, task, model, dataset=None, experiment_id=None, project_id=None, version=None,
                          model_config=None, optimizer_config=None, data=None, label_list=None):
        """Assemble an ``Experiment`` message; also cached on ``self.experiment``.

        Args:
            task: one of ``"ner"``, ``"re"``, ``"ner_re"``.
            model: model identifier string.
            dataset: optional dataset name.
            experiment_id / project_id / version: optional ints; -1 marks
                "unset" on the wire (see the ``>= 0`` checks in
                ``experiment_to_config``).
            model_config / optimizer_config: plain dicts, see the setters.
            data / label_list: optional JSON-serializable payloads; serialized
                to strings for the message.

        Returns:
            The built ``Experiment`` message.
        """
        self.set_optimizer_config(optimizer_config)
        self.set_model_config(model_config)
        assert task in ["ner", "re", "ner_re"]
        if data is not None:
            data = json.dumps(data)
        if label_list is not None:
            label_list = json.dumps(label_list)
        experiment_id = -1 if experiment_id is None else experiment_id
        project_id = -1 if project_id is None else project_id
        version = -1 if version is None else version
        experiment = Experiment(task=task, model=model, dataset=dataset, experiment_id=experiment_id,
                                project_id=project_id, version=version,
                                optimizer=self.optimizer, model_config=self.model_config,
                                data=data, label_list=label_list)
        self.experiment = experiment
        return experiment

    def create_active_learning_request(self, task, model, dataset, sampling_dataset, sampling_task, sampling_num,
                                       version=None, experiment_id=None, project_id=None,
                                       model_config=None, optimizer_config=None, label_list=None):
        """Assemble an ``ActiveLearningRequest`` message.

        Args:
            task: one of ``"ner"``, ``"re"``, ``"ner_re"``.
            model / dataset: identifiers for the base training run.
            sampling_dataset / sampling_task / sampling_num: pool and budget
                for the active-learning sampling step.
            version / experiment_id / project_id: optional ints; -1 = unset.
            model_config / optimizer_config / label_list: see the setters /
                ``create_experiment``.

        Returns:
            The built ``ActiveLearningRequest`` message.
        """
        self.set_optimizer_config(optimizer_config)
        self.set_model_config(model_config)
        assert task in ["ner", "re", "ner_re"]
        if label_list is not None:
            label_list = json.dumps(label_list)
        experiment_id = -1 if experiment_id is None else experiment_id
        project_id = -1 if project_id is None else project_id
        version = -1 if version is None else version
        # NOTE(review): this unconditionally overrides whatever
        # num_train_epochs the caller passed in model_config — looks like a
        # hard-coded active-learning setting; confirm it is intentional.
        self.model_config.num_train_epochs = 5
        request = ActiveLearningRequest(task=task, model=model, dataset=dataset,
                                        experiment_id=experiment_id,
                                        project_id=project_id, version=version,
                                        optimizer_config=self.optimizer, model_config=self.model_config,
                                        label_list=label_list, sampling_dataset=sampling_dataset,
                                        sampling_task=sampling_task, sampling_num=sampling_num)
        return request

    def create_predict_request(self, text, task, model, dataset=None, version=None, experiment_id=None,
                               project_id=None, model_config=None, label_list=None):
        """Assemble a ``PredictRequest`` message.

        Args:
            text: non-empty list of strings or dicts to run prediction on;
                JSON-serialized into the message.
            task: one of ``"ner"``, ``"re"``, ``"ner_re"``.
            model / dataset: identifiers.
            version / experiment_id / project_id: optional ints; -1 = unset.
            model_config / label_list: see the setters / ``create_experiment``.

        Returns:
            The built ``PredictRequest`` message.
        """
        # Validate inputs.
        assert task in ["ner", "re", "ner_re"]
        assert isinstance(text, list)
        assert isinstance(text[0], str) or isinstance(text[0], dict)
        # Build and cache the model config.
        self.set_model_config(model_config)
        # Fill in "unset" sentinels.
        experiment_id = -1 if experiment_id is None else experiment_id
        project_id = -1 if project_id is None else project_id
        version = -1 if version is None else version
        # JSON-serialize payloads for string message fields.
        if label_list is not None:
            label_list = json.dumps(label_list)
        # Assemble the request.
        request = PredictRequest(text=json.dumps(text), task=task, model=model, project_id=project_id,
                                 version=version, experiment_id=experiment_id, dataset=dataset,
                                 model_config=self.model_config, label_list=label_list)
        return request

    @staticmethod
    def experiment_to_config(experiment: Experiment):
        """Convert an ``Experiment`` message back to a plain config dict.

        Inverse of ``create_experiment``: JSON-encoded fields are parsed,
        ``other_config`` is flattened into ``model_config``, and id fields
        with the -1 "unset" sentinel are omitted.
        """
        config = {}
        config["main"] = {"model": experiment.model, "task": experiment.task}
        model_config = MessageToDict(experiment.model_config, preserving_proto_field_name=True)
        if "other_config" in model_config:
            other_config = model_config.pop("other_config")
            other_config = json.loads(other_config)
            model_config.update(other_config)
        config["model_config"] = model_config
        config["optimizer"] = {"optimizer_class": experiment.optimizer.optimizer_class,
                               "params": json.loads(experiment.optimizer.params)}
        if experiment.dataset:
            config["main"]["dataset"] = experiment.dataset
        if experiment.experiment_id >= 0:
            config["main"]["experiment_id"] = experiment.experiment_id
        if experiment.version >= 0:
            config["main"]["version"] = experiment.version
        if experiment.project_id >= 0:
            config["main"]["project_id"] = experiment.project_id
        if experiment.data:
            config["data"] = json.loads(experiment.data)
        if experiment.label_list:
            config["label_list"] = json.loads(experiment.label_list)
        return config

    @staticmethod
    def active_learning_request_to_config(experiment: ActiveLearningRequest):
        """Convert an ``ActiveLearningRequest`` back to plain-Python values.

        Returns:
            A 4-tuple ``(config, sampling_dataset, sampling_task,
            sampling_num)`` where ``config`` mirrors the shape produced by
            ``experiment_to_config``.
        """
        config = {}
        config["main"] = {"model": experiment.model, "task": experiment.task}
        model_config = MessageToDict(experiment.model_config, preserving_proto_field_name=True)
        if "other_config" in model_config:
            other_config = model_config.pop("other_config")
            other_config = json.loads(other_config)
            model_config.update(other_config)
        config["model_config"] = model_config
        config["optimizer"] = {"optimizer_class": experiment.optimizer_config.optimizer_class,
                               "params": json.loads(experiment.optimizer_config.params)}
        if experiment.dataset:
            config["main"]["dataset"] = experiment.dataset
        if experiment.experiment_id >= 0:
            config["main"]["experiment_id"] = experiment.experiment_id
        if experiment.version >= 0:
            config["main"]["version"] = experiment.version
        if experiment.project_id >= 0:
            config["main"]["project_id"] = experiment.project_id
        if experiment.label_list:
            config["label_list"] = json.loads(experiment.label_list)
        return config, experiment.sampling_dataset, experiment.sampling_task, experiment.sampling_num

    @staticmethod
    def predict_request_to_config(predict_request: PredictRequest):
        """Convert a ``PredictRequest`` message back to a plain config dict.

        Inverse of ``create_predict_request``: ``text`` and ``label_list``
        are JSON-decoded, ``other_config`` is flattened into
        ``model_config``, and -1 "unset" id fields are omitted.
        """
        config = {"main": {"model": predict_request.model,
                           "task": predict_request.task}, "text": json.loads(predict_request.text)}
        model_config = MessageToDict(predict_request.model_config, preserving_proto_field_name=True)
        if "other_config" in model_config:
            other_config = model_config.pop("other_config")
            other_config = json.loads(other_config)
            model_config.update(other_config)
        config["model_config"] = model_config

        if predict_request.dataset:
            config["main"]["dataset"] = predict_request.dataset
        if predict_request.version >= 0:
            config["main"]["version"] = predict_request.version
        if predict_request.experiment_id >= 0:
            config["main"]["experiment_id"] = predict_request.experiment_id
        if predict_request.project_id >= 0:
            config["main"]["project_id"] = predict_request.project_id
        if predict_request.label_list:
            config["label_list"] = json.loads(predict_request.label_list)
        return config


# def texttool_to_config(texttool_request: TexttoolRequest):
#     config = {}
#     config["main"] = {"model": texttool_request.model, "version": texttool_request.version,
#                       "task": texttool_request.task, "project_id": texttool_request.project_id}
#     model_config = MessageToDict(texttool_request.model_config, preserving_proto_field_name=True)
#     if "other_config" in model_config:
#         other_config = model_config.pop("other_config")
#         other_config = json.loads(other_config)
#         model_config.update(other_config)
#     config["model_config"] = model_config
#     config["optimizer"] = {"optimizer_class": texttool_request.optimizer_config.optimizer_class,
#                            "params": json.loads(texttool_request.optimizer_config.params)}
#     config["train_data"] = json.loads(texttool_request.train_data)
#     config["test_data"] = json.loads(texttool_request.test_data)
#     config["label_list"] = json.loads(texttool_request.label_list)
#     config['acquire'] = texttool_request.acquire
#     return config


def get_optimizer_config(optimizer_config=None):
    """Return an ``OptimizerConfig`` message built from *optimizer_config*.

    ``optimizer_config`` may be None (all defaults) or a dict with
    ``optimizer_class`` (str) and ``params`` (dict). Defaults are ``AdamW``
    with ``{"lr": 3e-5, "eps": 1e-8}``; ``params`` is JSON-encoded into
    the message's string field.
    """
    if optimizer_config is None:
        cfg = {}
    else:
        assert isinstance(optimizer_config, dict)
        assert isinstance(optimizer_config["params"], dict)
        cfg = optimizer_config
    return OptimizerConfig(
        optimizer_class=cfg.get("optimizer_class", "AdamW"),
        params=json.dumps(cfg.get("params", {"lr": 3e-5, "eps": 1e-8})),
    )


def get_model_config(model_config=None):
    """Return a ``ModelConfig`` message built from *model_config*.

    ``model_config`` may be None or a dict; unspecified fields fall back to
    the defaults below, and ``other_config`` (a dict, if present) is
    JSON-encoded into the message's string field.
    """
    if model_config is None:
        cfg = {}
    else:
        assert isinstance(model_config, dict)
        if "other_config" in model_config:
            assert isinstance(model_config["other_config"], dict)
        cfg = model_config
    return ModelConfig(
        train_max_seq_length=cfg.get("train_max_seq_length", 128),
        eval_max_seq_length=cfg.get("eval_max_seq_length", 128),
        batch_size=cfg.get("batch_size", 32),
        num_train_epochs=cfg.get("num_train_epochs", 3),
        evaluate_during_training=cfg.get("evaluate_during_training", True),
        save_mode=cfg.get("save_mode", "all"),
        load_mode=cfg.get("load_mode", "all"),
        other_config=json.dumps(cfg.get("other_config", {})),
    )


# def create_texttool_request(task, model, label_list, train_data, test_data, project_id, version, model_config=None,
#                             optimizer_config=None, acquire=5):
#     optimizer_config = get_optimizer_config(optimizer_config)
#     model_config = get_model_config(model_config)
#     assert task in ["ner", "re", "ner_re"]
#     assert isinstance(label_list, list)
#     assert isinstance(train_data, list)
#     assert isinstance(test_data, list)
#     texttool_request = TexttoolRequest(task=task, model=model, label_list=json.dumps(label_list),
#                                        train_data=json.dumps(train_data), test_data=json.dumps(test_data),
#                                        version=version, project_id=project_id, optimizer_config=optimizer_config,
#                                        model_config=model_config, acquire=acquire)
#
#     return texttool_request
