import paddle
import paddle.nn as nn
from paddlenlp.transformers import AutoModel
from ..modules import FlatTransformer
from tqdm import tqdm


class Base(nn.Layer):
    """Shared training / inference scaffolding for transformer-based models.

    Subclasses are expected to set ``self.transformer`` and
    ``self.transformer_output_type`` ('seq' or 'cls') and to override
    ``compute_loss``.
    """

    def forward_transformer(self, input_ids, *args, **kwargs):
        """Run the backbone and return either the full sequence output
        ('seq') or only the first-token vector ('cls').
        """
        sequence_output, *_ = self.transformer(input_ids, *args, **kwargs)
        if self.transformer_output_type == 'seq':
            return sequence_output
        if self.transformer_output_type == 'cls':
            # First token of each sequence (conventionally the [CLS] position).
            return sequence_output[:, 0]
        # Fail loudly instead of silently returning None on a bad config.
        raise ValueError(
            f"Unknown transformer_output_type: {self.transformer_output_type!r}")

    def train_step(self, batch):
        """One optimization step: forward, loss, backward. Returns the loss
        tensor (the caller is responsible for the optimizer step)."""
        out = self(**batch)
        loss = self.compute_loss(out, batch)
        loss.backward()
        return loss

    @paddle.no_grad()
    def predict(self, test_data):
        """Run inference over ``test_data`` and return a list with one model
        output per batch. Note: leaves the model in eval mode."""
        ret = []
        self.eval()
        for batch in tqdm(test_data):
            output = self(**batch)
            ret.append(output)
        return ret

    @paddle.no_grad()
    def evaluate(self, dev_data, metrics):
        """Feed every batch of ``dev_data`` through the model and into
        ``metrics``; restores train mode before returning the accumulated
        main metric."""
        self.eval()
        for batch in tqdm(dev_data):
            output = self(**batch)
            metrics.update(batch['labels'], output, batch)
        self.train()
        main_metrics = metrics.accumulate()
        return main_metrics

    def compute_loss(self, output, batch):
        """Map model output + batch to a scalar loss tensor.

        Must be overridden by subclasses; the original bare ``return`` here
        made ``train_step`` fail later with ``None.backward()``, so raise a
        clear error at the actual point of misuse instead.
        """
        raise NotImplementedError(
            f"{type(self).__name__} must implement compute_loss")


class BaseModel(Base):
    """``Base`` backed by a pretrained transformer loaded via PaddleNLP.

    The backbone weights are resolved from ``args.model_path``.
    """

    def __init__(self, args, transformer_output_type='seq'):
        super().__init__()
        self.args = args
        self.transformer_output_type = transformer_output_type
        self.transformer = AutoModel.from_pretrained(args.model_path)


class FlatBaseModel(BaseModel):
    """``BaseModel`` variant whose backbone is a local ``FlatTransformer``
    instead of a pretrained PaddleNLP model.
    """

    def __init__(self, args, transformer_output_type='seq'):
        # Deliberately skip BaseModel.__init__: it loads the full pretrained
        # AutoModel from disk, which this subclass immediately discarded by
        # overwriting self.transformer. Initialize the nn.Layer machinery
        # directly instead.
        super(BaseModel, self).__init__()
        self.transformer = FlatTransformer(args)
        self.transformer_output_type = transformer_output_type
        self.args = args


class EnsembleModel(nn.Layer):
    """Ensemble that averages the forward outputs of several models.

    All members are expected to accept the same ``**kwargs`` batch format and
    to produce stackable outputs of the same shape.
    """

    def __init__(self, model_list):
        super().__init__()
        self.model_list = nn.LayerList(model_list)
        # Reuse the first member's loss; assumes all members share one
        # compute_loss contract.
        self.compute_loss = self.model_list[0].compute_loss

    def train_step(self, batch):
        """One optimization step over the ensemble output. Returns the loss."""
        out = self(**batch)
        loss = self.compute_loss(out, batch)
        loss.backward()
        return loss

    def forward(self, **kwargs):
        """Average the member models' outputs along a new leading axis."""
        out = [model(**kwargs) for model in self.model_list]
        out = paddle.stack(out, 0).mean(0)
        return out

    @paddle.no_grad()
    def predict(self, test_data):
        """Run inference and return a list with one output per batch.

        Bug fix: the original returned ``output`` (only the LAST batch's
        result) instead of the accumulated ``ret`` list, unlike the matching
        ``Base.predict``.
        """
        ret = []
        self.eval()
        for batch in tqdm(test_data):
            output = self(**batch)
            ret.append(output)
        return ret

    @paddle.no_grad()
    def evaluate(self, dev_data, metrics):
        """Feed every batch through the ensemble and into ``metrics``;
        restores train mode before returning the accumulated main metric."""
        self.eval()
        for batch in tqdm(dev_data):
            output = self(**batch)
            metrics.update(batch['labels'], output, batch)
        self.train()
        main_metrics = metrics.accumulate()
        return main_metrics
