import flair
from flair.models import *
from flair.embeddings.base import load_embeddings
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.training_utils import store_embeddings
from flair.data import Dictionary, Label, Sentence, Span, get_spans_from_bio

from tqdm import tqdm
import torch
import onnx
import onnxruntime as ort
from typing import Union, Dict, Any, Type, cast, List, Optional
import pickle

class SequenceTaggerForNPU(SequenceTagger):
    """SequenceTagger variant whose forward pass runs through an ONNX Runtime session.

    The torch forward computation is replaced by :meth:`forward_onnx`, which feeds
    the prepared sentence tensors to a pre-exported ONNX model. Use
    :meth:`load_onnx` to construct an instance from an ``.onnx`` file plus its
    pickled constructor metadata, then :meth:`predict_onnx` for inference.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Populated by load_onnx(); remains None until a model is loaded.
        self.onnx_session = None
        # Placeholder for an ACL/NPU model handle; not set anywhere in this file.
        self.aclmodel = None

    @classmethod
    def load_onnx(cls, model_file: str, **kwargs):
        """Load a tagger from an exported ONNX model and its pickled metadata.

        Expects a ``<name>.pkl`` file next to ``<name>.onnx`` holding the
        constructor arguments saved at export time (embeddings, tag dictionary,
        tag format/type, CRF/RNN flags, ...).

        Args:
            model_file: Path to the ``.onnx`` model file.
            **kwargs: Additional constructor arguments; values found in the
                metadata pickle override entries with the same key.

        Returns:
            A ``SequenceTaggerForNPU`` instance with an active ONNX Runtime session.
        """
        pickle_file = model_file.replace(".onnx", ".pkl")
        # SECURITY NOTE: pickle.load can execute arbitrary code -- only load
        # model files from trusted sources.
        with open(pickle_file, "rb") as f:
            model_info = pickle.load(f)

        embeddings = model_info.get("embeddings")
        # Embeddings may have been serialized as a parameter dict; rehydrate them.
        if isinstance(embeddings, dict):
            embeddings = load_embeddings(embeddings)
        kwargs["embeddings"] = embeddings
        # Forward the remaining saved constructor arguments verbatim.
        for key in (
            "tag_dictionary",
            "tag_format",
            "tag_type",
            "use_crf",
            "use_rnn",
            "reproject_embeddings",
            "init_from_state_dict",
        ):
            kwargs[key] = model_info.get(key)

        instance = cls(**kwargs)
        instance.onnx_session = ort.InferenceSession(model_file)
        return instance

    def forward_onnx(self, sentence_tensor, lengths):
        """Forward propagation through the network using ONNX Runtime.

        Args:
            sentence_tensor: A tensor representing the batch of sentences.
            lengths: An IntTensor with the lengths of the respective sentences.

        Returns:
            A list of torch tensors, one per output of the ONNX model.
        """
        # ONNX Runtime consumes numpy arrays, not torch tensors.
        sentence_tensor_np = sentence_tensor.cpu().numpy()
        lengths_np = lengths.cpu().numpy()

        # Input names must match those used when the model was exported.
        inputs = {
            "input_tensor": sentence_tensor_np,
            "lengths": lengths_np,
        }
        outputs = self.onnx_session.run(None, inputs)
        # Convert outputs back to torch tensors for the downstream flair code.
        return [torch.tensor(out) for out in outputs]

    def predict_onnx(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size: int = 32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
        force_token_predictions: bool = False,
    ):
        """Predicts labels with CRF or Softmax, running inference through ONNX.

        Labels are attached to the given sentences in place.

        Args:
            sentences: List of sentences in batch
            mini_batch_size: batch size for test data
            return_probabilities_for_all_classes: Whether to return probabilities for all classes
            verbose: whether to use progress bar
            label_name: which label to predict
            return_loss: whether to return loss value
            embedding_storage_mode: determines where to store embeddings - can be "gpu", "cpu" or None.
            force_token_predictions: add labels per token instead of span labels, even if `self.predict_spans` is True

        Returns:
            ``(overall_loss, label_count)`` if ``return_loss`` is True, else None.
        """
        if label_name is None:
            label_name = self.tag_type

        if not sentences:
            return sentences

        # make sure it's a list
        if not isinstance(sentences, list) and not isinstance(sentences, flair.data.Dataset):
            sentences = [sentences]

        Sentence.set_context_for_sentences(cast(List[Sentence], sentences))

        # filter empty sentences
        sentences = [sentence for sentence in sentences if len(sentence) > 0]

        # reverse sort all sequences by their length
        reordered_sentences = sorted(sentences, key=len, reverse=True)

        if len(reordered_sentences) == 0:
            return sentences

        dataloader = DataLoader(
            dataset=FlairDatapointDataset(reordered_sentences),
            batch_size=mini_batch_size,
        )
        # progress bar for verbosity
        if verbose:
            dataloader = tqdm(dataloader, desc="Batch inference")

        overall_loss = torch.zeros(1, device=flair.device)
        label_count = 0
        # Inference only: disable autograd tracking (matches flair's predict()).
        with torch.no_grad():
            for batch in dataloader:
                # stop if all sentences are empty
                if not batch:
                    continue

                # get features from forward propagation
                sentence_tensor, lengths = self._prepare_tensors(batch)
                features = self.forward_onnx(sentence_tensor, lengths)

                # remove previously predicted labels of this type
                for sentence in batch:
                    sentence.remove_labels(label_name)

                # if return_loss, get loss value
                if return_loss:
                    gold_labels = self._prepare_label_tensor(batch)
                    loss = self._calculate_loss(features, gold_labels)
                    overall_loss += loss[0]
                    label_count += loss[1]

                # make predictions
                if self.use_crf:
                    predictions, all_tags = self.viterbi_decoder.decode(
                        features, return_probabilities_for_all_classes, batch
                    )
                else:
                    predictions, all_tags = self._standard_inference(
                        features, batch, return_probabilities_for_all_classes
                    )

                # add predictions to Sentence
                for sentence, sentence_predictions in zip(batch, predictions):
                    # BIOES-labels need to be converted to spans
                    if self.predict_spans and not force_token_predictions:
                        sentence_tags = [label[0] for label in sentence_predictions]
                        sentence_scores = [label[1] for label in sentence_predictions]
                        predicted_spans = get_spans_from_bio(sentence_tags, sentence_scores)
                        for predicted_span in predicted_spans:
                            span: Span = sentence[predicted_span[0][0] : predicted_span[0][-1] + 1]
                            span.add_label(label_name, value=predicted_span[2], score=predicted_span[1])

                    # token-labels can be added directly ("O" and legacy "_" predictions are skipped)
                    else:
                        for token, label in zip(sentence.tokens, sentence_predictions):
                            if label[0] in ["O", "_"]:
                                continue
                            token.add_label(typename=label_name, value=label[0], score=label[1])

                # all_tags will be empty if all_tag_prob is set to False, so the for loop will be avoided
                for sentence, sent_all_tags in zip(batch, all_tags):
                    for token, token_all_tags in zip(sentence.tokens, sent_all_tags):
                        token.add_tags_proba_dist(label_name, token_all_tags)

                store_embeddings(sentences, storage_mode=embedding_storage_mode)

        # BUGFIX: these returns were inside the batch loop, so only the first
        # mini-batch was ever processed; they must run after ALL batches.
        if return_loss:
            return overall_loss, label_count
        return None