#!/usr/bin/env python3

from concurrent import futures
import logging

import grpc
import re

import generated.markov_backend_pb2 as markov_backend_pb2
import generated.markov_backend_pb2_grpc as markov_backend_pb2_grpc

from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
import torch.nn.functional as F


# python -m grpc_tools.protoc -I./proto --python_out=./backend/generated/proto --pyi_out=./backend/generated/proto --grpc_python_out=./backend/generated/proto ./proto/markov_backend.proto
# python3 -m mypy backend/markov_gpt2.py

class Continuation(markov_backend_pb2_grpc.ContinuationServicer):
    """gRPC servicer exposing GPT-2 next-token probabilities.

    The service keeps one running prompt (``input_txt``): clients Reset it,
    repeatedly fetch next-token probability lists, and append their chosen
    token via SetNextWord.

    NOTE(review): the prompt is shared mutable state across all clients of
    this server instance — fine for a single-user demo, not for concurrent
    clients.
    """

    # Tokens whose decoded text is known to break re-tokenizing the prompt
    # (end-of-text marker, newlines, odd unicode) are skipped when building
    # the probability list. Compiled once instead of on every request.
    # TODO better handling of "<|endoftext|>" and anything else that could
    # break re-tokenizing (how can I know? it's a black box)
    _BAD_TOKEN_RE = re.compile(r'(?:<\|endoftext\|>|[\n]|[ㅋ]|\u00a0|\?\?)', re.UNICODE)

    def __init__(self):
        # Loads (and on first run downloads) the pretrained GPT-2 weights
        # and matching tokenizer.
        self.model = GPT2LMHeadModel.from_pretrained('gpt2')
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        # Fix: start with an empty prompt so GetProbabilities/SetNextWord
        # no longer raise AttributeError when called before Reset.
        self.input_txt = ""

    def Reset(self, request, context):
        """Replace the running prompt with ``request.start``."""
        print("reset to " + request.start)

        self.input_txt = request.start

        # Fix: a unary gRPC handler must return a response message;
        # returning None makes grpc fail when serializing the reply.
        # NOTE(review): assumes Reset's response type is OKResponse, as for
        # SetNextWord — confirm against markov_backend.proto.
        return markov_backend_pb2.OKResponse()

    def Tokenize(self, request, context):
        """Tokenize ``request.wholestring`` and return the decoded text of
        each token as a TokenlistResponse."""
        encoded_input = self.tokenizer(request.wholestring, return_tensors="pt")
        tokens = [self.tokenizer.decode(token_id)
                  for token_id in encoded_input.input_ids[0]]
        return markov_backend_pb2.TokenlistResponse(tokens=tokens)

    def GetProbabilities(self, request, context):
        """Return up to 10 (prob, token) candidates for the next token.

        Runs GPT-2 on the current prompt, softmaxes the logits of the last
        position, and walks candidates in descending probability, skipping
        tokens that would not survive re-tokenization.
        """
        encoded_input = self.tokenizer(self.input_txt, return_tensors='pt')
        # Inference only — no_grad skips building the autograd graph
        # (identical outputs, less memory/time).
        with torch.no_grad():
            outputs = self.model(**encoded_input)

        # Distribution over the vocabulary at the last position of the
        # (single) input sequence.
        next_token_logits = outputs.logits[0, -1, :]
        next_token_probs = torch.softmax(next_token_logits, dim=-1)
        sorted_ids = torch.argsort(next_token_probs, dim=-1, descending=True)

        choices_per_step = 10
        entries = []

        # Fix: keep scanning past filtered tokens so the response still
        # carries choices_per_step entries whenever possible (previously a
        # filtered token simply shrank the returned list — the old
        # "and add one more entry?" TODO).
        for token_id in sorted_ids:
            if len(entries) >= choices_per_step:
                break

            token = self.tokenizer.decode(token_id)

            if self._BAD_TOKEN_RE.match(token):
                print("not using '%s'" % token.replace("\n", "\\n"))
                continue

            entries.append(
                markov_backend_pb2.ProbAndToken(
                    # .item() turns the 0-d tensor into a plain Python float
                    # before it is stored in the protobuf field.
                    prob = next_token_probs[token_id].item(),
                    token = token))

        print(entries)

        return markov_backend_pb2.ProbabilitiesResponse(
            entries = entries
        )

    def SetNextWord(self, request, context):
        """Append the client-chosen token to the running prompt."""
        print("next: '%s'" % request.token)

        self.input_txt += request.token

        return markov_backend_pb2.OKResponse()


def serve():
    """Build the gRPC server, register the Continuation service on port
    50051, and block until the server terminates."""
    port = "50051"
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    markov_backend_pb2_grpc.add_ContinuationServicer_to_server(
        Continuation(), grpc_server)
    grpc_server.add_insecure_port("[::]:" + port)
    grpc_server.start()
    print("Server started, listening on " + port)
    grpc_server.wait_for_termination()


if __name__ == "__main__":
    # Set up default logging handlers, then run the blocking server loop.
    logging.basicConfig()
    serve()
