from backend.experiment.framework.perplexity_calculator import \
    PerplexityCalculator
from backend.experiment.tokenizer import tokenizer
from backend.experiment.model import model
from backend.server import c3_samples, c3_task_handler, chid_samples, \
    chid_task_handler, afqmc_samples, afqmc_task_handler

import numpy as np
import re

# TCP port this backend server listens on.
server_port = 10525

# Location of the frontend allowed to talk to this backend (used to build
# the CORS / callback address below).
frontend_ip = 'www.test.com'
frontend_port = '3000'
# noinspection HttpUrlsUsage
frontend_address = f'http://{frontend_ip}:{frontend_port}'

# Default sampling parameters for free-form text generation.
# NOTE(review): temperature 2.0 is unusually high for LM sampling — presumably
# intentional for diversity; confirm against the model's tuning.
default_max_length = 256
default_temperature = 2.0
default_top_k = 10
default_top_p = 0.9

# Shared perplexity calculator wrapping the loaded language model; used both
# for ranking candidates and for scoring generated text.
ppl_calculator = PerplexityCalculator(model)


class C3Solver:
    """Answers C3 (multiple-choice reading comprehension) queries by
    ranking candidate answers with language-model perplexity."""

    @staticmethod
    def solve(request_id: int, query_index: int) -> (str, list):
        """Solve the preloaded C3 sample at ``query_index``.

        Args:
            request_id: Identifier of the incoming request (logging only).
            query_index: Index into the preloaded ``c3_samples`` list.

        Returns:
            ``(answer, confidences)`` where ``confidences`` is a softmax
            over the candidates' negative perplexities (sums to 1, same
            order as ``result["perplexities"]``).

        Raises:
            RuntimeError: If the task handler produced no answer.
        """
        sample = c3_samples[query_index]
        print(f'handling C3 query: request_id: {request_id}'
              f'text: {sample.text}, question: {sample.question},'
              f' answer: {sample.answer}',
              )
        for each in sample.task.candidates:
            print(f'candidate: {each.content}, prompt:\n'
                  f'{tokenizer.token_texts(each.prompt)}')
        result = c3_task_handler.handle(sample.task)
        answer = result["content"]
        ppls = result["perplexities"]
        # Validate before using the answer; raise (not assert) so the check
        # survives ``python -O``.
        if answer is None:
            raise RuntimeError('C3 task handler returned no answer')
        print(f'predict: {answer}, perplexity: {ppls}')
        # Softmax over negative perplexities.  Subtracting the minimum
        # perplexity first keeps np.exp from underflowing to all zeros for
        # large ppl values (which would make the normalisation divide by
        # zero); the shift cancels out in the normalised result.
        ppl_arr = np.asarray(ppls, dtype=float)
        confidence = np.exp(-(ppl_arr - ppl_arr.min()))
        confidence = confidence / np.sum(confidence)
        return answer, confidence.tolist()


class CHIDSolver:
    """Answers CHID (Chinese idiom cloze) queries by ranking candidate
    idioms with language-model perplexity."""

    @staticmethod
    def solve(request_id: int, query_index: int) -> (str, list):
        """Solve the preloaded CHID sample at ``query_index``.

        Args:
            request_id: Identifier of the incoming request (logging only).
            query_index: Index into the preloaded ``chid_samples`` list.

        Returns:
            ``(answer, confidences)`` where ``confidences`` is a softmax
            over the candidates' negative perplexities (sums to 1, same
            order as ``result["perplexities"]``).

        Raises:
            RuntimeError: If the task handler produced no answer.
        """
        sample = chid_samples[query_index]
        print(f'handling CHID query: request_id: {request_id}'
              f'text: {sample.text} answer: {sample.answer}',
              )
        for each in sample.task.candidates:
            print(f'candidate: {each.content}, prompt:\n'
                  f'{tokenizer.token_texts(each.prompt)}')
        result = chid_task_handler.handle(sample.task)
        answer = result["content"]
        ppls = result["perplexities"]
        # Validate before using the answer; raise (not assert) so the check
        # survives ``python -O``.
        if answer is None:
            raise RuntimeError('CHID task handler returned no answer')
        print(f'predict: {answer}, perplexity: {ppls}')
        # Softmax over negative perplexities.  Subtracting the minimum
        # perplexity first keeps np.exp from underflowing to all zeros for
        # large ppl values (which would make the normalisation divide by
        # zero); the shift cancels out in the normalised result.
        ppl_arr = np.asarray(ppls, dtype=float)
        confidence = np.exp(-(ppl_arr - ppl_arr.min()))
        confidence = confidence / np.sum(confidence)
        return answer, confidence.tolist()


class AFQMCSolver:
    """Answers AFQMC (sentence-pair semantic similarity) queries by
    ranking candidate labels with language-model perplexity."""

    @staticmethod
    def solve(request_id: int, query_index: int) -> (str, list):
        """Solve the preloaded AFQMC sample at ``query_index``.

        Args:
            request_id: Identifier of the incoming request (logging only).
            query_index: Index into the preloaded ``afqmc_samples`` list.

        Returns:
            ``(answer, confidences)`` where ``confidences`` is a softmax
            over the candidates' negative perplexities (sums to 1, same
            order as ``result["perplexities"]``).

        Raises:
            RuntimeError: If the task handler produced no answer.
        """
        sample = afqmc_samples[query_index]
        print(f'handling AFQMC query: request_id: {request_id}'
              f'sentence1: {sample.s1} sentence2: {sample.s2} '
              f'answer {sample.label}',
              )
        for each in sample.task.candidates:
            print(f'candidate: {each.content}, prompt:\n'
                  f'{tokenizer.token_texts(each.prompt)}')
        result = afqmc_task_handler.handle(sample.task)
        answer = result["content"]
        ppls = result["perplexities"]
        # Validate before using the answer; raise (not assert) so the check
        # survives ``python -O``.
        if answer is None:
            raise RuntimeError('AFQMC task handler returned no answer')
        print(f'predict: {answer}, perplexity: {ppls}')
        # Softmax over negative perplexities.  Subtracting the minimum
        # perplexity first keeps np.exp from underflowing to all zeros for
        # large ppl values (which would make the normalisation divide by
        # zero); the shift cancels out in the normalised result.
        ppl_arr = np.asarray(ppls, dtype=float)
        confidence = np.exp(-(ppl_arr - ppl_arr.min()))
        confidence = confidence / np.sum(confidence)
        return answer, confidence.tolist()


class TextGenerator:
    """Generates free-form text with the language model and post-processes
    the raw token stream into readable paragraphs."""

    # Punctuation accepted as a sentence terminator.
    # NOTE(review): the original listed ASCII '.' twice — presumably one
    # entry was meant to be another terminator ('！'/'？'); the duplicate
    # had no effect, so it is dropped here.  Confirm intent with the author.
    _END_PUNCTUATION = ('。', '，', '.', '\n')

    @classmethod
    def generate(
            cls,
            request_id: int,
            input_text: str, length: int = 1024,
            temperature: float = default_temperature,
            top_k: int = default_top_k,
            top_p: float = default_top_p,
            cut_duplicate_chapter: bool = True
    ) -> (str, float):
        """Generate a continuation of ``input_text``.

        Args:
            request_id: Identifier of the incoming request (logging only).
            input_text: Prompt text; may be empty, in which case generation
                starts from the tokenizer's sample-start token alone.
            length: Total target length in tokens (prompt + generation).
            temperature: Sampling temperature passed to the model.
            top_k: Top-k sampling cutoff passed to the model.
            top_p: Nucleus sampling cutoff passed to the model.
            cut_duplicate_chapter: If True, keep only the first generated
                chapter (see ``clean_output``).

        Returns:
            ``(text, perplexity)`` — the prompt plus the cleaned
            continuation, and the perplexity of the raw model output.
        """
        print(f'text generator generating request_id: {request_id}, '
              f'input_text: {input_text}')
        input_ids = tokenizer.tokenize(input_text)
        if len(input_ids) == 0:
            # Empty prompt: seed generation with the start token alone.
            # NOTE(review): assumes ``sample_start_token`` supports len()
            # like the array branch below — confirm against the tokenizer.
            input_ids = tokenizer.sample_start_token
        else:
            input_ids = np.insert(input_ids, 0, tokenizer.sample_start_token)
        output = model.generate(
            inputs=input_ids, generate_count=length - len(input_ids),
            temperature=temperature,
            top_k=top_k, top_p=top_p
        )
        ppl = ppl_calculator.perplexity(output)
        text = tokenizer.token_texts(output)
        clean_text = cls.clean_output(text, cut_duplicate_chapter)
        print(f'text generator generate: {text}, after cleaning:\n'
              f'{clean_text}\nperplexity: {ppl}')
        return input_text + clean_text, float(ppl)

    @staticmethod
    def clean_output(output_text: str,
                     cut_duplicate_chapter: bool = True) -> str:
        """Turn raw token text into readable output.

        Strips WordPiece joiners / spaces / unknown-token markers, maps
        special tokens to paragraph breaks, optionally keeps only the first
        chapter, and trims a trailing incomplete sentence.

        Args:
            output_text: Raw decoded token text from the model.
            cut_duplicate_chapter: If True, keep only the first chapter
                (text before the first [MASK]/[CLS] marker); otherwise join
                all chapters with newlines.

        Returns:
            The cleaned text; ends with '[TO_BE_CONTINUED]' when no
            sentence terminator could be found at all.
        """
        # Strip '##' WordPiece joiners, spaces and unknown-token markers.
        output_text = output_text.replace('##', '').replace(' ', '').replace(
            '[UNK]', '')
        # Collapse runs of [SEP] into a newline.
        output_text = re.sub(r'(\[SEP])+', '\n', output_text)
        # Normalise [MASK]/[CLS] to [SEP] and treat it as a chapter break.
        output_text = re.sub(r'(\[MASK]|\[CLS])+', '[SEP]', output_text)
        sep_texts = output_text.split('[SEP]')
        if cut_duplicate_chapter:
            # Several chapters were generated: keep only the first.
            output_text = sep_texts[0]
        else:
            output_text = '\n'.join(sep_texts)

        # Drop a trailing incomplete sentence: if the text does not already
        # end on a terminator, cut it back to the last one present.
        if not output_text.endswith(TextGenerator._END_PUNCTUATION):
            for comma in ('。', '，', '.'):
                head, sep, _tail = output_text.rpartition(comma)
                if sep:
                    output_text = head + sep
                    break
            else:
                # One big unterminated sentence — nothing to trim to, so
                # mark the output as cut off mid-generation.
                output_text += '[TO_BE_CONTINUED]'
        return output_text
