import json
import re
from abc import ABCMeta
from threading import Lock
from typing import Awaitable, Optional, Tuple

import numpy as np
import tornado.ioloop
import tornado.web

from backend.server.implement import frontend_address, frontend_port, \
    TextGenerator, \
    default_temperature, default_top_k, default_top_p, \
    default_max_length, C3Solver, CHIDSolver, ppl_calculator, tokenizer, \
    AFQMCSolver
from backend.server import c3_samples, chid_samples, afqmc_samples
from backend.server.afqmc import AFQMCImplement
from backend.experiment.tokenizer import id2word
from backend.experiment.model import model


class RequestIdCounter:
    """Thread-safe source of monotonically increasing request ids."""

    def __init__(self):
        self.__lock = Lock()  # guards __counter
        self.__counter = 0    # next id to hand out

    def new_request_id(self) -> int:
        """Return the next unique request id (0, 1, 2, ...).

        Uses the lock as a context manager so it is released even if an
        exception is raised while it is held (the original manual
        acquire/release pair was not exception-safe).
        """
        with self.__lock:
            res = self.__counter
            self.__counter += 1
            return res


# Module-level singleton shared by every request handler.
request_id_counter = RequestIdCounter()


class BaseHandler(tornado.web.RequestHandler, metaclass=ABCMeta):
    """Base class for all handlers; centralizes the CORS origin policy."""

    def allow_my_origin(self):
        """Set CORS response headers when the request's Origin is trusted.

        An origin is trusted when it is one of the explicit frontend
        addresses, a sub-domain of the whitelisted domains, or a host on
        the local network / loopback.
        """
        allow_list = [
            f'http://localhost:{frontend_port}',
            f'http://127.0.0.1:{frontend_port}',
            frontend_address,
        ]
        origin = self.request.headers.get('Origin')
        if origin is None:
            return
        # Whitelisted domains. Dots are escaped and the pattern is anchored
        # at the end (optionally after a port), so a hostile origin such as
        # "http://a.domain0.com.evil.com" no longer matches.
        domain_match = re.match(
            r".+\.(domain0\.com|domain1\.com)(:\d+)?$", origin)
        # Local network and loopback. The Origin header always starts with a
        # scheme ("http://..."), so the host pattern must come after it; the
        # original pattern anchored the bare IP at the string start and could
        # never match a real Origin header.
        lan_match = re.match(
            r"^https?://(192\.168\.1\.|127\.0\.0\.1|192\.168\.2\.)", origin)
        if domain_match or lan_match or origin in allow_list:
            # Echo back the specific trusted origin rather than "*".
            self.set_header("Access-Control-Allow-Origin", origin)
            self.set_header("Access-Control-Allow-Headers",
                            "x-requested-with")
            self.set_header('Access-Control-Allow-Methods',
                            'POST, GET, OPTIONS')

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        """Log streamed request-body chunks; no streaming upload is handled."""
        print(f'{type(self)} data_received: {chunk.decode("utf8")}')
        return

    def set_default_headers(self) -> None:
        # Applied to every response, including auto-generated error pages.
        self.allow_my_origin()


class IndexHandler(BaseHandler):
    """Fallback handler for the root path."""

    def get(self):
        message = 'please access correct interface'
        self.write(message)


class BaseGenerateHandler(BaseHandler):
    """Serves generation defaults (GET) and parses generation parameters."""

    def get(self):
        """Return the default generation parameters as JSON."""
        self.write(json.dumps({
            'max_length': default_max_length,
            'temperature': default_temperature,
            'top_k': default_top_k,
            'top_p': default_top_p,
        }, ensure_ascii=False))

    def _get_generate_params(self) -> Tuple[int, float, int, float]:
        """Read (max_length, temperature, top_k, top_p) from the query string.

        A missing parameter falls back to the module-level default; a value
        that cannot be converted to the expected type raises HTTP 400.

        Note: the original return annotation was the tuple literal
        ``(int, float, int, float)``, which is not a valid type annotation.
        """
        res = []
        for param_name, default, p_type in [
            ('max_length', default_max_length, int),
            ('temperature', default_temperature, float),
            ('top_k', default_top_k, int),
            ('top_p', default_top_p, float),
        ]:
            try:
                res.append(p_type(self.get_query_argument(param_name)))
            except tornado.web.MissingArgumentError:
                res.append(default)
            except ValueError:
                raise tornado.web.HTTPError(400, f'{param_name} must be type: '
                                                 f'{p_type}')
        return tuple(res)


class GenerateSampleHandler(BaseGenerateHandler):
    """Runs free-form text generation for a user-supplied prompt."""

    def get(self):
        prompt = str(self.get_query_argument('prompt'))
        r_id = request_id_counter.new_request_id()
        self.handle_generate(prompt, r_id)

    def handle_generate(self, prompt: str, request_id: int):
        """Generate a continuation of `prompt` and write the JSON response.

        The optional `length` query argument is clamped to `max_length`.
        A non-integer `length` now raises HTTP 400 instead of surfacing as
        an HTTP 500, matching the validation style of
        `_get_generate_params`.
        """
        max_length, temperature, top_k, top_p = self._get_generate_params()
        try:
            length = int(self.get_query_argument('length'))
        except tornado.web.MissingArgumentError:
            length = max_length
        except ValueError:
            raise tornado.web.HTTPError(400, 'length must be type: int')
        length = min(max_length, length)
        print(
            f'handling generative task: prompt:\n{prompt}\n'
            f'length: {length}, temperature: {temperature},'
            f' top_k: {top_k}, top_p: {top_p}')
        # Newlines are mapped to the model's separator token — presumably the
        # model was trained with '[SEP]' as the line break; confirm upstream.
        generated, ppl = TextGenerator.generate(
            request_id,
            prompt.replace('\n', '[SEP]'),
            length=length,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            cut_duplicate_chapter=False,
        )
        # Replace the internal end-of-sample marker with a user-facing one.
        generated = generated.replace('[TO_BE_CONTINUED]', '。。。')

        print(f'generative task for prompt:\n{prompt}\n'
              f'length: {length}, temperature: {temperature},'
              f' top_k: {top_k}, top_p: {top_p}\n'
              f'result: \n{generated}')
        self.write(json.dumps({
            'generated': generated,
            'perplexity': ppl
        }, ensure_ascii=False))


class C3Handler(BaseHandler):
    """Runs the C3 reading-comprehension solver on a stored sample."""

    def get(self):
        sample_index = int(self.get_query_argument('index'))
        req_id = request_id_counter.new_request_id()
        answer, confidences = C3Solver.solve(req_id, sample_index)
        payload = {'predict': answer, 'confidences': confidences}
        self.write(json.dumps(payload, ensure_ascii=False))


class C3ExampleHandler(BaseHandler):
    """Serves stored C3 samples; without `index`, returns the sample count."""

    def get(self):
        try:
            index = int(self.get_query_argument('index'))
            sample = c3_samples[index]
        except tornado.web.MissingArgumentError:
            # No index supplied: report how many samples are available.
            self.write(json.dumps(
                {'samples': len(c3_samples)}, ensure_ascii=False))
            return
        except (ValueError, IndexError):
            # A non-integer or out-of-range index used to surface as an
            # HTTP 500; reject it as a client error instead.
            raise tornado.web.HTTPError(
                400, 'index must be an integer within the sample range')
        self.write(json.dumps(
            {
                'text': sample.text,
                'candidates': sample.candidates,
                'answer': sample.answer,
                'question': sample.question
            },
            ensure_ascii=False)
        )


class CHIDHandler(BaseHandler):
    """Runs the CHID idiom-cloze solver on a stored sample."""

    def get(self):
        sample_index = int(self.get_query_argument('index'))
        req_id = request_id_counter.new_request_id()
        answer, confidences = CHIDSolver.solve(req_id, sample_index)
        payload = {'predict': answer, 'confidences': confidences}
        self.write(json.dumps(payload, ensure_ascii=False))


class CHIDExampleHandler(BaseHandler):
    """Serves stored CHID samples; without `index`, returns the count."""

    def get(self):
        raw_index = self.get_query_argument('index', None)
        if raw_index is None:
            # No index supplied: report how many samples are available.
            self.write(json.dumps(
                {'samples': len(chid_samples)}, ensure_ascii=False))
            return
        sample = chid_samples[int(raw_index)]
        body = {
            'text': sample.text,
            'candidates': sample.candidates,
            'answer': sample.answer,
        }
        self.write(json.dumps(body, ensure_ascii=False))


class AFQMCHandler(BaseHandler):
    """Runs the AFQMC sentence-pair solver on a stored sample."""

    def get(self):
        sample_index = int(self.get_query_argument('index'))
        req_id = request_id_counter.new_request_id()
        answer, confidences = AFQMCSolver.solve(req_id, sample_index)
        payload = {'predict': answer, 'confidences': confidences}
        self.write(json.dumps(payload, ensure_ascii=False))


class AFQMCExampleHandler(BaseHandler):
    """Serves stored AFQMC samples; without `index`, returns the count."""

    def get(self):
        try:
            idx = int(self.get_query_argument('index'))
        except tornado.web.MissingArgumentError:
            # No index supplied: report how many samples are available.
            self.write(json.dumps(
                {'samples': len(afqmc_samples)}, ensure_ascii=False))
            return
        sample = afqmc_samples[idx]
        body = {
            'sentence1': sample.s1,
            'sentence2': sample.s2,
            'candidates': AFQMCImplement.label_map(),
            'answer': sample.label,
        }
        self.write(json.dumps(body, ensure_ascii=False))


class PerplexityCalculationHandler(BaseHandler):
    """Computes the model perplexity of a user-supplied text."""

    def get(self):
        text = self.get_query_argument('text')
        # Prepend the sample-start token before scoring, mirroring how the
        # generation pipeline builds its inputs.
        token_ids = np.insert(
            tokenizer.tokenize(text), 0, tokenizer.sample_start_token)
        ppl = ppl_calculator.perplexity(token_ids)
        print(f'PerplexityCalculationHandler: {text} ppl: {ppl}')
        response = {'perplexity': ppl}
        self.write(json.dumps(response, ensure_ascii=False))


class TrueTextPredictionHandler(BaseHandler):
    """For each position of a text, reports the probability and rank the
    model assigned to the true next character, plus the model's top-n
    alternative predictions."""

    @staticmethod
    def _to_prob_matrix(raw_output) -> np.ndarray:
        """Normalize model output into a 2-D (position, vocab) probability
        matrix.

        Applies a numerically stable softmax (row max subtracted before
        exponentiation) when the model emits raw logits; the original
        ``exp(logits)`` without the shift could overflow for large logits.
        """
        if model.output_logits:
            shifted = raw_output - np.max(raw_output, axis=-1, keepdims=True)
            exp = np.exp(shifted)
            probs = np.squeeze(exp / np.sum(exp, axis=-1, keepdims=True))
        else:
            probs = np.squeeze(raw_output)
        if probs.ndim == 1:
            # A single prediction position squeezes to 1-D; restore rank 2.
            probs = probs.reshape((1, -1))
        return probs

    def get(self):
        text = self.get_query_argument('text')
        top_n = int(self.get_query_argument('top_n'))
        print(f'TrueTextPredictionHandler: {text}')
        inputs = np.insert(
            tokenizer.tokenize(text), 0,
            tokenizer.sample_start_token).reshape((1, -1))
        input_len = inputs.shape[1]
        if input_len >= model.context_length:
            # The original used `assert`, which is stripped under -O and
            # surfaced as HTTP 500; reject over-long input explicitly.
            raise tornado.web.HTTPError(
                400, 'text too long for model context')
        # The model takes rank-2 input (batch, sequence).
        output_prob = self._to_prob_matrix(model.predict(inputs))

        predictions = []
        for i in range(1, input_len):
            token_id = inputs[0][i]
            probs = output_prob[i - 1]
            probs_argsort = probs.argsort()[::-1]
            # Rank of the true character among all vocabulary entries.
            token_id_rank = np.where(probs_argsort == token_id)[0][0]
            prediction = [{
                'word': text[i - 1],
                'possibility': str(probs[token_id]),
                'rank': int(token_id_rank) + 1
            }]
            for j in range(top_n):
                candidate_id = probs_argsort[j]
                prediction.append({
                    'word': id2word[candidate_id],
                    'possibility': str(probs[candidate_id]),
                    'rank': j + 1
                })
            predictions.append({
                'word': text[i - 1],
                'prediction': prediction
            })

        self.write(json.dumps(predictions, ensure_ascii=False))


class FixTextHandler(BaseHandler):
    """Fills '[MASK]' blanks in a text, choosing for each blank the option
    the language model deems most probable given the text so far."""

    def post(self):
        text = self.get_argument('text')
        print(f'FixTextHandler got argument text: {text}')
        if len(text) == 0:
            self.write(json.dumps({'error': 'empty task'}, ensure_ascii=False))
            return
        if '[MASK]' not in text:
            # Nothing to fill in: echo the text back unchanged.
            self.write(json.dumps({'text': text}, ensure_ascii=False))
            return
        options = self.get_argument('options')
        print(f'FixTextHandler got argument options: {options}')
        if len(options) == 0:
            self.write(
                json.dumps({'error': 'empty options'}, ensure_ascii=False))
            return
        try:
            options = json.loads(options)
        except json.decoder.JSONDecodeError:
            self.write(json.dumps(
                {'error': 'options must be a json string'},
                ensure_ascii=False
            ))
            return
        if not isinstance(options, list):
            self.write(json.dumps(
                {'error': 'options type must be List[List[Str]]'},
                ensure_ascii=False
            ))
            return
        split = text.split('[MASK]')
        assert len(split) > 1
        if len(options) != len(split) - 1:
            self.write(json.dumps(
                {'error': 'options len must match number of text blanks.'
                          f'options len: {len(options)}'
                          f', blanks: {len(split) - 1}'},
                ensure_ascii=False
            ))
            # BUG FIX: the original fell through here and kept processing
            # after reporting the error (and omitted ensure_ascii=False).
            return
        # Shallow validation of the first entry only (kept from original).
        if not (isinstance(options[0], list) and len(options[0]) > 0):
            self.write(json.dumps(
                {'error': 'options type must be List[List[Str]]'},
                ensure_ascii=False
            ))
            return
        if not (isinstance(options[0][0], str)):
            self.write(json.dumps(
                {'error': 'options type must be List[List[Str]]'},
                ensure_ascii=False
            ))
            return

        details = []
        chosen = ''
        text = ''
        for i in range(len(split) - 1):
            # Re-score the text accumulated so far, including the option
            # chosen for the previous blank.
            text += chosen + split[i]
            # Prefix the sample-start token via np.insert, consistent with
            # the other handlers (the original np.concatenate would raise if
            # the start token is a scalar — confirm token type upstream).
            token_ids = np.insert(
                tokenizer.tokenize(text), 0, tokenizer.sample_start_token)
            raw_output = model.predict(token_ids.reshape((1, -1)))
            if model.output_logits:
                # Numerically stable softmax (subtract row max before exp).
                shifted = raw_output - np.max(
                    raw_output, axis=-1, keepdims=True)
                exp = np.exp(shifted)
                output_prob = np.squeeze(
                    exp / np.sum(exp, axis=-1, keepdims=True))
            else:
                output_prob = np.squeeze(raw_output)
            if output_prob.ndim == 1:
                output_prob = output_prob.reshape((1, -1))
            # Score each option by the probability of its token at the last
            # position. NOTE(review): int(tokenize(option)) assumes every
            # option maps to exactly one token id — confirm upstream.
            sorted_options_with_prob = sorted([
                (each, float(output_prob[-1][int(tokenizer.tokenize(each))]))
                for each in options[i]
            ], key=lambda x: x[1], reverse=True)
            details.append({
                'chosen': sorted_options_with_prob[0][0],
                'full': sorted_options_with_prob
            })
            chosen = sorted_options_with_prob[0][0]

        text += chosen + split[-1]

        res = json.dumps({'text': text, 'details': details}, ensure_ascii=False)
        print(f'FixTextHandler returns: {res}')

        self.write(res)
