import json
import re
from types import SimpleNamespace

import numpy as np
import torch
from flask import make_response
from torch.nn import functional as F

from .utils import text_dispose, sample_logits
from mlwrite.config import MODEL_DICT, DEVICE
from app.write.model import GPT, GPTConfig


def generate_start_init(params):
    """Generate ``size`` text continuations using a model cached in MODEL_DICT.

    Args:
        params: request dict with keys:
            'pre'          -- seed text to continue from
            'generate_len' -- minimum number of characters to generate per sample
            'label_id'     -- key into MODEL_DICT selecting the model bundle
            'size'         -- number of independent samples to produce

    Returns:
        Flask response whose JSON body is
        ``{"code": 0, "message": "success", "data": [<sample>, ...]}``.
    """
    pre_context = params['pre']
    generate_len = params['generate_len']
    label_id = params['label_id']
    size = params['size']
    context = text_dispose(pre_context)  # strip spaces and newlines
    model_dict = MODEL_DICT[label_id]
    # Punctuation marks that may terminate a sample once generate_len is
    # reached; compiled once here instead of re-matching a raw pattern string
    # for every generated character.
    stop_punct = re.compile(
        r'(\n|\.|\,|\?|\!|\;|\-|\—|\||\'|\"|\。|\，|\、|\！|\`|\)|\]|\}|\+|\*|\\|\/|\~|\&|\$|\#|\%)'
    )
    data_list = []
    for _ in range(size):
        # Map each seed character to its token id; unseen chars fall back to
        # the model's unknown-character token.
        x = np.array(
            [model_dict['train_dataset'].stoi.get(s, model_dict['unknown_char']) for s in context],
            dtype=np.int64)
        real_len = len(x)
        print_begin = 0
        out_txt = ''
        flag = 0  # number of characters generated so far
        while True:
            if flag == 0:
                # Echo the seed context once to the console, indented.
                print(('-' * 60) + '\n' + context.replace('\n', '\n  ').strip('\n'), end='')
                print_begin = real_len
            with torch.no_grad():
                # Feed at most the last ctx_len tokens to the model.
                xxx = torch.tensor(x[-model_dict['ctx_len']:], dtype=torch.long)[None, ...]
                if model_dict['device'] == 'gpu' and torch.cuda.is_available():
                    xxx = xxx.cuda()
                out, _ = model_dict['model'](xxx)
                # Never sample the unknown-character token.
                out[:, :, model_dict['unknown_char']] = -float('Inf')
            # Index of the logits for the next token inside the window.
            pos = -1 if real_len >= model_dict['ctx_len'] else real_len - 1
            # After a newline, use the stricter newline top-p so fresh lines
            # start more conservatively.
            if model_dict['train_dataset'].itos[int(x[real_len - 1])] == '\n':
                char = sample_logits(out, pos, temperature=1.0, top_p=model_dict['top_p_newline'])
            else:
                char = sample_logits(out, pos, temperature=1.0, top_p=model_dict['top_p'])
            x = np.append(x, char)
            real_len += 1
            completion = ''.join(
                model_dict['train_dataset'].itos[int(i)] for i in x[print_begin:real_len])
            out_txt += completion
            flag += 1
            print(completion.replace('\n', '\n  '), end='', flush=True)
            print_begin = real_len
            # Stop once enough characters were produced AND the latest chunk
            # starts with terminating punctuation (cheap counter checked first).
            if flag > generate_len and stop_punct.match(completion) is not None:
                break

        data_list.append(out_txt)
    result = json.dumps({
        "code": 0,
        "message": 'success',
        'data': data_list
    }, ensure_ascii=False)
    response = make_response(result, 200)
    response.headers['Content-Type'] = 'application/json'
    return response


def generate_start_un_init(params, module):
    """Generate ``size`` text continuations, loading the model from disk first.

    Unlike ``generate_start_init``, nothing is cached in MODEL_DICT: the
    vocabulary and the model weights are read from the paths on ``module``
    for every request.

    Args:
        params: request dict with keys 'pre', 'generate_len', 'label_id', 'size'.
        module: object exposing ``module_path`` (model weights file),
            ``vocab_path`` (utf-16 JSON vocabulary) and ``ctx_len``
            (context window length).

    Returns:
        Flask response whose JSON body is
        ``{"code": 0, "message": "success", "data": [<sample>, ...]}``.
    """
    pre_context = params['pre']
    generate_len = params['generate_len']
    label_id = params['label_id']
    size = params['size']
    model_name = module.module_path
    word_name = module.vocab_path
    ctx_len = module.ctx_len  # context window length
    n_layer = 12  # number of transformer layers
    n_head = 12
    n_embd = n_head * 64
    n_attn = n_embd
    n_ffn = n_embd
    # top_p in [0, 1]: larger -> more varied output, smaller -> more regular.
    top_p = 0.75
    top_p_newline = 0.9
    # Load the vocabulary that matches this model.
    with open(word_name, "r", encoding="utf-16") as result_file:
        word_table = json.load(result_file)
    vocab_size = len(word_table)
    # Attribute bag holding the two token <-> id mappings (replaces the
    # former ``lambda: None`` hack, PEP 8 E731).
    train_dataset = SimpleNamespace()
    train_dataset.stoi = {v: int(k) for k, v in word_table.items()}
    train_dataset.itos = {int(k): v for k, v in word_table.items()}
    # Token id used to replace characters missing from the vocabulary.
    unknown_char = train_dataset.stoi['.']
    model = GPT(
        GPTConfig(vocab_size, ctx_len, n_layer=n_layer, n_head=n_head, n_embd=n_embd, n_attn=n_attn,
                  n_ffn=n_ffn))
    # NOTE(review): torch.load unpickles arbitrary objects -- model_name must
    # come from a trusted location, never from user input.
    m2 = torch.load(model_name, map_location='cpu').state_dict()
    # Pre-compute each attention block's combined 'time_ww' weight from the
    # stored time_w / time_alpha / time_beta factors, then drop the factors
    # so the state dict matches the GPT module's expected keys.
    for i in range(n_layer):
        prefix = f'blocks.{i}.attn.'
        time_w = m2[prefix + 'time_w']
        time_alpha = m2[prefix + 'time_alpha']
        time_beta = m2[prefix + 'time_beta']

        TT = ctx_len
        T = ctx_len
        w = F.pad(time_w, (0, TT))
        w = torch.tile(w, [TT])
        w = w[:, :-TT].reshape(-1, TT, 2 * TT - 1)
        w = w[:, :, TT - 1:]
        w = w[:, :T, :T] * time_alpha[:, :, :T] * time_beta[:, :T, :]

        m2[prefix + 'time_ww'] = w
        del m2[prefix + 'time_w']
        del m2[prefix + 'time_alpha']
        del m2[prefix + 'time_beta']
    if DEVICE == 'gpu' and torch.cuda.is_available():
        model = model.cuda()
    model.load_state_dict(m2)
    context = text_dispose(pre_context)  # strip spaces and newlines
    # Punctuation marks that may terminate a sample; compiled once instead of
    # re-matching a raw pattern string for every generated character.
    stop_punct = re.compile(
        r'(\n|\.|\,|\?|\!|\;|\-|\—|\||\'|\"|\。|\，|\、|\！|\`|\)|\]|\}|\+|\*|\\|\/|\~|\&|\$|\#|\%)'
    )
    data_list = []
    for _ in range(size):
        # Map each seed character to its token id; unseen chars fall back to
        # the unknown-character token.
        x = np.array([train_dataset.stoi.get(s, unknown_char) for s in context],
                     dtype=np.int64)
        real_len = len(x)
        print_begin = 0
        out_txt = ''
        flag = 0  # number of characters generated so far
        while True:
            if flag == 0:
                # Echo the seed context once to the console, indented.
                print(('-' * 60) + '\n' + context.replace('\n', '\n  ').strip('\n'), end='')
                print_begin = real_len
            with torch.no_grad():
                # Feed at most the last ctx_len tokens to the model.
                xxx = torch.tensor(x[-ctx_len:], dtype=torch.long)[None, ...]
                if DEVICE == 'gpu' and torch.cuda.is_available():
                    xxx = xxx.cuda()
                out, _ = model(xxx)
                # Never sample the unknown-character token.
                out[:, :, unknown_char] = -float('Inf')
            # Index of the logits for the next token inside the window.
            pos = -1 if real_len >= ctx_len else real_len - 1
            # After a newline, use the stricter newline top-p so fresh lines
            # start more conservatively.
            if train_dataset.itos[int(x[real_len - 1])] == '\n':
                char = sample_logits(out, pos, temperature=1.0, top_p=top_p_newline)
            else:
                char = sample_logits(out, pos, temperature=1.0, top_p=top_p)
            x = np.append(x, char)
            real_len += 1
            completion = ''.join(train_dataset.itos[int(i)] for i in x[print_begin:real_len])
            out_txt += completion
            flag += 1
            print(completion.replace('\n', '\n  '), end='', flush=True)
            print_begin = real_len
            # Stop once enough characters were produced AND the latest chunk
            # starts with terminating punctuation (cheap counter checked first).
            if flag > generate_len and stop_punct.match(completion) is not None:
                break

        data_list.append(out_txt)
    result = json.dumps({
        "code": 0,
        "message": 'success',
        'data': data_list
    }, ensure_ascii=False)
    response = make_response(result, 200)
    response.headers['Content-Type'] = 'application/json'
    return response
