# -*- coding: utf-8 -*-

"""
需 pytorch 1.9.x 及以上版本
AI人工智障写作 - https://github.com/BlinkDL/AI-Writer
gpu：只支持 nvidia 显卡，速度最快，需 cuda+cudnn
dml：支持 amd / intel / nvidia 显卡，需不同模型，需 pip install onnxruntime-directml 然后在 run.py 和 server.py 设置为 dml 模式
cpu：没显卡就选它，但也用 nvidia 卡的模型

"""

import sys, os
# Path bootstrap: make both the project root and this file's directory
# importable (so `import src.utils` / `from src.model import ...` resolve
# when this file is run as a script rather than as an installed package).
# NOTE(review): '../' is relative to the *current working directory*, not to
# this file — it only helps when the script is launched from a subdirectory
# of the project; the two absolute appends below are the reliable ones.
sys.path.insert(0, '../')
curPath = os.path.abspath(os.path.dirname(__file__))  # directory containing this file
rootPath = os.path.split(curPath)[0]  # its parent, assumed to be the project root
sys.path.append(rootPath)
sys.path.append(curPath)


import numpy as np
import math, json
import torch
import torch.nn as nn
from torch.nn import functional as F
import datetime
import time
import src.utils
from src.model import GPT, GPTConfig

def writerAI(kaitou, cishu=1, zishu=20, lingmin=0.05, chakan_changdu_dayu=512,
             run_device='dml',
             model_name='model/wangwen-2022-01-09',
             word_name='model/wangwen-2022-01-09'):
    """Generate continuations of a story opening with the AI-Writer model.

    Parameters
    ----------
    kaitou : str
        Opening text (prompt) to continue.
    cishu : int
        Number of independent generation runs.
    zishu : int
        Number of characters to generate per run.
    lingmin : float
        ``min_p_ratio`` in [0, 1]; larger values give more conservative
        output, smaller values more varied output.
    chakan_changdu_dayu : int
        Context window: the model only reads the last this-many characters.
    run_device : str
        'gpu' (CUDA), 'dml' (DirectML via onnxruntime) or 'cpu'.
    model_name, word_name : str
        Model-weight / vocabulary paths relative to this file, without the
        '.pth'/'.onnx'/'.json' extension.

    Returns
    -------
    dict
        ``{'title': <summary string>, 'data': [<generated text per run>]}``.
    """
    NUM_OF_RUNS = cishu
    LENGTH_OF_EACH = zishu
    min_p_ratio = lingmin
    context = kaitou
    ctx_len = chakan_changdu_dayu  # model only sees the last ctx_len chars

    RUN_DEVICE = run_device
    MODEL_NAME = model_name
    WORD_NAME = word_name

    # All model/vocab paths are resolved relative to this source file,
    # not the current working directory.
    base_dir = os.path.abspath(os.path.dirname(__file__))

    dataform = {}  # collected output returned to the caller

    # Network geometry — must match the checkpoint being loaded.
    n_layer = 12
    n_head = 12
    n_embd = n_head * 64
    n_attn = n_embd
    n_ffn = n_embd

    curr_time = datetime.datetime.now()

    # Normalize the prompt: strip each line (including ideographic spaces,
    # U+3000) and force a leading newline, the form the model was trained on.
    stripped_lines = [ln.strip().strip('\u3000') for ln in context.strip().split('\n')]
    context = '\n' + ('\n'.join(stripped_lines)).strip()

    dataform['title'] = '传入' + str(len(context)) + ' 个字，读取限制：' + str(ctx_len) + ' 个字。'

    # Vocabulary file maps token id (as a string) -> character, UTF-16 encoded.
    with open(base_dir + os.sep + WORD_NAME + '.json', "r", encoding="utf-16") as result_file:
        word_table = json.load(result_file)

    vocab_size = len(word_table)

    stoi = {v: int(k) for k, v in word_table.items()}  # char -> token id
    itos = {int(k): v for k, v in word_table.items()}  # token id -> char
    UNKNOWN_CHAR = stoi['\ue083']  # private-use placeholder for out-of-vocab chars

    if RUN_DEVICE == 'dml':
        # DirectML inference through onnxruntime (AMD/Intel/NVIDIA).
        import onnxruntime as rt
        sess_options = rt.SessionOptions()
        sess_options.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_ALL
        sess_options.execution_mode = rt.ExecutionMode.ORT_SEQUENTIAL
        sess_options.enable_mem_pattern = False
        rt_session = rt.InferenceSession(base_dir + os.sep + MODEL_NAME + '.onnx',
                                         sess_options=sess_options)
        rt_session.set_providers(['DmlExecutionProvider'])
    else:
        # PyTorch inference on CUDA ('gpu') or CPU.
        model = GPT(GPTConfig(vocab_size, ctx_len, n_layer=n_layer, n_head=n_head,
                              n_embd=n_embd, n_attn=n_attn, n_ffn=n_ffn))
        m2 = torch.load(base_dir + os.sep + MODEL_NAME + '.pth',
                        map_location='cpu').state_dict()
        # Pre-compute each layer's combined time-mixing matrix 'time_ww' from
        # the raw checkpoint tensors, then drop the raw tensors so the state
        # dict matches the model's expected keys.
        for layer in range(n_layer):
            prefix = f'blocks.{layer}.attn.'
            time_w = m2[prefix + 'time_w']
            time_alpha = m2[prefix + 'time_alpha']
            time_beta = m2[prefix + 'time_beta']
            mask = m2[prefix + 'mask']

            TT = ctx_len
            T = ctx_len
            # Pad/tile/reshape turns the 1-D time_w into a (heads, T, T)
            # shifted (Toeplitz-style) matrix; alpha/beta rescale rows and
            # columns, and the causal mask zeroes the future positions.
            w = F.pad(time_w, (0, TT))
            w = torch.tile(w, [TT])
            w = w[:, :-TT].reshape(-1, TT, 2 * TT - 1)
            w = w[:, :, TT - 1:]
            w = w[:, :T, :T] * time_alpha[:, :, :T] * time_beta[:, :T, :]
            w = w.masked_fill(mask[:T, :T] == 0, 0)

            m2[prefix + 'time_ww'] = w
            for raw_key in ('time_w', 'time_alpha', 'time_beta', 'mask'):
                del m2[prefix + raw_key]
        if RUN_DEVICE == 'gpu':
            model = model.cuda()
        model.load_state_dict(m2)

    dataform['data'] = []
    for run in range(NUM_OF_RUNS):
        # Encode the prompt; unseen characters map to the UNKNOWN token.
        x = np.array([stoi.get(s, UNKNOWN_CHAR) for s in context], dtype=np.int64)

        real_len = len(x)
        print_begin = 0
        printStr = ''
        for i in range(LENGTH_OF_EACH):
            if i == 0:
                # Only the newly generated text is collected, not the prompt.
                print_begin = real_len

            with torch.no_grad():
                if RUN_DEVICE == 'dml':
                    # The ONNX graph expects a fixed-length (ctx_len) input,
                    # so right-pad short sequences with zeros.
                    if real_len < ctx_len:
                        xxx = np.pad(x, (0, ctx_len - real_len))
                    else:
                        xxx = x
                    out = rt_session.run(None, {rt_session.get_inputs()[0].name: [xxx[-ctx_len:]]})
                    out = torch.tensor(out[0])
                else:
                    xxx = torch.tensor(x[-ctx_len:], dtype=torch.long)[None, ...]
                    if RUN_DEVICE == 'gpu':
                        xxx = xxx.cuda()
                    out, _ = model(xxx)
                # Never sample the unknown-character token.
                out[:, :, UNKNOWN_CHAR] = -float('Inf')

            # Logit position of the last *real* (unpadded) character.
            pos = -1 if real_len >= ctx_len else real_len - 1

            # At a line start sample near-unconstrained (top-p); mid-line use
            # the min_p filter controlled by the caller's sensitivity.
            if itos[int(x[real_len - 1])] == '\n':
                char = src.utils.sample_logits(out, pos, temperature=1.0, top_p=0.995)
            else:
                char = src.utils.sample_logits(out, pos, temperature=1.0,
                                               min_p_pow=2.0, min_p_ratio=min_p_ratio)

            x = np.append(x, char)
            real_len += 1

            # Flush generated text periodically (every step on non-gpu
            # devices, every 10th step on gpu to reduce sync overhead).
            if i % 10 == 9 or i == LENGTH_OF_EACH - 1 or i < 10 or RUN_DEVICE != 'gpu':
                completion = ''.join(itos[int(tok)] for tok in x[print_begin:real_len])
                printStr = printStr + completion
                print_begin = real_len
        dataform['data'].append(printStr)

    curr_time2 = datetime.datetime.now()
    dataform['title'] = dataform['title'] + '耗时：' + str(curr_time2 - curr_time)
    return dataform
