########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################

print('\nChatRWKV https://github.com/BlinkDL/ChatRWKV\n')

import os, sys, torch
import numpy as np
import re


# Compact numpy printing for any debug output of logits/arrays.
np.set_printoptions(precision=4, suppress=True, linewidth=200)

from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
import uvicorn, json, datetime


# current_path = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(f'{current_path}/rwkv_pip_package/src')

# Tune these below (test True/False for all of them) to find the fastest setting:
# torch._C._jit_set_profiling_executor(True)
# torch._C._jit_set_profiling_mode(True)
# torch._C._jit_override_can_fuse_on_cpu(True)
# torch._C._jit_override_can_fuse_on_gpu(True)
# torch._C._jit_set_texpr_fuser_enabled(False)
# torch._C._jit_set_nvfuser_enabled(False)

########################################################################################################
#
# Use '/' in model path, instead of '\'. Use ctx4096 models if you need long ctx.
#
# fp16 = good for GPU (!!! DOES NOT support CPU !!!)
# fp32 = good for CPU
# bf16 = worse accuracy, supports CPU
# xxxi8 (example: fp16i8) = xxx with int8 quantization to save 50% VRAM/RAM, slower, slightly less accuracy
#
# Read https://pypi.org/project/rwkv/ for Strategy Guide
#
########################################################################################################
# These environment switches must be set BEFORE importing RWKV.
os.environ['RWKV_JIT_ON'] = '1'
os.environ["RWKV_CUDA_ON"] = '1' # '1' to compile CUDA kernel (10x faster), requires c++ compiler & cuda libraries
from rwkv.model import RWKV # pip install rwkv

# Model path; do NOT append the ".pth" extension here.
model_path = 'C:/Users/ZTC/Documents/RWKV-4-Raven-7B-v7-ChnEng-20230404-ctx2048'
# Run strategy (see the official rwkv docs). After changing it, delete the
# cached converted file (the one ending in "-convert.pth") next to the model.
strategy_set = 'cuda fp16i8 *31+'

convert_model_path = f"{model_path}-convert"

# One-time conversion: if the converted checkpoint is missing, create it and
# tell the user (message in Chinese) to re-run this script afterwards.
if not os.path.exists(f"{convert_model_path}.pth"):
    print("没有看到转换后的模型，开始模型转换，转换完需要重新执行当前脚本")
    RWKV(model=model_path, strategy=strategy_set, convert_and_save_and_exit = f"{convert_model_path}.pth")

model = RWKV(model=convert_model_path, strategy=strategy_set)



# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-169m/RWKV-4-Pile-169M-20220807-8023', strategy='cuda fp16i8')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-169m/RWKV-4-Pile-169M-20220807-8023', strategy='cuda fp16i8 *6 -> cuda fp16 *0+ -> cpu fp32 *1')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-169m/RWKV-4-Pile-169M-20220807-8023', strategy='cpu fp32')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-169m/RWKV-4-Pile-169M-20220807-8023', strategy='cpu fp32 *3 -> cuda fp16 *6+')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-1b5/RWKV-4-Pile-1B5-20220903-8040', strategy='cpu fp32')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-1b5/RWKV-4-Pile-1B5-20220903-8040', strategy='cuda fp16')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-1b5/RWKV-4-Pile-1B5-20220903-8040', strategy='cuda fp16 *8 -> cpu fp32')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-1b5/RWKV-4-Pile-1B5-20220903-8040', strategy='cuda:0 fp16 -> cuda:1 fp16 -> cpu fp32 *1')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-1b5/RWKV-4-Pile-1B5-20220903-8040', strategy='cuda fp16 *6+')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-14b/RWKV-4-Pile-14B-20230213-8019', strategy='cuda fp16 *0+ -> cpu fp32 *1')
# model = RWKV(model='/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-3b/RWKV-4-Pile-3B-20221110-ctx4096', strategy='cuda:0 fp16 *25 -> cuda:1 fp16')

# out, state = model.forward([187], None)
# print(out.detach().cpu().numpy())

#out, state = model.forward([187, 510, 1563, 310, 247], None)
#print(out.detach().cpu().numpy())                   # get logits
#out, state = model.forward([187, 510], None)
#out, state = model.forward([1563], state)           # RNN has state (use deepcopy to clone states)
# out, state = model.forward([310, 247], state)
# print(out.detach().cpu().numpy())                   # same result as above

# print('\n')
# exit(0)
from rwkv.utils import PIPELINE, PIPELINE_ARGS
# Tokenizer/sampling wrapper; expects 20B_tokenizer.json next to this script.
pipeline = PIPELINE(model, "20B_tokenizer.json")

#ctx = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
#print(ctx, end='')

def my_print(s):
    """Stream a generated text fragment to stdout without a trailing newline."""
    sys.stdout.write(s)
    sys.stdout.flush()

# For alpha_frequency and alpha_presence, see "Frequency and presence penalties":
# https://platform.openai.com/docs/api-reference/parameter-details

########################################################################################################
# 1. set os.environ["RWKV_CUDA_ON"] = '1' if possible, for faster preprocess of a long ctx.
# 2. Reuse the state (use deepcopy to clone it) when you are running the same ctx multiple times. 


def genChatText(chat, history=None, roles=('User', 'Ai')):
    """Render a chat transcript in the "\\nRole: text\\n\\n" prompt format RWKV expects.

    Args:
        chat: the new user message appended as the final turn.
        history: prior turns as [user_text, ai_text] pairs; defaults to no history.
            (Was a mutable default `[]`; now a None sentinel — same behavior.)
        roles: (user_label, ai_label) names used as turn prefixes.

    Returns:
        The concatenated prompt string, ending with the new user turn.
    """
    history = history if history is not None else []
    parts = []
    for row in history:
        parts.append(f'\n{roles[0]}: {row[0]}\n\n')
        parts.append(f'\n{roles[1]}: {row[1]}\n\n')
    parts.append(f'\n{roles[0]}: {chat}\n\n')
    return ''.join(parts)

def getRoleNum(text):
    """Count role-prefixed turns in *text*.

    A "role turn" is any occurrence of a newline followed by a role label and
    a colon, e.g. "\\nUser:" — matched by the same character class the rest of
    the file uses for role names.
    """
    return len(re.findall(r'\n[a-zA-Z0-9._%+-]+:', text))

def removeRoleStr(text):
    """Strip role prefixes (e.g. "\\nUser:") from generated text.

    First re-joins a role label that got separated from its content by a
    newline (":\\n" -> ":"), then deletes every "\\nName:" marker and trims
    surrounding whitespace.

    (The original docstring was a copy-paste of getRoleNum's "count role
    turns" description, which did not match this function's behavior.)

    Args:
        text: raw model output possibly containing role markers.

    Returns:
        The text with all role prefixes removed, stripped of leading and
        trailing whitespace.
    """
    text = text.replace(':\n', ':')
    pattern = r'\n[a-zA-Z0-9 #._%+-]+:'
    return re.sub(pattern, '', text).strip()



def generateNew(self, ctx, token_count=100, args=PIPELINE_ARGS(), callback=None, state=None):
    """Autoregressively generate up to *token_count* tokens from *ctx*.

    Mirrors the rwkv pipeline's generate loop, with one extra stop rule: as
    soon as the accumulated output contains more than one role turn (see
    getRoleNum), generation halts — the model has started a second speaker.

    Args:
        self: a PIPELINE instance (provides encode/decode/model/sample_logits).
        ctx: the prompt string fed on the first step.
        token_count: maximum number of tokens to sample.
        args: PIPELINE_ARGS with sampling/penalty/chunking settings.
        callback: optional fn called with each decoded valid-UTF-8 fragment.
        state: optional RNN state to continue from.

    Returns:
        The decoded output string (valid-UTF-8 fragments only).
    """
    generated = []
    decoded_upto = 0
    result = ''
    counts = {}
    for step in range(token_count):
        # Feed the prompt on the first step, then the previous sample;
        # chunk long inputs to bound VRAM usage.
        pending = self.encode(ctx) if step == 0 else [token]
        while pending:
            out, state = self.model.forward(pending[:args.chunk_len], state)
            pending = pending[args.chunk_len:]

        # Ban tokens outright, then apply presence/frequency penalties.
        for banned in args.token_ban:
            out[banned] = -float('inf')
        for seen in counts:
            out[seen] -= (args.alpha_presence + counts[seen] * args.alpha_frequency)

        token = self.sample_logits(out, temperature=args.temperature, top_p=args.top_p, top_k=args.top_k)
        if token in args.token_stop:
            break

        generated.append(token)
        counts[token] = counts.get(token, 0) + 1

        # Emit only once the pending tokens decode to a valid UTF-8 string.
        fragment = self.decode(generated[decoded_upto:])
        if '\ufffd' not in fragment:
            if callback:
                callback(fragment)
            result += fragment
            decoded_upto = step + 1
        if getRoleNum(result) > 1:
            break
    return result





app = FastAPI()

# Origins allowed to make cross-origin requests; "*" allows every client origin.
origins = ["*"]

# Configure the CORSMiddleware so browser clients can call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,  # origins allowed to access the API
    allow_credentials=True,  # allow cookies on cross-origin requests
    allow_methods=["*"],  # allowed HTTP methods
    allow_headers=["*"]  # allowed request headers
)




@app.post("/")
async def create_item(request: Request):
    json_post_raw = await request.json()
    json_post = json.dumps(json_post_raw)
    json_post_list = json.loads(json_post)
    prompt = json_post_list.get('prompt')
    history = json_post_list.get('history')
    max_length = json_post_list.get('max_length')
    top_p = json_post_list.get('top_p')
    temperature = json_post_list.get('temperature')
    
    now = datetime.datetime.now()
    time = now.strftime("%Y-%m-%d %H:%M:%S")


    args = PIPELINE_ARGS(temperature = 1.0, top_p = 0.7, top_k=0, # top_k = 0 then ignore
                         alpha_frequency = 0.25,
                         alpha_presence = 0.25,
                         token_ban = [], # 这里输入[0,187]，下面输入[]，则AI就会自言自语扮演Ai和User不断书写新内容
                         token_stop = [0], # 这里输入[0,187]，上面输入[]，则遇到换行ai就会退出，只进行一次对话
                         chunk_len = 256) # split input into chunks to save VRAM (shorter -> slower)
    
    prompt = prompt.strip().replace('\r','').replace('\n','')
    
    #print(prompt)
    chatText = genChatText(prompt,history)
    print(chatText)
    response = generateNew(pipeline, chatText, token_count=999, args=args, callback=my_print)
    #print(response)
    response = removeRoleStr(response)
    history.append([prompt,response])
    
    answer = {
        "response": response,
        "history": history,
        "status": 200,
        "time": time
    }
    #log = "[" + time + "] " + '", prompt:"' + prompt + '", response:"' + repr(response) + '"'
    #print(log)
    print(answer)
    
    return answer


if __name__ == '__main__':

    # Serve on all interfaces; a single worker so the model is loaded only once.
    uvicorn.run(app, host='0.0.0.0', port=8000, workers=1)
