import numpy as np
import json
from mindspore import nn
from mindspore.common.initializer import Normal
import mindspore
# Cap Ascend device memory so the model fits alongside other workloads on the NPU.
mindspore.set_context(max_device_memory="6GB", device_target="Ascend")

import os, sys
from inferemote.model_service import ModelService
# Bind the remote inference service to this module; `service.Run(...)` is the main loop.
service, args = ModelService.Create(__name__)
import time
from inferemote.ccbuf2numpy import Numpy2Ccbuf
from mindnlp.transformers import AutoModelForCausalLM, AutoTokenizer
from mindnlp.transformers import TextIteratorStreamer
from threading import Thread

# First run: uncomment the two lines below to auto-download the model from the hub,
# then switch back to the local-path loading that follows.
# tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat", ms_dtype=mindspore.float16)
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B-Chat", ms_dtype=mindspore.float16)



local_path = "./.mindnlp/model/Qwen/Qwen1.5-0.5B-Chat"

# Load tokenizer and model from the local cache (no network access).
tokenizer = AutoTokenizer.from_pretrained(
    local_path, 
    ms_dtype=mindspore.float16,
    local_files_only=True  # force use of local files only
)
model = AutoModelForCausalLM.from_pretrained(
    local_path, 
    ms_dtype=mindspore.float16,
    local_files_only=True  # force use of local files only
)

# HTTP transport: a small Flask app used as a side channel so clients can
# poll generated tokens chunk by chunk (see the /chat/<id> endpoint below).
from flask import Flask, jsonify, abort
app = Flask(__name__)

def run_flask(host='192.168.0.71', port=5000):
    """Run the Flask token-streaming server (blocking).

    Args:
        host: interface to bind; defaults to the address hard-coded elsewhere
              in this file (the /chat URL built in MindsporeModel.inference).
        port: TCP port to listen on.

    `threaded=True` lets Flask serve concurrent /chat polls while generation
    threads are still producing tokens.
    """
    app.run(host=host, port=port, threaded=True)
    
    
# endpoint function
# endpoint function
@app.route('/chat/<int:id>', methods=['GET'])
def chat(id):
    """Return the next streamed chunk for generator #id as JSON.

    `g` is the module-global list of active token-stream generators
    (created in MindsporeModel.inference). Responds 404 both when the
    stream is exhausted and when the id was never allocated — previously
    an unknown id raised IndexError and surfaced as a 500.
    """
    try:
        res = next(g[id])
        print("DEBUG: ", res)
        return jsonify(res)
    except IndexError:
        abort(404, description="Unknown stream id")
    except StopIteration:
        abort(404, description="No more data available")
    
# Define model
# Define model
class Nlp:
    """Pre/post-processing adapter between the transport layer (raw bytes)
    and the chat pipeline (Python message structures)."""

    def __init__(self):
        pass

    def pre_process(self, blob):
        """Decode request bytes into chat messages.

        The single character 'V' is passed through unchanged (used as a
        probe token by `MindsporeModel.inference`); anything else is
        parsed as JSON.
        """
        print("DEBUG: before pps", blob)
        text = blob.decode('utf-8')
        if text == 'V':
            return text
        # Parse the already-decoded text instead of re-decoding the raw
        # bytes a second time (the original called json.loads(blob)).
        result = json.loads(text)
        print("DEBUG: after pps", result)
        return result

    def post_process(self, output):
        """Encode the response string back to UTF-8 bytes for transport."""
        return output.encode('utf-8')

    def helpinfo(self):
        """Short human-readable description of this remote."""
        return 'This is a **NLP Mindspore** Remote'
    
def func(messages):
    """Generate a streamed chat completion for `messages`.

    Runs `model.generate` on a background thread and yields the
    *cumulative* decoded text after each new token (each yield contains
    everything produced so far). Stops early if the '</s>' stop token
    shows up in the accumulated text, without yielding it.
    """
    token_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="ms",
        tokenize=True,
    )
    streamer = TextIteratorStreamer(
        tokenizer, timeout=120, skip_prompt=True, skip_special_tokens=True
    )
    # Generation runs in its own thread; the streamer feeds tokens back here.
    Thread(
        target=model.generate,
        kwargs={
            "input_ids": token_ids,
            "streamer": streamer,
            "max_new_tokens": 1024,
            "do_sample": True,
            "top_p": 0.9,
            "temperature": 0.1,
            "num_beams": 1,
        },
    ).start()

    accumulated = ""
    for piece in streamer:
        accumulated += piece
        if '</s>' in accumulated:
            break
        yield accumulated
            


class MindsporeModel(object):
  """Adapter that plugs a pre/post-processing net into the ModelService
  loop, allocating one streaming generator per chat request."""

  def __init__(self, net):
    # `net` must supply pre_process/post_process (and may supply helpinfo).
    self.net = net
    print("done.")

  def helpinfo(self):
    """Return the net's help string, falling back to a generic one."""
    try:
      info = self.net.helpinfo()
    except Exception:
      # Narrowed from a bare `except:`; still covers nets without helpinfo().
      info = 'This is a **MindSpore ONLINE** remote.'
    return info

  def __helpinfo(self):
    # Private transport-encoded variant of helpinfo().
    info = self.helpinfo().encode()
    return Numpy2Ccbuf([info])

  def inference(self, blob):
    """Handle one request: register a token stream for the decoded
    messages and reply with the polling URL for it.

    NOTE(review): the original code assigned an unused `output = msgs`
    when the 'V' probe arrived but still returned the chat URL of the
    most recent stream; that (possibly unintended) behavior is preserved
    here — confirm what a 'V' probe should actually return.
    """
    msgs = self.net.pre_process(blob)
    print('.', end='', flush=True)
    if msgs != 'V':
      global g
      g.append(func(msgs))
    # Clients poll this URL for incremental chunks (served by run_flask).
    url = f'http://192.168.0.71:5000/chat/{len(g) - 1}'
    result = self.net.post_process(url)
    return Numpy2Ccbuf([result])

  def run(self):
    """Enter the blocking ModelService loop with this adapter."""
    service.Run(self)
      
# Ends.


if __name__ == '__main__':
    # Global list of active token-stream generators, indexed by /chat/<id>.
    g = []
    
    # Serve the /chat polling endpoint in the background; daemon thread so
    # it exits together with the main service loop.
    flask_thread = Thread(target=run_flask)
    flask_thread.daemon = True
    flask_thread.start()
    
    # Blocking: hand the pre/post-processing net to the remote service loop.
    net = Nlp()
    MindsporeModel(net).run()
