import argparse
import json

import torch

try:
    from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer, AutoModel
except ImportError:
    from transformers import AutoTokenizer, AutoModelForCausalLM, LLaMATokenizer, AutoModel
from compression import compress_module
from monkey_patch_non_inplace import replace_llama_attn_with_non_inplace_operations
import websocket
from websocket import WebSocketApp

try:
    import thread
except ImportError:
    import _thread as thread
import time

import dataclasses
from enum import auto, Enum
from typing import List, Any


# Separator style used when rendering a conversation into a prompt
class SeparatorStyle(Enum):
    """Enumerates the separator layouts `Conversation.get_prompt` supports."""
    SINGLE = 1  # one separator string appended after every message
    TWO = 2     # two separators, alternating by message index


# Conversation container used for building prompts and echoing history
@dataclasses.dataclass
class Conversation:
    """Holds the full history of one chat and renders it into a prompt.

    `messages` is a list of [role, text] pairs; `offset` marks where the
    displayable part of the history starts (earlier entries are few-shot
    priming).
    """
    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "###"
    sep2: str = None

    skip_next: bool = False
    conv_id: Any = None

    def get_prompt(self):
        """Render system text plus all messages into one prompt string."""
        if self.sep_style == SeparatorStyle.SINGLE:
            chunks = [self.system + self.sep]
            for role, text in self.messages:
                # An empty/None text means "cue the model to speak as `role`".
                chunks.append(f"{role}: {text}{self.sep}" if text else f"{role}:")
            return "".join(chunks)
        if self.sep_style == SeparatorStyle.TWO:
            seps = (self.sep, self.sep2)
            chunks = [self.system + seps[0]]
            for idx, (role, text) in enumerate(self.messages):
                chunks.append(f"{role}: {text}{seps[idx % 2]}" if text else f"{role}:")
            return "".join(chunks)
        raise ValueError(f"Invalid style: {self.sep_style}")

    def append_message(self, role, message):
        """Add one [role, message] pair to the history."""
        self.messages.append([role, message])

    def to_gradio_chatbot(self):
        """Pair up messages after `offset` as [user, assistant] rows."""
        rows = []
        for idx, (_, text) in enumerate(self.messages[self.offset:]):
            if idx % 2 == 0:
                rows.append([text, None])
            else:
                rows[-1][-1] = text
        return rows

    def copy(self):
        """Return a copy whose message pairs are independent lists."""
        return Conversation(
            system=self.system,
            roles=self.roles,
            messages=[list(pair) for pair in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            conv_id=self.conv_id)

    def dict(self):
        """Serialize the conversation state to a plain dict."""
        return {
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
            "conv_id": self.conv_id,
        }


class WebsocketClient(object):
    """Websocket worker: connects to a controller, receives conversation
    requests (act == 1000), runs the local model and streams replies back."""

    def __init__(self, args):
        # Model/tokenizer are loaded lazily in re_load_model() once connected.
        self.model = None
        self.tokenizer = None
        self.args = args
        self.ws = None

    def on_message(self, conn: WebSocketApp, message: str):
        """Dispatch an incoming controller message.

        act == 1000 carries a conversation request; anything else is rejected.
        """
        print(f"on message：{message}")

        # Renamed from `dict`: never shadow the builtin.
        payload = json.loads(message)
        if payload["act"] == 1000:
            data = payload["data"]
            conv_id = payload["id"]

            try:
                self.on_conversation(Conversation(
                    conv_id=conv_id,
                    system=data["system"],
                    roles=data["roles"],
                    messages=data["messages"],
                    offset=2,
                    sep_style=SeparatorStyle.SINGLE,
                    sep="###",
                ))
            except Exception as e:
                # Report abnormal stop (state 0) to the controller,
                # then try to recover by reloading the model.
                self.send_obj(create_ws_response(0, None, conv_id))
                print(e)
                self.re_load_model()
        else:
            self.send_obj({'act': 2999, 'msg': '请求未处理'})

    def send_obj(self, obj):
        """Serialize `obj` to pretty-printed JSON and send it over the socket."""
        message: str = json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)
        print(f"==> {message}")
        self.ws.send(message)

    def on_conversation(self, conv: Conversation):
        """Generate a streamed answer for `conv`.

        Partial word chunks go out with state 3 (busy), the final chunk with
        state 2 (ready).
        """
        prompt = conv.get_prompt()
        # Length of the echoed prompt to strip from the decoded output.
        skip_echo_len = len(prompt.replace("</s>", " ")) + 1

        # BUG fix: read from self.args instead of the module-level `args`
        # that only exists when this file runs as a script.
        params = {
            "model": self.args.model_path,
            "prompt": prompt,
            "temperature": self.args.temperature,
            "max_new_tokens": self.args.max_new_tokens,
            "stop": conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2,
        }

        print(f"{conv.roles[1]}: ", end="", flush=True)
        output_stream = generate_stream(self.model, self.tokenizer, params, self.args.device)

        # Stream whole words only: hold back the (possibly incomplete) last word.
        pre = 0
        for outputs in output_stream:
            outputs = outputs[skip_echo_len:].strip()
            outputs = outputs.split(" ")
            now = len(outputs) - 1
            if now > pre:
                txt = " ".join(outputs[pre:now])
                print(txt, end=" ", flush=True)

                self.send_obj(create_ws_response(3, txt, conv.conv_id))
                pre = now

        ent_txt = " ".join(outputs[pre:])
        self.send_obj(create_ws_response(2, ent_txt, conv.conv_id))
        print(ent_txt, flush=True)

        if self.args.debug:
            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")

    def on_error(self, conn: WebSocketApp, error: TypeError):
        print(f"error：{error}")

    def on_close(self, conn: WebSocketApp, v3, v4):
        """Reconnect 5 seconds after any socket close."""
        print("on close sleep 5s restart.")
        time.sleep(5)
        self.start()

    def on_open(self, conn: WebSocketApp):
        print("ws connected.")
        self.re_load_model()

    def re_load_model(self):
        """Drop and reload model + tokenizer, reporting state transitions."""
        # BUG fix: rebind to None instead of `del` — if a previous load
        # failed, `del self.model` would raise AttributeError. Rebinding
        # releases the reference (and its memory) just the same.
        self.model = None
        self.tokenizer = None

        # Give the device time to actually free the released memory.
        time.sleep(1)
        self.send_obj(create_ws_response(1))  # state 1: loading
        model, tokenizer = load_model(self.args)
        self.model = model
        self.tokenizer = tokenizer
        self.send_obj(create_ws_response(2))  # state 2: ready

    def start(self):
        """Connect to the controller and block in the websocket event loop."""
        # websocket.enableTrace(True)  # enable when debugging connection issues
        print(f"正在连接ws服务器url={self.args.controller_address}")
        self.ws = WebSocketApp(self.args.controller_address,
                               on_open=self.on_open,
                               on_message=self.on_message,
                               on_error=self.on_error,
                               on_close=self.on_close)
        self.ws.run_forever()


def create_ws_response(state: int, message: str = None, conv_id: int = 0):
    """Build the act-2000 status payload sent back over the websocket.

    `state` codes: 0 stopped, 1 loading, 2 ready, 3 busy; any other value
    yields msg=None, matching the original if/elif fallthrough.
    """
    labels = {
        0: '停止',
        1: '启动中',
        2: '就绪',
        3: '忙碌',
    }

    return {
        'id': conv_id,
        'act': 2000,
        'data': {
            'state': state,
            'message': message
        },
        'msg': labels.get(state),
    }


def load_model(args):
    """Load the tokenizer and causal-LM named by args.model_path onto args.device.

    Returns (model, tokenizer). Raises ValueError for an unknown device.
    Recognized devices: "cpu", "cuda" (with args.num_gpus as an int string or
    "auto"), "mps".
    """
    # BUG fix: num_gpus was previously unbound when --device cuda --num-gpus auto,
    # causing a NameError at the model.to() check below. None never equals 1,
    # so the auto/device_map path correctly skips the explicit .to().
    num_gpus = None
    if args.device == "cpu":
        kwargs = {}
    elif args.device == "cuda":
        kwargs = {"torch_dtype": torch.float16}
        if args.num_gpus == "auto":
            kwargs["device_map"] = "auto"
        else:
            num_gpus = int(args.num_gpus)
            if num_gpus != 1:
                # Shard across GPUs with a fixed per-device memory cap.
                kwargs.update({
                    "device_map": "auto",
                    "max_memory": {i: "13GiB" for i in range(num_gpus)},
                })
    elif args.device == "mps":
        kwargs = {"torch_dtype": torch.float16}
        # Avoid bugs in mps backend by not using in-place operations.
        replace_llama_attn_with_non_inplace_operations()
    else:
        raise ValueError(f"Invalid device: {args.device}")

    tokenizer = AutoTokenizer.from_pretrained(args.model_path, use_fast=False)
    model = AutoModelForCausalLM.from_pretrained(args.model_path,
                                                 low_cpu_mem_usage=True, **kwargs)

    if args.load_8bit:
        compress_module(model, args.device)

    # Only move explicitly when accelerate's device_map is not in charge.
    if (args.device == "cuda" and num_gpus == 1) or args.device == "mps":
        model.to(args.device)

    if args.debug:
        print(model)

    return model, tokenizer


@torch.inference_mode()
def generate_stream(model, tokenizer, params, device,
                    context_len=2048, stream_interval=2):
    """Generate tokens one at a time, yielding the decoded text so far.

    params keys: "prompt" (required), "temperature" (default 1.0; < 1e-4 means
    greedy argmax), "max_new_tokens" (default 256), "stop" (optional stop
    string searched for after the prompt).

    Yields the full decoded output (prompt included) every `stream_interval`
    steps, on the last step, and whenever generation stops (EOS or stop
    string found).
    """
    prompt = params["prompt"]
    l_prompt = len(prompt)
    temperature = float(params.get("temperature", 1.0))
    max_new_tokens = int(params.get("max_new_tokens", 256))
    stop_str = params.get("stop", None)

    input_ids = tokenizer(prompt).input_ids
    output_ids = list(input_ids)

    # Keep room in the context window for the new tokens plus a small margin.
    max_src_len = context_len - max_new_tokens - 8
    input_ids = input_ids[-max_src_len:]

    # BUG fix: defined before the loop so the final `del` cannot raise
    # NameError when max_new_tokens == 0.
    past_key_values = None
    for i in range(max_new_tokens):
        if i == 0:
            # Prime the KV cache with the whole (truncated) prompt.
            out = model(
                torch.as_tensor([input_ids], device=device), use_cache=True)
        else:
            # Subsequent steps feed only the last sampled token plus the cache.
            # NOTE(review): an OOM was observed here in practice.
            attention_mask = torch.ones(
                1, past_key_values[0][0].shape[-2] + 1, device=device)
            out = model(input_ids=torch.as_tensor([[token]], device=device),
                        use_cache=True,
                        attention_mask=attention_mask,
                        past_key_values=past_key_values)
        logits = out.logits
        past_key_values = out.past_key_values

        last_token_logits = logits[0][-1]

        if device == "mps":
            # Switch to CPU by avoiding some bugs in mps backend.
            last_token_logits = last_token_logits.float().to("cpu")

        if temperature < 1e-4:
            token = int(torch.argmax(last_token_logits))
        else:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            token = int(torch.multinomial(probs, num_samples=1))

        output_ids.append(token)

        stopped = token == tokenizer.eos_token_id

        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
            output = tokenizer.decode(output_ids, skip_special_tokens=True)
            # BUG fix: str.rfind(None, ...) raises TypeError; only search when
            # a stop string was actually supplied.
            if stop_str:
                pos = output.rfind(stop_str, l_prompt)
                if pos != -1:
                    output = output[:pos]
                    stopped = True
            yield output

        if stopped:
            break

    # Drop the KV cache promptly to release device memory.
    del past_key_values


# Example invocation: --model-path C:\Users\yma\Documents\vicuna-7b --device cpu
if __name__ == "__main__":
    # Command-line entry point: parse arguments, then connect to the
    # controller and block in the websocket event loop.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m",
                        help="The path to the weights")
    # URL of the controller's websocket endpoint (no default; required in practice).
    parser.add_argument("--controller-address", type=str)
    parser.add_argument("--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda")
    # Kept as a string so the special value "auto" can be distinguished
    # from an explicit GPU count in load_model().
    parser.add_argument("--num-gpus", type=str, default="1")
    parser.add_argument("--load-8bit", action="store_true",
                        help="Use 8-bit quantization.")
    parser.add_argument("--conv-template", type=str, default="v1",
                        help="Conversation prompt template.")
    parser.add_argument("--temperature", type=float, default=0.7)
    parser.add_argument("--max-new-tokens", type=int, default=512)
    # NOTE(review): action="store_true" with default=True means debug is
    # always on and the flag cannot disable it — presumably a development
    # leftover; confirm intent before changing the default.
    parser.add_argument("--debug", action="store_true", default=True)
    args = parser.parse_args()
    WebsocketClient(args).start()