# Standard library
import argparse
import gc
import hmac
import os
import random
import secrets
import string
import time
from contextlib import asynccontextmanager
from typing import Callable, List

# Local protocol models (star-import kept BEFORE the third-party imports so any
# overlapping names resolve exactly as they did in the original import order)
from m import *

# Third-party
import pyfiglet
import torch
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from starlette.middleware.base import BaseHTTPMiddleware
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig

# 配置常量
TOKEN_FILE_PATH = "run.token"
TOKEN_LENGTH = 32
DEFAULT_PORT = 60061
DEFAULT_MODEL = "Qwen/Qwen1.5-1.8B-Chat"


@asynccontextmanager
async def lifespan(app: FastAPI): # collects GPU memory
    yield
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

# 创建 Flask 应用
app = FastAPI(lifespan=lifespan)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Custom middleware: Bearer-token authentication for incoming requests
class TokenAuthMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next: Callable):
        # 如果请求来自 127.0.0.1 或本地地址，则跳过验证
        if request.client.host == "127.0.0.1":
            return await call_next(request)
        
        # 获取 Authorization 头部
        authorization: str = request.headers.get("Authorization")
        
        # 如果没有 Authorization 头部，返回 401 未授权错误
        if authorization is None:
            raise HTTPException(status_code=401, detail="Authorization header missing")
        
        # 判断 token 是否以 'Bearer ' 开头
        if not authorization.startswith("Bearer "):
            raise HTTPException(status_code=401, detail="Invalid authorization format")
        
        # 获取 token 并验证
        token = authorization[len("Bearer "):]
        if not is_valid_token(token):
            raise HTTPException(status_code=401, detail="Invalid token")
        
        # 如果验证通过，继续处理请求
        response = await call_next(request)
        return response

app.add_middleware(TokenAuthMiddleware)


# Generate a 32-character random token and save it to a file
def generate_token():
    token = create_token()
    with open(TOKEN_FILE_PATH, "w") as token_file:
        token_file.write(token)
    return token
def create_token():
    return ''.join(random.choices(string.ascii_letters + string.digits, k=TOKEN_LENGTH))

# Read the saved token; generate a new one if the file does not exist
def read_token():
    if os.path.exists(TOKEN_FILE_PATH):
        with open(TOKEN_FILE_PATH, "r") as token_file:
            return token_file.read().strip()
    return generate_token()

# Validate the token supplied with a request
def is_valid_token(request_token):
    expected_token = read_token()
    return request_token == expected_token

# Load the model and tokenizer (prefers a local copy when one exists)
def load_models(model_name):

    # 构建本地路径
    local_model_path = f"/Volumes/develop/local-elk/data/chat/model/{model_name}"
    load_model = model_name
    if os.path.exists(local_model_path):
        load_model = local_model_path

    tokenizer = AutoTokenizer.from_pretrained(load_model, use_fast=False, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(load_model, torch_dtype=torch.float32, trust_remote_code=True)
    if torch.cuda.is_available():
        model = model.cuda()
    else:
        model.to("cpu")
    model.generation_config = GenerationConfig.from_pretrained(load_model) 
    return tokenizer, model

#
# Conversation generation
#
def generate_id():
    possible_characters = string.ascii_letters + string.digits
    random_string = ''.join(random.choices(possible_characters, k=29))
    return 'chatcmpl-' + random_string

async def predict(messages: List[List[str]], model_id: str):
    global model, tokenizer
    id = generate_id()
    created = int(time.time())
    choice_data = ChatCompletionResponseStreamChoice(
        index=0,
        delta=DeltaMessage(role="assistant",content=""),
        finish_reason=None
    )

    chunk = ChatCompletionResponse(id=id,object="chat.completion.chunk",created=created,model=model_id, choices=[choice_data])
    yield "{}".format(chunk.json())


    msg = messages[-1]['content']
    print("input:",msg)
   # 对话生成
    input_ids = tokenizer.encode(msg, return_tensors='pt')
    output = model.generate(input_ids)


    # 解码生成的文本序列
    for sequence in output:
        generated_text = tokenizer.decode(sequence, skip_special_tokens=True)
        choice_data = ChatCompletionResponseStreamChoice(
            index=0,
            delta=DeltaMessage(content=generated_text),
            finish_reason=None
        )
        print("output:",msg)
        chunk = ChatCompletionResponse(id=id,object="chat.completion.chunk",created=created,model=model_id, choices=[choice_data])
        yield "{}".format(chunk.json())


    choice_data = ChatCompletionResponseStreamChoice(
        index=0,
        delta=DeltaMessage(),
        finish_reason="stop"
    )
    chunk = ChatCompletionResponse(id=id,object="chat.completion.chunk",created=created,model=model_id, choices=[choice_data])
    yield "{}".format(chunk.json())
    yield '[DONE]'
    
def notStream(messages: List[List[str]], model_id: str):
    global model, tokenizer
    id = generate_id()
    created = int(time.time())
    choice_data = ChatCompletionResponseStreamChoice(
        index=0,
        delta=DeltaMessage(role="assistant",content=""),
        finish_reason=None
    )
    chunk = ChatCompletionResponse(id=id,object="chat.completion.chunk",created=created,model=model_id, choices=[choice_data])

    msg = messages[-1]['content']
    print("input:",msg)
   # 对话生成
    input_ids = tokenizer.encode(msg, return_tensors='pt')
    attention_mask = (input_ids != tokenizer.pad_token_id).long() 
    output = model.generate(input_ids, attention_mask=attention_mask)
    result = []
    generated_text = ''
    for sequence in output:
        generated_text += tokenizer.decode(sequence, skip_special_tokens=True)
        
    result.append(generated_text)
    return result

# Handle the /v1/models route: return model information
@app.get("/v1/models")
def list_models():
    model_info = {"id": read_token()}  # 这里可以根据实际需求返回模型的更多信息
    return {"object": "list", "data": [model_info]}

# Handle the /v1/chat/completions route: generate chat replies
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def create_chat_completion(request: Request, body: ChatCompletionRequest):

    global model, tokenizer
    if body.messages[-1].role != "user":
        raise HTTPException(status_code=400, detail="Invalid body")
    query = body.messages[-1].content
    prev_messages = body.messages[:-1]
    if len(prev_messages) > 0 and prev_messages[0].role == "system":
        query = prev_messages.pop(0).content + query
    messages = []
    for message in prev_messages:
        messages.append({"role": message.role, "content": message.content})
    
    messages.append({"role": "user", "content": query})
    
    if body.stream:
        generate = predict(messages, body.model)
        return EventSourceResponse(generate, media_type="text/event-stream")
    

    response = "本接口只支持流输出！"
    choice_data = ChatCompletionResponseChoice(
        index=0,
        message=ChatMessage(role="assistant", content=response),
        finish_reason="stop"
    )
    id = create_token()

    return ChatCompletionResponse(id=id,model=body.model, choices=[choice_data], object="chat.completion")

@app.post("/v1/chat/completions_nostream")
async def create_chat_completion_nostream(body: ChatCompletionRequest):
    global model, tokenizer  # 假设这两个是已加载的模型和分词器

    if body.messages[-1].role != "user":
        raise HTTPException(status_code=400, detail="Invalid body")

    query = body.messages[-1].content
    prev_messages = body.messages[:-1]

    if len(prev_messages) > 0 and prev_messages[0].role == "system":
        query = prev_messages.pop(0).content + query

    messages = [{"role": message.role, "content": message.content} for message in prev_messages]
    messages.append({"role": "user", "content": query})

    print('begin')
    response = notStream(messages, body.model)
    
    return {"response": response}

# Handle the /getToken route: return the token and port
@app.get("/getToken")
def get_token():
    return {
        "LazyDuck": "LazyDuck 对话模型 Python 脚本!",
        "token": read_token(),
        "model": DEFAULT_MODEL,  # 返回当前的模型
        "port": port,  # 返回当前端口
        "host": "0.0.0.0"  # 返回当前主机配置（此处示例使用默认的host）
    }

# Parse command-line arguments
def parse_args():
    parser = argparse.ArgumentParser(description="Run Flask app with model and port")
    parser.add_argument("-m", "--model", type=str, default=DEFAULT_MODEL, help="Model name")
    parser.add_argument("-p", "--port", type=int, default=DEFAULT_PORT, help="Port to run the web service")
    return parser.parse_args()

def success_message(model_name, port, token):
    print('------------------------------')

    print('LazyDuck 对话模型 Python 脚本 启动成功!')

    print(f'加载的模型名称为: {model_name, port, token}')
    print(f'Web 服务正在启动，监听端口: {port}')
    print('')
    print('访问以下端点:')
    print('- /v1/models: 获取当前模型信息')
    print('- /v1/chat/completions: 生成聊天对话（支持流式响应）')
    print(f'- /getToken: 获取当前token和端口信息')

    print(f'请求头示例: uthorization: Bearer {token} Content-Type: application/json')
    
    print('------------------------------')

if __name__ == "__main__":
    print(pyfiglet.figlet_format("LazyDuck"))

    # 解析参数
    args = parse_args()
    model_name = args.model
    port = args.port

    # 加载模型（此处只打印模型名称）
    tokenizer, model = load_models(model_name)

    # 生成 token
    token = read_token()
    
    # 启动 Flask 服务
    uvicorn.run(app, host='0.0.0.0', port=port, workers=1)
    
    # 输出启动信息
    success_message(model_name, port, token)
    while True:
        try:
            # 在这里执行您的程序逻辑

            # 检查显存使用情况，如果超过阈值（例如90%），则触发垃圾回收
            if torch.cuda.is_available():
                gpu_memory_usage = torch.cuda.memory_allocated() / torch.cuda.max_memory_allocated()
                if gpu_memory_usage > 0.9:
                    gc.collect()
                    torch.cuda.empty_cache()
        except RuntimeError as e:
            if "out of memory" in str(e):
                print("显存不足，正在重启程序...")
                gc.collect()
                torch.cuda.empty_cache()
                time.sleep(5) # 等待一段时间以确保显存已释放
                tokenizer, model = load_models()
            else:
                raise e
            

# python run.py -p 60056 -m Qwen/Qwen1.5-1.8B-Chat
