from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn
from typing import Generator
import json  # Make sure this import stays at the top of the file

app = FastAPI()

# Initialize the InferenceClient with your model
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
class Item(BaseModel):
    prompt: str
    history: list
    system_prompt: str
    temperature: float = 0.8
    max_new_tokens: int = 9000
    top_p: float = 0.15
    repetition_penalty: float = 1.0
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
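
# Illustrative example of the Mixtral instruction format this produces:
# with history = [("Hi", "Hello!")] and message = "How are you?",
# format_prompt returns:
#   <s>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]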
def generate_stream(item: Item) -> Generator[bytes, None, None]:
    formatted_prompt = format_prompt(f"{item.system_prompt}, {item.prompt}", item.history)
    generate_kwargs = {
        "temperature": item.temperature,
        "max_new_tokens": item.max_new_tokens,
        "top_p": item.top_p,
        "repetition_penalty": item.repetition_penalty,
        "do_sample": True,
        "seed": 42,  # Adjust or omit the seed as needed
    }

    # Stream the response from the InferenceClient
    for response in client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True):
        # With details=True each streamed item exposes the generated token;
        # generated_text is only populated on the final chunk
        chunk = {
            "text": response.token.text,
            "complete": response.generated_text is not None  # Adjust based on how you detect completion
        }
        yield json.dumps(chunk).encode("utf-8") + b"\n"
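
# Each line written to the stream is a standalone JSON object, e.g. (illustrative values):
#   {"text": " Hello", "complete": false}
#   {"text": "!", "complete": true}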
@app.post("/generate")  # Register the endpoint; the route path here is illustrative, adjust as needed
async def generate_text(item: Item):
    # Stream the response back to the client as newline-delimited JSON
    return StreamingResponse(generate_stream(item), media_type="application/x-ndjson")
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
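
A minimal client-side sketch for consuming the stream, assuming the server runs locally on port 8000 and the endpoint is mounted at /generate (both are assumptions, as is the example payload): it posts an Item-shaped JSON body with the requests library and decodes each NDJSON line as it arrives.

# Hypothetical client for the streaming endpoint above (URL, route, and payload values are assumptions)
import json
import requests

payload = {
    "prompt": "Write a haiku about the sea.",
    "history": [],
    "system_prompt": "You are a helpful assistant.",
    "temperature": 0.8,
    "max_new_tokens": 256,
}

with requests.post("http://localhost:8000/generate", json=payload, stream=True) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines():  # each non-empty line is one JSON chunk from generate_stream
        if not line:
            continue
        chunk = json.loads(line)
        print(chunk["text"], end="", flush=True)
        if chunk["complete"]:
            break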