How to use the miqu model in Spaces?

#23 · opened by Empereur-Pirate
from fastapi import FastAPI, Request, Depends
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoConfig

# Helper dependency: read the raw request body and wrap it in a ParseRaw model
async def parse_raw(request: Request) -> "ParseRaw":
    return ParseRaw(raw=await request.body())

app = FastAPI()

# Serve the static files
app.mount("/static", StaticFiles(directory="static"), name="static")

pipe_flan = pipeline("text2text-generation", model="google/flan-t5-small")

def t5(input: str) -> dict[str, str]:
    output = pipe_flan(input)
    return {"output": output[0].get("generated_text", "")}

class ParseRaw(BaseModel):
    raw: bytes



@app.post("/infer_t5")
async def infer_endpoint(data: ParseRaw = Depends(parse_raw)):
    """Receive raw input text and generate text with FLAN-T5."""
    try:
        input_text = data.raw.decode("utf-8")
    except UnicodeDecodeError as e:
        return JSONResponse({"error": f"Invalid input format: {e}"}, status_code=400)

    if not input_text:
        return JSONResponse({"error": "No input text detected."}, status_code=400)

    return t5(input_text)



@app.get("/infer_t5")
def get_default_inference_endpoint():
    return {"message": "Use POST method to submit input data"}

# Load the miqu model and tokenizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

try:
    # First try to load the model and tokenizer the standard way
    model_config = AutoConfig.from_pretrained("miqudev/miqu-1-70b")
    model = AutoModelForCausalLM.from_pretrained("miqudev/miqu-1-70b", config=model_config).to(device)
    tokenizer = AutoTokenizer.from_pretrained("miqudev/miqu-1-70b")
except Exception as e:
    print("[WARNING]: Failed to load model and tokenizer conventionally.")
    print(f"Exception: {e}")

    # Retry, letting custom code from the repository supply the configuration
    model_config = AutoConfig.from_pretrained("miqudev/miqu-1-70b", trust_remote_code=True)

    # Load the model using that configuration
    model = AutoModelForCausalLM.from_pretrained("miqudev/miqu-1-70b", config=model_config).to(device)
    tokenizer = AutoTokenizer.from_pretrained("miqudev/miqu-1-70b")

def miku_answer(query: str) -> str:
    query_tokens = tokenizer.encode(query, return_tensors="pt").to(device)
    answer = model.generate(query_tokens, max_length=128, temperature=1.0, pad_token_id=tokenizer.pad_token_id)
    # Decode the first generated sequence and drop special tokens
    return tokenizer.decode(answer[0], skip_special_tokens=True)



@app.post("/infer_miku")
async def infer_miku_endpoint(data: ParseRaw = Depends(parse_raw)):
    """Receive raw input text and generate text with the miqu model."""
    try:
        input_text = data.raw.decode("utf-8")
    except UnicodeDecodeError as e:
        return JSONResponse({"error": f"Invalid input format: {e}"}, status_code=400)

    if not input_text:
        return JSONResponse({"error": "No input text detected."}, status_code=400)

    return {"output": miku_answer(input_text)}



@app.get("/infer_miku")
def get_default_miku_inference_endpoint():
    return {"message": "Use POST method to submit input data"}


When the Space starts, uvicorn fails with this traceback:

Traceback (most recent call last):
  File "/usr/local/bin/uvicorn", line 8, in <module>
    sys.exit(main())
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1157, in __call__
    return self.main(*args, **kwargs)
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1078, in main
    rv = self.invoke(ctx)
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1434, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 783, in invoke
    return __callback(*args, **kwargs)
  File "/usr/local/lib/python3.9/site-packages/uvicorn/main.py", line 437, in main
    run(app, **kwargs)
  File "/usr/local/lib/python3.9/site-packages/uvicorn/main.py", line 463, in run
    server.run()
  File "/usr/local/lib/python3.9/site-packages/uvicorn/server.py", line 60, in run
    return asyncio.run(self.serve(sockets=sockets))
  File "/usr/local/lib/python3.9/asyncio/runners.py", line 44, in run
    return loop.run_until_complete(main)
  File "uvloop/loop.pyx", line 1517, in uvloop.loop.Loop.run_until_complete
  File "/usr/local/lib/python3.9/site-packages/uvicorn/server.py", line 67, in serve
    config.load()
  File "/usr/local/lib/python3.9/site-packages/uvicorn/config.py", line 458, in load
    self.loaded_app = import_from_string(self.app)
  File "/usr/local/lib/python3.9/site-packages/uvicorn/importer.py", line 21, in import_from_string
    module = importlib.import_module(module_str)
  File "/usr/local/lib/python3.9/importlib/__init__.py", line 127, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
  File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
  File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
  File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 850, in exec_module
  File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
  File "/home/user/app/./main.py", line 63, in <module>
    model_config = AutoConfig.from_pretrained("miqudev/miqu-1-70b", trust_remote_code=True)
  File "/usr/local/lib/python3.9/site-packages/transformers/models/auto/configuration_auto.py", line 1100, in from_pretrained
    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
  File "/usr/local/lib/python3.9/site-packages/transformers/configuration_utils.py", line 634, in get_config_dict
    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
  File "/usr/local/lib/python3.9/site-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
    resolved_config_file = cached_file(
  File "/usr/local/lib/python3.9/site-packages/transformers/utils/hub.py", line 436, in cached_file
    raise EnvironmentError(
OSError: miqudev/miqu-1-70b does not appear to have a file named config.json. Checkout 'https://huggingface.co/miqudev/miqu-1-70b/main' for available files.

That repo only contains GGUF quantised weights, so there is no config.json for transformers to load. You should use ctransformers for quantised models, or use a dequantised version of miqu-1-70b; there are a few on HF, such as 152334H/miqu-1-70b-sf.
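
A minimal sketch of both options (the GGUF file name and gpu_layers value are assumptions; check the repo's file listing and your hardware):

from ctransformers import AutoModelForCausalLM

# Option 1: load the original GGUF quantised weights with ctransformers
llm = AutoModelForCausalLM.from_pretrained(
    "miqudev/miqu-1-70b",
    model_file="miqu-1-70b.q4_k_m.gguf",  # hypothetical file name; verify on the Hub
    model_type="llama",
    gpu_layers=0,  # raise this to offload layers to a GPU if you have one
)
print(llm("Q: Hello, who are you? A:", max_new_tokens=64))

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Option 2: load the dequantised full weights with plain transformers.
# Caveat: this is a 70B model (roughly 140 GB in fp16), far beyond a basic Space.
tokenizer = AutoTokenizer.from_pretrained("152334H/miqu-1-70b-sf")
model = AutoModelForCausalLM.from_pretrained(
    "152334H/miqu-1-70b-sf",
    torch_dtype=torch.float16,
    device_map="auto",  # requires the accelerate package
)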

Thanks for the reply, it helped me a lot. I now have an issue installing the sentencepiece library: https://huggingface.co/152334H/miqu-1-70b-sf/discussions/20
