import chainlit as cl
from fastapi import FastAPI
from chainlit.utils import mount_chainlit

from fastapi import FastAPI
from chainlit.utils import mount_chainlit
from transformers import AutoTokenizer

# Host FastAPI application; the Chainlit UI app is mounted onto it further below.
app = FastAPI()


@app.get("/app")
def read_main():
    """Simple test endpoint of the host app.

    Returns a static greeting so the FastAPI side can be checked
    independently of the mounted Chainlit sub-application.
    """
    payload = {"message": "Hello World from main app"}
    return payload
 
'''过载在 fastapi 下'''  # (bare-string note; translation: "mounted under FastAPI")

# Install vLLM first:
# uv pip install vllm
'''
vllm serve meta-llama/Llama-3.2-1B 
    --task generate 
    --model-impl transformers
'''
# Alternative chat client against the local server:
#transformers chat localhost:8000 --model-name-or-path Qwen/Qwen3-4B

# Environment / server startup:
# conda activate mTransformers
# uvicorn main:app --host 0.0.0.0 --port 8899
#
# Previously used Chainlit sub-apps (kept for reference; only one is active):
# 127.0.0.1:8899/mDataset
#mount_chainlit(app=app, target="mDataset.py", path="/mDataset")
# 127.0.0.1:8899/mTokenization
# mount_chainlit(app=app, target="mTokenization.py", path="/mTokenization")
# 127.0.0.1:8899/mPipeline
#mount_chainlit(app=app, target="mPipeline.py", path="/mPipeline")
# Active sub-app, served at 127.0.0.1:8899/mPreprocessors
mount_chainlit(app=app, target="mPreprocessors.py", path="/mPreprocessors")