import json
import logging
import os

import requests
import uvicorn
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from retry import retry

# Configure logging.
# Bug fix: logging.FileHandler raises FileNotFoundError when the target
# directory is missing, so ensure "logs/" exists before wiring handlers.
os.makedirs('logs', exist_ok=True)

logging.basicConfig(
    level=logging.DEBUG,  # Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/app.log'),  # Log to a file
        logging.StreamHandler()  # Log to the console
    ]
)

# Create a module-level logger instance (per-module logger convention).
logger = logging.getLogger(__name__)


class Item(BaseModel):
    """Request body for the generation endpoints: a single user prompt."""
    prompt: str

class LocalLLMClinet:
    """HTTP client for a local OpenAI-compatible chat-completions server.

    NOTE(review): the class name contains a typo ("Clinet" for "Client");
    it is kept unchanged because callers reference it by this name.
    """

    def __init__(self):
        # Endpoint of the locally served model (OpenAI-compatible API).
        self.url = "http://localhost:8000/v1/chat/completions"
        self.model_name = "Qwen"
        logger.info("LocalLLMClinet initialized")

    def _build_payload(self, prompt, stream):
        """Return the chat-completions request body for *prompt*.

        Args:
            prompt: user prompt text.
            stream: whether the server should stream the response.
        """
        return {
            "model": self.model_name,
            "messages": [
                {
                    # Bug fix: the prompt comes from the end user, so it must
                    # be sent with role "user" (it was previously "assistant").
                    "role": "user",
                    "content": prompt,
                },
            ],
            "max_tokens": 800,
            "top_k": -1,
            "top_p": 1,
            "temperature": 0.7,
            "ignore_eos": False,
            "stream": stream,
        }

    @retry(tries=20, delay=2)
    def client_infer(self, prompt):
        """Run a blocking (non-streaming) completion.

        Args:
            prompt: user prompt text.

        Returns:
            dict: ``{"response": <model reply text>}``.

        Raises:
            requests.RequestException / ValueError / KeyError on transport,
            HTTP-status, JSON, or schema errors; @retry re-attempts them.
        """
        logger.info("Starting client_infer with prompt: %s", prompt)
        try:
            data = self._build_payload(prompt, stream=False)
            # Timeout added so a hung server cannot block the worker forever.
            response = requests.post(self.url, json=data, timeout=120)
            # Bug fix: previously HTTP error bodies were parsed as success JSON.
            response.raise_for_status()
            result = response.json()['choices'][0]['message']['content']
            logger.info("client_infer completed successfully")
            return {"response": result}
        except Exception as e:
            logger.error(f"Error in client_infer: {e}", exc_info=True)
            raise  # bare raise preserves the original traceback

    def client_infer_stream(self, prompt):
        """Run a streaming completion, yielding SSE-formatted text chunks.

        NOTE(review): the @retry decorator was removed here — on a generator
        function it is a no-op, because calling the function only creates the
        generator; any exception is raised during iteration, outside retry's
        wrapper, so no retry could ever trigger.

        Args:
            prompt: user prompt text.

        Yields:
            str: ``"data: <content> \\n\\n"`` chunks on success, or a
            human-readable error description on failure.
        """
        logger.info("Starting client_infer_stream with prompt: %s", prompt)
        try:
            data = self._build_payload(prompt, stream=True)
            # Bug fix: stream=True is required so requests does not buffer the
            # whole response before iter_lines(); timeout guards a dead server.
            response = requests.post(self.url, json=data, stream=True, timeout=120)
            if response.status_code != 200:
                logger.error(f"Request failed with status code: {response.status_code}")
                yield f"Request failed with status code: {response.status_code}"
                return

            for line in response.iter_lines():
                if not line:  # Skip blank lines (SSE heartbeat / separators)
                    continue

                # Remove possible 'data:' prefix (compatible with SSE format)
                line = line.decode('utf-8').strip()
                if line.startswith("data:"):
                    line = line[5:].strip()

                # End-of-stream marker (OpenAI-style [DONE])
                if line == "[DONE]":
                    break

                try:
                    json_data = json.loads(line)
                    # Extract only choices[0].delta.content, if present.
                    if (
                            isinstance(json_data, dict)
                            and "choices" in json_data
                            and len(json_data["choices"]) > 0
                            and "delta" in json_data["choices"][0]
                            and "content" in json_data["choices"][0]["delta"]
                    ):
                        content = json_data["choices"][0]["delta"]["content"]
                        # (removed stray debug print of every chunk)
                        if content:  # output SSE format
                            logger.debug(f"Stream content: {content}")
                            yield f"data: {content} \n\n"

                except json.JSONDecodeError:
                    logger.warning(f"JSON decode error, raw data: {line}")
                    yield f"JSON decode error, raw data: {line}"
                except KeyError as e:
                    logger.warning(f"Missing expected field: {e}, raw data: {line}")
                    yield f"Missing expected field: {e}, raw data: {line}"
                except Exception as e:
                    logger.error(f"Unknown error: {e}, raw data: {line}", exc_info=True)
                    yield f"Unknown error: {e}, raw data: {line}"

        except Exception as e:
            logger.error(f"Error in client_infer_stream: {e}", exc_info=True)
            yield f"Error in client_infer_stream: {e}"

# FastAPI application and a single shared client instance reused by handlers.
app = FastAPI()
local_llm_client = LocalLLMClinet()

@app.post("/test/")
async def test():
    """Health-check endpoint; always returns 1."""
    logger.info("Test endpoint called")
    return 1

@app.post("/generate/")
async def generate(item: Item):
    """Blocking generation endpoint: returns the full completion at once."""
    logger.info(f"Generate endpoint called with prompt: {item.prompt}")
    return local_llm_client.client_infer(item.prompt)

@app.post("/generate_stream/")
async def generate_steam(item: Item):
    """Streaming generation endpoint: relays chunks as server-sent events."""
    logger.info(f"Generate_stream endpoint called with prompt: {item.prompt}")
    chunk_iter = local_llm_client.client_infer_stream(item.prompt)
    return StreamingResponse(chunk_iter, media_type="text/event-stream")

def test_stream():
    """Yield four canned SSE chunks; used by /test_stream/ as a smoke test.

    The original looped over range(5) with an ``if i == 4: break`` guard,
    which is exactly equivalent to iterating range(4) directly.

    Yields:
        str: ``"data: Chunk <i> \\n\\n"`` for i in 0..3.
    """
    for i in range(4):
        # Trailing blank line terminates each SSE event.
        yield f"data: Chunk {i} \n\n"


@app.post("/test_stream/")
async def stream_test():
    """Smoke-test endpoint that streams the canned chunks from test_stream()."""
    logger.info("Test_stream endpoint called")
    generator = test_stream()
    return StreamingResponse(generator, media_type="text/event-stream")


if __name__ == '__main__':
    # Dev entry point: serve on all interfaces, port 50000 (the backing LLM
    # server is assumed to be on localhost:8000 — see LocalLLMClinet.url).
    logger.info("Starting FastAPI application")
    uvicorn.run(app, host="0.0.0.0", port=50000)

# if __name__ == '__main__':
#     local_llm_client = LocalLLMClinet()
#     local_llm_client.test()
#     print(local_llm_client.client_infer("hello"))
#     result_stream = local_llm_client.client_infer_stream("hello")
#     for line in result_stream:
#         print(line)
