from optimum.intel import OVModelForCausalLM
from transformers import AutoTokenizer
from asr import asrReadText
import tts
from tts import ttsSayText
import json
import threading
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
import uvicorn
import time
import cloud
import camera
import lcd
currentStatus = {}  # latest agent status; served as-is by the /api/getStatus endpoint
import socket

# Show a boot banner on the LCD while the LLM loads below (takes a while).
lcd.clear()
lcd.drawText(x = 0, y = 0, text="PocketBuddy Loading...")

def getWifiIP():
    """Return the device's outbound (Wi-Fi) IPv4 address, or 'Unknown'.

    Uses the UDP-connect trick: connect() on a datagram socket sends no
    packet, but makes the OS choose the outbound interface, whose address
    we then read back with getsockname().
    """
    try:
        # 'with' guarantees the socket is closed even when connect() raises
        # (the original leaked the file descriptor on error).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except OSError:
        # Narrowed from a bare except: only networking errors are expected
        # here (e.g. no route when Wi-Fi is down).
        return 'Unknown'

# Load the on-device LLM (OpenVINO export) and its matching tokenizer.
print('Loading LLM...')
model = OVModelForCausalLM.from_pretrained("./models/pb-v24.1/ov", device="GPU")  # inference on the integrated GPU
tok = AutoTokenizer.from_pretrained("./models/pb-v24.1/final")
print('LLM loaded.')

app = FastAPI()

@app.post("/api/getStatus")
async def get_status():
    """Return the current agent status dict (written by updateStatus)."""
    return JSONResponse(content=currentStatus)

# Serve the web UI's static assets.
app.mount("/static", StaticFiles(directory="static"), name="static")

def runWebServer():
    """Blocking uvicorn entry point; meant to run in a background thread."""
    uvicorn.run(app, host="0.0.0.0", port=8000)

# Daemon thread so the web server dies together with the main process.
server_thread = threading.Thread(target=runWebServer)
server_thread.daemon = True
server_thread.start()


def updateStatus(currentNodeName):
    """Publish the agent's current node name for the /api/getStatus endpoint.

    Rebinds the module-level ``currentStatus`` dict to a fresh one-key dict.
    """
    global currentStatus
    currentStatus = dict(currentNodeName=currentNodeName)

# Initial status before the first loop iteration.
updateStatus('Core/PocketBuddy')

# Ready screen on the secondary OLED: banner, LAN IP, project URL.
lcd.clear(fl = lcd.FLAG_OLED)
lcd.drawText(fl = lcd.FLAG_OLED, x = 0, y = 0, text="PocketBuddy ready!")
lcd.drawText(fl = lcd.FLAG_OLED, x = 0, y = 16, text="IP: " + getWifiIP())
lcd.drawText(fl = lcd.FLAG_OLED, x = 0, y = 32, text="https://openbuddy.ai")


def aiAgentLoop():
    """Main voice-assistant loop: ASR -> local LLM tool routing -> TTS.

    The local model is prompted to emit one of three tool calls
    (CALL ANSWER / CALL POWERAI / CALL VISION); cloud services are used
    only for POWERAI and VISION. Runs forever; never returns.
    """
    tts.init(tts.cfgFanchenTTS)
    tts.alsa.setrate(16000)  # assumes the Fanchen voice outputs 16 kHz audio — TODO confirm
    lcd.clear()
    ttsSayText("你好！我已经成功启动所有端侧模型，快来和我聊天吧。", 1.5, 14)
    time.sleep(0.5)
    while True:
        updateStatus('IO/ASR')
        lcd.drawTitle('正在倾听...', bgColor=lcd.COLOR_YELLOW)
        lcd.drawImage(lcd.LCD_W - 80, lcd.LCD_H - 80, '/2.jpg')
        userQuestion = asrReadText()
        # Ignore very short transcripts (likely ASR noise / silence).
        if len(userQuestion) < 3:
            continue
        updateStatus('Core/PocketBuddy')
        lcd.drawTitle('思考中...', bgColor=lcd.COLOR_GREEN)
        prompt = """<|role|>system<|says|>你（assistant）是一个AI智能助手。
    
当前应用：
你正在扮演Buddy，一个智能电子宠物，形象是一只小狗。

你的回复风格：
请你总是给出一个简短、友好的回答，模仿一只电子宠物的回答风格，不要超过100字。

你的能力：
你是一个小型AI，拥有一定的知识储备，可以回答一些常见问题，但不是万事通。
当你无法回答时，你可以调用视觉AI模型或强大AI模型来帮助你回答。

你可以用的工具：
CALL ANSWER：你可以直接回答用户提出的简单、不涉及专业领域的问题，此时你输出“CALL ANSWER”，然后换行并输出你的回答。
CALL POWERAI：当用户询问复杂、专业的问题时，你输出“CALL POWERAI”，调用知识储备更为丰富、能力更为强大的云端AI模型。
CALL VISION：当用户明确要求你读取摄像头里的信息时（例如：请识别摄像头里拍到的东西），你输出“CALL VISION”，调用视觉AI模型。

请注意，如果用户请求与摄像头无关，且不涉及专业领域，你应该CALL ANSWER然后直接回答，而不是CALL任何工具。你的回答必须非常简短，不超过100字。
<|end|>
"""
        # BUGFIX: the original joined turns with "<|end|>\<|role|>" -- the
        # "\<" is an invalid escape kept verbatim, injecting a literal
        # backslash into the prompt instead of the newline the template
        # uses elsewhere. The assistant turn is pre-seeded with "CALL" so
        # the model only completes the tool name.
        prompt += f"<|role|>user<|says|>{userQuestion}<|end|>\n<|role|>assistant<|says|>CALL"
        print(f"User question: {userQuestion}")
        inputs = tok(prompt, return_tensors="pt")
        outputs = model.generate(**inputs, max_new_tokens=100, temperature=0.1, repetition_penalty=1.1, top_p=0.95)
        # Decode only the newly generated tokens (skip the prompt tokens).
        modelOutput = tok.decode(outputs[0][inputs.input_ids.size(1):]).strip()
        if modelOutput.endswith('<|end|>'):
            modelOutput = modelOutput[:-len('<|end|>')]
        print(modelOutput)
        # First line: tool name (ANSWER/POWERAI/VISION); remainder (if any)
        # is the model's direct answer for the ANSWER tool.
        arr = modelOutput.split('\n', 1)
        toolChoice = arr[0].strip()
        generatedAnswer = ''
        if len(arr) > 1:
            generatedAnswer = arr[1].strip()

        if toolChoice == 'VISION':
            # Camera question: snap a photo and ask the cloud vision model.
            lcd.drawTitle('调用云端视觉AI...', bgColor=lcd.COLOR_PURPLE)
            updateStatus('IO/Camera')
            tmpImageFn = 'tmp/photo.jpg'
            camera.take_photo(tmpImageFn)
            with open(tmpImageFn, "rb") as f:
                imgData = f.read()
            updateStatus('Cloud/VLM')
            generatedAnswer = cloud.callVLM(imgData, userQuestion)

        if toolChoice == 'POWERAI':
            # Complex/specialist question: delegate to the stronger cloud LLM.
            lcd.drawTitle('调用云端强AI...', bgColor=lcd.COLOR_PURPLE)
            updateStatus('Cloud/LLM')
            try:
                generatedAnswer = cloud.callLLM(userQuestion)
            except Exception as e:
                # Best effort: on cloud failure, fall back to whatever the
                # local model produced (possibly an empty answer).
                print(f"Error calling LLM: {e}")

        lcd.clear()
        lcd.drawTitle('TTS...', bgColor=lcd.COLOR_PURPLE)
        lcd.drawText(0, 30, generatedAnswer)
        print(f"Generated answer: {generatedAnswer}")
        updateStatus('IO/TTS')
        ttsSayText(generatedAnswer, 1.5, 14)
        time.sleep(0.5)


def translatorLoop():
    """Alternative mode: Chinese speech in -> spoken English translation out.

    Uses the same local LLM as aiAgentLoop but with a translator system
    prompt and the English (LJSpeech) TTS voice. Runs forever.
    """
    tts.init(tts.cfgLJSTTS)
    tts.alsa.setrate(22050)  # assumes the LJSpeech voice outputs 22.05 kHz audio — TODO confirm
    lcd.clear()
    # BUGFIX: typo "I'am" -> "I'm" in the spoken greeting.
    ttsSayText("HI! I'm an AI-powered Chinese-to-English translator.")
    time.sleep(0.5)
    while True:
        updateStatus('IO/ASR')
        lcd.drawTitle('正在倾听...', bgColor=lcd.COLOR_YELLOW)
        userQuestion = asrReadText()
        # Ignore very short transcripts (likely ASR noise / silence).
        if len(userQuestion) < 3:
            continue
        updateStatus('Core/PocketBuddy')
        lcd.drawTitle('思考中...', bgColor=lcd.COLOR_GREEN)
        prompt = """<|role|>system<|says|>你（assistant）是一个翻译助手，请直接输出英语译文，不要输出多余的内容。
<|end|>
"""
        # BUGFIX: the original used "<|end|>\<|role|>" -- an invalid "\<"
        # escape kept verbatim, injecting a literal backslash where the
        # chat template expects a newline between turns.
        prompt += f"<|role|>user<|says|>{userQuestion}<|end|>\n<|role|>assistant<|says|>English Translation:"
        print(f"User question: {userQuestion}")
        inputs = tok(prompt, return_tensors="pt")
        outputs = model.generate(**inputs, max_new_tokens=100, temperature=0.1, repetition_penalty=1.1, top_p=0.95)
        # Decode only the newly generated tokens (skip the prompt tokens).
        modelOutput = tok.decode(outputs[0][inputs.input_ids.size(1):]).strip()
        if modelOutput.endswith('<|end|>'):
            modelOutput = modelOutput[:-len('<|end|>')]
        generatedAnswer = modelOutput.strip()
        lcd.clear()
        lcd.drawTitle('TTS...', bgColor=lcd.COLOR_PURPLE)
        lcd.drawText(0, 30, generatedAnswer)
        updateStatus('IO/TTS')
        ttsSayText(generatedAnswer)
        time.sleep(0.5)
        print(f"Generated answer: {generatedAnswer}")


# Entry point: run the voice-agent loop (never returns).
aiAgentLoop()

# Alternative demo mode — swap the two lines to run the translator instead.
#translatorLoop()
