import sys
import time
import json
import requests
import os
import azure.cognitiveservices.speech as speechsdk
from dotenv import dotenv_values
import platform

import control

# Load configuration from the project-local .env file (does not touch os.environ).
config = dotenv_values(".env")


# OpenAI-compatible chat API settings; send_message() prefers these when set.
api_key = config.get("OPENAI_API_KEY")
custom_base_url = config.get("OPENAI_API_BASE_URL") or 'https://api.openai.com'
model = config.get("OPENAI_API_MODEL")

# Google Gemini API settings (fallback when no OpenAI key is configured).
gemini_api_key = config.get("GEMINI_API_KEY")
gemini_custom_base_url = config.get(
    "GEMINI_API_BASE_URL") or 'https://generativelanguage.googleapis.com'
gemini_model = config.get("GEMINI_API_MODEL")

# Azure Cognitive Services speech settings (keyword wake, STT and TTS).
azure_api_key = config.get("AZURE_API_KEY")
service_region = config.get("SERVICE_REGION")
voice_name = config.get("VOICE_NAME")
last_time = 0  # epoch seconds when the current listening session started (0 = idle)
time_out = 45  # voice-input idle timeout before returning to keyword wake, in seconds

# OpenAI-style conversation history; index 0 holds the system prompt,
# which get_assist_system() fills in at import time.
chat_log = [{
    "role": "system",
    "content": ''
}]
# Gemini-style conversation history ("user"/"model" roles, "parts" payloads).
gemini_chat_log = []


def get_assist_system():
    """Load the 'assist-system' prompt file that sits next to this script,
    append runtime environment details, and install the result as the
    system message in chat_log[0]."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(script_dir, 'assist-system')
    with open(prompt_path, 'r', encoding='utf-8') as prompt_file:
        prompt = prompt_file.read()
    # Augment the static prompt with live host information.
    prompt += '\nThe current system information is ' + platform.system()
    prompt += '\nThe current system width and height are ' + control.system_info()
    prompt += '\n注意对话过程请使用中文'
    chat_log[0]["content"] = prompt


# On import: populate the system prompt once if it has not been set yet.
if (chat_log[0].get("content") == ''):
    get_assist_system()


def send_message(message):
    """Send *message* to the configured chat backend and return the reply text.

    Prefers the OpenAI-compatible API when OPENAI_API_KEY is set, otherwise
    falls back to Gemini. The exchange is appended to the matching module-level
    conversation log so context carries across calls. Returns None when no
    API key is configured at all.

    Raises requests.HTTPError on a non-2xx response and requests.Timeout
    if the backend does not answer within 60 seconds.
    """
    if api_key:
        # Appending mutates the global list in place; no `global` needed.
        chat_log.append({"role": "user", "content": message})
        payload = {
            "model": model,
            "messages": chat_log,
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }
        # timeout= prevents hanging forever on an unreachable endpoint;
        # json= serializes the payload and sets the content type for us.
        response = requests.post(
            custom_base_url + "/v1/chat/completions",
            headers=headers, json=payload, timeout=60)
        # Fail loudly on an error response instead of crashing later with an
        # opaque TypeError while digging through a missing "choices" key.
        response.raise_for_status()
        content = response.json()["choices"][0]["message"]["content"]
        chat_log.append({"role": "assistant", "content": content})
        return content
    if gemini_api_key:
        gemini_chat_log.append({"role": "user", "parts": [{"text": message}]})
        payload = {
            "contents": gemini_chat_log,
        }
        url = (gemini_custom_base_url + "/v1beta/models/" + gemini_model
               + ":generateContent" + '?key=' + gemini_api_key)
        response = requests.post(
            url, headers={"Content-Type": "application/json"},
            json=payload, timeout=60)
        response.raise_for_status()
        content = response.json()["candidates"][0]["content"]["parts"][0]["text"]
        gemini_chat_log.append({"role": "model", "parts": [{"text": content}]})
        return content
    # No backend configured.
    return None


def handle_message_input(user_input):
    """Route one user utterance: exit on a stop phrase, otherwise ask the chat
    backend and either execute the returned command(s) or speak the reply.

    The parameter was renamed from ``input`` to avoid shadowing the builtin;
    all in-file callers pass it positionally, so this is backward compatible.
    """
    # Stop phrases ("stop" / "停止" / "退出") terminate the whole program.
    if "stop" in user_input.lower() or "停止" in user_input or "退出" in user_input:
        sys.exit("退出程序")
    print(f"输入：{user_input}")
    ans = send_message(user_input)
    # If the reply was not a recognized command, show and speak it verbatim.
    if not handle_common_input(ans):
        print(f"回复：{ans}")
        speak_text(ans)


def handle_common_input(ans):
    """Split an assistant reply on the '@@' command separator and run every
    segment through to_do_common().

    Returns True if ANY segment was a recognized command. The previous
    implementation overwrote the flag on each iteration, so only the LAST
    segment's result was reported — a reply whose final segment was plain
    text would be spoken even though commands had already executed.
    """
    # Evaluate every segment first so all commands still run (no
    # short-circuit), then combine the results.
    results = [to_do_common(item) for item in ans.split('@@')]
    return any(results)


def to_do_common(ans):
    """Execute a single command segment produced by the assistant.

    Each command is a ``tag:`` prefix followed by its argument text.
    Returns True when the segment matched a known command, False otherwise.
    """
    def _arg(prefix):
        # Slice off only the leading tag. The previous str.replace() removed
        # EVERY occurrence of the tag, corrupting arguments that happened to
        # contain the tag text themselves.
        return ans[len(prefix):].strip()

    if ans.startswith('common:'):
        # Arbitrary shell command; result is echoed for the operator.
        result = control.use_subprocess(_arg('common:'))
        print(f"执行结果：{result}")
    elif ans.startswith('move_mouse:'):
        # Argument is "x,y" coordinates.
        control.move_mouse(*_arg('move_mouse:').split(','))
    elif ans.startswith('click_mouse:'):
        control.click_mouse()
    elif ans.startswith('click_right_mouse:'):
        control.click_right_mouse()
    elif ans.startswith('screenshot_all:'):
        control.screenshotAll()
    elif ans.startswith('create_file:'):
        # Argument is "path||content" separated by '||'.
        control.create_file(*_arg('create_file:').split('||'))
    elif ans.startswith('save_chat_log:'):
        control.create_file('chat_log.json', json.dumps(
            chat_log, ensure_ascii=False))
    elif ans.startswith('press_key:'):
        control.press_key(_arg('press_key:'))
    elif ans.startswith('hotkey:'):
        control.hotKey(_arg('hotkey:'))
    elif ans.startswith('input_text:'):
        control.input_text(_arg('input_text:'))
    else:
        # Not a command segment.
        return False
    return True


def speech_recognize_keyword_locally_from_microphone():
    """Block until the local keyword model ("xiaopeng.table") is triggered on
    the microphone, then hand off to continuous speech recognition.

    Exits the process with a Chinese error message when required credentials
    are missing.
    """
    if not azure_api_key:
        sys.exit('请设置azure密钥')
    if not service_region:
        sys.exit('请设置azure地区')
    if not api_key and not gemini_api_key:
        sys.exit('请设置openai_api_key 或者 gemini_api_key')

    print('等待唤醒...')
    # Named keyword_model to stop shadowing the module-level OpenAI `model`
    # configuration variable.
    keyword_model = speechsdk.KeywordRecognitionModel("xiaopeng.table")
    keyword_recognizer = speechsdk.KeywordRecognizer()
    result = keyword_recognizer.recognize_once_async(keyword_model).get()
    if result.reason == speechsdk.ResultReason.RecognizedKeyword:
        recognize_from_microphone()


def speak_text(text):
    """Synthesize *text* via Azure TTS and play it on the default speaker.

    Silently does nothing unless voice name, Azure key and region are all
    configured.
    """
    if not (voice_name and azure_api_key and service_region):
        return
    speech_config = speechsdk.SpeechConfig(
        subscription=azure_api_key, region=service_region)
    speech_config.speech_synthesis_voice_name = voice_name
    audio_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
    synthesizer = speechsdk.SpeechSynthesizer(
        speech_config=speech_config, audio_config=audio_config)
    # Block until playback completes.
    synthesizer.speak_text_async(text).get()


def recognize_from_microphone():
    """Run a listening session: repeatedly recognize Chinese speech from the
    default microphone and feed each result to handle_message_input(), until
    `time_out` seconds of session time elapse, then fall back to keyword
    wake-up.

    The original implementation looped via self-recursion, adding a stack
    frame on every utterance and eventually hitting RecursionError on long
    sessions; a while-loop keeps the stack flat.
    """
    if not (azure_api_key and service_region):
        return
    global last_time
    if last_time == 0:
        last_time = int(time.time())

    # Build the recognizer once per session instead of once per utterance.
    speech_config = speechsdk.SpeechConfig(
        subscription=azure_api_key, region=service_region)
    speech_config.speech_recognition_language = "zh-CN"
    audio_config = speechsdk.audio.AudioConfig(use_default_microphone=True)
    speech_recognizer = speechsdk.SpeechRecognizer(
        speech_config=speech_config, audio_config=audio_config)

    while True:
        print("等待说话...")
        result = speech_recognizer.recognize_once_async().get()

        if result.reason == speechsdk.ResultReason.RecognizedSpeech:
            handle_message_input(result.text)

        # Session expired: reset the timer and go back to keyword wake.
        if int(time.time()) - last_time > time_out:
            last_time = 0
            speech_recognize_keyword_locally_from_microphone()
            return


# Entry point: start by waiting for the wake keyword.
if __name__ == "__main__":
    speech_recognize_keyword_locally_from_microphone()
