# import streamlit as st
from textual.app import App, ComposeResult
import json
import requests
import os


def curl(prompt: str, timeout: float = 60.0):
    """POST a generation request to the local Ollama server.

    Args:
        prompt: User prompt forwarded verbatim to the model.
        timeout: Seconds to wait for the server before raising a
            ``requests`` timeout error. The original code had no timeout,
            so a dead server would hang the caller forever.

    Returns:
        A streaming ``requests.Response``; each line of the body is one
        JSON chunk of the model's answer.
    """
    url = 'http://127.0.0.1:11434/api/generate'
    payload = {
        'model': 'qwen2.5:0.5b',
        'prompt': prompt,
    }
    # `json=` serializes the payload and sets the
    # `Content-Type: application/json` header in one step.
    response = requests.post(
        url,
        json=payload,
        stream=True,
        allow_redirects=True,
        timeout=timeout,
    )
    return response

def generate(prompt: str):
    """Stream the model's answer for *prompt*, yielding text chunks.

    On a network failure or non-200 status code, yields a single
    human-readable error string and stops.
    """
    try:
        response = curl(prompt)
    except Exception:
        # Bug fix: the original had a bare `except:` with no `return`,
        # so after yielding the error it fell through and touched the
        # unbound `response` variable, raising NameError.
        yield '请求异常，请检查网络连接.'
        return

    # Check the HTTP status code before consuming the stream.
    if response.status_code != 200:
        yield f'请求失败，状态码: {response.status_code}'
        return

    for line in response.iter_lines():
        if not line:
            # iter_lines() may emit empty keep-alive lines; json.loads
            # would raise on them.
            continue
        answer = json.loads(line.decode(errors='ignore'))
        # The final chunk carries done=True and an empty `response`
        # field (see sample transcript below), so breaking before the
        # yield loses nothing.
        if answer['done']:
            break
        yield answer['response']



from textual.widgets import Header, Footer, Input, Static, Markdown
from textual.reactive import reactive,var
from textual.containers import Vertical, VerticalScroll
from textual import on, work

class Prompt(Markdown):
    """Markdown subclass — not instantiated anywhere in this file;
    presumably a placeholder for rendering the user's prompt (TODO confirm)."""

class ChatOllamaApp(App):
    """Minimal Textual chat UI that streams answers from a local Ollama server."""

    # Focus the input box on startup.
    AUTO_FOCUS = "Input"

    # Reactive buffer mirrored into the output widget by watch_message.
    message: var[str] = var("")

    def compose(self) -> ComposeResult:
        """Build the UI: header, scrollable markdown output, input box, footer."""
        yield Header()
        with VerticalScroll():
            yield Markdown("", id="output")
        yield Input(placeholder="请输入你的问题", id="input")
        yield Footer()

    def on_mount(self) -> None:
        """Cache widget references once the DOM is ready."""
        # Fix: query by id rather than by bare type so that adding a
        # second Input/Markdown widget later cannot break these lookups.
        self.input = self.query_one("#input", Input)
        self.output = self.query_one("#output", Markdown)

    @on(Input.Submitted)
    def on_submitted_message(self, event: Input.Submitted) -> None:
        """Handle Enter in the input box: send the prompt to the model."""
        text = event.value
        self.send_prompt(text)
        self.notify(text)
        # Fix: clear the field so the next question starts from empty
        # instead of keeping the previous prompt's text.
        self.input.value = ""

    @work(thread=True)
    def send_prompt(self, text: str) -> None:
        """Worker thread: accumulate chunks from generate() into the output."""
        accumulated = ""
        for chunk in generate(text):
            if isinstance(chunk, str):
                accumulated += chunk
                # Widget updates must be marshalled back to the UI thread.
                self.call_from_thread(self.output.update, accumulated)

    def watch_message(self) -> None:
        """Reflect changes of the `message` reactive into the output widget."""
        self.output.update(self.message)

if __name__ == "__main__":
    # Launch the chat TUI with a customized header title/subtitle.
    chat_app = ChatOllamaApp()
    chat_app.title = 'Ollama Qwen2.5 App'.title()
    chat_app.sub_title = '当前运行在 2c/2g 服务器虚拟机中，可能会有一些慢.'
    chat_app.run()

# st.title('Ollama Qwen2.5')

# 获取环境变量
# ollama_host = os.environ.get('OLLAMA_HOST', 'localhost')
# if ollama_host != 'localhost':
#     st.write('当前运行在 2c/2g 服务器虚拟机中，可能会有一些慢.')
    
# if 'channel' not in st.session_state:  
#     with st.spinner('初始化加载中....'):
#         st.write("对话已就绪.")
#     #     st.chat_message('ai').write("对话已就绪.")

# if 'messages' not in st.session_state:
#     st.session_state.messages = []

# for message in st.session_state.messages:
#     with st.chat_message(message['role']):
#         st.write(message['content'])

# if input := st.chat_input():
#     st.chat_message('user').write(f'{input}')
#     st.session_state.messages.append({'role': 'user', 'avatar':'👧', 'content': input})
    
#     channel = generate(input)
    
#     with st.chat_message('ai'):
#         # with st.spinner('回答中'):
#         st.session_state.output = st.write_stream(channel)
#     st.session_state.messages.append({'role': 'ai', 'avatar':'🤖', 'content': st.session_state.output})

# print(st.session_state.messages)
# print(len(st.session_state.messages))


# curl http://localhost:11435/api/generate -d '{
#   "model": "qwen2.5:0.5b",
#   "prompt": "Why is the sky blue?"
# }'

# curl http://localhost:11435/api/generate -d '{
#   "model": "qwen2.5:0.5b",
#   "prompt": "ping(You should reply to pong. no more)"
# }'

# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:14.991635704Z","response":"P","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.008693472Z","response":"ong","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.025251081Z","response":"!","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.041723138Z","response":" I","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.057079791Z","response":"'m","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.070504678Z","response":" ready","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.085858515Z","response":" for","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.09927591Z","response":" your","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.11461409Z","response":" next","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.13094215Z","response":" question","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.147369888Z","response":".","done":false}
# {"model":"qwen2.5:0.5b","created_at":"2024-10-29T16:50:15.162694905Z","response":"","done":true,"done_reason":"stop","context":[151644,8948,198,2610,525,1207,16948,11,3465,553,54364,14817,13,1446,525,264,10950,17847,13,151645,198,151644,872,198,9989,7,2610,1265,9851,311,77622,6138,151645,198,151644,77091,198,47,644,0,358,2776,5527,369,697,1790,3405,13],"total_duration":318626309,"load_duration":24524869,"prompt_eval_count":37,"prompt_eval_duration":78966000,"eval_count":12,"eval_duration":171120000}


# deploy
# docker pull ollama/ollama
# docker run -d -v /home/ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
# docker exec -it ollama /bin/bash
    # ollama run qwen2.5:0.5b


# curl http://localhost:11434/api/generate -d '{
#   "model": "llama3.2",
#   "prompt": "Why is the sky blue?"
# }'

# @st.dialog("关于此脚本", width="large")
# def readme():
#     st.write("""
#     这是一个由麻法编写的可视化 Ollama Qwen2.5 的 0.5b 模型接口调用测试。

#     如果你需要这个脚本，请在下面点击下载吧！
#     """)

#     if ollama_host != 'localhost':
#         filename = "pages/7_🤖_Ollama_Qwen2.5.py"
#     else:    
#         filename = "streamtli-ollama-qwen2.5.py"
    
#     st.code(open(filename, 'r').read(), language="python")
    
#     if st.download_button('下载我?', open(filename, 'rb'), file_name="streamtli-ollama-qwen2.5.py"):
#         st.success('下载成功!')

# if st.sidebar.button('关于此脚本'):
#     readme()