from typing import List, Dict, Optional, Union, Generator, Any
from langchain_community.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import tiktoken

def count_tokens(text: str) -> int:
    """Return the number of cl100k_base (GPT-4/ChatGPT) tokens in *text*."""
    return len(tiktoken.get_encoding("cl100k_base").encode(text))

def call_ollama_api(
    messages: List[Dict[str, str]],
    api_key: Optional[str] = None,
    model: str = "deepseek-r1:1.5b",
    temperature: float = 0.7,
    max_tokens: Optional[int] = None,
    stream: bool = False,
    base_url: str = "http://localhost:11434",
) -> Union[Generator[str, None, None], Any]:
    """Call a local Ollama model and return an OpenAI-style response payload.

    Args:
        messages: Chat messages; only entries with ``role == "user"`` are
            concatenated (newline-joined) into the prompt — other roles are
            ignored (existing contract).
        api_key: Unused; kept for signature compatibility with other backends.
        model: Ollama model tag to run.
        temperature: Sampling temperature passed to the model.
        max_tokens: Optional cap on generated tokens, mapped to Ollama's
            ``num_predict`` option (``None`` means no explicit cap).
        stream: When True, attach a stdout-streaming callback and return a
            generator yielding a single delta-style chunk; otherwise return
            a plain response dict.
        base_url: Base URL of the Ollama server.

    Returns:
        A generator of chunk dicts when ``stream`` is True, otherwise a dict
        with ``content``, ``thinking_content``, ``usage``, ``model``,
        ``created`` and ``id`` keys.

    Raises:
        Exception: Wraps any failure from the model call, chaining the
            original error for debugging.
    """
    # Stream generated tokens to stdout as they arrive when streaming is on.
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) if stream else None

    llm = Ollama(
        model=model,
        base_url=base_url,
        temperature=temperature,
        # FIX: max_tokens was previously accepted but ignored; Ollama bounds
        # generation length via num_predict (None leaves the server default).
        num_predict=max_tokens,
        callback_manager=callback_manager,
    )

    # Flatten user messages into one prompt, one message per line.
    prompt = "".join(
        f"{msg['content']}\n" for msg in messages if msg["role"] == "user"
    )

    try:
        input_tokens = count_tokens(prompt)

        response_text = str(llm.invoke(prompt))
        output_tokens = count_tokens(response_text)

        # DeepSeek-R1 style models wrap chain-of-thought in <think>...</think>;
        # split the reasoning off from the final answer when present.
        think_content = ""
        reply_content = response_text
        if "<think>" in response_text and "</think>" in response_text:
            # FIX: str.split("</think>")[1] dropped everything after a second
            # occurrence of the tag; partition keeps the full remainder.
            head, _, tail = response_text.partition("</think>")
            think_content = head.replace("<think>", "").strip()
            reply_content = tail.strip()

        usage = {
            "prompt_tokens": input_tokens,
            "completion_tokens": output_tokens,
            "total_tokens": input_tokens + output_tokens,
        }

        if stream:
            # NOTE(review): not true incremental streaming — the complete
            # response has already been computed above; the generator yields
            # one delta-style chunk purely for OpenAI-API shape parity.
            def stream_generator():
                yield {
                    "choices": [{
                        "delta": {
                            "think_content": think_content,
                            "reply_content": reply_content,
                        }
                    }],
                    "usage": usage,
                }
            return stream_generator()

        return {
            "content": reply_content,
            "thinking_content": think_content,
            "usage": usage,
            "model": model,
            "created": 0,
            "id": "",
        }
    except Exception as e:
        # FIX: chain the original exception so the root-cause traceback
        # survives the re-raise.
        raise Exception(f"Ollama API 调用失败: {str(e)}") from e

def test_ollama_api():
    """Manual smoke test: run one streaming call against a local Ollama server.

    Requires an Ollama instance serving the default model at
    http://localhost:11434. Prints the model's thinking section (if any),
    the reply, and token usage; failures are printed rather than raised.
    """
    messages = [
        {
            "role": "user",
            "content": "思考一下，祖冲之是那个朝代的？"
        }
    ]

    try:
        # Streaming call: the wrapper yields a single chunk whose delta
        # carries the already-separated think/reply sections.
        print("\n测试流式调用:")
        stream_response = call_ollama_api(messages, stream=True)
        if stream_response:
            for chunk in stream_response:
                delta = chunk['choices'][0]['delta']
                think = delta['think_content']
                reply = delta['reply_content']

                if think:
                    print("\n思考过程:")
                    print("-" * 50)
                    print(think)

                print("\n回复内容:")
                print("-" * 50)
                print(reply)

                print(f"\nToken 使用情况: {chunk['usage']}")
                print("-" * 50)

    except Exception as e:
        print(f"测试失败: {str(e)}")

# Run the manual smoke test when executed as a script (no effect on import).
if __name__ == "__main__":
    test_ollama_api()