#!/usr/bin/env python
#-*- coding: utf-8 -*-

from typing import Annotated, Any, Tuple, List, Dict, Literal
from pydantic import BaseModel
import requests
import json

class OllamaClient:
    """Thin HTTP client for a locally running Ollama server's REST API.

    Wraps the /api/tags, /api/show, /api/embed, /api/generate and /api/chat
    endpoints.  All network access goes through :meth:`get` / :meth:`post`.
    """

    # CLI subcommand names; 'test' and 'blobs' are listed but have no
    # corresponding method implemented yet.
    choices = ['test', 'list', 'tags', 'show', 'embed', 'generate', 'chat', 'blobs']

    def __init__(self):
        # Default address of a local Ollama daemon.
        self._base_url = "http://localhost:11434"

    def list(self):
        """Return all subcommand names joined with newlines."""
        return '\n'.join(self.choices)

    def tags(self, **kwargs):
        """List installed models via GET /api/tags.

        Keyword Args:
            silent (bool): suppress printing of model names (default False).

        Returns:
            list | None: the "models" entries on HTTP 200, else None.
        """
        response = self.get(endpoint='/api/tags')
        if response.status_code == 200:
            data = json.loads(response.text)
            models = data["models"]
            # Pop the flag once, before the loop: popping inside the loop
            # consumed it on the first iteration, so silent=True still
            # printed every model after the first.
            if not kwargs.pop('silent', False):
                for model in models:
                    print(model["name"])
            return models

    def show(self, model, **kwargs):
        """Print model metadata via POST /api/show.

        Args:
            model (str): model name.

        Keyword Args:
            verbose (bool): forwarded to the API (default False).

        Returns:
            dict | None: the parsed response on HTTP 200, else None.
        """
        payload = json.dumps({ "model": model, "verbose": kwargs.pop('verbose', False) })
        response = self.post(endpoint='/api/show', data=payload)
        print(f'status_code = {response.status_code}')
        if response.status_code == 200:
            data = json.loads(response.text)
            print(f'license = {data["license"]}')
            print(f'modelfile = {data["modelfile"]}')
            print(f'parameters = {data["parameters"]}')
            print(f'template = {data["template"]}')
            print(f'modified_at = {data["modified_at"]}')
            details = data["details"]
            print(f'details = {details}')
            model_info = data["model_info"]
            print(f'model_info = {model_info}')
            # Return the parsed payload so callers can use it; previously
            # the fields were only printed.
            return data

    def embed(self, model, prompt,
              truncate=True, options=None, keep_alive='5m',
              **kwargs):
        """Compute embeddings via POST /api/embed.

        Args:
            model (str): model name.
            prompt: input text(s) to embed (sent as the "input" field).
            truncate (bool): truncate inputs exceeding the context length.
            options (dict | None): extra model parameters; None means {}.
                (A literal {} default would be shared across calls.)
            keep_alive (str): how long the model stays loaded after the call.

        Returns:
            The "embeddings" field of the parsed response.
        """
        payload = json.dumps({
            "model": model,
            "input": prompt,
            "truncate": truncate,
            "options": {} if options is None else options,
            "keep_alive": keep_alive
            })
        response = self.post(endpoint='/api/embed', data=payload)
        data = json.loads(response.text)
        return data["embeddings"]

    def generate(self, model, prompt,
                 endpoint='/api/generate', stream=True,
                 **kwargs):
        """Generate a completion via POST /api/generate.

        When `stream` is true, prints tokens to stdout as they arrive and a
        statistics summary after the final chunk; otherwise prints the raw
        response body.

        Documented request fields not yet wired up here:
        suffix, images, format, options, system, template.

        Returns:
            requests.Response: the underlying HTTP response.
        """
        data = {
                "model": model,
                "prompt": prompt,
                # Previously omitted, so the server always streamed and
                # stream=False printed newline-delimited JSON chunks.
                "stream": stream
                }
        payload = json.dumps(data)
        response = self.post(endpoint=endpoint, data=payload)
        if stream:
            for line in response.iter_lines():
                chunk = json.loads(line)
                print(chunk["response"], end="")
                if chunk["done"]:
                    print("\n")
                    print("="* 20)
                    print(statistics(chunk))
        else:
            print(response.text)
        return response

    def chat(self, model, prompt, system=None, stream=True, **kwargs):
        """Generate one chat completion via POST /api/chat.

        Sends a two-message conversation (a system message plus the user
        prompt).  When `stream` is true the reply is printed token by token,
        followed by summary statistics from the final ("done") chunk.

        Args:
            model (str): (required) model name.
            prompt (str): user message content; the sentinel '/bye' skips
                the request entirely.
            system (str | None): optional system message content.
            stream (bool): if False, the server returns a single response
                object instead of a stream of chunks.

        Returns:
            requests.Response | None: None when prompt is '/bye'.
        """
        # The original `while prompt != '/bye'` loop returned unconditionally
        # at the end of its first iteration and never re-read the prompt, so
        # it was effectively just this guard.
        if prompt == '/bye':
            return None
        data = {
                "model": model,
                "messages": [
                    { "role": "system", "content": system  },
                    { "role": "user", "content": prompt  }
                    ],
                "stream": stream
                }
        payload = json.dumps(data)
        response = self.post(endpoint='/api/chat', data=payload)
        if stream:
            for line in response.iter_lines():
                chunk = json.loads(line)
                message = chunk["message"]
                print(message["content"], end="")
                if chunk["done"]:
                    print('\n' + '---'*20)
                    print(statistics(chunk))
        return response

    def get(self,
            endpoint : str,
            headers : Dict[str, Any]=None,
            data : Any=None
            ) -> requests.Response:
        """Issue a GET request against the configured base URL."""
        url = f"{self._base_url}{endpoint}"
        return requests.get(url, headers=headers, data=data)

    def post(self,
             endpoint : str,
             headers : Dict[str, Any]=None,
             data : Any=None
             ) -> requests.Response:
        """Issue a POST request; `data` is typically a JSON string payload."""
        url = f"{self._base_url}{endpoint}"
        return requests.post(url, headers=headers, data=data)


# Module-level singleton client shared by the CLI helpers below.
client = OllamaClient()

def list_commands(**kwargs):
    """Print the header line followed by the available subcommand names."""
    for line in ('available commands:', client.list()):
        print(line)


def statistics(data):
    """Project the timing/eval statistics fields out of a final response chunk."""
    fields = (
        "model",
        "created_at",
        "total_duration",
        "load_duration",
        "prompt_eval_count",
        "prompt_eval_duration",
        "eval_count",
        "eval_duration",
    )
    return {field: data[field] for field in fields}



if __name__ == '__main__':
    import argparse

    def _parse_bool(text):
        # argparse's type=bool treats every non-empty string (including
        # "False") as truthy; parse common false spellings explicitly.
        return text.strip().lower() not in ('false', '0', 'no', 'off', '')

    parser = argparse.ArgumentParser(prog='ai')
    parser.add_argument('command',
                        default='list',
                        help='command',
                        choices=OllamaClient.choices)
    parser.add_argument('--model', type=str, default='deepseek-r1:1.5b', help='model')
    parser.add_argument('--prompt', type=str, default='你好', help='prompt')
    parser.add_argument('--stream', type=_parse_bool, default=True,
                        help='stream responses (true/false)')
    args = parser.parse_args()
    if args.command == 'list':
        list_commands()
    else:
        # Dispatch via getattr instead of eval(): interpolating --model and
        # --prompt into source text allowed arbitrary code injection.
        command = getattr(client, args.command)
        print(command(model=args.model, prompt=args.prompt, stream=args.stream))
