import ollama

def api_generate(text: str, model_name: str = 'deepseek-r1:1.5b', context: list = None):
    """Stream a completion from Ollama, printing tokens as they arrive.

    Args:
        text: The prompt text to send to the model.
        model_name: Ollama model name; defaults to 'deepseek-r1:1.5b'.
        context: Conversation context returned by a previous call, or None
            for a fresh conversation.

    Returns:
        A ``(response_text, context)`` tuple. ``context`` is the new context
        from the final chunk (or the old one if the API returned none).
        Returns ``(None, None)`` if the request failed.
    """
    print(f'提问：{text}')

    # Build the request; only include 'context' when we actually have one,
    # since passing context=None is not the same as omitting it.
    params = {
        'stream': True,
        'model': model_name,
        'prompt': text,
    }
    if context is not None:
        params['context'] = context

    try:
        stream = ollama.generate(**params)
    except Exception as e:
        print(f"生成文本时发生错误: {e}")
        return None, None

    print('-' * 40)
    response_text = ''
    for chunk in stream:
        if not chunk.get('done'):
            piece = chunk.get('response', '')
            response_text += piece
            print(piece, end='', flush=True)
        else:
            print('\n')
            print('-' * 40)
            # The Ollama API reports total_duration in nanoseconds;
            # convert to seconds before printing.
            total_duration = chunk.get('total_duration', 0) / 1e9
            print(f'总耗时：{total_duration:.2f}秒')
            print('-' * 40)
            # Keep the old context if the final chunk carries none.
            return response_text, chunk.get('context', context)

    # Defensive fallback: the stream ended without a 'done' chunk.
    # Return what was collected so the caller's tuple unpack still works
    # (the original implicitly returned None here, breaking the caller).
    return response_text, context

if __name__ == '__main__':
    # Conversation context carried across turns; None means a fresh session.
    context = None

    while True:
        question = input("\n请输入您的问题(输入'q'退出)：")

        # Exit commands end the loop.
        if question.lower() in ['q', 'exit']:
            print("对话结束!")
            break

        # Stream the answer; unpack into a temporary so a failed call
        # (which returns (None, None)) does not wipe out the accumulated
        # conversation context.
        response, new_context = api_generate(text=question, context=context)
        if response is None:
            print("由于错误无法获取响应，请检查日志。")
        else:
            # Only commit the new context on success.
            context = new_context