import asyncio
from unittest.mock import AsyncMock, patch

from flamethrower.models.llm import LLM


def test_llm_new_async_chat_request() -> None:
    test_system_message = 'πŸ€– You are OpenAI'
    test_messages = [
        { 'role': 'system', 'content': test_system_message },
        { 'role': 'user', 'content': 'This is a πŸ”₯ flamethrower test.' }
    ]
    (test_content, test_prompt_tokens, test_completion_tokens, test_model) = ('Test content', 42, 69, 'Test model')
    test_result = (test_content, test_prompt_tokens, test_completion_tokens, test_model)

    with patch('flamethrower.models.llm.OpenAIClient') as mock_openai, \
        patch('flamethrower.models.llm.TokenCounter.add_input_tokens') as mock_add_input_tokens, \
        patch('flamethrower.models.llm.TokenCounter.add_output_tokens') as mock_add_output_tokens:

        llm = LLM(system_message=test_system_message)

        llm_client = mock_openai.return_value
        llm_client.new_basic_async_chat_request = AsyncMock(return_value=test_result)

        loop = asyncio.get_event_loop()
        result = loop.run_until_complete(llm.new_async_chat_request(test_messages))

        mock_add_input_tokens.assert_called_once_with(test_prompt_tokens, test_model)
        mock_add_output_tokens.assert_called_once_with(test_completion_tokens, test_model)

        assert result == test_content


def test_llm_new_json_request() -> None:
    test_system_message = 'πŸ€– You are OpenAI'
    test_messages = [
        { 'role': 'system', 'content': test_system_message },
        { 'role': 'user', 'content': 'Return a json of a random Person with a name and age.' }
    ]
    (test_content, test_prompt_tokens, test_completion_tokens, test_model) = (
        '{ person: { name: "Ragnaros the Firelord", age: 9000 } }', 42, 69, 'Test model'
    )
    test_result = (test_content, test_prompt_tokens, test_completion_tokens, test_model)

    with patch('flamethrower.models.llm.OpenAIClient.new_basic_chat_request',
               return_value=test_result
        ) as mock_new_basic_chat_request, \
        patch('flamethrower.models.llm.TokenCounter.add_input_tokens') as mock_add_input_tokens, \
        patch('flamethrower.models.llm.TokenCounter.add_output_tokens') as mock_add_output_tokens:

        llm = LLM(system_message=test_system_message)

        result = llm.new_chat_request(test_messages)
        assert result == test_content

        mock_new_basic_chat_request.assert_called_once_with(test_messages)
        mock_add_input_tokens.assert_called_once_with(test_prompt_tokens, test_model)
        mock_add_output_tokens.assert_called_once_with(test_completion_tokens, test_model)
import asyncio
from unittest.mock import AsyncMock, patch

from flamethrower.models.openai_client import OpenAIClient
from openai.types.completion_usage import CompletionUsage
from openai.types.chat.chat_completion import ChatCompletion, Choice as BasicChoice
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, Choice as ChunkChoice


def test_openai_client_init() -> None:
    test_message = 'test_message'
    test_model = 'test_model'
    test_api_key = 'test_api_key'

    with patch('flamethrower.utils.key_handler.get_api_key', return_value=test_api_key):
        client = OpenAIClient(system_message=test_message, model=test_model)

        assert client.system_message == test_message
        assert client.model == test_model
        assert client.client.api_key == test_api_key


def test_openai_client_new_basic_chat_request() -> None:
    test_message, test_model = 'test_message', 'test_model'
    test_prompt_tokens, test_completion_tokens = 42, 69
    test_messages = [
        { 'role': 'system', 'content': 'You are OpenAI.' },
        { 'role': 'user', 'content': 'Say "This is a πŸ”₯ flamethrower test."' }
    ]
    test_content = 'This is a πŸ”₯ flamethrower test.'
    test_response = ChatCompletion(
        id='chatcmpl-123',
        object='chat.completion',
        created=1677652288,
        model='gpt-3.5-turbo-0613',
        system_fingerprint='fp_fingerprint',
        choices=[
            BasicChoice(
                index=0,
                message={ 'role': 'assistant', 'content': test_content },
                logprobs=None,
                finish_reason='stop'
            )
        ],
        usage=CompletionUsage(
            prompt_tokens=42,
            completion_tokens=69,
            total_tokens=111
        )
    )

    with patch('flamethrower.models.openai_client.OpenAI') as mock_openai, \
        patch('flamethrower.models.openai_client.OpenAIClient.get_token_usage',
              return_value=(test_prompt_tokens, test_completion_tokens)
        ):
        client = OpenAIClient(system_message=test_message, model=test_model)

        model = mock_openai.return_value
        model.chat.completions.create.return_value = test_response

        response = client.new_basic_chat_request(test_messages)

        assert response == (test_content, test_prompt_tokens, test_completion_tokens, test_model)
def test_openai_client_new_streaming_chat_request() -> None:
    test_message, test_model = 'test_message', 'test_model'
    test_messages = [
        { 'role': 'system', 'content': 'You are OpenAI.' },
        { 'role': 'user', 'content': 'Say "This is a πŸ”₯ flamethrower test."' }
    ]
    test_contents = ['This', 'is', 'a', 'πŸ”₯', 'flamethrower', 'test.']

    # One chunk per content fragment, followed by a final empty chunk with finish_reason='stop'.
    test_responses = [
        ChatCompletionChunk(
            id='chatcmpl-123',
            object='chat.completion.chunk',
            created=1677652288,
            model='gpt-3.5-turbo-0613',
            system_fingerprint='fp_fingerprint',
            choices=[
                ChunkChoice(
                    index=0,
                    delta={ 'role': 'assistant', 'content': content },
                    logprobs=None,
                    finish_reason=None
                )
            ],
        )
        for content in test_contents
    ] + [
        ChatCompletionChunk(
            id='chatcmpl-123',
            object='chat.completion.chunk',
            created=1677652288,
            model='gpt-3.5-turbo-0613',
            system_fingerprint='fp_fingerprint',
            choices=[
                ChunkChoice(
                    index=0,
                    delta={ 'role': 'assistant', 'content': '' },
                    logprobs=None,
                    finish_reason='stop'
                )
            ],
        )
    ]

    with patch('flamethrower.models.openai_client.OpenAI') as mock_openai:
        client = OpenAIClient(system_message=test_message, model=test_model)

        model = mock_openai.return_value
        model.chat.completions.create.return_value = test_responses

        stream = client.new_streaming_chat_request(test_messages)
        assert stream is not None

        idx = 0
        for chunk in stream:
            assert chunk == test_contents[idx]
            idx += 1
def test_openai_client_new_basic_async_chat_request() -> None:
    test_message, test_model = 'test_message', 'test_model'
    test_prompt_tokens, test_completion_tokens = 42, 69
    test_messages = [
        { 'role': 'system', 'content': 'You are OpenAI.' },
        { 'role': 'user', 'content': 'Say "This is a πŸ”₯ flamethrower test."' }
    ]
    test_content = 'This is a πŸ”₯ flamethrower test.'
    test_response = ChatCompletion(
        id='chatcmpl-123',
        object='chat.completion',
        created=1677652288,
        model='gpt-3.5-turbo-0613',
        system_fingerprint='fp_fingerprint',
        choices=[
            BasicChoice(
                index=0,
                message={ 'role': 'assistant', 'content': test_content },
                logprobs=None,
                finish_reason='stop'
            )
        ],
        usage=CompletionUsage(
            prompt_tokens=42,
            completion_tokens=69,
            total_tokens=111
        )
    )

    with patch('flamethrower.models.openai_client.AsyncOpenAI') as mock_openai, \
        patch('flamethrower.models.openai_client.OpenAIClient.get_token_usage',
              return_value=(test_prompt_tokens, test_completion_tokens)
        ):
        client = OpenAIClient(system_message=test_message, model=test_model)

        model = mock_openai.return_value
        model.chat.completions.create = AsyncMock(return_value=test_response)

        loop = asyncio.get_event_loop()
        response = loop.run_until_complete(client.new_basic_async_chat_request(test_messages))

        assert response == (test_content, test_prompt_tokens, test_completion_tokens, test_model)


def test_openai_client_new_json_request() -> None:
    """
    Basically the same as `basic_chat_request`
    """
    test_message, test_model = 'test_message', 'test_model'
    test_prompt_tokens, test_completion_tokens = 42, 69
    test_messages = [
        { 'role': 'system', 'content': 'You are OpenAI.' },
        { 'role': 'user', 'content': 'Return a json of a random Person with a name and age.' }
    ]
    test_content = '{ person: { name: "Ragnaros the Firelord", age: 9000 } }'
    test_response = ChatCompletion(
        id='chatcmpl-123',
        object='chat.completion',
        created=1677652288,
        model='gpt-3.5-turbo-0613',
        system_fingerprint='fp_fingerprint',
        choices=[
            BasicChoice(
                index=0,
                message={ 'role': 'assistant', 'content': test_content },
                logprobs=None,
                finish_reason='stop'
            )
        ],
        usage=CompletionUsage(
            prompt_tokens=42,
            completion_tokens=69,
            total_tokens=111
        )
    )

    with patch('flamethrower.models.openai_client.OpenAI') as mock_openai, \
        patch('flamethrower.models.openai_client.OpenAIClient.get_token_usage',
              return_value=(test_prompt_tokens, test_completion_tokens)
        ):
        client = OpenAIClient(system_message=test_message, model=test_model)

        model = mock_openai.return_value
        model.chat.completions.create.return_value = test_response

        response = client.new_json_request(test_messages)

        assert response == (test_content, test_prompt_tokens, test_completion_tokens, test_model)
from flamethrower.utils.colors import *


def get_quota_exceeded_message() -> str:
    return (
        f'You might have {STDIN_RED.decode("utf-8")}exceeded your current quota for OpenAI{STDIN_DEFAULT.decode("utf-8")}.\n\n'
        f'We are working hard to provide a {STDIN_ORANGE.decode("utf-8")}free, open source πŸ”₯ flamethrower server{STDIN_DEFAULT.decode("utf-8")} for your usage.\n\n'
        f'Please check {STDIN_UNDERLINE.decode("utf-8")}https://github.com/scottsus/flamethrower{STDIN_DEFAULT.decode("utf-8")} for updates!'
    )
class QuotaExceededException(Exception):
    """Raised when the user has exceeded their quota."""
    pass
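For context, here is a minimal usage sketch showing how `QuotaExceededException` and `get_quota_exceeded_message()` are intended to fit together. The `call_openai_api` function is a hypothetical stand-in rather than code from the repo, and the sketch assumes it lives alongside the two definitions above.

```python
# Hypothetical sketch: surface a friendly notice when an OpenAI quota error is raised.
# `call_openai_api` is a placeholder for whatever code actually talks to OpenAI.
def call_openai_api() -> str:
    raise QuotaExceededException()  # e.g. translated from an HTTP 429 response

def run_request() -> None:
    try:
        print(call_openai_api())
    except QuotaExceededException:
        # Fall back to the colored, human-readable message defined above.
        print(get_quota_exceeded_message())
```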
from dependency_injector import containers, providers

from flamethrower.utils.token_counter import TokenCounter


class LMContainer(containers.DeclarativeContainer):
    token_counter = providers.Singleton(TokenCounter)


lm_container = LMContainer()
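As a quick illustration (not taken from the repo): because `token_counter` is registered as a `providers.Singleton`, every caller that resolves it through `lm_container` receives the same `TokenCounter` instance, so input and output token counts accumulate in one place.

```python
# Illustrative sketch: resolving the shared TokenCounter through the singleton provider.
from flamethrower.containers.lm_container import lm_container

counter_a = lm_container.token_counter()
counter_b = lm_container.token_counter()
assert counter_a is counter_b  # Singleton provider returns one shared instance
```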
import sys

from dependency_injector import containers, providers

from flamethrower.shell.command_handler import CommandHandler
from flamethrower.context.conv_manager import ConversationManager
from flamethrower.context.prompt import PromptGenerator
from flamethrower.agents.operator import Operator
from flamethrower.utils.token_counter import TokenCounter
from flamethrower.shell.shell_manager import ShellManager
from flamethrower.shell.printer import Printer
from flamethrower.containers.lm_container import lm_container


class Container(containers.DeclarativeContainer):
    token_counter = providers.Dependency(instance_of=TokenCounter)

    conv_manager = providers.Singleton(ConversationManager)

    tty_settings = providers.Dependency(instance_of=list)
    shell_manager = providers.Singleton(
        ShellManager,
        old_settings=tty_settings
    )

    leader_fd = providers.Dependency(instance_of=int)
    printer = providers.Singleton(
        Printer,
        leader_fd=leader_fd,
        stdout_fd=sys.stdout.fileno(),
        conv_manager=conv_manager,
        shell_manager=shell_manager,
        token_counter=token_counter
    )

    prompt_generator = providers.Singleton(
        PromptGenerator,
        conv_manager=conv_manager,
        token_counter=token_counter,
        printer=printer
    )

    base_dir = providers.Dependency(instance_of=str)
    operator = providers.Singleton(
        Operator,
        base_dir=base_dir,
        conv_manager=conv_manager,
        prompt_generator=prompt_generator,
        printer=printer
    )

    command_handler = providers.Singleton(
        CommandHandler,
        conv_manager=conv_manager,
        operator=operator,
        printer=printer,
    )


container = Container()
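Because `token_counter`, `tty_settings`, `leader_fd`, and `base_dir` are `providers.Dependency` placeholders, they must be overridden before any of the singletons can be resolved; the test module below does exactly this with mock values. Here is a rough sketch of what real wiring might look like. The pty pair, tty settings, and base-dir values are placeholder assumptions for illustration, not the repo's actual startup code.

```python
# Sketch only: satisfy the Dependency providers, then resolve the object graph.
import os
import pty
import sys
import termios

from flamethrower.containers.container import container
from flamethrower.containers.lm_container import lm_container

leader_fd, follower_fd = pty.openpty()                          # placeholder pty pair
container.token_counter.override(lm_container.token_counter())
container.tty_settings.override(termios.tcgetattr(sys.stdin))   # assumes an interactive tty
container.leader_fd.override(leader_fd)
container.base_dir.override(os.getcwd())

printer = container.printer()            # now resolvable
command_handler = container.command_handler()
```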
from unittest.mock import patch, mock_open

from flamethrower.containers.container import Container
from flamethrower.shell.command_handler import CommandHandler
from flamethrower.context.conv_manager import ConversationManager
from flamethrower.context.prompt import PromptGenerator
from flamethrower.agents.operator import Operator
from flamethrower.utils.token_counter import TokenCounter
from flamethrower.shell.shell_manager import ShellManager
from flamethrower.shell.printer import Printer
from flamethrower.test_utils.mocks.mock_token_counter import mock_token_counter as mock_tc

"""
Don't use pytest fixtures here: mock_open only seems to work for one level of
abstraction. Needs further investigation.
"""

def mock_container() -> Container:
    with patch('flamethrower.containers.container.lm_container') as mock_lm_container:
        mock_token_counter = mock_tc()
        mock_lm_container.token_counter.return_value = mock_token_counter

        container = Container()
        container.token_counter.override(mock_token_counter)
        container.tty_settings.override([])
        container.leader_fd.override(1)
        container.base_dir.override('/user/tester')

        return container


def test_container_init() -> None:
    with patch('builtins.open', mock_open()):
        container = mock_container()

        assert isinstance(container.conv_manager(), ConversationManager)
        assert isinstance(container.token_counter(), TokenCounter)
        assert isinstance(container.shell_manager(), ShellManager)
        assert isinstance(container.printer(), Printer)
        assert isinstance(container.prompt_generator(), PromptGenerator)
        assert isinstance(container.operator(), Operator)
        assert isinstance(container.command_handler(), CommandHandler)


def test_container_wiring() -> None:
    with patch('builtins.open', mock_open()):
        container = mock_container()

        shell_manager = container.shell_manager()
        assert shell_manager.old_settings == container.tty_settings()

        printer = container.printer()
        assert printer.conv_manager is container.conv_manager()
        assert printer.shell_manager is container.shell_manager()
        assert isinstance(printer.token_counter, TokenCounter)  # best effort

        prompt_generator = container.prompt_generator()
        assert prompt_generator.conv_manager is container.conv_manager()
        assert prompt_generator.printer is container.printer()

        operator = container.operator()
        assert operator.base_dir == container.base_dir()
        assert operator.conv_manager is container.conv_manager()
        assert operator.prompt_generator is container.prompt_generator()
        assert operator.printer is container.printer()

        command_handler = container.command_handler()
        assert command_handler.conv_manager is container.conv_manager()
        assert command_handler.operator is container.operator()
        assert command_handler.printer is container.printer()
from flamethrower.containers.lm_container import LMContainer
from flamethrower.utils.token_counter import TokenCounter


def mock_lm_container() -> LMContainer:
    return LMContainer()


def test_lm_container_init() -> None:
    container = mock_lm_container()

    assert isinstance(container.token_counter(), TokenCounter)
<h1 align='center'>πŸ”₯ flamethrower</h1>

No bugs can survive the test of <span style='color: orange'>fire</span>; not even the ones you wrote into your codebase πŸͺ².

[![GitHub Repo](https://img.shields.io/badge/scottsus-flamethrower-red?&logo=github)](https://github.com/scottsus/flamethrower)
![PyPI](https://img.shields.io/pypi/v/flamethrower.svg)
![Code Size](https://img.shields.io/github/languages/code-size/scottsus/flamethrower.svg)
[![Discord](https://img.shields.io/discord/XP4vVUQKPf.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/XP4vVUQKPf)
![License](https://img.shields.io/github/license/scottsus/flamethrower.svg)
[![Twitter](https://img.shields.io/twitter/follow/susantoscott.svg)](https://twitter.com/susantoscott)

## What is this?

πŸ”₯ flamethrower is an open source, multi-agent, context-intelligent debugger that uses AI superpowers to automate the painful task of debugging. Think of it as a combination of GitHub Copilot's context-awareness and [KillianLucas' Open Interpreter](https://github.com/KillianLucas/open-interpreter), packed into a beautiful shell that works out of the box with any existing repo.

Automate: [ Write Code β†’ Run Action β†’ Check Logs β†’ Repeat ] πŸš€πŸš€

**Main Differentiators**

- πŸ”₯ Automate the most painful part of writing code: print statements & error logs
- ☘️ Specialized context agent for operating within existing repo
- πŸ€– Debugging agent optimized to iteratively brute-force locate and fix bugs
- πŸ“¦ Out of the box support for any unix machine (no VS Code or VS Code alternatives)
- 🎨 Seamless integration into any existing repo; just type `flamethrower`

## Demo

https://github.com/scottsus/flamethrower/assets/88414565/e3c979c0-40ff-4976-aa30-2503a2827742

## Quick Start

<img src='https://github.com/scottsus/flamethrower/assets/88414565/4be238a7-642a-4149-a1ed-98ff7c61f9b8' alt='Quick Start' width='500px'/>

### Install πŸ”₯ flamethrower

```
pip install flamethrower
```

Or, if you have an existing version and are looking to upgrade to the latest version:

```
pip install --upgrade flamethrower
```

### Terminal

Navigate to your current workspace, and simply run `flamethrower`, or `ft` for the pros.

```
cd ./unbelievably/complicated/workspace
flamethrower
```

### Example Usage

Use lowercase letters for commands you run in the shell, like `python main.py` or `node server.ts`.

```
πŸ”₯ flamethrower: Debugging on Autopilot

Instructions:
- ⌨️  Regular shell      Use commands like ls, cd, python hello.py
- πŸ€–  LLM assistance     Start command with a Capital letter, try Who are you?
- πŸ“š  Context            Intelligent context-awareness from command, files, and stdout logs
- πŸͺ΅  Terminal logs      All conversation & code output inside flamethrower is logged

...

$ python main.py -> SOME_ERROR
$ Wtf????  # Literally type this in the terminal
```

An implementation run is initiated with a natural language query that begins with an `uppercase letter`.

## Features

### πŸ’€ AFK Debugging

If you say 'Yes', πŸ”₯ flamethrower will debug in the background while you focus on other tasks at hand. It acts similarly to any other human engineer: adding `print` statements to find the root cause of the issue (which, as we know, is the most annoying part).

We find this pattern strikingly effective, and it is where we believe LAMs have the strongest use case.

If it looks like πŸ”₯ flamethrower is obviously headed in the direction of doom, simply press `CTRL+C` and give it more suggestions or context.
<img src='https://github.com/scottsus/flamethrower/assets/88414565/11886370-1da4-478e-8fac-853fd305621a' alt='AFK' width='500px'/>

### πŸŽ™οΈ Conversation History

As long as any shell command or natural language query happens within the context of πŸ”₯ flamethrower, it is captured by default in the conversation history. That means you can:

- ask about an error that just happened, or happened 2 dialogues ago
- follow up on a previous response provided by πŸ”₯ flamethrower
### πŸŽ™οΈ Conversation History As long as any shell command or natural language query happens within the context of πŸ”₯ flamethrower, then it is by default captured in the conversation history. That means you can: - ask about an error that just happened, or happened 2 dialogues ago - follow up on a previous response provided by πŸ”₯ flamethrower ### πŸ” Prompt Transparency Prompts sent to LLM are transparent and easy to observe. All πŸ”₯ flamethrower metadata are neatly kept in a `.flamethrower` subdirectory, including prompts, conversations, logs, directory info, summaries, and other metadata. <img src='https://github.com/scottsus/flamethrower/assets/88414565/8905018d-41f5-48e8-92f5-da2b0512af3d' alt='Transparency' width='500px'/> ### πŸ„β€β™€οΈ Real Time File Tracking Everytime you send a query, the latest version of your files are sent over, meaning πŸ”₯ flamethrower understands that you changed your files, and are ready to process those changes. <img src='https://github.com/scottsus/flamethrower/assets/88414565/f3f49b91-1cc8-452c-8625-54d88dcb2a42' alt='Context' width='500px'/> ## Motivation for πŸ”₯ flamethrower ### πŸ‘©β€βœˆοΈ GitHub Copilot Closed source GitHub Copilot draws context very effectively, and `Quick Fix` is a neat feature that explains error from stdout logs if the last command returned a non-zero return code. ### πŸ€– Open Interpreter The Open Interpreter, an open-source gem, specializes in crafting new code from the ground up. It's a favorite among data scientists and those needing sophisticated chart plotting, thanks to its iterative approach to achieving desired results. ### πŸ”¬ Research πŸ”₯ flamethrower combines the agency afforded by Large Action Models (LAM) with the workspace awareness of Copilot, allowing it to take context-specific suggestions and continue iteration until a successful outcome. πŸ”₯ flamethrower is workspace-first, and aims to serve software engineers in complex tasks that need a lot of context management. ## πŸ₯‚ Contributing πŸ”₯ flamethrower is everyone's debugger. Fork it for your own use case, and, one PR at a time we can make the world a more bug-free place ✨ just ping me at scottsus@usc.edu and I'll help you get started. ## πŸ›« Project Roadmap - [x] πŸ§ͺ Better testing - [ ] πŸ”­ Telemetry and the ability to opt in/out - [ ] πŸ₯½ LLM Vision to debug visual elements - [ ] πŸ¦™ Running CodeLlama locally - [ ] πŸ€– Other models besides OpenAI - [ ] 🦾 Default model finetuned on telemetry data - [ ] πŸŽ—οΈ VS Code integration - [ ] πŸ’» Browser interface