|
import os |
|
import json |
|
import chainlit as cl |
|
from openai import AsyncOpenAI |
|
from config import AUDITOR_PROMPT, CRITIC_PROMPT |
|
from dotenv import load_dotenv |
|
|
|
|
|
# Load environment variables from a local .env file (load_dotenv was imported
# but never invoked — without this call the import is dead code).
load_dotenv()

# Enable Chainlit's automatic instrumentation/tracing of OpenAI API calls.
cl.instrument_openai()

# Default chat-completion parameters; overridden per-session in `start()`.
settings = {
    "model": "gpt-3.5-turbo",
    "temperature": 0,
}
|
|
|
|
|
async def initiate_analysis(file_content):
    """Run the auditor -> critic analysis pipeline over *file_content*.

    Streams each model's response into the chat, then asks the user whether
    to run another round; repeats until the user declines or the action
    prompt times out.

    Args:
        file_content: Raw text of the uploaded contract/source file.
    """
    # Loop instead of the original self-recursion: repeated rounds no longer
    # grow the coroutine/call stack without bound.
    while True:
        await cl.Message(content="π Starting auditor analysis...").send()
        auditor_response = await perform_auditor_analysis(file_content)
        await cl.Message(
            content=f"π Auditor response:\n```json\n{format_response(auditor_response)}\n```"
        ).send()

        await cl.Message(content="π΅οΈ Starting critic analysis...").send()
        critic_response = await perform_critic_analysis(auditor_response)
        await cl.Message(
            content=f"π‘ Critic response:\n```json\n{format_response(critic_response)}\n```"
        ).send()

        continue_further = await cl.AskActionMessage(
            content="Would you like to continue? π",
            actions=[
                # NOTE(review): label text reconstructed — the original literal
                # was split across two source lines (a SyntaxError), apparently
                # by a mangled emoji.
                cl.Action(name="yes", value="yes", label="β Yes, continue"),
                cl.Action(name="no", value="no", label="β No, stop"),
            ],
        ).send()

        # AskActionMessage returns None on timeout; the original called
        # .get() on it unconditionally and would raise AttributeError.
        # Treat no answer the same as "no".
        if not continue_further or continue_further.get("value") != "yes":
            await cl.Message(content="π Thanks for using GPTLens!").send()
            return
|
|
|
async def perform_auditor_analysis(file_content):
    """Ask the auditor model to analyze *file_content* and return its reply.

    Uses the session-global OpenAI `client` and the module-level `settings`
    (model + temperature) configured in `start()`.
    """
    conversation = [
        {"content": AUDITOR_PROMPT, "role": "system"},
        {"content": file_content, "role": "user"},
    ]
    completion = await client.chat.completions.create(
        messages=conversation,
        **settings,
    )
    return completion.choices[0].message.content
|
|
|
async def perform_critic_analysis(auditor_response):
    """Ask the critic model to review the auditor's output and return its reply.

    Mirrors `perform_auditor_analysis`: same session-global `client` and
    module-level `settings`, but with the critic system prompt and the
    auditor's response as the user turn.
    """
    conversation = [
        {"content": CRITIC_PROMPT, "role": "system"},
        {"content": auditor_response, "role": "user"},
    ]
    completion = await client.chat.completions.create(
        messages=conversation,
        **settings,
    )
    return completion.choices[0].message.content
|
|
|
def format_response(response):
    """Pretty-print *response* as 2-space-indented JSON.

    If *response* is not valid JSON, it is returned unchanged so the caller
    can still display the raw model output.
    """
    try:
        parsed = json.loads(response)
    except json.JSONDecodeError:
        # Not JSON — pass the raw text through untouched.
        return response
    return json.dumps(parsed, indent=2)
|
|
|
@cl.on_chat_start
async def prestart():
    """Chainlit chat-start hook; intentionally a no-op.

    All session setup (API key, model, temperature, file upload) happens in
    `start()` when the user sends their first message.
    """
    pass
|
|
|
@cl.on_message
async def start():
    """First-message handler: collect session configuration and run analysis.

    Asks the user for an OpenAI API key, a model choice, a temperature, and
    a source file, then hands the file contents to `initiate_analysis`.
    Stores the OpenAI client in the module-global `client` used by the
    analysis helpers.
    """
    api_key_message = await cl.AskUserMessage(content="π Please enter your OpenAI API key:").send()
    # AskUserMessage returns None on timeout; the original indexed
    # ['output'] unconditionally and would raise TypeError.
    if not api_key_message:
        await cl.Message(content="No API key received. Send another message to retry.").send()
        return
    # NOTE(review): this message was a string literal broken across two
    # source lines (a SyntaxError), apparently by a mangled emoji — rejoined.
    await cl.Message(content="β API key used only for this session, don't worry!").send()

    # The analysis helpers share one client per session via this module global.
    global client
    client = AsyncOpenAI(api_key=api_key_message['output'])

    model_type = await cl.AskActionMessage(
        content="Pick a model!",
        actions=[
            cl.Action(name="gpt3", value="gpt3", label="GPT-3.5 Turbo"),
            cl.Action(name="gpt4", value="gpt4", label="GPT-4"),
            cl.Action(name="gpt4turbo", value="gpt4turbo", label="GPT-4 Turbo Preview"),
        ],
    ).send()
    if model_type:
        # Map the chosen action to a model id; keep the current default on an
        # unrecognized value.
        settings['model'] = {
            "gpt3": "gpt-3.5-turbo",
            "gpt4": "gpt-4",
            "gpt4turbo": "gpt-4-turbo-preview"
        }.get(model_type.get("value"), settings['model'])

    temperature = await cl.AskUserMessage(content="Give the temperature value (between 0 and 1):").send()
    if temperature:
        try:
            temperature_value = float(temperature['output'])
            # Silently keep the default for out-of-range (but numeric) input,
            # matching the original behavior.
            if 0 <= temperature_value <= 1:
                settings["temperature"] = temperature_value
        except ValueError:
            await cl.Message(content="Invalid temperature value provided. Using default.").send()

    # Keep prompting until the user actually uploads a file (AskFileMessage
    # returns None on timeout).
    files = []
    while not files:
        files = await cl.AskFileMessage(
            content="Please upload a text file (.txt) or a Solidity file (.sol) to begin!",
            accept={"text/plain": [".sol", ".txt"]}
        ).send()
    await cl.Message(content="π Setup complete! Ready to start analysis.").send()

    # The loop above guarantees `files` is non-empty, so the original
    # redundant `if files:` guard is dropped.
    text_file = files[0]
    with open(text_file.path, "r", encoding="utf-8") as f:
        file_content = f.read()
    await initiate_analysis(file_content)
|
|