import gradio as gr
import boto3, json, os, wandb
from dotenv import load_dotenv, find_dotenv

# Load credentials from a local .env file into the environment
_ = load_dotenv(find_dotenv())
aws_access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
aws_secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
wandb_api_key = os.environ["WANDB_API_KEY"]

# Run configuration tracked by Weights & Biases
config = {
    "model": "anthropic.claude-v2",
    "temperature": 1,
}

wandb.login(key=wandb_api_key)
wandb.init(project="bedrock-txt", config=config)
config = wandb.config

# Bedrock runtime client for model invocation
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name="us-west-2",
    aws_access_key_id=aws_access_key_id,
    aws_secret_access_key=aws_secret_access_key,
)


def invoke(prompt):
    # Build the request body in the Claude 2 text-completion format
    body = json.dumps({
        "prompt": "\n\nHuman: " + prompt + "\n\nAssistant: ",
        "max_tokens_to_sample": 300,
        "temperature": config.temperature,
        "top_k": 250,
        "top_p": 0.999,
        "stop_sequences": ["\n\nHuman: "],
    })

    response = bedrock_runtime.invoke_model(
        body=body,
        modelId=config.model,
        accept="application/json",
        contentType="application/json",
    )
    response_body = json.loads(response.get("body").read())
    completion = response_body["completion"]

    # Log each prompt/completion pair to the W&B run
    wandb.log({"prompt": prompt, "completion": completion})
    return completion


gr.close_all()
demo = gr.Interface(
    fn=invoke,
    inputs=[gr.Textbox(label="Prompt", lines=1)],
    outputs=[gr.Textbox(label="Completion", lines=1)],
    title="Generative AI - Language",
    description="Gradio UI using the Amazon Bedrock API with the Anthropic Claude 2 foundation model",
)
demo.launch()
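
# Usage sketch (assumptions: the script is saved as app.py and a .env file sits
# next to it; the variable names match the os.environ lookups above, and the
# values shown are placeholders, not real credentials):
#
#   # .env
#   AWS_ACCESS_KEY_ID=<your-aws-access-key-id>
#   AWS_SECRET_ACCESS_KEY=<your-aws-secret-access-key>
#   WANDB_API_KEY=<your-wandb-api-key>
#
#   $ python app.py    # starts the Gradio app and prints a local URL
#
# To sanity-check the Bedrock call without the UI, invoke() can be called
# directly before demo.launch() (the prompt below is just an example):
#
#   print(invoke("Explain what Amazon Bedrock is in one sentence."))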