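"""Gradio app that extracts calendar event details from free text into structured JSON."""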
import json

import gradio as gr
from huggingface_hub import InferenceClient

# Client for the hosted SmolLM2 model fine-tuned to turn event text into JSON
client = InferenceClient("pramodkoujalagi/SmolLM2-360M-Instruct-Text-2-JSON")


def respond(message, history: list[tuple[str, str]]):
    # Build a ChatML-style prompt asking the model to extract the event fields as JSON
    formatted_prompt = f"""<|im_start|>user
Extract the relevant event information from this text and organize it into a JSON structure with fields for action, date, time, attendees, location, duration, recurrence, and notes. If a field is not present, return null for that field.

Text: {message}
<|im_end|>
<|im_start|>assistant
"""

    # Stream the generation and accumulate the chunks into the full response
    complete_response = ""
    for chunk in client.text_generation(
        formatted_prompt,
        max_new_tokens=512,
        stream=True,
        temperature=0.1,
        top_p=0.95,
        stop_sequences=["<|im_end|>"],
    ):
        complete_response += chunk

    # Drop any leftover end-of-turn token and surrounding whitespace
    cleaned_response = complete_response.strip()
    cleaned_response = cleaned_response.replace("<|im_end|>", "").strip()
    # Pretty-print valid JSON output; fall back to the raw text otherwise
    try:
        json_obj = json.loads(cleaned_response)
        return json.dumps(json_obj, indent=2)
    except json.JSONDecodeError:
        return cleaned_response


# Chat UI with example prompts covering a few different date and time formats
demo = gr.ChatInterface(
    respond,
    examples=[
        "Plan an exhibition walkthrough on 25th, April 2025 at 3 PM with Harper, Grace, and Alex in the art gallery for 1 hour, bring your bag.",
        "Schedule a meeting with the marketing team on 21/04/2025 at 2 PM in the conference room.",
        "Brainstorming session at the innovation hub on 27th, Jan 2025 at 10:00 am for 90 minutes.",
        "Interview scheduled on 24 - April - 2024 at 10am for 1.5 hours on Microsoft Teams with Chris and Laura."
    ],
    title="Calendar Event Extraction",
    description="Enter text containing event information, and I'll extract the details into a JSON format."
)

if __name__ == "__main__":
    demo.launch()