Spaces:
Sleeping
Sleeping
| import re | |
| import requests | |
| from markdownify import markdownify | |
| from requests.exceptions import RequestException | |
| from smolagents import tool | |
| from huggingface_hub import InferenceClient | |
def visit_webpage(url: str) -> str:
    """Visits a webpage at the given URL and returns its content as a markdown string.

    Args:
        url: The URL of the webpage to visit.

    Returns:
        The content of the webpage converted to Markdown, or an error message if the request fails.
    """
    try:
        # Send a GET request; a timeout prevents the tool from hanging
        # forever on an unresponsive server.
        response = requests.get(url, timeout=30)
        response.raise_for_status()  # Raise an exception for bad status codes (4xx/5xx)

        # Convert the HTML content to Markdown
        markdown_content = markdownify(response.text).strip()

        # Collapse runs of three or more newlines down to a single blank line
        markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)

        return markdown_content
    except RequestException as e:
        return f"Error fetching the webpage: {str(e)}"
    except Exception as e:
        return f"An unexpected error occurred: {str(e)}"
def analyze_image(url: str, prompt: str = None) -> str:
    """Uses a vision model to identify features in and describe an image.

    Args:
        url: The URL of the image to analyze.
        prompt: Specific questions or things you are looking for in the image.
            Can also specify how to format a response. The model will return a
            general description if this is blank.

    Returns:
        Answers to your question(s) or else a textual description of the image.
    """
    model_id = "Qwen/Qwen2.5-VL-32B-Instruct"
    client = InferenceClient()

    # Fall back to a generic description when the prompt is blank
    # (None or empty string), matching the documented behavior.
    if not prompt:
        prompt = "Describe the content of the image in detail."

    # BUG FIX: the original hard-coded a sample image URL here, silently
    # ignoring the `url` argument; the caller-supplied URL is now used.
    model_prompt = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": url}},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    response = client.chat_completion(
        model=model_id,
        messages=model_prompt,
        max_tokens=1000,
        temperature=0.7,
    )
    # BUG FIX: the original assigned the description but never returned it.
    return response.choices[0].message.content