import gradio as gr
import torch
import torch.nn as nn
import numpy as np
from PIL import Image
import requests
from io import BytesIO
import easyocr
import cv2
import re
import json
import logging
from typing import Dict, Tuple
import warnings

warnings.filterwarnings("ignore")

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    AutoProcessor,
    AutoModel,
)

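# The analyzer fuses three signals: fine-tuned BERT sentiment over extracted
# text, zero-shot SigLIP image-text matching, and a small keyword heuristic.
# Each component degrades gracefully: public checkpoints serve as fallbacks
# when the local fine-tuned model or the large SigLIP variant is unavailable.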
class EnhancedEnsembleMemeAnalyzer:
    def __init__(self):
        """Initialize the ensemble analyzer with the best available models."""
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"Using device: {self.device}")

        self.setup_models()
        self.setup_ocr()
        self.setup_ensemble_weights()

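    # Model loading strategy: try the local fine-tuned checkpoint first
    # (./fine_tuned_bert_sentiment), then fall back to a public Twitter
    # sentiment model; likewise, SigLIP-Large falls back to SigLIP-Base.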
    def setup_models(self):
        """Initialize the BERT and SigLIP models, with fallbacks on failure."""
        try:
            logger.info("Loading fine-tuned BERT model...")
            self.bert_tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_bert_sentiment")
            self.bert_model = AutoModelForSequenceClassification.from_pretrained("./fine_tuned_bert_sentiment")
            self.bert_model.to(self.device)
            logger.info("✅ Fine-tuned BERT loaded successfully!")

        except Exception as e:
            logger.warning(f"⚠️ Could not load custom BERT model: {e}")
            logger.info("Loading fallback BERT model...")
            self.bert_tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")
            self.bert_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")
            self.bert_model.to(self.device)

        try:
            logger.info("Loading SigLIP-Large model...")
            self.siglip_processor = AutoProcessor.from_pretrained("google/siglip-large-patch16-384")
            self.siglip_model = AutoModel.from_pretrained("google/siglip-large-patch16-384")
            self.siglip_model.to(self.device)

            # Auxiliary classification head over SigLIP image embeddings.
            # It is untrained and not used by the current zero-shot pipeline;
            # its input width must match the vision tower's hidden size
            # (1024 for the ViT-Large backbone; the original 1152 corresponds
            # to the SoViT-400m SigLIP variant, not this checkpoint).
            self.hate_classifier = nn.Sequential(
                nn.Linear(1024, 512),
                nn.ReLU(),
                nn.Dropout(0.3),
                nn.Linear(512, 256),
                nn.ReLU(),
                nn.Dropout(0.2),
                nn.Linear(256, 4),
            ).to(self.device)

            logger.info("✅ SigLIP-Large loaded successfully!")

        except Exception as e:
            logger.warning(f"⚠️ Could not load SigLIP-Large, trying base model: {e}")
            self.siglip_processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
            self.siglip_model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
            self.siglip_model.to(self.device)

            self.hate_classifier = nn.Sequential(
                nn.Linear(768, 256),  # ViT-Base hidden size
                nn.ReLU(),
                nn.Dropout(0.2),
                nn.Linear(256, 4),
            ).to(self.device)

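    # OCR: EasyOCR is the only engine wired up; `use_easyocr` gates the
    # extraction path so the app still runs (text-only) if OCR init fails.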
    def setup_ocr(self):
        """Initialize EasyOCR for text extraction (GPU-accelerated when available)."""
        try:
            self.ocr_reader = easyocr.Reader(['en'], gpu=torch.cuda.is_available())
            logger.info("✅ EasyOCR initialized")
            self.use_easyocr = True

        except Exception as e:
            logger.warning(f"⚠️ OCR initialization issue: {e}")
            self.use_easyocr = False

    def setup_ensemble_weights(self):
        """Initialize ensemble weights and risk thresholds."""
        self.ensemble_weights = {
            'text_sentiment': 0.4,
            'image_content': 0.35,
            'multimodal_context': 0.25
        }

        self.risk_thresholds = {
            'high_risk': 0.8,
            'medium_risk': 0.6,
            'low_risk': 0.4
        }

        self.hate_keywords = [
            'hate', 'kill', 'death', 'violence', 'attack',
            'discriminate', 'racist', 'nazi', 'terrorist'
        ]

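    # Text extraction runs two OCR passes: one on the raw image and one on a
    # preprocessed grayscale copy (morphological closing), which can help with
    # low-contrast meme captions. Results from both passes are merged.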
    def extract_text_from_image(self, image: Image.Image) -> str:
        """Extract text with OCR, retrying on a preprocessed copy of the image."""
        extracted_texts = []

        try:
            if self.use_easyocr:
                # Pass 1: OCR on the raw image.
                img_array = np.array(image)
                results = self.ocr_reader.readtext(img_array, detail=0)
                if results:
                    easyocr_text = ' '.join(results)
                    extracted_texts.append(easyocr_text)
                    logger.info(f"EasyOCR extracted: {easyocr_text[:100]}...")

                # Pass 2: grayscale + morphological closing, then OCR again,
                # keeping any text the first pass missed.
                img_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
                gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
                kernel = np.ones((1, 1), np.uint8)
                processed = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel)

                processed_results = self.ocr_reader.readtext(processed, detail=0)
                processed_text = ' '.join(processed_results) if processed_results else ""
                if processed_text and processed_text not in extracted_texts:
                    extracted_texts.append(processed_text)

        except Exception as e:
            logger.error(f"OCR Error: {e}")

        final_text = ' '.join(extracted_texts) if extracted_texts else ""
        return self.clean_text(final_text)

    def clean_text(self, text: str) -> str:
        """Clean and normalize extracted text."""
        if not text:
            return ""

        text = re.sub(r'\s+', ' ', text)
        text = re.sub(r'[^\w\s\.\!\?\,\-\:\;\(\)]', '', text)

        return text.strip().lower()

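    # Sentiment labels follow the cardiffnlp ordering (0=NEGATIVE, 1=NEUTRAL,
    # 2=POSITIVE); a 2-class head is mapped to NEGATIVE/POSITIVE instead.
    # Example (hypothetical output values):
    #   analyzer.analyze_sentiment("this is awful")
    #   -> {"label": "NEGATIVE", "score": 0.93, "probabilities": [...]}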
    def analyze_sentiment(self, text: str) -> Dict:
        """Analyze sentiment with the BERT model, with confidence calibration."""
        if not text.strip():
            return {"label": "NEUTRAL", "score": 0.5, "probabilities": [0.33, 0.34, 0.33]}

        try:
            inputs = self.bert_tokenizer(
                text,
                return_tensors="pt",
                truncation=True,
                padding=True,
                max_length=512
            ).to(self.device)

            with torch.no_grad():
                outputs = self.bert_model(**inputs)
                probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)

            predicted_class = torch.argmax(probabilities, dim=-1).item()
            confidence = torch.max(probabilities).item()
            probs_list = probabilities[0].cpu().tolist()

            if len(probs_list) == 3:
                label_mapping = {0: "NEGATIVE", 1: "NEUTRAL", 2: "POSITIVE"}
            else:
                label_mapping = {0: "NEGATIVE", 1: "POSITIVE"}

            return {
                "label": label_mapping.get(predicted_class, "UNKNOWN"),
                "score": confidence,
                "probabilities": probs_list
            }

        except Exception as e:
            logger.error(f"Sentiment analysis error: {e}")
            return {"label": "NEUTRAL", "score": 0.5, "probabilities": [0.33, 0.34, 0.33]}

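    # Zero-shot visual classification: the image is scored against paired
    # "hateful" and "safe" text prompts; per-group probability mass is summed
    # and renormalized, and a keyword hit in the OCR text adds a small boost.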
    def classify_multimodal_content(self, image: Image.Image, text: str = "") -> Dict:
        """Classify the image (plus optional text context) with SigLIP."""
        try:
            hate_queries = [
                "hateful meme targeting specific groups",
                "discriminatory content with offensive imagery",
                "violent or threatening visual content",
                "meme promoting hatred or discrimination",
                "offensive visual propaganda",
                "cyberbullying visual content"
            ]

            safe_queries = [
                "harmless funny meme",
                "positive social media content",
                "safe entertainment image",
                "normal social media post",
                "friendly humorous content",
                "non-offensive visual content"
            ]

            # Fold the OCR text into one extra query per group for context.
            if text:
                context_query = f"image with text saying: {text[:100]}"
                hate_queries.append(f"hateful {context_query}")
                safe_queries.append(f"harmless {context_query}")

            all_queries = hate_queries + safe_queries

            # SigLIP was trained with max-length padding; using it here (with
            # truncation for long context queries) is assumed to give more
            # faithful zero-shot scores than padding=True.
            inputs = self.siglip_processor(
                text=all_queries,
                images=image,
                return_tensors="pt",
                padding="max_length",
                truncation=True
            ).to(self.device)

            with torch.no_grad():
                outputs = self.siglip_model(**inputs)
                logits_per_image = outputs.logits_per_image
                # Softmax over the candidate prompts turns similarity logits
                # into a distribution (SigLIP natively uses sigmoid scores).
                probs = torch.softmax(logits_per_image, dim=-1)

            # Sum the probability mass of each prompt group, then renormalize.
            hate_prob = torch.sum(probs[0][:len(hate_queries)]).item()
            safe_prob = torch.sum(probs[0][len(hate_queries):]).item()

            total_prob = hate_prob + safe_prob
            if total_prob > 0:
                hate_prob /= total_prob
                safe_prob /= total_prob

            keyword_boost = self.check_hate_keywords(text)
            hate_prob = min(1.0, hate_prob + keyword_boost * 0.1)

            return {
                "is_hateful": hate_prob > 0.5,
                "hate_probability": hate_prob,
                "safe_probability": safe_prob,
                "confidence": abs(hate_prob - 0.5) * 2,
                "detailed_scores": probs[0].cpu().tolist()
            }

        except Exception as e:
            logger.error(f"Multimodal classification error: {e}")
            return {
                "is_hateful": False,
                "hate_probability": 0.3,
                "safe_probability": 0.7,
                "confidence": 0.5,
                "detailed_scores": []
            }

    def check_hate_keywords(self, text: str) -> float:
        """Check for hate-speech keywords and return a boost factor in [0, 1]."""
        if not text:
            return 0.0

        text_lower = text.lower()
        keyword_count = sum(1 for keyword in self.hate_keywords if keyword in text_lower)

        return min(1.0, keyword_count * 0.2)

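    # URL handling: direct image URLs are opened as-is; HTML pages are scanned
    # for <img> tags with a regex and the first fetchable image wins. A real
    # HTML parser (e.g. BeautifulSoup) would be more robust, but the regex
    # keeps dependencies light.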
    def fetch_social_media_content(self, url: str) -> Dict:
        """Fetch content from a URL with basic error handling."""
        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            }

            response = requests.get(url, headers=headers, timeout=15)
            response.raise_for_status()

            content_type = response.headers.get('content-type', '').lower()

            # Case 1: the URL points directly at an image.
            if any(img_type in content_type for img_type in ['image/jpeg', 'image/png', 'image/gif', 'image/webp']):
                image = Image.open(BytesIO(response.content))
                return {"type": "image", "content": image, "url": url}

            # Case 2: an HTML page; try to pull out an embedded image.
            elif 'text/html' in content_type:
                html_content = response.text
                img_urls = re.findall(r'<img[^>]+src=["\']([^"\']+)["\']', html_content)

                for img_url in img_urls[:3]:
                    try:
                        if not img_url.startswith('http'):
                            img_url = requests.compat.urljoin(url, img_url)

                        img_response = requests.get(img_url, headers=headers, timeout=10)
                        img_response.raise_for_status()

                        image = Image.open(BytesIO(img_response.content))

                        # Strip tags to recover some surrounding page text.
                        text_content = re.sub(r'<[^>]+>', ' ', html_content)
                        text_content = re.sub(r'\s+', ' ', text_content)[:500]

                        return {
                            "type": "webpage",
                            "content": image,
                            "text": text_content,
                            "url": url
                        }

                    except Exception as img_e:
                        logger.warning(f"Failed to fetch image {img_url}: {img_e}")
                        continue

                # No usable image found: fall back to page text only.
                text_content = re.sub(r'<[^>]+>', ' ', html_content)
                text_content = re.sub(r'\s+', ' ', text_content)[:1000]

                return {"type": "text", "content": text_content, "url": url}

            else:
                return {"type": "error", "content": f"Unsupported content type: {content_type}"}

        except requests.RequestException as e:
            logger.error(f"Request error for URL {url}: {e}")
            return {"type": "error", "content": f"Failed to fetch URL: {str(e)}"}
        except Exception as e:
            logger.error(f"General error fetching {url}: {e}")
            return {"type": "error", "content": f"Error processing content: {str(e)}"}

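    # Fusion rule: risk = w_text * sentiment_risk + w_visual * hate_probability,
    # with w_text = 0.4 and w_visual = 0.35 + 0.25 = 0.6 by default; if no text
    # was extracted, w_text is halved and the remainder shifts to vision.
    # Worked example: NEGATIVE sentiment at 0.9 confidence gives a sentiment
    # risk of 0.7*0.9 + 0.1*0.3 = 0.66; with a 0.8 visual hate probability,
    # combined risk = 0.4*0.66 + 0.6*0.8 = 0.744, i.e. MEDIUM (>= 0.6, < 0.8).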
    def ensemble_prediction(self, sentiment_result: Dict, multimodal_result: Dict, extracted_text: str = "") -> Dict:
        """Fuse the component scores into a stratified risk assessment."""
        sentiment_risk = self.sentiment_to_risk_score(sentiment_result["label"], sentiment_result["score"])
        multimodal_risk = multimodal_result["hate_probability"]

        text_weight = self.ensemble_weights['text_sentiment']
        multimodal_weight = self.ensemble_weights['image_content'] + self.ensemble_weights['multimodal_context']

        # Down-weight the text branch when OCR found nothing to analyze.
        if not extracted_text.strip():
            text_weight *= 0.5
            multimodal_weight = 1.0 - text_weight

        combined_risk = (text_weight * sentiment_risk + multimodal_weight * multimodal_risk)

        if combined_risk >= self.risk_thresholds['high_risk']:
            risk_level = "HIGH"
            risk_description = "Potentially harmful content requiring immediate attention"
        elif combined_risk >= self.risk_thresholds['medium_risk']:
            risk_level = "MEDIUM"
            risk_description = "Concerning content that may require review"
        elif combined_risk >= self.risk_thresholds['low_risk']:
            risk_level = "LOW"
            risk_description = "Mildly concerning content, likely safe"
        else:
            risk_level = "SAFE"
            risk_description = "Content appears safe and non-harmful"

        confidence = self.calculate_ensemble_confidence(sentiment_result, multimodal_result)

        return {
            "risk_level": risk_level,
            "risk_score": combined_risk,
            "risk_description": risk_description,
            "confidence": confidence,
            "sentiment_analysis": sentiment_result,
            "multimodal_analysis": multimodal_result,
            "explanation": self.generate_explanation(sentiment_result, multimodal_result, risk_level)
        }

    def sentiment_to_risk_score(self, sentiment_label: str, confidence: float) -> float:
        """Convert a sentiment prediction into a risk score."""
        base_scores = {"NEGATIVE": 0.7, "NEUTRAL": 0.3, "POSITIVE": 0.1}
        base_score = base_scores.get(sentiment_label, 0.3)

        # Blend toward the neutral prior (0.3) as confidence drops, so a
        # low-confidence NEGATIVE prediction does not dominate the ensemble.
        return base_score * confidence + (1 - confidence) * 0.3

    def calculate_ensemble_confidence(self, sentiment_result: Dict, multimodal_result: Dict) -> float:
        """Calculate the overall ensemble confidence."""
        sentiment_conf = sentiment_result["score"]
        multimodal_conf = multimodal_result["confidence"]

        overall_conf = (sentiment_conf + multimodal_conf) / 2

        # When the two branches agree (negative text with hateful visuals, or
        # the reverse), boost confidence by 20%, capped at 1.0.
        sentiment_negative = sentiment_result["label"] == "NEGATIVE"
        multimodal_hateful = multimodal_result["is_hateful"]

        if sentiment_negative == multimodal_hateful:
            overall_conf = min(1.0, overall_conf * 1.2)

        return overall_conf

    def generate_explanation(self, sentiment_result: Dict, multimodal_result: Dict, risk_level: str) -> str:
        """Generate a human-readable explanation of the decision."""
        explanations = []

        sentiment_label = sentiment_result["label"]
        sentiment_conf = sentiment_result["score"]
        explanations.append(f"Text sentiment: {sentiment_label} (confidence: {sentiment_conf:.1%})")

        hate_prob = multimodal_result["hate_probability"]
        explanations.append(f"Visual content analysis: {hate_prob:.1%} probability of harmful content")

        explanations.append(f"Overall risk assessment: {risk_level}")

        return " | ".join(explanations)

# A single module-level instance: the models load once at import time and are
# shared across all Gradio requests.
analyzer = EnhancedEnsembleMemeAnalyzer()

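# Gradio callback: dispatches on the selected input type, runs OCR where an
# image is present, then fuses sentiment and visual scores into one report.
# Returns (markdown report, extracted text, JSON details).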
def analyze_content(input_type: str, text_input: str, image_input: Image.Image, url_input: str) -> Tuple[str, str, str]:
    """Main analysis function for the Gradio interface."""
    try:
        extracted_text = ""
        image_content = None
        source_info = ""

        if input_type == "Text Only" and text_input:
            extracted_text = text_input
            source_info = "Direct text input"

        elif input_type == "Image Only" and image_input:
            image_content = image_input
            extracted_text = analyzer.extract_text_from_image(image_input)
            source_info = "Direct image upload"

        elif input_type == "URL" and url_input:
            content = analyzer.fetch_social_media_content(url_input)
            source_info = f"Content from: {url_input}"

            if content["type"] == "image":
                image_content = content["content"]
                extracted_text = analyzer.extract_text_from_image(content["content"])
            elif content["type"] == "webpage":
                image_content = content["content"]
                extracted_text = content.get("text", "") + " " + analyzer.extract_text_from_image(content["content"])
            elif content["type"] == "text":
                extracted_text = content["content"]
            else:
                return f"❌ Error: {content['content']}", "", ""

        elif input_type == "Text + Image" and text_input and image_input:
            extracted_text = text_input + " " + analyzer.extract_text_from_image(image_input)
            image_content = image_input
            source_info = "Combined text and image input"

        else:
            return "⚠️ Please provide appropriate input based on the selected type.", "", ""

        sentiment_result = analyzer.analyze_sentiment(extracted_text)

        if image_content:
            multimodal_result = analyzer.classify_multimodal_content(image_content, extracted_text)
        else:
            # No image available: assume a mildly safe default visual score.
            multimodal_result = {
                "is_hateful": False,
                "hate_probability": 0.2,
                "safe_probability": 0.8,
                "confidence": 0.5,
                "detailed_scores": []
            }

        final_result = analyzer.ensemble_prediction(sentiment_result, multimodal_result, extracted_text)

        risk_emoji = {"HIGH": "🚨", "MEDIUM": "⚠️", "LOW": "🟡", "SAFE": "✅"}

        result_text = f"""
# 🤖 Enhanced Ensemble Analysis Results

## {risk_emoji[final_result['risk_level']]} Overall Assessment
**Risk Level**: {final_result['risk_level']}
**Risk Score**: {final_result['risk_score']:.1%}
**Confidence**: {final_result['confidence']:.1%}
**Description**: {final_result['risk_description']}

---

## 📊 Detailed Analysis

### 📝 Text Analysis
**Source**: {source_info}
**Extracted Text**: {extracted_text[:300]}{'...' if len(extracted_text) > 300 else ''}
**Sentiment**: {sentiment_result['label']} ({sentiment_result['score']:.1%} confidence)

### 🖼️ Visual Content Analysis
**Contains Harmful Content**: {'Yes' if multimodal_result['is_hateful'] else 'No'}
**Harm Probability**: {multimodal_result['hate_probability']:.1%}
**Safe Probability**: {multimodal_result['safe_probability']:.1%}
**Visual Analysis Confidence**: {multimodal_result['confidence']:.1%}

### 🧠 Ensemble Decision Process
{final_result['explanation']}

---

## 💡 Recommendations
{analyzer.get_recommendations(final_result['risk_level'])}
"""

        detailed_output = json.dumps({
            "risk_assessment": {
                "level": final_result['risk_level'],
                "score": final_result['risk_score'],
                "confidence": final_result['confidence']
            },
            "text_analysis": sentiment_result,
            "visual_analysis": multimodal_result,
            "extracted_text": extracted_text
        }, indent=2)

        return result_text, extracted_text, detailed_output

    except Exception as e:
        logger.error(f"Analysis error: {e}")
        return f"❌ Error during analysis: {str(e)}", "", ""

def get_recommendations(self, risk_level: str) -> str:
    """Get moderation recommendations based on risk level."""
    recommendations = {
        "HIGH": "🚨 **Immediate Action Required**: This content should be reviewed by moderators and potentially removed. Consider issuing warnings or taking enforcement action.",
        "MEDIUM": "⚠️ **Review Recommended**: Content may violate community guidelines. Manual review suggested before taking action.",
        "LOW": "🟡 **Monitor**: Content shows some concerning signals but may be acceptable. Consider additional context before action.",
        "SAFE": "✅ **No Action Needed**: Content appears safe and compliant with community standards."
    }
    return recommendations.get(risk_level, "No specific recommendations available.")


# Attach as a method so analyzer.get_recommendations(...) resolves when the
# Gradio callback runs; defining it inside the class would be equivalent.
EnhancedEnsembleMemeAnalyzer.get_recommendations = get_recommendations

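# Gradio UI: four input modes share one "Analyze" button; the outputs are a
# markdown report, the raw extracted text, and a JSON dump of component scores.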
with gr.Blocks(title="Enhanced Ensemble Meme & Text Analyzer", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🤖 Enhanced Ensemble Meme & Text Analyzer

    **Advanced AI system combining:**
    - 🎯 Fine-tuned BERT (93% accuracy) for sentiment analysis
    - 👁️ SigLIP-Large for visual content understanding
    - 🔍 Advanced OCR for text extraction
    - 🧠 Intelligent ensemble decision making

    **Analyzes content risk across multiple dimensions with explainable AI**
    """)

    with gr.Row():
        input_type = gr.Dropdown(
            choices=["Text Only", "Image Only", "URL", "Text + Image"],
            value="Text Only",
            label="📥 Input Type"
        )

    with gr.Row():
        with gr.Column(scale=2):
            text_input = gr.Textbox(
                label="📝 Text Input",
                placeholder="Enter text content to analyze (tweets, posts, comments)...",
                lines=4
            )
            image_input = gr.Image(
                label="🖼️ Image Input",
                type="pil"
            )
            url_input = gr.Textbox(
                label="🔗 URL Input",
                placeholder="Enter social media URL (Twitter, Reddit, etc.)..."
            )

        with gr.Column(scale=1):
            analyze_btn = gr.Button("🔍 Analyze Content", variant="primary", size="lg")

            gr.Markdown("""
            ### 🎯 Model Information
            - **BERT**: Fine-tuned sentiment analysis (93% accuracy)
            - **SigLIP**: Large-scale vision-language model
            - **OCR**: Multi-pass text extraction
            - **Ensemble**: Weighted decision fusion
            """)

    with gr.Row():
        output_analysis = gr.Markdown(label="📊 Analysis Results")

    with gr.Row():
        with gr.Column():
            output_text = gr.Textbox(label="📝 Extracted Text", lines=4)
        with gr.Column():
            output_detailed = gr.Code(label="🔧 Detailed Results (JSON)", language="json")

    gr.Examples(
        examples=[
            ["Text Only", "This meme is so offensive and targets innocent people. Absolutely disgusting!", None, ""],
            ["Text Only", "Haha this meme made my day! So funny and clever 😂", None, ""],
            ["URL", "", None, "https://i.imgur.com/example.jpg"],
            ["Text + Image", "Check out this hilarious meme I found!", None, ""]
        ],
        inputs=[input_type, text_input, image_input, url_input],
        label="💡 Try these examples"
    )

    analyze_btn.click(
        fn=analyze_content,
        inputs=[input_type, text_input, image_input, url_input],
        outputs=[output_analysis, output_text, output_detailed]
    )

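# Launch notes: share=True requests a public gradio.live link, and binding to
# 0.0.0.0 exposes the app on the local network; both are assumed deliberate
# for demo use and worth reviewing before any production deployment.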
if __name__ == "__main__":
    demo.launch(
        share=True,
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )