from transformers import pipeline
from PIL import Image

# Load a pre-trained image classification model.
# Here we use a Vision Transformer (ViT) checkpoint from Hugging Face; the
# "-in21k" variant is a pre-trained backbone that is meant to be fine-tuned,
# so for real damage detection it should first be fine-tuned on labelled
# solar panel images. Otherwise it will not emit the damage labels below.
model = pipeline("image-classification", model="google/vit-base-patch16-224-in21k")

# Class labels for the solar panel damage types
damage_classes = ['cracked', 'dusted', 'shaded', 'overheated']
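# If a checkpoint fine-tuned on these four damage classes is available, it can
# be loaded in place of the generic backbone above (the repo name here is
# purely a hypothetical placeholder), e.g.:
#   model = pipeline("image-classification", model="your-username/vit-solar-panel-damage")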
def predict_damage(image_path):
    # Open the image and make sure it is in RGB mode
    # (the pipeline handles resizing and normalisation)
    image = Image.open(image_path).convert("RGB")

    # Get predictions from the model: a list of {'label', 'score'} dicts,
    # sorted by score in descending order
    result = model(image)

    # Take the class with the highest confidence
    predicted_class = result[0]['label']
    confidence = result[0]['score']

    # Map the predicted label to one of the known damage types
    if predicted_class in damage_classes:
        return f"This panel is {predicted_class} (Confidence: {confidence:.2f})"
    else:
        return "Unable to determine the damage type"