import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

# Load the model from Hugging Face
model_id = "OttoYu/Tree-Dbh"  # Replace with your own model path if you have a local fine-tuned copy
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# Set up the inference pipeline
text_classification = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
    return_all_scores=True,
)

# Define a function to get the predicted tree height and crown spread for a given dbh
def predict_tree_properties(dbh):
    # Prepare the input text
    input_text = f"dbh: {dbh}"
    # Get the per-label scores; with return_all_scores=True the pipeline returns
    # a list of per-label dicts for each input, e.g. [[{'label': ..., 'score': ...}, ...]]
    results = text_classification(input_text)
    probs = [entry["score"] for entry in results[0]]
    # Convert the scores to tree height and crown spread
    tree_height = probs[0] * 100  # Scale the score to 0-100
    crown_spread = probs[1] * 10  # Scale the score to 0-10
    # Return the predicted tree properties
    return {"tree_height": tree_height, "crown_spread": crown_spread}

# Define a function to get user input and display the predicted tree properties
def run_inference():
    # Get user input for dbh and convert it to a number
    dbh = float(input("Enter the dbh value (in cm): "))
    # Make the prediction and display the results
    tree_properties = predict_tree_properties(dbh)
    print(f"Predicted Tree Height: {tree_properties['tree_height']:.2f} m")
    print(f"Predicted Crown Spread: {tree_properties['crown_spread']:.2f} m")

# Run the inference only when the script is executed directly
if __name__ == "__main__":
    run_inference()
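
# Illustrative usage sketch: this assumes the model exposes two labels whose
# scores are mapped to height and crown spread as in predict_tree_properties
# above. A non-interactive call would look like:
#
#     properties = predict_tree_properties(25.0)
#     print(properties)  # e.g. {'tree_height': 73.0, 'crown_spread': 2.7}
#
# The raw pipeline output for a single string is a nested list of per-label
# dicts, e.g. [[{'label': 'LABEL_0', 'score': 0.73}, {'label': 'LABEL_1', 'score': 0.27}]].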