Alphar3d committed on
Commit
1c34ee2
1 Parent(s): 7955936

Upload 4 files

Files changed (4)
  1. .gitattributes +2 -35
  2. api.txt +1 -0
  3. app.py +101 -0
  4. requirements.txt +3 -0
.gitattributes CHANGED
@@ -1,35 +1,2 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Auto detect text files and perform LF normalization
+ * text=auto
api.txt ADDED
@@ -0,0 +1 @@
+ GOOGLE_API_KEY="AIzaSyDGsO9HM1KDZ0BdZ1RGWP8lC2XR4A_Oz5w"
app.py ADDED
@@ -0,0 +1,101 @@
+ import google.generativeai as genai
+ from pathlib import Path
+ import gradio as gr
+ from dotenv import load_dotenv
+ import os
+
+ # Load environment variables from a .env file
+ load_dotenv()
+
+ # Configure the GenerativeAI API key using the loaded environment variable
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+ # Set up the model configuration for text generation
+ generation_config = {
+     "temperature": 0.4,
+     "top_p": 1,
+     "top_k": 32,
+     "max_output_tokens": 4096,
+ }
+
+ # Define safety settings for content generation
+ safety_settings = [
+     {"category": f"HARM_CATEGORY_{category}",
+      "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
+     for category in ["HARASSMENT", "HATE_SPEECH", "SEXUALLY_EXPLICIT", "DANGEROUS_CONTENT"]
+ ]
+
+ # Initialize the GenerativeModel with the specified model name, configuration, and safety settings
+ model = genai.GenerativeModel(
+     model_name="gemini-pro-vision",
+     generation_config=generation_config,
+     safety_settings=safety_settings,
+ )
+
+
+ # Function to read image data from a file path
+ def read_image_data(file_path):
+     image_path = Path(file_path)
+     if not image_path.exists():
+         raise FileNotFoundError(f"Could not find image: {image_path}")
+     return {"mime_type": "image/jpeg", "data": image_path.read_bytes()}
+
+
+ # Function to generate a response based on a prompt and an image path
+ def generate_gemini_response(prompt, image_path):
+     image_data = read_image_data(image_path)
+     response = model.generate_content([prompt, image_data])
+     return response.text
+
+
+ # Initial input prompt for the plant pathologist
+ input_prompt = """
+ As a highly skilled plant pathologist, your expertise is indispensable in our pursuit of maintaining optimal plant health. You will be provided with information or samples related to plant diseases, and your role involves conducting a detailed analysis to identify the specific issues, propose solutions, and offer recommendations.
+
+ **Analysis Guidelines:**
+
+ 1. **Disease Identification:** Examine the provided information or samples to identify and characterize plant diseases accurately.
+
+ 2. **Detailed Findings:** Provide in-depth findings on the nature and extent of the identified plant diseases, including affected plant parts, symptoms, and potential causes.
+
+ 3. **Next Steps:** Outline the recommended course of action for managing and controlling the identified plant diseases. This may involve treatment options, preventive measures, or further investigations.
+
+ 4. **Recommendations:** Offer informed recommendations for maintaining plant health, preventing disease spread, and optimizing overall plant well-being.
+
+ 5. **Important Note:** As a plant pathologist, your insights are vital for informed decision-making in agriculture and plant management. Your response should be thorough, concise, and focused on plant health.
+
+ **Disclaimer:**
+ *"Please note that the information provided is based on plant pathology analysis and should not replace professional agricultural advice. Consult with qualified agricultural experts before implementing any strategies or treatments."*
+
+ Your role is pivotal in ensuring the health and productivity of plants. Proceed to analyze the provided information or samples, adhering to the structured guidelines above.
+ """
+
+
+ # Function to process uploaded files and generate a response
+ def process_uploaded_files(files):
+     file_path = files[0].name if files else None
+     response = generate_gemini_response(input_prompt, file_path) if file_path else None
+     return file_path, response
+
+
+ # Gradio interface setup
+ with gr.Blocks() as demo:
+     file_output = gr.Textbox()
+     image_output = gr.Image()
+     combined_output = [image_output, file_output]
+
+     # Upload button for users to provide images
+     upload_button = gr.UploadButton(
+         "Click to Upload an Image",
+         file_types=["image"],
+         file_count="multiple",
+     )
+
+     # Set up the upload button to trigger the processing function
+     upload_button.upload(process_uploaded_files, upload_button, combined_output)
+
+ # Launch the Gradio interface with debug mode enabled
+ demo.launch(debug=True)
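
Note on the key wiring in app.py: load_dotenv() reads a file named .env by default, so the committed api.txt is not picked up automatically and os.getenv("GOOGLE_API_KEY") would return None. A minimal sketch of the intended setup follows; the .env file name and placeholder value are illustrative assumptions, not part of this commit.

# .env (hypothetical, placed next to app.py)
# GOOGLE_API_KEY="YOUR_KEY_HERE"

from dotenv import load_dotenv
import os

load_dotenv()                          # reads .env from the working directory by default
# load_dotenv("api.txt")               # alternatively, point python-dotenv at the file shipped here
api_key = os.getenv("GOOGLE_API_KEY")  # returns None if the variable is not set anywhere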
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio
+ google-generativeai
+ python-dotenv
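
To run the Space locally (assuming a clone of this repository and a valid GOOGLE_API_KEY available in the environment), the usual workflow would be; these commands are illustrative and not part of the commit:

pip install -r requirements.txt
python app.py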