danidanidani committed on
Commit
37c00da
1 Parent(s): 69f1449

Upload 105 files

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +71 -0
  2. EXAMPLEvars.env +0 -0
  3. LICENSE +28 -0
  4. app.py +384 -0
  5. notebooks/.ipynb_checkpoints/langchain experimenting-checkpoint.ipynb +843 -0
  6. notebooks/langchain experimenting.ipynb +843 -0
  7. notebooks/llamaindex_llama2_test.ipynb +258 -0
  8. readme.md +56 -0
  9. src/.DS_Store +0 -0
  10. src/assets/bot.png +0 -0
  11. src/assets/cool.png +0 -0
  12. src/assets/flower.jpg +3 -0
  13. src/assets/flower2.jpg +3 -0
  14. src/assets/flower_tech.png +3 -0
  15. src/assets/lights.jpg +0 -0
  16. src/assets/logo_title.png +0 -0
  17. src/assets/logo_title_transparent.png +0 -0
  18. src/assets/plant_images/plant_0.png +3 -0
  19. src/assets/plant_images/plant_1.png +3 -0
  20. src/assets/plant_images/plant_10.png +3 -0
  21. src/assets/plant_images/plant_11.png +3 -0
  22. src/assets/plant_images/plant_12.png +3 -0
  23. src/assets/plant_images/plant_13.png +3 -0
  24. src/assets/plant_images/plant_14.png +3 -0
  25. src/assets/plant_images/plant_15.png +3 -0
  26. src/assets/plant_images/plant_16.png +3 -0
  27. src/assets/plant_images/plant_17.png +3 -0
  28. src/assets/plant_images/plant_18.png +3 -0
  29. src/assets/plant_images/plant_19.png +3 -0
  30. src/assets/plant_images/plant_2.png +3 -0
  31. src/assets/plant_images/plant_20.png +3 -0
  32. src/assets/plant_images/plant_21.png +3 -0
  33. src/assets/plant_images/plant_22.png +3 -0
  34. src/assets/plant_images/plant_23.png +3 -0
  35. src/assets/plant_images/plant_24.png +3 -0
  36. src/assets/plant_images/plant_25.png +3 -0
  37. src/assets/plant_images/plant_26.png +3 -0
  38. src/assets/plant_images/plant_27.png +3 -0
  39. src/assets/plant_images/plant_28.png +3 -0
  40. src/assets/plant_images/plant_29.png +3 -0
  41. src/assets/plant_images/plant_3.png +3 -0
  42. src/assets/plant_images/plant_30.png +3 -0
  43. src/assets/plant_images/plant_31.png +3 -0
  44. src/assets/plant_images/plant_32.png +3 -0
  45. src/assets/plant_images/plant_33.png +3 -0
  46. src/assets/plant_images/plant_34.png +3 -0
  47. src/assets/plant_images/plant_35.png +3 -0
  48. src/assets/plant_images/plant_36.png +3 -0
  49. src/assets/plant_images/plant_37.png +3 -0
  50. src/assets/plant_images/plant_38.png +3 -0
.gitattributes CHANGED
@@ -34,3 +34,74 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  llama-2-7b-chat.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
37
+ src/assets/flower_tech.png filter=lfs diff=lfs merge=lfs -text
38
+ src/assets/flower.jpg filter=lfs diff=lfs merge=lfs -text
39
+ src/assets/flower2.jpg filter=lfs diff=lfs merge=lfs -text
40
+ src/assets/plant_images/plant_0.png filter=lfs diff=lfs merge=lfs -text
41
+ src/assets/plant_images/plant_1.png filter=lfs diff=lfs merge=lfs -text
42
+ src/assets/plant_images/plant_10.png filter=lfs diff=lfs merge=lfs -text
43
+ src/assets/plant_images/plant_11.png filter=lfs diff=lfs merge=lfs -text
44
+ src/assets/plant_images/plant_12.png filter=lfs diff=lfs merge=lfs -text
45
+ src/assets/plant_images/plant_13.png filter=lfs diff=lfs merge=lfs -text
46
+ src/assets/plant_images/plant_14.png filter=lfs diff=lfs merge=lfs -text
47
+ src/assets/plant_images/plant_15.png filter=lfs diff=lfs merge=lfs -text
48
+ src/assets/plant_images/plant_16.png filter=lfs diff=lfs merge=lfs -text
49
+ src/assets/plant_images/plant_17.png filter=lfs diff=lfs merge=lfs -text
50
+ src/assets/plant_images/plant_18.png filter=lfs diff=lfs merge=lfs -text
51
+ src/assets/plant_images/plant_19.png filter=lfs diff=lfs merge=lfs -text
52
+ src/assets/plant_images/plant_2.png filter=lfs diff=lfs merge=lfs -text
53
+ src/assets/plant_images/plant_20.png filter=lfs diff=lfs merge=lfs -text
54
+ src/assets/plant_images/plant_21.png filter=lfs diff=lfs merge=lfs -text
55
+ src/assets/plant_images/plant_22.png filter=lfs diff=lfs merge=lfs -text
56
+ src/assets/plant_images/plant_23.png filter=lfs diff=lfs merge=lfs -text
57
+ src/assets/plant_images/plant_24.png filter=lfs diff=lfs merge=lfs -text
58
+ src/assets/plant_images/plant_25.png filter=lfs diff=lfs merge=lfs -text
59
+ src/assets/plant_images/plant_26.png filter=lfs diff=lfs merge=lfs -text
60
+ src/assets/plant_images/plant_27.png filter=lfs diff=lfs merge=lfs -text
61
+ src/assets/plant_images/plant_28.png filter=lfs diff=lfs merge=lfs -text
62
+ src/assets/plant_images/plant_29.png filter=lfs diff=lfs merge=lfs -text
63
+ src/assets/plant_images/plant_3.png filter=lfs diff=lfs merge=lfs -text
64
+ src/assets/plant_images/plant_30.png filter=lfs diff=lfs merge=lfs -text
65
+ src/assets/plant_images/plant_31.png filter=lfs diff=lfs merge=lfs -text
66
+ src/assets/plant_images/plant_32.png filter=lfs diff=lfs merge=lfs -text
67
+ src/assets/plant_images/plant_33.png filter=lfs diff=lfs merge=lfs -text
68
+ src/assets/plant_images/plant_34.png filter=lfs diff=lfs merge=lfs -text
69
+ src/assets/plant_images/plant_35.png filter=lfs diff=lfs merge=lfs -text
70
+ src/assets/plant_images/plant_36.png filter=lfs diff=lfs merge=lfs -text
71
+ src/assets/plant_images/plant_37.png filter=lfs diff=lfs merge=lfs -text
72
+ src/assets/plant_images/plant_38.png filter=lfs diff=lfs merge=lfs -text
73
+ src/assets/plant_images/plant_39.png filter=lfs diff=lfs merge=lfs -text
74
+ src/assets/plant_images/plant_4.png filter=lfs diff=lfs merge=lfs -text
75
+ src/assets/plant_images/plant_40.png filter=lfs diff=lfs merge=lfs -text
76
+ src/assets/plant_images/plant_41.png filter=lfs diff=lfs merge=lfs -text
77
+ src/assets/plant_images/plant_42.png filter=lfs diff=lfs merge=lfs -text
78
+ src/assets/plant_images/plant_43.png filter=lfs diff=lfs merge=lfs -text
79
+ src/assets/plant_images/plant_44.png filter=lfs diff=lfs merge=lfs -text
80
+ src/assets/plant_images/plant_45.png filter=lfs diff=lfs merge=lfs -text
81
+ src/assets/plant_images/plant_46.png filter=lfs diff=lfs merge=lfs -text
82
+ src/assets/plant_images/plant_47.png filter=lfs diff=lfs merge=lfs -text
83
+ src/assets/plant_images/plant_48.png filter=lfs diff=lfs merge=lfs -text
84
+ src/assets/plant_images/plant_49.png filter=lfs diff=lfs merge=lfs -text
85
+ src/assets/plant_images/plant_5.png filter=lfs diff=lfs merge=lfs -text
86
+ src/assets/plant_images/plant_50.png filter=lfs diff=lfs merge=lfs -text
87
+ src/assets/plant_images/plant_51.png filter=lfs diff=lfs merge=lfs -text
88
+ src/assets/plant_images/plant_52.png filter=lfs diff=lfs merge=lfs -text
89
+ src/assets/plant_images/plant_53.png filter=lfs diff=lfs merge=lfs -text
90
+ src/assets/plant_images/plant_54.png filter=lfs diff=lfs merge=lfs -text
91
+ src/assets/plant_images/plant_55.png filter=lfs diff=lfs merge=lfs -text
92
+ src/assets/plant_images/plant_56.png filter=lfs diff=lfs merge=lfs -text
93
+ src/assets/plant_images/plant_57.png filter=lfs diff=lfs merge=lfs -text
94
+ src/assets/plant_images/plant_58.png filter=lfs diff=lfs merge=lfs -text
95
+ src/assets/plant_images/plant_59.png filter=lfs diff=lfs merge=lfs -text
96
+ src/assets/plant_images/plant_6.png filter=lfs diff=lfs merge=lfs -text
97
+ src/assets/plant_images/plant_60.png filter=lfs diff=lfs merge=lfs -text
98
+ src/assets/plant_images/plant_61.png filter=lfs diff=lfs merge=lfs -text
99
+ src/assets/plant_images/plant_62.png filter=lfs diff=lfs merge=lfs -text
100
+ src/assets/plant_images/plant_63.png filter=lfs diff=lfs merge=lfs -text
101
+ src/assets/plant_images/plant_64.png filter=lfs diff=lfs merge=lfs -text
102
+ src/assets/plant_images/plant_65.png filter=lfs diff=lfs merge=lfs -text
103
+ src/assets/plant_images/plant_66.png filter=lfs diff=lfs merge=lfs -text
104
+ src/assets/plant_images/plant_67.png filter=lfs diff=lfs merge=lfs -text
105
+ src/assets/plant_images/plant_7.png filter=lfs diff=lfs merge=lfs -text
106
+ src/assets/plant_images/plant_8.png filter=lfs diff=lfs merge=lfs -text
107
+ src/assets/plant_images/plant_9.png filter=lfs diff=lfs merge=lfs -text
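These per-file LFS rules are repetitive; a short helper script can regenerate them from the directory contents instead of maintaining them by hand. The sketch below is illustrative only (it assumes the src/assets/plant_images layout shown in the file list above and is not part of this commit); a single wildcard rule such as "src/assets/plant_images/*.png filter=lfs diff=lfs merge=lfs -text", as git lfs track would write, covers the directory just as well.

# Illustrative helper (not part of this commit): append a git-lfs filter rule
# for every PNG under src/assets/plant_images to .gitattributes.
from pathlib import Path

patterns = sorted(p.as_posix() for p in Path("src/assets/plant_images").glob("*.png"))
with open(".gitattributes", "a", encoding="utf-8") as f:
    for pattern in patterns:
        f.write(f"{pattern} filter=lfs diff=lfs merge=lfs -text\n")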
EXAMPLEvars.env ADDED
File without changes
LICENSE ADDED
@@ -0,0 +1,28 @@
1
+ BSD 3-Clause License
2
+
3
+ Copyright (c) 2023, Danielle Heymann
4
+
5
+ Redistribution and use in source and binary forms, with or without
6
+ modification, are permitted provided that the following conditions are met:
7
+
8
+ 1. Redistributions of source code must retain the above copyright notice, this
9
+ list of conditions and the following disclaimer.
10
+
11
+ 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ this list of conditions and the following disclaimer in the documentation
13
+ and/or other materials provided with the distribution.
14
+
15
+ 3. Neither the name of the copyright holder nor the names of its
16
+ contributors may be used to endorse or promote products derived from
17
+ this software without specific prior written permission.
18
+
19
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
app.py ADDED
@@ -0,0 +1,384 @@
1
+ # import libraries
2
+ import pandas as pd
3
+ import numpy as np
4
+ import os
5
+ import time
6
+ import math
7
+ import streamlit as st
8
+ #from streamlit_chat import message
9
+ from streamlit_extras.colored_header import colored_header
10
+ from streamlit_extras.add_vertical_space import add_vertical_space
11
+ from PIL import Image
12
+
13
+ # import modules
14
+ from src.backend.chatbot import *
15
+ from src.backend.optimization_algo import *
16
+ from src.frontend.visualizations import *
17
+
18
+
19
+ # import compatibilities matrix
20
+ # make plant_compatibility.csv into a matrix. it currently has indexes as rows and columns for plant names and then compatibility values as the values
21
+ st.session_state.raw_plant_compatibility = pd.read_csv('src/data/plant_compatibility.csv', index_col=0)
22
+ # fill NaN values with 0
23
+ st.session_state.raw_plant_compatibility = st.session_state.raw_plant_compatibility.fillna(0)
24
+ # get list of plants
25
+ st.session_state.plant_list = st.session_state.raw_plant_compatibility.index.tolist()
26
+
27
+
28
+
29
+ # setup keys and api info
30
+ OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
31
+ os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
32
+
33
+
34
+
35
+ #chat = ChatOpenAI()
36
+
37
+
38
+ # UI page config
39
+ st.set_page_config(
40
+ #page_title="GRDN.AI",
41
+ page_icon="🌱",
42
+ layout="wide",
43
+ initial_sidebar_state="expanded",
44
+ )
45
+
46
+ # Function to display chat message with an icon
47
+ def chat_message(message, is_user=False):
48
+ if is_user:
49
+ icon = Image.open("src/assets/cool.png")
50
+ side = "left"
51
+ else:
52
+ icon = Image.open("src/assets/bot.png")
53
+ side = "right"
54
+
55
+ chat_container = st.container()
56
+
57
+ with chat_container:
58
+ col1, col2, col3, col4= st.columns([1,7,7,1])
59
+
60
+ with col1:
61
+ if is_user == True:
62
+ st.image(icon, width=50)
63
+
64
+ with col2:
65
+ if is_user == True:
66
+ st.markdown(f'<div style="text-align: {side};">{message}</div>', unsafe_allow_html=True)
67
+ with col3:
68
+ if is_user == False:
69
+ st.markdown(f'<div style="text-align: {side};">{message}</div>', unsafe_allow_html=True)
70
+ with col4:
71
+ if is_user == False:
72
+ st.image(icon, width=50)
73
+
74
+
75
+
76
+
77
+ st.image("src/assets/logo_title_transparent.png", caption=None, use_column_width=None, clamp=False, channels='RGB', output_format='auto')
78
+
79
+ st.write("AI- and optimization-powered companion gardening")
80
+ colored_header(label='', description='', color_name='green-30')
81
+
82
+ # Sidebar
83
+ # st.sidebar.title("Navigation")
84
+ # page = st.sidebar.radio("Select a page:", ("Home", "Companion Gardening", "Optimization", "About"))
85
+
86
+ # add vertical space
87
+ with st.sidebar:
88
+ add_vertical_space(2)
89
+ # Sidebar
90
+ st.sidebar.title("Navigation")
91
+
92
+ # Define the page options
93
+ pages = ["Garden Optimization", "About"]
94
+
95
+ # Render the selected page content
96
+ page = st.sidebar.selectbox("Select a page:", pages)
97
+
98
+
99
+ if page == "Garden Optimization":
100
+ st.sidebar.subheader("Companion Gardening")
101
+ st.write("GRDN is a companion gardening app that helps you plan your garden and maximize your harvest. It uses AI to predict which plants grow well together and optimization algorithms to decide how to arrange your plant beds.")
102
+ st.write("This app is currently in beta. Please report any bugs.")
103
+ companion_planting_info = """
104
+ Key Benefits
105
+ - **Pest control:** some pairings repel or confuse the insect pests that attack a neighboring crop.
106
+ - **Improved pollination:** flowering companions draw bees and other pollinators into the bed.
107
+ - **Maximized space:** plants with different heights and growth habits can share a bed efficiently.
108
+ - **Nutrient enhancement:** some companions, such as nitrogen-fixing legumes, improve the soil for their neighbors.
109
+ - **Complementary growth:** staggered root depths and canopies reduce competition between neighbors.
110
+
111
+ """
112
+
113
+ st.sidebar.markdown(companion_planting_info)
114
+ # Set the initial value of user_name
115
+ if 'user_name' not in st.session_state:
116
+ st.session_state.user_name = ''
117
+ # add in some vertical space
118
+ add_vertical_space(3)
119
+ # Display the welcome message
120
+ st.title('Let\'s get started! Decide on your garden parameters')
121
+
122
+
123
+ # add in some vertical space
124
+ add_vertical_space(2)
125
+
126
+ # make a container for this section
127
+ container1 = st.container(border=True)
128
+
129
+ with container1:
130
+ # Modify the user_name variable based on user input
131
+ if st.session_state['user_name'] == '':
132
+ col1, col2, col3= st.columns([1,2,1])
133
+ with col1:
134
+ st.session_state['user_name_input'] = st.text_input('Enter your name', st.session_state.user_name)
135
+ if 'user_name_input' in st.session_state:
136
+ st.session_state.user_name = st.session_state.user_name_input
137
+ if st.session_state.user_name != '':
138
+ st.write('Hello ' + st.session_state['user_name'] + '! Let\'s optimize your garden. 🌱')
139
+
140
+ # # add in some vertical space
141
+ add_vertical_space(2)
142
+
143
+ print("")
144
+ print("____________________")
145
+ print("start of session")
146
+
147
+ col1a, col2a= st.columns([1,2])
148
+ enable_max_species = False
149
+ enable_min_species = False
150
+
151
+
152
+ # make a form to get the plant list from the user
153
+ with col1a:
154
+ with st.form(key = "plant_list_form"):
155
+ input_plants_raw = st.multiselect('plants', st.session_state.plant_list)
156
+ submit_button = st.form_submit_button(label='Submit Plant List')
157
+ if submit_button:
158
+ st.session_state['input_plants_raw'] = input_plants_raw
159
+ st.session_state.submitted_plant_list = True
160
+
161
+ # add in some vertical space
162
+ add_vertical_space(1)
163
+
164
+ with col2a:
165
+ col1, col2, col3= st.columns([1,1,1])
166
+ if 'input_plants_raw' in st.session_state:
167
+ print("BP1")
168
+ # first question is what plants would you like to plant
169
+ plants_response = st.session_state.input_plants_raw
170
+
171
+ # Initialize session state variables if they don't exist
172
+ if 'n_plant_beds' not in st.session_state:
173
+ st.session_state['n_plant_beds'] = 1
174
+
175
+ if 'min_species' not in st.session_state:
176
+ st.session_state['min_species'] = 1
177
+
178
+ if 'max_species' not in st.session_state:
179
+ st.session_state['max_species'] = 2
180
+
181
+ # Number of plant beds input
182
+ with col1:
183
+ n_plant_beds = st.number_input('Number of plant beds \n', min_value=1, max_value=20, value=st.session_state.n_plant_beds, step=1)
184
+ st.session_state.n_plant_beds = n_plant_beds
185
+ with col2:
186
+ # Minimum species per plant bed input
187
+ min_species = st.number_input('Minimum number of species per plant bed',
188
+ min_value=1,
189
+ max_value=len(st.session_state.input_plants_raw),
190
+ value=st.session_state.min_species,
191
+ step=1)
192
+ st.session_state.min_species = min_species
193
+
194
+ # Maximum species per plant bed input
195
+ # It will be enabled only if min_species is set
196
+ enable_max_species = st.session_state.min_species > 0
197
+ with col3:
198
+ max_species = st.number_input('Maximum number of species per plant bed',
199
+ min_value=st.session_state.min_species,
200
+ max_value=len(st.session_state.input_plants_raw),
201
+ value=max(st.session_state.min_species, st.session_state.max_species),
202
+ step=1,
203
+ disabled=not enable_max_species)
204
+ if enable_max_species:
205
+ st.session_state.max_species = max_species
206
+
207
+ # extract the compatibility matrix from the user's input
208
+ if 'extracted_mat' not in st.session_state:
209
+ valid = False
210
+ if 'submitted_plant_list' in st.session_state and st.session_state.submitted_plant_list:
211
+ # check if the user's input is valid
212
+ # min species per bed must be less than or equal to max species per bed
213
+ if (st.session_state.min_species <= st.session_state.max_species
214
+ ) and (
215
+ # max species per bed must be less than or equal to the number of plants
216
+ st.session_state.max_species <= len(st.session_state.input_plants_raw)
217
+ ) and (
218
+ # max species per bed must be greater than or equal to the min species per bed
219
+ st.session_state.max_species >= st.session_state.min_species
220
+ ) and (
221
+ # min species per bed must be less than or equal to the number of plants
222
+ st.session_state.min_species <= len(st.session_state.input_plants_raw)
223
+ ) and (
224
+ # number of plant beds multiplied by min species per bed must be less than or equal to the number of plants
225
+ len(st.session_state.input_plants_raw) >= st.session_state.n_plant_beds * st.session_state.min_species
226
+ ) and (
227
+ # number of plant beds multiplied by max species per bed must be greater than or equal to the number of plants
228
+ len(st.session_state.input_plants_raw) <= st.session_state.n_plant_beds * st.session_state.max_species
229
+ ):
230
+ valid = True
231
+ else:
232
+ # add a warning message
233
+ st.warning('Please enter valid parameters. The minimum number of species per plant bed must be less than or equal to the maximum number of species per plant bed. The maximum number of species per plant bed must be less than or equal to the number of plants. The maximum number of species per plant bed must be greater than or equal to the minimum number of species per plant bed. The minimum number of species per plant bed must be less than or equal to the number of plants. The number of plant beds multiplied by the minimum number of species per plant bed must be less than or equal to the number of plants. The number of plant beds multiplied by the maximum number of species per plant bed must be greater than or equal to the number of plants.')
234
+
235
+ if valid:
236
+ # add in some vertical space
237
+ add_vertical_space(2)
238
+ if st.button('Generate Companion Plant Compatibility Matrix'):
239
+ with st.spinner('generating companion plant compatibility matrix...'):
240
+ st.session_state['generating_mat'] = True
241
+ # now get compatibility matrix for companion planting
242
+ time.sleep(1)
243
+ extracted_mat, full_mat, plant_index_mapping= get_compatibility_matrix_2(st.session_state.input_plants_raw)
244
+ print(extracted_mat)
245
+ st.session_state.extracted_mat = extracted_mat
246
+ st.session_state.full_mat = full_mat
247
+ st.session_state.plant_index_mapping = plant_index_mapping
248
+ # add in some vertical space
249
+ add_vertical_space(4)
250
+
251
+ # display the companion plant compatibility matrix
252
+ if 'extracted_mat' in st.session_state:
253
+ # add a title for the next section- companion plant compatibility matrix based on user input
254
+ st.title('Your companion plant compatibility matrix')
255
+ # make a container for this section
256
+ container2 = st.container(border=True)
257
+ with container2:
258
+ col1, col2 = st.columns([8,4])
259
+ # display the companion plant compatibility matrix
260
+ with col2:
261
+ st.write("Here is your companion plant compatibility matrix:")
262
+ with st.expander("Show ugly compatibility matrix of 1's 0's and -1's"):
263
+ st.write(st.session_state.extracted_mat)
264
+ with col1:
265
+ st.write("Here is a network visualization of your companion plant compatibility matrix. It is color coded to show which plants are companions (green), antagonists (violetred), or neutral (grey).")
266
+ plot_compatibility_with_agraph(st.session_state.input_plants_raw, st.session_state.full_mat)
267
+ st.session_state['got_mat'] = True
268
+
269
+ if 'got_mat' in st.session_state:
270
+ # add in some vertical space
271
+ add_vertical_space(4)
272
+ # make a container for this section
273
+ container3 = st.container(border=True)
274
+ with container3:
275
+ st.title('Optimizing companion planting with the genetic algorithm and AI')
276
+ st.write("Now that we have your companion plant compatibility matrix, we can use optimization to maximize your harvest. A genetic algorithm will search for the garden layout that maximizes the number of companion plants and minimizes the number of antagonists in your plant beds.")
277
+ st.write("Set the parameters for the genetic algorithm. Here is more info for your reference:")
278
+ with st.form(key = "genetic_algorithm_form"):
279
+ col1, col2= st.columns([1,1])
280
+ with col2:
281
+ with st.expander("Show more information about the genetic algorithm parameters"):
282
+ st.subheader("Plant Optimization Heuristic Performance")
283
+ st.write("The genetic algorithm parameters impact the performance of the plant optimization heuristic in the following ways:")
284
+ st.markdown("- **Population Size**: A larger population size allows for a more diverse exploration of the solution space. However, it also increases computational complexity.")
285
+ st.markdown("- **Number of Generations**: Increasing the number of generations provides more opportunities for the algorithm to converge towards an optimal solution.")
286
+ st.markdown("- **Tournament Size**: A larger tournament size promotes stronger selection pressure and can lead to faster convergence, but it may also increase the risk of premature convergence.")
287
+ st.markdown("- **Crossover Rate**: A higher crossover rate increases the exploration capability by creating diverse offspring, potentially improving the algorithm's ability to escape local optima.")
288
+ st.markdown("- **Mutation Rate**: Mutation introduces random changes in individuals, helping to maintain diversity in the population and preventing premature convergence.")
289
+ # seed population rate
290
+ st.markdown("- **Seed Population Rate**: The seed population rate is the percentage of the population that is generated based on the LLM's interpretation of compatibility. The remaining percentage of the population is generated randomly. A higher seed population rate increases the likelihood that the genetic algorithm will converge towards a solution that is compatible.")
291
+ # Run the Genetic Algorithm
292
+ with col1:
293
+ st.subheader("Genetic Algorithm Parameters")
294
+ st.write("These parameters control the behavior of the genetic algorithm.")
295
+
296
+ # Genetic Algorithm parameters
297
+ st.session_state.population_size = st.slider("Population Size", min_value=400, max_value=1000, value=500,
298
+ help="The number of individuals in each generation of the genetic algorithm.")
299
+ st.session_state.num_generations = st.slider("Number of Generations", min_value=400, max_value=1000, value=450,
300
+ help="The total number of generations to evolve through.")
301
+ st.session_state.tournament_size = st.slider("Tournament Size", min_value=5, max_value=20, value=10,
302
+ help="The number of individuals competing in each tournament selection round.")
303
+ st.session_state.crossover_rate = st.slider("Crossover Rate", min_value=0.1, max_value=1.0, step=0.1, value=0.8,
304
+ help="The probability of two individuals undergoing crossover to create offspring.")
305
+ st.session_state.mutation_rate = st.slider("Mutation Rate", min_value=0.01, max_value=0.9, step=0.01, value=0.3,
306
+ help="The probability of an individual undergoing mutation.")
307
+ st.session_state.seed_population_rate = st.slider("Seed Population Rate", min_value=0.0, max_value=0.2, step=0.001, value=0.08,
308
+ help="The percentage of the population that is generated based on the LLM's interpretation of compatibility. The remaining percentage of the population is generated randomly.")
309
+
310
+ #
311
+ # Run the genetic algorithm
312
+ if st.form_submit_button(label='Run Genetic Algorithm'):
313
+ with st.spinner('running genetic algorithm... this may take a minute'):
314
+ grouping = genetic_algorithm_plants()
315
+ st.session_state.grouping = grouping
316
+
317
+ # visualize the groupings
318
+ # add in some vertical space
319
+ add_vertical_space(4)
320
+ # make a container for this section
321
+ st.title(st.session_state.user_name + "'s optimized garden")
322
+ st.header("Here are the optimized groupings of plants for your garden")
323
+ container4 = st.container(border=True)
324
+ with container4:
325
+ if 'grouping' in st.session_state:
326
+ visualize_groupings()
327
+ if 'best_fitness' in st.session_state:
328
+ # embed score.png
329
+ col1b, col2b = st.columns([2,11])
330
+ with col1b:
331
+ st.image("src/assets/score.png", caption=None, width = 160, use_column_width=None, clamp=False, channels='RGB', output_format='auto')
332
+ with col2b:
333
+ #st.write("\n")
334
+ st.header("| " + str(st.session_state.best_fitness))
335
+ st.write("The genetic algorithm converged towards a solution with a fitness score of " + str(st.session_state.best_fitness) + ".")
336
+ # Add vertical space
337
+ add_vertical_space(4)
338
+ # show plant care tips
339
+ st.header("Plant care tips")
340
+ with st.spinner('generating plant care tips...'):
341
+ st.write("Here are some plant care tips for your plants. Good luck!")
342
+ #if 'plant_care_tips' not in st.session_state:
343
+ st.session_state.plant_care_tips = get_plant_care_tips(st.session_state.input_plants_raw)
344
+ styled_text = f'<div style="background-color: #2d5a59; color: white; padding: 10px; border-radius: 5px;">{st.session_state.plant_care_tips}</div>'
345
+ st.write(styled_text, unsafe_allow_html=True)
346
+
347
+
348
+
349
+
350
+
351
+
352
+ if page == "About":
353
+ st.sidebar.subheader("About")
354
+ st.sidebar.write("GRDN is a companion gardening app that helps you plan your garden and maximize your harvest. It uses AI to predict which plants grow well together and optimization algorithms to decide how to arrange your plant beds.")
355
+ st.sidebar.write("Companion gardening is the practice of planting different plants together to maximize their growth. Companion gardening can help to increase the yield of your garden, improve the health of your plants, and reduce the need for pesticides.")
356
+ st.write("This app is currently in beta. Please report any bugs to the team.")
357
+
358
+ col1, col2= st.columns([1,1])
359
+ with col1:
360
+ st.subheader("Contact Information")
361
+ st.write("Author: Danielle Heymann")
362
+ st.write("Email: dheymann314@gmail.com")
363
+ st.write("LinkedIn: https://www.linkedin.com/in/danielle-heymann/")
364
+ with col2:
365
+ st.subheader("Software, data, and libraries used")
366
+ st.write("Libraries and Software")
367
+ st.markdown("""
368
+ - Python
369
+ - streamlit
370
+ - openai
371
+ - plotly
372
+ - pandas
373
+ - numpy
374
+ - PIL
375
+ - langchain
376
+ - streamlit_chat
377
+ - github copilot
378
+ - chatGPT
379
+ - GPT family of models
380
+ - DALL·E 3 (in preprocessing script for image generation)
381
+ """)
382
+ st.write("Data sources in addition to what GPT was trained on: \n https://waldenlabs.com/the-ultimate-companion-planting-guide-chart/ ")
383
+
384
+ st.write("avatars from: https://www.flaticon.com/free-icons/bot")
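The genetic algorithm itself lives in src.backend.optimization_algo (not included in this 50-file view), so app.py above only describes it: reward bed groupings that pair companions and penalize groupings that pair antagonists, using the -1/0/1 compatibility matrix. Here is a minimal sketch of that scoring and of tournament selection, written for illustration only; the plant names and matrix values are made up, and the real genetic_algorithm_plants() may differ.

import random

def calculate_fitness(grouping, plants, compatibility_matrix):
    # Sum pairwise compatibility (-1 antagonist, 0 neutral, 1 companion)
    # over every pair of plants that share a bed.
    score = 0
    for bed in grouping:
        for i in range(len(bed)):
            for j in range(i + 1, len(bed)):
                score += compatibility_matrix[plants.index(bed[i])][plants.index(bed[j])]
    return score

def tournament_selection(population, tournament_size, fitness):
    # Keep the fittest of a small random sample, repeated once per slot; a larger
    # tournament means stronger selection pressure, as the in-app help text notes.
    return [max(random.sample(population, tournament_size), key=fitness)
            for _ in range(len(population))]

# Toy example with invented compatibility values:
plants = ["basil", "tomatoes", "mint", "carrots"]
matrix = [[0, 1, 0, 0],
          [1, 0, -1, 0],
          [0, -1, 0, 1],
          [0, 0, 1, 0]]
grouping = [["basil", "tomatoes"], ["mint", "carrots"]]
print(calculate_fitness(grouping, plants, matrix))  # 2

The prototype in the notebook below uses the same ingredients (tournament selection, single-point crossover, per-bed mutation) on a small hard-coded matrix.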
notebooks/.ipynb_checkpoints/langchain experimenting-checkpoint.ipynb ADDED
@@ -0,0 +1,843 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os\n",
10
+ "import langchain\n",
11
+ "from langchain import PromptTemplate, LLMChain"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "markdown",
16
+ "metadata": {},
17
+ "source": [
18
+ "# huggingface"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": null,
24
+ "metadata": {},
25
+ "outputs": [],
26
+ "source": [
27
+ "from langchain import HuggingFacePipeline\n",
28
+ "\n",
29
+ "llm = HuggingFacePipeline.from_model_id(\n",
30
+ " model_id=\"bigscience/bloom-560m\",\n",
31
+ " task=\"text-generation\",\n",
32
+ " model_kwargs={\"temperature\": 0, \"max_length\": 64},\n",
33
+ ")\n",
34
+ "\n",
35
+ "\n",
36
+ "# Integrate the model in an LLMChain\n",
37
+ "from langchain import PromptTemplate, LLMChain\n",
38
+ "\n",
39
+ "template = \"\"\"Question: {question}\n",
40
+ "\n",
41
+ "Answer: Let's think step by step.\"\"\"\n",
42
+ "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
43
+ "\n",
44
+ "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
45
+ "\n",
46
+ "question = \"What is electroencephalography?\"\n",
47
+ "\n",
48
+ "print(llm_chain.run(question))"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "markdown",
53
+ "metadata": {},
54
+ "source": [
55
+ "# galactica"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "code",
60
+ "execution_count": null,
61
+ "metadata": {},
62
+ "outputs": [],
63
+ "source": [
64
+ "import galai as gal\n",
65
+ "\n",
66
+ "model = gal.load_model(\"standard\")\n",
67
+ "# model.generate(\"Scaled dot product attention:\\n\\n\\\\[\")\n",
68
+ "# Scaled dot product attention:\\n\\n\\\\[ \\\\displaystyle\\\\text{Attention}(Q,K,V)=\\\\text{softmax}(\\\\frac{QK^{T}}{\\\\sqrt{d_{k}}}%\\n)V \\\\]"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": null,
74
+ "metadata": {},
75
+ "outputs": [],
76
+ "source": [
77
+ "model.generate(\"from this list, [vodka, strawberries, corn, peas], create a new python list that ONLY includes produce <work>\")"
78
+ ]
79
+ },
80
+ {
81
+ "cell_type": "code",
82
+ "execution_count": null,
83
+ "metadata": {},
84
+ "outputs": [],
85
+ "source": [
86
+ "TEXT = \"vodka, strawberries, corn, peas, cherries, sodapop\"\n",
87
+ "model.generate( '\\n\\nQuestion: Of the items in this list, \\n\\n vodka, strawberries, corn, peas, cherries, diet coke, \\n\\n which can grow in a garden?\\n\\nAnswer:')\n"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": null,
93
+ "metadata": {},
94
+ "outputs": [],
95
+ "source": [
96
+ "model.generate(\"a plant compatibility matrix is a python matrix and will have a score of -1 for a negative relationship between plants, 0 for a neutral relationship between plants, and 1 for a positive relationship between plants. create a python array of plant compatibility between the plants listed: \" + str(plant_list))"
97
+ ]
98
+ },
99
+ {
100
+ "cell_type": "code",
101
+ "execution_count": null,
102
+ "metadata": {},
103
+ "outputs": [],
104
+ "source": []
105
+ },
106
+ {
107
+ "cell_type": "markdown",
108
+ "metadata": {},
109
+ "source": [
110
+ "# openai + langchain"
111
+ ]
112
+ },
113
+ {
114
+ "cell_type": "code",
115
+ "execution_count": null,
116
+ "metadata": {},
117
+ "outputs": [],
118
+ "source": [
119
+ "# plant compatibility context source: https://waldenlabs.com/the-ultimate-companion-planting-guide-chart/"
120
+ ]
121
+ },
122
+ {
123
+ "cell_type": "code",
124
+ "execution_count": null,
125
+ "metadata": {},
126
+ "outputs": [],
127
+ "source": [
128
+ "import os\n",
129
+ "from langchain.chat_models import ChatOpenAI\n",
130
+ "\n",
131
+ "file_path = 'C:/Users/dheym/OneDrive/Documents/api_keys/openai_api_keys.txt'\n",
132
+ "with open(file_path, 'r') as file:\n",
133
+ " OPENAI_API_KEY = file.read()\n",
134
+ "\n",
135
+ "os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY\n",
136
+ "#If you'd prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\n",
137
+ "\n",
138
+ "\n",
139
+ "chat = ChatOpenAI()\n",
140
+ "\n"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": null,
146
+ "metadata": {},
147
+ "outputs": [],
148
+ "source": [
149
+ "def parse_and_evaluate_text(text):\n",
150
+ " # Find the indices of the opening and closing brackets\n",
151
+ " opening_bracket_index = text.find(\"[\")\n",
152
+ " closing_bracket_index = text.find(\"]\")\n",
153
+ "\n",
154
+ " if opening_bracket_index != -1 and closing_bracket_index != -1:\n",
155
+ " # Extract the text within the brackets\n",
156
+ " extracted_list = \"[\" + text[opening_bracket_index + 1: closing_bracket_index] + \"]\"\n",
157
+ " # Return the evaluated text list\n",
158
+ " return eval(extracted_list)\n",
159
+ " \n",
160
+ "\n",
161
+ " else:\n",
162
+ " print(\"Error with parsing plant list\")\n",
163
+ " return None"
164
+ ]
165
+ },
166
+ {
167
+ "cell_type": "code",
168
+ "execution_count": null,
169
+ "metadata": {},
170
+ "outputs": [],
171
+ "source": [
172
+ "from langchain.prompts.chat import (\n",
173
+ " ChatPromptTemplate,\n",
174
+ " SystemMessagePromptTemplate,\n",
175
+ " AIMessagePromptTemplate,\n",
176
+ " HumanMessagePromptTemplate,\n",
177
+ ")\n",
178
+ "\n",
179
+ "\n",
180
+ "def parse_and_evaluate_text(text):\n",
181
+ " # Find the indices of the opening and closing brackets\n",
182
+ " opening_bracket_index = text.find(\"[\")\n",
183
+ " closing_bracket_index = text.find(\"]\")\n",
184
+ "\n",
185
+ " if opening_bracket_index != -1 and closing_bracket_index != -1:\n",
186
+ " # Extract the text within the brackets\n",
187
+ " extracted_list = \"[\" + text[opening_bracket_index + 1: closing_bracket_index] + \"]\"\n",
188
+ " # Return the evaluated text list\n",
189
+ " return eval(extracted_list)\n",
190
+ " \n",
191
+ "\n",
192
+ " else:\n",
193
+ " print(\"Error with parsing plant list\")\n",
194
+ " return None\n",
195
+ " \n",
196
+ "def chat_response(template, prompt_text):\n",
197
+ " system_message_prompt = SystemMessagePromptTemplate.from_template(template)\n",
198
+ " human_template=\"{text}\"\n",
199
+ " human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)\n",
200
+ " chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])\n",
201
+ " response = chat(chat_prompt.format_prompt(text= prompt_text).to_messages())\n",
202
+ " return response\n",
203
+ "\n",
204
+ "# get the plant list from user input\n",
205
+ "def get_plant_list(input_plant_text):\n",
206
+ " template=\"You are a helpful assistant that knows all about gardening and plants and python data structures.\"\n",
207
+ " text = 'which of the elements of this list can be grown in a garden, [' + input_plant_text + ']? Return JUST a python list object containing the elements that can be grown in a garden. Do not include any other text or explanation.'\n",
208
+ " plant_list_text = chat_response(template, text)\n",
209
+ " plant_list = parse_and_evaluate_text(plant_list_text.content)\n",
210
+ " print(plant_list)\n",
211
+ " return plant_list\n",
212
+ "\n",
213
+ "# get compatibility matrix for companion planting\n",
214
+ "def get_compatibility_matrix(plant_list):\n",
215
+ " # Convert the compatibility matrix to a string\n",
216
+ " with open('compatibilities_text.txt', 'r') as file:\n",
217
+ " # Read the contents of the file\n",
218
+ " compatibility_text = file.read()\n",
219
+ " plant_comp_context = compatibility_text\n",
220
+ " template=\"You are a helpful assistant that knows all about gardening, companion planting, and python data structures- specifically compatibility matrices.\"\n",
221
+ " text = 'from this list of plants, [' + str(plant_list) + '], Return JUST a python array (with values separated by commas like this: [[0,1],[1,0]]\\n\\n ) for companion plant compatibility. Each row and column should represent plants, and the element of the array will contain a -1, 0, or 1 depending on if the relationship between plants is antagonists, neutral, or companions, respectively. You must refer to this knowledge base of information on plant compatibility: \\n\\n, ' + plant_comp_context + '\\n\\n A plant\\'s compatibility with itself is always 0. Do not include any other text or explanation.'\n",
222
+ " compatibility_mat = chat_response(template, text)\n",
223
+ " \n",
224
+ " # Find the indices of the opening and closing brackets\n",
225
+ " opening_bracket_index = compatibility_mat.content.find(\"[[\")\n",
226
+ " closing_bracket_index = compatibility_mat.content.find(\"]]\")\n",
227
+ " if opening_bracket_index != -1 and closing_bracket_index != -1:\n",
228
+ " # Extract the text within the brackets\n",
229
+ " extracted_mat = \"[\" + compatibility_mat.content[opening_bracket_index + 1: closing_bracket_index] + \"]]\"\n",
230
+ " # Return the evaluated mat\n",
231
+ " return eval(extracted_mat)\n",
232
+ " else:\n",
233
+ " print(\"Error with parsing plant compatibility matrix\")\n",
234
+ " return None\n",
235
+ " return \n",
236
+ "\n",
237
+ "\n",
238
+ "input_plant_text = \"strawberries, mint, pepper, diet coke, carrots, lettuce, vodka, basil, tomatoes, marigolds, lemons, spinach, brocoli\"\n",
239
+ "input_plant_text = \"apples, basil, bean, rue, oregano, onion\"\n",
240
+ "plant_list = get_plant_list(input_plant_text)\n",
241
+ "extracted_mat = get_compatibility_matrix(plant_list)\n",
242
+ "print(extracted_mat)"
243
+ ]
244
+ },
245
+ {
246
+ "cell_type": "markdown",
247
+ "metadata": {},
248
+ "source": [
249
+ "## langchain additional context and fine tuning"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "code",
254
+ "execution_count": null,
255
+ "metadata": {},
256
+ "outputs": [],
257
+ "source": [
258
+ "# process the plant compatibility matrix to a json\n",
259
+ "import csv\n",
260
+ "import json\n",
261
+ "\n",
262
+ "# Open the CSV file\n",
263
+ "with open('plant_compatability.csv', 'r') as file:\n",
264
+ " reader = csv.reader(file)\n",
265
+ " \n",
266
+ " # Read the header row to get the plant names\n",
267
+ " header = next(reader)\n",
268
+ " \n",
269
+ " # Create an empty dictionary to store the compatibility matrix\n",
270
+ " compatibility_matrix = {}\n",
271
+ " \n",
272
+ " # Iterate over the rows in the CSV file\n",
273
+ " for row in reader:\n",
274
+ " # Extract the plant name from the first column\n",
275
+ " plant = row[0]\n",
276
+ " \n",
277
+ " # Create a dictionary to store the compatibility values for the current plant\n",
278
+ " compatibility_values = {}\n",
279
+ " \n",
280
+ " # Iterate over the compatibility values in the row\n",
281
+ " for i, value in enumerate(row[1:], start=1):\n",
282
+ " # Extract the plant name from the header row\n",
283
+ " companion_plant = header[i]\n",
284
+ " \n",
285
+ " # Convert the compatibility value to an integer\n",
286
+ " compatibility = int(value) if value else 0\n",
287
+ " \n",
288
+ " # Add the compatibility value to the dictionary\n",
289
+ " compatibility_values[companion_plant] = compatibility\n",
290
+ " \n",
291
+ " # Add the compatibility values dictionary to the main compatibility matrix\n",
292
+ " compatibility_matrix[plant] = compatibility_values\n",
293
+ "\n",
294
+ "# Save the compatibility matrix as a JSON file\n",
295
+ "with open('compatibility_matrix.json', 'w') as file:\n",
296
+ " json.dump(compatibility_matrix, file, indent=4)"
297
+ ]
298
+ },
299
+ {
300
+ "cell_type": "code",
301
+ "execution_count": null,
302
+ "metadata": {},
303
+ "outputs": [],
304
+ "source": [
305
+ "import openai\n",
306
+ "\n",
307
+ "# Load the compatibility matrix from the JSON file\n",
308
+ "with open('compatibility_matrix.json', 'r') as file:\n",
309
+ " compatibility_matrix = json.load(file)\n",
310
+ "\n",
311
+ "# Convert the compatibility matrix to a string\n",
312
+ "compatibility_matrix_text = json.dumps(compatibility_matrix)\n",
313
+ "with open('compatibilities_text.txt', 'r') as file:\n",
314
+ " # Read the contents of the file\n",
315
+ " compatibility_matrix_text = file.read()\n",
316
+ "\n",
317
+ "# Set up the LangChain API credentials\n",
318
+ "openai.api_key = OPENAI_API_KEY\n",
319
+ "\n",
320
+ "# Define the prompt for the GPT model\n",
321
+ "prompt = \"Can you provide companion plant suggestions for my garden given what you know about companion planting?\"\n",
322
+ "\n",
323
+ "# Concatenate the prompt and compatibility matrix text as the input to the GPT model\n",
324
+ "input_text = f\"{prompt}\\n\\n{compatibility_matrix_text}\"\n",
325
+ "\n",
326
+ "# Generate a response from the GPT model\n",
327
+ "response = openai.Completion.create(\n",
328
+ " engine='text-davinci-003',\n",
329
+ " prompt=input_text,\n",
330
+ " max_tokens=575\n",
331
+ ")\n",
332
+ "\n",
333
+ "print(response.choices[0])\n",
334
+ "# Extract the generated companion plant suggestions from the response\n",
335
+ "suggestions = response.choices[0].text.strip()\n",
336
+ "\n",
337
+ "# Print the companion plant suggestions\n",
338
+ "print(suggestions)"
339
+ ]
340
+ },
341
+ {
342
+ "cell_type": "code",
343
+ "execution_count": null,
344
+ "metadata": {},
345
+ "outputs": [],
346
+ "source": [
347
+ "from langchain.client import Client\n",
348
+ "\n",
349
+ "# Load the fine-tuned GPT model\n",
350
+ "model = OpenAI(openai_api_key=OPENAI_API_KEY)\n",
351
+ "\n",
352
+ "# Load the knowledge base or context from websites or documents\n",
353
+ "knowledge_base = YourProcessedData()\n",
354
+ "\n",
355
+ "# Initialize the Langchain client\n",
356
+ "client = Client()\n",
357
+ "\n",
358
+ "# Set the context for the GPT model\n",
359
+ "context = \"Context from websites or documents\"\n",
360
+ "model.set_context(context)\n",
361
+ "\n",
362
+ "# Get companion plant compatibility predictions\n",
363
+ "plants = ['strawberries', 'mint', 'carrots', 'lettuce', 'basil']\n",
364
+ "predictions = []\n",
365
+ "\n",
366
+ "for plant in plants:\n",
367
+ " # Generate a question for each plant\n",
368
+ " question = f\"Which plants are compatible with {plant}?\"\n",
369
+ " \n",
370
+ " # Provide the question and context to the Langchain client\n",
371
+ " response = client.query(question, context=context)\n",
372
+ " \n",
373
+ " # Process and extract the answer from the response\n",
374
+ " answer = response['answer']\n",
375
+ " predictions.append((plant, answer))\n",
376
+ "\n",
377
+ "# Process and display the compatibility predictions\n",
378
+ "for plant, compatibility in predictions:\n",
379
+ " print(f\"{plant}: {compatibility}\")"
380
+ ]
381
+ },
382
+ {
383
+ "cell_type": "code",
384
+ "execution_count": null,
385
+ "metadata": {},
386
+ "outputs": [],
387
+ "source": []
388
+ },
389
+ {
390
+ "cell_type": "code",
391
+ "execution_count": null,
392
+ "metadata": {},
393
+ "outputs": [],
394
+ "source": []
395
+ },
396
+ {
397
+ "cell_type": "code",
398
+ "execution_count": null,
399
+ "metadata": {},
400
+ "outputs": [],
401
+ "source": [
402
+ "import numpy as np\n",
403
+ "import networkx as nx\n",
404
+ "import matplotlib.pyplot as plt\n",
405
+ "\n",
406
+ "# Define the plant list\n",
407
+ "plants = ['strawberries', 'mint', 'carrots', 'lettuce', 'basil', 'tomatoes', 'marigolds', 'lemons', 'strawberries', 'spinach', 'broccoli']\n",
408
+ "\n",
409
+ "# Define the compatibility matrix\n",
410
+ "compatibility_matrix = np.array([\n",
411
+ "[0, 1, 0, 0, 1, -1, 1, 0, 0, 0, -1],\n",
412
+ "[1, 0, 0, 0, 1, -1, 1, 0, 1, 0, -1],\n",
413
+ "[0, 0, 0, -1, 0, 1, 0, 0, 0, 1, 0],\n",
414
+ "[0, 0, -1, 0, 0, 1, 0, 0, 0, 1, 0],\n",
415
+ "[1, 1, 0, 0, 0, -1, 1, 0, 0, 0, -1],\n",
416
+ "[-1, -1, 1, 1, -1, 0, -1, 0, 0, 0, 1],\n",
417
+ "[1, 1, 0, 0, 1, -1, 0, 0, 0, 0, -1],\n",
418
+ "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
419
+ "[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
420
+ "[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, -1],\n",
421
+ "[-1, -1, 0, 0, -1, 1, -1, 0, 0, -1, 0]\n",
422
+ "])\n",
423
+ "\n",
424
+ "# Create an empty graph\n",
425
+ "G = nx.Graph()\n",
426
+ "\n",
427
+ "# Add nodes (plants) to the graph\n",
428
+ "G.add_nodes_from(plants)\n",
429
+ "\n",
430
+ "# Add edges (compatibility) to the graph\n",
431
+ "for i in range(len(plants)):\n",
432
+ " for j in range(i + 1, len(plants)):\n",
433
+ " if compatibility_matrix[i][j] == 0:\n",
434
+ " color = 'grey'\n",
435
+ " elif compatibility_matrix[i][j] == -1:\n",
436
+ " color = 'pink'\n",
437
+ " else:\n",
438
+ " color = 'green'\n",
439
+ " G.add_edge(plants[i], plants[j], color=color)\n",
440
+ "\n",
441
+ "# Plot the graph\n",
442
+ "pos = nx.spring_layout(G)\n",
443
+ "colors = [G[u][v]['color'] for u, v in G.edges()]\n",
444
+ "nx.draw_networkx(G, pos, with_labels=True, node_color='lightgreen', edge_color=colors, width=2.0, alpha=0.8)\n",
445
+ "\n",
446
+ "# Set edge colors in the legend\n",
447
+ "color_legend = {'Neutral': 'grey', 'Negative': 'pink', 'Positive': 'green'}\n",
448
+ "legend_lines = [plt.Line2D([0], [0], color=color, linewidth=3) for color in color_legend.values()]\n",
449
+ "legend_labels = list(color_legend.keys())\n",
450
+ "plt.legend(legend_lines, legend_labels, loc='best')\n",
451
+ "\n",
452
+ "# Show the plot\n",
453
+ "plt.show()"
454
+ ]
455
+ },
456
+ {
457
+ "cell_type": "code",
458
+ "execution_count": null,
459
+ "metadata": {},
460
+ "outputs": [],
461
+ "source": [
462
+ "import streamlit as st\n",
463
+ "import networkx as nx\n",
464
+ "import plotly.graph_objects as go\n",
465
+ "\n",
466
+ "# Define the plants and compatibility matrix\n",
467
+ "plants = ['strawberries', 'mint', 'carrots', 'lettuce', 'basil', 'tomatoes', 'marigolds', 'lemons', 'strawberries', 'spinach', 'broccoli']\n",
468
+ "compatibility_matrix = [\n",
469
+ " [0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1],\n",
470
+ " [1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1],\n",
471
+ " [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n",
472
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
473
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
474
+ " [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
475
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
476
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
477
+ " [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n",
478
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
479
+ " [1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0]\n",
480
+ "]\n",
481
+ "\n",
482
+ "# Create a directed graph\n",
483
+ "G = nx.DiGraph()\n",
484
+ "\n",
485
+ "# Add nodes to the graph\n",
486
+ "G.add_nodes_from(plants)\n",
487
+ "\n",
488
+ "# Define positions for the nodes\n",
489
+ "pos = nx.spring_layout(G)\n",
490
+ "\n",
491
+ "# Assign positions to the nodes\n",
492
+ "for node, position in pos.items():\n",
493
+ " G.nodes[node]['pos'] = position\n",
494
+ "\n",
495
+ "# Iterate over the compatibility matrix and add edges with corresponding colors\n",
496
+ "for i in range(len(plants)):\n",
497
+ " for j in range(len(plants)):\n",
498
+ " if compatibility_matrix[i][j] == -1:\n",
499
+ " G.add_edge(plants[i], plants[j], color='red')\n",
500
+ " elif compatibility_matrix[i][j] == 1:\n",
501
+ " G.add_edge(plants[i], plants[j], color='green')\n",
502
+ " else:\n",
503
+ " G.add_edge(plants[i], plants[j], color='lightgray')\n",
504
+ "\n",
505
+ "# Create edge traces\n",
506
+ "# Create edge traces\n",
507
+ "edge_traces = []\n",
508
+ "for edge in G.edges():\n",
509
+ " x0, y0 = G.nodes[edge[0]]['pos']\n",
510
+ " x1, y1 = G.nodes[edge[1]]['pos']\n",
511
+ " color = G.edges[edge]['color']\n",
512
+ " trace = go.Scatter(x=[x0, x1, None], y=[y0, y1, None], mode='lines', line=dict(color=color, width=2))\n",
513
+ " edge_traces.append(trace)\n",
514
+ "\n",
515
+ "# Create node traces\n",
516
+ "node_traces = []\n",
517
+ "for node in G.nodes():\n",
518
+ " x, y = G.nodes[node]['pos']\n",
519
+ " trace = go.Scatter(x=[x], y=[y], mode='markers', marker=dict(color='black', size=10), name=node)\n",
520
+ " node_traces.append(trace)\n",
521
+ "\n",
522
+ "# Create figure\n",
523
+ "fig = go.Figure(data=edge_traces + node_traces)\n",
524
+ "\n",
525
+ "# Set layout options\n",
526
+ "fig.update_layout(\n",
527
+ " title='Plant Network',\n",
528
+ " showlegend=False,\n",
529
+ " hovermode='closest',\n",
530
+ " margin=dict(b=20, l=5, r=5, t=40),\n",
531
+ " xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),\n",
532
+ " yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)\n",
533
+ ")\n",
534
+ "\n",
535
+ "# Render the graph\n",
536
+ "#st.plotly_chart(fig)\n",
537
+ "fig"
538
+ ]
539
+ },
540
+ {
541
+ "cell_type": "code",
542
+ "execution_count": null,
543
+ "metadata": {},
544
+ "outputs": [],
545
+ "source": [
546
+ "import streamlit as st\n",
547
+ "import networkx as nx\n",
548
+ "import plotly.graph_objects as go\n",
549
+ "\n",
550
+ "# Define the plants and compatibility matrix\n",
551
+ "plants = ['strawberries', 'mint', 'carrots', 'lettuce', 'basil', 'tomatoes', 'marigolds', 'lemons', 'strawberries', 'spinach', 'broccoli']\n",
552
+ "compatibility_matrix = [\n",
553
+ " [0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1],\n",
554
+ " [1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1],\n",
555
+ " [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n",
556
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
557
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
558
+ " [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
559
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
560
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
561
+ " [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n",
562
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
563
+ " [1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0]\n",
564
+ "]\n",
565
+ "\n",
566
+ "# Create the graph\n",
567
+ "G = nx.Graph()\n",
568
+ "G.add_nodes_from(plants)\n",
569
+ "for i in range(len(plants)):\n",
570
+ " for j in range(i + 1, len(plants)):\n",
571
+ " if compatibility_matrix[i][j] == 0:\n",
572
+ " G.add_edge(plants[i], plants[j], color='lightgrey')\n",
573
+ " else:\n",
574
+ " G.add_edge(plants[i], plants[j], color='green' if compatibility_matrix[i][j] == 1 else 'pink')\n",
575
+ "\n",
576
+ "# Generate positions for the nodes\n",
577
+ "pos = nx.spring_layout(G)\n",
578
+ "\n",
579
+ "# Create node trace\n",
580
+ "node_trace = go.Scatter(\n",
581
+ " x=[pos[node][0] for node in G.nodes()],\n",
582
+ " y=[pos[node][1] for node in G.nodes()],\n",
583
+ " text=list(G.nodes()),\n",
584
+ " mode='markers+text',\n",
585
+ " textposition='top center',\n",
586
+ " hoverinfo='text',\n",
587
+ " marker=dict(\n",
588
+ " size=20,\n",
589
+ " color='lightblue',\n",
590
+ " line_width=2,\n",
591
+ " )\n",
592
+ ")\n",
593
+ "\n",
594
+ "# Create edge trace\n",
595
+ "edge_trace = go.Scatter(\n",
596
+ " x=[],\n",
597
+ " y=[],\n",
598
+ " line=dict(width=1, color='lightgrey'),\n",
599
+ " hoverinfo='none',\n",
600
+ " mode='lines'\n",
601
+ ")\n",
602
+ "\n",
603
+ "# Add coordinates to edge trace\n",
604
+ "for edge in G.edges():\n",
605
+ " x0, y0 = pos[edge[0]]\n",
606
+ " x1, y1 = pos[edge[1]]\n",
607
+ " edge_trace['x'] += tuple([x0, x1, None])\n",
608
+ " edge_trace['y'] += tuple([y0, y1, None])\n",
609
+ "\n",
610
+ "# Create edge traces for colored edges\n",
611
+ "edge_traces = []\n",
612
+ "for edge in G.edges(data=True):\n",
613
+ " x0, y0 = pos[edge[0]]\n",
614
+ " x1, y1 = pos[edge[1]]\n",
615
+ " color = edge[2]['color']\n",
616
+ " trace = go.Scatter(\n",
617
+ " x=[x0, x1],\n",
618
+ " y=[y0, y1],\n",
619
+ " mode='lines',\n",
620
+ " line=dict(width=2, color=color),\n",
621
+ " hoverinfo='none'\n",
622
+ " )\n",
623
+ " edge_traces.append(trace)\n",
624
+ "\n",
625
+ "# Create layout\n",
626
+ "layout = go.Layout(\n",
627
+ " title='Plant Compatibility Network Graph',\n",
628
+ " showlegend=False,\n",
629
+ " hovermode='closest',\n",
630
+ " margin=dict(b=20, l=5, r=5, t=40),\n",
631
+ " xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),\n",
632
+ " yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)\n",
633
+ ")\n",
634
+ "\n",
635
+ "# Create figure\n",
636
+ "fig = go.Figure(data=[edge_trace, *edge_traces, node_trace], layout=layout)\n",
637
+ "\n",
638
+ "# Render the graph using Plotly in Streamlit\n",
639
+ "st.plotly_chart(fig)\n"
640
+ ]
641
+ },
642
+ {
643
+ "cell_type": "code",
644
+ "execution_count": null,
645
+ "metadata": {},
646
+ "outputs": [],
647
+ "source": []
648
+ },
649
+ {
650
+ "cell_type": "code",
651
+ "execution_count": null,
652
+ "metadata": {},
653
+ "outputs": [],
654
+ "source": [
655
+ "import random\n",
656
+ "import numpy as np\n",
657
+ "\n",
658
+ "# Define the compatibility matrix\n",
659
+ "compatibility_matrix = np.array([\n",
660
+ " [-1, 1, 0, -1],\n",
661
+ " [1, -1, 1, -1],\n",
662
+ " [1, 0, -1, 1],\n",
663
+ " [-1, -1, 1, -1]\n",
664
+ "])\n",
665
+ "\n",
666
+ "# Define the user-selected plants, number of plant beds, and constraints\n",
667
+ "user_plants = [\"A\", \"B\", \"C\", \"D\"]\n",
668
+ "num_plant_beds = 4\n",
669
+ "min_species_per_bed = 1\n",
670
+ "max_species_per_bed = 2\n",
671
+ "\n",
672
+ "# Genetic Algorithm parameters\n",
673
+ "population_size = 50\n",
674
+ "num_generations = 100\n",
675
+ "tournament_size = 3\n",
676
+ "crossover_rate = 0.8\n",
677
+ "mutation_rate = 0.1\n",
678
+ "\n",
679
+ "# Generate an initial population randomly\n",
680
+ "def generate_initial_population():\n",
681
+ " population = []\n",
682
+ " for _ in range(population_size):\n",
683
+ " grouping = []\n",
684
+ " for _ in range(num_plant_beds):\n",
685
+ " num_species = random.randint(min_species_per_bed, max_species_per_bed)\n",
686
+ " species = random.sample(user_plants, num_species)\n",
687
+ " grouping.append(species)\n",
688
+ " population.append(grouping)\n",
689
+ " return population\n",
690
+ "\n",
691
+ "# Calculate the fitness score of a grouping\n",
692
+ "def calculate_fitness(grouping):\n",
693
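+ " # Note: only pairs of species placed in different beds contribute to the score\n",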
+ " score = 0\n",
694
+ " for bed1 in range(num_plant_beds):\n",
695
+ " for bed2 in range(bed1 + 1, num_plant_beds):\n",
696
+ " for species1 in grouping[bed1]:\n",
697
+ " for species2 in grouping[bed2]:\n",
698
+ " species1_index = user_plants.index(species1)\n",
699
+ " species2_index = user_plants.index(species2)\n",
700
+ " score += compatibility_matrix[species1_index][species2_index]\n",
701
+ " return score\n",
702
+ "\n",
703
+ "# Perform tournament selection\n",
704
+ "def tournament_selection(population):\n",
705
+ " selected = []\n",
706
+ " for _ in range(population_size):\n",
707
+ " participants = random.sample(population, tournament_size)\n",
708
+ " winner = max(participants, key=calculate_fitness)\n",
709
+ " selected.append(winner)\n",
710
+ " return selected\n",
711
+ "\n",
712
+ "# Perform crossover between two parents\n",
713
+ "def crossover(parent1, parent2):\n",
714
+ " if random.random() < crossover_rate:\n",
715
+ " crossover_point = random.randint(1, num_plant_beds - 1)\n",
716
+ " child1 = parent1[:crossover_point] + parent2[crossover_point:]\n",
717
+ " child2 = parent2[:crossover_point] + parent1[crossover_point:]\n",
718
+ " return child1, child2\n",
719
+ " else:\n",
720
+ " return parent1, parent2\n",
721
+ "\n",
722
+ "# Perform mutation on an individual\n",
723
+ "def mutate(individual):\n",
724
+ " if random.random() < mutation_rate:\n",
725
+ " mutated_bed = random.randint(0, num_plant_beds - 1)\n",
726
+ " new_species = random.sample(user_plants, random.randint(min_species_per_bed, max_species_per_bed))\n",
727
+ " individual[mutated_bed] = new_species\n",
728
+ " return individual\n",
729
+ "\n",
730
+ "# Perform replacement of the population with the offspring\n",
731
+ "def replacement(population, offspring):\n",
732
+ " sorted_population = sorted(population, key=calculate_fitness, reverse=True)\n",
733
+ " sorted_offspring = sorted(offspring, key=calculate_fitness, reverse=True)\n",
734
+ " return sorted_population[:population_size - len(offspring)] + sorted_offspring\n",
735
+ "\n",
736
+ "# Genetic Algorithm main function\n",
737
+ "def genetic_algorithm():\n",
738
+ " population = generate_initial_population()\n",
739
+ "\n",
740
+ " for generation in range(num_generations):\n",
741
+ " print(f\"Generation {generation + 1}\")\n",
742
+ "\n",
743
+ " selected_population = tournament_selection(population)\n",
744
+ " offspring = []\n",
745
+ "\n",
746
+ " for _ in range(population_size // 2):\n",
747
+ " parent1 = random.choice(selected_population)\n",
748
+ " parent2 = random.choice(selected_population)\n",
749
+ " child1, child2 = crossover(parent1, parent2)\n",
750
+ " child1 = mutate(child1)\n",
751
+ " child2 = mutate(child2)\n",
752
+ " offspring.extend([child1, child2])\n",
753
+ "\n",
754
+ " population = replacement(population, offspring)\n",
755
+ "\n",
756
+ " best_grouping = max(population, key=calculate_fitness)\n",
757
+ " best_fitness = calculate_fitness(best_grouping)\n",
758
+ " print(f\"Best Grouping: {best_grouping}\")\n",
759
+ " print(f\"Fitness Score: {best_fitness}\")\n",
760
+ "\n",
761
+ "# Run the Genetic Algorithm\n",
762
+ "genetic_algorithm()\n",
763
+ " "
764
+ ]
765
+ },
766
+ {
767
+ "cell_type": "code",
768
+ "execution_count": null,
769
+ "metadata": {},
770
+ "outputs": [],
771
+ "source": [
772
+ "def query_farmersalmanac(title, first_paragraph_only=True):\n",
773
+ " base_url = \"https://www.almanac.com/companion-planting-guide-vegetables\"\n",
774
+ " url = f\"{base_url}/w/api.php?format=json&action=query&prop=extracts&explaintext=1&titles={title}\"\n",
775
+ " if first_paragraph_only:\n",
776
+ " url += \"&exintro=1\"\n",
777
+ " data = requests.get(url).json()\n",
778
+ " return Document(\n",
779
+ " metadata={\"source\": f\"{base_url}\"},\n",
780
+ " page_content=list(data[\"query\"][\"pages\"].values())[0][\"extract\"],\n",
781
+ " )"
782
+ ]
783
+ },
784
+ {
785
+ "cell_type": "code",
786
+ "execution_count": null,
787
+ "metadata": {},
788
+ "outputs": [],
789
+ "source": []
790
+ },
791
+ {
792
+ "cell_type": "code",
793
+ "execution_count": null,
794
+ "metadata": {},
795
+ "outputs": [],
796
+ "source": []
797
+ },
798
+ {
799
+ "cell_type": "code",
800
+ "execution_count": null,
801
+ "metadata": {},
802
+ "outputs": [],
803
+ "source": []
804
+ },
805
+ {
806
+ "cell_type": "code",
807
+ "execution_count": null,
808
+ "metadata": {},
809
+ "outputs": [],
810
+ "source": []
811
+ },
812
+ {
813
+ "cell_type": "markdown",
814
+ "metadata": {},
815
+ "source": [
816
+ "## future with sources\n",
817
+ "- https://dzlab.github.io/2023/01/02/prompt-langchain/\n",
818
+ "- https://techcommunity.microsoft.com/t5/startups-at-microsoft/build-a-chatbot-to-query-your-documentation-using-langchain-and/ba-p/3833134"
819
+ ]
820
+ }
821
+ ],
822
+ "metadata": {
823
+ "kernelspec": {
824
+ "display_name": "GRDN_env",
825
+ "language": "python",
826
+ "name": "grdn_env"
827
+ },
828
+ "language_info": {
829
+ "codemirror_mode": {
830
+ "name": "ipython",
831
+ "version": 3
832
+ },
833
+ "file_extension": ".py",
834
+ "mimetype": "text/x-python",
835
+ "name": "python",
836
+ "nbconvert_exporter": "python",
837
+ "pygments_lexer": "ipython3",
838
+ "version": "3.10.9"
839
+ }
840
+ },
841
+ "nbformat": 4,
842
+ "nbformat_minor": 2
843
+ }
notebooks/langchain experimenting.ipynb ADDED
@@ -0,0 +1,843 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os\n",
10
+ "import langchain\n",
11
+ "from langchain import PromptTemplate, LLMChain"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "markdown",
16
+ "metadata": {},
17
+ "source": [
18
+ "# huggingface"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": null,
24
+ "metadata": {},
25
+ "outputs": [],
26
+ "source": [
27
+ "from langchain import HuggingFacePipeline\n",
28
+ "\n",
29
+ "llm = HuggingFacePipeline.from_model_id(\n",
30
+ " model_id=\"bigscience/bloom-560m\",\n",
31
+ " task=\"text-generation\",\n",
32
+ " model_kwargs={\"temperature\": 0, \"max_length\": 64},\n",
33
+ ")\n",
34
+ "\n",
35
+ "\n",
36
+ "# Integrate the model in an LLMChain\n",
37
+ "from langchain import PromptTemplate, LLMChain\n",
38
+ "\n",
39
+ "template = \"\"\"Question: {question}\n",
40
+ "\n",
41
+ "Answer: Let's think step by step.\"\"\"\n",
42
+ "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
43
+ "\n",
44
+ "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
45
+ "\n",
46
+ "question = \"What is electroencephalography?\"\n",
47
+ "\n",
48
+ "print(llm_chain.run(question))"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "markdown",
53
+ "metadata": {},
54
+ "source": [
55
+ "# galactica"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "code",
60
+ "execution_count": null,
61
+ "metadata": {},
62
+ "outputs": [],
63
+ "source": [
64
+ "import galai as gal\n",
65
+ "\n",
66
+ "model = gal.load_model(\"standard\")\n",
67
+ "# model.generate(\"Scaled dot product attention:\\n\\n\\\\[\")\n",
68
+ "# Scaled dot product attention:\\n\\n\\\\[ \\\\displaystyle\\\\text{Attention}(Q,K,V)=\\\\text{softmax}(\\\\frac{QK^{T}}{\\\\sqrt{d_{k}}}%\\n)V \\\\]"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": null,
74
+ "metadata": {},
75
+ "outputs": [],
76
+ "source": [
77
+ "model.generate(\"from this list, [vodka, strawberries, corn, peas],create a new python list that ONLY includes produce <work>\")"
78
+ ]
79
+ },
80
+ {
81
+ "cell_type": "code",
82
+ "execution_count": null,
83
+ "metadata": {},
84
+ "outputs": [],
85
+ "source": [
86
+ "TEXT = \"vodka, strawberries, corn, peas, cherries, sodapop\"\n",
87
+ "model.generate( '\\n\\nQuestion: Of the items in this list, \\n\\n vodka, strawberries, corn, peas, cherries, diet coke, \\n\\n which can grow in a garden?\\n\\nAnswer:')\n"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": null,
93
+ "metadata": {},
94
+ "outputs": [],
95
+ "source": [
96
+ "model.generate(\"a plant compatability matrix is a a python matrix and will have a score of -1 for negative relationship between plants, 0 for neutral relationship between plants, and 1 for a positive relationship between plants. create a python array of plant compatibility between the plants listed: \" + str(plant_list))"
97
+ ]
98
+ },
99
+ {
100
+ "cell_type": "code",
101
+ "execution_count": null,
102
+ "metadata": {},
103
+ "outputs": [],
104
+ "source": []
105
+ },
106
+ {
107
+ "cell_type": "markdown",
108
+ "metadata": {},
109
+ "source": [
110
+ "# openai + langchain"
111
+ ]
112
+ },
113
+ {
114
+ "cell_type": "code",
115
+ "execution_count": null,
116
+ "metadata": {},
117
+ "outputs": [],
118
+ "source": [
119
+ "# plant compatiblity context source: https://waldenlabs.com/the-ultimate-companion-planting-guide-chart/"
120
+ ]
121
+ },
122
+ {
123
+ "cell_type": "code",
124
+ "execution_count": null,
125
+ "metadata": {},
126
+ "outputs": [],
127
+ "source": [
128
+ "import os\n",
129
+ "from langchain.chat_models import ChatOpenAI\n",
130
+ "\n",
131
+ "file_path = 'C:/Users/dheym/OneDrive/Documents/api_keys/openai_api_keys.txt'\n",
132
+ "with open(file_path, 'r') as file:\n",
133
+ " OPENAI_API_KEY = file.read()\n",
134
+ "\n",
135
+ "os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY\n",
136
+ "#If you'd prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\n",
137
+ "\n",
138
+ "\n",
139
+ "chat = ChatOpenAI()\n",
140
+ "\n"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": null,
146
+ "metadata": {},
147
+ "outputs": [],
148
+ "source": [
149
+ "def parse_and_evaluate_text(text):\n",
150
+ " # Find the indices of the opening and closing brackets\n",
151
+ " opening_bracket_index = text.find(\"[\")\n",
152
+ " closing_bracket_index = text.find(\"]\")\n",
153
+ "\n",
154
+ " if opening_bracket_index != -1 and closing_bracket_index != -1:\n",
155
+ " # Extract the text within the brackets\n",
156
+ " extracted_list = \"[\" + text[opening_bracket_index + 1: closing_bracket_index] + \"]\"\n",
157
+ " # Return the evaluated text list\n",
158
+ " return eval(extracted_list)\n",
159
+ " \n",
160
+ "\n",
161
+ " else:\n",
162
+ " print(\"Error with parsing plant list\")\n",
163
+ " return None"
164
+ ]
165
+ },
166
+ {
167
+ "cell_type": "code",
168
+ "execution_count": null,
169
+ "metadata": {},
170
+ "outputs": [],
171
+ "source": [
172
+ "from langchain.prompts.chat import (\n",
173
+ " ChatPromptTemplate,\n",
174
+ " SystemMessagePromptTemplate,\n",
175
+ " AIMessagePromptTemplate,\n",
176
+ " HumanMessagePromptTemplate,\n",
177
+ ")\n",
178
+ "\n",
179
+ "\n",
180
+ "def parse_and_evaluate_text(text):\n",
181
+ " # Find the indices of the opening and closing brackets\n",
182
+ " opening_bracket_index = text.find(\"[\")\n",
183
+ " closing_bracket_index = text.find(\"]\")\n",
184
+ "\n",
185
+ " if opening_bracket_index != -1 and closing_bracket_index != -1:\n",
186
+ " # Extract the text within the brackets\n",
187
+ " extracted_list = \"[\" + text[opening_bracket_index + 1: closing_bracket_index] + \"]\"\n",
188
+ " # Return the evaluated text list\n",
189
+ " return eval(extracted_list)\n",
190
+ " \n",
191
+ "\n",
192
+ " else:\n",
193
+ " print(\"Error with parsing plant list\")\n",
194
+ " return None\n",
195
+ " \n",
196
+ "def chat_response(template, prompt_text):\n",
197
+ " system_message_prompt = SystemMessagePromptTemplate.from_template(template)\n",
198
+ " human_template=\"{text}\"\n",
199
+ " human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)\n",
200
+ " chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])\n",
201
+ " response = chat(chat_prompt.format_prompt(text= prompt_text).to_messages())\n",
202
+ " return response\n",
203
+ "\n",
204
+ "# get the plant list from user input\n",
205
+ "def get_plant_list(input_plant_text):\n",
206
+ " template=\"You are a helpful assistant that knows all about gardening and plants and python data structures.\"\n",
207
+ " text = 'which of the elements of this list can be grown in a garden, [' + input_plant_text + ']? Return JUST a python list object containing the elements that can be grown in a garden. Do not include any other text or explanation.'\n",
208
+ " plant_list_text = chat_response(template, text)\n",
209
+ " plant_list = parse_and_evaluate_text(plant_list_text.content)\n",
210
+ " print(plant_list)\n",
211
+ " return plant_list\n",
212
+ "\n",
213
+ "# get compatability matrix for companion planting\n",
214
+ "def get_compatibility_matrix(plant_list):\n",
215
+ " # Convert the compatibility matrix to a string\n",
216
+ " with open('compatibilities_text.txt', 'r') as file:\n",
217
+ " # Read the contents of the file\n",
218
+ " compatibility_text = file.read()\n",
219
+ " plant_comp_context = compatibility_text\n",
220
+ " template=\"You are a helpful assistant that knows all about gardening, companion planting, and python data structures- specifically compatibility matrices.\"\n",
221
+ " text = 'from this list of plants, [' + str(plant_list) + '], Return JUST a python array (with values separated by commas like this: [[0,1],[1,0]]\\n\\n ) for companion plant compatibility. Each row and column should represent plants, and the element of the array will contain a -1, 0, or 1 depending on if the relationship between plants is antagonists, neutral, or companions, respectively. You must refer to this knowledge base of information on plant compatibility: \\n\\n, ' + plant_comp_context + '\\n\\n A plant\\'s compatibility with itself is always 0. Do not include any other text or explanation.'\n",
222
+ " compatibility_mat = chat_response(template, text)\n",
223
+ " \n",
224
+ " # Find the indices of the opening and closing brackets\n",
225
+ " opening_bracket_index = compatibility_mat.content.find(\"[[\")\n",
226
+ " closing_bracket_index = compatibility_mat.content.find(\"]]\")\n",
227
+ " if opening_bracket_index != -1 and closing_bracket_index != -1:\n",
228
+ " # Extract the text within the brackets\n",
229
+ " extracted_mat = \"[\" + compatibility_mat.content[opening_bracket_index + 1: closing_bracket_index] + \"]]\"\n",
230
+ " # Return the evaluated mat\n",
231
+ " return eval(extracted_mat)\n",
232
+ " else:\n",
233
+ " print(\"Error with parsing plant compatibility matrix\")\n",
234
+ " return None\n",
235
+ " return \n",
236
+ "\n",
237
+ "\n",
238
+ "input_plant_text = \"strawberries, mint, pepper, diet coke, carrots, lettuce, vodka, basil, tomatoes, marigolds, lemons, spinach, brocoli\"\n",
239
+ "input_plant_text = \"apples, basil, bean, rue, oregano, onion\"\n",
240
+ "plant_list = get_plant_list(input_plant_text)\n",
241
+ "extracted_mat = get_compatibility_matrix(plant_list)\n",
242
+ "print(extracted_mat)"
243
+ ]
244
+ },
245
+ {
246
+ "cell_type": "markdown",
247
+ "metadata": {},
248
+ "source": [
249
+ "## langchain additional context and fine tuning"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "code",
254
+ "execution_count": null,
255
+ "metadata": {},
256
+ "outputs": [],
257
+ "source": [
258
+ "# process the plant compatibility matrix to a json\n",
259
+ "import csv\n",
260
+ "import json\n",
261
+ "\n",
262
+ "# Open the CSV file\n",
263
+ "with open('plant_compatability.csv', 'r') as file:\n",
264
+ " reader = csv.reader(file)\n",
265
+ " \n",
266
+ " # Read the header row to get the plant names\n",
267
+ " header = next(reader)\n",
268
+ " \n",
269
+ " # Create an empty dictionary to store the compatibility matrix\n",
270
+ " compatibility_matrix = {}\n",
271
+ " \n",
272
+ " # Iterate over the rows in the CSV file\n",
273
+ " for row in reader:\n",
274
+ " # Extract the plant name from the first column\n",
275
+ " plant = row[0]\n",
276
+ " \n",
277
+ " # Create a dictionary to store the compatibility values for the current plant\n",
278
+ " compatibility_values = {}\n",
279
+ " \n",
280
+ " # Iterate over the compatibility values in the row\n",
281
+ " for i, value in enumerate(row[1:], start=1):\n",
282
+ " # Extract the plant name from the header row\n",
283
+ " companion_plant = header[i]\n",
284
+ " \n",
285
+ " # Convert the compatibility value to an integer\n",
286
+ " compatibility = int(value) if value else 0\n",
287
+ " \n",
288
+ " # Add the compatibility value to the dictionary\n",
289
+ " compatibility_values[companion_plant] = compatibility\n",
290
+ " \n",
291
+ " # Add the compatibility values dictionary to the main compatibility matrix\n",
292
+ " compatibility_matrix[plant] = compatibility_values\n",
293
+ "\n",
294
+ "# Save the compatibility matrix as a JSON file\n",
295
+ "with open('compatibility_matrix.json', 'w') as file:\n",
296
+ " json.dump(compatibility_matrix, file, indent=4)"
297
+ ]
298
+ },
299
+ {
300
+ "cell_type": "code",
301
+ "execution_count": null,
302
+ "metadata": {},
303
+ "outputs": [],
304
+ "source": [
305
+ "import openai\n",
306
+ "\n",
307
+ "# Load the compatibility matrix from the JSON file\n",
308
+ "with open('compatibility_matrix.json', 'r') as file:\n",
309
+ " compatibility_matrix = json.load(file)\n",
310
+ "\n",
311
+ "# Convert the compatibility matrix to a string\n",
312
+ "compatibility_matrix_text = json.dumps(compatibility_matrix)\n",
313
+ "with open('compatibilities_text.txt', 'r') as file:\n",
314
+ " # Read the contents of the file\n",
315
+ " compatibility_matrix_text = file.read()\n",
316
+ "\n",
317
+ "# Set up the LangChain API credentials\n",
318
+ "openai.api_key = OPENAI_API_KEY\n",
319
+ "\n",
320
+ "# Define the prompt for the GPT model\n",
321
+ "prompt = \"Can you provide companion plant suggestions for my garden given what you know about companion planting?\"\n",
322
+ "\n",
323
+ "# Concatenate the prompt and compatibility matrix text as the input to the GPT model\n",
324
+ "input_text = f\"{prompt}\\n\\n{compatibility_matrix_text}\"\n",
325
+ "\n",
326
+ "# Generate a response from the GPT model\n",
327
+ "response = openai.Completion.create(\n",
328
+ " engine='text-davinci-003',\n",
329
+ " prompt=input_text,\n",
330
+ " max_tokens=575\n",
331
+ ")\n",
332
+ "\n",
333
+ "print(response.choices[0])\n",
334
+ "# Extract the generated companion plant suggestions from the response\n",
335
+ "suggestions = response.choices[0].text.strip()\n",
336
+ "\n",
337
+ "# Print the companion plant suggestions\n",
338
+ "print(suggestions)"
339
+ ]
340
+ },
341
+ {
342
+ "cell_type": "code",
343
+ "execution_count": null,
344
+ "metadata": {},
345
+ "outputs": [],
346
+ "source": [
347
+ "from langchain.client import Client\n",
348
+ "\n",
349
+ "# Load the fine-tuned GPT model\n",
350
+ "model = OpenAI(openai_api_key=OPENAI_API_KEY)\n",
351
+ "\n",
352
+ "# Load the knowledge base or context from websites or documents\n",
353
+ "knowledge_base = YourProcessedData()\n",
354
+ "\n",
355
+ "# Initialize the Langchain client\n",
356
+ "client = Client()\n",
357
+ "\n",
358
+ "# Set the context for the GPT model\n",
359
+ "context = \"Context from websites or documents\"\n",
360
+ "model.set_context(context)\n",
361
+ "\n",
362
+ "# Get companion plant compatibility predictions\n",
363
+ "plants = ['strawberries', 'mint', 'carrots', 'lettuce', 'basil']\n",
364
+ "predictions = []\n",
365
+ "\n",
366
+ "for plant in plants:\n",
367
+ " # Generate a question for each plant\n",
368
+ " question = f\"Which plants are compatible with {plant}?\"\n",
369
+ " \n",
370
+ " # Provide the question and context to the Langchain client\n",
371
+ " response = client.query(question, context=context)\n",
372
+ " \n",
373
+ " # Process and extract the answer from the response\n",
374
+ " answer = response['answer']\n",
375
+ " predictions.append((plant, answer))\n",
376
+ "\n",
377
+ "# Process and display the compatibility predictions\n",
378
+ "for plant, compatibility in predictions:\n",
379
+ " print(f\"{plant}: {compatibility}\")"
380
+ ]
381
+ },
382
+ {
383
+ "cell_type": "code",
384
+ "execution_count": null,
385
+ "metadata": {},
386
+ "outputs": [],
387
+ "source": []
388
+ },
389
+ {
390
+ "cell_type": "code",
391
+ "execution_count": null,
392
+ "metadata": {},
393
+ "outputs": [],
394
+ "source": []
395
+ },
396
+ {
397
+ "cell_type": "code",
398
+ "execution_count": null,
399
+ "metadata": {},
400
+ "outputs": [],
401
+ "source": [
402
+ "import numpy as np\n",
403
+ "import networkx as nx\n",
404
+ "import matplotlib.pyplot as plt\n",
405
+ "\n",
406
+ "# Define the plant list\n",
407
+ "plants = ['strawberries', 'mint', 'carrots', 'lettuce', 'basil', 'tomatoes', 'marigolds', 'lemons', 'strawberries', 'spinach', 'broccoli']\n",
408
+ "\n",
409
+ "# Define the compatibility matrix\n",
410
+ "compatibility_matrix = np.array([\n",
411
+ "[0, 1, 0, 0, 1, -1, 1, 0, 0, 0, -1],\n",
412
+ "[1, 0, 0, 0, 1, -1, 1, 0, 1, 0, -1],\n",
413
+ "[0, 0, 0, -1, 0, 1, 0, 0, 0, 1, 0],\n",
414
+ "[0, 0, -1, 0, 0, 1, 0, 0, 0, 1, 0],\n",
415
+ "[1, 1, 0, 0, 0, -1, 1, 0, 0, 0, -1],\n",
416
+ "[-1, -1, 1, 1, -1, 0, -1, 0, 0, 0, 1],\n",
417
+ "[1, 1, 0, 0, 1, -1, 0, 0, 0, 0, -1],\n",
418
+ "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
419
+ "[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
420
+ "[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, -1],\n",
421
+ "[-1, -1, 0, 0, -1, 1, -1, 0, 0, -1, 0]\n",
422
+ "])\n",
423
+ "\n",
424
+ "# Create an empty graph\n",
425
+ "G = nx.Graph()\n",
426
+ "\n",
427
+ "# Add nodes (plants) to the graph\n",
428
+ "G.add_nodes_from(plants)\n",
429
+ "\n",
430
+ "# Add edges (compatibility) to the graph\n",
431
+ "for i in range(len(plants)):\n",
432
+ " for j in range(i + 1, len(plants)):\n",
433
+ " if compatibility_matrix[i][j] == 0:\n",
434
+ " color = 'grey'\n",
435
+ " elif compatibility_matrix[i][j] == -1:\n",
436
+ " color = 'pink'\n",
437
+ " else:\n",
438
+ " color = 'green'\n",
439
+ " G.add_edge(plants[i], plants[j], color=color)\n",
440
+ "\n",
441
+ "# Plot the graph\n",
442
+ "pos = nx.spring_layout(G)\n",
443
+ "colors = [G[u][v]['color'] for u, v in G.edges()]\n",
444
+ "nx.draw_networkx(G, pos, with_labels=True, node_color='lightgreen', edge_color=colors, width=2.0, alpha=0.8)\n",
445
+ "\n",
446
+ "# Set edge colors in the legend\n",
447
+ "color_legend = {'Neutral': 'grey', 'Negative': 'pink', 'Positive': 'green'}\n",
448
+ "legend_lines = [plt.Line2D([0], [0], color=color, linewidth=3) for color in color_legend.values()]\n",
449
+ "legend_labels = list(color_legend.keys())\n",
450
+ "plt.legend(legend_lines, legend_labels, loc='best')\n",
451
+ "\n",
452
+ "# Show the plot\n",
453
+ "plt.show()"
454
+ ]
455
+ },
456
+ {
457
+ "cell_type": "code",
458
+ "execution_count": null,
459
+ "metadata": {},
460
+ "outputs": [],
461
+ "source": [
462
+ "import streamlit as st\n",
463
+ "import networkx as nx\n",
464
+ "import plotly.graph_objects as go\n",
465
+ "\n",
466
+ "# Define the plants and compatibility matrix\n",
467
+ "plants = ['strawberries', 'mint', 'carrots', 'lettuce', 'basil', 'tomatoes', 'marigolds', 'lemons', 'strawberries', 'spinach', 'broccoli']\n",
468
+ "compatibility_matrix = [\n",
469
+ " [0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1],\n",
470
+ " [1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1],\n",
471
+ " [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n",
472
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
473
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
474
+ " [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
475
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
476
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
477
+ " [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n",
478
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
479
+ " [1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0]\n",
480
+ "]\n",
481
+ "\n",
482
+ "# Create a directed graph\n",
483
+ "G = nx.DiGraph()\n",
484
+ "\n",
485
+ "# Add nodes to the graph\n",
486
+ "G.add_nodes_from(plants)\n",
487
+ "\n",
488
+ "# Define positions for the nodes\n",
489
+ "pos = nx.spring_layout(G)\n",
490
+ "\n",
491
+ "# Assign positions to the nodes\n",
492
+ "for node, position in pos.items():\n",
493
+ " G.nodes[node]['pos'] = position\n",
494
+ "\n",
495
+ "# Iterate over the compatibility matrix and add edges with corresponding colors\n",
496
+ "for i in range(len(plants)):\n",
497
+ " for j in range(len(plants)):\n",
498
+ " if compatibility_matrix[i][j] == -1:\n",
499
+ " G.add_edge(plants[i], plants[j], color='red')\n",
500
+ " elif compatibility_matrix[i][j] == 1:\n",
501
+ " G.add_edge(plants[i], plants[j], color='green')\n",
502
+ " else:\n",
503
+ " G.add_edge(plants[i], plants[j], color='lightgray')\n",
504
+ "\n",
505
+ "# Create edge traces\n",
506
+ "# Create edge traces\n",
507
+ "edge_traces = []\n",
508
+ "for edge in G.edges():\n",
509
+ " x0, y0 = G.nodes[edge[0]]['pos']\n",
510
+ " x1, y1 = G.nodes[edge[1]]['pos']\n",
511
+ " color = G.edges[edge]['color']\n",
512
+ " trace = go.Scatter(x=[x0, x1, None], y=[y0, y1, None], mode='lines', line=dict(color=color, width=2))\n",
513
+ " edge_traces.append(trace)\n",
514
+ "\n",
515
+ "# Create node traces\n",
516
+ "node_traces = []\n",
517
+ "for node in G.nodes():\n",
518
+ " x, y = G.nodes[node]['pos']\n",
519
+ " trace = go.Scatter(x=[x], y=[y], mode='markers', marker=dict(color='black', size=10), name=node)\n",
520
+ " node_traces.append(trace)\n",
521
+ "\n",
522
+ "# Create figure\n",
523
+ "fig = go.Figure(data=edge_traces + node_traces)\n",
524
+ "\n",
525
+ "# Set layout options\n",
526
+ "fig.update_layout(\n",
527
+ " title='Plant Network',\n",
528
+ " showlegend=False,\n",
529
+ " hovermode='closest',\n",
530
+ " margin=dict(b=20, l=5, r=5, t=40),\n",
531
+ " xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),\n",
532
+ " yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)\n",
533
+ ")\n",
534
+ "\n",
535
+ "# Render the graph\n",
536
+ "#st.plotly_chart(fig)\n",
537
+ "fig"
538
+ ]
539
+ },
540
+ {
541
+ "cell_type": "code",
542
+ "execution_count": null,
543
+ "metadata": {},
544
+ "outputs": [],
545
+ "source": [
546
+ "import streamlit as st\n",
547
+ "import networkx as nx\n",
548
+ "import plotly.graph_objects as go\n",
549
+ "\n",
550
+ "# Define the plants and compatibility matrix\n",
551
+ "plants = ['strawberries', 'mint', 'carrots', 'lettuce', 'basil', 'tomatoes', 'marigolds', 'lemons', 'strawberries', 'spinach', 'broccoli']\n",
552
+ "compatibility_matrix = [\n",
553
+ " [0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1],\n",
554
+ " [1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1],\n",
555
+ " [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n",
556
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
557
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
558
+ " [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
559
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
560
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
561
+ " [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n",
562
+ " [1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n",
563
+ " [1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0]\n",
564
+ "]\n",
565
+ "\n",
566
+ "# Create the graph\n",
567
+ "G = nx.Graph()\n",
568
+ "G.add_nodes_from(plants)\n",
569
+ "for i in range(len(plants)):\n",
570
+ " for j in range(i + 1, len(plants)):\n",
571
+ " if compatibility_matrix[i][j] == 0:\n",
572
+ " G.add_edge(plants[i], plants[j], color='lightgrey')\n",
573
+ " else:\n",
574
+ " G.add_edge(plants[i], plants[j], color='green' if compatibility_matrix[i][j] == 1 else 'pink')\n",
575
+ "\n",
576
+ "# Generate positions for the nodes\n",
577
+ "pos = nx.spring_layout(G)\n",
578
+ "\n",
579
+ "# Create node trace\n",
580
+ "node_trace = go.Scatter(\n",
581
+ " x=[pos[node][0] for node in G.nodes()],\n",
582
+ " y=[pos[node][1] for node in G.nodes()],\n",
583
+ " text=list(G.nodes()),\n",
584
+ " mode='markers+text',\n",
585
+ " textposition='top center',\n",
586
+ " hoverinfo='text',\n",
587
+ " marker=dict(\n",
588
+ " size=20,\n",
589
+ " color='lightblue',\n",
590
+ " line_width=2,\n",
591
+ " )\n",
592
+ ")\n",
593
+ "\n",
594
+ "# Create edge trace\n",
595
+ "edge_trace = go.Scatter(\n",
596
+ " x=[],\n",
597
+ " y=[],\n",
598
+ " line=dict(width=1, color='lightgrey'),\n",
599
+ " hoverinfo='none',\n",
600
+ " mode='lines'\n",
601
+ ")\n",
602
+ "\n",
603
+ "# Add coordinates to edge trace\n",
604
+ "for edge in G.edges():\n",
605
+ " x0, y0 = pos[edge[0]]\n",
606
+ " x1, y1 = pos[edge[1]]\n",
607
+ " edge_trace['x'] += tuple([x0, x1, None])\n",
608
+ " edge_trace['y'] += tuple([y0, y1, None])\n",
609
+ "\n",
610
+ "# Create edge traces for colored edges\n",
611
+ "edge_traces = []\n",
612
+ "for edge in G.edges(data=True):\n",
613
+ " x0, y0 = pos[edge[0]]\n",
614
+ " x1, y1 = pos[edge[1]]\n",
615
+ " color = edge[2]['color']\n",
616
+ " trace = go.Scatter(\n",
617
+ " x=[x0, x1],\n",
618
+ " y=[y0, y1],\n",
619
+ " mode='lines',\n",
620
+ " line=dict(width=2, color=color),\n",
621
+ " hoverinfo='none'\n",
622
+ " )\n",
623
+ " edge_traces.append(trace)\n",
624
+ "\n",
625
+ "# Create layout\n",
626
+ "layout = go.Layout(\n",
627
+ " title='Plant Compatibility Network Graph',\n",
628
+ " showlegend=False,\n",
629
+ " hovermode='closest',\n",
630
+ " margin=dict(b=20, l=5, r=5, t=40),\n",
631
+ " xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),\n",
632
+ " yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)\n",
633
+ ")\n",
634
+ "\n",
635
+ "# Create figure\n",
636
+ "fig = go.Figure(data=[edge_trace, *edge_traces, node_trace], layout=layout)\n",
637
+ "\n",
638
+ "# Render the graph using Plotly in Streamlit\n",
639
+ "st.plotly_chart(fig)\n"
640
+ ]
641
+ },
642
+ {
643
+ "cell_type": "code",
644
+ "execution_count": null,
645
+ "metadata": {},
646
+ "outputs": [],
647
+ "source": []
648
+ },
649
+ {
650
+ "cell_type": "code",
651
+ "execution_count": null,
652
+ "metadata": {},
653
+ "outputs": [],
654
+ "source": [
655
+ "import random\n",
656
+ "import numpy as np\n",
657
+ "\n",
658
+ "# Define the compatibility matrix\n",
659
+ "compatibility_matrix = np.array([\n",
660
+ " [-1, 1, 0, -1],\n",
661
+ " [1, -1, 1, -1],\n",
662
+ " [1, 0, -1, 1],\n",
663
+ " [-1, -1, 1, -1]\n",
664
+ "])\n",
665
+ "\n",
666
+ "# Define the user-selected plants, number of plant beds, and constraints\n",
667
+ "user_plants = [\"A\", \"B\", \"C\", \"D\"]\n",
668
+ "num_plant_beds = 4\n",
669
+ "min_species_per_bed = 1\n",
670
+ "max_species_per_bed = 2\n",
671
+ "\n",
672
+ "# Genetic Algorithm parameters\n",
673
+ "population_size = 50\n",
674
+ "num_generations = 100\n",
675
+ "tournament_size = 3\n",
676
+ "crossover_rate = 0.8\n",
677
+ "mutation_rate = 0.1\n",
678
+ "\n",
679
+ "# Generate an initial population randomly\n",
680
+ "def generate_initial_population():\n",
681
+ " population = []\n",
682
+ " for _ in range(population_size):\n",
683
+ " grouping = []\n",
684
+ " for _ in range(num_plant_beds):\n",
685
+ " num_species = random.randint(min_species_per_bed, max_species_per_bed)\n",
686
+ " species = random.sample(user_plants, num_species)\n",
687
+ " grouping.append(species)\n",
688
+ " population.append(grouping)\n",
689
+ " return population\n",
690
+ "\n",
691
+ "# Calculate the fitness score of a grouping\n",
692
+ "def calculate_fitness(grouping):\n",
693
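+ " # Note: only pairs of species placed in different beds contribute to the score\n",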
+ " score = 0\n",
694
+ " for bed1 in range(num_plant_beds):\n",
695
+ " for bed2 in range(bed1 + 1, num_plant_beds):\n",
696
+ " for species1 in grouping[bed1]:\n",
697
+ " for species2 in grouping[bed2]:\n",
698
+ " species1_index = user_plants.index(species1)\n",
699
+ " species2_index = user_plants.index(species2)\n",
700
+ " score += compatibility_matrix[species1_index][species2_index]\n",
701
+ " return score\n",
702
+ "\n",
703
+ "# Perform tournament selection\n",
704
+ "def tournament_selection(population):\n",
705
+ " selected = []\n",
706
+ " for _ in range(population_size):\n",
707
+ " participants = random.sample(population, tournament_size)\n",
708
+ " winner = max(participants, key=calculate_fitness)\n",
709
+ " selected.append(winner)\n",
710
+ " return selected\n",
711
+ "\n",
712
+ "# Perform crossover between two parents\n",
713
+ "def crossover(parent1, parent2):\n",
714
+ " if random.random() < crossover_rate:\n",
715
+ " crossover_point = random.randint(1, num_plant_beds - 1)\n",
716
+ " child1 = parent1[:crossover_point] + parent2[crossover_point:]\n",
717
+ " child2 = parent2[:crossover_point] + parent1[crossover_point:]\n",
718
+ " return child1, child2\n",
719
+ " else:\n",
720
+ " return parent1, parent2\n",
721
+ "\n",
722
+ "# Perform mutation on an individual\n",
723
+ "def mutate(individual):\n",
724
+ " if random.random() < mutation_rate:\n",
725
+ " mutated_bed = random.randint(0, num_plant_beds - 1)\n",
726
+ " new_species = random.sample(user_plants, random.randint(min_species_per_bed, max_species_per_bed))\n",
727
+ " individual[mutated_bed] = new_species\n",
728
+ " return individual\n",
729
+ "\n",
730
+ "# Perform replacement of the population with the offspring\n",
731
+ "def replacement(population, offspring):\n",
732
+ " sorted_population = sorted(population, key=calculate_fitness, reverse=True)\n",
733
+ " sorted_offspring = sorted(offspring, key=calculate_fitness, reverse=True)\n",
734
+ " return sorted_population[:population_size - len(offspring)] + sorted_offspring\n",
735
+ "\n",
736
+ "# Genetic Algorithm main function\n",
737
+ "def genetic_algorithm():\n",
738
+ " population = generate_initial_population()\n",
739
+ "\n",
740
+ " for generation in range(num_generations):\n",
741
+ " print(f\"Generation {generation + 1}\")\n",
742
+ "\n",
743
+ " selected_population = tournament_selection(population)\n",
744
+ " offspring = []\n",
745
+ "\n",
746
+ " for _ in range(population_size // 2):\n",
747
+ " parent1 = random.choice(selected_population)\n",
748
+ " parent2 = random.choice(selected_population)\n",
749
+ " child1, child2 = crossover(parent1, parent2)\n",
750
+ " child1 = mutate(child1)\n",
751
+ " child2 = mutate(child2)\n",
752
+ " offspring.extend([child1, child2])\n",
753
+ "\n",
754
+ " population = replacement(population, offspring)\n",
755
+ "\n",
756
+ " best_grouping = max(population, key=calculate_fitness)\n",
757
+ " best_fitness = calculate_fitness(best_grouping)\n",
758
+ " print(f\"Best Grouping: {best_grouping}\")\n",
759
+ " print(f\"Fitness Score: {best_fitness}\")\n",
760
+ "\n",
761
+ "# Run the Genetic Algorithm\n",
762
+ "genetic_algorithm()\n",
763
+ " "
764
+ ]
765
+ },
766
+ {
767
+ "cell_type": "code",
768
+ "execution_count": null,
769
+ "metadata": {},
770
+ "outputs": [],
771
+ "source": [
772
+ "def query_farmersalmanac(title, first_paragraph_only=True):\n",
773
+ " base_url = \"https://www.almanac.com/companion-planting-guide-vegetables\"\n",
774
+ " url = f\"{base_url}/w/api.php?format=json&action=query&prop=extracts&explaintext=1&titles={title}\"\n",
775
+ " if first_paragraph_only:\n",
776
+ " url += \"&exintro=1\"\n",
777
+ " data = requests.get(url).json()\n",
778
+ " return Document(\n",
779
+ " metadata={\"source\": f\"{base_url}\"},\n",
780
+ " page_content=list(data[\"query\"][\"pages\"].values())[0][\"extract\"],\n",
781
+ " )"
782
+ ]
783
+ },
784
+ {
785
+ "cell_type": "code",
786
+ "execution_count": null,
787
+ "metadata": {},
788
+ "outputs": [],
789
+ "source": []
790
+ },
791
+ {
792
+ "cell_type": "code",
793
+ "execution_count": null,
794
+ "metadata": {},
795
+ "outputs": [],
796
+ "source": []
797
+ },
798
+ {
799
+ "cell_type": "code",
800
+ "execution_count": null,
801
+ "metadata": {},
802
+ "outputs": [],
803
+ "source": []
804
+ },
805
+ {
806
+ "cell_type": "code",
807
+ "execution_count": null,
808
+ "metadata": {},
809
+ "outputs": [],
810
+ "source": []
811
+ },
812
+ {
813
+ "cell_type": "markdown",
814
+ "metadata": {},
815
+ "source": [
816
+ "## future with sources\n",
817
+ "- https://dzlab.github.io/2023/01/02/prompt-langchain/\n",
818
+ "- https://techcommunity.microsoft.com/t5/startups-at-microsoft/build-a-chatbot-to-query-your-documentation-using-langchain-and/ba-p/3833134"
819
+ ]
820
+ }
821
+ ],
822
+ "metadata": {
823
+ "kernelspec": {
824
+ "display_name": "GRDN_env",
825
+ "language": "python",
826
+ "name": "grdn_env"
827
+ },
828
+ "language_info": {
829
+ "codemirror_mode": {
830
+ "name": "ipython",
831
+ "version": 3
832
+ },
833
+ "file_extension": ".py",
834
+ "mimetype": "text/x-python",
835
+ "name": "python",
836
+ "nbconvert_exporter": "python",
837
+ "pygments_lexer": "ipython3",
838
+ "version": "3.10.9"
839
+ }
840
+ },
841
+ "nbformat": 4,
842
+ "nbformat_minor": 2
843
+ }
notebooks/llamaindex_llama2_test.ipynb ADDED
@@ -0,0 +1,258 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 2,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "\n",
10
+ "import streamlit as st\n",
11
+ "import pandas as pd\n",
12
+ "import os\n",
13
+ "import replicate\n",
14
+ "from langchain.chat_models import ChatOpenAI\n",
15
+ "from langchain.prompts.chat import (\n",
16
+ " ChatPromptTemplate,\n",
17
+ " SystemMessagePromptTemplate,\n",
18
+ " AIMessagePromptTemplate,\n",
19
+ " HumanMessagePromptTemplate,\n",
20
+ ")\n"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": null,
26
+ "metadata": {},
27
+ "outputs": [
28
+ {
29
+ "name": "stdout",
30
+ "output_type": "stream",
31
+ "text": [
32
+ "assistant: 😄 Oh my daisies, companion planting? It's like peas in a pod, ya know? 😂 It's like having a garden party with all your plant friends! 🎉 Companion planting is the bee's knees, it's like a garden symphony, all the plants working together in harmony! 🎶 And let me tell you, it's not just about looks, it's like a big ol' hug for your plants! 🤗 It's like planting a big ol' bouquet of flowers, all mixed together, just like a big ol' garden party! 🎉\n",
33
+ "But seriously, companion planting is a great way to create a balanced and healthy garden ecosystem. It's like having a little garden family, all working together to keep the pests away and the soil healthy! 🐝🐜 And let me tell you, it's not just about the plants, it's like a big ol' party for the bees and butterflies too! 🐝🦋 They love all the different colors and scents, it's like a big ol' garden buffet for them! 🍴🍸 So, if you haven't tried companion planting yet, you should give it a go, it's like the bee's knees, it's the cat's pajamas! 🐰👠💤\n",
34
+ "But enough about that, let's talk about you, what do you think about companion planting? Have you tried it before? Do you have any questions? Let's chat, I'm all ears! 🐰👂💬\n"
35
+ ]
36
+ }
37
+ ],
38
+ "source": [
39
+ "from llama_index.llms import Replicate, ChatMessage\n",
40
+ "\n",
41
+ "llm = Replicate(\n",
42
+ " model=\"a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5\"\n",
43
+ ")\n",
44
+ "\n",
45
+ "messages = [\n",
46
+ " ChatMessage(\n",
47
+ " role=\"system\", content=\"You are a gardnere with a colorful personality\"\n",
48
+ " ),\n",
49
+ " ChatMessage(role=\"user\", content=\"What is your opinion on companion planting?\"),\n",
50
+ "]\n",
51
+ "resp = llm.chat(messages)\n",
52
+ "\n",
53
+ "print(resp)"
54
+ ]
55
+ },
56
+ {
57
+ "cell_type": "code",
58
+ "execution_count": 13,
59
+ "metadata": {},
60
+ "outputs": [
61
+ {
62
+ "name": "stdout",
63
+ "output_type": "stream",
64
+ "text": [
65
+ "You return JUST a python list object containing the elements that can be grown in a garden. Do not include any other text or explanation.which of the elements of this list can be grown in a garden, [apple, orange, milk, eraser, cherry]? Return JUST a python list object containing the elements that can be grown in a garden. Do not include any other text or explanation.\n"
66
+ ]
67
+ },
68
+ {
69
+ "ename": "ReplicateError",
70
+ "evalue": "You have reached the free time limit. To continue using Replicate, set up billing at https://replicate.com/account/billing#billing.",
71
+ "output_type": "error",
72
+ "traceback": [
73
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
74
+ "\u001b[0;31mReplicateError\u001b[0m Traceback (most recent call last)",
75
+ "Cell \u001b[0;32mIn[13], line 20\u001b[0m\n\u001b[1;32m 17\u001b[0m input_prompt \u001b[39m=\u001b[39m template \u001b[39m+\u001b[39m text\n\u001b[1;32m 18\u001b[0m \u001b[39mprint\u001b[39m(input_prompt)\n\u001b[0;32m---> 20\u001b[0m resp \u001b[39m=\u001b[39m llm\u001b[39m.\u001b[39mcomplete(input_prompt)\n\u001b[1;32m 21\u001b[0m \u001b[39mprint\u001b[39m(resp)\n",
76
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/llama_index/llms/base.py:223\u001b[0m, in \u001b[0;36mllm_completion_callback.<locals>.wrap.<locals>.wrapped_llm_predict\u001b[0;34m(_self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 213\u001b[0m \u001b[39mwith\u001b[39;00m wrapper_logic(_self) \u001b[39mas\u001b[39;00m callback_manager:\n\u001b[1;32m 214\u001b[0m event_id \u001b[39m=\u001b[39m callback_manager\u001b[39m.\u001b[39mon_event_start(\n\u001b[1;32m 215\u001b[0m CBEventType\u001b[39m.\u001b[39mLLM,\n\u001b[1;32m 216\u001b[0m payload\u001b[39m=\u001b[39m{\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 220\u001b[0m },\n\u001b[1;32m 221\u001b[0m )\n\u001b[0;32m--> 223\u001b[0m f_return_val \u001b[39m=\u001b[39m f(_self, \u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[1;32m 224\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39misinstance\u001b[39m(f_return_val, Generator):\n\u001b[1;32m 225\u001b[0m \u001b[39m# intercept the generator and add a callback to the end\u001b[39;00m\n\u001b[1;32m 226\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mwrapped_gen\u001b[39m() \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m CompletionResponseGen:\n",
77
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/llama_index/llms/replicate.py:100\u001b[0m, in \u001b[0;36mReplicate.complete\u001b[0;34m(self, prompt, **kwargs)\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[39m@llm_completion_callback\u001b[39m()\n\u001b[1;32m 99\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mcomplete\u001b[39m(\u001b[39mself\u001b[39m, prompt: \u001b[39mstr\u001b[39m, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs: Any) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m CompletionResponse:\n\u001b[0;32m--> 100\u001b[0m response_gen \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstream_complete(prompt, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[1;32m 101\u001b[0m response_list \u001b[39m=\u001b[39m \u001b[39mlist\u001b[39m(response_gen)\n\u001b[1;32m 102\u001b[0m final_response \u001b[39m=\u001b[39m response_list[\u001b[39m-\u001b[39m\u001b[39m1\u001b[39m]\n",
78
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/llama_index/llms/base.py:223\u001b[0m, in \u001b[0;36mllm_completion_callback.<locals>.wrap.<locals>.wrapped_llm_predict\u001b[0;34m(_self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 213\u001b[0m \u001b[39mwith\u001b[39;00m wrapper_logic(_self) \u001b[39mas\u001b[39;00m callback_manager:\n\u001b[1;32m 214\u001b[0m event_id \u001b[39m=\u001b[39m callback_manager\u001b[39m.\u001b[39mon_event_start(\n\u001b[1;32m 215\u001b[0m CBEventType\u001b[39m.\u001b[39mLLM,\n\u001b[1;32m 216\u001b[0m payload\u001b[39m=\u001b[39m{\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 220\u001b[0m },\n\u001b[1;32m 221\u001b[0m )\n\u001b[0;32m--> 223\u001b[0m f_return_val \u001b[39m=\u001b[39m f(_self, \u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[1;32m 224\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39misinstance\u001b[39m(f_return_val, Generator):\n\u001b[1;32m 225\u001b[0m \u001b[39m# intercept the generator and add a callback to the end\u001b[39;00m\n\u001b[1;32m 226\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mwrapped_gen\u001b[39m() \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m CompletionResponseGen:\n",
79
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/llama_index/llms/replicate.py:119\u001b[0m, in \u001b[0;36mReplicate.stream_complete\u001b[0;34m(self, prompt, **kwargs)\u001b[0m\n\u001b[1;32m 117\u001b[0m prompt \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcompletion_to_prompt(prompt)\n\u001b[1;32m 118\u001b[0m input_dict \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_get_input_dict(prompt, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m--> 119\u001b[0m response_iter \u001b[39m=\u001b[39m replicate\u001b[39m.\u001b[39mrun(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel, \u001b[39minput\u001b[39m\u001b[39m=\u001b[39minput_dict)\n\u001b[1;32m 121\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mgen\u001b[39m() \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m CompletionResponseGen:\n\u001b[1;32m 122\u001b[0m text \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39m\"\u001b[39m\n",
80
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/replicate/client.py:147\u001b[0m, in \u001b[0;36mClient.run\u001b[0;34m(self, ref, input, **params)\u001b[0m\n\u001b[1;32m 137\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mrun\u001b[39m(\n\u001b[1;32m 138\u001b[0m \u001b[39mself\u001b[39m,\n\u001b[1;32m 139\u001b[0m ref: \u001b[39mstr\u001b[39m,\n\u001b[1;32m 140\u001b[0m \u001b[39minput\u001b[39m: Optional[Dict[\u001b[39mstr\u001b[39m, Any]] \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m,\n\u001b[1;32m 141\u001b[0m \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mparams: Unpack[\u001b[39m\"\u001b[39m\u001b[39mPredictions.CreatePredictionParams\u001b[39m\u001b[39m\"\u001b[39m],\n\u001b[1;32m 142\u001b[0m ) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Union[Any, Iterator[Any]]: \u001b[39m# noqa: ANN401\u001b[39;00m\n\u001b[1;32m 143\u001b[0m \u001b[39m \u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m 144\u001b[0m \u001b[39m Run a model and wait for its output.\u001b[39;00m\n\u001b[1;32m 145\u001b[0m \u001b[39m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 147\u001b[0m \u001b[39mreturn\u001b[39;00m run(\u001b[39mself\u001b[39m, ref, \u001b[39minput\u001b[39m, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mparams)\n",
81
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/replicate/run.py:31\u001b[0m, in \u001b[0;36mrun\u001b[0;34m(client, ref, input, **params)\u001b[0m\n\u001b[1;32m 28\u001b[0m version, owner, name, version_id \u001b[39m=\u001b[39m identifier\u001b[39m.\u001b[39m_resolve(ref)\n\u001b[1;32m 30\u001b[0m \u001b[39mif\u001b[39;00m version_id \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m---> 31\u001b[0m prediction \u001b[39m=\u001b[39m client\u001b[39m.\u001b[39mpredictions\u001b[39m.\u001b[39mcreate(\n\u001b[1;32m 32\u001b[0m version\u001b[39m=\u001b[39mversion_id, \u001b[39minput\u001b[39m\u001b[39m=\u001b[39m\u001b[39minput\u001b[39m \u001b[39mor\u001b[39;00m {}, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mparams\n\u001b[1;32m 33\u001b[0m )\n\u001b[1;32m 34\u001b[0m \u001b[39melif\u001b[39;00m owner \u001b[39mand\u001b[39;00m name:\n\u001b[1;32m 35\u001b[0m prediction \u001b[39m=\u001b[39m client\u001b[39m.\u001b[39mmodels\u001b[39m.\u001b[39mpredictions\u001b[39m.\u001b[39mcreate(\n\u001b[1;32m 36\u001b[0m model\u001b[39m=\u001b[39m(owner, name), \u001b[39minput\u001b[39m\u001b[39m=\u001b[39m\u001b[39minput\u001b[39m \u001b[39mor\u001b[39;00m {}, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mparams\n\u001b[1;32m 37\u001b[0m )\n",
82
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/replicate/prediction.py:309\u001b[0m, in \u001b[0;36mPredictions.create\u001b[0;34m(self, version, input, **params)\u001b[0m\n\u001b[1;32m 300\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m 301\u001b[0m \u001b[39mCreate a new prediction for the specified model version.\u001b[39;00m\n\u001b[1;32m 302\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m 304\u001b[0m body \u001b[39m=\u001b[39m _create_prediction_body(\n\u001b[1;32m 305\u001b[0m version,\n\u001b[1;32m 306\u001b[0m \u001b[39minput\u001b[39m,\n\u001b[1;32m 307\u001b[0m \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mparams,\n\u001b[1;32m 308\u001b[0m )\n\u001b[0;32m--> 309\u001b[0m resp \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_client\u001b[39m.\u001b[39m_request(\n\u001b[1;32m 310\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mPOST\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[1;32m 311\u001b[0m \u001b[39m\"\u001b[39m\u001b[39m/v1/predictions\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[1;32m 312\u001b[0m json\u001b[39m=\u001b[39mbody,\n\u001b[1;32m 313\u001b[0m )\n\u001b[1;32m 315\u001b[0m \u001b[39mreturn\u001b[39;00m _json_to_prediction(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_client, resp\u001b[39m.\u001b[39mjson())\n",
83
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/replicate/client.py:85\u001b[0m, in \u001b[0;36mClient._request\u001b[0;34m(self, method, path, **kwargs)\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_request\u001b[39m(\u001b[39mself\u001b[39m, method: \u001b[39mstr\u001b[39m, path: \u001b[39mstr\u001b[39m, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m httpx\u001b[39m.\u001b[39mResponse:\n\u001b[1;32m 84\u001b[0m resp \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_client\u001b[39m.\u001b[39mrequest(method, path, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m---> 85\u001b[0m _raise_for_status(resp)\n\u001b[1;32m 87\u001b[0m \u001b[39mreturn\u001b[39;00m resp\n",
84
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/replicate/client.py:358\u001b[0m, in \u001b[0;36m_raise_for_status\u001b[0;34m(resp)\u001b[0m\n\u001b[1;32m 356\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_raise_for_status\u001b[39m(resp: httpx\u001b[39m.\u001b[39mResponse) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 357\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39m400\u001b[39m \u001b[39m<\u001b[39m\u001b[39m=\u001b[39m resp\u001b[39m.\u001b[39mstatus_code \u001b[39m<\u001b[39m \u001b[39m600\u001b[39m:\n\u001b[0;32m--> 358\u001b[0m \u001b[39mraise\u001b[39;00m ReplicateError(resp\u001b[39m.\u001b[39mjson()[\u001b[39m\"\u001b[39m\u001b[39mdetail\u001b[39m\u001b[39m\"\u001b[39m])\n",
85
+ "\u001b[0;31mReplicateError\u001b[0m: You have reached the free time limit. To continue using Replicate, set up billing at https://replicate.com/account/billing#billing."
86
+ ]
87
+ }
88
+ ],
89
+ "source": [
90
+ "from llama_index.llms import Replicate\n",
91
+ "\n",
92
+ "llm = Replicate(\n",
93
+ " model=\"a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5\",\n",
94
+ " temperature=0.1,\n",
95
+ " #context_window=32,\n",
96
+ " top_p=0.9,\n",
97
+ " repetition_penalty=1.0,\n",
98
+ " max_tokens=2000,\n",
99
+ " #stop_sequences=[\"\\n\\n\"], \n",
100
+ "\n",
101
+ ")\n",
102
+ "\n",
103
+ "input_plant_text = 'apple, orange, milk, eraser, cherry'\n",
104
+ "template=\"You return JUST a python list object containing the elements that can be grown in a garden. Do not include any other text or explanation.\"\n",
105
+ "text = 'which of the elements of this list can be grown in a garden, [' + input_plant_text + ']? Return JUST a python list object containing the elements that can be grown in a garden. Do not include any other text or explanation.'\n",
106
+ "input_prompt = template + text\n",
107
+ "print(input_prompt)\n",
108
+ "\n",
109
+ "resp = llm.complete(input_prompt)\n",
110
+ "print(resp)\n"
111
+ ]
112
+ },
113
+ {
114
+ "cell_type": "code",
115
+ "execution_count": 7,
116
+ "metadata": {},
117
+ "outputs": [
118
+ {
119
+ "name": "stdout",
120
+ "output_type": "stream",
121
+ "text": [
122
+ "Companion planting is the practice of growing different plants together in close proximity in order to improve their growth, health, and productivity. This technique takes advantage of the different ways that plants interact with each other, such as by providing shade, repelling pests, or attracting beneficial insects.\n",
123
+ "\n",
124
+ "Here are some of my thoughts on companion planting:\n",
125
+ "\n",
126
+ "1. Diversify your garden: Companion planting is a great way to add diversity to your garden, which can improve its overall health and resilience. By growing a mix of plants, you can create a more complex and dynamic ecosystem that is less susceptible to pests and diseases.\n",
127
+ "2. Improve soil health: Many companion plants, such as legumes and comfrey, have the ability to fix nitrogen or other nutrients in the soil, which can improve the health and fertility of the soil. This can lead to healthier and more productive plants.\n",
128
+ "3. Enhance pest control: Companion planting can be a powerful tool for controlling pests naturally. For example, basil and mint can repel aphids, while marigold and nasturtium can attract beneficial insects that prey on pests.\n",
129
+ "4. Increase yields: Companion planting can also help to increase yields by providing support and shade for plants, or by attracting beneficial insects that pollinate or prey on pests. For example, planting beans with corn and squash can provide a trellis for the beans and shade for the corn, while also attracting beneficial insects that prey on pests.\n",
130
+ "5. Reduce maintenance: Companion planting can also reduce the amount of maintenance required in your garden. For example, planting a mix of plants that have different growing habits and blooming times can create a more dynamic and resilient garden that requires less work to maintain.\n",
131
+ "\n",
132
+ "\n",
133
+ "Overall, I believe that companion planting is a valuable technique for gardeners of all experience levels. It can help to improve the health, productivity, and resilience of your garden, while also reducing the amount of maintenance required. By taking advantage of the different ways that plants interact with each other"
134
+ ]
135
+ }
136
+ ],
137
+ "source": [
138
+ "#os.environ[\"REPLICATE_API_TOKEN\"] = \"key here\"\n",
139
+ "api = replicate.Client(api_token=os.environ[\"REPLICATE_API_TOKEN\"])\n",
140
+ "output = api.run(\n",
141
+ " \"a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5\",\n",
142
+ " input={\"prompt\": \"what is your opinion on companion planting?\"},\n",
143
+ " )\n",
144
+ "for item in output:\n",
145
+ " print(item, end=\"\")\n",
146
+ "\n",
147
+ "# save response to string\n",
148
+ "resp = \"\"\n",
149
+ "for item in output:\n",
150
+ " resp += item\n",
151
+ " "
152
+ ]
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "execution_count": 7,
157
+ "metadata": {},
158
+ "outputs": [
159
+ {
160
+ "name": "stderr",
161
+ "output_type": "stream",
162
+ "text": [
163
+ "config.json: 100%|██████████| 638/638 [00:00<00:00, 3.22MB/s]\n",
164
+ "model.safetensors.index.json: 100%|██████████| 23.9k/23.9k [00:00<00:00, 62.9MB/s]\n",
165
+ "model-00001-of-00008.safetensors: 100%|██████████| 1.89G/1.89G [00:26<00:00, 71.1MB/s]\n",
166
+ "model-00002-of-00008.safetensors: 100%|██████████| 1.95G/1.95G [00:27<00:00, 71.0MB/s]\n",
167
+ "model-00003-of-00008.safetensors: 100%|██████████| 1.98G/1.98G [00:27<00:00, 72.0MB/s]\n",
168
+ "model-00004-of-00008.safetensors: 100%|██████████| 1.95G/1.95G [00:27<00:00, 70.2MB/s]\n",
169
+ "model-00005-of-00008.safetensors: 100%|██████████| 1.98G/1.98G [00:28<00:00, 69.8MB/s]\n",
170
+ "model-00006-of-00008.safetensors: 100%|██████████| 1.95G/1.95G [00:28<00:00, 69.5MB/s]\n",
171
+ "model-00007-of-00008.safetensors: 100%|██████████| 1.98G/1.98G [00:28<00:00, 68.5MB/s]\n",
172
+ "model-00008-of-00008.safetensors: 100%|██████████| 816M/816M [00:11<00:00, 69.9MB/s]\n",
173
+ "Downloading shards: 100%|██████████| 8/8 [03:27<00:00, 25.96s/it]\n",
174
+ "Loading checkpoint shards: 100%|██████████| 8/8 [00:24<00:00, 3.04s/it]\n",
175
+ "generation_config.json: 100%|██████████| 111/111 [00:00<00:00, 1.12MB/s]\n",
176
+ "tokenizer_config.json: 100%|██████████| 1.43k/1.43k [00:00<00:00, 9.73MB/s]\n",
177
+ "tokenizer.model: 100%|██████████| 493k/493k [00:00<00:00, 69.9MB/s]\n",
178
+ "tokenizer.json: 100%|██████████| 1.80M/1.80M [00:00<00:00, 17.9MB/s]\n",
179
+ "added_tokens.json: 100%|██████████| 42.0/42.0 [00:00<00:00, 160kB/s]\n",
180
+ "special_tokens_map.json: 100%|██████████| 168/168 [00:00<00:00, 961kB/s]\n",
181
+ "/Users/dheym/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/transformers/generation/utils.py:1518: UserWarning: You have modified the pretrained model configuration to control generation. This is a deprecated strategy to control generation and will be removed soon, in a future version. Please use and modify the model generation configuration (see https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )\n",
182
+ " warnings.warn(\n"
183
+ ]
184
+ },
185
+ {
186
+ "ename": "ValueError",
187
+ "evalue": "Greedy methods without beam search do not support `num_return_sequences` different than 1 (got 3).",
188
+ "output_type": "error",
189
+ "traceback": [
190
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
191
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
192
+ "Cell \u001b[0;32mIn[7], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m \u001b[39mimport\u001b[39;00m pipeline\n\u001b[1;32m 2\u001b[0m generator \u001b[39m=\u001b[39m pipeline(\u001b[39m'\u001b[39m\u001b[39mtext-generation\u001b[39m\u001b[39m'\u001b[39m, model \u001b[39m=\u001b[39m \u001b[39m'\u001b[39m\u001b[39mHuggingFaceH4/zephyr-7b-beta\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m----> 3\u001b[0m generator(\u001b[39m\"\u001b[39m\u001b[39mHello, I\u001b[39m\u001b[39m'\u001b[39m\u001b[39mm a language model\u001b[39m\u001b[39m\"\u001b[39m, max_length \u001b[39m=\u001b[39m \u001b[39m30\u001b[39m, num_return_sequences\u001b[39m=\u001b[39m\u001b[39m3\u001b[39m)\n",
193
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/transformers/pipelines/text_generation.py:208\u001b[0m, in \u001b[0;36mTextGenerationPipeline.__call__\u001b[0;34m(self, text_inputs, **kwargs)\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__call__\u001b[39m(\u001b[39mself\u001b[39m, text_inputs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs):\n\u001b[1;32m 168\u001b[0m \u001b[39m \u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m 169\u001b[0m \u001b[39m Complete the prompt(s) given as inputs.\u001b[39;00m\n\u001b[1;32m 170\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 206\u001b[0m \u001b[39m ids of the generated text.\u001b[39;00m\n\u001b[1;32m 207\u001b[0m \u001b[39m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 208\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39msuper\u001b[39m()\u001b[39m.\u001b[39m\u001b[39m__call__\u001b[39m(text_inputs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n",
194
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/transformers/pipelines/base.py:1140\u001b[0m, in \u001b[0;36mPipeline.__call__\u001b[0;34m(self, inputs, num_workers, batch_size, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1132\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mnext\u001b[39m(\n\u001b[1;32m 1133\u001b[0m \u001b[39miter\u001b[39m(\n\u001b[1;32m 1134\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mget_iterator(\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1137\u001b[0m )\n\u001b[1;32m 1138\u001b[0m )\n\u001b[1;32m 1139\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 1140\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mrun_single(inputs, preprocess_params, forward_params, postprocess_params)\n",
195
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/transformers/pipelines/base.py:1147\u001b[0m, in \u001b[0;36mPipeline.run_single\u001b[0;34m(self, inputs, preprocess_params, forward_params, postprocess_params)\u001b[0m\n\u001b[1;32m 1145\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mrun_single\u001b[39m(\u001b[39mself\u001b[39m, inputs, preprocess_params, forward_params, postprocess_params):\n\u001b[1;32m 1146\u001b[0m model_inputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mpreprocess(inputs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mpreprocess_params)\n\u001b[0;32m-> 1147\u001b[0m model_outputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mforward(model_inputs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mforward_params)\n\u001b[1;32m 1148\u001b[0m outputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mpostprocess(model_outputs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mpostprocess_params)\n\u001b[1;32m 1149\u001b[0m \u001b[39mreturn\u001b[39;00m outputs\n",
196
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/transformers/pipelines/base.py:1046\u001b[0m, in \u001b[0;36mPipeline.forward\u001b[0;34m(self, model_inputs, **forward_params)\u001b[0m\n\u001b[1;32m 1044\u001b[0m \u001b[39mwith\u001b[39;00m inference_context():\n\u001b[1;32m 1045\u001b[0m model_inputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_ensure_tensor_on_device(model_inputs, device\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdevice)\n\u001b[0;32m-> 1046\u001b[0m model_outputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward(model_inputs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mforward_params)\n\u001b[1;32m 1047\u001b[0m model_outputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_ensure_tensor_on_device(model_outputs, device\u001b[39m=\u001b[39mtorch\u001b[39m.\u001b[39mdevice(\u001b[39m\"\u001b[39m\u001b[39mcpu\u001b[39m\u001b[39m\"\u001b[39m))\n\u001b[1;32m 1048\u001b[0m \u001b[39melse\u001b[39;00m:\n",
197
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/transformers/pipelines/text_generation.py:271\u001b[0m, in \u001b[0;36mTextGenerationPipeline._forward\u001b[0;34m(self, model_inputs, **generate_kwargs)\u001b[0m\n\u001b[1;32m 268\u001b[0m generate_kwargs[\u001b[39m\"\u001b[39m\u001b[39mmin_length\u001b[39m\u001b[39m\"\u001b[39m] \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m prefix_length\n\u001b[1;32m 270\u001b[0m \u001b[39m# BS x SL\u001b[39;00m\n\u001b[0;32m--> 271\u001b[0m generated_sequence \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel\u001b[39m.\u001b[39mgenerate(input_ids\u001b[39m=\u001b[39minput_ids, attention_mask\u001b[39m=\u001b[39mattention_mask, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mgenerate_kwargs)\n\u001b[1;32m 272\u001b[0m out_b \u001b[39m=\u001b[39m generated_sequence\u001b[39m.\u001b[39mshape[\u001b[39m0\u001b[39m]\n\u001b[1;32m 273\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mframework \u001b[39m==\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mpt\u001b[39m\u001b[39m\"\u001b[39m:\n",
198
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/torch/utils/_contextlib.py:115\u001b[0m, in \u001b[0;36mcontext_decorator.<locals>.decorate_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[39m@functools\u001b[39m\u001b[39m.\u001b[39mwraps(func)\n\u001b[1;32m 113\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mdecorate_context\u001b[39m(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs):\n\u001b[1;32m 114\u001b[0m \u001b[39mwith\u001b[39;00m ctx_factory():\n\u001b[0;32m--> 115\u001b[0m \u001b[39mreturn\u001b[39;00m func(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n",
199
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/transformers/generation/utils.py:1529\u001b[0m, in \u001b[0;36mGenerationMixin.generate\u001b[0;34m(self, inputs, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, assistant_model, streamer, negative_prompt_ids, negative_prompt_attention_mask, **kwargs)\u001b[0m\n\u001b[1;32m 1527\u001b[0m generation_config \u001b[39m=\u001b[39m copy\u001b[39m.\u001b[39mdeepcopy(generation_config)\n\u001b[1;32m 1528\u001b[0m model_kwargs \u001b[39m=\u001b[39m generation_config\u001b[39m.\u001b[39mupdate(\u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# All unused kwargs must be model kwargs\u001b[39;00m\n\u001b[0;32m-> 1529\u001b[0m generation_config\u001b[39m.\u001b[39mvalidate()\n\u001b[1;32m 1530\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_validate_model_kwargs(model_kwargs\u001b[39m.\u001b[39mcopy())\n\u001b[1;32m 1532\u001b[0m \u001b[39m# 2. Set generation parameters if not already defined\u001b[39;00m\n",
200
+ "File \u001b[0;32m~/anaconda3/envs/GRDN_env/lib/python3.11/site-packages/transformers/generation/configuration_utils.py:498\u001b[0m, in \u001b[0;36mGenerationConfig.validate\u001b[0;34m(self, is_init)\u001b[0m\n\u001b[1;32m 496\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnum_beams \u001b[39m==\u001b[39m \u001b[39m1\u001b[39m:\n\u001b[1;32m 497\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdo_sample \u001b[39mis\u001b[39;00m \u001b[39mFalse\u001b[39;00m:\n\u001b[0;32m--> 498\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\n\u001b[1;32m 499\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mGreedy methods without beam search do not support `num_return_sequences` different than 1 \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 500\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m(got \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnum_return_sequences\u001b[39m}\u001b[39;00m\u001b[39m).\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 501\u001b[0m )\n\u001b[1;32m 502\u001b[0m \u001b[39melif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnum_return_sequences \u001b[39m>\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnum_beams:\n\u001b[1;32m 503\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\n\u001b[1;32m 504\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m`num_return_sequences` (\u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnum_return_sequences\u001b[39m}\u001b[39;00m\u001b[39m) has to be smaller or equal to `num_beams` \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 505\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m(\u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnum_beams\u001b[39m}\u001b[39;00m\u001b[39m).\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 506\u001b[0m )\n",
201
+ "\u001b[0;31mValueError\u001b[0m: Greedy methods without beam search do not support `num_return_sequences` different than 1 (got 3)."
202
+ ]
203
+ }
204
+ ],
205
+ "source": [
206
+ "from transformers import pipeline\n",
207
+ "generator = pipeline('text-generation', model = 'HuggingFaceH4/zephyr-7b-beta')\n",
208
+ "generator(\"Hello, I'm a language model\", max_length = 30, num_return_sequences=3)\n",
209
+ "## [{'generated_text': \"Hello, I'm a language modeler. So while writing this, when I went out to meet my wife or come home she told me that my\"},\n",
210
+ "## {'generated_text': \"Hello, I'm a language modeler. I write and maintain software in Python. I love to code, and that includes coding things that require writing\"}, ...\n",
211
+ "\n"
212
+ ]
213
+ },
214
+ {
215
+ "cell_type": "code",
216
+ "execution_count": 9,
217
+ "metadata": {},
218
+ "outputs": [],
219
+ "source": [
220
+ "# Stream text\n",
221
+ "def predict(message, chatbot, system_prompt=\"\", temperature=0.9, max_new_tokens=4096):\n",
222
+ " \n",
223
+ " client = Client(\"https://ysharma-explore-llamav2-with-tgi.hf.space/\")\n",
224
+ " return client.predict(\n",
225
+ " message, # str in 'Message' Textbox component\n",
226
+ " system_prompt, # str in 'Optional system prompt' Textbox component\n",
227
+ " temperature, # int | float (numeric value between 0.0 and 1.0)\n",
228
+ " max_new_tokens, # int | float (numeric value between 0 and 4096)\n",
229
+ " 0.3, # int | float (numeric value between 0.0 and 1)\n",
230
+ " 1, # int | float (numeric value between 1.0 and 2.0)\n",
231
+ " api_name=\"/chat\"\n",
232
+ " )\n"
233
+ ]
234
+ }
235
+ ],
236
+ "metadata": {
237
+ "kernelspec": {
238
+ "display_name": "GRDN_env",
239
+ "language": "python",
240
+ "name": "python3"
241
+ },
242
+ "language_info": {
243
+ "codemirror_mode": {
244
+ "name": "ipython",
245
+ "version": 3
246
+ },
247
+ "file_extension": ".py",
248
+ "mimetype": "text/x-python",
249
+ "name": "python",
250
+ "nbconvert_exporter": "python",
251
+ "pygments_lexer": "ipython3",
252
+ "version": "3.11.3"
253
+ },
254
+ "orig_nbformat": 4
255
+ },
256
+ "nbformat": 4,
257
+ "nbformat_minor": 2
258
+ }
readme.md ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # README
2
+ <br/>
3
+ <br/>
4
+ <font size = "18"> GRDN 🌱</font>
5
+ <br/>
6
+ author: Danielle Heymann
7
+ <br/>
8
+ contact: dheymann314@gmail.com
9
+ <br/>
10
+ last updated: 12/17/2023
11
+ <br/>
12
+ <br/>
13
+ GRDN is an application that helps users design a garden and its plant beds using companion planting, generative AI, and optimization. It is a work in progress.
14
+ <br/>
15
+ <br/>
16
+ <br/>
17
+
18
+ ## Background
19
+ The screenshots below show the app in action.
20
+ <br>
21
+ <br>
22
+ ![app1](src/assets/readme1.png)
23
+ <br>
24
+ <br>
25
+ ![app2](src/assets/readme2.png)
26
+ <br>
27
+ <br>
28
+ ![app3](src/assets/readme3.png)
29
+ <br>
30
+ <br>
31
+ ![app4](src/assets/readme4.png)
32
+ <br>
33
+ <br>
34
+ ![app5](src/assets/readme5.png)
35
+ <br>
36
+ <br>
37
+
38
+ ## Setup
39
+ - set up conda environment
40
+ >*conda create --name=GRDN_env python=3.11*
41
+ - install dependencies
42
+ >*pip install -r requirements.txt*
43
+ - download local model and add it to model folder
44
+ >I used the Llama 2 7B Chat model (Q4_K_M GGUF quantization from TheBloke)
45
+ >https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_M.gguf
46
+
47
+ ## Running the App
48
+ - navigate to ...GRDN/src
49
+ - activate environment
50
+ >*conda activate GRDN_env*
51
+ - run app
52
+ >*python -m streamlit run app.py*
53
+
54
+
55
+
56
+
src/.DS_Store ADDED
Binary file (6.15 kB). View file
 
src/assets/bot.png ADDED
src/assets/cool.png ADDED
src/assets/flower.jpg ADDED

Git LFS Details

  • SHA256: 021afe38c42b91bcb4fa95babbdc05a8db83f22848229f936dc736b0e7e45622
  • Pointer size: 132 Bytes
  • Size of remote file: 1.22 MB
src/assets/flower2.jpg ADDED

Git LFS Details

  • SHA256: 94c55ec305892fb47ee20da737c7c2525fc128290b1c84d7de09efbae97bde72
  • Pointer size: 132 Bytes
  • Size of remote file: 1.05 MB
src/assets/flower_tech.png ADDED

Git LFS Details

  • SHA256: 651653c18781f505c0dd893e1c5534b1343b12a956dfb7878d786b6018506d4c
  • Pointer size: 132 Bytes
  • Size of remote file: 3.04 MB
src/assets/lights.jpg ADDED
src/assets/logo_title.png ADDED
src/assets/logo_title_transparent.png ADDED
src/assets/plant_images/plant_0.png ADDED

Git LFS Details

  • SHA256: cb5ee09ac948d36a52f59f653972fcda279d354b02b13c5fb268429f1134a9c6
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_1.png ADDED

Git LFS Details

  • SHA256: d965f1ea9f2024e10c7b077009df0f5987c936107313bfdf9e88b2708ac22c0c
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_10.png ADDED

Git LFS Details

  • SHA256: 9c972946af2959a440fd2f5f78565d8267f403cf42e01dcb6feec5a4f725552f
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_11.png ADDED

Git LFS Details

  • SHA256: 00e3e53f67a29c8842441a2e3fad384cddb8fe3e9394b17a8f756fb84acbad0a
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_12.png ADDED

Git LFS Details

  • SHA256: 7ff58afdc10197acdbaacac383395313b6ed71ade41116af346109fc94cde030
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_13.png ADDED

Git LFS Details

  • SHA256: 19ec740eba54864637231267b4f2fb9a4afaaf409eae547f35a2766bd51bdb1a
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_14.png ADDED

Git LFS Details

  • SHA256: 06248bf39c1be76c09ef8fac87dee464cc39bab82028334bc1e0da8538cc4079
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_15.png ADDED

Git LFS Details

  • SHA256: 1e41ba1fe2a3abdecb8506baf724560fac287649f58c729ba617b6bda29e221d
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_16.png ADDED

Git LFS Details

  • SHA256: 741d14ef53e27eaee377b261621c683510b348a3ba86bc554fde69567c6c203a
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_17.png ADDED

Git LFS Details

  • SHA256: b591e0d5a13f7b0797e5e8d904ce3aceb3062bc7cf633e5e871a3f5b15e74532
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_18.png ADDED

Git LFS Details

  • SHA256: 7512adccd41a9d5c5b33e675bf58fdde710ca93efb73d03d208d942d4ca45443
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_19.png ADDED

Git LFS Details

  • SHA256: a37a598786b9e492729d8a4e87b3ac64ffe7ab4e77dd820ff358b6e66b693074
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_2.png ADDED

Git LFS Details

  • SHA256: ad8f40579380d9f5a8ed77f849b2374e8ecea6ab5e4b093ef1c024d1c2b853dd
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_20.png ADDED

Git LFS Details

  • SHA256: 6330a53ee13a6a5a107cc492176dd2b35b49858145747c0a5116c40cc7fc63df
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_21.png ADDED

Git LFS Details

  • SHA256: 9ce45216230117227d55b83ee036ac0c8a8cab522e7fbcb9eb193f54f93e716d
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_22.png ADDED

Git LFS Details

  • SHA256: 4be3b613f9b2ce1ade9f91b63b49ba48a9bd7ba77f73b08cd0ed40c869493ded
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_23.png ADDED

Git LFS Details

  • SHA256: 69f5607d685ef30a8b1a4e0e0beb4fe3a12e799ad62a21323a2a53fb0aa757e7
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_24.png ADDED

Git LFS Details

  • SHA256: 98d710a088f6445bb1bcb4c71cdc723275c211dc26eda2f6915ee437cdbb07c4
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_25.png ADDED

Git LFS Details

  • SHA256: e68409d1c4cd72344c135e59b1ed2f89f5e2408cb7d8e2796c1f6273efd4f4d8
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_26.png ADDED

Git LFS Details

  • SHA256: 59c35f917f8fab0c38d8417248cf2744eac80a251b7a4f2b7be8bc3d4eace031
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_27.png ADDED

Git LFS Details

  • SHA256: e6b7815a801cfa8586a6c3ad0b30dc7805da0f66bf82f503dd6b2a05e2da75cc
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_28.png ADDED

Git LFS Details

  • SHA256: 84f8f0e55fb977677be929ee7ab0b3cb00cdf14eae80fbb0e8e619afd8d2f64b
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_29.png ADDED

Git LFS Details

  • SHA256: a9da6db9488b9f907cc1d6219a1484ebcc6d1301e7b34c8d45e15803579bdb71
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_3.png ADDED

Git LFS Details

  • SHA256: 28ef85229e00d9d0fab3e642b60d464cd400a66410f8ce4d0b9b3ebf216c03cf
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_30.png ADDED

Git LFS Details

  • SHA256: 888232670d24d270c56c0d16818bd8661bb05f3ac6b86210a2f1870755fcbf51
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_31.png ADDED

Git LFS Details

  • SHA256: 620124cff613840f22b16cd3004d763482b1d4f43c8f69b9eaef1bab42e65f90
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_32.png ADDED

Git LFS Details

  • SHA256: b68ec50fd3a60d1c9ee5dea42ad6a77e4266e0883d82164d12b65416f0caa050
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_33.png ADDED

Git LFS Details

  • SHA256: ca4ed06d91d408f33b1b3b58731f579ef2ce2391aac1fd3f032b3753deac6844
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_34.png ADDED

Git LFS Details

  • SHA256: 62acdc477c438b00fc79470bafacc5a1785ee3ba7933d7b7b5d1a9c7e8744f93
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_35.png ADDED

Git LFS Details

  • SHA256: 539105f32b5703894b7971bc67d6fd76e2fa69fef48451f2e99bf4b993b94d8b
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_36.png ADDED

Git LFS Details

  • SHA256: 9dc97e6e65dc8a6941b9a5fd4abb41046736dd243bc1aa03a0ed0cda844c7f0a
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_37.png ADDED

Git LFS Details

  • SHA256: 4fc6d092df2fed571774691bdb72643cfb9be53e8b86bdd78f31756f60155efd
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB
src/assets/plant_images/plant_38.png ADDED

Git LFS Details

  • SHA256: 07c7b73a3db6b874e8bb2f5767b4f86e086c67d53b5424a764997bfe61ea039d
  • Pointer size: 132 Bytes
  • Size of remote file: 3.15 MB