ProPerNounpYK committed on
Commit c1b5e4e
1 Parent(s): 3a93ac4

Update app.py

Files changed (1)
  1. app.py +57 -210
app.py CHANGED
@@ -1,213 +1,60 @@
- import numpy as np
- import streamlit as st
- from transformers import AutoTokenizer, AutoModelForCausalLM
- import os
- import sys
- from dotenv import load_dotenv, dotenv_values
- load_dotenv()
-
- # Create supported models
- model_links = {
-     "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
  }

- # Random dog images for error message
- random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
-               "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
-               "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
-               "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
-               "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
-               "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
-               "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
-               "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
-               "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
-               "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
-               "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
-               "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
-               "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
-
- def reset_conversation():
-     '''
-     Resets Conversation
-     '''
-     st.session_state.conversation = []
-     st.session_state.messages = []
-     return None
-
- # Define the available models
- models = [key for key in model_links.keys()]
-
- # Create the sidebar with the dropdown for model selection
- selected_model = st.sidebar.selectbox("Select Model", models)
-
- # Create a temperature slider
- temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-
- # Add reset button to clear conversation
- st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
-
- # Create model description
- st.sidebar.write(f"You're now chatting with **{selected_model}**")
- st.sidebar.markdown("*Generated content may be inaccurate or false.*")
- st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
-
- if "prev_option" not in st.session_state:
-     st.session_state.prev_option = selected_model
-
- if st.session_state.prev_option != selected_model:
-     st.session_state.messages = []
-     # st.write(f"Changed to {selected_model}")
-     st.session_state.prev_option = selected_model
-     reset_conversation()
-
- # Pull in the model we want to use
- repo_id = model_links[selected_model]
-
- st.subheader(f'TypeGPT.net - {selected_model}')
- st.title(f'ChatBot Using {selected_model}')
-
- # Set a default model
- if selected_model not in st.session_state:
-     st.session_state[selected_model] = model_links[selected_model]
-
- # Initialize chat history
- if "messages" not in st.session_state:
-     st.session_state.messages = []
-
- # Display chat messages from history on app rerun
- for message in st.session_state.messages:
-     with st.chat_message(message["role"]):
-         st.markdown(message["content"])
-
- if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
-     # Display user message in chat message container
-     with st.chat_message("user"):
-         st.markdown(prompt)
-     # Add user message to chat history
-     st.session_state.messages.append({"role": "user", "content": prompt})
-
-     # Display assistant response in chat message container
-     with st.chat_message("assistant"):
-         try:
-             # Code before the change (OpenAI)
-             # stream = client.chat.completions.create(
-             #     model=model_links[selected_model],
-             #     messages=[
-             #         {"role": m["role"], "content": m["content"]}
-             #         for m in st.session_state.messages
-             #     ],
-             #     temperature=temp_values,  # 0.5,
-             #     stream=True,
-             #     max_tokens=3000,
-             # )
-
-             # Code after the change (gradio & InferenceClient)
-             import gradio as gr
-             from huggingface_hub import InferenceClient
-
-             """
-             For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-             """
-             client = InferenceClient(repo_id)
-
-             def respond(
-                 message,
-                 history: list[tuple[str, str]],
-                 system_message,
-                 max_tokens,
-                 temperature,
-                 top_p,
-             ):
-                 messages = [{"role": "system", "content": system_message}]
-
-                 for val in history:
-                     if val[0]:
-                         messages.append({"role": "user", "content": val[0]})
-                     if val[1]:
-                         messages.append({"role": "assistant", "content": val[1]})
-
-                 messages.append({"role": "user", "content": message})
-
-                 response = ""
-
-                 for message in client.chat_completion(
-                     messages,
-                     max_tokens=max_tokens,
-                     stream=True,
-                     temperature=temperature,
-                     top_p=top_p,
-                 ):
-                     token = message.choices[0].delta.content
-
-                     response += token
-                     yield response
-
-             """
-             For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-             """
-             demo = gr.ChatInterface(
-                 respond,
-                 additional_inputs=[
-                     gr.Textbox(
-                         value="You are a friendly Chatbot.", label="System message"
-                     ),
-                     gr.Slider(
-                         minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"
-                     ),
-                     gr.Slider(
-                         minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="temperature"
-                     ),
-                     gr.Slider(
-                         minimum=0.1,
-                         maximum=1.0,
-                         value=0.95,
-                         step=0.05,
-                         label="Top-p (nucleus sampling)",
-                     ),
-                 ],
-             )
-
-             response = ""
-
-             for message in demo(
-                 prompt,
-                 st.session_state.messages[1:],
-                 "You are a friendly Chatbot.",
-                 512,
-                 0.7,
-                 0.95,
-             ):
-                 response += message
-
-         except Exception as e:
-             # st.empty()
-             response = "😵‍💫 Looks like someone unplugged something!\
-                 \n Either the model space is being updated or something is down.\
-                 \n\
-                 \n Try again later. \
-                 \n\
-                 \n Here's a random pic of a 🐶:"
-             st.write(response)
-             random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
-             st.image(random_dog_pick)
-             st.write("This was the error message:")
-             st.write(e)
-
-     st.session_state.messages.append({"role": "assistant", "content": response})
 
+ import openai
+
+ # Initialize OpenAI API
+ openai.api_key = "your_openai_api_key_here"
+
+ # Outfit suggestions database
+ outfit_database = {
+     "casual": {
+         "red jacket": ["white t-shirt", "blue jeans", "white sneakers", "black crossbody bag"],
+         "denim skirt": ["striped blouse", "tan sandals", "straw hat", "neutral tote bag"]
+     },
+     "formal": {
+         "red jacket": ["black turtleneck", "black trousers", "pointed heels", "gold necklace"],
+         "denim skirt": ["silk blouse", "nude pumps", "pearl earrings", "clutch bag"]
+     },
+     # Add more clothing items and styles here
  }

+ def generate_outfit_advice(piece, color, style):
+     # Find the clothing piece in the database
+     key = f"{color} {piece}" if f"{color} {piece}" in outfit_database.get(style, {}) else piece
+     suggestions = outfit_database.get(style, {}).get(key, None)
+
+     if not suggestions:
+         return "Sorry, I couldn't find an outfit for your request. Try another combination!"
+
+     # Generate outfit advice
+     top, bottom, footwear, accessory = suggestions
+     advice = (f"Here’s how you can style your {color} {piece} for a {style} look:\n"
+               f"- Top: {top}\n- Bottom: {bottom}\n- Footwear: {footwear}\n- Accessory: {accessory}")
+     return advice
+
+ def generate_image_prompt(piece, color, style):
+     # Create a text prompt for image generation
+     return f"A {style} outfit featuring a {color} {piece} styled with complementary clothing items and accessories. Modern, fashionable, and cohesive."
+
+ def create_outfit_image(prompt):
+     # Generate an image using OpenAI's DALL-E API
+     response = openai.Image.create(
+         prompt=prompt,
+         n=1,
+         size="1024x1024"
+     )
+     return response["data"][0]["url"]
+
+ # User inputs
+ piece = input("Enter the clothing piece (e.g., 'jacket', 'skirt'): ").lower()
+ color = input("Enter the color (e.g., 'red', 'black'): ").lower()
+ style = input("Enter the style (e.g., 'casual', 'formal'): ").lower()
+
+ # Generate outfit advice and image
+ advice = generate_outfit_advice(piece, color, style)
+ image_prompt = generate_image_prompt(piece, color, style)
+
+ if "Sorry" not in advice:
+     image_url = create_outfit_image(image_prompt)
+     print(advice)
+     print(f"Generated Image: {image_url}")
+ else:
+     print(advice)
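
As a quick sanity check of the new lookup logic, the sketch below shows how the committed helpers behave for inputs that are and are not present in outfit_database. It is a minimal sketch, not part of the commit: it assumes generate_outfit_advice is already defined in the current session (importing app.py directly would block on its module-level input() calls), and it skips create_outfit_image, which requires a valid OpenAI API key.

# Minimal sketch (not part of the commit): exercises the dictionary lookup
# in generate_outfit_advice without touching the OpenAI API.
print(generate_outfit_advice("jacket", "red", "casual"))
# Here’s how you can style your red jacket for a casual look:
# - Top: white t-shirt
# - Bottom: blue jeans
# - Footwear: white sneakers
# - Accessory: black crossbody bag

print(generate_outfit_advice("hat", "green", "casual"))
# Sorry, I couldn't find an outfit for your request. Try another combination!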