Spaces:
Running
Running
RAHMAN
committed on
Commit
•
cd98e72
1
Parent(s):
df3100a
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Streamlit chat app backed by IBM watsonx.ai (Llama-2-70b-chat).

Renders a simple chat UI, keeps the conversation in Streamlit session
state across reruns, and sends each user turn to a watsonx foundation
model wrapped in a Llama-2 instruction template.
"""

import os

import streamlit as st

from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from langchain_ibm import WatsonxLLM  # currently unused; kept for parity with the original file

# SECURITY FIX: the original file hard-coded an IBM Cloud API key and
# project id in source. Credentials must come from the environment; the
# leaked key should be considered compromised and rotated immediately.
api_key = os.getenv("API_KEY", None)
project_id = os.getenv("PROJECT_ID", None)

# Connection credentials for the watsonx.ai us-south endpoint.
creds = {
    "url": "https://us-south.ml.cloud.ibm.com",
    "apikey": api_key,
}

# Text-generation parameters (sampling decoder, bounded output length,
# stop on the next conversational turn marker).
params = {
    GenParams.DECODING_METHOD: "sample",
    GenParams.MAX_NEW_TOKENS: 200,
    GenParams.MIN_NEW_TOKENS: 1,
    GenParams.TEMPERATURE: 0.7,
    GenParams.TOP_K: 50,
    GenParams.TOP_P: 1,
    GenParams.STOP_SEQUENCES: ["Human:", "AI:"],
}

with st.sidebar:
    st.title("WATSONX CHAT")
    st.write("WATSONX.AI")
    st.write("RAHMAN")

st.title("CHAT WITH WATSONX")

with st.chat_message("system"):
    st.write("Hello 👋, lets chat with watsonx")

# Chat history survives Streamlit's rerun-on-interaction model only via
# session_state, so initialise it exactly once per session.
if "messages" not in st.session_state:
    st.session_state.messages = []

llm = Model(ModelTypes.LLAMA_2_70B_CHAT, creds, params, project_id)

# Replay the stored conversation on every rerun so the transcript persists.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Say something"):
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Llama-2 chat format. BUG FIX: the original closed the system block
    # with a second opening tag (`<<SYS>>`); the correct closing tag is
    # `<</SYS>>`, without which the model sees a malformed system prompt.
    prompttemplate = f"""
    [INST]<<SYS>>Respond in English<</SYS>>
    {prompt}
    [/INST]
    """
    # generate_text returns the generated string directly for this SDK call.
    answer = llm.generate_text(prompttemplate)

    st.session_state.messages.append({"role": "agent", "content": answer})

    with st.chat_message("agent"):
        st.markdown(answer)