import os

from dotenv import load_dotenv
import streamlit as st

from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams


# Load API_KEY and PROJECT_ID from a local .env file.
load_dotenv()

api_key = os.getenv("API_KEY")
project_id = os.getenv("PROJECT_ID")
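
# Example .env file (placeholder values shown as an illustration only,
# not taken from the original source):
#
#   API_KEY=<your IBM Cloud API key>
#   PROJECT_ID=<your watsonx.ai project id>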

# Credentials for the watsonx.ai us-south endpoint.
creds = {
    "url": "https://us-south.ml.cloud.ibm.com",
    "apikey": api_key,
}

# Sampling parameters for text generation.
params = {
    GenParams.DECODING_METHOD: "sample",  # sample (vs. greedy) decoding
    GenParams.MAX_NEW_TOKENS: 200,
    GenParams.MIN_NEW_TOKENS: 1,
    GenParams.TEMPERATURE: 0.7,
    GenParams.TOP_K: 50,
    GenParams.TOP_P: 1,
    GenParams.STOP_SEQUENCES: ["Human:", "AI:"],  # stop at turn markers
}

with st.sidebar:
    st.title("WATSONX CHAT")
    st.write("WATSONX.AI")
    st.write("RAHMAN")

st.title("CHAT WITH WATSONX")

with st.chat_message("system"):
    st.write("Hello 👋, let's chat with watsonx")

if "messages" not in st.session_state:
    st.session_state.messages = []

# Initialise the watsonx.ai foundation model client.
llm = Model(
    model_id=ModelTypes.LLAMA_2_70B_CHAT,
    credentials=creds,
    params=params,
    project_id=project_id,
)
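
# Alternative (untested sketch): the same model through LangChain's watsonx
# wrapper, handy if this app grows into a LangChain pipeline. The parameter
# names below follow langchain_ibm's WatsonxLLM and are assumptions here,
# not something the original app exercised:
#
#   from langchain_ibm import WatsonxLLM
#
#   llm = WatsonxLLM(
#       model_id="meta-llama/llama-2-70b-chat",
#       url=creds["url"],
#       apikey=api_key,
#       project_id=project_id,
#       params=params,
#   )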

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Say something"):
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Llama 2 chat format: system instruction wrapped in <<SYS>> ... <</SYS>>
    # inside an [INST] ... [/INST] turn.
    prompttemplate = f"""
    [INST]<<SYS>>Respond in English<</SYS>>
    {prompt}
    [/INST]
    """
    # generate_text returns the completion as a plain string.
    answer = llm.generate_text(prompttemplate)

    st.session_state.messages.append({"role": "assistant", "content": answer})

    with st.chat_message("assistant"):
        st.markdown(answer)
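
    # Optional streaming variant (untested sketch; the method and function
    # names are assumptions about recent SDK / Streamlit releases, not from
    # the original app). Model.generate_text_stream yields text chunks as
    # they arrive, and st.write_stream renders them incrementally and
    # returns the joined string:
    #
    #   with st.chat_message("assistant"):
    #       answer = st.write_stream(llm.generate_text_stream(prompttemplate))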