ebiz committed on
Commit
95744d9
•
1 Parent(s): e57eff6
Files changed (10)
  1. Dockerfile +25 -0
  2. Dockerfile.build +31 -0
  3. home.py +104 -0
  4. main.sh +3 -0
  5. output.log +12 -0
  6. process_one.sh +18 -0
  7. process_two.sh +9 -0
  8. requirements.txt +9 -0
  9. streaming.py +12 -0
  10. utils.py +55 -0
Dockerfile ADDED
@@ -0,0 +1,25 @@
+ FROM ubuntu:latest
+
+ RUN apt-get update && \
+     apt-get install -y curl python3-pip python3-dev && \
+     rm -rf /var/lib/apt/lists/*
+
+ # backend orchestration scripts
+ COPY process_one.sh /app/process_one.sh
+ COPY process_two.sh /app/process_two.sh
+ COPY main.sh /app/main.sh
+
+ # Streamlit frontend
+ COPY home.py /frontend/home.py
+ COPY streaming.py /frontend/streaming.py
+ COPY utils.py /frontend/utils.py
+ COPY requirements.txt /frontend/requirements.txt
+
+ WORKDIR /app
+
+ # Streamlit's default port
+ EXPOSE 8501
+
+ RUN chmod +x /app/process_one.sh /app/process_two.sh /app/main.sh
+
+ CMD ["/app/main.sh"]
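To exercise this image locally, a minimal sketch (the `ebiz-chatbot` tag is illustrative, not part of the commit):

```sh
# build from the repo root (tag name is illustrative)
docker build -t ebiz-chatbot .

# publish the Streamlit port declared by EXPOSE 8501
docker run --rm -p 8501:8501 ebiz-chatbot
```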
Dockerfile.build ADDED
@@ -0,0 +1,31 @@
+ # centos7 amd64 dependencies
+ FROM --platform=linux/amd64 nvidia/cuda:11.3.1-devel-centos7 AS base-amd64
+ RUN yum install -y https://repo.ius.io/ius-release-el7.rpm centos-release-scl && \
+     yum update -y && \
+     yum install -y devtoolset-10-gcc devtoolset-10-gcc-c++ git236 wget
+ RUN wget "https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.6-linux-x86_64.sh" -O cmake-installer.sh && chmod +x cmake-installer.sh && ./cmake-installer.sh --skip-license --prefix=/usr/local
+ ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
+
+ # centos8 arm64 dependencies
+ FROM --platform=linux/arm64 nvidia/cuda-arm64:11.3.1-devel-centos8 AS base-arm64
+ RUN sed -i -e 's/mirrorlist/#mirrorlist/g' -e 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
+ RUN yum install -y git cmake
+
+ FROM base-${TARGETARCH}
+ ARG TARGETARCH
+ ARG GOFLAGS="'-ldflags -w -s'"
+
+ # install go
+ ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
+ RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
+
+ # build the final binary
+ WORKDIR /go/src/github.com/jmorganca/ollama
+ COPY . .
+
+ ENV GOOS=linux
+ ENV GOARCH=$TARGETARCH
+ ENV GOFLAGS=$GOFLAGS
+
+ RUN /usr/local/go/bin/go generate ./... && \
+     /usr/local/go/bin/go build .
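Because the final stage is selected via `FROM base-${TARGETARCH}`, this file is meant to be driven by BuildKit, which supplies `TARGETARCH` per platform. A hedged invocation sketch (tags illustrative):

```sh
# cross-build per architecture; buildx sets TARGETARCH automatically
docker buildx build --platform linux/amd64 -f Dockerfile.build -t ollama-build:amd64 .
docker buildx build --platform linux/arm64 -f Dockerfile.build -t ollama-build:arm64 .
```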
home.py ADDED
@@ -0,0 +1,104 @@
+ import utils
+ import streamlit as st
+ from streaming import StreamHandler
+ import torch
+ # from LLM import OpenAI
+ from langchain.chains import ConversationChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain.llms import HuggingFaceHub
+ from transformers import AutoConfig
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ from langchain.llms import HuggingFacePipeline
+ from langchain.chat_models import ChatOllama
+ import pandas as pd
+ import requests
+ import tempfile
+ import os
+
+ st.set_page_config(page_title="Chatbot", page_icon="💬")
+ st.header('Ebiz Chatbot')
+ st.write('Allows users to interact with the LLM')
+ # st.write('[![view source code ](https://img.shields.io/badge/view_source_code-gray?logo=github)](https://github.com/shashankdeshpande/langchain-chatbot/blob/master/pages/1_%F0%9F%92%AC_basic_chatbot.py)')
+
+
+ # Sidebar controls for the sampling parameters
+ with st.sidebar:
+
+     ktopVal = st.slider(
+         'Top-k',
+         0.0, 1.0, step=0.1)
+
+     ptopVal = st.slider(
+         'Top-p',
+         0.0, 1.0, step=0.1)
+     tempVal = st.slider(
+         'Temperature',
+         0.0, 1.0, step=0.1)
+
+     uploaded_file = st.file_uploader("Choose a file")
+     submit_button = st.button("Submit")
+
+
+ if submit_button and uploaded_file is not None:
+     # Display the file details
+     st.write("File Details:")
+     st.write(f"Filename: {uploaded_file.name}")
+     st.write(f"File Type: {uploaded_file.type}")
+     st.write(f"File Size: {uploaded_file.size} bytes")
+
+     # Send file to Flask server
+     url = "http://127.0.0.1:5000/upload"
+     files = {'file': (uploaded_file.name, uploaded_file, uploaded_file.type)}
+     payload = {'ktopVal': ktopVal, 'ptopVal': ptopVal, 'tempVal': tempVal}
+     response = requests.post(url, files=files, data=payload)
+
+     # Display the response from the server
+     st.write("Server Response:")
+     st.write(response.text)
+
+
+ class Basic:
+
+     def __init__(self):
+         utils.configure_openai_api_key()
+         self.openai_model = "gpt-3.5-turbo"
+
+     def setup_chain(self):
+         # config = AutoConfig.from_pretrained('stabilityai/stablelm-3b-4e1t', trust_remote_code=True)
+         model_name = 'stabilityai/stablelm-3b-4e1t'
+         torch_dtype = "float16"  # @param ["float16", "bfloat16", "float"]
+         load_in_8bit = False  # @param {type:"boolean"}
+         device_map = "auto"
+         # tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+         # model = AutoModelForCausalLM.from_pretrained(
+         #     model_name,
+         #     torch_dtype=getattr(torch, torch_dtype),
+         #     load_in_8bit=load_in_8bit,
+         #     device_map=device_map,
+         #     trust_remote_code=True,
+         #     offload_folder="./offload",
+         # )
+         # pipe = pipeline(
+         #     "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256
+         # )
+
+         llm = ChatOllama(model="llama2")
+         memory = ConversationBufferMemory()
+
+         chain = ConversationChain(llm=llm, memory=memory, verbose=True)
+         return chain
+
+     @utils.enable_chat_history
+     def main(self):
+         chain = self.setup_chain()
+         user_query = st.chat_input(placeholder="Ask me anything!")
+         if user_query:
+             utils.display_msg(user_query, 'user')
+             with st.chat_message("assistant"):
+                 st_cb = StreamHandler(st.empty())
+                 response = chain(user_query, callbacks=[st_cb])
+                 st.session_state.messages.append({"role": "assistant", "content": response['response']})
+
+ if __name__ == "__main__":
+     obj = Basic()
+     obj.main()
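Note that home.py posts the uploaded file and the three slider values to a Flask server at http://127.0.0.1:5000/upload that is not included in this commit. For reference, the equivalent request from the shell, assuming such a server exists (`sample.pdf` is a placeholder):

```sh
# mirrors the multipart POST that home.py builds with requests
curl -X POST http://127.0.0.1:5000/upload \
     -F "file=@sample.pdf" \
     -F "ktopVal=0.5" -F "ptopVal=0.9" -F "tempVal=0.7"
```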
main.sh ADDED
@@ -0,0 +1,3 @@
+ #!/usr/bin/bash
+ ./process_one.sh
+ ./process_two.sh
output.log ADDED
@@ -0,0 +1,12 @@
+ Error: could not connect to ollama server, run 'ollama serve' to start
+ Your new public key is:
+
+ ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII0AG1ceFVkI1K3NCwbqRwCxPC24WpENmLxWrEeeRLz6
+
+ 2023/12/22 09:57:49 images.go:737: total blobs: 0
+ 2023/12/22 09:57:49 images.go:744: total unused blobs removed: 0
+ 2023/12/22 09:57:49 routes.go:895: Listening on 127.0.0.1:11434 (version 0.1.17)
+ 2023/12/22 09:57:49 routes.go:915: warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed
+ [GIN] 2023/12/22 - 09:59:35 | 404 | 273.258µs | 127.0.0.1 | POST "/api/chat"
+ [GIN] 2023/12/22 - 10:00:15 | 404 | 48.669µs | 127.0.0.1 | POST "/api/chat"
+ [GIN] 2023/12/22 - 10:17:22 | 404 | 77.915µs | 127.0.0.1 | POST "/api/chat"
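The repeated 404s on POST /api/chat are consistent with the requested model not being available yet ("total blobs: 0" above), e.g. because `ollama run llama2` had not finished pulling. A quick check, sketched with standard Ollama endpoints and commands:

```sh
# list models the server actually has locally
curl http://127.0.0.1:11434/api/tags

# pull the model explicitly before the frontend starts chatting
ollama pull llama2
```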
process_one.sh ADDED
@@ -0,0 +1,18 @@
+ #!/usr/bin/bash
+
+ echo "calling process one......!"
+
+ curl https://ollama.ai/install.sh | sh
+
+ echo "ollama installed here..."
+
+ ollama serve &
+ # nohup ollama serve > output.log 2>&1 &
+ mkdir -p /etc/systemd/system/ollama.service.d
+ echo "[Service]" >>/etc/systemd/system/ollama.service.d/environment.conf
+ echo "ollama served"
+
+ ollama run llama2 &
+ # nohup ollama run llama2 > output.log 2>&1 &
+
+ echo "llama2 running"
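Since `ollama serve &` returns immediately, `ollama run llama2` can race the server startup, which matches the "could not connect to ollama server" line in output.log. One possible guard, a sketch with illustrative timeouts, would go between the two commands:

```sh
# wait until the server answers before pulling/running a model
for _ in $(seq 1 30); do
    curl -s http://127.0.0.1:11434/ >/dev/null && break
    sleep 1
done
```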
process_two.sh ADDED
@@ -0,0 +1,9 @@
+ #!/usr/bin/bash
+ echo "calling process two......!"
+
+ # Install dependencies
+ pip install --no-cache-dir --upgrade streamlit
+ ls -al
+ pip install --no-cache-dir --upgrade -r /frontend/requirements.txt
+
+ streamlit run /frontend/home.py
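When this runs inside the container, it may help to pin the launch to the port the Dockerfile EXPOSEs; these are standard Streamlit flags, shown as a hedged variant of the last line:

```sh
# pin the port to EXPOSE 8501 and bind all interfaces explicitly
streamlit run /frontend/home.py --server.port 8501 --server.address 0.0.0.0
```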
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ fastapi==0.74.*
+ sentencepiece==0.1.*
+ torch==1.11.*
+ uvicorn[standard]==0.17.*
+ streamlit
+ langchain
+ transformers
+ pandas
+ requests
streaming.py ADDED
@@ -0,0 +1,12 @@
+ from langchain.callbacks.base import BaseCallbackHandler
+ import streamlit as st
+
+ class StreamHandler(BaseCallbackHandler):
+
+     def __init__(self, container, initial_text=""):
+         self.container = container
+         self.text = initial_text
+
+     def on_llm_new_token(self, token: str, **kwargs):
+         self.text += token
+         self.container.markdown(self.text)
utils.py ADDED
@@ -0,0 +1,55 @@
+ import os
+ import random
+ import streamlit as st
+
+ # decorator
+ def enable_chat_history(func):
+     if os.environ.get("OPENAI_API_KEY"):
+
+         # to clear chat history after switching chatbot
+         current_page = func.__qualname__
+         if "current_page" not in st.session_state:
+             st.session_state["current_page"] = current_page
+         if st.session_state["current_page"] != current_page:
+             try:
+                 st.cache_resource.clear()
+                 del st.session_state["current_page"]
+                 del st.session_state["messages"]
+             except KeyError:
+                 pass
+
+         # to show chat history on ui
+         if "messages" not in st.session_state:
+             st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
+         for msg in st.session_state["messages"]:
+             st.chat_message(msg["role"]).write(msg["content"])
+
+     def execute(*args, **kwargs):
+         func(*args, **kwargs)
+     return execute
+
+ def display_msg(msg, author):
+     """Display a message on the UI.
+
+     Args:
+         msg (str): message to display
+         author (str): author of the message - user/assistant
+     """
+     st.session_state.messages.append({"role": author, "content": msg})
+     st.chat_message(author).write(msg)
+
+ def configure_openai_api_key():
+     openai_api_key = st.sidebar.text_input(
+         label="OpenAI API Key",
+         type="password",
+         value=st.session_state['OPENAI_API_KEY'] if 'OPENAI_API_KEY' in st.session_state else '',
+         placeholder="sk-..."
+     )
+     if openai_api_key:
+         st.session_state['OPENAI_API_KEY'] = openai_api_key
+         os.environ['OPENAI_API_KEY'] = openai_api_key
+     else:
+         st.error("Please add your OpenAI API key to continue.")
+         st.info("Obtain your key from this link: https://platform.openai.com/account/api-keys")
+         st.stop()
+     return openai_api_key
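Because `enable_chat_history` renders history only when OPENAI_API_KEY is set, even the Ollama-backed chain in home.py depends on the key being present (exported, or entered via `configure_openai_api_key`). A launch sketch; the key value is a placeholder:

```sh
# any non-empty key satisfies the decorator's environment check
export OPENAI_API_KEY=sk-placeholder
streamlit run /frontend/home.py
```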