import re
from typing import List

import gradio as gr
import openai
import pinecone
from llama_index import VectorStoreIndex, StorageContext, ServiceContext
from llama_index.chat_engine.types import ChatMode
from llama_index.llms import ChatMessage, MessageRole, OpenAI
from llama_index.vector_stores import PineconeVectorStore

from environments import OPENAI_API_KEY, PINECONE_API_KEY, PINECONE_INDEX, PASSWORD

openai.api_key = OPENAI_API_KEY
# openai.log = 'debug'

pinecone.init(api_key=PINECONE_API_KEY, environment='gcp-starter')
pinecone_index = pinecone.Index(PINECONE_INDEX)

# Query an existing, already-populated Pinecone index. No documents are
# ingested here, so VectorStoreIndex is built from an empty list.
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo-1106")
service_context = ServiceContext.from_defaults(llm=llm)
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents([], storage_context=storage_context, service_context=service_context)
chat_engine = index.as_chat_engine(chat_mode=ChatMode.CONTEXT)

# Canned refusal: "Sorry, I am designed to answer questions about the
# services of the Tuen Mun District Health Centre."
DENIED_ANSWER_PROMPT = '對不起,我是設計用於回答關於屯門地區康健中心的服務內容'

# System prompt: the assistant "點子" answers only questions about the Tuen Mun
# District Health Centre. It must refuse unrelated questions, health or medical
# advice not found in the retrieved context, and requests such as arithmetic,
# translation, code generation, or essay writing; it must also refuse when no
# context is retrieved, and keep replies short and grounded in the context.
SYSTEM_PROMPT = '你是屯門地區康健中心的智能助理「點子」,你能解答關於屯門地區康健中心的服務內容。' \
                '你不能回答任何非解答屯門地區康健中心有關的內容。' \
                f'如你被要求回答無關屯門地區康健中心的問題,你可以回答「{DENIED_ANSWER_PROMPT}」為完整回覆,並提供相關的屯門地區康健中心的服務內容。' \
                '你不能提供context沒有提及的健康資訊,醫學建議或者醫療相關的解答。' \
                f'如你被要求解答context沒有提及的健康資訊,醫學建議或者醫療相關的問題,你可以回答「{DENIED_ANSWER_PROMPT}」為完整回覆,並提供相關的屯門地區康健中心的服務內容。' \
                '你不能進行算術,翻譯,程式碼生成,文章生成等,與屯門地區康健中心無關的要求。' \
                f'如你被要求進行算術,翻譯,程式碼生成,文章生成等,與屯門地區康健中心無關的要求,你可以回答「{DENIED_ANSWER_PROMPT}」為完整回覆,並提供相關的屯門地區康健中心的服務內容。' \
                f'如果當前的 prompt 沒有任何 context 可供參考,你可以回答「{DENIED_ANSWER_PROMPT}」為完整回覆,並提供相關的屯門地區康健中心的服務內容。' \
                '回覆請保持簡短,跟從提供的context, 不要自行添加回答內容。'

# Example questions shown in the chat UI (self-introduction; address and
# opening hours; services offered; chronic disease management; fees).
CHAT_EXAMPLES = [
    '你可以自我介紹嗎?',
    '中心地址及服務時間',
    '中心提供什麼服務?',
    '中心有提供關於慢性疾病管理的服務嗎?',
    '服務收費如何?',
]


def convert_to_chat_messages(history: List[List[str]]) -> List[ChatMessage]:
    chat_messages = [ChatMessage(role=MessageRole.SYSTEM, content=SYSTEM_PROMPT)]

    # Keep only the last three exchanges, and skip any exchange where the
    # assistant refused, so earlier refusals do not bias follow-up answers.
    for conversation in history[-3:]:
        if len(conversation) > 1 and DENIED_ANSWER_PROMPT in conversation[1]:
            continue
        # Use `i` rather than `index` to avoid shadowing the module-level
        # VectorStoreIndex.
        for i, message in enumerate(conversation):
            if not message:
                continue
            # Strip the "參考:" (references) footer that predict() appends.
            message = re.sub(r'\n \n\n---\n\n參考: \n.*$', '', message, flags=re.DOTALL)
            role = MessageRole.USER if i % 2 == 0 else MessageRole.ASSISTANT
            chat_messages.append(ChatMessage(role=role, content=message.strip()))

    return chat_messages


def predict(message, history):
    response = chat_engine.stream_chat(message, chat_history=convert_to_chat_messages(history))

    partial_message = ""
    for token in response.response_gen:
        partial_message += token
        yield partial_message

    # Collect source URLs from high-relevance nodes only; guard against a
    # missing score, which some retrievers report as None.
    urls = []
    for source in response.source_nodes:
        if source.score is None or source.score < 0.78:
            continue
        url = source.node.metadata.get('source')
        if url:
            urls.append(url)

    if urls:
        partial_message += "\n \n\n---\n\n參考: \n"  # "References:" footer
        for url in sorted(set(urls)):  # de-duplicate, deterministic order
            partial_message += f"- {url}\n"
        yield partial_message


# Thin alias kept for parity with the other predict_* entry points.
def predict_with_rag(message, history):
    return predict(message, history)


# For 'With Prompt Wrapper' - add the system prompt, but no Pinecone retrieval.
def predict_with_prompt_wrapper(message, history):
    yield from _invoke_chatgpt(history, message, is_include_system_prompt=True)


# For 'Vanilla ChatGPT' - no system prompt.
def predict_vanilla_chatgpt(message, history):
    yield from _invoke_chatgpt(history, message)


def _invoke_chatgpt(history, message, is_include_system_prompt=False):
    history_openai_format = []
    if is_include_system_prompt:
        history_openai_format.append({"role": "system", "content": SYSTEM_PROMPT})
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=history_openai_format,
        temperature=1.0,
        stream=True,
    )

    partial_message = ""
    for chunk in response:
        # The first streamed delta carries only the role and the last one is
        # empty, so read 'content' with .get() instead of indexing it.
        content = chunk['choices'][0]['delta'].get('content')
        if content:
            partial_message += content
            yield partial_message


def vote(data: gr.LikeData):
    if data.liked:
        gr.Info("You up-voted this response: " + data.value)
    else:
        gr.Info("You down-voted this response: " + data.value)


chatbot = gr.Chatbot()

with gr.Blocks() as demo:
    # Title: Tuen Mun District Health Centre smart assistant "點子"
    gr.Markdown("# 屯門地區康健中心智能助理「點子」")
    with gr.Tab("透過網站內容進行回答"):  # "Answer from website content"
        gr.ChatInterface(predict, chatbot=chatbot, examples=CHAT_EXAMPLES)
        chatbot.like(vote, None, None)

    # with gr.Tab("With Initial System Prompt (a.k.a. prompt wrapper)"):
    #     gr.ChatInterface(predict_with_prompt_wrapper, examples=CHAT_EXAMPLES)
    #
    # with gr.Tab("Vanilla ChatGPT without modification"):
    #     gr.ChatInterface(predict_vanilla_chatgpt, examples=CHAT_EXAMPLES)

demo.queue()
demo.launch(share=False, auth=("demo", PASSWORD))
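# Usage note (a sketch of assumed setup, not confirmed by this file): running
# this module directly starts the Gradio app behind HTTP basic auth
# ("demo" / PASSWORD). It expects a local `environments.py` exposing
# OPENAI_API_KEY, PINECONE_API_KEY, PINECONE_INDEX and PASSWORD, plus a
# Pinecone index already populated with the centre's website content, since
# the VectorStoreIndex above is created from an empty document list. The
# pre-1.0 `openai` SDK and legacy `pinecone.init`/`llama_index` imports imply
# correspondingly pinned package versions.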