cip committed on
Commit • e3e14e0
1 Parent(s): 3afd828
init
Browse files
- app.py +145 -0
- gradio_cached_examples/13/log.csv +2 -0
- gradio_cached_examples/15/log.csv +4 -0
- requirements.txt +10 -0
app.py
ADDED
@@ -0,0 +1,145 @@
+import string
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import gradio as gr
+import os
+import time
+from wikipediaapi import Wikipedia
+
+import feedparser
+import html2text
+
+# url = "https://rss.hotnews.ro"
+# url = "https://www.thecipherbrief.com/feed"
+# url = "https://feeds.simplecast.com/54nAGcIl"
+# url = "https://www.reddit.com/r/news/.rss"
+
+# def init_model():
+#     system_prompt = "### System:\nYou are StableBeluga, an AI that follows instructions extremely well. Help as much as you can.\n\n"
+#     tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga-7B", use_fast=True)
+#     # model = AutoModelForCausalLM.from_pretrained("stabilityai/StableBeluga-7B", load_in_8bit=True, low_cpu_mem_usage=True, device_map=0)
+#     model = AutoModelForCausalLM.from_pretrained("stabilityai/StableBeluga-7B", low_cpu_mem_usage=True, device_map=0)
+#     return system_prompt, tokenizer, model
+
+# system_prompt, tokenizer, model = init_model()
+
+# def ask_assistant(prompt, token=tokenizer, md=model):
+#     inputs = token(make_prompt(prompt), return_tensors="pt").to("cuda")
+#     # output = md.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256).to('cpu')
+#     output = md.generate(**inputs, max_new_tokens=150)
+#     # return tokenizer.decode(output[0], skip_special_tokens=True)
+#     response = tokenizer.decode(output[0])
+#     return response.split('### Assistant: \n')[1]
+
+# def make_prompt(user, syst=system_prompt):
+#     return f"{syst}### User: {user}\n\n### Assistant:\n"
+
+# def ask_assistant_in_context(question, context):
+#     prompt = f"""Answer the question with the help of the provided context.
+
+# ## Context
+
+# {context}
+
+# ## Question
+
+# {question}"""
+
+#     return ask_assistant(prompt)
+
+def ask_assistant_in_context(question, context):
+    return "raspuns, bla, bla, bla"  # placeholder reply ("raspuns" is Romanian for "answer")
+
+# def create_chat(id = 0, context=""):
+#     chat = st.container()
+
+#     # Initialize chat history
+#     if "messages" not in st.session_state:
+#         st.session_state.messages = []
+
+#     # Display chat messages from history on app rerun
+#     for message in st.session_state.messages:
+#         if message['id'] == id:
+#             chat.chat_message(message['role']).write(message['content'])
+#             # context += message['role'] + ": " + message['content'] + "\n"
+
+#     # Accept user input
+#     if prompt := st.chat_input(placeholder = "Don't waste time reading, ask me", key = id):
+
+#         chat.chat_message("user").write(prompt)
+#         with st.spinner('Wait for it...'):
+#             assistant_response = ask_assistant_in_context(prompt, context)
+#             chat.chat_message("assistant").write(f"{assistant_response}")
+
+#         st.session_state.messages.append({"id": id, "role": "user", "content": prompt})
+#         st.session_state.messages.append({"id": id, "role": "assistant", "content": assistant_response})
+
+# url = "https://www.europarl.europa.eu/rss/doc/last-news-committees/en.xml"
+# feed = feedparser.parse(url)
+
+with gr.Accordion("See Details"):
+    gr.Markdown("lorem ipsum")
+
+# st.subheader(feed.feed.title, divider='gray')
+# key = 0
+
+# for entry in feed.entries:
+#     title = entry.title.split('-')[1].strip()
+
+#     col1, col2 = st.columns([0.1, 0.9])
+
+#     with col1.popover(""):
+#         content = html2text.html2text(entry.summary)
+#         st.write(content)
+#         st.divider()
+
+#         create_chat(key, content)
+#         key += 1
+
+#     col2.write(f":gray[{entry.published}]")
+#     col2.write(f"**{title}**")
+
+#     st.divider()
+
+
+# def show_message(button_text):
+#     """Function to return a message based on which button was clicked."""
+#     return f"You clicked: {button_text}"
+
+# def main():
+#     with gr.Blocks() as app:
+#         # Create a container for buttons
+#         with gr.Column():
+#             button_texts = ["Item 1", "Item 2", "Item 3"]
+#             outputs = []
+
+#             for text in button_texts:
+#                 # Create a button for each item
+#                 btn = gr.Button(value=text)
+#                 output_text = gr.Textbox()
+#                 btn.click(fn=show_message, inputs=[btn], outputs=output_text)
+#                 outputs.append(output_text)
+
+#     app.launch()
+
+# if __name__ == "__main__":
+#     main()
+
+def yes_man(message, history):
+    if message.endswith("?"):
+        return "Yes"
+    else:
+        return "Ask me anything!"
+
+gr.ChatInterface(
+    yes_man,
+    chatbot=gr.Chatbot(height=300),
+    textbox=gr.Textbox(placeholder="Ask me a yes or no question", container=False, scale=7),
+    title="Model",
+    description="Ask Model any question",
+    theme="soft",
+    examples=[""],
+    cache_examples=True,
+    retry_btn=None,
+    undo_btn=None,
+    clear_btn=None,
+).launch()
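Note: the live app only ships the yes_man demo; the StableBeluga path is committed commented out, with ask_assistant_in_context stubbed. A minimal sketch of how that path could replace the stub if re-enabled, reconstructed from the commented-out code above — it assumes a CUDA device and the stabilityai/StableBeluga-7B weights are available, and is not part of this commit:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Setup taken from the commented-out init_model().
SYSTEM_PROMPT = "### System:\nYou are StableBeluga, an AI that follows instructions extremely well. Help as much as you can.\n\n"
tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga-7B", use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
    "stabilityai/StableBeluga-7B", low_cpu_mem_usage=True, device_map=0
)

def make_prompt(user):
    # StableBeluga's chat format: system prompt, user turn, then the assistant marker.
    return f"{SYSTEM_PROMPT}### User: {user}\n\n### Assistant:\n"

def ask_assistant(prompt):
    inputs = tokenizer(make_prompt(prompt), return_tensors="pt").to("cuda")
    output = model.generate(**inputs, max_new_tokens=150)
    text = tokenizer.decode(output[0], skip_special_tokens=True)
    # The reply is whatever follows the assistant marker.
    return text.split("### Assistant:")[-1].strip()

def ask_assistant_in_context(question, context):
    # Same prompt shape as the commented-out version: context first, then the question.
    prompt = f"Answer the question with the help of the provided context.\n\n## Context\n\n{context}\n\n## Question\n\n{question}"
    return ask_assistant(prompt)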
gradio_cached_examples/13/log.csv
ADDED
@@ -0,0 +1,2 @@
+component 0,flag,username,timestamp
+"[["""", ""Ask me anything!""]]",,,2024-03-28 01:17:13.202639
gradio_cached_examples/15/log.csv
ADDED
@@ -0,0 +1,4 @@
+component 0,flag,username,timestamp
+"[[""Hello"", ""Ask me anything!""]]",,,2024-03-28 01:10:33.526848
+"[[""Am I cool?"", ""Yes""]]",,,2024-03-28 01:10:33.530444
+"[[""Are tomatoes vegetables?"", ""Yes""]]",,,2024-03-28 01:10:33.539654
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+--extra-index-url https://download.pytorch.org/whl/cu113
+transformers
+datasets
+torch
+sentencepiece
+accelerate
+bitsandbytes
+wikipedia-api
+feedparser
+html2text