StevenChen16 committed on
Commit 4f86cab
1 Parent(s): dd9b534

Update app.py

Files changed (1)
  1. app.py +36 -66
app.py CHANGED
@@ -1,15 +1,42 @@
- import threading
- import subprocess
  import gradio as gr
- from flask import Flask, request, stream_with_context, Response
- from flask_cors import CORS
+ import os
+ from threading import Thread
  from llamafactory.chat import ChatModel
+ from llamafactory.extras.misc import torch_gc
 
- # Define the Flask backend (contents of app2.py)
- flask_app = Flask(__name__)
- CORS(flask_app)
 
- # Set model parameters
+ DESCRIPTION = '''
+ <div>
+ <h1 style="text-align: center;">AI Lawyer</h1>
+ </div>
+ '''
+
+ LICENSE = """
+ <p/>
+ ---
+ Built with model "StevenChen16/Llama3-8B-Lawyer", based on "meta-llama/Meta-Llama-3-8B"
+ """
+
+ PLACEHOLDER = """
+ <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+    <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">AI Lawyer</h1>
+    <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything about US and Canada law...</p>
+ </div>
+ """
+
+ css = """
+ h1 {
+   text-align: center;
+   display: block;
+ }
+ #duplicate-button {
+   margin: auto;
+   color: white;
+   background: #1565c0;
+   border-radius: 100vh;
+ }
+ """
+
  args = dict(
      model_name_or_path="StevenChen16/llama3-8b-Lawyer",
      template="llama3",
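Note: the newly added import `from llamafactory.extras.misc import torch_gc` is not referenced in any of the hunks shown below. A minimal sketch of how it might be wired up, assuming a hypothetical clear-history handler for the Gradio chatbot (the function name is illustrative, not from this commit):

def clear_session():
    # Hypothetical helper, not part of this diff: free cached GPU memory when the
    # user resets the conversation, then return an empty chat history.
    torch_gc()
    return []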
@@ -17,11 +44,8 @@ args = dict(
      quantization_bit=8,
      use_unsloth=True,
  )
-
- # Load the model
  chat_model = ChatModel(args)
 
- # Set the background prompt
  background_prompt = """
  As an AI legal assistant, you are a highly trained expert in U.S. and Canadian law. Your purpose is to provide accurate, comprehensive, and professional legal information to assist users with a wide range of legal questions. When answering questions, you should actively ask questions to obtain more information, analyze from different perspectives, and explain your reasoning process to the user. Please adhere to the following guidelines:
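With 8-bit quantization and Unsloth enabled, `ChatModel(args)` loads the fine-tuned model once at startup. A quick, hypothetical smoke test (not in the repo) that exercises `stream_chat` the same way `query_model` does further down:

# Hypothetical sanity check, assuming the stream_chat() generator interface used below:
# print the model's reply to a single message as it streams in.
test_messages = [{"role": "user", "content": "Briefly, what does 'tort' mean?"}]
for piece in chat_model.stream_chat(test_messages):
    print(piece, end="", flush=True)
print()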
 
@@ -70,52 +94,6 @@ Potential legal risks and coping strategies
  Please remember that your role is to provide general legal information and analysis, but also to actively guide and interact with the user during the conversation. If you find that necessary information is missing during the conversation, take the initiative to ask for it until you feel you have enough details to provide targeted analysis and advice. Now, please guide me step by step to describe the legal issues I am facing, according to the above requirements.
  """
 
- @flask_app.route('/predict', methods=['POST'])
- def predict():
-     data = request.get_json(force=True)
-     input_text = data.get("inputs", "")
-     prompt = f"{background_prompt}\n\nUser: {input_text}\nAssistant: "
-
-     def generate():
-         messages = [{"role": "user", "content": prompt}]
-         for new_text in chat_model.stream_chat(messages):
-             yield new_text + ' '  # add a space to separate words
-
-     return Response(stream_with_context(generate()), content_type='text/plain')
-
- # Define the Gradio app (contents of app.py)
- DESCRIPTION = '''
- <div>
- <h1 style="text-align: center;">AI Lawyer</h1>
- </div>
- '''
-
- LICENSE = """
- <p/>
- ---
- Built with model "StevenChen16/Llama3-8B-Lawyer", based on "meta-llama/Meta-Llama-3-8B"
- """
-
- PLACEHOLDER = """
- <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-    <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">AI Lawyer</h1>
-    <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything about US and Canada law...</p>
- </div>
- """
-
- css = """
- h1 {
-   text-align: center;
-   display: block;
- }
- #duplicate-button {
-   margin: auto;
-   color: white;
-   background: #1565c0;
-   border-radius: 100vh;
- }
- """
-
  def query_model(user_input, history):
      combined_query = background_prompt + user_input
      messages = [{"role": "user", "content": combined_query}]
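For context on what is being deleted: the old `/predict` route streamed plain text over HTTP from a Flask server on port 6006. A client for that now-removed endpoint would have looked roughly like this (sketch only; the endpoint no longer exists after this commit):

import requests

# Streaming client for the removed Flask endpoint (port 6006 per the old code).
response = requests.post(
    "http://localhost:6006/predict",
    json={"inputs": "Can my landlord raise the rent in the middle of a lease?"},
    stream=True,
)
for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
    print(chunk, end="", flush=True)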
@@ -124,7 +102,7 @@ def query_model(user_input, history):
      for new_text in chat_model.stream_chat(messages, temperature=0.9):
          response += new_text
          yield response
-
+
  # Gradio block
  chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
 
@@ -145,13 +123,5 @@ with gr.Blocks(css=css) as demo:
  )
  gr.Markdown(LICENSE)
 
- # Thread that runs the Flask app
- def run_flask():
-     flask_app.run(host='0.0.0.0', port=6006)
-
- thread = threading.Thread(target=run_flask)
- thread.start()
-
- # Launch the Gradio app
  if __name__ == "__main__":
      demo.launch(share=True)
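`query_model` is a generator, so Gradio streams partial responses as they arrive. The body of `with gr.Blocks(css=css) as demo:` is unchanged and therefore elided from the diff; a self-contained sketch of how such a streaming generator is typically hooked into `gr.ChatInterface` (illustrative only, not the repo's exact layout):

import gradio as gr

def stream_fn(message, history):
    # Toy stand-in for query_model: yield progressively longer partial replies.
    reply = ""
    for word in f"Echo: {message}".split():
        reply += word + " "
        yield reply

with gr.Blocks() as sketch_demo:
    gr.Markdown("<h1 style='text-align: center;'>AI Lawyer</h1>")
    gr.ChatInterface(fn=stream_fn, chatbot=gr.Chatbot(height=450))

if __name__ == "__main__":
    sketch_demo.launch()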
 
 
 