AlexTian committed on
Commit
b68dc3f
1 Parent(s): 2849554
README.md CHANGED
@@ -7,6 +7,4 @@ sdk: gradio
  app_file: app.py
  pinned: false
  license: mit
- ---
-
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
+ ---
 
app.py DELETED
@@ -1,63 +0,0 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
 
config.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "model": {
+         "api_key": "your_api_key_here",
+         "model_name": "your_model_name_here",
+         "endpoint": "your_model_endpoint_here"
+     }
+ }
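
For reference, a minimal sketch (not part of this commit) of how these placeholder values are meant to be consumed once real credentials are filled in; the key names match config.json above, everything else is illustrative.

import json

# Read the placeholder config committed above and pull out the model settings.
with open("config.json", "r") as f:
    cfg = json.load(f)

api_key = cfg["model"]["api_key"]       # "your_api_key_here" until replaced
model_name = cfg["model"]["model_name"]
endpoint = cfg["model"]["endpoint"]     # e.g. an OpenAI-compatible base URL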
prompt.json ADDED
@@ -0,0 +1,3 @@
+ {
+     "assistant": ""
+ }
requirements.txt CHANGED
@@ -1 +1,135 @@
- huggingface_hub==0.22.2
+ aiofiles==23.2.1
+ aiohttp @ file:///Users/runner/miniforge3/conda-bld/aiohttp_1707669845082/work
+ aiosignal @ file:///home/conda/feedstock_root/build_artifacts/aiosignal_1667935791922/work
+ altair==5.3.0
+ annotated-types @ file:///home/conda/feedstock_root/build_artifacts/annotated-types_1696634205638/work
+ anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1708355285029/work
+ appnope @ file:///home/conda/feedstock_root/build_artifacts/appnope_1707233003401/work
+ asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1698341106958/work
+ async-timeout @ file:///home/conda/feedstock_root/build_artifacts/async-timeout_1691763562544/work
+ attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1704011227531/work
+ Brotli @ file:///Users/runner/miniforge3/conda-bld/brotli-split_1695989934239/work
+ certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1707022139797/work/certifi
+ charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1698833585322/work
+ click==8.1.7
+ comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1710320294760/work
+ contourpy==1.2.1
+ cycler==0.12.1
+ dataclasses-json @ file:///home/conda/feedstock_root/build_artifacts/dataclasses-json_1706837029949/work
+ debugpy @ file:///Users/runner/miniforge3/conda-bld/debugpy_1707444662218/work
+ decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
+ distro==1.9.0
+ dnspython==2.6.1
+ email_validator==2.1.1
+ exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1704921103267/work
+ executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1698579936712/work
+ fastapi==0.111.0
+ fastapi-cli==0.0.4
+ ffmpy==0.3.2
+ filelock==3.13.1
+ fonttools==4.52.4
+ frozenlist @ file:///Users/runner/miniforge3/conda-bld/frozenlist_1702645558715/work
+ fsspec==2024.3.1
+ gradio==4.32.1
+ gradio_client==0.17.0
+ greenlet @ file:///Users/runner/miniforge3/conda-bld/greenlet_1703201714305/work
+ h11==0.14.0
+ httpcore==1.0.5
+ httptools==0.6.1
+ httpx==0.27.0
+ huggingface-hub==0.21.4
+ idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1701026962277/work
+ importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1709821103657/work
+ importlib_resources==6.4.0
+ ipykernel @ file:///Users/runner/miniforge3/conda-bld/ipykernel_1708996616394/work
+ ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1709559745751/work
+ jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work
+ Jinja2==3.1.3
+ jsonpatch @ file:///home/conda/feedstock_root/build_artifacts/jsonpatch_1695536281965/work
+ jsonpointer @ file:///Users/runner/miniforge3/conda-bld/jsonpointer_1695397393385/work
+ jsonschema==4.22.0
+ jsonschema-specifications==2023.12.1
+ jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1710255804825/work
+ jupyter_core @ file:///Users/runner/miniforge3/conda-bld/jupyter_core_1710257589360/work
+ kiwisolver==1.4.5
+ langchain @ file:///home/conda/feedstock_root/build_artifacts/langchain_1710305624835/work
+ langchain-community @ file:///home/conda/feedstock_root/build_artifacts/langchain-community_1710298573995/work
+ langchain-core @ file:///home/conda/feedstock_root/build_artifacts/langchain-core_1710294774748/work
+ langchain-text-splitters @ file:///home/conda/feedstock_root/build_artifacts/langchain-text-splitters_1709389732771/work
+ langsmith @ file:///home/conda/feedstock_root/build_artifacts/langsmith_1710383848363/work
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ marshmallow @ file:///home/conda/feedstock_root/build_artifacts/marshmallow_1709595108657/work
+ matplotlib==3.9.0
+ matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1660814786464/work
+ mdurl==0.1.2
+ mpmath==1.3.0
+ multidict @ file:///Users/runner/miniforge3/conda-bld/multidict_1707040780513/work
+ mypy-extensions @ file:///home/conda/feedstock_root/build_artifacts/mypy_extensions_1675543315189/work
+ nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work
+ networkx==3.2.1
+ numpy @ file:///Users/runner/miniforge3/conda-bld/numpy_1707225421156/work/dist/numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl#sha256=011d57633d659db8280a5811d62d0c5a615719a6d7cbd46a04f6ffce0c2a7db3
+ openai==1.30.5
+ orjson @ file:///Users/runner/miniforge3/conda-bld/orjson_1708717260543/work/target/wheels/orjson-3.9.15-cp312-cp312-macosx_11_0_arm64.whl#sha256=57fbef228e0aecf0299b0b249a966d5125bdf6bd5b3931d7e45746c397a75979
+ packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1696202382185/work
+ pandas==2.2.2
+ parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1638334955874/work
+ pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1706113125309/work
+ pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
+ pillow==10.3.0
+ platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1706713388748/work
+ prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1702399386289/work
+ psutil @ file:///Users/runner/miniforge3/conda-bld/psutil_1705722460205/work
+ ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
+ pure-eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1642875951954/work
+ pydantic @ file:///home/conda/feedstock_root/build_artifacts/pydantic_1709075187841/work
+ pydantic_core @ file:///Users/runner/miniforge3/conda-bld/pydantic-core_1708700664642/work
+ pydub==0.25.1
+ Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1700607939962/work
+ pyparsing==3.1.2
+ PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1661604839144/work
+ python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work
+ python-dotenv==1.0.1
+ python-multipart==0.0.9
+ pytz==2024.1
+ PyYAML @ file:///Users/runner/miniforge3/conda-bld/pyyaml_1695373531920/work
+ pyzmq @ file:///Users/runner/miniforge3/conda-bld/pyzmq_1701783269366/work
+ referencing==0.35.1
+ regex==2023.12.25
+ requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1684774241324/work
+ rich==13.7.1
+ rpds-py==0.18.1
+ ruff==0.4.6
+ safetensors==0.4.2
+ semantic-version==2.10.0
+ setuptools==69.2.0
+ shellingham==1.5.4
+ six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
+ sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1708952932303/work
+ SQLAlchemy @ file:///Users/runner/miniforge3/conda-bld/sqlalchemy_1709646319518/work
+ stack-data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1669632077133/work
+ starlette==0.37.2
+ sympy==1.12
+ tenacity @ file:///home/conda/feedstock_root/build_artifacts/tenacity_1692026804430/work
+ tokenizers==0.15.2
+ tomlkit==0.12.0
+ toolz==0.12.1
+ torch==2.2.1
+ tornado @ file:///Users/runner/miniforge3/conda-bld/tornado_1708363367885/work
+ tqdm==4.66.2
+ traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1710254411456/work
+ transformers==4.38.2
+ typer==0.12.3
+ typing-inspect @ file:///home/conda/feedstock_root/build_artifacts/typing_inspect_1685820062773/work
+ typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1708904622550/work
+ tzdata==2024.1
+ ujson==5.10.0
+ urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1708239446578/work
+ uvicorn==0.30.0
+ uvloop==0.19.0
+ watchfiles==0.22.0
+ wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1704731205417/work
+ websockets==11.0.3
+ wheel==0.42.0
+ yarl @ file:///Users/runner/miniforge3/conda-bld/yarl_1705508643525/work
+ zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1695255097490/work
skills.json ADDED
@@ -0,0 +1,38 @@
+ [
+     {
+         "triggers": ["调高亮度"],
+         "action": "CMD_LIGHT_UP"
+     },
+     {
+         "triggers": ["调低亮度"],
+         "action": "CMD_LIGHT_DOWN"
+     },
+     {
+         "triggers": ["调高音量"],
+         "action": "CMD_SOUND_UP"
+     },
+     {
+         "triggers": ["调低音量"],
+         "action": "CMD_SOUND_DOWN"
+     },
+     {
+         "triggers": ["重启系统"],
+         "action": "CMD_RESTART"
+     },
+     {
+         "triggers": ["锁定屏幕"],
+         "action": "CMD_LOCK_SCREEN"
+     },
+     {
+         "triggers": ["深色模式"],
+         "action": "CMD_DARKMODE"
+     },
+     {
+         "triggers": ["调节亮度至{level}%"],
+         "action": "CMD_SET_BRIGHTNESS_{{level}}"
+     },
+     {
+         "triggers": ["设置{time}分钟的计时器"],
+         "action": "CMD_SET_TIMER_{{time}}M"
+     }
+ ]
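
The last two entries use templated triggers ("调节亮度至{level}%", "设置{time}分钟的计时器") whose placeholders are meant to be filled into the action code. A sketch (purely illustrative, not part of this commit) of how such templates could be matched; the committed get_skill in src/skill_repository.py below only does plain substring matching and cannot resolve them.

import re

def match_templated_skill(user_input, skill):
    # Turn "调节亮度至{level}%" into the regex "调节亮度至(?P<level>\d+)%",
    # then substitute the captured value into "CMD_SET_BRIGHTNESS_{{level}}".
    for trigger in skill["triggers"]:
        pattern = re.sub(r"\{(\w+)\}", r"(?P<\1>\\d+)", trigger)
        match = re.fullmatch(pattern, user_input)
        if match:
            action = skill["action"]
            for name, value in match.groupdict().items():
                action = action.replace("{{" + name + "}}", value)
            return action
    return None

# e.g. match_templated_skill("调节亮度至80%", skills[7]) -> "CMD_SET_BRIGHTNESS_80"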
src/__init__.py ADDED
File without changes
src/__pycache__/config.cpython-312.pyc ADDED
Binary file (474 Bytes).
 
src/__pycache__/dialogue_management.cpython-312.pyc ADDED
Binary file (2.05 kB).
 
src/__pycache__/function_trigger.cpython-312.pyc ADDED
Binary file (304 Bytes).
 
src/__pycache__/scene_recognition.cpython-312.pyc ADDED
Binary file (608 Bytes).
 
src/__pycache__/skill_repository.cpython-312.pyc ADDED
Binary file (922 Bytes).
 
src/app.py ADDED
@@ -0,0 +1,57 @@
+ import gradio as gr
+ from scene_recognition import recognize_scene
+ from dialogue_management import manage_dialogue
+ from function_trigger import trigger_function
+ from config import load_config
+ from skill_repository import load_skills
+
+ config = load_config()
+ skills = load_skills()
+
+ # Scene recognition function
+ def recognize(system_time, app_name, system_load):
+     system_data = {
+         'time': system_time,
+         'app_name': app_name,
+         'system_load': system_load
+     }
+     scene = recognize_scene(config, system_data)
+     return scene['type']
+
+ # Dialogue function
+ def respond(user_input):
+     response, action = manage_dialogue(user_input, skills)
+     result = trigger_function(action, config)
+     return response, result
+
+ # Create two independent interfaces
+ recognize_interface = gr.Interface(
+     fn=recognize,
+     inputs=[
+         gr.Textbox(label="System Time"),
+         gr.Textbox(label="Application Name"),
+         gr.Number(label="System Load")
+     ],
+     outputs=gr.Textbox(label="Scene Type"),
+     title="Scene Recognition",
+     description="Enter system data to recognize the scene."
+ )
+
+ respond_interface = gr.Interface(
+     fn=respond,
+     inputs=[
+         gr.Textbox(label="User Input"),
+     ],
+     outputs=[
+         gr.Markdown(label="AI Response"),
+         gr.Textbox(label="Action Result"),
+     ],
+     title="AI Assistant Response",
+     description="Enter your command and the AI Assistant will respond accordingly."
+ )
+
+ # Combine the two interfaces into a tabbed app
+ interface = gr.TabbedInterface([recognize_interface, respond_interface], ["Scene Recognition", "AI Assistant Response"])
+
+ if __name__ == "__main__":
+     interface.launch()
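
respond() chains manage_dialogue → trigger_function. A quick sanity check of that wiring outside the UI might look like the following (illustrative only: it calls the real DeepSeek endpoint, so it needs a valid key and must run from inside src/ so the relative config/skills paths resolve).

# Expected behaviour when the model returns "S:CMD_SOUND_UP" for this input.
response, result = respond("调高音量")
print(response)  # "任务正在执行中..."
print(result)    # "CMD_SOUND_UP"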
src/config.py ADDED
@@ -0,0 +1,6 @@
+ import json
+
+ def load_config():
+     with open('../config.json', 'r') as file:
+         config = json.load(file)
+     return config
src/dialogue_management.py ADDED
@@ -0,0 +1,22 @@
+ from skill_repository import get_skill
+ from openai import OpenAI
+ import json
+
+ def manage_dialogue(user_input, skills):
+     client = OpenAI(api_key="sk-6d543a8d079d4cee83f2bda58a6469d1", base_url="https://api.deepseek.com")
+     print(skills)
+     prompt = f"你是一个具有意图识别能力的助手。你需要仔细判断用户的输入中是否包含命令,当你检测到用户的命令时并且这些命令存在于命令库中时,请严格按照以下格式返回相应的传输指令代码。否则,请进行正常对话。以下是命令库中的全部命令(triggers)及其对应的动作(action)指令代码:{skills} \n严格要求:1. 只有当用户输入匹配命令库中的命令时,才返回相应的指令代码。2. 返回格式必须严格遵守:S:{{指令代码}}。不能有任何拼写或格式错误。3. 当用户输入未包含任何命令时,进行正常对话。\n示例:用户输入:能帮我重启系统吗? \n输出:S:CMD_RESTART \n用户输入:今天天气怎么样? \n 输出:今天天气很好,可能会有阵雨。\n 现在,请根据上述规则严格响应以下用户输入:"
+     response = client.chat.completions.create(
+         model="deepseek-chat",
+         messages=[
+             {"role": "system", "content": f"{prompt}"},
+             {"role": "user", "content": f"{user_input}"},
+         ],
+         stream=False
+     )
+     answer = response.choices[0].message.content
+     print(answer)
+     if answer[0] == 'S':
+         return "任务正在执行中...", answer[2:]
+     else:
+         return answer, None
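
The DeepSeek credentials are hard-coded above even though config.json in this commit defines api_key and endpoint placeholders. A minimal sketch (not part of the commit) of building the client from that config instead, reusing load_config from src/config.py; the key names follow config.json.

from openai import OpenAI
from config import load_config  # same import style as the other src/ modules

# Assumes config.json has been filled in with a real key and base URL.
model_cfg = load_config()["model"]
client = OpenAI(api_key=model_cfg["api_key"], base_url=model_cfg["endpoint"])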
src/flagged/log.csv ADDED
@@ -0,0 +1,3 @@
+ System Time,Application Name,System Load,Scene Type,flag,username,timestamp
+ 12,chrome,20,default,,,2024-05-31 14:39:24.845526
+ ,,0,default,,,2024-05-31 15:53:45.269655
src/function_trigger.py ADDED
@@ -0,0 +1,5 @@
+ def trigger_function(action, config):
+     if action is None:
+         return "No Action"
+     else:
+         return action
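
trigger_function currently just echoes the action code back to the UI. A sketch (hypothetical handlers, not part of this commit) of how the codes defined in skills.json could eventually be dispatched to real behaviour:

def dispatch_action(action):
    # Placeholder handlers keyed by the action codes defined in skills.json.
    handlers = {
        "CMD_RESTART": lambda: "restart requested (placeholder)",
        "CMD_LOCK_SCREEN": lambda: "screen locked (placeholder)",
        "CMD_DARKMODE": lambda: "dark mode toggled (placeholder)",
    }
    handler = handlers.get(action)
    return handler() if handler else f"Unknown action: {action}"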
src/scene_recognition.py ADDED
@@ -0,0 +1,12 @@
+ def recognize_scene(config, system_data):
+     time = system_data['time']
+     app_name = system_data['app_name']
+     system_load = system_data['system_load']
+
+     # Example scene recognition logic based on the system data
+     if "office" in app_name.lower() and system_load < 50:
+         return {'type': 'office'}
+     elif "video" in app_name.lower() and system_load >= 50:
+         return {'type': 'video'}
+     else:
+         return {'type': 'default'}
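
An example call (values illustrative), matching how src/app.py builds system_data from the Gradio inputs; config is unused by this function, so an empty dict suffices.

scene = recognize_scene(
    config={},
    system_data={"time": "14:00", "app_name": "Office Word", "system_load": 30},
)
print(scene)  # {'type': 'office'}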
src/skill_repository.py ADDED
@@ -0,0 +1,12 @@
+ import json
+
+ def load_skills():
+     with open('../skills.json', 'r') as file:
+         skills = json.load(file)
+     return skills
+
+ def get_skill(user_input, skills):
+     for skill in skills:
+         if any(trigger in user_input for trigger in skill['triggers']):
+             return skill
+     return None
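
get_skill does plain substring matching against the triggers, so a command embedded in a longer sentence still matches; a small illustrative check:

skills = [{"triggers": ["重启系统"], "action": "CMD_RESTART"}]
print(get_skill("能帮我重启系统吗?", skills))
# -> {'triggers': ['重启系统'], 'action': 'CMD_RESTART'}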