ryefoxlime committed on
Commit
6e9c870
1 Parent(s): 2dd581a

Raspberry pi codes

Browse files
Files changed (7) hide show
  1. .gitignore +1 -0
  2. Audio.py +10 -0
  3. Gemma2_2B/gemma-2-2b-therapy.modelfile +16 -0
  4. Output.py +10 -0
  5. TAD.py +5 -4
  6. pyproject.toml +13 -44
  7. uv.lock +0 -0
.gitignore CHANGED
@@ -9,3 +9,4 @@ Gemma2_2B/.cache
9
  **/*/wandb
10
  Gemma2_2B/outputs/
11
  Gemma2_2B/gemma-2-2b-it-therapist
 
 
9
  **/*/wandb
10
  Gemma2_2B/outputs/
11
  Gemma2_2B/gemma-2-2b-it-therapist
12
+ **/*/__pycache__
Audio.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+
3
+ audio = pyaudio.PyAudio()
4
+
5
+ # List all audio devices
6
+ for i in range(audio.get_device_count()):
7
+ info = audio.get_device_info_by_index(i)
8
+ print(f"Device Index {i}: {info['name']} - {info['hostApi']}")
9
+
10
+ audio.terminate()
Gemma2_2B/gemma-2-2b-therapy.modelfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ FROM /home/TADBot/TADBot/Gemma2_2B/gemma-2-2b-it-therapist/gemma-2-2b-it-therapist-Q4_K_M.gguf
3
+
4
+ TEMPLATE """{{- range $i, $_ := .Messages }}
5
+ {{- $last := eq (len (slice $.Messages $i)) 1 }}
6
+ {{- if or (eq .Role "user") (eq .Role "system") }}<start_of_turn>user
7
+ {{ .Content }}<end_of_turn>
8
+ {{ if $last }}<start_of_turn>model
9
+ {{ end }}
10
+ {{- else if eq .Role "assistant" }}<start_of_turn>model
11
+ {{ .Content }}{{ if not $last }}<end_of_turn>
12
+ {{ end }}
13
+ {{- end }}
14
+ {{- end }}"""
15
+ PARAMETER stop <start_of_turn>
16
+ PARAMETER stop <end_of_turn>
Output.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyttsx3
2
+
3
+ engine = pyttsx3.init('espeak')
4
+ voices = engine.getProperty('voices')
5
+ print("Available voices:")
6
+ for voice in voices:
7
+ print(f"- {voice.id}")
8
+ engine.setProperty('voice', voices[14].id) # Use the voice at index 14 from the listing above
9
+ engine.say("Hello from Raspberry Pi")
10
+ engine.runAndWait()
TAD.py CHANGED
@@ -1,4 +1,5 @@
1
- import ollama
 
2
  import chromadb
3
  import speech_recognition as sr
4
  import requests
@@ -32,7 +33,7 @@ message_history = [
32
  ]
33
  convo = []
34
 
35
- modelname = "ms"
36
 
37
 
38
  def create_vector_db(conversations):
@@ -58,12 +59,12 @@ def create_vector_db(conversations):
58
  def stream_response(prompt):
59
 
60
  convo.append({'role': "user", 'content': prompt})
61
- output = ollama.chat(model = modelname, messages = convo)
62
  response = output['message']['content']
63
 
64
  print("TADBot: ")
65
  print(response)
66
- engine = pyttsx3.init()
67
  engine.say(response)
68
  engine.runAndWait()
69
 
 
1
+ from ollama import Client
2
+ import ollama
3
  import chromadb
4
  import speech_recognition as sr
5
  import requests
 
33
  ]
34
  convo = []
35
 
36
+ llm = Client(host='http://localhost:11434')
37
 
38
 
39
  def create_vector_db(conversations):
 
59
  def stream_response(prompt):
60
 
61
  convo.append({'role': "user", 'content': prompt})
62
+ output = llm.chat(model = "TADBot", messages = convo)
63
  response = output['message']['content']
64
 
65
  print("TADBot: ")
66
  print(response)
67
+ engine = pyttsx3.init('espeak')
68
  engine.say(response)
69
  engine.runAndWait()
70
 
pyproject.toml CHANGED
@@ -5,51 +5,20 @@ description = "Add your description here"
5
  readme = "README.md"
6
  requires-python = ">=3.12"
7
  dependencies = [
8
- "ruff>=0.7.3",
9
- "torch==2.5.1+cu124",
10
- "torchvision==0.20.1+cu124",
11
- "torchaudio==2.5.1+cu124",
12
- "huggingface-hub>=0.26.2",
13
- "timm>=1.0.11",
14
- "thop>=0.1.1.post2209072238",
15
- "deepface==0.0.93",
16
- "bitsandbytes>=0.44.1",
17
- "peft>=0.13.2",
18
- "trl>=0.12.1",
19
- "ninja==1.11.1.1",
20
- "numpy>=1.26.4",
21
- "transformers>=4.46.2",
22
- "datasets>=3.1.0",
23
- "tensorboard>=2.18.0",
24
- "jsonlines>=4.0.0",
25
- "python-dotenv>=1.0.1",
26
- "ipykernel>=6.29.5",
27
- "ipywidgets>=8.1.5",
28
- "pyyaml>=6.0.2",
29
- "torch-tb-profiler>=0.4.3",
30
- "tensorflow>=2.18.0",
31
- "wandb>=0.18.7",
32
- ]
33
-
34
- [tool.uv.sources]
35
- torch = { index = "pytorch" }
36
- torchvision = { index = "pytorch" }
37
- torchaudio = { index = "pytorch" }
38
- deepface = {git = "https://github.com/serengil/deepface", tag="v0.0.93"}
39
-
40
- [[tool.uv.index]]
41
- name = "pytorch"
42
- url = "https://download.pytorch.org/whl/cu124"
43
- explicit = true
44
-
45
- [dependency-groups]
46
- dev = [
47
- "deepface>=0.0.93",
48
- "numpy<2",
49
- "opencv-python>=4.10.0.84",
50
- "thop>=0.1.1.post2209072238",
51
- "timm>=1.0.11",
52
  "torch>=2.5.1",
53
  "torchaudio>=2.5.1",
54
  "torchvision>=0.20.1",
 
 
 
 
 
 
 
 
 
 
 
 
55
  ]
 
 
5
  readme = "README.md"
6
  requires-python = ">=3.12"
7
  dependencies = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  "torch>=2.5.1",
9
  "torchaudio>=2.5.1",
10
  "torchvision>=0.20.1",
11
+ "deepface>=0.0.93",
12
+ "timm>=1.0.11",
13
+ "thop>=0.1.1.post2209072238",
14
+ "scikit-learn>=1.5.2",
15
+ "matplotlib>=3.9.2",
16
+ "torchsampler>=0.1.2",
17
+ "tf-keras>=2.18.0",
18
+ "pyaudio>=0.2.14",
19
+ "pyttsx3>=2.98",
20
+ "ollama>=0.3.3",
21
+ "chromadb>=0.5.20",
22
+ "speechrecognition>=3.11.0",
23
  ]
24
+ deepface = {git = "https://github.com/serengil/deepface", tag="v0.0.93"}
uv.lock CHANGED
The diff for this file is too large to render. See raw diff