harshp8l commited on
Commit
9690d29
1 Parent(s): fb2f1af

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
.gitattributes CHANGED
@@ -33,3 +33,36 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ transcriber/lib/python3.11/site-packages/PIL/.dylibs/libfreetype.6.dylib filter=lfs diff=lfs merge=lfs -text
37
+ transcriber/lib/python3.11/site-packages/PIL/.dylibs/libharfbuzz.0.dylib filter=lfs diff=lfs merge=lfs -text
38
+ transcriber/lib/python3.11/site-packages/PIL/.dylibs/libtiff.6.dylib filter=lfs diff=lfs merge=lfs -text
39
+ transcriber/lib/python3.11/site-packages/PIL/_imaging.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
40
+ transcriber/lib/python3.11/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
41
+ transcriber/lib/python3.11/site-packages/functorch/.dylibs/libiomp5.dylib filter=lfs diff=lfs merge=lfs -text
42
+ transcriber/lib/python3.11/site-packages/gradio/frpc_darwin_amd64_v0.2 filter=lfs diff=lfs merge=lfs -text
43
+ transcriber/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-106fe5c7.js.map filter=lfs diff=lfs merge=lfs -text
44
+ transcriber/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-d3860fd7.js.map filter=lfs diff=lfs merge=lfs -text
45
+ transcriber/lib/python3.11/site-packages/grpc/_cython/cygrpc.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
46
+ transcriber/lib/python3.11/site-packages/llvmlite/binding/libllvmlite.dylib filter=lfs diff=lfs merge=lfs -text
47
+ transcriber/lib/python3.11/site-packages/numpy/.dylibs/libgfortran.5.dylib filter=lfs diff=lfs merge=lfs -text
48
+ transcriber/lib/python3.11/site-packages/numpy/.dylibs/libopenblas64_.0.dylib filter=lfs diff=lfs merge=lfs -text
49
+ transcriber/lib/python3.11/site-packages/numpy/core/_multiarray_umath.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
50
+ transcriber/lib/python3.11/site-packages/numpy/core/_simd.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
51
+ transcriber/lib/python3.11/site-packages/objc/_objc.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
52
+ transcriber/lib/python3.11/site-packages/pandas/_libs/algos.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
53
+ transcriber/lib/python3.11/site-packages/pandas/_libs/groupby.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
54
+ transcriber/lib/python3.11/site-packages/pandas/_libs/hashtable.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
55
+ transcriber/lib/python3.11/site-packages/pandas/_libs/interval.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
56
+ transcriber/lib/python3.11/site-packages/pandas/_libs/join.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
57
+ transcriber/lib/python3.11/site-packages/pydantic_core/_pydantic_core.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
58
+ transcriber/lib/python3.11/site-packages/speech_recognition/flac-linux-x86 filter=lfs diff=lfs merge=lfs -text
59
+ transcriber/lib/python3.11/site-packages/speech_recognition/flac-linux-x86_64 filter=lfs diff=lfs merge=lfs -text
60
+ transcriber/lib/python3.11/site-packages/speech_recognition/pocketsphinx-data/en-US/acoustic-model/mdef filter=lfs diff=lfs merge=lfs -text
61
+ transcriber/lib/python3.11/site-packages/speech_recognition/pocketsphinx-data/en-US/acoustic-model/sendump filter=lfs diff=lfs merge=lfs -text
62
+ transcriber/lib/python3.11/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
63
+ transcriber/lib/python3.11/site-packages/tiktoken/_tiktoken.cpython-311-darwin.so filter=lfs diff=lfs merge=lfs -text
64
+ transcriber/lib/python3.11/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
65
+ transcriber/lib/python3.11/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
66
+ transcriber/lib/python3.11/site-packages/torch/lib/libiomp5.dylib filter=lfs diff=lfs merge=lfs -text
67
+ transcriber/lib/python3.11/site-packages/torch/lib/libtorch_cpu.dylib filter=lfs diff=lfs merge=lfs -text
68
+ transcriber/lib/python3.11/site-packages/torch/lib/libtorch_python.dylib filter=lfs diff=lfs merge=lfs -text
2.wav ADDED
Binary file (881 kB). View file
 
GUI/GUI.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tkinter as tk
2
+ import pyaudio
3
+ import wave
4
+
5
+ # Voice recording params
6
+ CHUNK = 1024
7
+ FORMAT = pyaudio.paInt16
8
+ CHANNELS = 1
9
+ RATE = 44100
10
+ RECORD_SECONDS = 5
11
+ WAVE_OUTPUT_FILENAME = "output.wav"
12
+
13
+ root = tk.Tk()
14
+ root.geometry("400x300")
15
+
16
+ # Audio funcs
17
+ def start_record():
18
+
19
+ print('Recording')
20
+ global recorder
21
+ recorder = pyaudio.PyAudio()
22
+
23
+ global stream
24
+ stream = recorder.open(format=FORMAT,
25
+ channels=CHANNELS,
26
+ rate=RATE,
27
+ input=True,
28
+ frames_per_buffer=CHUNK)
29
+
30
+ global frames
31
+ frames = []
32
+
33
+ # from stream get audio data
34
+ audio_generator = stream.read(CHUNK)
35
+
36
+
37
+ stream.start_stream()
38
+
39
+ button = tk.Button(root, text="Record", font=("Arial", 32), padx=50, pady=20, command=start_record)
40
+ button.pack()
41
+
42
+ def stop_record():
43
+
44
+ print('Stopping')
45
+ stream.stop_stream()
46
+ stream.close()
47
+ recorder.terminate()
48
+
49
+ waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
50
+ waveFile.setnchannels(CHANNELS)
51
+ waveFile.setsampwidth(recorder.get_sample_size(FORMAT))
52
+ waveFile.setframerate(RATE)
53
+ waveFile.writeframes(b''.join(frames))
54
+ waveFile.close()
55
+
56
+ def callback(in_data, frame_count, time_info, status):
57
+ frames.append(in_data)
58
+ return in_data, pyaudio.paContinue
59
+
60
+ button = tk.Button(root, text="Stop", font=("Arial", 32), padx=50, pady=20, command=stop_record)
61
+ button.pack()
62
+
63
+ root.mainloop()
GUI/basicGUI.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tkinter as tk
2
+
3
+ root = tk.Tk()
4
+ root.title("My GUI App")
5
+
6
+ button = tk.Button(root, text="Click Me!")
7
+ button.pack()
8
+
9
+ def on_button_click():
10
+ print("Button was clicked!")
11
+
12
+ button.config(command=on_button_click)
13
+
14
+ root.mainloop()
GUI/initial-test.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ import wave
3
+ import tkinter as tk
4
+ from tkinter import Button, Label
5
+
6
+ CHUNK = 1024
7
+ FORMAT = pyaudio.paInt16 #paInt8
8
+ CHANNELS = 1
9
+ RATE = 44100 #sample rate
10
+ RECORD_SECONDS = 5
11
+ WAVE_OUTPUT_FILENAME = "output.wav"
12
+
13
+ p = pyaudio.PyAudio()
14
+
15
+ stream = p.open(format=FORMAT,
16
+ channels=CHANNELS,
17
+ rate=RATE,
18
+ input=True,
19
+ frames_per_buffer=CHUNK) #buffer
20
+
21
+ frames = []
22
+
23
+ root = tk.Tk()
24
+ root.title("Voice Recorder")
25
+
26
+ label = Label(root, text="Press button to start recording")
27
+ label.pack()
28
+
29
+ def start_record():
30
+ global frames
31
+ frames = []
32
+ label.config(text="Recording...")
33
+ stream.start_stream()
34
+
35
+ def stop_record():
36
+ label.config(text="Recording stopped")
37
+ stream.stop_stream()
38
+ wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
39
+ wf.setnchannels(CHANNELS)
40
+ wf.setsampwidth(p.get_sample_size(FORMAT))
41
+ wf.setframerate(RATE)
42
+ wf.writeframes(b''.join(frames))
43
+ wf.close()
44
+
45
+ record_button = Button(root, text="Record", command=start_record)
46
+ record_button.pack()
47
+
48
+ stop_button = Button(root, text="Stop", command=stop_record)
49
+ stop_button.pack()
50
+
51
+ def callback(in_data, frame_count, time_info, status):
52
+ frames.append(in_data)
53
+ return (in_data, pyaudio.paContinue)
54
+
55
+ stream.start_stream()
56
+
57
+ try:
58
+ while stream.is_active():
59
+ stream.write(callback(in_data, frame_count, time_info, status))
60
+ except KeyboardInterrupt:
61
+ pass
62
+
63
+ stream.stop_stream()
64
+ stream.close()
65
+ p.terminate()
66
+
67
+ root.mainloop()
GUI/output.wav ADDED
Binary file (44 Bytes). View file
 
GUI/real-time.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import whisper
2
+ import pyaudio
3
+ import tkinter as tk
4
+
5
+ # PyAudio config
6
+ CHUNK = 1024
7
+ FORMAT = pyaudio.paInt16
8
+ CHANNELS = 1
9
+ RATE = 44100
10
+
11
+ # Create GUI
12
+ root = tk.Tk()
13
+ text_box = tk.Text(root)
14
+ text_box.pack()
15
+
16
+ # Load Whisper
17
+ model = whisper.load_model("base")
18
+
19
+ # Start audio stream
20
+ p = pyaudio.PyAudio()
21
+ stream = p.open(format=FORMAT,
22
+ channels=CHANNELS,
23
+ rate=RATE,
24
+ input=True,
25
+ frames_per_buffer=CHUNK)
26
+
27
+ # Listen to mic in a loop
28
+ while True:
29
+ data = stream.read(CHUNK)
30
+ text = model.transcribe(data)['text']
31
+
32
+ # Insert into GUI
33
+ text_box.insert('end', text)
34
+ text_box.see('end')
35
+
36
+ root.update()
37
+
38
+ stream.stop_stream()
39
+ stream.close()
40
+ p.terminate()
README ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gcloud auth application-default login
2
+ gcloud auth login
3
+ gcloud components update
4
+ gcloud config set project intense-base-386112
5
+
6
+ export GOOGLE_APPLICATION_CREDENTIALS="/Users/harshpatel/Downloads/intense-base-386112-44f12a13694e.json"
7
+
8
+ MAKE SURE TO SPECIFY RECORD_SECONDS, remove comment and print statements, and add api key
9
+
10
+ Remaining - actually configure recording time with keystroke, and try whisper model
11
+
12
+ Keep laptop open for clear mic
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
- title: Transcriber Prompt
3
- emoji: 🐠
4
- colorFrom: red
5
- colorTo: purple
6
  sdk: gradio
7
  sdk_version: 3.40.1
8
- app_file: app.py
9
- pinned: false
10
  ---
 
 
 
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
1
  ---
2
+ title: transcriber-prompt
3
+ app_file: app_gradio.py
 
 
4
  sdk: gradio
5
  sdk_version: 3.40.1
 
 
6
  ---
7
+ - Run final_runner.py : Specify RECORD_SECONDS. And uncomment out openai chat code
8
+ - During the actual run, remove the printed text of the transcription
9
+ - Use bardapi code as another option
10
 
11
+ TODO:
12
+ - But in a --help option, and also programmatically be able to use whisper, googlespeech, bard, and chatgpt
__pycache__/bardapi.cpython-311.pyc ADDED
Binary file (465 Bytes). View file
 
ai.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ import wave
3
+ from pynput import keyboard
4
+
5
+ FORMAT = pyaudio.paInt16
6
+ CHANNELS = 1
7
+ RATE = 44100
8
+ CHUNK = 1024
9
+ #RECORD_SECONDS = 5
10
+ WAVE_OUTPUT_FILENAME = "test.wav"
11
+
12
+ frames = []
13
+
14
+ def on_press(key):
15
+ global frames
16
+
17
+ if key == keyboard.Key.esc:
18
+ # Stop recording
19
+ audio = pyaudio.PyAudio()
20
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
21
+ rate=RATE, input=True,
22
+ frames_per_buffer=CHUNK)
23
+ stream.stop_stream()
24
+ stream.close()
25
+ audio.terminate()
26
+ print("Finished recording audio.")
27
+ # Save recorded audio data to a .wav file
28
+ wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
29
+ wf.setnchannels(CHANNELS)
30
+ wf.setsampwidth(audio.get_sample_size(FORMAT))
31
+ wf.setframerate(RATE)
32
+ wf.writeframes(b''.join(frames))
33
+ wf.close()
34
+ # Stop the listener
35
+ return False
36
+
37
+ elif key.char == 'a':
38
+ # Start recording
39
+ audio = pyaudio.PyAudio()
40
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
41
+ rate=RATE, input=True,
42
+ frames_per_buffer=CHUNK)
43
+ print("Recording audio...")
44
+ while True:
45
+ data = stream.read(CHUNK)
46
+ frames.append(data)
47
+ if keyboard.Controller().pressed(keyboard.Key.esc):
48
+ print("esc key press here")
49
+ break
50
+ stream.stop_stream()
51
+ stream.close()
52
+ audio.terminate()
53
+
54
+ # Start the listener
55
+ with keyboard.Listener(on_press=on_press) as listener:
56
+ listener.join()
57
+
answerPalm.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pprint
2
+ import google.generativeai as palm
3
+
4
+ palm.configure(api_key='AIzaSyCLy2IgNwMBDbhYH_zvUDo0AMWQdRLQI0E')
5
+
6
+ prompt = input("Ask: ")
7
+ #prompt = """
8
+ #You are an expert at solving coding interview problems in Python.
9
+ #Describe how you arrived at the most optimal solution.
10
+ #Break the question down into manageable segments.
11
+ #Think about it step by step, and show your work.
12
+ #Say yes and ask for problem when ready.
13
+ #"""
14
+ #Here is the problem:
15
+
16
+ completion = palm.generate_text(
17
+ model='models/text-bison-001',
18
+ prompt=prompt,
19
+ temperature=0,
20
+ # The maximum length of the response
21
+ max_output_tokens=800,
22
+ ).result
23
+
24
+ print(completion)
25
+ breakpoint()
26
+ prompt += ' ' + completion
27
+
28
+ while True:
29
+ ask = input("Ask: ")
30
+ prompt += ' ' + ask
31
+ answer = palm.generate_text(
32
+ model='models/text-bison-001',
33
+ prompt=prompt,
34
+ temperature=0,
35
+ # The maximum length of the response
36
+ max_output_tokens=800,
37
+ ).result
38
+ print(answer)
39
+ prompt += ' ' + answer
app_gradio.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import whisper
2
+ import gradio as gr
3
+ import time
4
+ import google.generativeai as palm
5
+ palm.configure(api_key='AIzaSyCLy2IgNwMBDbhYH_zvUDo0AMWQdRLQI0E')
6
+ model = whisper.load_model("base")
7
+ print(model.device)
8
+
9
+ def transcribe(audio):
10
+ #time.sleep(3)
11
+ # load audio and pad/trim it to fit 30 seconds
12
+ audio = whisper.load_audio(audio)
13
+ audio = whisper.pad_or_trim(audio)
14
+
15
+ # make log-Mel spectrogram and move to the same device as the model
16
+ mel = whisper.log_mel_spectrogram(audio).to(model.device)
17
+
18
+ # # detect the spoken language
19
+ # _, probs = model.detect_language(mel)
20
+ # print(f"Detected language: {max(probs, key=probs.get)}")
21
+
22
+ # decode the audio
23
+ # options = whisper.DecodingOptions()
24
+ # for cpu
25
+ options = whisper.DecodingOptions(fp16=False)
26
+ result = whisper.decode(model, mel, options)
27
+
28
+ print(result.text)
29
+ completion = palm.generate_text(
30
+ model='models/text-bison-001',
31
+ prompt=result.text,
32
+ temperature=0,
33
+ # The maximum length of the response
34
+ max_output_tokens=500,
35
+ ).result
36
+ return completion
37
+
38
+ gr.Interface(
39
+ title = 'Real-time AI-based Audio Transcription, Recognition, Answerer Web App',
40
+ fn=transcribe,
41
+ inputs=[
42
+ gr.inputs.Audio(source="microphone", type="filepath")
43
+ ],
44
+ outputs=[
45
+ "textbox"
46
+ ],
47
+ live=True).launch(share=True)
bardrun.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bardapi import Bard
2
+
3
+ #initialize via F12 → Application → Cookies → Copy the value of __Secure-1PSID cookie.
4
+ token = 'YAjsmzQTZ54vCho1kl9GG5MbJgVVCsUoF0cnVfuSHVqb1BMnrIzVEyV9YYZvxNbzhrJNDQ.'
5
+ bard = Bard(token=token)
6
+
7
+ # Setup state
8
+ context = "You are a super helpful tutor and excellent interviewee. In general you explain your thought process and concepts very well. You first explain simple brute force solutions to interview problem (no need to code) but still go over the time and space complexity, then you explain the steps leading you to the most optimized solution. You finally explain the concepts and procedures of this optimized solution step by step and then you MUST provide the final code in python with its time and space complexity."
9
+ bard.get_answer(context)
10
+ prompt = input('You: ')
11
+ print()
12
+
13
+ # Continue chat
14
+ while len(prompt) != 0:
15
+ print(f"Bard: {bard.get_answer(prompt)['content']}\n")
16
+ prompt = input('You: ')
17
+ print()
18
+
19
+
chat.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import openai
2
+ openai.api_key = "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"
3
+
4
+ # def generate_text(prompt, model, temperature=0.5, max_tokens=1024):
5
+ # """
6
+ # Generates text using the OpenAI API.
7
+ # :param prompt: The prompt for the text generation.
8
+ # :param model: The ID of the OpenAI model to use.
9
+ # :param temperature: The temperature of the text generation. (default: 0.5)
10
+ # :param max_tokens: The maximum number of tokens to generate. (default: 1024)
11
+ # :return: The generated text.
12
+ # """
13
+ # response = openai.Completion.create(
14
+ # engine=model,
15
+ # prompt=prompt,
16
+ # temperature=temperature,
17
+ # max_tokens=max_tokens
18
+ # )
19
+ # return response.choices[0].text.strip()
20
+
21
+
22
+ # print(generate_text("hello, there", "gpt-3.5-turbo"))
23
+
24
+ def update_chat(messages, content):
25
+ messages.append({"role": "user", "content": content})
26
+ return messages
27
+
28
+
29
+ # Clarifying questions - feel free to ask them but also
30
+ messages = [
31
+ {"role": "system", "content": "You are a super helpful tutor and excellent interviewee. In general you explain your thought process and concepts very well. You first explain simple brute force solutions to interview problem (no need to code) but still go over the time and space complexity, then you explain the steps leading you to the most optimized solution. You explain the concepts and procedures of this optimized solution and then you MUST provide the final code in python with its time and space complexity. "}
32
+ ]
33
+ text = "Find the median of two sorted arrays"
34
+ print('here')
35
+ response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=update_chat(messages, text))
36
+ print(response['choices'][0]['message']['content'])
37
+
38
+
39
+ #gpt-3.5-turbo
40
+
41
+ #pre_prompt = "I am writing a letter to my friend. Here is what I want to say:"
42
+
43
+ # update_chat messages.append({role, content}) return messages
44
+
45
+ #
46
+
47
+
48
+ # Interview prompt
49
+ # You are a super helpful tutor and excellent interviewee. You explain brute force solutions to interview problems first (no need to code) but still go over the time and space complexity, then the steps leading you to the most optimized version. You explain the concepts and procedures of this optimized version and then provide the final code in python with its time and space complexity. Say yes if you are ready
explainSoln.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pprint
2
+ import google.generativeai as palm
3
+ palm.configure(api_key='AIzaSyCLy2IgNwMBDbhYH_zvUDo0AMWQdRLQI0E')
4
+
5
+
6
+ prompt = """
7
+ You are an expert at explaining coding interview solutions in Python.
8
+
9
+ Explain the following solution:
10
+
11
+ ```
12
+ def coinChange(self, coins: List[int], amount: int) -> int:
13
+ dp = [amount + 1] * (amount + 1)
14
+ dp[0] = 0
15
+
16
+ for a in range(1, amount + 1):
17
+ for c in coins:
18
+ if a - c >= 0:
19
+ dp[a] = min(dp[a], 1 + dp[a - c])
20
+ return dp[amount] if dp[amount] != amount + 1 else -1
21
+ ```
22
+
23
+ Think about it step by step, and show your work.
24
+ Afterwards, run through an example input.
25
+ """
26
+
27
+ completion = palm.generate_text(
28
+ model='models/text-bison-001',
29
+ prompt=prompt,
30
+ temperature=0,
31
+ # The maximum length of the response
32
+ max_output_tokens=1200,
33
+ )
34
+
35
+ print(completion.result)
final_runner.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # USAGE keep using , (to prompt the bot via voice and keep adding on layers to message content for chat completion),
2
+ # after successful run through backtick is the exit, early stop after instructions have been fulfilled
3
+
4
+ #TODO: Automatically stop when to recording using keystroke, Whisper? model
5
+ # MAKE SURE TO SPECIFY RECORD_SECONDS, remove comment and print statements, and add api key
6
+
7
+ import os
8
+ import pyaudio
9
+ import wave
10
+ from pynput import keyboard
11
+ import speech_recognition as sr
12
+ import time
13
+ import openai
14
+ from bardapi import Bard
15
+ import google.generativeai as palm
16
+ openai.api_key = "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"
17
+ #bard = Bard(token='XAjsm7r2qks6TRYvnuR7nbMlAHeJRSd4DHZIhvQ5NdHjgcTOYngb2GN2juVCNZSLwkeDuQ.')
18
+ palm.configure(api_key='AIzaSyCLy2IgNwMBDbhYH_zvUDo0AMWQdRLQI0E')
19
+
20
+ FORMAT = pyaudio.paInt16
21
+ CHANNELS = 2
22
+ RATE = 44100
23
+ CHUNK = 1024
24
+ RECORD_SECONDS = 4 ##HERE##
25
+ #WAVE_OUTPUT_FILENAME = "2.wav"
26
+
27
+ frames = []
28
+
29
+ def update_chat(messages, content):
30
+ messages.append({"role": "user", "content": content})
31
+ return messages
32
+
33
+ # messages = [
34
+ # # include in step - by - step more (0-shot)
35
+ # {"role": "system", "content": "You are a super helpful tutor and excellent interviewee. In general you explain your thought process and concepts very well. You first explain simple brute force solutions to interview problem (no need to code) but still go over the time and space complexity, then you explain the steps leading you to the most optimized solution. You explain the concepts and procedures of this optimized solution step by step and then you MUST provide the final code in python with its time and space complexity."}
36
+ # ]
37
+ messages = [
38
+ # include in step - by - step more (0-shot)
39
+ {"role": "system", "content": "You are a super helpful tutor and excellent interviewee. In general you explain your thought process and concepts very well. You first explain simple brute force solutions to system design interview problem, then make appropriate assumptions and explain the steps leading you to the most optimized solution. Provide functional and non-functional requirements, back of the envelope calculation, apis needed, data/databases if needed, and a high level scalable design. You MUST explain the concepts and procedures of this optimized solution step by step while providing the system design architecture"}
40
+ ]
41
+ global bard_context
42
+ bard_context = [messages[0]['content']]
43
+
44
+ def update_bard(messages, content):
45
+ msg = ': '.join(messages) + content
46
+ ans = bard.get_answer(msg)['content']
47
+ messages.append(ans)
48
+ return messages, ans
49
+
50
+ global palm_context
51
+ palm_context = [messages[0]['content']]
52
+
53
+ def update_palm(messages, content):
54
+ msg = ': '.join(messages) + ': ' + content
55
+ ans = palm.generate_text(
56
+ model='models/text-bison-001',
57
+ prompt=msg,
58
+ temperature=0,
59
+ # The maximum length of the response
60
+ max_output_tokens=2000,
61
+ ).result
62
+ messages.append(ans)
63
+ return messages, ans
64
+
65
+ #Line to add: [Let's think step by step.]
66
+
67
+ def on_press(key):
68
+
69
+ if key == keyboard.KeyCode.from_char('$'):
70
+ # Start recording
71
+ print("Ready...", end=" ")
72
+ global stream, audio
73
+ audio = pyaudio.PyAudio()
74
+ print("Recording audio...")
75
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
76
+ rate=RATE, input=True,
77
+ frames_per_buffer=CHUNK,
78
+ input_device_index=2) # when brio is connected use mac
79
+ for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
80
+ data = stream.read(CHUNK)
81
+ frames.append(data)
82
+
83
+ # Stop recording
84
+ stream.stop_stream()
85
+ stream.close()
86
+ audio.terminate()
87
+ print("Finished recording audio.")
88
+ r = sr.Recognizer()
89
+ audio_data = sr.AudioData(b''.join(frames), RATE, 2)
90
+ #print(audio_data)
91
+ text = r.recognize_google(audio_data)
92
+ #text = r.recognize_google(audio_data, language = 'en-US', show_all = True)
93
+ print(text) # REMOVE THIS DURING ACTUAL
94
+ print("Finished transcription")
95
+ # TODO: Once audio is fully done then send to gpt to answer via another keystroke.
96
+ # app currently keeps appending to transcription with each comma...
97
+
98
+ #response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=update_chat(messages, text))
99
+ #os.system('clear')
100
+ #print(response['choices'][0]['message']['content'])
101
+ #global bard_context
102
+ #bard_context, response = update_bard(bard_context, text)
103
+ global palm_context
104
+ palm_context, response = update_palm(palm_context, text)
105
+ os.system('clear')
106
+ print(response)
107
+
108
+ elif key == keyboard.KeyCode.from_char('`'):
109
+ print("Exiting")
110
+ return False
111
+
112
+ elif key == keyboard.KeyCode.from_char('|'):
113
+ text = input()
114
+ print("Completing")
115
+ #response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=update_chat(messages, text))
116
+ #print(response['choices'][0]['message']['content'])
117
+ #bard_context, response = update_bard(bard_context, text)
118
+ palm_context, response = update_palm(palm_context, text)
119
+ print(response)
120
+
121
+
122
+
123
+ def on_release(key):
124
+ if key == keyboard.KeyCode.from_char('`'): # UNTESTED CODE
125
+ return False
126
+
127
+ # Start the listener
128
+ with keyboard.Listener(on_press=on_press) as listener:
129
+ listener.join()
130
+
131
+
132
+
133
+
134
+ # from bardapi import Bard
135
+ #
136
+ # #initialize via F12 Application → Cookies → Copy the value of __Secure-1PSID cookie.
137
+ # token = 'XAjsm7r2qks6TRYvnuR7nbMlAHeJRSd4DHZIhvQ5NdHjgcTOYngb2GN2juVCNZSLwkeDuQ.'
138
+ # bard = Bard(token=token)
139
+ #
140
+ # # Setup state
141
+ # context = "You are a super helpful tutor and excellent interviewee. In general you explain your thought process and concepts very well. You first explain simple brute force solutions to interview problem (no need to code) but still go over the time and space complexity, then you explain the steps leading you to the most optimized solution. You finally explain the concepts and procedures of this optimized solution step by step and then you MUST provide the final code in python with its time and space complexity."
142
+ # bard.get_answer(context)
143
+ # prompt = input('You: ')
144
+ # print()
145
+ #
146
+ # # Continue chat
147
+ # while len(prompt) != 0:
148
+ # print(f"Bard: {bard.get_answer(prompt)['content']}\n")
149
+ # prompt = input('You: ')
150
+ # print()
151
+ #
152
+ #
gtest.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import os
3
+
4
+ # Imports the Google Cloud client library
5
+ from google.cloud import speech
6
+
7
+ # Instantiates a client
8
+ client = speech.SpeechClient()
9
+
10
+ # The name of the audio file to transcribe
11
+ file_name = os.path.join(os.path.dirname(__file__), '2.wav')
12
+
13
+ # Loads the audio into memory
14
+ with io.open(file_name, 'rb') as audio_file:
15
+ content = audio_file.read()
16
+ audio = speech.RecognitionAudio(content=content)
17
+
18
+ # Specifies the audio encoding and language
19
+ config = speech.RecognitionConfig(
20
+ encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
21
+ language_code='en-US')
22
+
23
+ # Detects speech in the audio file
24
+ response = client.recognize(config=config, audio=audio)
25
+
26
+ # Prints the transcription of the audio
27
+ for result in response.results:
28
+ print('Transcript: {}'.format(result.alternatives[0].transcript))
29
+
gtrans.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def transcribe_file(speech_file):
2
+ """Transcribe the given audio file asynchronously."""
3
+ from google.cloud import speech
4
+
5
+ client = speech.SpeechClient()
6
+
7
+ with open(speech_file, "rb") as audio_file:
8
+ content = audio_file.read()
9
+
10
+ """
11
+ Note that transcription is limited to a 60 seconds audio file.
12
+ Use a GCS file for audio longer than 1 minute.
13
+ """
14
+ audio = speech.RecognitionAudio(content=content)
15
+
16
+ config = speech.RecognitionConfig(
17
+ encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
18
+ sample_rate_hertz=16000,
19
+ language_code="en-US",
20
+ )
21
+
22
+
23
+ operation = client.long_running_recognize(config=config, audio=audio)
24
+
25
+ print("Waiting for operation to complete...")
26
+ response = operation.result(timeout=90)
27
+
28
+ # Each result is for a consecutive portion of the audio. Iterate through
29
+ # them to get the transcripts for the entire audio file.
30
+ for result in response.results:
31
+ # The first alternative is the most likely one for this portion.
32
+ print("Transcript: {}".format(result.alternatives[0].transcript))
33
+ print("Confidence: {}".format(result.alternatives[0].confidence))
34
+
35
+
36
+ transcribe_file('2.wav')
keypress.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ from pynput import keyboard
3
+ import wave
4
+
5
+ # Define constants for PyAudio
6
+ CHUNK = 1024
7
+ FORMAT = pyaudio.paInt16
8
+ CHANNELS = 1
9
+ RATE = 44100
10
+
11
+ # Define the on_press function
12
+ def on_press(key):
13
+ print("Key pressed: ", key)
14
+ if key == keyboard.Key.esc:
15
+ return False
16
+ print(type(key))
17
+ print(str(key))
18
+ if str(key) == 'a':
19
+ print('here')
20
+ audio = pyaudio.PyAudio()
21
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
22
+ rate=RATE, input=True,
23
+ frames_per_buffer=CHUNK)
24
+ print("Recording audio...")
25
+ frames = []
26
+ while True:
27
+ data = stream.read(CHUNK)
28
+ frames.append(data)
29
+ if not keyboard.is_pressed('a'):
30
+ break
31
+ if not listener.running:
32
+ break
33
+ stream.stop_stream()
34
+ stream.close()
35
+ audio.terminate()
36
+ print("Finished recording audio.")
37
+ # Save recorded audio data to a .wav file
38
+ wf = wave.open("recorded_audio.wav", 'wb')
39
+ wf.setnchannels(CHANNELS)
40
+ wf.setsampwidth(audio.get_sample_size(FORMAT))
41
+ wf.setframerate(RATE)
42
+ wf.writeframes(b''.join(frames))
43
+ wf.close()
44
+
45
+ # Start the listener
46
+ with keyboard.Listener(on_press=on_press) as listener:
47
+ listener.join()
keypress.py-2 ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ import numpy as np
3
+ from pynput import keyboard
4
+ import wave
5
+
6
+ def record_on_keypress(keycode, chunk=1024, rate=44100):
7
+ """
8
+ Records audio from the default microphone when a key is pressed and stops when the key is released.
9
+ :param keycode: The keycode of the key that triggers the recording.
10
+ :param chunk: The number of audio frames per buffer. (default: 1024)
11
+ :param rate: The sampling rate of the audio. (default: 44100)
12
+ :return: A PyAudio stream object and a NumPy array of the recorded audio.
13
+ """
14
+
15
+ def on_press(key):
16
+ if key == keycode:
17
+ # start recording
18
+ print("start recording")
19
+ frames = []
20
+ while True:
21
+ data = stream.read(chunk)
22
+ frames.append(data)
23
+ if listener.current_key != keycode:
24
+ break
25
+
26
+ # convert frames to NumPy array
27
+ audio = np.frombuffer(b''.join(frames), dtype=np.int16)
28
+
29
+ # stop listener and 'return audio
30
+ listener.stop()
31
+ print(audio)
32
+ return stream, audio
33
+
34
+ p = pyaudio.PyAudio()
35
+ stream = p.open(format=pyaudio.paInt16,
36
+ channels=1,
37
+ rate=rate,
38
+ input=True,
39
+ frames_per_buffer=chunk)
40
+
41
+ print("listener starting recording")
42
+ breakpoint()
43
+ stream.start_stream()
44
+ with keyboard.Listener(on_press=on_press) as listener:
45
+ listener.join()
46
+ print("end recording")
47
+
48
+ # stop recording and close stream
49
+ stream.stop_stream()
50
+ stream.close()
51
+ p.terminate()
52
+
53
+ wf = wave.open("output.wav", 'wb')
54
+ wf.setnchannels(2)
55
+ wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
56
+ wf.setframerate(44100)
57
+ wf.writeframes(b''.join(frames))
58
+ wf.close()
59
+
60
+ print(record_on_keypress('+'))
monitor.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pynput import keyboard
2
+
3
+ def on_press(key):
4
+ try:
5
+ print('alphanumeric key {0} pressed'.format(
6
+ key.char))
7
+ except AttributeError:
8
+ print('special key {0} pressed'.format(
9
+ key))
10
+
11
+ def on_release(key):
12
+ print('{0} released'.format(
13
+ key))
14
+ if key == keyboard.Key.esc:
15
+ # Stop listener
16
+ return False
17
+
18
+ # Collect events until released
19
+ with keyboard.Listener(
20
+ on_press=on_press,
21
+ on_release=on_release) as listener:
22
+ listener.join()
23
+
24
+ #key.__str__# ...or, in a non-blocking fashion:
25
+ #listener = keyboard.Listener(
26
+ # on_press=on_press,
27
+ # on_release=on_release)
28
+ #listener.start()
palmapikey ADDED
@@ -0,0 +1 @@
 
 
1
+ AIzaSyCLy2IgNwMBDbhYH_zvUDo0AMWQdRLQI0E
play.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ import wave
3
+ import sys
4
+
5
+ CHUNK = 1024
6
+
7
+ if len(sys.argv) < 2:
8
+ print("Plays a wave file.\n\nUsage: %s filename.wav" % sys.argv[0])
9
+ sys.exit(-1)
10
+
11
+ wf = wave.open(sys.argv[1], 'rb')
12
+
13
+ # instantiate PyAudio (1)
14
+ p = pyaudio.PyAudio()
15
+
16
+ # open stream (2)
17
+ stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
18
+ channels=wf.getnchannels(),
19
+ rate=wf.getframerate(),
20
+ output=True)
21
+
22
+ # read data
23
+ data = wf.readframes(CHUNK)
24
+
25
+ # play stream (3)
26
+ while len(data) > 0:
27
+ stream.write(data)
28
+ data = wf.readframes(CHUNK)
29
+
30
+ # stop stream (4)
31
+ stream.stop_stream()
32
+ stream.close()
33
+
34
+ # close PyAudio (5)
35
+ p.terminate()
recorder.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ import wave
3
+
4
+ CHUNK = 1024
5
+ FORMAT = pyaudio.paInt16
6
+ CHANNELS = 1
7
+ RATE = 44100
8
+ RECORD_SECONDS = 5
9
+ WAVE_OUTPUT_FILENAME = "output.wav"
10
+
11
+ p = pyaudio.PyAudio()
12
+
13
+ stream = p.open(format=FORMAT,
14
+ channels=CHANNELS,
15
+ rate=RATE,
16
+ input=True,
17
+ frames_per_buffer=CHUNK)
18
+
19
+ print("* recording")
20
+
21
+ frames = []
22
+
23
+ for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
24
+ data = stream.read(CHUNK)
25
+ frames.append(data)
26
+
27
+ print("* done recording")
28
+
29
+ stream.stop_stream()
30
+ stream.close()
31
+ p.terminate()
32
+
33
+ wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
34
+ wf.setnchannels(CHANNELS)
35
+ wf.setsampwidth(p.get_sample_size(FORMAT))
36
+ wf.setframerate(RATE)
37
+ wf.writeframes(b''.join(frames))
38
+ wf.close()
39
+
test.wav ADDED
Binary file (2.09 kB). View file
 
testType.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def dfs(graph, start, visited=None):
2
+ if visited is None:
3
+ visited = set()
4
+ visited.add(start)
5
+ print(start)
6
+ for neighbor in graph[start]:
7
+ if neighbor not in visited:
8
+ dfs(graph, neighbor, visited)
9
+
10
+ def bfs(graph, start):
11
+ visited = set()
12
+ from collections import deque
13
+ queue = deque[(start)]
14
+ visited.add(start)
15
+ while queue:
16
+ node = queue.popleft()
17
+ print(node)
18
+ for neighbor in graph[node]:
19
+ if neighbor not in visited:
20
+ queue.append(neighbor)
21
+ visited.add(neighbor)
22
+
23
+ import heapq
24
+ heap = []
25
+ heapq.heappush(heap, 3)
26
+ smallest = heapq.heappop(heap)
27
+
28
+ memo = {}
29
+ def fibonacci(n):
30
+ if n <= 1:
31
+ return n
32
+ if n not in memo:
33
+ memo[n] = fibonacci(n-1) + fibonacci(n-2)
34
+
35
+ return memo[n]
36
+
37
+ def knapsack(weights, values, capacity):
38
+ n = len(weights)
39
+ dp = [[0] * (capacity + 1) for _ in range(n+1)]
40
+
41
+ for i in range(1, n+1):
42
+ for j in range(1, capacity + 1):
43
+ if weights[i-1] <= j:
44
+ dp[i][j] = max(values[i-1] + dp[i-1][j-weights[i-1]], dp[i-1][j])
45
+ else:
46
+ dp[i][j] = dp[i-1][j]
47
+ return dp[n][capacity]
48
+
49
+
50
+ def backtrack(nums, path, result):
51
+ if len(path) == len(nums):
52
+ result.append(path[:])
53
+ return
54
+
55
+ for num in nums:
56
+ if num not in path:
57
+ path.append(num)
58
+ backtrack(nums, path, result)
59
+ path.pop()
60
+
61
+
62
+
63
+ def top_k_elemnts(nums, k):
64
+ heap = []
65
+ for num in nums:
66
+ heapq.heappush(heap, num)
67
+ if len(heap) > k:
68
+ heapq.heappop(heap)
69
+ return heap
70
+
71
+
72
+ from collections import defaultdict
73
+ def group_anagrams(strs):
74
+ grouped_anagrams = defauldict(list)
75
+ for word in strs:
76
+ sorted_word = ''.join(sorted(word))
77
+ grouped_anagrams[sorted_word].append(word)
78
+ return list(grouped_anagrams.values())
79
+
80
+
81
+ from collections import OrderedDict
82
+ class LRUCache:
83
+ def __init__(self, capacity):
84
+ self.capacity = capacity
85
+ self.cache = OrderedDict()
86
+
87
+ def get(self, key):
88
+ if key in self.cache:
89
+ value = self.cache[key]
90
+ del self.cache[key]
91
+ self.cache[key] = value
92
+ return value
93
+ else:
94
+ return -1
95
+
96
+ def put(self, key, value):
97
+ if key in self.cahce:
98
+ del self.cache[key]
99
+ elif len(self.cache) == self.capacity:
100
+ self.cache.popitem(last=False)
101
+ self.cache[key] = value
102
+
103
+
104
+ #random api things
105
+ arr.sort(key=lambda x: len(x))
106
+ for n1,n2 in zip(nums1, nums2):
107
+ print(n1,n2)
108
+ ''.join(strings)
109
+
110
+ mySet.add(1)
111
+ for k, v in map.items():
112
+ print(k, v)
113
+
114
+ #for max heap use min heap by default and multiply -1 when push and pop
115
+ arr = [2,1,8,4,5]
116
+ heapq.heapify(arr)
117
+ while arr:
118
+ print(heapq.heappop(arr))
119
+
120
+
121
+
122
+
123
+ def lengthofLIS(self, nums:List[int]) -> int:
124
+ LIS = [1] * len(nums)
125
+ for i in range(len(nums)-1, -1, -1):
126
+ for j in range(i+1, len(nums)):
127
+ if nums[i] < nums[j]:
128
+ LIS[i] = max(LIS[i], 1+LIS[j])
129
+ return max(LIS)
tester.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ import wave
3
+ from pynput import keyboard
4
+
5
+ FORMAT = pyaudio.paInt16
6
+ CHANNELS = 2
7
+ RATE = 44100
8
+ CHUNK = 1024
9
+ RECORD_SECONDS = 5
10
+ WAVE_OUTPUT_FILENAME = "output.wav"
11
+
12
+ frames = []
13
+
14
+ def on_press(key):
15
+ global frames
16
+ if key.char == 'a':
17
+ print("Inside...")
18
+ # Start recording
19
+ audio = pyaudio.PyAudio()
20
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
21
+ rate=RATE, input=True,
22
+ frames_per_buffer=CHUNK)
23
+ print("Recording audio...")
24
+ while True:
25
+ data = stream.read(CHUNK)
26
+ frames.append(data)
27
+ if keyboard.is_pressed('esc'):
28
+ break
29
+ stream.stop_stream()
30
+ stream.close()
31
+ audio.terminate()
32
+
33
+ def on_release(key):
34
+ if key == keyboard.Key.esc:
35
+ # Stop recording
36
+ audio = pyaudio.PyAudio()
37
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
38
+ rate=RATE, input=True,
39
+ frames_per_buffer=CHUNK)
40
+ stream.stop_stream()
41
+ stream.close()
42
+ audio.terminate()
43
+ print("Finished recording audio.")
44
+ # Save recorded audio data to a .wav file
45
+ wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
46
+ wf.setnchannels(CHANNELS)
47
+ wf.setsampwidth(audio.get_sample_size(FORMAT))
48
+ wf.setframerate(RATE)
49
+ wf.writeframes(b''.join(frames))
50
+ wf.close()
51
+ # Stop the listener
52
+ return False
53
+
54
+ # Start the listener
55
+ with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
56
+ listener.join()
57
+
testpalm.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pprint
2
+ import google.generativeai as palm
3
+
4
+ palm.configure(api_key='AIzaSyCLy2IgNwMBDbhYH_zvUDo0AMWQdRLQI0E')
5
+ #models = [m for m in palm.list_models() if 'generateText' in m.supported_generation_methods]
6
+ #model = models[0].name
7
+ #print(model)
8
+
9
+
10
+ prompt = """
11
+ You are an expert at solving word problems.
12
+
13
+ Solve the following problem:
14
+
15
+ I have three houses, each with three cats.
16
+ each cat owns 4 mittens, and a hat. Each mitten was
17
+ knit from 7m of yarn, each hat from 4m.
18
+ How much yarn was needed to make all the items?
19
+
20
+ Think about it step by step, and show your work.
21
+ """
22
+
23
+ completion = palm.generate_text(
24
+ model='models/text-bison-001',
25
+ prompt=prompt,
26
+ temperature=0,
27
+ # The maximum length of the response
28
+ max_output_tokens=800,
29
+ )
30
+
31
+ print(completion.result)
testwhisper.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import whisper
2
+ import time
3
+ import timeit
4
+
5
+ model_tiny = whisper.load_model("tiny.en")
6
+ model_base = whisper.load_model("base.en")
7
+ model_small = whisper.load_model("small.en")
8
+
9
+ codes_to_time = ["print(model_tiny.transcribe('2.wav')['text'])",
10
+ "print(model_base.transcribe('2.wav')['text'])",
11
+ "print(model_small.transcribe('2.wav')['text'])"]
12
+
13
+ avg_times = []
14
+ for code_to_time in codes_to_time:
15
+ execution_time = timeit.timeit(code_to_time, globals=globals(), number=5)
16
+ avg_time = execution_time / 5.0
17
+ avg_times.append(avg_time)
18
+ print(f"Execution time: {avg_time} seconds")
19
+
20
+
21
+ print(avg_times)
22
+ # [1.2609960311994655, 1.8864748299994971, 6.38237024199916]
23
+ # From both a speed, and accuracy perspective base is best
24
+
25
+ # result = model.transcribe("2.wav")
26
+ # print(result["text"])
27
+
28
+ #TODO: Figure out whisper with python chunks to implement into runner
toApi.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # USAGE keep using , (to prompt the bot via voice and keep adding on layers to message content for chat completion),
2
+ # after successful run through backtick is the exit, early stop after instructions have been fulfilled
3
+
4
+ import os
5
+ import pyaudio
6
+ import wave
7
+ from pynput import keyboard
8
+ import speech_recognition as sr
9
+ import openai
10
+ openai.api_key = "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"
11
+
12
+ FORMAT = pyaudio.paInt16
13
+ CHANNELS = 1
14
+ RATE = 44100
15
+ CHUNK = 1024
16
+ RECORD_SECONDS = 5
17
+ #WAVE_OUTPUT_FILENAME = "2.wav"
18
+
19
+ frames = []
20
+
21
+ def update_chat(messages, content):
22
+ messages.append({"role": "user", "content": content})
23
+ return messages
24
+
25
+ messages = [
26
+ {"role": "system", "content": "You are a super helpful tutor and excellent interviewee. In general you explain your thought process and concepts very well. You first explain simple brute force solutions to interview problem (no need to code) but still go over the time and space complexity, then you explain the steps leading you to the most optimized solution. You explain the concepts and procedures of this optimized solution and then you MUST provide the final code in python with its time and space complexity."}
27
+ ]
28
+
29
+ def on_press(key):
30
+ #if key == keyboard.Key.esc:
31
+ # recording = False
32
+ # return False
33
+ if key == keyboard.KeyCode.from_char(','):
34
+ print("Recording audio...")
35
+ # Start recording
36
+ global stream, audio
37
+ audio = pyaudio.PyAudio()
38
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
39
+ rate=RATE, input=True,
40
+ frames_per_buffer=CHUNK)
41
+ for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
42
+ data = stream.read(CHUNK)
43
+ frames.append(data)
44
+
45
+ # Stop recording
46
+ stream.stop_stream()
47
+ stream.close()
48
+ audio.terminate()
49
+ print("Finished recording audio.")
50
+ audio_data = sr.AudioData(b''.join(frames), RATE, 2)
51
+ print(audio_data)
52
+ #r = sr.Recognizer()
53
+ #text = r.recognize_google(audio_data)
54
+ #print("Finished transcription")
55
+ #response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=update_chat(messages, text))
56
+ os.system('clear')
57
+ #print(response['choices'][0]['message']['content'])
58
+ elif key == keyboard.KeyCode.from_char('`'):
59
+ print("Exiting")
60
+ return False
61
+
62
+ def on_release(key):
63
+ if key == keyboard.Key.esc:
64
+ return False
65
+
66
+ # Start the listener
67
+ with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
68
+ listener.join()
69
+
toAudio.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ import wave
3
+ from pynput import keyboard
4
+
5
+ FORMAT = pyaudio.paInt16
6
+ CHANNELS = 1
7
+ RATE = 44100
8
+ CHUNK = 1024
9
+ RECORD_SECONDS = 10
10
+ WAVE_OUTPUT_FILENAME = "2.wav"
11
+
12
+ frames = []
13
+
14
+ def on_press(key):
15
+ #if key == keyboard.Key.esc:
16
+ # recording = False
17
+ # return False
18
+ if key == keyboard.KeyCode.from_char('a'):
19
+ print("Recording audio...")
20
+ # Start recording
21
+ global stream, audio
22
+ audio = pyaudio.PyAudio()
23
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
24
+ rate=RATE, input=True,
25
+ frames_per_buffer=CHUNK)
26
+ for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
27
+ data = stream.read(CHUNK)
28
+ frames.append(data)
29
+
30
+ # Stop recording
31
+ stream.stop_stream()
32
+ stream.close()
33
+ audio.terminate()
34
+ print("Finished recording audio.")
35
+ # Save recorded audio data to a .wav file
36
+ wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
37
+ wf.setnchannels(CHANNELS)
38
+ wf.setsampwidth(audio.get_sample_size(FORMAT))
39
+ wf.setframerate(RATE)
40
+ wf.writeframes(b''.join(frames))
41
+ wf.close()
42
+
43
+ def on_release(key):
44
+ if key == keyboard.Key.esc:
45
+ return False
46
+
47
+ # Start the listener
48
+ with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
49
+ listener.join()
50
+
toTranscribe.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyaudio
2
+ import wave
3
+ from pynput import keyboard
4
+ import speech_recognition as sr
5
+
6
+ FORMAT = pyaudio.paInt16
7
+ CHANNELS = 1
8
+ RATE = 44100
9
+ CHUNK = 1024
10
+ RECORD_SECONDS = 10
11
+ WAVE_OUTPUT_FILENAME = "2.wav"
12
+
13
+ frames = []
14
+
15
+ #def transcribe(wav):
16
+
17
+
18
+ def on_press(key):
19
+ #if key == keyboard.Key.esc:
20
+ # recording = False
21
+ # return False
22
+ if key == keyboard.KeyCode.from_char('a'):
23
+ print("Recording audio...")
24
+ # Start recording
25
+ global stream, audio
26
+ audio = pyaudio.PyAudio()
27
+ stream = audio.open(format=FORMAT, channels=CHANNELS,
28
+ rate=RATE, input=True,
29
+ frames_per_buffer=CHUNK)
30
+ for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
31
+ data = stream.read(CHUNK)
32
+ frames.append(data)
33
+
34
+ # Stop recording
35
+ stream.stop_stream()
36
+ stream.close()
37
+ audio.terminate()
38
+ print("Finished recording audio.")
39
+ audio_data = sr.AudioData(b''.join(frames), RATE, 2)
40
+ print(audio_data)
41
+ #r = sr.Recognizer()
42
+ #text = r.recognize_google(audio_data)
43
+ #print(text)
44
+
45
+ def on_release(key):
46
+ if key == keyboard.Key.esc:
47
+ return False
48
+
49
+ # Start the listener
50
+ with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
51
+ listener.join()
52
+
53
+
transcriber/bin/Activate.ps1 ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <#
2
+ .Synopsis
3
+ Activate a Python virtual environment for the current PowerShell session.
4
+
5
+ .Description
6
+ Pushes the python executable for a virtual environment to the front of the
7
+ $Env:PATH environment variable and sets the prompt to signify that you are
8
+ in a Python virtual environment. Makes use of the command line switches as
9
+ well as the `pyvenv.cfg` file values present in the virtual environment.
10
+
11
+ .Parameter VenvDir
12
+ Path to the directory that contains the virtual environment to activate. The
13
+ default value for this is the parent of the directory that the Activate.ps1
14
+ script is located within.
15
+
16
+ .Parameter Prompt
17
+ The prompt prefix to display when this virtual environment is activated. By
18
+ default, this prompt is the name of the virtual environment folder (VenvDir)
19
+ surrounded by parentheses and followed by a single space (ie. '(.venv) ').
20
+
21
+ .Example
22
+ Activate.ps1
23
+ Activates the Python virtual environment that contains the Activate.ps1 script.
24
+
25
+ .Example
26
+ Activate.ps1 -Verbose
27
+ Activates the Python virtual environment that contains the Activate.ps1 script,
28
+ and shows extra information about the activation as it executes.
29
+
30
+ .Example
31
+ Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
32
+ Activates the Python virtual environment located in the specified location.
33
+
34
+ .Example
35
+ Activate.ps1 -Prompt "MyPython"
36
+ Activates the Python virtual environment that contains the Activate.ps1 script,
37
+ and prefixes the current prompt with the specified string (surrounded in
38
+ parentheses) while the virtual environment is active.
39
+
40
+ .Notes
41
+ On Windows, it may be required to enable this Activate.ps1 script by setting the
42
+ execution policy for the user. You can do this by issuing the following PowerShell
43
+ command:
44
+
45
+ PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
46
+
47
+ For more information on Execution Policies:
48
+ https://go.microsoft.com/fwlink/?LinkID=135170
49
+
50
+ #>
51
+ Param(
52
+ [Parameter(Mandatory = $false)]
53
+ [String]
54
+ $VenvDir,
55
+ [Parameter(Mandatory = $false)]
56
+ [String]
57
+ $Prompt
58
+ )
59
+
60
+ <# Function declarations --------------------------------------------------- #>
61
+
62
+ <#
63
+ .Synopsis
64
+ Remove all shell session elements added by the Activate script, including the
65
+ addition of the virtual environment's Python executable from the beginning of
66
+ the PATH variable.
67
+
68
+ .Parameter NonDestructive
69
+ If present, do not remove this function from the global namespace for the
70
+ session.
71
+
72
+ #>
73
+ function global:deactivate ([switch]$NonDestructive) {
74
+ # Revert to original values
75
+
76
+ # The prior prompt:
77
+ if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
78
+ Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
79
+ Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
80
+ }
81
+
82
+ # The prior PYTHONHOME:
83
+ if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
84
+ Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
85
+ Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
86
+ }
87
+
88
+ # The prior PATH:
89
+ if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
90
+ Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
91
+ Remove-Item -Path Env:_OLD_VIRTUAL_PATH
92
+ }
93
+
94
+ # Just remove the VIRTUAL_ENV altogether:
95
+ if (Test-Path -Path Env:VIRTUAL_ENV) {
96
+ Remove-Item -Path env:VIRTUAL_ENV
97
+ }
98
+
99
+ # Just remove VIRTUAL_ENV_PROMPT altogether.
100
+ if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
101
+ Remove-Item -Path env:VIRTUAL_ENV_PROMPT
102
+ }
103
+
104
+ # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
105
+ if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
106
+ Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
107
+ }
108
+
109
+ # Leave deactivate function in the global namespace if requested:
110
+ if (-not $NonDestructive) {
111
+ Remove-Item -Path function:deactivate
112
+ }
113
+ }
114
+
115
+ <#
116
+ .Description
117
+ Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
118
+ given folder, and returns them in a map.
119
+
120
+ For each line in the pyvenv.cfg file, if that line can be parsed into exactly
+ two strings separated by `=` (with any amount of whitespace surrounding the =)
+ then it is considered a `key = value` line. The left hand string is the key,
+ the right hand is the value.
+
+ If the value starts with a `'` or a `"` then the first and last character is
+ stripped from the value before being captured.
+
+ .Parameter ConfigDir
+ Path to the directory that contains the `pyvenv.cfg` file.
+ #>
+ function Get-PyVenvConfig(
+ [String]
+ $ConfigDir
+ ) {
+ Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
+
+ # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
+ $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
+
+ # An empty map will be returned if no config file is found.
+ $pyvenvConfig = @{ }
+
+ if ($pyvenvConfigPath) {
+
+ Write-Verbose "File exists, parse `key = value` lines"
+ $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
+
+ $pyvenvConfigContent | ForEach-Object {
+ $keyval = $PSItem -split "\s*=\s*", 2
+ if ($keyval[0] -and $keyval[1]) {
+ $val = $keyval[1]
+
+ # Remove extraneous quotations around a string value.
+ if ("'""".Contains($val.Substring(0, 1))) {
+ $val = $val.Substring(1, $val.Length - 2)
+ }
+
+ $pyvenvConfig[$keyval[0]] = $val
+ Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
+ }
+ }
+ }
+ return $pyvenvConfig
+ }
+
+
+ <# Begin Activate script --------------------------------------------------- #>
+
+ # Determine the containing directory of this script
+ $VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
+ $VenvExecDir = Get-Item -Path $VenvExecPath
+
+ Write-Verbose "Activation script is located in path: '$VenvExecPath'"
+ Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
+ Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
+
+ # Set values required in priority: CmdLine, ConfigFile, Default
+ # First, get the location of the virtual environment, it might not be
+ # VenvExecDir if specified on the command line.
+ if ($VenvDir) {
+ Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
+ }
+ else {
+ Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
+ $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
+ Write-Verbose "VenvDir=$VenvDir"
+ }
+
+ # Next, read the `pyvenv.cfg` file to determine any required value such
+ # as `prompt`.
+ $pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
+
+ # Next, set the prompt from the command line, or the config file, or
+ # just use the name of the virtual environment folder.
+ if ($Prompt) {
+ Write-Verbose "Prompt specified as argument, using '$Prompt'"
+ }
+ else {
+ Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
+ if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
+ Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
+ $Prompt = $pyvenvCfg['prompt'];
+ }
+ else {
+ Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
+ Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
+ $Prompt = Split-Path -Path $venvDir -Leaf
+ }
+ }
+
+ Write-Verbose "Prompt = '$Prompt'"
+ Write-Verbose "VenvDir='$VenvDir'"
+
+ # Deactivate any currently active virtual environment, but leave the
+ # deactivate function in place.
+ deactivate -nondestructive
+
+ # Now set the environment variable VIRTUAL_ENV, used by many tools to determine
+ # that there is an activated venv.
+ $env:VIRTUAL_ENV = $VenvDir
+
+ if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
+
+ Write-Verbose "Setting prompt to '$Prompt'"
+
+ # Set the prompt to include the env name
+ # Make sure _OLD_VIRTUAL_PROMPT is global
+ function global:_OLD_VIRTUAL_PROMPT { "" }
+ Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
+ New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
+
+ function global:prompt {
+ Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
+ _OLD_VIRTUAL_PROMPT
+ }
+ $env:VIRTUAL_ENV_PROMPT = $Prompt
+ }
+
+ # Clear PYTHONHOME
+ if (Test-Path -Path Env:PYTHONHOME) {
+ Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
+ Remove-Item -Path Env:PYTHONHOME
+ }
+
+ # Add the venv to the PATH
+ Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
+ $Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
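Note: the `Get-PyVenvConfig` rule described above (split on the first `=`, trim surrounding whitespace, strip one pair of surrounding quotes) can be sketched in Python as follows. This is an illustrative sketch only, not one of the uploaded files; the `pyvenv.cfg` file name and the `prompt` key are what venv itself writes, everything else is assumed.

    # Illustrative sketch (not part of this upload): the same `key = value`
    # parse that Get-PyVenvConfig applies to pyvenv.cfg.
    from pathlib import Path

    def read_pyvenv_cfg(config_dir: str) -> dict:
        config = {}
        path = Path(config_dir) / "pyvenv.cfg"
        if not path.exists():
            return config  # empty mapping when no config file is found
        for line in path.read_text().splitlines():
            key, sep, value = line.partition("=")
            if not sep:
                continue  # not a `key = value` line
            key, value = key.strip(), value.strip()
            if value[:1] in ("'", '"'):
                value = value[1:-1]  # strip one pair of surrounding quotes
            if key:
                config[key] = value
        return config

    # e.g. read_pyvenv_cfg("transcriber").get("prompt")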
transcriber/bin/activate ADDED
@@ -0,0 +1,69 @@
+ # This file must be used with "source bin/activate" *from bash*
+ # you cannot run it directly
+
+ deactivate () {
+ # reset old environment variables
+ if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
+ PATH="${_OLD_VIRTUAL_PATH:-}"
+ export PATH
+ unset _OLD_VIRTUAL_PATH
+ fi
+ if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
+ PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
+ export PYTHONHOME
+ unset _OLD_VIRTUAL_PYTHONHOME
+ fi
+
+ # This should detect bash and zsh, which have a hash command that must
+ # be called to get it to forget past commands. Without forgetting
+ # past commands the $PATH changes we made may not be respected
+ if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+ hash -r 2> /dev/null
+ fi
+
+ if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
+ PS1="${_OLD_VIRTUAL_PS1:-}"
+ export PS1
+ unset _OLD_VIRTUAL_PS1
+ fi
+
+ unset VIRTUAL_ENV
+ unset VIRTUAL_ENV_PROMPT
+ if [ ! "${1:-}" = "nondestructive" ] ; then
+ # Self destruct!
+ unset -f deactivate
+ fi
+ }
+
+ # unset irrelevant variables
+ deactivate nondestructive
+
+ VIRTUAL_ENV="/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber"
+ export VIRTUAL_ENV
+
+ _OLD_VIRTUAL_PATH="$PATH"
+ PATH="$VIRTUAL_ENV/bin:$PATH"
+ export PATH
+
+ # unset PYTHONHOME if set
+ # this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
+ # could use `if (set -u; : $PYTHONHOME) ;` in bash
+ if [ -n "${PYTHONHOME:-}" ] ; then
+ _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
+ unset PYTHONHOME
+ fi
+
+ if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
+ _OLD_VIRTUAL_PS1="${PS1:-}"
+ PS1="(transcriber) ${PS1:-}"
+ export PS1
+ VIRTUAL_ENV_PROMPT="(transcriber) "
+ export VIRTUAL_ENV_PROMPT
+ fi
+
+ # This should detect bash and zsh, which have a hash command that must
+ # be called to get it to forget past commands. Without forgetting
+ # past commands the $PATH changes we made may not be respected
+ if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+ hash -r 2> /dev/null
+ fi
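Like `Activate.ps1` above, this script exports `VIRTUAL_ENV` and prepends `$VIRTUAL_ENV/bin` to `PATH`; `VIRTUAL_ENV` is the variable most tooling inspects to detect an activated environment. A minimal illustrative sketch (not one of the uploaded files) of the two common checks from Python:

    # Illustrative sketch: how a Python process can tell a venv is in use.
    import os
    import sys

    # Set by bin/activate, Activate.ps1, activate.csh and activate.fish.
    activated_dir = os.environ.get("VIRTUAL_ENV")

    # True when the running interpreter itself comes from a venv,
    # even if no activation script was sourced.
    inside_venv = sys.prefix != sys.base_prefix

    print(activated_dir, inside_venv)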
transcriber/bin/activate.csh ADDED
@@ -0,0 +1,26 @@
+ # This file must be used with "source bin/activate.csh" *from csh*.
+ # You cannot run it directly.
+ # Created by Davide Di Blasi <davidedb@gmail.com>.
+ # Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
+
+ alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
+
+ # Unset irrelevant variables.
+ deactivate nondestructive
+
+ setenv VIRTUAL_ENV "/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber"
+
+ set _OLD_VIRTUAL_PATH="$PATH"
+ setenv PATH "$VIRTUAL_ENV/bin:$PATH"
+
+
+ set _OLD_VIRTUAL_PROMPT="$prompt"
+
+ if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
+ set prompt = "(transcriber) $prompt"
+ setenv VIRTUAL_ENV_PROMPT "(transcriber) "
+ endif
+
+ alias pydoc python -m pydoc
+
+ rehash
transcriber/bin/activate.fish ADDED
@@ -0,0 +1,69 @@
+ # This file must be used with "source <venv>/bin/activate.fish" *from fish*
+ # (https://fishshell.com/); you cannot run it directly.
+
+ function deactivate -d "Exit virtual environment and return to normal shell environment"
+ # reset old environment variables
+ if test -n "$_OLD_VIRTUAL_PATH"
+ set -gx PATH $_OLD_VIRTUAL_PATH
+ set -e _OLD_VIRTUAL_PATH
+ end
+ if test -n "$_OLD_VIRTUAL_PYTHONHOME"
+ set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
+ set -e _OLD_VIRTUAL_PYTHONHOME
+ end
+
+ if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
+ set -e _OLD_FISH_PROMPT_OVERRIDE
+ # prevents error when using nested fish instances (Issue #93858)
+ if functions -q _old_fish_prompt
+ functions -e fish_prompt
+ functions -c _old_fish_prompt fish_prompt
+ functions -e _old_fish_prompt
+ end
+ end
+
+ set -e VIRTUAL_ENV
+ set -e VIRTUAL_ENV_PROMPT
+ if test "$argv[1]" != "nondestructive"
+ # Self-destruct!
+ functions -e deactivate
+ end
+ end
+
+ # Unset irrelevant variables.
+ deactivate nondestructive
+
+ set -gx VIRTUAL_ENV "/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber"
+
+ set -gx _OLD_VIRTUAL_PATH $PATH
+ set -gx PATH "$VIRTUAL_ENV/bin" $PATH
+
+ # Unset PYTHONHOME if set.
+ if set -q PYTHONHOME
+ set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
+ set -e PYTHONHOME
+ end
+
+ if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
+ # fish uses a function instead of an env var to generate the prompt.
+
+ # Save the current fish_prompt function as the function _old_fish_prompt.
+ functions -c fish_prompt _old_fish_prompt
+
+ # With the original prompt function renamed, we can override with our own.
+ function fish_prompt
+ # Save the return status of the last command.
+ set -l old_status $status
+
+ # Output the venv prompt; color taken from the blue of the Python logo.
+ printf "%s%s%s" (set_color 4B8BBE) "(transcriber) " (set_color normal)
+
+ # Restore the return status of the previous command.
+ echo "exit $old_status" | .
+ # Output the original/"old" prompt.
+ _old_fish_prompt
+ end
+
+ set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
+ set -gx VIRTUAL_ENV_PROMPT "(transcriber) "
+ end
transcriber/bin/bard_api ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from bard_api.cli import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
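This file and the remaining scripts under `transcriber/bin` below follow the same pip-generated console-script pattern: a shebang pointing at the venv's `python3.11`, an import of the package's declared entry point, and a `sys.exit(...)` call. As an illustrative sketch (not part of the upload), the entry points such wrappers are generated from can be listed with the standard library from inside the same environment:

    # Illustrative sketch: enumerate console_scripts entry points in the venv.
    from importlib.metadata import entry_points

    for ep in entry_points(group="console_scripts"):
        # ep.name is the executable name; ep.value is "module:function".
        print(f"{ep.name} -> {ep.value}")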
transcriber/bin/convert-caffe2-to-onnx ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from caffe2.python.onnx.bin.conversion import caffe2_to_onnx
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(caffe2_to_onnx())
transcriber/bin/convert-onnx-to-caffe2 ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from caffe2.python.onnx.bin.conversion import onnx_to_caffe2
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(onnx_to_caffe2())
transcriber/bin/f2py ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from numpy.f2py.f2py2e import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
transcriber/bin/f2py3 ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from numpy.f2py.f2py2e import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
transcriber/bin/f2py3.11 ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from numpy.f2py.f2py2e import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
transcriber/bin/fonttools ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from fontTools.__main__ import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
transcriber/bin/futurize ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from libfuturize.main import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
transcriber/bin/gradio ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from gradio.cli import cli
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(cli())
transcriber/bin/httpx ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from httpx import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
transcriber/bin/huggingface-cli ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from huggingface_hub.commands.huggingface_cli import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
transcriber/bin/isympy ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from isympy import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
transcriber/bin/jsonschema ADDED
@@ -0,0 +1,8 @@
+ #!/Users/harshpatel/DemoProjects/transcriber-prompt/transcriber/bin/python3.11
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from jsonschema.cli import main
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())