Toaster496 committed on
Commit
56923ec
1 Parent(s): 4c78d92

Upload webui.py

Browse files
Files changed (1) hide show
  1. webui.py +244 -0
webui.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import glob
3
+ import os
4
+ import shutil
5
+ import site
6
+ import subprocess
7
+ import sys
8
+
9
+ script_dir = os.getcwd()
10
+ conda_env_path = os.path.join(script_dir, "installer_files", "env")
11
+
12
+ # Use this to set your command-line flags. For the full list, see:
13
+ # https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui
14
+ CMD_FLAGS = '--chat'
15
+
16
+
17
+ # Allows users to set flags in "OOBABOOGA_FLAGS" environment variable
18
+ if "OOBABOOGA_FLAGS" in os.environ:
19
+ CMD_FLAGS = os.environ["OOBABOOGA_FLAGS"]
20
+ print("The following flags have been taken from the environment variable 'OOBABOOGA_FLAGS':")
21
+ print(CMD_FLAGS)
22
+ print("To use the CMD_FLAGS Inside webui.py, unset 'OOBABOOGA_FLAGS'.\n")
23
+
24
+
25
+ def print_big_message(message):
26
+ message = message.strip()
27
+ lines = message.split('\n')
28
+ print("\n\n*******************************************************************")
29
+ for line in lines:
30
+ if line.strip() != '':
31
+ print("*", line)
32
+
33
+ print("*******************************************************************\n\n")
34
+
35
+
36
+ def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None):
37
+ # Use the conda environment
38
+ if environment:
39
+ if sys.platform.startswith("win"):
40
+ conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat")
41
+ cmd = "\"" + conda_bat_path + "\" activate \"" + conda_env_path + "\" >nul && " + cmd
42
+ else:
43
+ conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh")
44
+ cmd = ". \"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd
45
+
46
+ # Run shell commands
47
+ result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env)
48
+
49
+ # Assert the command ran successfully
50
+ if assert_success and result.returncode != 0:
51
+ print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...")
52
+ sys.exit()
53
+
54
+ return result
55
+
56
+
57
+ def check_env():
58
+ # If we have access to conda, we are probably in an environment
59
+ conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0
60
+ if not conda_exist:
61
+ print("Conda is not installed. Exiting...")
62
+ sys.exit()
63
+
64
+ # Ensure this is a new environment and not the base environment
65
+ if os.environ["CONDA_DEFAULT_ENV"] == "base":
66
+ print("Create an environment for this project and activate it. Exiting...")
67
+ sys.exit()
68
+
69
+
70
+ def install_dependencies():
71
+ # Select your GPU or, choose to run in CPU mode
72
+ print("What is your GPU")
73
+ print()
74
+ print("A) NVIDIA")
75
+ print("B) AMD")
76
+ print("C) Apple M Series")
77
+ print("D) None (I want to run in CPU mode)")
78
+ print()
79
+ gpuchoice = input("Input> ").lower()
80
+
81
+ if gpuchoice == "d":
82
+ print_big_message("Once the installation ends, make sure to open webui.py with a text editor\nand add the --cpu flag to CMD_FLAGS.")
83
+
84
+ # Install the version of PyTorch needed
85
+ if gpuchoice == "a":
86
+ run_cmd("conda install -y -k pytorch[version=2,build=py3.10_cuda11.7*] torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit ninja git -c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia", assert_success=True, environment=True)
87
+ elif gpuchoice == "b":
88
+ print("AMD GPUs are not supported. Exiting...")
89
+ sys.exit()
90
+ elif gpuchoice == "c" or gpuchoice == "d":
91
+ run_cmd("conda install -y -k pytorch torchvision torchaudio cpuonly git -c pytorch", assert_success=True, environment=True)
92
+ else:
93
+ print("Invalid choice. Exiting...")
94
+ sys.exit()
95
+
96
+ # Clone webui to our computer
97
+ run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True)
98
+ # if sys.platform.startswith("win"):
99
+ # # Fix a bitsandbytes compatibility issue with Windows
100
+ # run_cmd("python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl", assert_success=True, environment=True)
101
+
102
+ # Install the webui dependencies
103
+ update_dependencies()
104
+
105
+
106
+ def update_dependencies():
107
+ os.chdir("text-generation-webui")
108
+ run_cmd("git pull", assert_success=True, environment=True)
109
+
110
+ # Installs/Updates dependencies from all requirements.txt
111
+ run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True)
112
+ extensions = next(os.walk("extensions"))[1]
113
+ for extension in extensions:
114
+ if extension in ['superbooga']: # No wheels available for dependencies
115
+ continue
116
+
117
+ extension_req_path = os.path.join("extensions", extension, "requirements.txt")
118
+ if os.path.exists(extension_req_path):
119
+ run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True)
120
+
121
+ # Latest bitsandbytes requires minimum compute 7.0
122
+ nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe"
123
+ min_compute = 70
124
+ compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True)
125
+ old_bnb = "bitsandbytes==0.38.1" if not sys.platform.startswith("win") else "https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl"
126
+ if compute_array.returncode == 0 and not any(int(compute) >= min_compute for compute in compute_array.stdout.decode('utf-8').split(',')):
127
+ old_bnb_install = run_cmd(f"python -m pip install {old_bnb} --force-reinstall --no-deps", environment=True).returncode == 0
128
+ message = "\n\nWARNING: GPU with compute < 7.0 detected!\n"
129
+ if old_bnb_install:
130
+ message += "Older version of bitsandbytes has been installed to maintain compatibility.\n"
131
+ message += "You will be unable to use --load-in-4bit!\n"
132
+ else:
133
+ message += "You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n"
134
+
135
+ print_big_message(message)
136
+
137
+ # The following dependencies are for CUDA, not CPU
138
+ # Check if the package cpuonly exists to determine if torch uses CUDA or not
139
+ cpuonly_exist = run_cmd("conda list cpuonly | grep cpuonly", environment=True, capture_output=True).returncode == 0
140
+ if cpuonly_exist:
141
+ return
142
+
143
+ # Finds the path to your dependencies
144
+ for sitedir in site.getsitepackages():
145
+ if "site-packages" in sitedir:
146
+ site_packages_path = sitedir
147
+ break
148
+
149
+ # This path is critical to installing the following dependencies
150
+ if site_packages_path is None:
151
+ print("Could not find the path to your Python packages. Exiting...")
152
+ sys.exit()
153
+
154
+ # Fix a bitsandbytes compatibility issue with Linux
155
+ if sys.platform.startswith("linux"):
156
+ shutil.copy(os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cuda117.so"), os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cpu.so"))
157
+
158
+ if not os.path.exists("repositories/"):
159
+ os.mkdir("repositories")
160
+
161
+ # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization
162
+ os.chdir("repositories")
163
+ if not os.path.exists("GPTQ-for-LLaMa/"):
164
+ run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True)
165
+
166
+ # Install GPTQ-for-LLaMa dependencies
167
+ os.chdir("GPTQ-for-LLaMa")
168
+ run_cmd("git pull", assert_success=True, environment=True)
169
+
170
+ # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa
171
+ if sys.platform.startswith("linux"):
172
+ gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True)
173
+ if gxx_output.returncode != 0 or int(gxx_output.stdout.strip().split(b".")[0]) > 11:
174
+ # Install the correct version of g++
175
+ run_cmd("conda install -y -k gxx_linux-64=11.2.0", environment=True)
176
+
177
+ # Compile and install GPTQ-for-LLaMa
178
+ if os.path.exists('setup_cuda.py'):
179
+ os.rename("setup_cuda.py", "setup.py")
180
+
181
+ run_cmd("python -m pip install .", environment=True)
182
+
183
+ # Wheel installation can fail while in the build directory of a package with the same name
184
+ os.chdir("..")
185
+
186
+ # If the path does not exist, then the install failed
187
+ quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/")
188
+ if not glob.glob(quant_cuda_path_regex):
189
+ # Attempt installation via alternative, Windows/Linux-specific method
190
+ if sys.platform.startswith("win") or sys.platform.startswith("linux"):
191
+ print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nThe installer will proceed to install a pre-compiled wheel.")
192
+ url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl"
193
+ if sys.platform.startswith("linux"):
194
+ url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/Linux-x64/quant_cuda-0.0.0-cp310-cp310-linux_x86_64.whl"
195
+
196
+ result = run_cmd("python -m pip install " + url, environment=True)
197
+ if result.returncode == 0:
198
+ print("Wheel installation success!")
199
+ else:
200
+ print("ERROR: GPTQ wheel installation failed. You will not be able to use GPTQ-based models.")
201
+ else:
202
+ print("ERROR: GPTQ CUDA kernel compilation failed.")
203
+ print("You will not be able to use GPTQ-based models.")
204
+
205
+ print("Continuing with install..")
206
+
207
+
208
+ def download_model():
209
+ os.chdir("text-generation-webui")
210
+ run_cmd("python download-model.py", environment=True)
211
+
212
+
213
+ def launch_webui():
214
+ os.chdir("text-generation-webui")
215
+ run_cmd(f"python server.py {CMD_FLAGS}", environment=True)
216
+
217
+
218
+ if __name__ == "__main__":
219
+ # Verifies we are in a conda environment
220
+ check_env()
221
+
222
+ parser = argparse.ArgumentParser()
223
+ parser.add_argument('--update', action='store_true', help='Update the web UI.')
224
+ args = parser.parse_args()
225
+
226
+ if args.update:
227
+ update_dependencies()
228
+ else:
229
+ # If webui has already been installed, skip and run
230
+ if not os.path.exists("text-generation-webui/"):
231
+ install_dependencies()
232
+ os.chdir(script_dir)
233
+
234
+ # Check if a model has been downloaded yet
235
+ if len([item for item in glob.glob('text-generation-webui/models/*') if not item.endswith(('.txt', '.yaml'))]) == 0:
236
+ print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the bottom of the \"Model\" tab and download one.")
237
+
238
+ # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist
239
+ conda_path_bin = os.path.join(conda_env_path, "bin")
240
+ if not os.path.exists(conda_path_bin):
241
+ os.mkdir(conda_path_bin)
242
+
243
+ # Launch the webui
244
+ launch_webui()