Mr-Bhaskar committed on
Commit
228fd17
1 Parent(s): 20efbc0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -3
app.py CHANGED
@@ -1,5 +1,72 @@
1
- import gradio as gr
2
 
3
- demo = gr.load("Mr-Bhaskar/fbt-mistral-7b", src="models")
4
 
5
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #@title 2. Launch the web UI
2
 
3
+ #@markdown If unsure about the branch, write "main" or leave it blank.
4
 
5
+ import torch
6
+ from pathlib import Path
7
+
8
+ if Path.cwd().name != 'text-generation-webui':
9
+ print("Installing the webui...")
10
+
11
+ # !git clone https://github.com/oobabooga/text-generation-webui
12
+ # %cd text-generation-webui
13
+
14
+ torver = torch.__version__
15
+ print(f"TORCH: {torver}")
16
+ is_cuda118 = '+cu118' in torver # 2.1.0+cu118
17
+ is_cuda117 = '+cu117' in torver # 2.0.1+cu117
18
+
19
+ textgen_requirements = open('text-generation-webui\requirements.txt').read().splitlines()
20
+ if is_cuda117:
21
+ textgen_requirements = [req.replace('+cu121', '+cu117').replace('+cu122', '+cu117').replace('torch2.1', 'torch2.0') for req in textgen_requirements]
22
+ elif is_cuda118:
23
+ textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements]
24
+ with open('temp_requirements.txt', 'w') as file:
25
+ file.write('\n'.join(textgen_requirements))
26
+
27
+ !pip install -r extensions/openai/requirements.txt --upgrade
28
+ !pip install -r temp_requirements.txt --upgrade
29
+
30
+ print("\033[1;32;1m\n --> If you see a warning about \"previously imported packages\", just ignore it.\033[0;37;0m")
31
+ print("\033[1;32;1m\n --> There is no need to restart the runtime.\n\033[0;37;0m")
32
+
33
+ try:
34
+ import flash_attn
35
+ except:
36
+ !pip uninstall -y flash_attn
37
+
38
+ # Parameters
39
+ model_url = "https://huggingface.co/Mr-Bhaskar/fbt-mistral-7b" #@param {type:"string"}
40
+ branch = "main" #@param {type:"string"}
41
+ command_line_flags = "--n-gpu-layers 128 --load-in-4bit --use_double_quant" #@param {type:"string"}
42
+ api = False #@param {type:"boolean"}
43
+
44
+ if api:
45
+ for param in ['--api', '--public-api']:
46
+ if param not in command_line_flags:
47
+ command_line_flags += f" {param}"
48
+
49
+ model_url = model_url.strip()
50
+ if model_url != "":
51
+ if not model_url.startswith('http'):
52
+ model_url = 'https://huggingface.co/' + model_url
53
+
54
+ # Download the model
55
+ url_parts = model_url.strip('/').strip().split('/')
56
+ output_folder = f"{url_parts[-2]}_{url_parts[-1]}"
57
+ branch = branch.strip('"\' ')
58
+ if branch.strip() not in ['', 'main']:
59
+ output_folder += f"_{branch}"
60
+ !python download-model.py {model_url} --branch {branch}
61
+ else:
62
+ !python download-model.py {model_url}
63
+ else:
64
+ output_folder = ""
65
+
66
+ # Start the web UI
67
+ cmd = f"python server.py --share"
68
+ if output_folder != "":
69
+ cmd += f" --model {output_folder}"
70
+ cmd += f" {command_line_flags}"
71
+ print(cmd)
72
+ !$cmd