tdoehmen committed
Commit 49c6a0b · 1 Parent(s): 1d1f8ae

revert changes

Files changed (1)
  1. app.py +9 -44
app.py CHANGED
@@ -4,61 +4,35 @@ import spaces
 import torch
 import os
 import re
-import threading
-import queue
-import time
 
 zero = torch.Tensor([0]).cuda()
 print(zero.device) # <-- 'cpu' 🤔
 
-def stream_output(process, q):
-    for line in iter(process.stdout.readline, b''):
-        q.put(line.decode('utf-8').strip())
-    process.stdout.close()
-
 @spaces.GPU
 def run_evaluation(model_name):
     print(zero.device) # <-- 'cuda:0' 🤗
 
     results = []
-    manifest_logs = []
 
+    # Use the secret HF token from the Hugging Face space
     if "HF_TOKEN" not in os.environ:
-        return "Error: HF_TOKEN not found in environment variables.", "Error: Cannot start manifest server without HF_TOKEN."
+        return "Error: HF_TOKEN not found in environment variables."
 
     manifest_process = None
-    log_queue = queue.Queue()
     try:
+        # Start manifest server in background with explicit CUDA_VISIBLE_DEVICES
        manifest_cmd = f"""
        cd duckdb-nsql/ &&
-        CUDA_VISIBLE_DEVICES=0 HF_TOKEN={os.environ['HF_TOKEN']} python -m manifest.api.app \
+        CUDA_VISIBLE_DEVICES=0 HF_TOKEN={os.environ['HF_TOKEN']} python -m manifest.api.app \
            --model_type huggingface \
            --model_generation_type text-generation \
            --model_name_or_path {model_name} \
            --fp16 \
            --device 0
        """
-        manifest_process = subprocess.Popen(manifest_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1, universal_newlines=True)
-
-        threading.Thread(target=stream_output, args=(manifest_process, log_queue), daemon=True).start()
-
+        manifest_process = subprocess.Popen(manifest_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        results.append("Started manifest server in background.")
 
-        # Wait for the server to initialize (adjust time as needed)
-        server_ready = False
-        for _ in range(60):  # Increased timeout to 60 seconds
-            try:
-                line = log_queue.get(timeout=1)
-                manifest_logs.append(line)
-                if "Running on" in line:  # Server is ready
-                    server_ready = True
-                    break
-            except queue.Empty:
-                pass
-
-        if not server_ready:
-            raise Exception("Manifest server failed to start within the expected time.")
-
        # Run inference
        inference_cmd = f"""
        cd duckdb-nsql/ &&
@@ -100,7 +74,7 @@ def run_evaluation(model_name):
        if metrics:
            results.append(f"Evaluation completed:\n{metrics}")
        else:
-            results.append("Evaluation completed, but couldn't get metrics.")
+            results.append("Evaluation completed, but get metrics.")
 
    except subprocess.CalledProcessError as e:
        results.append(f"Error occurred: {str(e)}")
@@ -113,24 +87,15 @@ def run_evaluation(model_name):
            manifest_process.terminate()
            results.append("Terminated manifest server.")
 
-            # Collect any remaining logs
-            while True:
-                try:
-                    line = log_queue.get_nowait()
-                    manifest_logs.append(line)
-                except queue.Empty:
-                    break
-
-    return "\n\n".join(results), "\n".join(manifest_logs)
+    return "\n\n".join(results)
 
 with gr.Blocks() as demo:
    gr.Markdown("# DuckDB SQL Evaluation App")
 
    model_name = gr.Textbox(label="Model Name (e.g., Qwen/Qwen2.5-7B-Instruct)")
    start_btn = gr.Button("Start Evaluation")
-    output = gr.Textbox(label="Evaluation Output", lines=20)
-    manifest_output = gr.Textbox(label="Manifest Server Logs", lines=20)
+    output = gr.Textbox(label="Output", lines=20)
 
-    start_btn.click(fn=run_evaluation, inputs=[model_name], outputs=[output, manifest_output])
+    start_btn.click(fn=run_evaluation, inputs=[model_name], outputs=output)
 
 demo.launch()
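
For reference, the startup check that this revert removes followed a common pattern: a daemon thread reads the server process's stdout line by line into a queue, and the main thread polls that queue until a readiness marker ("Running on" in the removed code) appears or a timeout expires. Below is a minimal, self-contained sketch of that pattern; the stand-in child command (python -m http.server) and its "Serving HTTP" marker are illustrative assumptions, not the manifest server the Space actually launches.

import queue
import subprocess
import sys
import threading

def stream_output(process, q):
    # Forward each stdout line of the child process into a queue.
    for line in iter(process.stdout.readline, b''):
        q.put(line.decode('utf-8').strip())
    process.stdout.close()

# Stand-in long-running command (-u keeps the child's stdout unbuffered).
proc = subprocess.Popen(
    [sys.executable, "-u", "-m", "http.server", "8000"],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
)
log_queue = queue.Queue()
threading.Thread(target=stream_output, args=(proc, log_queue), daemon=True).start()

# Poll the queue for up to 60 seconds, waiting for a readiness marker.
server_ready = False
for _ in range(60):
    try:
        line = log_queue.get(timeout=1)
        print(line)
        if "Serving HTTP" in line:  # the removed code looked for "Running on"
            server_ready = True
            break
    except queue.Empty:
        pass

proc.terminate()
if not server_ready:
    raise RuntimeError("Server failed to start within the expected time.")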
 
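One more note on the reverted Popen call: it still opens stdout=subprocess.PIPE and stderr=subprocess.PIPE, but nothing reads those pipes anymore, so a very chatty manifest server could eventually fill the OS pipe buffer and stall. If the logs are not needed in the UI, a hedged alternative (a sketch under the assumption that capturing output to a file is acceptable, not what app.py currently does) is to hand the child a log file instead of a pipe:

import subprocess

# Sketch: write the background server's output to a file so the parent
# never has to drain a pipe it is not reading. The command is shortened
# for illustration; app.py builds the full manifest.api.app invocation
# with model, fp16, and device flags.
with open("manifest_server.log", "ab") as log_file:
    manifest_process = subprocess.Popen(
        "cd duckdb-nsql/ && python -m manifest.api.app --model_type huggingface",
        shell=True,
        stdout=log_file,
        stderr=subprocess.STDOUT,
    )

# Later, as app.py already does:
# manifest_process.terminate()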