Spaces: Running on Zero
Joseph Pollack committed
solve json error
- app.py +31 -6
- requirements.txt +1 -1
app.py
CHANGED
@@ -96,14 +96,36 @@ class LOperatorDemo:
             }
         ]
 
-
-
-
-
-
-
+        logger.info("Processing conversation with processor...")
+
+        # Process inputs with better error handling
+        try:
+            inputs = self.processor.apply_chat_template(
+                conversation,
+                add_generation_prompt=True,
+                return_tensors="pt"
+            )
+            logger.info(f"Processor output type: {type(inputs)}")
+
+            # Ensure inputs is a tensor and move to correct device
+            if not isinstance(inputs, torch.Tensor):
+                logger.warning("apply_chat_template did not return a tensor, attempting to convert...")
+                if isinstance(inputs, (list, tuple)):
+                    inputs = torch.tensor(inputs)
+                else:
+                    # If it's a string or other type, we need to handle it differently
+                    logger.error(f"Unexpected input type: {type(inputs)}, value: {inputs}")
+                    return "❌ Error: Processor returned unexpected format"
+
+            inputs = inputs.to(self.model.device)
+            logger.info(f"Inputs shape: {inputs.shape}, device: {inputs.device}")
+
+        except Exception as e:
+            logger.error(f"Error in processor: {str(e)}")
+            return f"❌ Error in processor: {str(e)}"
 
         # Generate response
+        logger.info("Generating response...")
         with torch.no_grad():
             outputs = self.model.generate(
                 inputs,
@@ -113,6 +135,7 @@ class LOperatorDemo:
                 top_p=0.9
             )
 
+        logger.info("Decoding response...")
         response = self.processor.tokenizer.decode(
             outputs[0][inputs.shape[1]:],
             skip_special_tokens=True
@@ -200,6 +223,8 @@ def load_example_episodes():
         episode_num = episode_dir.split('_')[1]
         goal_text = metadata.get('goal', f'Episode {episode_num} example')
 
+        logger.info(f"Episode {episode_num} goal: {goal_text}")
+
         examples.append([
             pil_image,  # Use PIL Image object directly
             goal_text  # Use the goal text from metadata
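Why the extra handling in app.py matters: apply_chat_template can return either a bare tensor of input ids or a dict-like BatchEncoding, depending on the processor and transformers version, which is what the new type checks guard against. Below is a minimal standalone sketch of the same defensive pattern, not the Space's actual code: the helper name prepare_inputs is invented for illustration, and it assumes a transformers-style processor and model are already loaded.

import logging

import torch

logger = logging.getLogger(__name__)

def prepare_inputs(processor, model, conversation):
    """Normalize whatever apply_chat_template returns into something generate() can take."""
    try:
        inputs = processor.apply_chat_template(
            conversation,
            add_generation_prompt=True,
            return_tensors="pt",
        )
    except Exception as exc:
        # Surface processor failures to the caller instead of crashing the Space.
        logger.error("Error in processor: %s", exc)
        return None

    # Some processors return a bare tensor of input ids, others a dict-like
    # BatchEncoding carrying input_ids / attention_mask; both kinds support .to().
    if isinstance(inputs, torch.Tensor) or hasattr(inputs, "to"):
        return inputs.to(model.device)

    logger.error("Unexpected processor output type: %s", type(inputs))
    return None

When a bare tensor comes back, model.generate(inputs) matches the call in the diff; in the dict-like case the usual call is model.generate(**inputs) instead.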
requirements.txt
CHANGED
@@ -5,4 +5,4 @@ Pillow>=10.0.0
 accelerate>=0.20.0
 huggingface-hub>=0.17.0
 safetensors>=0.4.0
-spaces
+spaces