// Smoke test: verify that the Janus processor and model can be loaded with
// Transformers.js, preferring WebGPU and falling back to CPU when needed.
import { AutoProcessor, MultiModalityCausalLM } from "@huggingface/transformers";

async function testJanusModel() {
  console.log("Testing Janus Model Loading...");

  // Path or Hub model ID of the Janus model to test. The original script
  // interpolated a local model directory here; replace this placeholder
  // with your own path.
  const modelPath = "./path/to/janus-model";

  try {
    // Load the processor (text tokenization and image pre/post-processing).
    console.log("Loading processor...");
    const processor = await AutoProcessor.from_pretrained(modelPath);
    console.log("Processor loaded successfully");

    // Load the multimodal model on WebGPU with quantized (q4f16) weights.
    console.log("Loading model...");
    const model = await MultiModalityCausalLM.from_pretrained(modelPath, {
      device: "webgpu",
      dtype: "q4f16",
    });
    console.log("Model loaded successfully");

    console.log("\nSUCCESS! Model initialization complete");
    console.log("Model is ready for inference");

    return { model, processor };
  } catch (error) {
    console.error("Model loading failed:");
    console.error(error);

    // WebGPU may be unavailable (unsupported browser, driver, or headless
    // environment); retry the same load on the CPU backend.
    console.log("\nTrying CPU fallback...");
    try {
      const processor = await AutoProcessor.from_pretrained(modelPath);
      const model = await MultiModalityCausalLM.from_pretrained(modelPath, {
        device: "cpu",
      });
      console.log("CPU fallback successful");
      return { model, processor };
    } catch (cpuError) {
      console.error("CPU fallback also failed:");
      console.error(cpuError);
      throw cpuError;
    }
  }
}

testJanusModel()
  .then(() => console.log("\nTest completed successfully"))
  .catch((error) => console.error("\nTest failed:", error));

export { testJanusModel };
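
// A rough sketch of what inference with the loaded pair could look like,
// loosely following the Transformers.js multimodal chat examples.
// sampleJanusInference is a hypothetical helper; the conversation/role
// format, placeholder image URL, and generation settings below are
// illustrative assumptions, not values from the original script.
async function sampleJanusInference() {
  const { model, processor } = await testJanusModel();

  // Single chat turn with an image placeholder token in the prompt.
  const conversation = [
    {
      role: "<|User|>",
      content: "<image_placeholder>\nDescribe this image.",
      images: ["https://example.com/image.png"], // placeholder URL
    },
  ];

  // The processor turns the conversation into model-ready tensors.
  const inputs = await processor(conversation);

  // Greedy decoding; the returned token ids include the prompt as well.
  const outputs = await model.generate({
    ...inputs,
    max_new_tokens: 128,
    do_sample: false,
  });

  // Assumes the processor forwards batch_decode to its tokenizer.
  const decoded = processor.batch_decode(outputs, { skip_special_tokens: true });
  console.log(decoded[0]);
}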