<!-- llama-fine-tuner / index.html -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Llama-3.2-3B Fine-Tuning Interface</title>
<script src="https://cdn.tailwindcss.com"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
<script>
tailwind.config = {
theme: {
extend: {
colors: {
primary: '#4F46E5',
secondary: '#10B981',
dark: '#1F2937',
light: '#F3F4F6',
}
}
}
}
</script>
<style>
.progress-bar {
transition: width 0.5s ease-in-out;
}
.model-card:hover {
transform: translateY(-5px);
box-shadow: 0 20px 25px -5px rgba(0, 0, 0, 0.1), 0 10px 10px -5px rgba(0, 0, 0, 0.04);
}
.animate-pulse {
animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite;
}
@keyframes pulse {
0%, 100% {
opacity: 1;
}
50% {
opacity: 0.5;
}
}
.code-block {
font-family: 'Courier New', Courier, monospace;
background-color: #1E293B;
color: #F8FAFC;
}
</style>
</head>
<body class="bg-gray-50 min-h-screen">
<div class="container mx-auto px-4 py-8">
<!-- Header -->
<header class="mb-10">
<div class="flex justify-between items-center">
<div>
<h1 class="text-4xl font-bold text-dark">Llama Fine-Tuner</h1>
<p class="text-gray-600 mt-2">Fine-tune Llama-3.2-3B-Instruct model with your custom dataset</p>
</div>
<div class="flex items-center space-x-4">
<button class="px-4 py-2 bg-primary text-white rounded-lg hover:bg-indigo-700 transition">
<i class="fas fa-user mr-2"></i>Sign In
</button>
<button class="px-4 py-2 border border-primary text-primary rounded-lg hover:bg-indigo-50 transition">
<i class="fas fa-cloud mr-2"></i>HuggingFace
</button>
</div>
</div>
</header>
<!-- Main Content -->
<div class="grid grid-cols-1 lg:grid-cols-3 gap-8">
<!-- Left Panel - Model Info -->
<div class="lg:col-span-1 space-y-6">
<div class="bg-white p-6 rounded-xl shadow-md model-card transition">
<div class="flex items-center mb-4">
<div class="w-16 h-16 bg-indigo-100 rounded-lg flex items-center justify-center">
<i class="fas fa-robot text-3xl text-primary"></i>
</div>
<div class="ml-4">
<h3 class="text-xl font-semibold">Llama-3.2-3B-Instruct</h3>
<p class="text-gray-500">GGUF Format</p>
</div>
</div>
<div class="space-y-4">
<div>
<p class="text-gray-600 mb-1">Model Size</p>
<p class="font-medium">3.2 Billion Parameters</p>
</div>
<div>
<p class="text-gray-600 mb-1">Precision</p>
<p class="font-medium">16-bit Floating Point (f16)</p>
</div>
<div>
<p class="text-gray-600 mb-1">Source</p>
<a href="https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/blob/main/Llama-3.2-3B-Instruct-f16.gguf"
class="text-primary hover:underline" target="_blank">
<i class="fas fa-external-link-alt mr-1"></i>HuggingFace Repository
</a>
</div>
</div>
<div class="mt-6 pt-4 border-t border-gray-200">
<button id="loadModelBtn" class="w-full py-3 bg-primary text-white rounded-lg hover:bg-indigo-700 transition flex items-center justify-center">
<i class="fas fa-cloud-download-alt mr-2"></i>Load Model
</button>
</div>
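<!--
"Load Model" is simulated in the script below; nothing is actually downloaded in the browser.
In a real deployment the f16 GGUF weights would first be fetched from the repository linked
above, for example with the Hugging Face CLI (illustrative invocation; the local directory
is an assumption):
huggingface-cli download bartowski/Llama-3.2-3B-Instruct-GGUF Llama-3.2-3B-Instruct-f16.gguf --local-dir ./models
-->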
</div>
<div class="bg-white p-6 rounded-xl shadow-md">
<h3 class="text-lg font-semibold mb-4">System Requirements</h3>
<div class="space-y-3">
<div class="flex items-center">
<i class="fas fa-memory text-secondary mr-3"></i>
<span>Minimum 16GB RAM</span>
</div>
<div class="flex items-center">
<i class="fas fa-microchip text-secondary mr-3"></i>
<span>GPU with 8GB VRAM recommended</span>
</div>
<div class="flex items-center">
<i class="fas fa-hdd text-secondary mr-3"></i>
<span>At least 7GB free disk space (the f16 GGUF alone is roughly 6.4GB)</span>
</div>
</div>
<div class="mt-6">
<h4 class="font-medium mb-2">Current System Status</h4>
<div class="space-y-2">
<div>
<div class="flex justify-between text-sm mb-1">
<span>Memory</span>
<span id="memoryUsage">Loading...</span>
</div>
<div class="w-full bg-gray-200 rounded-full h-2.5">
<div id="memoryBar" class="bg-secondary h-2.5 rounded-full progress-bar" style="width: 0%"></div>
</div>
</div>
<div>
<div class="flex justify-between text-sm mb-1">
<span>GPU</span>
<span id="gpuStatus">Checking...</span>
</div>
<div class="w-full bg-gray-200 rounded-full h-2.5">
<div id="gpuBar" class="bg-secondary h-2.5 rounded-full progress-bar" style="width: 0%"></div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Center Panel - Fine-Tuning Configuration -->
<div class="lg:col-span-2 space-y-6">
<div class="bg-white p-6 rounded-xl shadow-md">
<h2 class="text-2xl font-semibold mb-6">Fine-Tuning Configuration</h2>
<!-- Step 1: Dataset -->
<div class="mb-8">
<div class="flex items-center mb-4">
<div class="w-8 h-8 rounded-full bg-primary text-white flex items-center justify-center mr-3">1</div>
<h3 class="text-lg font-medium">Upload Training Dataset</h3>
</div>
<div class="pl-11">
<div class="border-2 border-dashed border-gray-300 rounded-lg p-6 text-center mb-4">
<i class="fas fa-file-upload text-4xl text-gray-400 mb-3"></i>
<p class="mb-2">Drag & drop your dataset file here</p>
<p class="text-sm text-gray-500 mb-4">Supports JSON, CSV, or TXT formats</p>
<input type="file" id="datasetInput" class="hidden" accept=".json,.csv,.txt">
<label for="datasetInput" class="px-4 py-2 bg-gray-100 hover:bg-gray-200 rounded-lg cursor-pointer transition">
Select File
</label>
</div>
<div id="datasetInfo" class="hidden">
<div class="flex items-center justify-between bg-gray-50 p-3 rounded-lg">
<div class="flex items-center">
<i class="fas fa-file-alt text-gray-500 mr-3"></i>
<div>
<p id="fileName" class="font-medium"></p>
<p id="fileSize" class="text-sm text-gray-500"></p>
</div>
</div>
<button id="removeDatasetBtn" class="text-red-500 hover:text-red-700">
<i class="fas fa-times"></i>
</button>
</div>
<div class="mt-3">
<label class="block text-sm font-medium text-gray-700 mb-1">Dataset Format</label>
<select id="datasetFormat" class="w-full p-2 border border-gray-300 rounded-lg">
<option value="alpaca">Alpaca Format</option>
<option value="chatml">ChatML</option>
<option value="custom">Custom Format</option>
</select>
</div>
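<!--
Illustrative examples of the selectable record formats (field names and delimiters follow
the common public conventions; this page does not parse or validate them):
Alpaca (one JSON object per record):
{"instruction": "Summarize the text.", "input": "Llamas are domesticated South American camelids...", "output": "Llamas are domesticated camelids kept as livestock in South America."}
ChatML (role-delimited turns):
<|im_start|>user
Summarize the text.<|im_end|>
<|im_start|>assistant
Llamas are domesticated camelids kept as livestock in South America.<|im_end|>
-->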
</div>
</div>
</div>
<!-- Step 2: Training Parameters -->
<div class="mb-8">
<div class="flex items-center mb-4">
<div class="w-8 h-8 rounded-full bg-primary text-white flex items-center justify-center mr-3">2</div>
<h3 class="text-lg font-medium">Training Parameters</h3>
</div>
<div class="pl-11">
<div class="grid grid-cols-1 md:grid-cols-2 gap-4 mb-4">
<div>
<label class="block text-sm font-medium text-gray-700 mb-1">Learning Rate</label>
<input type="range" id="learningRate" min="0.00001" max="0.01" step="0.00001" value="0.0002" class="w-full">
<div class="flex justify-between text-xs text-gray-500 mt-1">
<span>1e-5</span>
<span id="learningRateValue">2e-4</span>
<span>1e-2</span>
</div>
</div>
<div>
<label class="block text-sm font-medium text-gray-700 mb-1">Batch Size</label>
<select id="batchSize" class="w-full p-2 border border-gray-300 rounded-lg">
<option value="1">1</option>
<option value="2">2</option>
<option value="4" selected>4</option>
<option value="8">8</option>
<option value="16">16</option>
</select>
</div>
</div>
<div class="grid grid-cols-1 md:grid-cols-2 gap-4">
<div>
<label class="block text-sm font-medium text-gray-700 mb-1">Epochs</label>
<input type="number" id="epochs" min="1" max="20" value="3" class="w-full p-2 border border-gray-300 rounded-lg">
</div>
<div>
<label class="block text-sm font-medium text-gray-700 mb-1">LoRA Rank</label>
<input type="number" id="loraRank" min="8" max="128" value="64" class="w-full p-2 border border-gray-300 rounded-lg">
</div>
</div>
<div class="mt-4">
<label class="flex items-center">
<input type="checkbox" id="useQLoRA" class="rounded text-primary">
<span class="ml-2 text-sm font-medium">Use QLoRA (4-bit quantization)</span>
</label>
</div>
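<!--
Note: QLoRA keeps the base model weights quantized to 4-bit (NF4) and trains only the
low-rank LoRA adapter matrices in higher precision, which is what typically lets a 3B
model fine-tune within the 8GB VRAM recommended in the left panel.
-->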
</div>
</div>
<!-- Step 3: Start Training -->
<div>
<div class="flex items-center mb-4">
<div class="w-8 h-8 rounded-full bg-primary text-white flex items-center justify-center mr-3">3</div>
<h3 class="text-lg font-medium">Start Fine-Tuning</h3>
</div>
<div class="pl-11">
<div class="flex flex-col sm:flex-row sm:items-center sm:justify-between">
<div class="mb-4 sm:mb-0">
<h4 class="font-medium">Output Model Name</h4>
<input type="text" id="modelName" placeholder="my-finetuned-llama" class="p-2 border border-gray-300 rounded-lg w-full sm:w-64">
</div>
<button id="startTrainingBtn" class="px-6 py-3 bg-secondary text-white rounded-lg hover:bg-emerald-700 transition flex items-center justify-center disabled:opacity-50" disabled>
<i class="fas fa-play mr-2"></i>Start Training
</button>
</div>
</div>
</div>
</div>
<!-- Training Output -->
<div id="trainingOutput" class="bg-white p-6 rounded-xl shadow-md hidden">
<div class="flex justify-between items-center mb-4">
<h3 class="text-lg font-semibold">Training Progress</h3>
<div class="flex items-center space-x-2">
<span id="trainingStatus" class="px-2 py-1 bg-blue-100 text-blue-800 text-xs rounded-full">Pending</span>
<button id="stopTrainingBtn" class="text-red-500 hover:text-red-700">
<i class="fas fa-stop"></i>
</button>
</div>
</div>
<div class="mb-4">
<div class="flex justify-between text-sm mb-1">
<span>Progress</span>
<span id="trainingProgressText">0%</span>
</div>
<div class="w-full bg-gray-200 rounded-full h-2.5">
<div id="trainingProgressBar" class="bg-primary h-2.5 rounded-full progress-bar" style="width: 0%"></div>
</div>
</div>
<div class="mb-4">
<div class="flex justify-between text-sm mb-1">
<span>Current Epoch</span>
<span id="currentEpoch">0/0</span>
</div>
</div>
<div class="mb-4">
<div class="flex justify-between text-sm mb-1">
<span>Loss</span>
<span id="currentLoss">-</span>
</div>
</div>
<div class="bg-gray-800 text-white p-3 rounded-lg overflow-auto max-h-60">
<pre id="trainingLog" class="text-sm code-block">Waiting for training to start...</pre>
</div>
</div>
<!-- Model Testing -->
<div id="modelTesting" class="bg-white p-6 rounded-xl shadow-md hidden">
<h3 class="text-lg font-semibold mb-4">Test Your Fine-Tuned Model</h3>
<div class="mb-4">
<label class="block text-sm font-medium text-gray-700 mb-1">Input Prompt</label>
<textarea id="testPrompt" rows="3" class="w-full p-3 border border-gray-300 rounded-lg" placeholder="Enter your prompt here..."></textarea>
</div>
<div class="flex justify-between">
<div>
<label class="flex items-center">
<input type="checkbox" id="useOriginalModel" class="rounded text-primary">
<span class="ml-2 text-sm font-medium">Compare with original model</span>
</label>
</div>
<button id="runTestBtn" class="px-4 py-2 bg-primary text-white rounded-lg hover:bg-indigo-700 transition">
<i class="fas fa-play mr-1"></i> Run Test
</button>
</div>
<div id="testResults" class="mt-4 space-y-4 hidden">
<div class="p-4 bg-gray-50 rounded-lg">
<div class="flex items-center mb-2">
<div class="w-6 h-6 rounded-full bg-primary text-white flex items-center justify-center mr-2">
<i class="fas fa-robot text-xs"></i>
</div>
<h4 class="font-medium">Fine-Tuned Model</h4>
</div>
<div id="finetunedOutput" class="text-gray-700"></div>
</div>
<div id="originalModelOutput" class="p-4 bg-gray-50 rounded-lg hidden">
<div class="flex items-center mb-2">
<div class="w-6 h-6 rounded-full bg-gray-500 text-white flex items-center justify-center mr-2">
<i class="fas fa-robot text-xs"></i>
</div>
<h4 class="font-medium">Original Model</h4>
</div>
<div id="originalOutput" class="text-gray-700"></div>
</div>
</div>
</div>
</div>
</div>
<!-- Footer -->
<footer class="mt-16 pt-8 border-t border-gray-200">
<div class="flex flex-col md:flex-row justify-between items-center">
<div class="mb-4 md:mb-0">
<p class="text-gray-600">Llama Fine-Tuner v1.0</p>
</div>
<div class="flex space-x-6">
<a href="#" class="text-gray-500 hover:text-primary"><i class="fab fa-github"></i></a>
<a href="#" class="text-gray-500 hover:text-primary"><i class="fab fa-twitter"></i></a>
<a href="#" class="text-gray-500 hover:text-primary"><i class="fab fa-discord"></i></a>
</div>
</div>
</footer>
</div>
<script>
// System status simulation
function updateSystemStatus() {
// Simulate memory usage
const memoryPercent = Math.floor(Math.random() * 30) + 30;
document.getElementById('memoryUsage').textContent = `${memoryPercent}% used`;
document.getElementById('memoryBar').style.width = `${memoryPercent}%`;
// Simulate GPU status
const gpuPercent = Math.floor(Math.random() * 20) + 10;
const gpuStatus = gpuPercent < 15 ? 'Idle' : 'Active';
document.getElementById('gpuStatus').textContent = `${gpuStatus} (${gpuPercent}%)`;
document.getElementById('gpuBar').style.width = `${gpuPercent}%`;
document.getElementById('gpuBar').className = gpuStatus === 'Active' ?
'bg-secondary h-2.5 rounded-full progress-bar' :
'bg-gray-400 h-2.5 rounded-full progress-bar';
}
// Update system status every 3 seconds
setInterval(updateSystemStatus, 3000);
updateSystemStatus();
// Dataset file handling
const datasetInput = document.getElementById('datasetInput');
const datasetInfo = document.getElementById('datasetInfo');
const fileName = document.getElementById('fileName');
const fileSize = document.getElementById('fileSize');
const removeDatasetBtn = document.getElementById('removeDatasetBtn');
const startTrainingBtn = document.getElementById('startTrainingBtn');
datasetInput.addEventListener('change', function(e) {
if (e.target.files.length > 0) {
const file = e.target.files[0];
fileName.textContent = file.name;
fileSize.textContent = formatFileSize(file.size);
datasetInfo.classList.remove('hidden');
checkStartButton();
}
});
removeDatasetBtn.addEventListener('click', function() {
datasetInput.value = '';
datasetInfo.classList.add('hidden');
checkStartButton();
});
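// The upload card above advertises drag & drop, but no drop handlers are wired up.
// Minimal wiring sketch (assumes the hidden file input sits inside the dashed drop-zone
// container, as in the markup above); dropped files are routed through the same
// 'change' handler used for manual selection.
const dropZone = datasetInput.closest('.border-dashed');
if (dropZone) {
dropZone.addEventListener('dragover', function(e) {
e.preventDefault(); // required so the browser allows the drop
});
dropZone.addEventListener('drop', function(e) {
e.preventDefault();
if (e.dataTransfer.files.length > 0) {
// Assigning a FileList to input.files is supported in current browsers
datasetInput.files = e.dataTransfer.files;
datasetInput.dispatchEvent(new Event('change'));
}
});
}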
function formatFileSize(bytes) {
if (bytes === 0) return '0 Bytes';
const k = 1024;
const sizes = ['Bytes', 'KB', 'MB', 'GB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
// Training parameters
const learningRate = document.getElementById('learningRate');
const learningRateValue = document.getElementById('learningRateValue');
learningRate.addEventListener('input', function() {
const value = parseFloat(learningRate.value);
learningRateValue.textContent = value.toExponential(2);
});
// Model loading
const loadModelBtn = document.getElementById('loadModelBtn');
let modelLoaded = false;
loadModelBtn.addEventListener('click', function() {
if (modelLoaded) return;
loadModelBtn.innerHTML = '<i class="fas fa-spinner animate-spin mr-2"></i> Loading...';
loadModelBtn.disabled = true;
// Simulate model loading
setTimeout(() => {
modelLoaded = true;
loadModelBtn.innerHTML = '<i class="fas fa-check-circle mr-2"></i> Model Loaded';
loadModelBtn.className = 'w-full py-3 bg-green-500 text-white rounded-lg flex items-center justify-center';
checkStartButton();
// Show success notification
showNotification('Model loaded successfully!', 'success');
}, 3000);
});
// Check if we can enable the start training button
function checkStartButton() {
startTrainingBtn.disabled = !(modelLoaded && datasetInput.files.length > 0);
}
// Training simulation
const trainingOutput = document.getElementById('trainingOutput');
const trainingProgressBar = document.getElementById('trainingProgressBar');
const trainingProgressText = document.getElementById('trainingProgressText');
const currentEpoch = document.getElementById('currentEpoch');
const currentLoss = document.getElementById('currentLoss');
const trainingLog = document.getElementById('trainingLog');
const trainingStatus = document.getElementById('trainingStatus');
const stopTrainingBtn = document.getElementById('stopTrainingBtn');
const modelTesting = document.getElementById('modelTesting');
// Handle to the active simulated training run (shared with the stop handler below)
let trainingInterval = null;
startTrainingBtn.addEventListener('click', function() {
// Get training parameters
const modelName = document.getElementById('modelName').value || 'my-finetuned-llama';
const epochs = parseInt(document.getElementById('epochs').value);
const batchSize = parseInt(document.getElementById('batchSize').value);
const lr = parseFloat(learningRate.value);
const loraRank = parseInt(document.getElementById('loraRank').value);
const useQLoRA = document.getElementById('useQLoRA').checked;
// Show training output
trainingOutput.classList.remove('hidden');
startTrainingBtn.disabled = true;
trainingStatus.textContent = 'Training';
trainingStatus.className = 'px-2 py-1 bg-blue-100 text-blue-800 text-xs rounded-full';
// Show command that would be run
let command = `python -m llama_finetuning \\\n`;
command += ` --model_path "Llama-3.2-3B-Instruct-f16.gguf" \\\n`;
command += ` --data_path "${datasetInput.files[0].name}" \\\n`;
command += ` --output_dir "./output/${modelName}" \\\n`;
command += ` --epochs ${epochs} \\\n`;
command += ` --batch_size ${batchSize} \\\n`;
command += ` --learning_rate ${lr.toExponential(5)} \\\n`;
command += ` --lora_rank ${loraRank}`;
if (useQLoRA) {
command += ` \\\n --use_qlora`;
}
trainingLog.textContent = `Starting fine-tuning with command:\n\n${command}\n\n`;
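// Note: the command shown above is only written to the log; this page does not launch
// any training process. In a real deployment the collected parameters would instead be
// submitted to a training backend, e.g. (the endpoint name and job-polling helper below
// are hypothetical):
//
//   fetch('/api/finetune', {
//     method: 'POST',
//     headers: { 'Content-Type': 'application/json' },
//     body: JSON.stringify({ modelName, epochs, batchSize, learningRate: lr, loraRank, useQLoRA })
//   }).then((res) => res.json()).then((job) => pollTrainingStatus(job.id));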
// Simulate training progress
let progress = 0;
let currentEpochCount = 0;
const totalSteps = epochs * 100; // Assuming 100 steps per epoch
trainingInterval = setInterval(() => {
progress += 1;
const percent = Math.min(Math.floor((progress / totalSteps) * 100), 100);
trainingProgressBar.style.width = `${percent}%`;
trainingProgressText.textContent = `${percent}%`;
// Update epoch counter every 100 steps
if (progress % 100 === 0) {
currentEpochCount += 1;
currentEpoch.textContent = `${currentEpochCount}/${epochs}`;
// Simulate loss decreasing (floored so it never goes negative on longer runs)
const loss = Math.max(2.5 - currentEpochCount * 0.7, 0.05).toFixed(4);
currentLoss.textContent = loss;
// Add to log
trainingLog.textContent += `[Epoch ${currentEpochCount}/${epochs}] Loss: ${loss}\n`;
// Scroll the overflow container (the <pre> itself is not the scrollable element)
trainingLog.parentElement.scrollTop = trainingLog.parentElement.scrollHeight;
}
// Training complete
if (progress >= totalSteps) {
clearInterval(trainingInterval);
trainingInterval = null;
trainingStatus.textContent = 'Completed';
trainingStatus.className = 'px-2 py-1 bg-green-100 text-green-800 text-xs rounded-full';
// Keep the last simulated epoch loss as the final value
// Show model testing section
modelTesting.classList.remove('hidden');
// Show success notification
showNotification('Fine-tuning completed successfully!', 'success');
// Update log
trainingLog.textContent += `\nTraining completed! Model saved to ./output/${modelName}\n`;
}
}, 100);
});
// Stop training button (registered once here, outside the start handler, so starting
// multiple runs does not stack duplicate click listeners)
stopTrainingBtn.addEventListener('click', function() {
if (trainingInterval === null) return;
clearInterval(trainingInterval);
trainingInterval = null;
trainingStatus.textContent = 'Stopped';
trainingStatus.className = 'px-2 py-1 bg-red-100 text-red-800 text-xs rounded-full';
startTrainingBtn.disabled = false;
// Show warning notification
showNotification('Training stopped by user', 'warning');
});
// Model testing
const runTestBtn = document.getElementById('runTestBtn');
const testPrompt = document.getElementById('testPrompt');
const testResults = document.getElementById('testResults');
const finetunedOutput = document.getElementById('finetunedOutput');
const originalModelOutput = document.getElementById('originalModelOutput');
const originalOutput = document.getElementById('originalOutput');
const useOriginalModel = document.getElementById('useOriginalModel');
runTestBtn.addEventListener('click', function() {
if (!testPrompt.value.trim()) {
showNotification('Please enter a test prompt', 'error');
return;
}
runTestBtn.innerHTML = '<i class="fas fa-spinner animate-spin mr-1"></i> Running...';
runTestBtn.disabled = true;
// Show results section
testResults.classList.remove('hidden');
finetunedOutput.innerHTML = '<div class="animate-pulse">Generating response...</div>';
if (useOriginalModel.checked) {
originalModelOutput.classList.remove('hidden');
originalOutput.innerHTML = '<div class="animate-pulse">Generating response from original model...</div>';
}
// Simulate API call delay
setTimeout(() => {
// Escape the user's prompt before inserting it into innerHTML (avoids HTML injection)
const escapeHtml = (str) => str.replace(/[&<>"']/g, (c) => ({ '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' }[c]));
const safePrompt = escapeHtml(testPrompt.value);
// Generate fine-tuned model response
finetunedOutput.innerHTML = `
<p class="mb-2">${safePrompt}</p>
<p class="text-gray-600 pl-4 border-l-2 border-primary">This is a simulated response from your fine-tuned Llama model. In a real implementation, this would be the actual output generated by your model after processing the input prompt.</p>
`;
// Generate original model response if selected
if (useOriginalModel.checked) {
originalOutput.innerHTML = `
<p class="mb-2">${safePrompt}</p>
<p class="text-gray-600 pl-4 border-l-2 border-gray-400">This is a simulated response from the original Llama model. Notice how the fine-tuned version might provide more specific or tailored responses based on your training data.</p>
`;
}
runTestBtn.innerHTML = '<i class="fas fa-play mr-1"></i> Run Test';
runTestBtn.disabled = false;
}, 2000);
});
// Notification function
function showNotification(message, type) {
const notification = document.createElement('div');
let bgColor = 'bg-blue-500';
if (type === 'success') bgColor = 'bg-green-500';
else if (type === 'warning') bgColor = 'bg-yellow-500';
else if (type === 'error') bgColor = 'bg-red-500';
notification.className = `fixed bottom-4 right-4 ${bgColor} text-white px-4 py-2 rounded-lg shadow-lg flex items-center`;
notification.innerHTML = `
<i class="fas ${type === 'success' ? 'fa-check-circle' :
type === 'warning' ? 'fa-exclamation-triangle' :
type === 'error' ? 'fa-times-circle' : 'fa-info-circle'} mr-2"></i>
${message}
`;
document.body.appendChild(notification);
setTimeout(() => {
notification.classList.add('opacity-0', 'transition-opacity', 'duration-300');
setTimeout(() => notification.remove(), 300);
}, 3000);
}
</script>
<p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=st3phan3m/llama-fine-tuner" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body>
</html>