Update index.html

index.html (CHANGED, +54 -7)
@@ -371,8 +371,8 @@
         </div>

         <div class="spaces-info">
-            <h3>🚀
-            <p><strong>Model:</strong> bigcode/starcoder2-3b (specialized coding AI) • <strong>Strategy:</strong> Single mutation per generation
+            <h3>🚀 HuggingFace Spaces Deployment</h3>
+            <p><strong>Model:</strong> bigcode/starcoder2-3b (specialized coding AI) • <strong>Strategy:</strong> Single mutation per generation • <strong>Auth:</strong> Add your HF token above or get one free at <a href="https://huggingface.co/settings/tokens" target="_blank" style="color: #667eea;">huggingface.co/settings/tokens</a></p>
         </div>

         <div class="container">
@@ -381,8 +381,19 @@
             <h2>🎯 Problem Configuration</h2>

             <div class="model-info">
-                <h4>🤖 AI Model
-                <
+                <h4>🤖 AI Model Selection</h4>
+                <select id="modelSelect" style="width: 100%; padding: 8px; border: 2px solid #e0e0e0; border-radius: 6px; margin-bottom: 10px;">
+                    <option value="starcoder2-3b">StarCoder2-3B (Recommended)</option>
+                    <option value="starcoder2-7b">StarCoder2-7B (Better quality)</option>
+                    <option value="codellama-7b">CodeLlama-7B (Alternative)</option>
+                    <option value="codebert-base">CodeBERT-Base (Fallback)</option>
+                </select>
+                <p style="margin-bottom: 10px;">Specialized coding models by BigCode and Meta. Excellent at JavaScript generation.</p>
+                <div style="margin-top: 10px;">
+                    <label for="hfToken" style="font-weight: bold; display: block; margin-bottom: 5px;">HuggingFace Token:</label>
+                    <input type="password" id="hfToken" placeholder="hf_xxxxx... (required for API access)" style="width: 100%; padding: 8px; border: 2px solid #e0e0e0; border-radius: 6px;">
+                    <small style="color: #666;">Get a free token at <a href="https://huggingface.co/settings/tokens" target="_blank">huggingface.co/settings/tokens</a></small>
+                </div>
             </div>

             <div class="problem-selector">
@@ -480,8 +491,16 @@
    </div>

    <script>
-        //
-        const
+        // Model options for different scenarios
+        const MODELS = {
+            'starcoder2-3b': 'https://api-inference.huggingface.co/models/bigcode/starcoder2-3b',
+            'starcoder2-7b': 'https://api-inference.huggingface.co/models/bigcode/starcoder2-7b',
+            'codellama-7b': 'https://api-inference.huggingface.co/models/codellama/CodeLlama-7b-Instruct-hf',
+            'codebert-base': 'https://api-inference.huggingface.co/models/microsoft/codebert-base'
+        };
+
+        // Default model
+        let MODEL_URL = MODELS['starcoder2-3b'];

        // Problem definitions optimized for StarCoder2
        const PROBLEMS = {
@@ -654,6 +673,12 @@
        startBtn.addEventListener('click', startEvolution);
        stopBtn.addEventListener('click', stopEvolution);

+        // Model selector
+        document.getElementById('modelSelect').addEventListener('change', (e) => {
+            MODEL_URL = MODELS[e.target.value];
+            log(`Model changed to: ${e.target.options[e.target.selectedIndex].text}`);
+        });
+
        // Update estimated API calls when generations change
        document.getElementById('maxGenerations').addEventListener('input', (e) => {
            estimatedCalls.textContent = e.target.value;
@@ -695,6 +720,18 @@
                return;
            }

+            // Check for HF token
+            const hfToken = document.getElementById('hfToken').value.trim();
+            if (!hfToken) {
+                alert('Please add your HuggingFace token above. Get a free one at: https://huggingface.co/settings/tokens');
+                return;
+            }
+
+            if (!hfToken.startsWith('hf_')) {
+                alert('Invalid token format. HuggingFace tokens start with "hf_"');
+                return;
+            }
+
            isRunning = true;
            currentGeneration = 0;
            maxGenerations = parseInt(document.getElementById('maxGenerations').value);
@@ -878,11 +915,15 @@ Write an optimized version of the JavaScript function:`;
            const maxTokens = parseInt(document.getElementById('maxTokens').value);
            const temperature = parseFloat(document.getElementById('temperature').value);

+            // Get HF token from user input
+            const hfToken = document.getElementById('hfToken').value.trim();
+
            try {
                const response = await fetch(MODEL_URL, {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json',
+                        'Authorization': `Bearer ${hfToken}`
                    },
                    body: JSON.stringify({
                        inputs: prompt,
@@ -897,11 +938,17 @@ Write an optimized version of the JavaScript function:`;
                });

                if (!response.ok) {
+                    if (response.status === 401) {
+                        throw new Error('Authentication failed. Please add a valid HuggingFace token above.');
+                    }
                    if (response.status === 503) {
                        log('⚠️ Model loading, retrying in 10s...');
                        await new Promise(resolve => setTimeout(resolve, 10000));
                        return await callStarCoder(prompt);
                    }
+                    if (response.status === 429) {
+                        throw new Error('Rate limit exceeded. Please wait and try again, or use your own HF token.');
+                    }
                    throw new Error(`API request failed: ${response.status} ${response.statusText}`);
                }

@@ -1060,4 +1107,4 @@ Write an optimized version of the JavaScript function:`;
            log('');
    </script>
</body>
-</html>
+</html>
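Taken together, the hunks above add a model picker (the MODELS map and MODEL_URL), a required HuggingFace token field, a Bearer Authorization header on the Inference API request, and explicit handling for 401 and 429 responses alongside the existing 503 retry. The following is a minimal sketch of how the patched request flow reads once those pieces are combined. It is assembled from the hunks rather than copied from index.html: it omits the file's surrounding try/catch, and the request-body fields beyond inputs (the parameters object) and the final response handling fall outside the shown hunks, so they are assumptions here.

// Sketch only: assembled from the diff hunks above. The `parameters` field names
// and the final return value are assumptions, since those lines are not in the diff.
async function callStarCoder(prompt) {
    const maxTokens = parseInt(document.getElementById('maxTokens').value);
    const temperature = parseFloat(document.getElementById('temperature').value);

    // New in this commit: read the user-supplied HuggingFace token and send it as a Bearer header.
    const hfToken = document.getElementById('hfToken').value.trim();

    const response = await fetch(MODEL_URL, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${hfToken}`
        },
        body: JSON.stringify({
            inputs: prompt,
            parameters: { max_new_tokens: maxTokens, temperature: temperature } // assumed field names
        })
    });

    if (!response.ok) {
        // New: a missing or invalid token now fails fast with a clear message.
        if (response.status === 401) {
            throw new Error('Authentication failed. Please add a valid HuggingFace token above.');
        }
        // Existing behavior: the Inference API returns 503 while the model loads; wait 10s and retry.
        if (response.status === 503) {
            log('⚠️ Model loading, retrying in 10s...');
            await new Promise(resolve => setTimeout(resolve, 10000));
            return await callStarCoder(prompt);
        }
        // New: rate limiting gets its own message.
        if (response.status === 429) {
            throw new Error('Rate limit exceeded. Please wait and try again, or use your own HF token.');
        }
        throw new Error(`API request failed: ${response.status} ${response.statusText}`);
    }

    // The Inference API typically returns [{ generated_text: "..." }]; the file's actual
    // parsing is outside these hunks, so returning the raw JSON here is a placeholder.
    return await response.json();
}

Note that the 503 branch retries by calling callStarCoder recursively with no retry cap, exactly as in the hunk, so a model that never finishes loading would keep retrying every 10 seconds.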