Update app.py
app.py CHANGED
@@ -341,7 +341,19 @@ def generate_subtitles(audio_file_path, prompt, language, auto_detect_language,
 
 
 
-
+
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        # Groq API UI
+        Inference by Groq
+        Hugging Face Space by [Nick088](https://linktr.ee/Nick088)
+        """
+    )
+    with gr.Tabs():
+        with gr.TabItem("select option here:"):
+            with gr.Tabs():
+                with gr.TabItem("LLMs"):
                     with gr.Column():
                         model = gr.Dropdown(
                             choices=[
@@ -394,3 +406,95 @@ with gr.TabItem("LLMs"):
                             ],
                         )
                         model.change(update_max_tokens, inputs=[model], outputs=max_tokens)
+
+                with gr.TabItem("Speech To Text"):
+                    gr.Markdown("Speech to Text coming soon!")
+                with gr.TabItem("Transcription"):
+                    gr.Markdown("Transcript audio from files to text!")
+                    with gr.Column():
+                        audio_input = gr.File(
+                            type="filepath", label="Upload File containing Audio", file_types=[f".{ext}" for ext in ALLOWED_FILE_EXTENSIONS]
+                        )
+                        model_choice_transcribe = gr.Dropdown(
+                            choices=["whisper-large-v3"],  # Only include 'whisper-large-v3'
+                            value="whisper-large-v3",
+                            label="Model",
+                        )
+                        transcribe_prompt = gr.Textbox(
+                            label="Prompt (Optional)",
+                            info="Specify any context or spelling corrections.",
+                        )
+                        language = gr.Dropdown(
+                            choices=[(lang, code) for lang, code in LANGUAGE_CODES.items()],
+                            value="en",
+                            label="Language",
+                        )
+                        auto_detect_language = gr.Checkbox(label="Auto Detect Language")
+                        transcribe_button = gr.Button("Transcribe")
+                        transcription_output = gr.Textbox(label="Transcription")
+                        transcribe_button.click(
+                            transcribe_audio,
+                            inputs=[audio_input, transcribe_prompt, language, auto_detect_language, model_choice_transcribe],
+                            outputs=transcription_output,
+                        )
+                with gr.TabItem("Translation"):
+                    gr.Markdown("Transcript audio from files and translate them to English text!")
+                    with gr.Column():
+                        audio_input_translate = gr.File(
+                            type="filepath", label="Upload File containing Audio", file_types=[f".{ext}" for ext in ALLOWED_FILE_EXTENSIONS]
+                        )
+                        model_choice_translate = gr.Dropdown(
+                            choices=["whisper-large-v3"],  # Only include 'whisper-large-v3'
+                            value="whisper-large-v3",
+                            label="Model",
+                        )
+                        translate_prompt = gr.Textbox(
+                            label="Prompt (Optional)",
+                            info="Specify any context or spelling corrections.",
+                        )
+                        translate_button = gr.Button("Translate")
+                        translation_output = gr.Textbox(label="Translation")
+                        translate_button.click(
+                            translate_audio,
+                            inputs=[audio_input_translate, translate_prompt, model_choice_translate],
+                            outputs=translation_output,
+                        )
+                with gr.TabItem("Subtitle Maker"):
+                    with gr.Column():
+                        audio_input_subtitles = gr.File(
+                            label="Upload Audio/Video",
+                            file_types=[f".{ext}" for ext in ALLOWED_FILE_EXTENSIONS],
+                        )
+                        model_choice_subtitles = gr.Dropdown(
+                            choices=["whisper-large-v3"],  # Only include 'whisper-large-v3'
+                            value="whisper-large-v3",
+                            label="Model",
+                        )
+                        transcribe_prompt_subtitles = gr.Textbox(
+                            label="Prompt (Optional)",
+                            info="Specify any context or spelling corrections.",
+                        )
+                        language_subtitles = gr.Dropdown(
+                            choices=[(lang, code) for lang, code in LANGUAGE_CODES.items()],
+                            value="en",
+                            label="Language",
+                        )
+                        auto_detect_language_subtitles = gr.Checkbox(
+                            label="Auto Detect Language"
+                        )
+                        transcribe_button_subtitles = gr.Button("Generate Subtitles")
+                        srt_output = gr.File(label="SRT Output File")
+                        video_output = gr.File(label="Output Video with Subtitles")
+                        transcribe_button_subtitles.click(
+                            generate_subtitles,
+                            inputs=[
+                                audio_input_subtitles,
+                                transcribe_prompt_subtitles,
+                                language_subtitles,
+                                auto_detect_language_subtitles,
+                                model_choice_subtitles,
+                            ],
+                            outputs=[srt_output, video_output, gr.Textbox(label="Error")]
+                        )
+
+demo.launch()
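Note: this commit only adds the Gradio UI wiring; the transcribe_audio, translate_audio, and generate_subtitles callbacks it binds are defined earlier in app.py and are unchanged here. For orientation, below is a minimal sketch of what a transcribe_audio handler of this shape could look like against the Groq Python SDK. The client setup, keyword handling, and return value are assumptions for illustration, not the Space's actual code.

# Minimal sketch only, not part of this commit: a hypothetical transcribe_audio
# matching the inputs bound above (file path, prompt, language, auto-detect flag, model).
import os

from groq import Groq

client = Groq(api_key=os.environ["GROQ_API_KEY"])  # assumes the key is set as a Space secret


def transcribe_audio(audio_file_path, prompt, language, auto_detect_language, model):
    kwargs = {"model": model, "response_format": "json"}
    if prompt:
        kwargs["prompt"] = prompt
    if not auto_detect_language:
        kwargs["language"] = language  # only pass a language hint when auto-detect is off
    with open(audio_file_path, "rb") as f:
        result = client.audio.transcriptions.create(
            file=(os.path.basename(audio_file_path), f.read()), **kwargs
        )
    return result.text  # fills the "Transcription" textbox

A translate_audio handler would presumably follow the same pattern through the SDK's audio translations endpoint, which returns English text regardless of the source language.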