import os
import random
import subprocess

import numpy as np
import gradio as gr
from groq import Groq

client = Groq(api_key=os.environ.get("Groq_Api_Key"))


def create_history_messages(history):
    """Convert Gradio chat history into interleaved user/assistant messages."""
    history_messages = []
    for user_message, assistant_message in history:
        history_messages.append({"role": "user", "content": user_message})
        history_messages.append({"role": "assistant", "content": assistant_message})
    return history_messages


MAX_SEED = np.iinfo(np.int32).max


def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
    messages = create_history_messages(history)
    messages.append({"role": "user", "content": prompt})
    print(messages)

    # A seed of 0 means "pick a random seed"
    if seed == 0:
        seed = random.randint(1, MAX_SEED)

    stream = client.chat.completions.create(
        messages=messages,
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        seed=seed,
        stop=None,
        stream=True,
    )

    # Stream partial responses back to the chat UI as chunks arrive
    response = ""
    for chunk in stream:
        delta_content = chunk.choices[0].delta.content
        if delta_content is not None:
            response += delta_content
            yield response


ALLOWED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
MAX_FILE_SIZE_MB = 25


def check_file(audio_file_path):
    """Check file extension and size, and downsample with ffmpeg if needed."""
    if not audio_file_path:
        return None, gr.Error("Please upload an audio file.")

    file_size_mb = os.path.getsize(audio_file_path) / (1024 * 1024)
    file_extension = audio_file_path.split(".")[-1].lower()

    if file_extension not in ALLOWED_FILE_EXTENSIONS:
        return (
            None,
            gr.Error(
                f"Invalid file type (.{file_extension}). Allowed types: {', '.join(ALLOWED_FILE_EXTENSIONS)}"
            ),
        )

    if file_size_mb > MAX_FILE_SIZE_MB:
        gr.Warning(
            f"File size too large ({file_size_mb:.2f} MB). Attempting to downsample to 16kHz. Maximum allowed: {MAX_FILE_SIZE_MB} MB"
        )

        output_file_path = os.path.splitext(audio_file_path)[0] + "_downsampled.wav"
        try:
            # Downsample to 16 kHz mono, keeping only the audio stream
            subprocess.run(
                [
                    "ffmpeg",
                    "-i",
                    audio_file_path,
                    "-ar",
                    "16000",
                    "-ac",
                    "1",
                    "-map",
                    "0:a",
                    output_file_path,
                ],
                check=True,
            )

            # Check size again after downsampling
            downsampled_size_mb = os.path.getsize(output_file_path) / (1024 * 1024)
            if downsampled_size_mb > MAX_FILE_SIZE_MB:
                return (
                    None,
                    gr.Error(
                        f"File size still too large after downsampling ({downsampled_size_mb:.2f} MB). Maximum allowed: {MAX_FILE_SIZE_MB} MB"
                    ),
                )

            return output_file_path, None
        except subprocess.CalledProcessError as e:
            return None, gr.Error(f"Error during downsampling: {e}")

    return audio_file_path, None


def transcribe_audio(audio_file_path, prompt, language):
    # Validate and, if needed, downsample the file first
    processed_path, error_message = check_file(audio_file_path)

    # If the file check failed, surface the error
    if error_message:
        return error_message

    with open(processed_path, "rb") as file:
        transcription = client.audio.transcriptions.create(
            file=(os.path.basename(processed_path), file.read()),
            model="whisper-large-v3",
            prompt=prompt,
            response_format="json",
            language=language,
            temperature=0.0,
        )
    return transcription.text


def translate_audio(audio_file_path, prompt):
    # Validate and, if needed, downsample the file first
    processed_path, error_message = check_file(audio_file_path)

    # If the file check failed, surface the error
    if error_message:
        return error_message

    with open(processed_path, "rb") as file:
        translation = client.audio.translations.create(
            file=(os.path.basename(processed_path), file.read()),
            model="whisper-large-v3",
            prompt=prompt,
            response_format="json",
            temperature=0.0,
        )
    return translation.text


with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Groq API UI
        Inference by Groq.
        Hugging Face Space by [Nick088](https://linktr.ee/Nick088)
        """
    )
    with gr.Tabs():
        with gr.TabItem("LLMs"):
            with gr.Row():
                with gr.Column():
                    model = gr.Dropdown(
                        choices=[
                            "llama3-70b-8192",
                            "llama3-8b-8192",
                            "mixtral-8x7b-32768",
                            "gemma-7b-it",
                            "gemma2-9b-it",
                        ],
                        value="llama3-70b-8192",
                        label="Model",
                    )
                    temperature = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        step=0.01,
                        value=0.5,
                        label="Temperature",
                        info="Controls diversity of the generated text. Lower is more deterministic, higher is more creative.",
                    )
                    max_tokens = gr.Slider(
                        minimum=1,
                        maximum=32192,
                        step=1,
                        value=4096,
                        label="Max Tokens",
                        info="The maximum number of tokens the model can generate in a single response. Maximums: 8k for gemma 7b it, gemma2 9b it, llama3 8b & 70b; 32k for mixtral 8x7b.",
                    )
                    top_p = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        step=0.01,
                        value=0.5,
                        label="Top P",
                        info="Nucleus sampling: the model only considers the most probable next tokens whose cumulative probability adds up to p.",
                    )
                    seed = gr.Number(
                        precision=0,
                        value=42,
                        label="Seed",
                        info="A starting point to initiate generation; use 0 for a random seed.",
                    )
                with gr.Column():
                    chatbot_ui = gr.ChatInterface(
                        fn=generate_response,
                        chatbot=None,
                        additional_inputs=[model, temperature, max_tokens, top_p, seed],
                    )
        with gr.TabItem("Whisper"):
            with gr.Tabs():
                with gr.TabItem("Transcription"):
                    with gr.Row():
                        audio_input = gr.Audio(
                            type="filepath", label="Upload Audio"
                        )
                        transcribe_prompt = gr.Textbox(
                            label="Prompt (Optional)",
                            info="Specify any context or spelling corrections.",
                        )
                        language = gr.Dropdown(
                            choices=["en", "es", "fr", "de", "zh", "ja", "ko"],  # Add more language codes as needed
                            value="en",
                            label="Language",
                        )
                    transcribe_button = gr.Button("Transcribe")
                    transcription_output = gr.Textbox(label="Transcription")
                    transcribe_button.click(
                        transcribe_audio,
                        inputs=[audio_input, transcribe_prompt, language],
                        outputs=transcription_output,
                    )
                with gr.TabItem("Translation"):
                    with gr.Row():
                        audio_input_translate = gr.Audio(
                            type="filepath", label="Upload Audio"
                        )
                        translate_prompt = gr.Textbox(
                            label="Prompt (Optional)",
                            info="Specify any context or spelling corrections.",
                        )
                    translate_button = gr.Button("Translate")
                    translation_output = gr.Textbox(label="Translation")
                    translate_button.click(
                        translate_audio,
                        inputs=[audio_input_translate, translate_prompt],
                        outputs=translation_output,
                    )

demo.launch()