"""Streamlit app that generates text with Google's Gemma-7b model.

Fixes over the original:
- removed duplicate `streamlit` / `pipeline` imports;
- the pipeline is cached with `@st.cache_resource` so the 7B model is not
  reloaded on every Streamlit rerun;
- the Hugging Face token read from the environment is actually passed to
  `pipeline()` (Gemma is a gated model and needs it for download);
- `do_sample=True` is passed so the temperature slider has an effect
  (greedy decoding ignores `temperature`).
"""

import os

import streamlit as st
from transformers import pipeline

# Hugging Face API token; required to download gated models such as Gemma.
HUGGING_FACE_API_TOKEN = os.getenv('hugging_face_api_token')


@st.cache_resource
def load_generator():
    """Build the text-generation pipeline once and cache it across reruns.

    Returns:
        A `transformers` text-generation pipeline for google/gemma-7b.
    """
    return pipeline(
        "text-generation",
        model="google/gemma-7b",
        tokenizer="google/gemma-7b",
        token=HUGGING_FACE_API_TOKEN,
    )


generator = load_generator()

# --- UI -------------------------------------------------------------------

st.title("Gemma Text Generation App")
st.write("This app generates text based on the input prompt using the Gemma model.")

# Text input for the user prompt.
prompt = st.text_input("Enter your prompt:", "Once upon a time,")

# User controls for output length and creativity.
max_length = st.slider("Select the maximum output length:", min_value=50, max_value=500, value=100)
temperature = st.slider("Adjust the creativity level (temperature):", min_value=0.1, max_value=1.0, value=0.7)

# Generate button triggers text generation.
if st.button("Generate Text"):
    with st.spinner('Generating text...'):
        try:
            # `do_sample=True` enables sampling so `temperature` is honored.
            generated_text = generator(
                prompt,
                max_length=max_length,
                temperature=temperature,
                do_sample=True,
            )[0]['generated_text']
        except Exception as e:
            st.error(f"Error generating text: {str(e)}")
        else:
            st.success('Text generation complete!')
            st.markdown("### Generated Text:")
            st.markdown(generated_text)

# About section.
with st.expander("About"):
    st.write(""" The Gemma Text Generation app uses the powerful Gemma-7b model from Google to generate text. Adjust the sliders to change the length and creativity of the output. """)