	Create app.py
app.py ADDED
import gradio as gr
import google.generativeai as genai
import base64
from PIL import Image
import io
import time

def encode_image(image):
    if isinstance(image, dict) and 'path' in image:
        image_path = image['path']
    elif isinstance(image, str):
        image_path = image
    else:
        raise ValueError("Unsupported image format")

    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

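# bot_streaming is the gr.ChatInterface callback: it rebuilds the Gemini chat
# history from Gradio's (user, assistant) pairs, sends the new message, and
# yields the accumulated text as the response streams in.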
def bot_streaming(message, history, api_key, model, system_prompt, temperature, max_tokens, top_p, top_k, harassment, hate_speech, sexually_explicit, dangerous_content):
    genai.configure(api_key=api_key)

    messages = []
    images = []

    # Gemini chat history only accepts "user" and "model" roles, so the system
    # prompt is not appended here; it is applied via system_instruction when the
    # model is created below.

    for msg in history:
        if isinstance(msg[0], tuple):
            # This is a message with an image
            image, text = msg[0]
            base64_image = encode_image(image)
            messages.append({
                "role": "user",
                "parts": [
                    {"text": text},
                    {"inline_data": {"mime_type": "image/jpeg", "data": base64_image}}
                ]
            })
            images.append(Image.open(image['path'] if isinstance(image, dict) else image).convert("RGB"))
        else:
            # This is a text-only message
            messages.append({"role": "user", "parts": [{"text": str(msg[0])}]})

        # Add the model's response
        messages.append({"role": "model", "parts": [{"text": str(msg[1])}]})

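    # With multimodal=True, gr.ChatInterface delivers the new message as a dict
    # with "text" and "files" keys; a plain string is handled as well.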
    # Handle the current message
    if isinstance(message, dict) and "files" in message and message["files"]:
        # This is a message with an image
        image = message["files"][0]
        base64_image = encode_image(image)
        content = [
            {"text": message["text"]},
            {"inline_data": {"mime_type": "image/jpeg", "data": base64_image}}
        ]
        images.append(Image.open(image['path'] if isinstance(image, dict) else image).convert("RGB"))
    else:
        # This is a text-only message
        content = [{"text": message["text"] if isinstance(message, dict) else str(message)}]

    # The current message is delivered through chat.send_message() below, so it is
    # not appended to the history as well; doing both would send it to the model twice.

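    # Build the model and map the UI controls to per-category safety settings
    # and generation parameters, then stream the reply.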
    # The system prompt is supplied as system_instruction rather than as a history
    # entry (requires a reasonably recent google-generativeai release).
    gen_model = genai.GenerativeModel(model_name=model, system_instruction=system_prompt or None)

    safety_settings = [
        {"category": genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT, "threshold": getattr(genai.types.HarmBlockThreshold, harassment)},
        {"category": genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, "threshold": getattr(genai.types.HarmBlockThreshold, hate_speech)},
        {"category": genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, "threshold": getattr(genai.types.HarmBlockThreshold, sexually_explicit)},
        {"category": genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, "threshold": getattr(genai.types.HarmBlockThreshold, dangerous_content)}
    ]

    chat = gen_model.start_chat(history=messages)

    response = chat.send_message(
        content,
        stream=True,
        generation_config=genai.types.GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k
        ),
        safety_settings=safety_settings
    )

    # Accumulate streamed chunks and yield the partial answer so the UI updates live.
    buffer = ""
    for chunk in response:
        if hasattr(chunk, 'text') and chunk.text:
            buffer += chunk.text
            yield buffer
            time.sleep(0.01)
        if hasattr(chunk, 'finish_reason') and chunk.finish_reason:
            break

    if buffer:
        yield buffer

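# --- Gradio UI: API key, model picker, sampling/safety controls, and the chat panel ---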
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🤖 Google Gemini API Multimodal Chat

    Chat with Google Gemini AI models. Supports text and image interactions.

    ## 🚀 Quick Start:
    1. Enter your Google AI API key
    2. Choose a model
    3. Start chatting!

    Enjoy your AI-powered conversation!
    """)

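    # Two-column layout: settings on the left, the chat interface on the right.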
    with gr.Row():
        with gr.Column(scale=1):
            api_key = gr.Textbox(label="API Key", type="password", placeholder="Enter your Google AI API key")
            model = gr.Dropdown(
                label="Select Model",
                choices=[
                    "gemini-1.5-pro",
                    "gemini-1.5-pro-001",
                    "gemini-1.5-pro-vision-latest",
                    "gemini-1.5-pro-latest",
                    "gemini-1.5-flash",
                    "gemini-1.5-flash-002",
                    "gemini-1.0-pro",
                    "gemini-1.0-pro-001",
                    "gemini-1.0-pro-vision-latest",
                    "gemini-1.0-pro-latest"
                ],
                value="gemini-1.5-pro",
            )
            system_prompt = gr.Textbox(label="System Prompt", placeholder="Enter a system prompt (optional)")

            with gr.Accordion("Common Settings", open=False):
                temperature = gr.Slider(minimum=0, maximum=1, value=0.7, step=0.1, label="Temperature")
                max_tokens = gr.Slider(minimum=1, maximum=2048, value=1000, step=1, label="Max Tokens")
                top_p = gr.Slider(minimum=0, maximum=1, value=0.95, step=0.01, label="Top P")
                top_k = gr.Slider(minimum=1, maximum=40, value=40, step=1, label="Top K")

            with gr.Accordion("Safety Settings", open=False):
                harassment = gr.Dropdown(label="Harassment", choices=["BLOCK_NONE", "BLOCK_ONLY_HIGH", "BLOCK_MEDIUM_AND_ABOVE", "BLOCK_LOW_AND_ABOVE"], value="BLOCK_MEDIUM_AND_ABOVE")
                hate_speech = gr.Dropdown(label="Hate Speech", choices=["BLOCK_NONE", "BLOCK_ONLY_HIGH", "BLOCK_MEDIUM_AND_ABOVE", "BLOCK_LOW_AND_ABOVE"], value="BLOCK_MEDIUM_AND_ABOVE")
                sexually_explicit = gr.Dropdown(label="Sexually Explicit", choices=["BLOCK_NONE", "BLOCK_ONLY_HIGH", "BLOCK_MEDIUM_AND_ABOVE", "BLOCK_LOW_AND_ABOVE"], value="BLOCK_MEDIUM_AND_ABOVE")
                dangerous_content = gr.Dropdown(label="Dangerous Content", choices=["BLOCK_NONE", "BLOCK_ONLY_HIGH", "BLOCK_MEDIUM_AND_ABOVE", "BLOCK_LOW_AND_ABOVE"], value="BLOCK_MEDIUM_AND_ABOVE")

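        # Right-hand column: the chat panel; the controls above are forwarded to
        # bot_streaming via additional_inputs.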
        with gr.Column(scale=2):
            chatbot = gr.ChatInterface(
                fn=bot_streaming,
                additional_inputs=[
                    api_key, model, system_prompt, temperature, max_tokens, top_p, top_k,
                    harassment, hate_speech, sexually_explicit, dangerous_content
                ],
                title="💬 Chat with Google Gemini AI",
                description="Upload images or type your message to start the conversation.",
                retry_btn="🔄 Retry",
                undo_btn="↩️ Undo",
                clear_btn="🗑️ Clear",
                multimodal=True,
                cache_examples=False,
                fill_height=True,
            )

    gr.Markdown("""
    ## 🔧 Settings:
    - Adjust basic parameters in the "Common Settings" section
    - Fine-tune safety options in the "Safety Settings" section
    - Upload images for multimodal interactions
    """)

demo.launch(debug=True, share=True)
