import streamlit as st
from transformers import AutoImageProcessor, AutoModelForImageClassification
from groq import Groq
import torch
from PIL import Image
from gtts import gTTS
import os
import re
import tempfile

# Set page config for a wide, user-friendly layout
st.set_page_config(page_title="Farmer's Friend", page_icon="🌱", layout="wide", initial_sidebar_state="expanded")

# Initialize Groq client using environment variable (set as a secret in Hugging Face Spaces)
groq_api_key = os.getenv("GROQ_API_KEY")
if not groq_api_key:
    st.error("Groq API key not found. Please set the GROQ_API_KEY environment variable in Hugging Face Spaces secrets.")
    st.stop()
groq_client = Groq(api_key=groq_api_key)

# Load and cache the model and processor
@st.cache_resource
def load_model_and_processor():
    image_processor = AutoImageProcessor.from_pretrained("wambugu71/crop_leaf_diseases_vit", use_fast=True)
    image_model = AutoModelForImageClassification.from_pretrained("wambugu71/crop_leaf_diseases_vit")
    image_model.eval()  # Set model to evaluation mode for faster inference
    if torch.cuda.is_available():
        image_model.cuda()  # Use GPU if available
    return image_processor, image_model

image_processor, image_model = load_model_and_processor()

# Check for wide-field image
def is_wide_field_image(image):
    width, height = image.size
    return width > 1000 or height > 1000

# Generate audio in English using a temporary file.
# Not cached: the function returns a temp-file path that is deleted after playback,
# so a cached path would point at a file that no longer exists.
def text_to_audio(text):
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            tts = gTTS(text=text, lang='en', slow=False, tld='com')
            tts.save(tmp_file.name)
            return tmp_file.name
    except Exception as e:
        st.error(f"Error generating audio: {e}")
        return None

# Smart voice-over generation (removing special characters)
def generate_voice_over(predicted_label, recommendation):
    clean_label = re.sub(r'[_*]', ' ', predicted_label).strip()
    if "healthy" in clean_label.lower():
        voice_text = "Your crop looks healthy. Keep up the good work!"
    else:
        voice_text = f"Issue detected: {clean_label}. Recommendation: {recommendation}"
    return text_to_audio(voice_text)

# Analyze the crop (no caching due to PIL.Image.Image serialization issues)
def analyze_crop(image, crop_type):
    if image is None:
        msg = "Please upload a photo of your crop leaf."
        return msg, text_to_audio(msg)
    if is_wide_field_image(image):
        msg = "This image is too large. Please upload a close-up photo of the leaf."
        return msg, text_to_audio(msg)

    # Convert image to RGB and resize (optimized)
    image = image.convert("RGB")
    image = image.resize((224, 224), Image.Resampling.LANCZOS)
    inputs = image_processor(images=image, return_tensors="pt")

    # Move inputs to GPU if available
    if torch.cuda.is_available():
        inputs = {k: v.cuda() for k, v in inputs.items()}

    # Run inference
    with torch.no_grad():
        outputs = image_model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    predicted_label = image_model.config.id2label[predicted_class_idx]

    # Prepare result
    if "healthy" in predicted_label.lower():
        issue = "Healthy"
        recommendation = "Your crop looks healthy. Keep up the good work!"
    else:
        issue = predicted_label
        try:
            prompt = f"Detected issue: {predicted_label} in {crop_type}. Suggest general remedies for this crop problem suitable for farmers worldwide in English."
            response = groq_client.chat.completions.create(
                model='llama3-8b-8192',
                messages=[{'role': 'user', 'content': prompt}],
                timeout=5  # Add timeout to avoid long waits
            ).choices[0].message.content
            recommendation = response
        except Exception:
            recommendation = "Sorry, there was an issue retrieving recommendations. Please consult a local agricultural expert."

    result_text = f"**Issue:** {issue}. **Recommendation:** {recommendation}"
    audio_file = generate_voice_over(predicted_label, recommendation)
    return result_text, audio_file

# Custom CSS for a Grok-inspired design.
# Note: these style rules are placeholders (assumed) for the class names used in the
# markdown blocks below; adjust them to taste.
st.markdown("""
<style>
.main-header { text-align: center; font-size: 2.5rem; font-weight: bold; }
.sub-header { text-align: center; font-size: 1.25rem; }
.instruction { text-align: center; }
.footer { text-align: center; font-size: 0.9rem; }
</style>
""", unsafe_allow_html=True)

# Main header with emoji
st.markdown('<div class="main-header">🌱 Farmer\'s Friend 🌱</div>', unsafe_allow_html=True)
st.markdown('<div class="sub-header">Helping farmers worldwide keep crops healthy!</div>', unsafe_allow_html=True)

# Instruction section
st.markdown(
    '<div class="instruction">📸 Upload a clear photo of your crop leaf to check for issues. '
    'We provide simple recommendations in English with English audio.</div>',
    unsafe_allow_html=True
)

# Sidebar for settings
with st.sidebar:
    st.markdown("### 🌾 Crop Settings")
    crop_type = st.selectbox(
        "Select your crop type:",
        ["Wheat", "Maize", "Rice", "Corn", "Soybean", "Barley", "Cotton", "Millet", "Sorghum"],
        help="Choose the type of crop you're analyzing."
    )
    st.markdown("### ℹ️ About")
    st.write("This app helps farmers worldwide identify crop issues and get practical solutions. Upload a leaf photo to get started!")

# Tabs for organized layout
tab1, tab2 = st.tabs(["📤 Upload & Analyze", "📊 Results"])

with tab1:
    # Use columns for a clean layout
    col1, col2 = st.columns([1, 1])
    with col1:
        uploaded_image = st.file_uploader("Upload Photo:", type=["jpg", "jpeg", "png"], help="Upload a clear image of your crop leaf.")
        if uploaded_image:
            image = Image.open(uploaded_image)
            st.image(image, caption="Your Uploaded Photo", use_container_width=True)
        else:
            image = None
    with col2:
        st.markdown("### 📷 Tips for Best Results")
        st.write("- Use a close-up photo of the leaf.")
        st.write("- Ensure good lighting.")
        st.write("- Avoid blurry images.")

    # Analyze button
    if st.button("🔍 Check"):
        if image:
            with st.spinner("Analyzing your crop..."):
                result_text, audio_file = analyze_crop(image, crop_type)
                st.session_state['result_text'] = result_text
                st.session_state['audio_file'] = audio_file
            st.success("Analysis Complete! Check the Results tab.")
        else:
            st.warning("Please upload a photo first.")

with tab2:
    if 'result_text' in st.session_state:
        st.markdown("### 📊 Analysis Results")
        st.write(st.session_state['result_text'])
        if 'audio_file' in st.session_state and st.session_state['audio_file'] and os.path.exists(st.session_state['audio_file']):
            with open(st.session_state['audio_file'], 'rb') as f:
                audio_bytes = f.read()
            st.audio(audio_bytes, format='audio/mp3')
            # Clean up the temporary audio file after use
            try:
                os.remove(st.session_state['audio_file'])
            except OSError:
                pass
        else:
            st.error("Audio not generated. Please try again.")
    else:
        st.info("No results yet. Please upload a photo and click 'Check' in the Upload tab.")

# Clear button at the bottom
if st.button("🔁 Clear"):
    # Clean up any existing audio files
    if 'audio_file' in st.session_state and st.session_state['audio_file'] and os.path.exists(st.session_state['audio_file']):
        try:
            os.remove(st.session_state['audio_file'])
        except OSError:
            pass
    st.session_state.clear()
    st.rerun()

# Initial instruction audio
if not uploaded_image:
    instruction = "Upload a photo of your crop leaf to check for issues."
    instruction_audio = text_to_audio(instruction)
    if instruction_audio and os.path.exists(instruction_audio):
        with open(instruction_audio, 'rb') as f:
            audio_bytes = f.read()
        st.audio(audio_bytes, format='audio/mp3')
        # Clean up the temporary audio file
        try:
            os.remove(instruction_audio)
        except OSError:
            pass

# Footer
st.markdown("---")
st.markdown(
    '<div class="footer">Made with ❤️ for farmers worldwide | Contact support: help@farmersfriend.com</div>',
    unsafe_allow_html=True
)