# Farmer's Friend — Streamlit app for crop-leaf disease detection,
# deployed on Hugging Face Spaces.
import os
import re
import tempfile

import streamlit as st
import torch
from groq import Groq
from gtts import gTTS
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification
# Wide layout with the sidebar open by default — farmer-facing UI.
st.set_page_config(page_title="Farmer's Friend", page_icon="π±", layout="wide", initial_sidebar_state="expanded")

# The Groq API key must arrive via an environment variable (configured as a
# secret in Hugging Face Spaces). Without it the app cannot produce
# recommendations, so halt with a visible error instead of failing later.
groq_api_key = os.getenv("GROQ_API_KEY")
if groq_api_key:
    groq_client = Groq(api_key=groq_api_key)
else:
    st.error("Groq API key not found. Please set the GROQ_API_KEY environment variable in Hugging Face Spaces secrets.")
    st.stop()
# Load and cache the model and processor | |
@st.cache_resource
def load_model_and_processor():
    """Load the ViT leaf-disease classifier and its image processor.

    Decorated with ``st.cache_resource`` so the model weights are downloaded
    and initialised once per server process — the original comment promised
    caching but no cache decorator was applied, so every Streamlit rerun
    reloaded the model from scratch.

    Returns:
        tuple: ``(image_processor, image_model)`` ready for inference.
    """
    image_processor = AutoImageProcessor.from_pretrained("wambugu71/crop_leaf_diseases_vit", use_fast=True)
    image_model = AutoModelForImageClassification.from_pretrained("wambugu71/crop_leaf_diseases_vit")
    image_model.eval()  # inference mode: disables dropout etc. for faster, deterministic runs
    if torch.cuda.is_available():
        image_model.cuda()  # move weights to GPU when one is present
    return image_processor, image_model


image_processor, image_model = load_model_and_processor()
# Check for wide-field image | |
def is_wide_field_image(image):
    """Heuristic filter for wide-field shots: a photo whose width or height
    exceeds 1000 px is assumed to show a whole field rather than a single
    leaf close-up, which the classifier cannot handle."""
    return any(dim > 1000 for dim in image.size)
# Generate audio in English using temporary files | |
def text_to_audio(text, filename="output.mp3"):
    """Synthesize *text* as English speech and save it to a temporary MP3.

    Args:
        text: The text to speak.
        filename: Legacy and unused — audio is always written to a fresh
            temporary file; the parameter is kept only for backward
            compatibility with existing callers.

    Returns:
        Path of the generated MP3 file, or None when synthesis failed
        (the error is surfaced via st.error).
    """
    try:
        # Create the temp file and close the handle BEFORE gTTS writes to
        # it: saving into a still-open NamedTemporaryFile fails on Windows,
        # where the open handle holds an exclusive lock.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            tmp_path = tmp_file.name
        gTTS(text=text, lang='en', slow=False, tld='com').save(tmp_path)
        return tmp_path
    except Exception as e:
        st.error(f"Error generating audio: {e}")
        return None
# Smart voice-over generation (removing special characters) | |
def generate_voice_over(predicted_label, recommendation):
    """Build a spoken summary of the diagnosis and return its MP3 path.

    Underscores and asterisks are stripped from the label so the TTS engine
    doesn't read markup characters aloud.
    """
    spoken_label = re.sub(r'[_*]', ' ', predicted_label).strip()
    voice_text = (
        "Your crop looks healthy. Keep up the good work!"
        if "healthy" in spoken_label.lower()
        else f"Issue detected: {spoken_label}. Recommendation: {recommendation}"
    )
    return text_to_audio(voice_text, "result.mp3")
# Analyze the crop (no caching due to PIL.Image.Image serialization issues) | |
def analyze_crop(image, crop_type):
    """Classify a leaf photo and return (result markdown, audio file path).

    Rejects missing or oversized images, runs the cached ViT classifier on
    a 224x224 RGB version of the photo, and — for unhealthy predictions —
    asks the Groq LLM for remedies. Relies on module-level globals:
    image_processor, image_model, groq_client, st.
    """
    if image is None:
        msg = "Please upload a photo of your crop leaf."
        return msg, text_to_audio(msg, "no_image.mp3")
    if is_wide_field_image(image):
        msg = "This image is too large. Please upload a close-up photo of the leaf."
        return msg, text_to_audio(msg, "large_image.mp3")
    # Convert image to RGB and resize (optimized); 224x224 matches the
    # ViT checkpoint's expected input resolution.
    image = image.convert("RGB")
    image = image.resize((224, 224), Image.Resampling.LANCZOS)
    inputs = image_processor(images=image, return_tensors="pt")
    # Move inputs to GPU if available (the model was moved at load time).
    if torch.cuda.is_available():
        inputs = {k: v.cuda() for k, v in inputs.items()}
    # Run inference without gradient tracking.
    with torch.no_grad():
        outputs = image_model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    predicted_label = image_model.config.id2label[predicted_class_idx]
    # Prepare result: healthy crops get a canned message; anything else is
    # sent to the LLM for crop-specific remedies.
    if "healthy" in predicted_label.lower():
        issue = "Healthy"
        recommendation = "Your crop looks healthy. Keep up the good work!"
    else:
        issue = predicted_label
        try:
            prompt = f"Detected issue: {predicted_label} in {crop_type}. Suggest general remedies for this crop problem suitable for farmers worldwide in English."
            response = groq_client.chat.completions.create(
                model='llama3-8b-8192',
                messages=[{'role': 'user', 'content': prompt}],
                timeout=5  # Add timeout to avoid long waits
            ).choices[0].message.content
            recommendation = response
        except Exception as e:
            # Fall back to a generic message when the LLM call fails or
            # times out, so analysis still returns a usable result.
            recommendation = "Sorry, there was an issue retrieving recommendations. Please consult a local agricultural expert."
    result_text = f"**Issue:** {issue}. **Recommendation:** {recommendation}"
    audio_file = generate_voice_over(predicted_label, recommendation)
    return result_text, audio_file
# Custom CSS for a Grok-inspired design | |
# Inject the app-wide stylesheet. NOTE(review): the `.css-1d391kg` selector
# targets a Streamlit-generated class name that changes between Streamlit
# releases — verify it still matches the sidebar in the deployed version.
st.markdown("""
<style>
.stApp {
    background-color: #F5F7F5;
    font-family: 'Arial', sans-serif;
}
.main-header {
    color: #1A3C34;
    font-size: 40px;
    font-weight: 700;
    text-align: center;
    margin-bottom: 10px;
}
.sub-header {
    color: #2E7D32;
    font-size: 20px;
    text-align: center;
    margin-bottom: 20px;
}
.info-box {
    background-color: #E8F5E9;
    padding: 20px;
    border-radius: 12px;
    font-size: 16px;
    box-shadow: 0 2px 5px rgba(0,0,0,0.1);
    margin-bottom: 20px;
}
.css-1d391kg {
    background-color: #FFFFFF;
    border-right: 1px solid #E0E0E0;
}
.stButton>button {
    background-color: #2E7D32;
    color: white;
    border-radius: 12px;
    padding: 12px 24px;
    font-size: 16px;
    font-weight: 600;
    border: none;
    transition: all 0.3s ease;
}
.stButton>button:hover {
    background-color: #1A3C34;
    transform: scale(1.05);
}
.stTabs [data-baseweb="tab-list"] {
    gap: 20px;
}
.stTabs [data-baseweb="tab"] {
    background-color: #FFFFFF;
    border-radius: 8px;
    padding: 10px 20px;
    font-weight: 500;
    color: #1A3C34;
}
.stTabs [data-baseweb="tab"][aria-selected="true"] {
    background-color: #2E7D32;
    color: white;
}
.stImage > div > div > div > div > p {
    font-size: 14px;
    color: #4A4A4A;
    text-align: center;
}
.stMarkdown {
    font-size: 16px;
    color: #1A3C34;
}
</style>
""", unsafe_allow_html=True)
# Main header with emoji | |
# Page title, tagline, and usage instructions rendered as raw HTML so the
# CSS classes defined above (.main-header, .sub-header, .info-box) apply.
st.markdown('<div class="main-header">π± Farmer\'s Friend π±</div>', unsafe_allow_html=True)
st.markdown('<div class="sub-header">Helping farmers worldwide keep crops healthy!</div>', unsafe_allow_html=True)
st.markdown('<div class="info-box">πΈ Upload a clear photo of your crop leaf to check for issues. We provide simple recommendations in English with English audio.</div>', unsafe_allow_html=True)
# Sidebar for settings | |
# Sidebar: crop-type selector (passed to analyze_crop for the LLM prompt)
# and a short "about" blurb.
with st.sidebar:
    st.markdown("### πΎ Crop Settings")
    crop_type = st.selectbox(
        "Select your crop type:",
        ["Wheat", "Maize", "Rice", "Corn", "Soybean", "Barley", "Cotton", "Millet", "Sorghum"],
        help="Choose the type of crop you're analyzing."
    )
    st.markdown("### βΉοΈ About")
    st.write("This app helps farmers worldwide identify crop issues and get practical solutions. Upload a leaf photo to get started!")
# Tabs for organized layout | |
# Two-tab layout: upload/analyze on the left tab, results on the right.
tab1, tab2 = st.tabs(["π€ Upload & Analyze", "π Results"])
with tab1:
    # Use columns for a clean layout: uploader + preview on the left,
    # photo tips on the right.
    col1, col2 = st.columns([1, 1])
    with col1:
        uploaded_image = st.file_uploader("Upload Photo:", type=["jpg", "jpeg", "png"], help="Upload a clear image of your crop leaf.")
        if uploaded_image:
            image = Image.open(uploaded_image)
            st.image(image, caption="Your Uploaded Photo", use_container_width=True)
        else:
            # No upload yet — analyze button below will warn instead of run.
            image = None
    with col2:
        st.markdown("### π· Tips for Best Results")
        st.write("- Use a close-up photo of the leaf.")
        st.write("- Ensure good lighting.")
        st.write("- Avoid blurry images.")
    # Analyze button: results are stashed in session_state so the Results
    # tab can render them on the next rerun.
    if st.button("π Check"):
        if image:
            with st.spinner("Analyzing your crop..."):
                result_text, audio_file = analyze_crop(image, crop_type)
                st.session_state['result_text'] = result_text
                st.session_state['audio_file'] = audio_file
                st.success("Analysis Complete! Check the Results tab.")
        else:
            st.warning("Please upload a photo first.")
with tab2:
    if 'result_text' in st.session_state:
        st.markdown("### π Analysis Results")
        st.write(st.session_state['result_text'])
        # Cache the audio bytes in session state on first read. Previously
        # the temp MP3 was deleted right after the first playback while
        # 'audio_file' stayed in session state, so every subsequent rerun
        # of this tab fell into the error branch.
        audio_path = st.session_state.get('audio_file')
        if 'audio_bytes' not in st.session_state and audio_path and os.path.exists(audio_path):
            with open(audio_path, 'rb') as f:
                st.session_state['audio_bytes'] = f.read()
            # Clean up the temporary audio file once its bytes are cached.
            try:
                os.remove(audio_path)
            except OSError:
                pass
        if st.session_state.get('audio_bytes'):
            st.audio(st.session_state['audio_bytes'], format='audio/mp3')
        else:
            st.error("Audio not generated. Please try again.")
    else:
        st.info("No results yet. Please upload a photo and click 'Check' in the Upload tab.")
# Clear button at the bottom | |
# Clear button: wipe all cached results and restart the script run.
if st.button("π Clear"):
    # Remove any leftover temp audio before wiping state so the file
    # doesn't leak when the session is reset.
    audio_path = st.session_state.get('audio_file')
    if audio_path and os.path.exists(audio_path):
        try:
            os.remove(audio_path)
        except OSError:
            # Best-effort cleanup; only swallow filesystem errors (the old
            # bare `except:` would have hidden unrelated bugs too).
            pass
    st.session_state.clear()
    st.rerun()
# Initial instruction audio | |
# Spoken welcome prompt while no photo has been uploaded yet.
if not uploaded_image:
    # Synthesize once per session instead of on every Streamlit rerun —
    # each synthesis was a fresh network call to gTTS.
    if 'instruction_audio_bytes' not in st.session_state:
        instruction = "Upload a photo of your crop leaf to check for issues."
        instruction_audio = text_to_audio(instruction, "instruction.mp3")
        if instruction_audio and os.path.exists(instruction_audio):
            with open(instruction_audio, 'rb') as f:
                st.session_state['instruction_audio_bytes'] = f.read()
            # Clean up the temporary audio file; only filesystem errors are
            # ignored (the old bare `except:` swallowed everything).
            try:
                os.remove(instruction_audio)
            except OSError:
                pass
    if st.session_state.get('instruction_audio_bytes'):
        st.audio(st.session_state['instruction_audio_bytes'], format='audio/mp3')
# Footer | |
# Horizontal rule plus a centered footer with the support contact.
st.markdown("---")
st.markdown('<div style="text-align: center; color: #666;">Made with β€οΈ for farmers worldwide | Contact support: [email protected]</div>', unsafe_allow_html=True)