Update app.py
app.py
CHANGED
@@ -1,94 +1,29 @@
- import streamlit as st
- import tensorflow as tf
- import numpy as np
- from PIL import Image
- import io
- import logging
-
- logger = logging.getLogger(__name__)
-
- def load_model():
-     try:
-         model = tf.keras.models.load_model('mnist_cnn.h5')
-         logger.info("MNIST model loaded successfully")
-         return model
-     except Exception as e:
-         logger.error(f"Error loading model: {e}")
-         st.error("Failed to load the model. Please check the model file.")
-         return None
-
- def preprocess_image(image):
-     try:
-         # Convert to grayscale
-         img = image.convert('L')
-         # Resize to 28x28 (MNIST model input size)
-         img = img.resize((28, 28), Image.Resampling.LANCZOS)
-         # Convert to numpy array and normalize
-         img_array = np.array(img)
-         # Ensure the image is inverted if necessary (MNIST expects white digits on black background)
-         img_array = 255 - img_array  # Invert colors
-         img_array = img_array / 255.0  # Normalize to [0, 1]
-         # Reshape for model input (1, 28, 28, 1)
-         img_array = img_array.reshape(1, 28, 28, 1)
-         logger.info("Image preprocessed successfully")
-         return img_array
-     except Exception as e:
-         logger.error(f"Error preprocessing image: {e}")
-         st.error("Failed to preprocess the image. Please ensure it's a valid image.")
-         return None
-
- uploaded_file = st.file_uploader("Upload an image")  # exact prompt/label not preserved in this view
- if uploaded_file is not None:
-     try:
-         # Display the uploaded image
-         image = Image.open(uploaded_file)
-         st.image(image, caption="Uploaded Image", use_column_width=True)
-
-         processed_image = preprocess_image(image)
-         if processed_image is None:
-             st.stop()
-
-         # Load the model
-         model = load_model()
-         if model is None:
-             st.stop()
-
-         # Make prediction
-         with st.spinner("Detecting number..."):
-             prediction = model.predict(processed_image)
-             predicted_digit = np.argmax(prediction, axis=1)[0]
-             confidence = np.max(prediction) * 100
-
-         # Display result
-         st.success(f"Detected Number: {predicted_digit}")
-         st.write(f"Confidence: {confidence:.2f}%")
-
-         # Provide feedback if confidence is low
-         if confidence < 70:
-             st.warning("Low confidence in prediction. Please ensure the image contains a clear, single handwritten digit.")
-
-     except Exception as e:
-         logger.error(f"Error processing image: {e}")
-         st.error("An error occurred while processing the image. Please try again with a different image.")
- else:
-     st.info("Please upload an image to proceed.")
-
- # Instructions for users
- st.markdown("""
- ### Instructions
- 1. Upload an image containing a single handwritten digit (0-9).
- 2. Ensure the digit is clear, centered, and on a plain background for best results.
- 3. The model expects white digits on a black background, similar to MNIST dataset images.
- """)
+ import gradio as gr
+ from datetime import datetime
+ import pytz
+ from ocr_engine import extract_weight_from_image
+
+ def process_image(img):
+     if img is None:
+         return "No image uploaded", None, None
+
+     ist_time = datetime.now(pytz.timezone("Asia/Kolkata")).strftime("%d-%m-%Y %I:%M:%S %p")
+     weight, confidence = extract_weight_from_image(img)
+     return f"{weight} kg (Confidence: {confidence}%)", ist_time, img
+
+ with gr.Blocks(title="⚖️ Auto Weight Logger") as demo:
+     gr.Markdown("## ⚖️ Auto Weight Logger")
+     gr.Markdown("📷 Upload or capture an image of a digital weight scale.")
+
+     with gr.Row():
+         image_input = gr.Image(type="pil", label="Upload / Capture Image")
+         output_weight = gr.Textbox(label="⚖️ Detected Weight (in kg)")
+
+     with gr.Row():
+         timestamp = gr.Textbox(label="🕒 Captured At (IST)")
+         snapshot = gr.Image(label="📸 Snapshot Image")
+
+     submit = gr.Button("🔍 Detect Weight")
+     submit.click(process_image, inputs=image_input, outputs=[output_weight, timestamp, snapshot])
+
+ demo.launch()
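
The new app.py calls extract_weight_from_image from a local ocr_engine module that is not part of this change. The sketch below is only an illustration of the interface the code above assumes: the function name and the (weight, confidence) return pair come from the call site, while the pytesseract-based OCR and the fixed confidence value are assumptions, not the Space's actual implementation.

# ocr_engine.py: hypothetical sketch, not the Space's real module.
# Assumes Tesseract OCR via pytesseract; only the (weight, confidence)
# return shape is taken from the call in app.py.
import re

import pytesseract
from PIL import Image


def extract_weight_from_image(img: Image.Image):
    # Restrict OCR to digits and a decimal point to cut down on misreads
    # from a seven-segment style scale display.
    text = pytesseract.image_to_string(
        img, config="--psm 7 -c tessedit_char_whitelist=0123456789."
    )
    match = re.search(r"\d+(?:\.\d+)?", text)
    if not match:
        return 0.0, 0  # nothing that looks like a number was found
    # A real implementation could derive confidence from
    # pytesseract.image_to_data(); a constant is used here as a placeholder.
    return float(match.group()), 90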
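
For the Space to build with this version of app.py, its dependencies also need to be declared. Assuming a standard Hugging Face Spaces setup with a requirements.txt (that file is not part of this diff), the minimum would be along these lines, plus whatever the ocr_engine module itself requires:

gradio
pytz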