import gradio as gr
import torch
from PIL import Image
import numpy as np
import tensorflow as tf
from transformers import SegformerForSemanticSegmentation, AutoFeatureExtractor
import cv2
import json
import os
from huggingface_hub import login
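# The imports above imply a requirements.txt along these lines (an assumption;
# pin versions as appropriate for your Space):
#   gradio, torch, transformers, tensorflow, opencv-python, pillow, numpy, huggingface_hub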
# --- HUGGING FACE LOGIN ---
hf_token = os.getenv('HF_AUTH_TOKEN')
try:
    login(token=hf_token)
    print("Successfully logged in to Hugging Face Hub.")
except Exception as e:
    print(f"Hugging Face Hub login failed: {e}. Token will be used directly in from_pretrained calls.")
# Load Hugging Face models
try:
    part_seg_model = SegformerForSemanticSegmentation.from_pretrained("Mohaddz/huggingCars", token=hf_token)
    damage_seg_model = SegformerForSemanticSegmentation.from_pretrained("Mohaddz/DamageSeg", token=hf_token)
    feature_extractor = AutoFeatureExtractor.from_pretrained("Mohaddz/huggingCars", token=hf_token)
    print("Hugging Face models loaded successfully.")
except OSError as e:
    print(f"Error loading Hugging Face models: {e}")
    print("Please ensure the model identifiers are correct and you have the necessary access rights.")
    # Critical Hugging Face models failed to load; dependent features will be unavailable.
    part_seg_model = None
    damage_seg_model = None
    feature_extractor = None
# Load TensorFlow model for damage prediction
def load_model(model_path):
    print(f"Attempting to load TensorFlow model from: {model_path}")
    print(f"Current working directory: {os.getcwd()}")
    if not os.path.exists(model_path):
        print(f"Error: Model file '{model_path}' not found in current directory: {os.getcwd()}")
        print(f"Files in current directory: {os.listdir('.')}")
        raise FileNotFoundError(f"Model file '{model_path}' not found.")
    try:
        # Attempt 1: load the entire model directly
        model = tf.keras.models.load_model(model_path)
        print("Successfully loaded the entire TensorFlow model.")
        return model
    except Exception as e:
        print(f"Failed to load entire TensorFlow model. Error: {e}")
        try:
            # Attempt 2: load the model architecture from JSON and the weights from H5
            json_path = model_path.replace('.h5', '.json')
            if not os.path.exists(json_path):
                print(f"Error: JSON model architecture file '{json_path}' not found.")
                raise FileNotFoundError(f"JSON model architecture file '{json_path}' not found.")
            with open(json_path, 'r') as json_file:
                model_json = json_file.read()
            model = tf.keras.models.model_from_json(model_json)
            model.load_weights(model_path)  # the .h5 file should contain the weights
            print("Successfully loaded TensorFlow model from JSON and weights.")
            return model
        except Exception as e_json:
            print(f"Failed to load TensorFlow model from JSON and weights. Error: {e_json}")
            try:
                # Attempt 3: load only the weights into a predefined architecture.
                # This architecture must match the one used when
                # 'improved_car_damage_prediction_model(2).h5' was saved.
                input_shape_val = 33   # default; overridden below if the HF models loaded
                num_classes_val = 29   # default; should match the number of parts in all_parts
                # The input to this TF model is a concatenation of the mean features from
                # the part and damage segmentation models, so its expected size can be
                # derived from the loaded Hugging Face model configurations.
                if part_seg_model and damage_seg_model:
                    actual_input_shape = part_seg_model.config.num_labels + damage_seg_model.config.num_labels
                    print(f"Calculated input_shape for TensorFlow model based on HF models: {actual_input_shape}")
                    if input_shape_val != actual_input_shape:
                        print(f"Note: Overriding predefined input_shape ({input_shape_val}) with calculated shape ({actual_input_shape}).")
                        input_shape_val = actual_input_shape
                else:
                    print(f"Warning: Hugging Face models not loaded. Using default input_shape={input_shape_val} for the TensorFlow model; this may cause errors if incorrect.")
                inputs_tf = tf.keras.Input(shape=(input_shape_val,))
                x = tf.keras.layers.Dense(256, activation='relu')(inputs_tf)
                x = tf.keras.layers.Dense(128, activation='relu')(x)
                x = tf.keras.layers.Dense(64, activation='relu')(x)
                outputs_tf = tf.keras.layers.Dense(num_classes_val, activation='sigmoid')(x)
                model = tf.keras.Model(inputs=inputs_tf, outputs=outputs_tf)
                model.load_weights(model_path)
                print("Successfully loaded weights into predefined TensorFlow model architecture.")
                return model
            except Exception as e_weights:
                print(f"Failed to load weights into predefined TensorFlow architecture. Error: {e_weights}")
                detailed_error_message = (
                    "All attempts to load the TensorFlow model failed.\n"
                    f"Attempt 1 (load_model): {e}\n"
                    f"Attempt 2 (from JSON): {e_json}\n"
                    f"Attempt 3 (load_weights): {e_weights}"
                )
                print(detailed_error_message)
                raise Exception(detailed_error_message)
# Initialize the TensorFlow model variable
dl_model = None
if part_seg_model and damage_seg_model and feature_extractor:  # proceed only if the HF models loaded
    try:
        dl_model = load_model('improved_car_damage_prediction_model(2).h5')
        print("TensorFlow damage prediction model loaded successfully.")
        dl_model.summary()
    except Exception as e:
        print(f"Failed to load the TensorFlow damage prediction model: {e}")
        dl_model = None  # ensure it is None if loading fails
else:
    print("Skipping TensorFlow model loading because prerequisite Hugging Face models failed to load.")
# Load the parts list from JSON
PARTS_LIST_FILE = 'cars117.json'
all_parts = []
if os.path.exists(PARTS_LIST_FILE):
    with open(PARTS_LIST_FILE, 'r', encoding='utf-8') as f:
        data = json.load(f)
    all_parts = sorted(set(part for entry in data.values() for part in entry.get('replaced_parts', [])))
    if dl_model and dl_model.output_shape[-1] != len(all_parts):
        print(f"Warning: the TensorFlow model's output size ({dl_model.output_shape[-1]}) "
              f"does not match the number of parts in the JSON ({len(all_parts)}). Predictions may be misaligned.")
else:
    print(f"Error: Parts list file '{PARTS_LIST_FILE}' not found. Predicted part names will be unavailable.")
def process_image(image):
    if not part_seg_model or not damage_seg_model or not feature_extractor:
        # Return placeholder images if the Hugging Face models are not loaded
        dummy_img = Image.new('RGB', (256, 256), color='grey')
        return (dummy_img, dummy_img, dummy_img,
                "Hugging Face models failed to load. Cannot process image.")
    if image.mode != 'RGB':
        image = image.convert('RGB')  # ensure the image is in RGB format
    inputs_hf = feature_extractor(images=image, return_tensors="pt")  # prepare inputs for the Hugging Face models
    # Damage segmentation
    with torch.no_grad():
        damage_output_logits = damage_seg_model(**inputs_hf).logits
    # Squeeze the batch dim, move to CPU, convert to numpy: (num_damage_labels, H, W)
    damage_features = damage_output_logits.squeeze(0).cpu().numpy()
    damage_heatmap_raw = create_heatmap(damage_features)  # heatmap from damage features
    damage_heatmap_resized = cv2.resize(damage_heatmap_raw, (image.size[0], image.size[1]))
    image_array = np.array(image)
    # Mask from the highest-probability class; cast to uint8 since cv2.resize
    # does not accept the int64 array that np.argmax returns
    damage_mask = np.argmax(damage_features, axis=0).astype(np.uint8)
    damage_mask_resized = cv2.resize(damage_mask, (image.size[0], image.size[1]), interpolation=cv2.INTER_NEAREST)
    overlay = np.zeros_like(image_array)
    overlay[damage_mask_resized > 0] = [255, 0, 0]  # red overlay on damaged regions
    annotated_image = cv2.addWeighted(image_array, 1, overlay, 0.5, 0)
    # Part segmentation
    with torch.no_grad():
        part_output_logits = part_seg_model(**inputs_hf).logits
    # Squeeze the batch dim, move to CPU, convert to numpy: (num_part_labels, H, W)
    part_features = part_output_logits.squeeze(0).cpu().numpy()
    part_heatmap_raw = create_heatmap(part_features)  # heatmap from part features
    part_heatmap_resized = cv2.resize(part_heatmap_raw, (image.size[0], image.size[1]))
    # Prepare the input vector for the TensorFlow damage prediction model:
    # the mean of each label map over the spatial dimensions
    part_feature_vector = part_features.mean(axis=(1, 2))      # shape: (num_part_labels,)
    damage_feature_vector = damage_features.mean(axis=(1, 2))  # shape: (num_damage_labels,)
    input_vector_tf = np.concatenate([part_feature_vector, damage_feature_vector])
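    # Note: if the part model has P labels and the damage model has D labels,
    # input_vector_tf has shape (P + D,), which must equal dl_model.input_shape[1];
    # the size check below guards against a mismatch.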
    if dl_model is not None:
        if not all_parts:
            prediction_text = "Parts list ('all_parts') is empty. Cannot map predictions to part names."
        else:
            expected_input_shape_tf = dl_model.input_shape[1]
            if input_vector_tf.shape[0] != expected_input_shape_tf:
                prediction_text = (f"Error: Input vector size for the TF model ({input_vector_tf.shape[0]}) "
                                   f"does not match the model's expected input size ({expected_input_shape_tf}). "
                                   "Check the Segformer label counts or the TF model definition.")
            else:
                try:
                    # Add a batch dimension for the TensorFlow model prediction
                    prediction = dl_model.predict(np.expand_dims(input_vector_tf, axis=0))
                    if prediction.shape[1] != len(all_parts):
                        prediction_text = (f"Error: Prediction output size ({prediction.shape[1]}) "
                                           f"does not match the number of parts ({len(all_parts)}). "
                                           "Check the TF model's output layer or the parts list JSON.")
                    else:
                        predicted_parts = [(all_parts[i], float(prob)) for i, prob in enumerate(prediction[0]) if prob > 0.1]
                        predicted_parts.sort(key=lambda x: x[1], reverse=True)  # sort by probability
                        if predicted_parts:
                            prediction_text = "\n".join(f"{part}: {prob:.2f}" for part, prob in predicted_parts[:5])  # top 5
                        else:
                            prediction_text = "No parts predicted with confidence > 0.1."
                except Exception as e_predict:
                    prediction_text = f"Error during TensorFlow model prediction: {e_predict}"
    else:
        prediction_text = "TensorFlow part prediction model (dl_model) not loaded. Predictions unavailable."
    return (Image.fromarray(annotated_image),
            Image.fromarray(damage_heatmap_resized),
            Image.fromarray(part_heatmap_resized),
            prediction_text)
def create_heatmap(feature_maps):  # input shape: (num_labels, H, W)
    # Creates an aggregate heatmap by summing the features across label channels.
    # (Per-label heatmaps would require processing each channel individually.)
    heatmap = np.sum(feature_maps, axis=0)
    if heatmap.max() == heatmap.min():  # handle flat heatmaps to avoid division by zero
        heatmap_normalized = np.zeros_like(heatmap, dtype=np.float32)
    else:
        heatmap_normalized = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())
    heatmap_uint8 = np.uint8(255 * heatmap_normalized)
    # applyColorMap returns BGR; convert to RGB so PIL displays the colors correctly
    return cv2.cvtColor(cv2.applyColorMap(heatmap_uint8, cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB)
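# The aggregate heatmap above sums all channels. A minimal sketch of the
# per-label variant mentioned in the comment (not wired into the app;
# label_idx is an illustrative parameter):
def create_label_heatmap(feature_maps, label_idx):
    """Colorize a single label channel from a (num_labels, H, W) feature array."""
    heatmap = feature_maps[label_idx]
    if heatmap.max() == heatmap.min():  # flat channel: avoid division by zero
        normalized = np.zeros_like(heatmap, dtype=np.float32)
    else:
        normalized = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())
    colored = cv2.applyColorMap(np.uint8(255 * normalized), cv2.COLORMAP_JET)
    return cv2.cvtColor(colored, cv2.COLOR_BGR2RGB)  # BGR -> RGB for PIL display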
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="pil", label="Annotated Damage"),
        gr.Image(type="pil", label="Damage Heatmap"),
        gr.Image(type="pil", label="Part Segmentation Heatmap"),
        gr.Textbox(label="Predicted Parts to Replace (Top 5)")
    ],
    title="Car Damage Assessment",
    description="Upload an image of a damaged car. Ensure 'improved_car_damage_prediction_model(2).h5' and 'cars117.json' are in the script's directory."
)
if __name__ == '__main__':
    if not os.path.exists('improved_car_damage_prediction_model(2).h5'):
        print("WARNING: TensorFlow model 'improved_car_damage_prediction_model(2).h5' not found. Part prediction will be unavailable.")
    if not os.path.exists(PARTS_LIST_FILE):
        print(f"WARNING: Parts list '{PARTS_LIST_FILE}' not found. Part names for predictions will be unavailable.")
    if not (part_seg_model and damage_seg_model and feature_extractor):
        print("WARNING: One or more Hugging Face models could not be loaded. Application functionality will be limited.")
    iface.launch()