import json
import os

import cv2
import gradio as gr
import numpy as np
import tensorflow as tf
import torch
from huggingface_hub import login
from PIL import Image
from transformers import AutoFeatureExtractor, SegformerForSemanticSegmentation
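
# Read the Hub token from the environment (e.g. set HF_AUTH_TOKEN in the shell
# or as a deployment secret); os.getenv returns None when it is unset.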
hf_token = os.getenv('HF_AUTH_TOKEN')

if hf_token:
    try:
        login(token=hf_token)
        print("Successfully logged in to Hugging Face Hub.")
    except Exception as e:
        print(f"Hugging Face Hub login failed: {e}. Token will be passed directly to from_pretrained calls.")
else:
    print("HF_AUTH_TOKEN is not set; proceeding without an explicit Hub login.")
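
# Load the two Segformer models (car-part and damage segmentation) and the
# feature extractor used to preprocess input images for both of them.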
try:
    part_seg_model = SegformerForSemanticSegmentation.from_pretrained("Mohaddz/huggingCars", token=hf_token)
    damage_seg_model = SegformerForSemanticSegmentation.from_pretrained("Mohaddz/DamageSeg", token=hf_token)
    feature_extractor = AutoFeatureExtractor.from_pretrained("Mohaddz/huggingCars", token=hf_token)
    print("Hugging Face models loaded successfully.")
except OSError as e:
    print(f"Error loading Hugging Face models: {e}")
    print("Please ensure the model identifiers are correct and you have the necessary access rights.")
    part_seg_model = None
    damage_seg_model = None
    feature_extractor = None
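

# Loader for the TensorFlow part-replacement model. It tries three strategies
# in order: (1) the file is a fully serialized Keras model, (2) a JSON
# architecture file plus weights, (3) weights loaded into a hard-coded
# architecture.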
def load_model(model_path):
    print(f"Attempting to load TensorFlow model from: {model_path}")
    print(f"Current working directory: {os.getcwd()}")

    if not os.path.exists(model_path):
        print(f"Error: Model file '{model_path}' not found in current directory: {os.getcwd()}")
        print(f"Files in current directory: {os.listdir('.')}")
        raise FileNotFoundError(f"Model file '{model_path}' not found.")

    # Attempt 1: the file is a complete serialized Keras model.
    try:
        model = tf.keras.models.load_model(model_path)
        print("Successfully loaded the entire TensorFlow model.")
        return model
    except Exception as e:
        print(f"Failed to load entire TensorFlow model. Error: {str(e)}")

        # Attempt 2: rebuild the architecture from its JSON file, then load the
        # weights from the .h5 file.
        try:
            json_path = model_path.replace('.h5', '.json')
            if not os.path.exists(json_path):
                print(f"Error: JSON model architecture file '{json_path}' not found.")
                raise FileNotFoundError(f"JSON model architecture file '{json_path}' not found.")

            with open(json_path, 'r') as json_file:
                model_json = json_file.read()
            model = tf.keras.models.model_from_json(model_json)
            model.load_weights(model_path)
            print("Successfully loaded TensorFlow model from JSON and weights.")
            return model
        except Exception as e_json:
            print(f"Failed to load TensorFlow model from JSON and weights. Error: {str(e_json)}")

            # Attempt 3: load the weights into a predefined architecture.
            try:
                # Default sizes; the input size is recomputed from the Segformer
                # label counts when those models loaded successfully.
                input_shape_val = 33
                num_classes_val = 29

                if part_seg_model and damage_seg_model:
                    actual_input_shape = part_seg_model.config.num_labels + damage_seg_model.config.num_labels
                    print(f"Calculated input_shape for TensorFlow model based on HF models: {actual_input_shape}")
                    if input_shape_val != actual_input_shape:
                        print(f"Note: Overriding predefined input_shape ({input_shape_val}) with calculated shape ({actual_input_shape}).")
                        input_shape_val = actual_input_shape
                else:
                    print(f"Warning: Hugging Face models not loaded. Using default input_shape={input_shape_val} for TensorFlow model. This may lead to errors if incorrect.")

                # Simple multi-label head: three ReLU dense layers and a sigmoid
                # output with one unit per part class.
                inputs_tf = tf.keras.Input(shape=(input_shape_val,))
                x = tf.keras.layers.Dense(256, activation='relu')(inputs_tf)
                x = tf.keras.layers.Dense(128, activation='relu')(x)
                x = tf.keras.layers.Dense(64, activation='relu')(x)
                outputs_tf = tf.keras.layers.Dense(num_classes_val, activation='sigmoid')(x)
                model = tf.keras.Model(inputs=inputs_tf, outputs=outputs_tf)
                model.load_weights(model_path)
                print("Successfully loaded weights into predefined TensorFlow model architecture.")
                return model
            except Exception as e_weights:
                print(f"Failed to load weights into predefined TensorFlow architecture. Error: {str(e_weights)}")
                detailed_error_message = (
                    "All attempts to load the TensorFlow model failed.\n"
                    f"Attempt 1 (load_model): {str(e)}\n"
                    f"Attempt 2 (from JSON): {str(e_json)}\n"
                    f"Attempt 3 (load_weights): {str(e_weights)}"
                )
                print(detailed_error_message)
                raise Exception(detailed_error_message)
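

# Load the TensorFlow prediction model only when the Hugging Face models are
# available, since its input vector is built from their pooled logits.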
dl_model = None
if part_seg_model and damage_seg_model and feature_extractor:
    try:
        dl_model = load_model('improved_car_damage_prediction_model(2).h5')
        print("TensorFlow damage prediction model loaded successfully.")
        dl_model.summary()
    except Exception as e:
        print(f"Failed to load the TensorFlow damage prediction model: {str(e)}")
        dl_model = None
else:
    print("Skipping TensorFlow model loading because prerequisite Hugging Face models failed to load.")
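
# Build the sorted list of unique replaceable-part names from the dataset JSON.
# Each entry in cars117.json is expected to carry a 'replaced_parts' list.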
PARTS_LIST_FILE = 'cars117.json'
all_parts = []
if os.path.exists(PARTS_LIST_FILE):
    with open(PARTS_LIST_FILE, 'r', encoding='utf-8') as f:
        data = json.load(f)

    all_parts = sorted(set(part for entry in data.values() for part in entry.get('replaced_parts', [])))

    # Reconcile the parts list with the model's output layer so that indexing
    # predictions by position can never go out of range.
    if dl_model is not None:
        model_output_size = dl_model.output_shape[-1]
        parts_list_size = len(all_parts)

        if model_output_size != parts_list_size:
            print(f"WARNING: Model output size ({model_output_size}) and parts list size ({parts_list_size}) do not match.")

            if model_output_size > parts_list_size:
                diff = model_output_size - parts_list_size
                print(f"Padding the parts list with {diff} dummy entries to prevent a crash.")
                for i in range(diff):
                    all_parts.append(f"_dummy_part_{i+1}_")
            else:
                diff = parts_list_size - model_output_size
                print(f"Truncating the parts list by {diff} entries to match the model's output.")
                all_parts = all_parts[:model_output_size]
else:
    print(f"Error: Parts list file '{PARTS_LIST_FILE}' not found. Predicted part names will be unavailable.")
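

# Main inference pipeline: segment damage and parts, render visualizations,
# and feed pooled logits to the TensorFlow model to predict which parts to
# replace.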
def process_image(image):
    if not part_seg_model or not damage_seg_model or not feature_extractor:
        # Return grey placeholders when the segmentation models are unavailable.
        dummy_img = Image.new('RGB', (256, 256), color='grey')
        return (dummy_img, dummy_img, dummy_img,
                "Hugging Face models failed to load. Cannot process image.")

    if image.mode != 'RGB':
        image = image.convert('RGB')

    inputs_hf = feature_extractor(images=image, return_tensors="pt")

    # Damage segmentation: Segformer returns one logit map per damage class at
    # reduced resolution.
    with torch.no_grad():
        damage_output_logits = damage_seg_model(**inputs_hf).logits

    damage_features = damage_output_logits.squeeze(0).cpu().numpy()

    damage_heatmap_raw = create_heatmap(damage_features)
    damage_heatmap_resized = cv2.resize(damage_heatmap_raw, (image.size[0], image.size[1]))

    # Overlay every non-background damage pixel in red. The argmax mask is cast
    # to uint8 first because cv2.resize does not support int64 input.
    image_array = np.array(image)
    damage_mask = np.argmax(damage_features, axis=0).astype(np.uint8)
    damage_mask_resized = cv2.resize(damage_mask, (image.size[0], image.size[1]), interpolation=cv2.INTER_NEAREST)
    overlay = np.zeros_like(image_array)
    overlay[damage_mask_resized > 0] = [255, 0, 0]
    annotated_image = cv2.addWeighted(image_array, 1, overlay, 0.5, 0)

    # Part segmentation: same procedure with the part model.
    with torch.no_grad():
        part_output_logits = part_seg_model(**inputs_hf).logits

    part_features = part_output_logits.squeeze(0).cpu().numpy()
    part_heatmap_raw = create_heatmap(part_features)
    part_heatmap_resized = cv2.resize(part_heatmap_raw, (image.size[0], image.size[1]))

    # Build the TensorFlow model's input: global-average-pool each class's
    # logit map to a single value, then concatenate the part and damage vectors.
    part_feature_vector = part_features.mean(axis=(1, 2))
    damage_feature_vector = damage_features.mean(axis=(1, 2))
    input_vector_tf = np.concatenate([part_feature_vector, damage_feature_vector])

    if dl_model is None:
        prediction_text = "TensorFlow part prediction model (dl_model) not loaded. Predictions unavailable."
    elif not all_parts:
        prediction_text = "Parts list ('all_parts') is empty. Cannot map predictions to part names."
    else:
        expected_input_shape_tf = dl_model.input_shape[1]
        if input_vector_tf.shape[0] != expected_input_shape_tf:
            prediction_text = (f"Error: Input vector size for TF model ({input_vector_tf.shape[0]}) "
                               f"does not match model's expected input size ({expected_input_shape_tf}). "
                               "Check Segformer model label counts or TF model definition.")
        else:
            try:
                prediction = dl_model.predict(np.expand_dims(input_vector_tf, axis=0))
                if prediction.shape[1] != len(all_parts):
                    prediction_text = (f"Error: Prediction output size ({prediction.shape[1]}) "
                                       f"does not match number of parts ({len(all_parts)}). "
                                       "Check TF model's output layer or the parts list JSON.")
                else:
                    # Keep parts above a 0.1 confidence threshold, highest first.
                    predicted_parts = [(all_parts[i], float(prob)) for i, prob in enumerate(prediction[0]) if prob > 0.1]
                    predicted_parts.sort(key=lambda x: x[1], reverse=True)
                    if predicted_parts:
                        prediction_text = "\n".join(f"{part}: {prob:.2f}" for part, prob in predicted_parts[:5])
                    else:
                        prediction_text = "No parts predicted with confidence > 0.1."
            except Exception as e_predict:
                prediction_text = f"Error during TensorFlow model prediction: {str(e_predict)}"

    return (Image.fromarray(annotated_image),
            Image.fromarray(damage_heatmap_resized),
            Image.fromarray(part_heatmap_resized),
            prediction_text)
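

# Collapse per-class logit maps of shape (num_classes, H, W) into a single
# normalized JET heatmap.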
def create_heatmap(feature_maps):
    heatmap = np.sum(feature_maps, axis=0)
    if heatmap.max() == heatmap.min():
        # A constant map cannot be normalized; return all zeros instead of
        # dividing by zero.
        heatmap_normalized = np.zeros_like(heatmap, dtype=np.float32)
    else:
        heatmap_normalized = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())
    heatmap_uint8 = np.uint8(255 * heatmap_normalized)
    # cv2.applyColorMap returns BGR; convert to RGB so PIL renders the expected colors.
    colored_bgr = cv2.applyColorMap(heatmap_uint8, cv2.COLORMAP_JET)
    return cv2.cvtColor(colored_bgr, cv2.COLOR_BGR2RGB)
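

# Gradio UI: one uploaded image in; three images and a textbox out, matching
# the 4-tuple returned by process_image.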
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="pil", label="Annotated Damage"),
        gr.Image(type="pil", label="Damage Heatmap"),
        gr.Image(type="pil", label="Part Segmentation Heatmap"),
        gr.Textbox(label="Predicted Parts to Replace (Top 5)")
    ],
    title="Car Damage Assessment",
    description="Upload an image of a damaged car. Ensure 'improved_car_damage_prediction_model(2).h5' and 'cars117.json' are in the script's directory."
)


if __name__ == '__main__':
    if not os.path.exists('improved_car_damage_prediction_model(2).h5'):
        print("WARNING: TensorFlow model 'improved_car_damage_prediction_model(2).h5' not found. Part prediction will be unavailable.")
    if not os.path.exists(PARTS_LIST_FILE):
        print(f"WARNING: Parts list '{PARTS_LIST_FILE}' not found. Part names for predictions will be unavailable.")
    if not (part_seg_model and damage_seg_model and feature_extractor):
        print("WARNING: One or more Hugging Face models could not be loaded. The application functionality will be limited.")
    iface.launch(share=True)