Commit e83afa6
Parent(s): 7cc748c
Update main.py
main.py CHANGED
@@ -1,11 +1,11 @@
 import io
-from typing import List, Dict
+from typing import List
 import uvicorn
 import numpy as np
 import uuid
 from datetime import datetime
 from fastapi import FastAPI, UploadFile, File, HTTPException, Form
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, FileResponse
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.staticfiles import StaticFiles
 from PIL import Image
@@ -15,7 +15,6 @@ from src.detection import YOLOv11Detector
 from src.comparison import DamageComparator
 from src.visualization import DamageVisualizer
 from pathlib import Path
-from concurrent.futures import ProcessPoolExecutor, as_completed
 
 app = FastAPI(
     title="Car Damage Detection API",
@@ -44,13 +43,12 @@ MODEL_PATHS = {
     1: "models_small_version_2/best.pt",  # Small v2 PT
     2: "models_medium/best.pt",  # Medium v1 PT
     3: "models_medium_version_2/best.pt",  # Medium v2 PT
-    4: "models_large/best.pt",  # Large PT (no ONNX for large)
 
     # ONNX models (optimized with v1.19 + opset 21)
-    5: "models_small/best.onnx",  # Small v1 ONNX
-    6: "models_small_version_2/best.onnx",  # Small v2 ONNX
-    7: "models_medium/best.onnx",  # Medium v1 ONNX
-    8: "models_medium_version_2/best.onnx"  # Medium v2 ONNX
+    4: "models_small/best.onnx",  # Small v1 ONNX
+    5: "models_small_version_2/best.onnx",  # Small v2 ONNX
+    6: "models_medium/best.onnx",  # Medium v1 ONNX
+    7: "models_medium_version_2/best.onnx"  # Medium v2 ONNX
 }
 
 # Config paths - ONNX uses same config as PT version
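
This hunk removes the Large PyTorch entry and renumbers the ONNX models from 5-8 down to 4-7, shifting every ONNX index by one. A startup assertion can catch this kind of renumbering drift between the registries; a minimal sketch assuming the MODEL_PATHS and CONFIG_PATHS dicts from this file (check_model_registry is a hypothetical helper, not part of the commit):

    # Hypothetical startup check: every model index must have a config and a
    # recognized weight format, so renumbering one dict cannot silently
    # desynchronize the other.
    def check_model_registry(model_paths: dict, config_paths: dict) -> None:
        missing = set(model_paths) - set(config_paths)
        if missing:
            raise RuntimeError(f"Model indices without a config: {sorted(missing)}")
        for idx, path in model_paths.items():
            if not path.endswith((".pt", ".onnx")):
                raise RuntimeError(f"Unexpected weight format for index {idx}: {path}")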
@@ -59,19 +57,18 @@ CONFIG_PATHS = {
     1: "config_version2.yaml",  # Small v2 PT
     2: "config.yaml",  # Medium v1 PT
     3: "config_version2.yaml",  # Medium v2 PT
-    4: "config.yaml",  # Large PT
-    5: "config.yaml",  # Small v1 ONNX
-    6: "config_version2.yaml",  # Small v2 ONNX
-    7: "config.yaml",  # Medium v1 ONNX
-    8: "config_version2.yaml"  # Medium v2 ONNX
+    4: "config.yaml",  # Small v1 ONNX
+    5: "config_version2.yaml",  # Small v2 ONNX
+    6: "config.yaml",  # Medium v1 ONNX
+    7: "config_version2.yaml"  # Medium v2 ONNX
 }
 
 # Mapping from PT index to ONNX index
 PT_TO_ONNX_MAPPING = {
-    0: 5,  # Small v1 -> ONNX
-    1: 6,  # Small v2 -> ONNX
-    2: 7,  # Medium v1 -> ONNX
-    3: 8,  # Medium v2 -> ONNX
+    0: 4,  # Small v1 -> ONNX
+    1: 5,  # Small v2 -> ONNX
+    2: 6,  # Medium v1 -> ONNX
+    3: 7,  # Medium v2 -> ONNX
     4: None  # Large has no ONNX
 }
 
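
Under the new numbering, each PyTorch index N maps to ONNX index N + 4; the 4: None entry survives from when index 4 was the Large model, which this commit removes from MODEL_PATHS. A hedged sketch of how such a mapping is typically consumed for fallback (resolve_model_index is illustrative, not the project's function):

    from pathlib import Path

    # Illustrative fallback: upgrade a PT index to its ONNX counterpart only
    # when the mapped ONNX file actually exists on disk.
    def resolve_model_index(pt_index: int, model_paths: dict, pt_to_onnx: dict) -> int:
        onnx_index = pt_to_onnx.get(pt_index)
        if onnx_index is not None and Path(model_paths.get(onnx_index, "")).exists():
            return onnx_index
        return pt_index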
@@ -81,7 +78,7 @@ def get_optimal_model_index(select_models: int, prefer_onnx: bool = True) -> int
     Enhanced model selection with performance optimization info
     """
     # If user explicitly selects ONNX index (5..8) => use that ONNX with optimizations
-    if select_models in (5, 6, 7, 8):
+    if select_models in (4, 5, 6, 7):
         onnx_path = Path(MODEL_PATHS.get(select_models, ""))
         if not onnx_path.exists():
             raise FileNotFoundError(
@@ -90,7 +87,7 @@ def get_optimal_model_index(select_models: int, prefer_onnx: bool = True) -> int
         return select_models
 
     # Normalize to valid PT indices
-    if select_models not in (0, 1, 2, 3, 4):
+    if select_models not in (0, 1, 2, 3):
         select_models = 2  # default to medium v1
 
     # PT preferred for 0..4
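
Taken together, the two updated conditionals give get_optimal_model_index a simple contract: an explicit ONNX index (now 4-7) is honored as long as the file exists, and any other value is normalized to a valid PT index with medium v1 as the default. A condensed sketch of just that normalization path, assuming the constants above (the real function additionally consults prefer_onnx and raises FileNotFoundError for missing ONNX files):

    def normalize_selection(select_models: int) -> int:
        # Explicit ONNX choice (4-7) is honored as-is.
        if select_models in (4, 5, 6, 7):
            return select_models
        # Anything outside the PT range falls back to medium v1.
        if select_models not in (0, 1, 2, 3):
            select_models = 2
        return select_models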
@@ -113,12 +110,10 @@ def get_optimal_model_index(select_models: int, prefer_onnx: bool = True) -> int
 
 def load_detector(select_models: int = 2, prefer_onnx: bool = True):
     """
-    …
-    …
+    …
     Args:
         select_models: Model selection
-        - 0-4: PyTorch models
-        - 5-8: ONNX models (with maximum optimizations)
+        …
        prefer_onnx: Whether to prefer ONNX format for fallback
     """
     global detector, comparator, visualizer
@@ -148,14 +143,14 @@ def load_detector(select_models: int = 2, prefer_onnx: bool = True):
     # Log model info with optimization status
     model_type = "ONNX" if MODEL_PATHS[actual_model_index].endswith('.onnx') else "PyTorch"
     model_labels = [
-        "Small v1", "Small v2", "Medium v1", "Medium v2", "Large",
+        "Small v1", "Small v2", "Medium v1", "Medium v2",
         "Small v1 ONNX", "Small v2 ONNX", "Medium v1 ONNX", "Medium v2 ONNX"
     ]
 
     if 0 <= select_models < len(model_labels):
         model_size = model_labels[select_models]
     else:
-        raise ValueError(f"select_models={select_models} must be 0-8")
+        raise ValueError(f"select_models={select_models} must be 0-7")
 
     # Enhanced logging for optimization status
     optimization_status = "🚀 MAXIMUM OPTIMIZATIONS" if model_type == "ONNX" else "📦 Standard PyTorch"
@@ -298,7 +293,6 @@ async def detect_single_image(
 ):
     """
     Multi-view detection with ONNX Runtime optimizations
-
     Args:
         file: Single image (backward compatibility)
         files: Multiple images for multi-view detection
@@ -309,7 +303,7 @@ async def detect_single_image(
     """
     try:
         # Validate select_models
-        if select_models not in list(range(0, 9)):
+        if select_models not in list(range(0, 8)):
             raise HTTPException(status_code=400,
                                 detail="select_models must be 0-8 (0-4=PyTorch, 5-8=ONNX optimized)")
 
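
One review note on this hunk: the accepted range is now range(0, 8), i.e. 0-7, but the HTTPException detail still reads "must be 0-8 (0-4=PyTorch, 5-8=ONNX optimized)", which describes the old numbering. Deriving the message from MODEL_PATHS would keep the two from drifting; a small sketch under that assumption:

    # Sketch: build the validation detail from the registry itself so the
    # message can never disagree with the indices the code accepts.
    valid = sorted(MODEL_PATHS)
    pt_idx = [i for i in valid if MODEL_PATHS[i].endswith(".pt")]
    onnx_idx = [i for i in valid if MODEL_PATHS[i].endswith(".onnx")]
    detail = (f"select_models must be {valid[0]}-{valid[-1]} "
              f"({pt_idx[0]}-{pt_idx[-1]}=PyTorch, {onnx_idx[0]}-{onnx_idx[-1]}=ONNX optimized)")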
@@ -349,39 +343,52 @@ async def detect_single_image(
                 },
                 "visualized_image_path": f"uploads/{filename}",
                 "visualized_image_url": f"http://localhost:8000/uploads/{filename}",
-                "filename": filename
+                "filename": filename,
+                "performance_note": "Using ONNX optimizations" if model_type == "ONNX" else "Consider using ONNX models (5-8) for better performance"
             })
 
-        # Case 2: Multiple images
-        elif files is not None:
+        # Case 2: Multiple images - MULTI-VIEW DETECTION with ReID
+        elif files is not None and len(files) > 0:
+            print(f"\nMulti-view detection with {len(files)} images")
+
+            images_list = []
             detections_list = []
-            …
-            …
-            for idx, …
-                contents = await …
+
+            # Process all images
+            for idx, img_file in enumerate(files):
+                contents = await img_file.read()
                 image = Image.open(io.BytesIO(contents)).convert("RGB")
                 image_np = np.array(image)
                 image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
-                …
+
+                images_list.append(image_bgr)
                 detections = current_detector.detect(image_bgr)
                 detections_list.append(detections)
 
-            …
-            …
+                print(f"  View {idx + 1}: {len(detections['boxes'])} detections")
+
+            # DEDUPLICATION using ReID
+            print("\nPerforming cross-view deduplication...")
+            unique_damages = comparator.deduplicate_detections_across_views(
+                detections_list, images_list
+            )
 
             # Create combined visualization
-            …
-            …
-            …
-            total_width = sum(widths)
-            combined_img = np.zeros((max_height, total_width, 3), dtype=np.uint8)
-            x_offset = 0
+            combined_height = max(img.shape[0] for img in images_list)
+            combined_width = sum(img.shape[1] for img in images_list)
+            combined_img = np.ones((combined_height, combined_width, 3), dtype=np.uint8) * 255
 
-            …
+            x_offset = 0
+            for img_idx, (image, detections) in enumerate(zip(images_list, detections_list)):
+                # Resize if needed
                 h, w = image.shape[:2]
-                if h != …
-                …
-                …
+                if h != combined_height:
+                    scale = combined_height / h
+                    new_w = int(w * scale)
+                    image = cv2.resize(image, (new_w, combined_height))
+                    w = new_w
+
+                # Draw on combined image
                 combined_img[:, x_offset:x_offset + w] = image
 
             # Draw detections with unique IDs
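
The new multi-view branch composes all views onto one white canvas: every image is scaled to the height of the tallest view, then pasted at a running x_offset, presumably because x_offset is reused afterwards to draw each view's detections at the right horizontal position. When the offsets are not needed, the same composition can be written compactly; a standalone sketch, assuming images_list holds BGR numpy arrays:

    import cv2

    def hstack_views(images_list):
        # Scale each view to the tallest height, then concatenate horizontally.
        target_h = max(img.shape[0] for img in images_list)
        resized = [
            cv2.resize(img, (int(img.shape[1] * target_h / img.shape[0]), target_h))
            for img in images_list
        ]
        return cv2.hconcat(resized)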
@@ -456,73 +463,6 @@ async def detect_single_image(
         raise HTTPException(status_code=500, detail=f"Detection failed: {str(e)}")
 
 
-def process_single_position(
-        i: int,
-        before_contents: bytes,
-        after_contents: bytes,
-        config_path: str,
-        timestamp_str: str,
-        session_id: str
-) -> Dict:
-    # Load components inside the process (same logic as before)
-    from src.detection import YOLOv11Detector
-    from src.comparison import DamageComparator
-    from src.visualization import DamageVisualizer
-
-    detector = YOLOv11Detector(config_path=config_path)
-    comparator = DamageComparator(config_path=config_path)
-    visualizer = DamageVisualizer(config_path=config_path)
-
-    # Old logic: preprocess images
-    before_img = Image.open(io.BytesIO(before_contents)).convert("RGB")
-    after_img = Image.open(io.BytesIO(after_contents)).convert("RGB")
-    before_np = np.array(before_img)
-    after_np = np.array(after_img)
-    before_bgr = cv2.cvtColor(before_np, cv2.COLOR_RGB2BGR)
-    after_bgr = cv2.cvtColor(after_np, cv2.COLOR_RGB2BGR)
-
-    # Detect (unchanged)
-    before_detections = detector.detect(before_bgr)
-    after_detections = detector.detect(after_bgr)
-
-    # Compare (unchanged)
-    comparison = comparator.analyze_damage_status(
-        before_detections, after_detections,
-        before_bgr, after_bgr
-    )
-
-    # Visualize and save (unchanged)
-    vis_img = visualizer.create_comparison_visualization(
-        before_bgr, after_bgr,
-        before_detections, after_detections,
-        comparison
-    )
-    vis_filename = f"comparison_{timestamp_str}_{session_id}_pos{i + 1}.jpg"
-    vis_path = UPLOADS_DIR / vis_filename
-    cv2.imwrite(str(vis_path), vis_img)
-    vis_url = f"http://localhost:8000/uploads/{vis_filename}"
-
-    # Return result for aggregation (structure unchanged)
-    return {
-        f"position_{i + 1}": {
-            "case": comparison['case'],
-            "message": comparison['message'],
-            "statistics": comparison['statistics'],
-            "new_damages": comparison['new_damages'],
-            "matched_damages": comparison['matched_damages'],
-            "repaired_damages": comparison['repaired_damages'],
-            "using_reid": comparison['statistics'].get('using_reid', True),
-            "visualization_path": f"uploads/{vis_filename}",
-            "visualization_url": vis_url,
-            "filename": vis_filename
-        },
-        "before_bgr": before_bgr,
-        "after_bgr": after_bgr,
-        "before_detections": before_detections,
-        "after_detections": after_detections
-    }
-
-
 @app.post("/compare")
 async def compare_vehicle_damages(
     # Before delivery images (6 positions)
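
This hunk deletes process_single_position wholesale, and with it the only caller of the ProcessPoolExecutor import removed at the top of the file: the /compare handler now processes the six position pairs sequentially in-process (see the rewritten loop further down). That trades parallelism for simpler state handling, since the helper had to re-import and re-construct the detector, comparator, and visualizer inside every worker process. If concurrency were wanted back without separate processes, one hedged alternative is thread offloading; a sketch where process_pair is a hypothetical synchronous stand-in for the removed helper:

    import asyncio

    async def run_positions(pairs, process_pair):
        # Offload each blocking position job to a worker thread and await all.
        # process_pair(i, before_bytes, after_bytes) is assumed synchronous.
        tasks = [
            asyncio.to_thread(process_pair, i, before, after)
            for i, (before, after) in enumerate(pairs)
        ]
        return await asyncio.gather(*tasks)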
@@ -545,7 +485,6 @@ async def compare_vehicle_damages(
 ):
     """
     Enhanced comparison with ONNX Runtime optimizations and ReID
-
     Args:
         before_1-6: Before delivery images from 6 positions
         after_1-6: After delivery images from 6 positions
|
|
554 |
"""
|
555 |
try:
|
556 |
# Validate select_models
|
557 |
-
if select_models not in list(range(0,
|
558 |
raise HTTPException(status_code=400,
|
559 |
detail="select_models must be 0-8 (0-4=PyTorch, 5-8=ONNX optimized)")
|
560 |
|
@@ -564,13 +503,15 @@ async def compare_vehicle_damages(
         before_images = [before_1, before_2, before_3, before_4, before_5, before_6]
         after_images = [after_1, after_2, after_3, after_4, after_5, after_6]
 
-        …
-        …
-        …
+        position_results = []
+        all_visualizations = []
+        image_pairs = []
 
-        # …
-        …
-        …
+        # Collect all before/after images and detections
+        all_before_images = []
+        all_after_images = []
+        all_before_detections = []
+        all_after_detections = []
 
         # Overall statistics
         total_new_damages = 0
|
@@ -580,51 +521,73 @@ async def compare_vehicle_damages(
|
|
580 |
session_id = str(uuid.uuid4())[:8]
|
581 |
timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
|
582 |
|
583 |
-
|
584 |
-
|
585 |
-
|
586 |
-
|
587 |
-
all_after_images = []
|
588 |
-
all_before_detections = []
|
589 |
-
all_after_detections = []
|
590 |
|
591 |
-
|
592 |
-
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
|
602 |
-
|
603 |
-
for i in range(6)
|
604 |
-
]
|
605 |
|
606 |
-
|
607 |
-
result = future.result()
|
608 |
-
pos_key = list(result.keys())[0] # e.g., 'position_1'
|
609 |
-
position_results.append(result)
|
610 |
-
all_visualizations.append(result[pos_key]["visualization_url"])
|
611 |
|
612 |
-
|
613 |
-
|
|
|
614 |
|
615 |
-
|
616 |
-
|
617 |
-
all_before_detections.append(result["before_detections"])
|
618 |
-
all_after_detections.append(result["after_detections"])
|
619 |
|
620 |
-
|
621 |
-
|
622 |
-
|
623 |
-
|
624 |
-
|
625 |
|
626 |
-
|
627 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
628 |
|
629 |
# Deduplicate BEFORE damages across all 6 views
|
630 |
unique_before = comparator.deduplicate_detections_across_views(
|
@@ -653,7 +616,7 @@ async def compare_vehicle_damages(
             overall_message = "Existing damages from beginning → Delivery completed"
 
         # Create summary grid
-        grid_results = [res[…
+        grid_results = [res[f"position_{i + 1}"] for i, res in enumerate(position_results)]
         grid_img = visualizer.create_summary_grid(grid_results, image_pairs)
 
         grid_filename = f"summary_grid_{timestamp_str}_{session_id}.jpg"
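
Each element of position_results is a single-key dict keyed by its position name, so the comprehension above unwraps the entries by rebuilding the key from the loop index; this relies on the list staying in position order, which the new sequential loop guarantees. A minimal illustration of the shape being unwrapped:

    # Illustration: the structure the grid_results extraction expects.
    position_results = [
        {"position_1": {"case": "new_damage", "message": "..."}},
        {"position_2": {"case": "no_change", "message": "..."}},
    ]
    grid_results = [res[f"position_{i + 1}"] for i, res in enumerate(position_results)]
    # -> [{'case': 'new_damage', ...}, {'case': 'no_change', ...}]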
@@ -711,6 +674,7 @@ async def compare_vehicle_damages(
         raise HTTPException(status_code=500, detail=f"Comparison failed: {str(e)}")
 
 
+
 if __name__ == "__main__":
     import os
     uvicorn.run(
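
For reference, the reworked endpoints can be exercised with any HTTP client. A hedged sketch using requests, assuming the server runs on localhost:8000, that the detection route is mounted at /detect (its decorator sits outside this diff), and field names as they appear in the signatures above:

    import requests

    # Multi-view detection: several views of one vehicle in a single request.
    with open("view1.jpg", "rb") as f1, open("view2.jpg", "rb") as f2:
        resp = requests.post(
            "http://localhost:8000/detect",
            files=[("files", f1), ("files", f2)],
            data={"select_models": 6},  # Medium v1 ONNX under the new numbering
        )
    print(resp.json())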