Commit 7f8e169 · Parent: b498bf1
Update main.py

main.py CHANGED

@@ -1,11 +1,11 @@
 import io
-from typing import List
+from typing import List, Dict
 import uvicorn
 import numpy as np
 import uuid
 from datetime import datetime
 from fastapi import FastAPI, UploadFile, File, HTTPException, Form
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.staticfiles import StaticFiles
 from PIL import Image
@@ -15,6 +15,7 @@ from src.detection import YOLOv11Detector
 from src.comparison import DamageComparator
 from src.visualization import DamageVisualizer
 from pathlib import Path
+from concurrent.futures import ProcessPoolExecutor, as_completed
 
 app = FastAPI(
     title="Car Damage Detection API",
@@ -43,12 +44,13 @@ MODEL_PATHS = {
     1: "models_small_version_2/best.pt",   # Small v2 PT
     2: "models_medium/best.pt",            # Medium v1 PT
     3: "models_medium_version_2/best.pt",  # Medium v2 PT
+    4: "models_large/best.pt",             # Large PT (no ONNX for large)
 
     # ONNX models (optimized with v1.19 + opset 21)
-    4: "models_small/best.onnx",           # Small v1 ONNX
-    5: "models_small_version_2/best.onnx", # Small v2 ONNX
-    6: "models_medium/best.onnx",          # Medium v1 ONNX
-    7: "models_medium_version_2/best.onnx" # Medium v2 ONNX
+    5: "models_small/best.onnx",           # Small v1 ONNX
+    6: "models_small_version_2/best.onnx", # Small v2 ONNX
+    7: "models_medium/best.onnx",          # Medium v1 ONNX
+    8: "models_medium_version_2/best.onnx" # Medium v2 ONNX
 }
 
 # Config paths - ONNX uses same config as PT version
@@ -57,18 +59,19 @@ CONFIG_PATHS = {
     1: "config_version2.yaml", # Small v2 PT
     2: "config.yaml",          # Medium v1 PT
     3: "config_version2.yaml", # Medium v2 PT
-    4: "config.yaml",          # Small v1 ONNX
-    5: "config_version2.yaml", # Small v2 ONNX
-    6: "config.yaml",          # Medium v1 ONNX
-    7: "config_version2.yaml"  # Medium v2 ONNX
+    4: "config.yaml",          # Large PT
+    5: "config.yaml",          # Small v1 ONNX
+    6: "config_version2.yaml", # Small v2 ONNX
+    7: "config.yaml",          # Medium v1 ONNX
+    8: "config_version2.yaml"  # Medium v2 ONNX
 }
 
 # Mapping from PT index to ONNX index
 PT_TO_ONNX_MAPPING = {
-    0: 4, # Small v1 -> ONNX
-    1: 5, # Small v2 -> ONNX
-    2: 6, # Medium v1 -> ONNX
-    3: 7, # Medium v2 -> ONNX
+    0: 5, # Small v1 -> ONNX
+    1: 6, # Small v2 -> ONNX
+    2: 7, # Medium v1 -> ONNX
+    3: 8, # Medium v2 -> ONNX
     4: None # Large has no ONNX
 }
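
Note on the new index scheme: PyTorch weights now occupy indices 0-4 and the ONNX exports 5-8, so every ONNX index shifts up by one relative to the previous revision to make room for the Large model at index 4. A minimal sketch of how a PT selection can fall back to its ONNX counterpart under this scheme (the helper name resolve_index is illustrative; the app's actual logic lives in get_optimal_model_index):

    from pathlib import Path

    PT_TO_ONNX_MAPPING = {0: 5, 1: 6, 2: 7, 3: 8, 4: None}
    MODEL_PATHS = {5: "models_small/best.onnx"}  # abbreviated for the sketch

    def resolve_index(select_models: int, prefer_onnx: bool = True) -> int:
        onnx_idx = PT_TO_ONNX_MAPPING.get(select_models)
        onnx_path = MODEL_PATHS.get(onnx_idx) if onnx_idx is not None else None
        if prefer_onnx and onnx_path and Path(onnx_path).exists():
            return onnx_idx      # use the optimized ONNX export
        return select_models     # otherwise keep the PyTorch selection

    # resolve_index(0) -> 5 when models_small/best.onnx exists, else 0
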
@@ -78,7 +81,7 @@ def get_optimal_model_index(select_models: int, prefer_onnx: bool = True) -> int
     Enhanced model selection with performance optimization info
     """
     # If user explicitly selects ONNX index (5..8) => use that ONNX with optimizations
-    if select_models in (4, 5, 6, 7):
+    if select_models in (5, 6, 7, 8):
         onnx_path = Path(MODEL_PATHS.get(select_models, ""))
         if not onnx_path.exists():
             raise FileNotFoundError(
@@ -87,7 +90,7 @@ def get_optimal_model_index(select_models: int, prefer_onnx: bool = True) -> int
         return select_models
 
     # Normalize to valid PT indices
-    if select_models not in (0, 1, 2, 3):
+    if select_models not in (0, 1, 2, 3, 4):
         select_models = 2  # default to medium v1
 
     # PT preferred for 0..4
@@ -110,11 +113,12 @@ def get_optimal_model_index(select_models: int, prefer_onnx: bool = True) -> int
 
 def load_detector(select_models: int = 2, prefer_onnx: bool = True):
     """
-    Load detector with ONNX support
+    Load detector with optimized ONNX Runtime v1.19 support
 
     Args:
         select_models: Model selection
-        - 0-3: PyTorch models, 4-7: ONNX models
+        - 0-4: PyTorch models (original logic)
+        - 5-8: ONNX models (with maximum optimizations)
         prefer_onnx: Whether to prefer ONNX format for fallback
     """
     global detector, comparator, visualizer
@@ -144,14 +148,14 @@ def load_detector(select_models: int = 2, prefer_onnx: bool = True):
     # Log model info with optimization status
     model_type = "ONNX" if MODEL_PATHS[actual_model_index].endswith('.onnx') else "PyTorch"
     model_labels = [
-        "Small v1", "Small v2", "Medium v1", "Medium v2",
+        "Small v1", "Small v2", "Medium v1", "Medium v2", "Large",
         "Small v1 ONNX", "Small v2 ONNX", "Medium v1 ONNX", "Medium v2 ONNX"
     ]
 
     if 0 <= select_models < len(model_labels):
         model_size = model_labels[select_models]
     else:
-        raise ValueError(f"select_models={select_models} must be 0-7")
+        raise ValueError(f"select_models={select_models} must be 0-8")
 
     # Enhanced logging for optimization status
     optimization_status = "🚀 MAXIMUM OPTIMIZATIONS" if model_type == "ONNX" else "📦 Standard PyTorch"
@@ -305,7 +309,7 @@ async def detect_single_image(
     """
     try:
         # Validate select_models
-        if select_models not in list(range(0, 8)):
+        if select_models not in list(range(0, 9)):
            raise HTTPException(status_code=400,
                                detail="select_models must be 0-8 (0-4=PyTorch, 5-8=ONNX optimized)")
 
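
The validation above tests membership in list(range(0, 9)); for integer inputs a chained comparison is equivalent and avoids building the list. A minimal sketch (ValueError stands in for the endpoint's HTTPException to keep it self-contained):

    def validate_select_models(select_models: int) -> None:
        # Same acceptance set as `select_models in list(range(0, 9))`
        if not 0 <= select_models <= 8:
            raise ValueError("select_models must be 0-8 (0-4=PyTorch, 5-8=ONNX optimized)")
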
@@ -345,52 +349,39 @@
             },
             "visualized_image_path": f"uploads/{filename}",
             "visualized_image_url": f"http://localhost:8000/uploads/{filename}",
-            "filename": filename,
-            "performance_note": "Using ONNX optimizations" if model_type == "ONNX" else "Consider using ONNX models (5-8) for better performance"
+            "filename": filename
         })
 
-        # Case 2: Multiple images
-        elif files is not None:
-            print(f"\nMulti-view detection with {len(files)} images")
-
-            images_list = []
+        # Case 2: Multiple images
+        elif files is not None:
             detections_list = []
-
-            for idx, file in enumerate(files):
-                contents = await file.read()
+            images = []
+            unique_damages = {}
+            for idx, f in enumerate(files):
+                contents = await f.read()
                 image = Image.open(io.BytesIO(contents)).convert("RGB")
                 image_np = np.array(image)
                 image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
-
-                images_list.append(image_bgr)
+                images.append(image_bgr)
                 detections = current_detector.detect(image_bgr)
                 detections_list.append(detections)
 
-            # DEDUPLICATION using ReID
-            print("\nPerforming cross-view deduplication...")
-            unique_damages = comparator.deduplicate_detections_across_views(
-                detections_list, images_list
-            )
+            # Deduplicate across views
+            unique_damages = comparator.deduplicate_detections_across_views(detections_list, images)
 
             # Create combined visualization
-            combined_height = min(img.shape[0] for img in images_list)
-            total_width = sum(img.shape[1] for img in images_list)
-            combined_img = np.zeros((combined_height, total_width, 3), dtype=np.uint8)
-
+            heights = [img.shape[0] for img in images]
+            widths = [img.shape[1] for img in images]
+            max_height = max(heights)
+            total_width = sum(widths)
+            combined_img = np.zeros((max_height, total_width, 3), dtype=np.uint8)
             x_offset = 0
-            for img_idx, (image, detections) in enumerate(zip(images_list, detections_list)):
-                # Resize if needed
-                h, w = image.shape[:2]
-                if h != combined_height:
-                    scale = combined_height / h
-                    new_w = int(w * scale)
-                    image = cv2.resize(image, (new_w, combined_height))
-                    w = new_w
 
+            for img_idx, image in enumerate(images):
+                h, w = image.shape[:2]
+                if h != max_height:
+                    image = cv2.resize(image, (w, max_height))
+                detections = detections_list[img_idx]
                 combined_img[:, x_offset:x_offset + w] = image
 
             # Draw detections with unique IDs
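
One behavioral detail of the new combined visualization: total_width is summed from the original image widths, so a shorter image is stretched to max_height at its original width. That changes its aspect ratio but keeps the precomputed column offsets valid. A sketch of an aspect-preserving alternative, which instead computes widths after scaling (hstack_canvas is a hypothetical helper, not part of the app; assumes a non-empty list of HxWx3 uint8 arrays):

    import cv2
    import numpy as np

    def hstack_canvas(images: list) -> np.ndarray:
        max_h = max(img.shape[0] for img in images)
        scaled = []
        for img in images:
            h, w = img.shape[:2]
            if h != max_h:
                img = cv2.resize(img, (int(w * max_h / h), max_h))  # keep aspect ratio
            scaled.append(img)
        canvas = np.zeros((max_h, sum(img.shape[1] for img in scaled), 3), dtype=np.uint8)
        x = 0
        for img in scaled:
            canvas[:, x:x + img.shape[1]] = img
            x += img.shape[1]
        return canvas
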
@@ -465,6 +456,73 @@ async def detect_single_image(
         raise HTTPException(status_code=500, detail=f"Detection failed: {str(e)}")
 
 
+def process_single_position(
+    i: int,
+    before_contents: bytes,
+    after_contents: bytes,
+    config_path: str,
+    timestamp_str: str,
+    session_id: str
+) -> Dict:
+    # Load components inside the worker process (keeps the old logic)
+    from src.detection import YOLOv11Detector
+    from src.comparison import DamageComparator
+    from src.visualization import DamageVisualizer
+
+    detector = YOLOv11Detector(config_path=config_path)
+    comparator = DamageComparator(config_path=config_path)
+    visualizer = DamageVisualizer(config_path=config_path)
+
+    # Old logic: preprocess images
+    before_img = Image.open(io.BytesIO(before_contents)).convert("RGB")
+    after_img = Image.open(io.BytesIO(after_contents)).convert("RGB")
+    before_np = np.array(before_img)
+    after_np = np.array(after_img)
+    before_bgr = cv2.cvtColor(before_np, cv2.COLOR_RGB2BGR)
+    after_bgr = cv2.cvtColor(after_np, cv2.COLOR_RGB2BGR)
+
+    # Detect (unchanged)
+    before_detections = detector.detect(before_bgr)
+    after_detections = detector.detect(after_bgr)
+
+    # Compare (unchanged)
+    comparison = comparator.analyze_damage_status(
+        before_detections, after_detections,
+        before_bgr, after_bgr
+    )
+
+    # Visualize and save (unchanged)
+    vis_img = visualizer.create_comparison_visualization(
+        before_bgr, after_bgr,
+        before_detections, after_detections,
+        comparison
+    )
+    vis_filename = f"comparison_{timestamp_str}_{session_id}_pos{i + 1}.jpg"
+    vis_path = UPLOADS_DIR / vis_filename
+    cv2.imwrite(str(vis_path), vis_img)
+    vis_url = f"http://localhost:8000/uploads/{vis_filename}"
+
+    # Return the result for aggregation (same structure as before)
+    return {
+        f"position_{i + 1}": {
+            "case": comparison['case'],
+            "message": comparison['message'],
+            "statistics": comparison['statistics'],
+            "new_damages": comparison['new_damages'],
+            "matched_damages": comparison['matched_damages'],
+            "repaired_damages": comparison['repaired_damages'],
+            "using_reid": comparison['statistics'].get('using_reid', True),
+            "visualization_path": f"uploads/{vis_filename}",
+            "visualization_url": vis_url,
+            "filename": vis_filename
+        },
+        "before_bgr": before_bgr,
+        "after_bgr": after_bgr,
+        "before_detections": before_detections,
+        "after_detections": after_detections
+    }
+
+
 @app.post("/compare")
 async def compare_vehicle_damages(
     # Before delivery images (6 positions)
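
process_single_position re-imports and re-constructs the detector, comparator, and visualizer inside the worker because ProcessPoolExecutor pickles only the task function reference and its arguments; loaded models and other process-local state do not cross the process boundary. A minimal, self-contained sketch of that constraint (worker and payload are illustrative names):

    from concurrent.futures import ProcessPoolExecutor

    def worker(payload: bytes) -> int:
        # Anything heavyweight (e.g. a model) must be created here, inside the
        # child process; only `payload` was pickled and sent across.
        return len(payload)

    if __name__ == "__main__":
        with ProcessPoolExecutor(max_workers=2) as executor:
            futures = [executor.submit(worker, b) for b in (b"abc", b"de")]
            print([f.result() for f in futures])  # [3, 2]
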
@@ -496,7 +554,7 @@ async def compare_vehicle_damages(
     """
     try:
         # Validate select_models
-        if select_models not in list(range(0, 8)):
+        if select_models not in list(range(0, 9)):
            raise HTTPException(status_code=400,
                                detail="select_models must be 0-8 (0-4=PyTorch, 5-8=ONNX optimized)")
 
@@ -506,15 +564,13 @@ async def compare_vehicle_damages(
         before_images = [before_1, before_2, before_3, before_4, before_5, before_6]
         after_images = [after_1, after_2, after_3, after_4, after_5, after_6]
 
-        position_results = []
-        all_visualizations = []
-        image_pairs = []
+        # Read contents up front to pass into the worker processes (UploadFile is not pickleable)
+        before_contents_list = [await img.read() for img in before_images]
+        after_contents_list = [await img.read() for img in after_images]
 
-        # Multi-view storage
-        all_before_images = []
-        all_after_images = []
-        all_before_detections = []
-        all_after_detections = []
+        # Config to load inside the worker processes
+        actual_model_index = get_optimal_model_index(select_models, prefer_onnx)
+        config_path = CONFIG_PATHS.get(actual_model_index, "config.yaml")
 
         # Overall statistics
         total_new_damages = 0
@@ -524,73 +580,51 @@ async def compare_vehicle_damages(
         session_id = str(uuid.uuid4())[:8]
         timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
 
-        # Process each position pair sequentially
-        for i in range(6):
-            before_contents = await before_images[i].read()
-            after_contents = await after_images[i].read()
-            before_img = Image.open(io.BytesIO(before_contents)).convert("RGB")
-            after_img = Image.open(io.BytesIO(after_contents)).convert("RGB")
-            before_np = np.array(before_img)
-            after_np = np.array(after_img)
-
-            before_bgr = cv2.cvtColor(before_np, cv2.COLOR_RGB2BGR)
-            after_bgr = cv2.cvtColor(after_np, cv2.COLOR_RGB2BGR)
-
-            # Store for multi-view analysis
-            all_before_images.append(before_bgr)
-            all_after_images.append(after_bgr)
-
-            # Detect damages
-            before_detections = detector.detect(before_bgr)
-            after_detections = detector.detect(after_bgr)
-
-            all_before_detections.append(before_detections)
-            all_after_detections.append(after_detections)
-
-            # Compare before/after
-            comparison = comparator.analyze_damage_status(
-                before_detections, after_detections,
-                before_bgr, after_bgr
-            )
-
-            # Create comparison visualization
-            vis_img = visualizer.create_comparison_visualization(
-                before_bgr, after_bgr,
-                before_detections, after_detections,
-                comparison
-            )
-
-            vis_filename = f"comparison_{timestamp_str}_{session_id}_pos{i + 1}.jpg"
-            vis_path = UPLOADS_DIR / vis_filename
-            cv2.imwrite(str(vis_path), vis_img)
-
-            vis_url = f"http://localhost:8000/uploads/{vis_filename}"
-            all_visualizations.append(vis_url)
-
-            # Store position result with ReID info
-            position_results.append({
-                f"position_{i + 1}": {
-                    "case": comparison['case'],
-                    "message": comparison['message'],
-                    "statistics": comparison['statistics'],
-                    "new_damages": comparison['new_damages'],
-                    "matched_damages": comparison['matched_damages'],
-                    "repaired_damages": comparison['repaired_damages'],
-                    "using_reid": comparison['statistics'].get('using_reid', True),
-                    "visualization_path": f"uploads/{vis_filename}",
-                    "visualization_url": vis_url,
-                    "filename": vis_filename
-                }
-            })
+        position_results = []
+        all_visualizations = []
+        image_pairs = []
+        all_before_images = []
+        all_after_images = []
+        all_before_detections = []
+        all_after_detections = []
+
+        # Parallelize
+        with ProcessPoolExecutor(max_workers=6) as executor:
+            futures = [
+                executor.submit(
+                    process_single_position,
+                    i,
+                    before_contents_list[i],
+                    after_contents_list[i],
+                    config_path,
+                    timestamp_str,
+                    session_id
+                )
+                for i in range(6)
+            ]
+
+            for future in as_completed(futures):
+                result = future.result()
+                pos_key = list(result.keys())[0]  # e.g., 'position_1'
+                position_results.append(result)
+                all_visualizations.append(result[pos_key]["visualization_url"])
+
+                # Keep the preprocessed pair for image_pairs (before_bgr and after_bgr are returned by the worker)
+                image_pairs.append((result["before_bgr"], result["after_bgr"]))
+
+                all_before_images.append(result["before_bgr"])
+                all_after_images.append(result["after_bgr"])
+                all_before_detections.append(result["before_detections"])
+                all_after_detections.append(result["after_detections"])
+
+                # Update statistics from the comparison
+                comparison = result[pos_key]
+                total_new_damages += len(comparison["new_damages"])
+                total_existing_damages += len(comparison["repaired_damages"])
+                total_matched_damages += len(comparison["matched_damages"])
+
+        # Sort position_results by position number
+        position_results.sort(key=lambda x: int(list(x.keys())[0].split('_')[1]))
 
         # Deduplicate BEFORE damages across all 6 views
         unique_before = comparator.deduplicate_detections_across_views(
@@ -619,7 +653,7 @@ async def compare_vehicle_damages(
             overall_message = "Existing damages from beginning → Delivery completed"
 
         # Create summary grid
-        grid_results = [res[f"position_{i + 1}"] for i, res in enumerate(position_results)]
+        grid_results = [res[list(res.keys())[0]] for res in position_results]
         grid_img = visualizer.create_summary_grid(grid_results, image_pairs)
 
         grid_filename = f"summary_grid_{timestamp_str}_{session_id}.jpg"
@@ -677,7 +711,6 @@ async def compare_vehicle_damages(
         raise HTTPException(status_code=500, detail=f"Comparison failed: {str(e)}")
 
 
-
 if __name__ == "__main__":
     import os
     uvicorn.run(
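
Because as_completed yields futures in completion order rather than submission order, position_results can arrive shuffled, hence the explicit sort by position number after the pool drains. A small self-contained sketch of the same pattern (job is an illustrative stand-in for process_single_position):

    from concurrent.futures import ProcessPoolExecutor, as_completed

    def job(i: int) -> dict:
        return {f"position_{i + 1}": i * i}

    if __name__ == "__main__":
        with ProcessPoolExecutor(max_workers=3) as executor:
            futures = [executor.submit(job, i) for i in range(6)]
            results = [f.result() for f in as_completed(futures)]  # arbitrary order
        results.sort(key=lambda r: int(next(iter(r)).split("_")[1]))
        print([next(iter(r)) for r in results])  # position_1 ... position_6
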