minh9972t12 committed
Commit c8a046c · 1 Parent(s): 89929c4

Upload 12 files

README.md CHANGED
@@ -1,14 +1,29 @@
- ---
- title: Yolocar
- emoji: 💻
- colorFrom: green
- colorTo: blue
- sdk: gradio
- sdk_version: 5.42.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- short_description: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Car Damage Detection System with YOLOv11
+
+ A state-of-the-art vehicle damage detection and comparison system using YOLOv11, designed to analyze vehicle conditions before and after delivery.
+
+ ## Features
+
+ - **YOLOv11 Detection**: A trained YOLOv11 model for accurate damage detection
+ - **Smart Comparison**: IoU-based matching using the Hungarian algorithm
+ - **6-Position Analysis**: Comprehensive vehicle inspection from multiple angles
+ - **Case Classification**: Automatic categorization of damage scenarios
+ - **Visual Reports**: Generated comparison images with damage highlights
+ - **REST API**: Easy integration with existing systems
+
+ ## Use Cases
+
+ The system identifies three main scenarios:
+
+ 1. **Case 1 - Existing Damage**: Damage present both before and after delivery → Delivery completed
+ 2. **Case 2 - New Damage**: New damage detected after delivery → Issue occurred during delivery
+ 3. **Case 3 - No Damage**: No damage detected → Successful delivery
+
+ ## Quick Start
+
+ ### Installation
+
+ 1. Clone the repository
+ 2. Install dependencies:
+ ```bash
+ pip install -r requirements.txt
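For reference, here is a minimal client sketch for the `/detect` endpoint this commit adds (defined in `main.py` below). The base URL and the `car.jpg` filename are illustrative assumptions, not part of the commit:

```python
# Minimal sketch: POST an image to /detect with the `requests` library.
# Assumes the API is running at http://localhost:8000 and car.jpg exists.
import requests

with open("car.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/detect",
        files={"file": ("car.jpg", f, "image/jpeg")},
    )

resp.raise_for_status()
result = resp.json()
print(result["statistics"]["total_damages"], "damage(s) found")
print("Visualization at:", result["visualized_image_path"])
```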
__pycache__/main.cpython-313.pyc ADDED
Binary file (11.5 kB).
 
config.yaml ADDED
@@ -0,0 +1,31 @@
+ # Model Configuration
+ model:
+   path: "models/best.pt"   # Use the best.pt model trained in Colab
+   confidence: 0.1          # Confidence threshold
+   iou_threshold: 0.14      # NMS IoU threshold
+   device: "cpu"            # "cuda:0" for GPU, "cpu" for CPU
+
+ # Detection Configuration
+ detection:
+   classes:
+     - "Damaged_Bonnet"
+     - "Damaged_Bumper"
+     - "Damaged_Dickey"
+     - "Damaged_Door"
+     - "Damaged_Fender"
+     - "Damaged_Light"
+     - "Damaged_Windshield"
+
+ # Comparison Configuration
+ comparison:
+   iou_match_threshold: 0.2   # IoU threshold for matching damages
+   position_tolerance: 50     # Pixel tolerance for position matching
+
+ # Visualization
+ visualization:
+   line_thickness: 2
+   font_scale: 0.5
+   colors:                            # OpenCV BGR order
+     existing_damage: [0, 255, 0]     # Green for existing damages
+     new_damage: [0, 0, 255]          # Red for new damages
+     matched_damage: [255, 255, 0]    # Cyan for matched damages
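For orientation, a minimal sketch of how these values are read at runtime; it uses the same `yaml.safe_load` pattern as the `src/` modules below and assumes `config.yaml` sits in the working directory:

```python
# Minimal sketch: load config.yaml the way DamageComparator/YOLOv11Detector do.
import yaml

with open("config.yaml", "r") as f:
    config = yaml.safe_load(f)

print(config["model"]["path"])                      # "models/best.pt"
print(config["model"]["confidence"])                # 0.1
print(config["comparison"]["iou_match_threshold"])  # 0.2
print(config["detection"]["classes"][0])            # "Damaged_Bonnet"
```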
main.py ADDED
@@ -0,0 +1,283 @@
+ import io
+ import shutil
+ import uuid
+ from datetime import datetime
+ from pathlib import Path
+
+ import uvicorn
+ import numpy as np
+ import cv2
+ from PIL import Image
+ from fastapi import FastAPI, UploadFile, File, HTTPException
+ from fastapi.responses import JSONResponse
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.staticfiles import StaticFiles
+
+ from src.detection import YOLOv11Detector
+ from src.comparison import DamageComparator
+ from src.visualization import DamageVisualizer
+
+
+ app = FastAPI(
+     title="Car Damage Detection API",
+     description="YOLOv11-based car damage detection and comparison system",
+     version="1.0.0"
+ )
+
+ # Add CORS middleware
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Initialize components
+ detector = YOLOv11Detector()
+ comparator = DamageComparator()
+ visualizer = DamageVisualizer()
+
+ # Create necessary directories
+ Path("uploads").mkdir(exist_ok=True)
+ Path("results").mkdir(exist_ok=True)
+
+ # Mount static files directory
+ app.mount("/uploads", StaticFiles(directory="uploads"), name="uploads")
+
+
+ @app.get("/")
+ async def root():
+     """Root endpoint"""
+     return {
+         "message": "Car Damage Detection API with YOLOv11",
+         "endpoints": {
+             "/docs": "API documentation",
+             "/detect": "Single image detection",
+             "/compare": "Compare before/after images (6 pairs)",
+             "/uploads/{filename}": "Access saved visualization images",
+             "/health": "Health check"
+         }
+     }
+
+
+ def save_temp_file(upload_file: UploadFile) -> str:
+     """Save an uploaded file into /tmp and return the temp file path"""
+     tmp_dir = Path("/tmp")
+     tmp_dir.mkdir(exist_ok=True)
+
+     temp_path = tmp_dir / upload_file.filename
+
+     with open(temp_path, "wb") as buffer:
+         shutil.copyfileobj(upload_file.file, buffer)
+
+     return str(temp_path)
+
+
+ @app.get("/health")
+ async def health_check():
+     """Health check endpoint"""
+     return {"status": "healthy", "model": "YOLOv11"}
+
+
+ @app.post("/detect")
+ async def detect_single_image(file: UploadFile = File(...)):
+     """
+     Detect damages in a single image
+
+     Args:
+         file: Image file
+
+     Returns:
+         Detection results with bounding boxes and path to the visualized image
+     """
+     try:
+         # Read and decode the uploaded image
+         contents = await file.read()
+         image = Image.open(io.BytesIO(contents)).convert("RGB")
+         image_np = np.array(image)
+         image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
+
+         # Perform detection
+         detections = detector.detect(image_bgr)
+
+         # Create visualization
+         visualized = visualizer.draw_detections(image_bgr, detections, 'new_damage')
+
+         # Save into the mounted uploads/ directory so the image is reachable
+         # through the /uploads static route
+         filename = f"detection_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4().hex[:8]}.jpg"
+         output_path = Path("uploads") / filename
+         cv2.imwrite(str(output_path), visualized)
+
+         return JSONResponse({
+             "status": "success",
+             "detections": detections,
+             "statistics": {
+                 "total_damages": len(detections['boxes']),
+                 "damage_types": list(set(detections['classes']))
+             },
+             "visualized_image_path": f"/uploads/{filename}",
+         })
+
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Detection failed: {str(e)}")
+
+
+ @app.post("/compare")
+ async def compare_vehicle_damages(
+     # Before delivery images (6 positions)
+     before_1: UploadFile = File(..., description="Before - Position 1"),
+     before_2: UploadFile = File(..., description="Before - Position 2"),
+     before_3: UploadFile = File(..., description="Before - Position 3"),
+     before_4: UploadFile = File(..., description="Before - Position 4"),
+     before_5: UploadFile = File(..., description="Before - Position 5"),
+     before_6: UploadFile = File(..., description="Before - Position 6"),
+     # After delivery images (6 positions)
+     after_1: UploadFile = File(..., description="After - Position 1"),
+     after_2: UploadFile = File(..., description="After - Position 2"),
+     after_3: UploadFile = File(..., description="After - Position 3"),
+     after_4: UploadFile = File(..., description="After - Position 4"),
+     after_5: UploadFile = File(..., description="After - Position 5"),
+     after_6: UploadFile = File(..., description="After - Position 6"),
+ ):
+     """
+     Compare vehicle damages before and after delivery
+
+     Analyzes 6 pairs of images (before/after) from different positions
+     and determines the damage status according to 3 cases:
+     - Case 1: Existing damages (from before) -> Delivery completed
+     - Case 2: New damages detected -> Issue during delivery
+     - Case 3: No damages -> Successful delivery
+
+     Returns:
+         Detailed comparison results for each position and overall status
+     """
+     try:
+         before_images = [before_1, before_2, before_3, before_4, before_5, before_6]
+         after_images = [after_1, after_2, after_3, after_4, after_5, after_6]
+
+         position_results = []
+         all_visualizations = []
+         image_pairs = []
+
+         # Overall statistics
+         total_new_damages = 0
+         total_existing_damages = 0
+         total_matched_damages = 0
+
+         # Generate unique session ID for this comparison
+         session_id = str(uuid.uuid4())[:8]
+         timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+         # Process each position pair
+         for i in range(6):
+             # Read images
+             before_contents = await before_images[i].read()
+             after_contents = await after_images[i].read()
+
+             before_img = Image.open(io.BytesIO(before_contents)).convert("RGB")
+             after_img = Image.open(io.BytesIO(after_contents)).convert("RGB")
+
+             before_np = np.array(before_img)
+             after_np = np.array(after_img)
+
+             before_bgr = cv2.cvtColor(before_np, cv2.COLOR_RGB2BGR)
+             after_bgr = cv2.cvtColor(after_np, cv2.COLOR_RGB2BGR)
+
+             # Store image pairs for grid visualization
+             image_pairs.append((before_bgr, after_bgr))
+
+             # Detect damages
+             before_detections = detector.detect(before_bgr)
+             after_detections = detector.detect(after_bgr)
+
+             # Compare damages
+             comparison = comparator.analyze_damage_status(before_detections, after_detections)
+
+             # Update overall statistics
+             total_new_damages += len(comparison['new_damages'])
+             total_existing_damages += len(comparison['repaired_damages'])
+             total_matched_damages += len(comparison['matched_damages'])
+
+             # Create visualization for this position
+             vis_img = visualizer.create_comparison_visualization(
+                 before_bgr, after_bgr,
+                 before_detections, after_detections,
+                 comparison
+             )
+
+             # Save the visualization into uploads/ (not /tmp) so the URL below,
+             # which points at the /uploads static mount, actually resolves
+             vis_filename = f"comparison_{timestamp_str}_{session_id}_pos{i+1}.jpg"
+             vis_path = Path("uploads") / vis_filename
+             cv2.imwrite(str(vis_path), vis_img)
+
+             vis_url = f"http://localhost:8000/uploads/{vis_filename}"
+             all_visualizations.append(vis_url)
+
+             # Store position result
+             position_results.append({
+                 f"position_{i+1}": {
+                     "case": comparison['case'],
+                     "message": comparison['message'],
+                     "statistics": comparison['statistics'],
+                     "new_damages": comparison['new_damages'],
+                     "matched_damages": comparison['matched_damages'],
+                     "repaired_damages": comparison['repaired_damages'],
+                     "visualization_path": f"/uploads/{vis_filename}",
+                     "visualization_url": vis_url
+                 }
+             })
+
+         # Determine overall case
+         overall_case = "CASE_3_SUCCESS"
+         overall_message = "Successful delivery - No damage detected"
+
+         if total_new_damages > 0:
+             overall_case = "CASE_2_NEW_DAMAGE"
+             overall_message = f"Issue during vehicle delivery - Detected {total_new_damages} new damage(s)"
+         elif total_matched_damages > 0 and total_new_damages == 0:
+             overall_case = "CASE_1_EXISTING"
+             overall_message = "Damage existed before delivery, not caused in transit -> Delivery completed"
+
+         # Create summary grid visualization
+         grid_results = [res[f"position_{i+1}"] for i, res in enumerate(position_results)]
+         grid_img = visualizer.create_summary_grid(grid_results, image_pairs)
+
+         # Save grid summary image
+         grid_filename = f"summary_grid_{timestamp_str}_{session_id}.jpg"
+         grid_path = Path("uploads") / grid_filename
+         cv2.imwrite(str(grid_path), grid_img)
+         grid_url = f"http://localhost:8000/uploads/{grid_filename}"
+
+         # Generate timestamp for tracking
+         timestamp = datetime.now().isoformat()
+
+         return JSONResponse({
+             "status": "success",
+             "session_id": session_id,
+             "timestamp": timestamp,
+             "overall_result": {
+                 "case": overall_case,
+                 "message": overall_message,
+                 "statistics": {
+                     "total_new_damages": total_new_damages,
+                     "total_matched_damages": total_matched_damages,
+                     "total_repaired_damages": total_existing_damages
+                 }
+             },
+             "position_results": position_results,
+             "summary_visualization_path": f"/uploads/{grid_filename}",
+             "summary_visualization_url": grid_url,
+             "recommendations": {
+                 "action_required": total_new_damages > 0,
+                 "suggested_action": "Investigate delivery process" if total_new_damages > 0 else "Proceed with delivery completion"
+             }
+         })
+
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Comparison failed: {str(e)}")
+
+
+ if __name__ == "__main__":
+     uvicorn.run(
+         "main:app",
+         host="0.0.0.0",
+         port=8000,
+         reload=True,
+         log_level="info"
+     )
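A companion client sketch for the `/compare` endpoint above; the twelve local filenames and the base URL are illustrative assumptions:

```python
# Minimal sketch: send 6 before/after image pairs to /compare.
# Assumes before_1.jpg..before_6.jpg and after_1.jpg..after_6.jpg exist locally.
import requests

files = {}
for i in range(1, 7):
    files[f"before_{i}"] = open(f"before_{i}.jpg", "rb")
    files[f"after_{i}"] = open(f"after_{i}.jpg", "rb")

try:
    resp = requests.post("http://localhost:8000/compare", files=files)
    resp.raise_for_status()
    result = resp.json()
    print(result["overall_result"]["case"])      # e.g. "CASE_3_SUCCESS"
    print(result["overall_result"]["message"])
    print(result["summary_visualization_url"])
finally:
    for f in files.values():
        f.close()
```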
requirements.txt ADDED
@@ -0,0 +1,65 @@
+ # Core YOLOv11 and Ultralytics
+ ultralytics>=8.3.0
+ torch>=2.0.0
+ torchvision>=0.15.0
+
+ # FastAPI and web server
+ fastapi>=0.104.0
+ uvicorn[standard]>=0.24.0
+ python-multipart>=0.0.6
+ websockets>=12.0
+
+ # Image processing
+ opencv-python>=4.8.0
+ opencv-contrib-python>=4.8.0
+ Pillow>=10.0.0
+ numpy>=1.24.0
+
+ # Video processing
+ imageio>=2.31.0
+ imageio-ffmpeg>=0.4.8
+
+ # Data processing
+ scipy>=1.11.0
+ scikit-learn>=1.3.0
+ pandas>=2.0.0
+
+ # Configuration
+ pyyaml>=6.0
+ python-dotenv>=1.0.0
+
+ # API documentation
+ pydantic>=2.0.0
+
+ # Performance optimization
+ # For ONNX export
+ onnx>=1.14.0
+ onnxruntime>=1.15.0
+ # onnxruntime-gpu>=1.15.0  # Uncomment for GPU support
+
+ # Monitoring and logging
+ tqdm>=4.65.0
+ colorama>=0.4.6
+ rich>=13.0.0
+
+ # Testing (optional)
+ pytest>=7.4.0
+ pytest-asyncio>=0.21.0
+
+ # Database (optional)
+ # psycopg2-binary>=2.9.0  # PostgreSQL
+ # pymongo>=4.5.0  # MongoDB
+ # redis>=5.0.0  # Redis for caching
+
+ # Cloud storage (optional)
+ # boto3>=1.28.0  # AWS S3
+ # google-cloud-storage>=2.10.0  # Google Cloud Storage
+
+ # Tracking algorithms (for object tracking in videos)
+ # lap>=0.4.0  # For ByteTrack
+ # filterpy>=1.4.5  # For Kalman filtering
+
+ # Additional utilities
+ matplotlib>=3.7.0
+ seaborn>=0.12.0
+ plotly>=5.16.0
src/__pycache__/comparison.cpython-313.pyc ADDED
Binary file (7.49 kB).
 
src/__pycache__/detection.cpython-313.pyc ADDED
Binary file (10.5 kB).
 
src/__pycache__/visualization.cpython-313.pyc ADDED
Binary file (9.06 kB).
 
src/comparison.py ADDED
@@ -0,0 +1,210 @@
+ import numpy as np
+ from typing import List, Dict
+ from scipy.optimize import linear_sum_assignment
+ import yaml
+
+
+ class DamageComparator:
+     """Compare damages between before and after images"""
+
+     def __init__(self, config_path: str = "config.yaml"):
+         """Initialize comparator with configuration"""
+         with open(config_path, 'r') as f:
+             self.config = yaml.safe_load(f)
+
+         self.iou_threshold = self.config['comparison']['iou_match_threshold']
+         self.position_tolerance = self.config['comparison']['position_tolerance']
+
+     def calculate_iou(self, box1: List[int], box2: List[int]) -> float:
+         """
+         Calculate Intersection over Union between two boxes
+
+         Args:
+             box1, box2: Bounding boxes in format [x1, y1, x2, y2]
+
+         Returns:
+             IoU value between 0 and 1
+         """
+         # Calculate intersection area
+         x1 = max(box1[0], box2[0])
+         y1 = max(box1[1], box2[1])
+         x2 = min(box1[2], box2[2])
+         y2 = min(box1[3], box2[3])
+
+         if x2 < x1 or y2 < y1:
+             return 0.0
+
+         intersection = (x2 - x1) * (y2 - y1)
+
+         # Calculate union area
+         box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
+         box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
+         union = box1_area + box2_area - intersection
+
+         # Calculate IoU
+         if union == 0:
+             return 0.0
+
+         return intersection / union
+
+     def match_damages(self, detections1: Dict, detections2: Dict) -> Dict:
+         """
+         Match damages between two sets of detections using the Hungarian algorithm
+
+         Args:
+             detections1: First detection results (before)
+             detections2: Second detection results (after)
+
+         Returns:
+             Matching results with paired and unpaired damages
+         """
+         boxes1 = detections1['boxes']
+         boxes2 = detections2['boxes']
+
+         if len(boxes1) == 0 and len(boxes2) == 0:
+             return {
+                 'matched_pairs': [],
+                 'unmatched_before': [],
+                 'unmatched_after': [],
+                 'iou_matrix': None
+             }
+
+         if len(boxes1) == 0:
+             return {
+                 'matched_pairs': [],
+                 'unmatched_before': [],
+                 'unmatched_after': list(range(len(boxes2))),
+                 'iou_matrix': None
+             }
+
+         if len(boxes2) == 0:
+             return {
+                 'matched_pairs': [],
+                 'unmatched_before': list(range(len(boxes1))),
+                 'unmatched_after': [],
+                 'iou_matrix': None
+             }
+
+         # Calculate IoU matrix
+         iou_matrix = np.zeros((len(boxes1), len(boxes2)))
+         for i, box1 in enumerate(boxes1):
+             for j, box2 in enumerate(boxes2):
+                 iou_matrix[i, j] = self.calculate_iou(box1, box2)
+
+         # Use the Hungarian algorithm for optimal matching
+         # Convert to cost matrix (1 - IoU)
+         cost_matrix = 1 - iou_matrix
+         row_indices, col_indices = linear_sum_assignment(cost_matrix)
+
+         # Filter matches by IoU threshold
+         matched_pairs = []
+         matched_rows = set()
+         matched_cols = set()
+
+         for i, j in zip(row_indices, col_indices):
+             if iou_matrix[i, j] >= self.iou_threshold:
+                 # Also check if damage types match
+                 if detections1['classes'][i] == detections2['classes'][j]:
+                     matched_pairs.append((i, j, iou_matrix[i, j]))
+                     matched_rows.add(i)
+                     matched_cols.add(j)
+
+         # Find unmatched damages
+         unmatched_before = [i for i in range(len(boxes1)) if i not in matched_rows]
+         unmatched_after = [j for j in range(len(boxes2)) if j not in matched_cols]
+
+         return {
+             'matched_pairs': matched_pairs,
+             'unmatched_before': unmatched_before,
+             'unmatched_after': unmatched_after,
+             'iou_matrix': iou_matrix.tolist()
+         }
+
+     def analyze_damage_status(self, before_detections: Dict, after_detections: Dict) -> Dict:
+         """
+         Analyze damage status between before and after images
+
+         Returns detailed analysis with case classification
+         """
+         matching = self.match_damages(before_detections, after_detections)
+
+         # Extract damage information
+         matched_damages = []
+         for i, j, iou in matching['matched_pairs']:
+             matched_damages.append({
+                 'type': before_detections['classes'][i],
+                 'confidence_before': before_detections['confidences'][i],
+                 'confidence_after': after_detections['confidences'][j],
+                 'box_before': before_detections['boxes'][i],
+                 'box_after': after_detections['boxes'][j],
+                 'iou': iou
+             })
+
+         existing_damages = []
+         for i in matching['unmatched_before']:
+             existing_damages.append({
+                 'type': before_detections['classes'][i],
+                 'confidence': before_detections['confidences'][i],
+                 'box': before_detections['boxes'][i]
+             })
+
+         new_damages = []
+         for j in matching['unmatched_after']:
+             new_damages.append({
+                 'type': after_detections['classes'][j],
+                 'confidence': after_detections['confidences'][j],
+                 'box': after_detections['boxes'][j]
+             })
+
+         # Determine case
+         case = self._determine_case(matched_damages, existing_damages, new_damages)
+
+         return {
+             'case': case['type'],
+             'message': case['message'],
+             'matched_damages': matched_damages,
+             'repaired_damages': existing_damages,  # Damages present before but not after
+             'new_damages': new_damages,
+             'statistics': {
+                 'total_before': len(before_detections['boxes']),
+                 'total_after': len(after_detections['boxes']),
+                 'matched': len(matched_damages),
+                 'repaired': len(existing_damages),
+                 'new': len(new_damages)
+             }
+         }
+
+     def _determine_case(self, matched: List, repaired: List, new: List) -> Dict:
+         """Determine which case the comparison falls into"""
+
+         # Case 3: Happy case - no damages at all
+         if len(matched) == 0 and len(repaired) == 0 and len(new) == 0:
+             return {
+                 'type': 'CASE_3_SUCCESS',
+                 'message': 'Successful delivery - No damage detected'
+             }
+
+         # Case 1: Existing damages remain and nothing new appeared
+         if len(matched) > 0 and len(new) == 0:
+             return {
+                 'type': 'CASE_1_EXISTING',
+                 'message': 'Damage existed before delivery, not caused in transit -> Delivery completed'
+             }
+
+         # Case 2: New damages detected
+         if len(new) > 0:
+             return {
+                 'type': 'CASE_2_NEW_DAMAGE',
+                 'message': 'Delivery defect - New damage discovered'
+             }
+
+         # Special case: all previous damages repaired, none remaining
+         if len(repaired) > 0 and len(new) == 0 and len(matched) == 0:
+             return {
+                 'type': 'CASE_REPAIRED',
+                 'message': 'All damage repaired - Vehicle delivered successfully'
+             }
+
+         return {
+             'type': 'CASE_UNKNOWN',
+             'message': 'Status undetermined'
+         }
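To make the matching concrete, here is a small worked example with hand-made detection dicts (toy boxes, not real model output); it assumes `config.yaml` is present so the comparator can read its thresholds:

```python
# Minimal sketch: match two toy detection sets with DamageComparator.
# The door boxes overlap with IoU ~0.68, above the 0.2 match threshold,
# so they pair up; the light disappears (repaired) and a bumper appears (new).
from src.comparison import DamageComparator

comparator = DamageComparator()  # reads config.yaml

before = {
    "boxes": [[100, 100, 200, 200], [300, 300, 350, 350]],
    "confidences": [0.9, 0.8],
    "classes": ["Damaged_Door", "Damaged_Light"],
    "class_ids": [3, 5],
}
after = {
    "boxes": [[110, 110, 210, 210], [400, 50, 480, 120]],
    "confidences": [0.85, 0.7],
    "classes": ["Damaged_Door", "Damaged_Bumper"],
    "class_ids": [3, 1],
}

result = comparator.analyze_damage_status(before, after)
print(result["case"])        # CASE_2_NEW_DAMAGE
print(result["statistics"])  # matched: 1, repaired: 1, new: 1
```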
src/detection.py ADDED
@@ -0,0 +1,204 @@
+ import numpy as np
+ from typing import List, Dict, Tuple
+ import cv2
+ from pathlib import Path
+ import yaml
+
+
+ class YOLOv11Detector:
+     """YOLOv11 detector for car damage detection"""
+
+     def __init__(self, config_path: str = "config.yaml"):
+         """Initialize YOLOv11 detector with configuration"""
+         with open(config_path, 'r') as f:
+             self.config = yaml.safe_load(f)
+
+         model_path = self.config['model']['path']
+
+         # Check which model file exists
+         if not Path(model_path).exists():
+             # Try to find available model files
+             model_dir = Path("models")
+             if (model_dir / "best.pt").exists():
+                 model_path = str(model_dir / "best.pt")
+                 print("Using best.pt model from training")
+             elif (model_dir / "last.pt").exists():
+                 model_path = str(model_dir / "last.pt")
+                 print("Using last.pt checkpoint model")
+             elif (model_dir / "best.onnx").exists():
+                 model_path = str(model_dir / "best.onnx")
+                 print("Using best.onnx model")
+             else:
+                 raise FileNotFoundError("No model files found in models/ directory!")
+
+         self.model_path = model_path
+         self.device = self.config['model']['device']
+         self.confidence = self.config['model']['confidence']
+         self.iou_threshold = self.config['model']['iou_threshold']
+         self.classes = self.config['detection']['classes']
+
+         # Load model based on format
+         if model_path.endswith('.onnx'):
+             self._load_onnx_model()
+         else:  # .pt format
+             self._load_pytorch_model()
+
+     def _load_pytorch_model(self):
+         """Load PyTorch model using Ultralytics"""
+         from ultralytics import YOLO
+         self.model = YOLO(self.model_path)
+
+         # Move model to the appropriate device
+         if self.device == 'cuda:0':
+             self.model.to('cuda')
+
+         print(f"Loaded PyTorch model: {self.model_path}")
+
+     def _load_onnx_model(self):
+         """Load ONNX model using OpenCV DNN"""
+         self.net = cv2.dnn.readNet(self.model_path)
+
+         # Set backend based on device
+         if self.device == 'cuda:0':
+             self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
+             self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
+         else:
+             self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
+             self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
+
+         print(f"Loaded ONNX model: {self.model_path}")
+
+     def detect(self, image: np.ndarray) -> Dict:
+         """
+         Perform detection on image
+
+         Args:
+             image: Input image as numpy array (BGR format)
+
+         Returns:
+             Dictionary containing detection results
+         """
+         if self.model_path.endswith('.onnx'):
+             return self._detect_onnx(image)
+         else:
+             return self._detect_pytorch(image)
+
+     def _detect_pytorch(self, image: np.ndarray) -> Dict:
+         """Detection using PyTorch model"""
+         # Run YOLO inference
+         results = self.model(
+             image,
+             conf=self.confidence,
+             iou=self.iou_threshold,
+             device=self.device,
+             verbose=False
+         )
+
+         # Parse results
+         detections = {
+             'boxes': [],
+             'confidences': [],
+             'classes': [],
+             'class_ids': []
+         }
+
+         if len(results) > 0 and results[0].boxes is not None:
+             boxes = results[0].boxes
+
+             for box in boxes:
+                 # Get box coordinates (xyxy format)
+                 x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
+
+                 # Get confidence and class
+                 conf = float(box.conf[0].cpu().numpy())
+                 cls_id = int(box.cls[0].cpu().numpy())
+
+                 # Map class ID to class name
+                 if cls_id < len(self.classes):
+                     cls_name = self.classes[cls_id]
+                 else:
+                     cls_name = f"class_{cls_id}"
+
+                 detections['boxes'].append([int(x1), int(y1), int(x2), int(y2)])
+                 detections['confidences'].append(conf)
+                 detections['classes'].append(cls_name)
+                 detections['class_ids'].append(cls_id)
+
+         return detections
+
+     def _detect_onnx(self, image: np.ndarray) -> Dict:
+         """Detection using ONNX model (compatible with original code)"""
+         height, width = image.shape[:2]
+
+         # Preprocess image for ONNX
+         blob = cv2.dnn.blobFromImage(
+             image, 1/255.0, (640, 640),
+             swapRB=True, crop=False
+         )
+
+         self.net.setInput(blob)
+         preds = self.net.forward()
+         preds = preds.transpose((0, 2, 1))
+
+         # Extract outputs
+         detections = self._extract_onnx_output(
+             preds=preds,
+             image_shape=(height, width),
+             input_shape=(640, 640)
+         )
+
+         return detections
+
+     def _extract_onnx_output(self, preds: np.ndarray, image_shape: Tuple[int, int],
+                              input_shape: Tuple[int, int]) -> Dict:
+         """Extract detection results from ONNX model output"""
+         class_ids, confs, boxes = [], [], []
+
+         image_height, image_width = image_shape
+         input_height, input_width = input_shape
+         x_factor = image_width / input_width
+         y_factor = image_height / input_height
+
+         rows = preds[0].shape[0]
+         for i in range(rows):
+             row = preds[0][i]
+
+             # row[0:4] are box coordinates; row[4:] are per-class scores
+             classes_score = row[4:]
+             _, _, _, max_idx = cv2.minMaxLoc(classes_score)
+             class_id = max_idx[1]
+             conf = float(classes_score[class_id])  # score of the best class
+
+             if conf > self.confidence:
+                 confs.append(conf)
+                 label = self.classes[int(class_id)] if int(class_id) < len(self.classes) else f"class_{class_id}"
+                 class_ids.append(label)
+
+                 # Extract boxes (cx, cy, w, h -> x1, y1, x2, y2, rescaled to image size)
+                 x, y, w, h = row[0].item(), row[1].item(), row[2].item(), row[3].item()
+                 left = int((x - 0.5 * w) * x_factor)
+                 top = int((y - 0.5 * h) * y_factor)
+                 width = int(w * x_factor)
+                 height = int(h * y_factor)
+                 box = [left, top, left + width, top + height]
+                 boxes.append(box)
+
+         # Apply NMS
+         if len(boxes) > 0:
+             indices = cv2.dnn.NMSBoxes(
+                 [[b[0], b[1], b[2]-b[0], b[3]-b[1]] for b in boxes],
+                 confs, self.confidence, self.iou_threshold
+             )
+
+             if len(indices) > 0:
+                 indices = indices.flatten()
+                 return {
+                     'boxes': [boxes[i] for i in indices],
+                     'confidences': [confs[i] for i in indices],
+                     'classes': [class_ids[i] for i in indices],
+                     'class_ids': list(range(len(indices)))
+                 }
+
+         return {'boxes': [], 'confidences': [], 'classes': [], 'class_ids': []}
+
+     def detect_batch(self, images: List[np.ndarray]) -> List[Dict]:
+         """Detect on multiple images"""
+         return [self.detect(img) for img in images]
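A usage sketch for the detector on its own; the `car.jpg` filename is illustrative, and `config.yaml` plus a weights file under `models/` are assumed to exist:

```python
# Minimal sketch: run YOLOv11Detector on a single local image.
import cv2
from src.detection import YOLOv11Detector

detector = YOLOv11Detector()   # loads model path and thresholds from config.yaml
image = cv2.imread("car.jpg")  # BGR array, as detect() expects
if image is None:
    raise FileNotFoundError("car.jpg not found")

detections = detector.detect(image)
for box, conf, cls in zip(detections["boxes"],
                          detections["confidences"],
                          detections["classes"]):
    print(f"{cls}: {conf:.2f} at {box}")
```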
src/init.py ADDED
@@ -0,0 +1 @@
+ __version__ = "1.0.0"
src/visualization.py ADDED
@@ -0,0 +1,183 @@
+ import cv2
+ import numpy as np
+ from typing import List, Dict, Tuple
+ import yaml
+
+
+ class DamageVisualizer:
+     """Visualize detection and comparison results"""
+
+     def __init__(self, config_path: str = "config.yaml"):
+         """Initialize visualizer with configuration"""
+         with open(config_path, 'r') as f:
+             self.config = yaml.safe_load(f)
+
+         self.line_thickness = self.config['visualization']['line_thickness']
+         self.font_scale = self.config['visualization']['font_scale']
+         self.colors = self.config['visualization']['colors']
+
+     def draw_detections(self, image: np.ndarray, detections: Dict,
+                         color_type: str = 'new_damage') -> np.ndarray:
+         """
+         Draw bounding boxes and labels on image
+
+         Args:
+             image: Input image
+             detections: Detection results
+             color_type: Type of color to use ('new_damage', 'existing_damage', 'matched_damage')
+
+         Returns:
+             Image with drawn detections
+         """
+         img_copy = image.copy()
+         color = self.colors.get(color_type, [255, 0, 0])
+
+         for i, box in enumerate(detections['boxes']):
+             x1, y1, x2, y2 = box
+             label = f"{detections['classes'][i]} ({detections['confidences'][i]:.2f})"
+
+             # Draw rectangle
+             cv2.rectangle(img_copy, (x1, y1), (x2, y2), color, self.line_thickness)
+
+             # Draw label background
+             label_size, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX,
+                                             self.font_scale, self.line_thickness)
+             cv2.rectangle(img_copy, (x1, y1 - label_size[1] - 5),
+                           (x1 + label_size[0], y1), color, -1)
+
+             # Draw label text
+             cv2.putText(img_copy, label, (x1, y1 - 5),
+                         cv2.FONT_HERSHEY_SIMPLEX, self.font_scale,
+                         [255, 255, 255], self.line_thickness)
+
+         return img_copy
+
+     def create_comparison_visualization(self, before_img: np.ndarray, after_img: np.ndarray,
+                                         before_detections: Dict, after_detections: Dict,
+                                         comparison_result: Dict) -> np.ndarray:
+         """
+         Create side-by-side comparison visualization
+
+         Args:
+             before_img, after_img: Input images
+             before_detections, after_detections: Detection results
+             comparison_result: Comparison analysis results
+
+         Returns:
+             Combined visualization image
+         """
+         before_vis = before_img.copy()
+         after_vis = after_img.copy()
+
+         # Draw matched damages on both images
+         for match in comparison_result['matched_damages']:
+             # Draw on before image
+             x1, y1, x2, y2 = match['box_before']
+             cv2.rectangle(before_vis, (x1, y1), (x2, y2),
+                           self.colors['matched_damage'], self.line_thickness)
+
+             # Draw on after image
+             x1, y1, x2, y2 = match['box_after']
+             cv2.rectangle(after_vis, (x1, y1), (x2, y2),
+                           self.colors['matched_damage'], self.line_thickness)
+
+         # Draw repaired damages (only on before) in the existing-damage color
+         for damage in comparison_result['repaired_damages']:
+             x1, y1, x2, y2 = damage['box']
+             cv2.rectangle(before_vis, (x1, y1), (x2, y2),
+                           self.colors['existing_damage'], self.line_thickness)
+             cv2.putText(before_vis, "REPAIRED", (x1, y1 - 5),
+                         cv2.FONT_HERSHEY_SIMPLEX, self.font_scale,
+                         self.colors['existing_damage'], self.line_thickness)
+
+         # Draw new damages (only on after) in the new-damage color
+         for damage in comparison_result['new_damages']:
+             x1, y1, x2, y2 = damage['box']
+             cv2.rectangle(after_vis, (x1, y1), (x2, y2),
+                           self.colors['new_damage'], self.line_thickness + 1)
+             cv2.putText(after_vis, "NEW!", (x1, y1 - 5),
+                         cv2.FONT_HERSHEY_SIMPLEX, self.font_scale * 1.5,
+                         self.colors['new_damage'], self.line_thickness)
+
+         # Combine images side by side
+         h1, w1 = before_vis.shape[:2]
+         h2, w2 = after_vis.shape[:2]
+         max_height = max(h1, h2)
+
+         # Resize if needed
+         if h1 != max_height:
+             before_vis = cv2.resize(before_vis, (int(w1 * max_height / h1), max_height))
+         if h2 != max_height:
+             after_vis = cv2.resize(after_vis, (int(w2 * max_height / h2), max_height))
+
+         # Create combined image
+         combined = np.hstack([before_vis, after_vis])
+
+         # Add a status banner above the combined image
+         status_height = 100
+         status_img = np.ones((status_height, combined.shape[1], 3), dtype=np.uint8) * 255
+
+         # Add case message
+         case_color = (0, 128, 0) if 'SUCCESS' in comparison_result['case'] else (0, 0, 255)
+         cv2.putText(status_img, comparison_result['message'], (20, 40),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.8, case_color, 2)
+
+         # Add statistics
+         stats_text = f"Before: {comparison_result['statistics']['total_before']} | " \
+                      f"After: {comparison_result['statistics']['total_after']} | " \
+                      f"Matched: {comparison_result['statistics']['matched']} | " \
+                      f"New: {comparison_result['statistics']['new']}"
+         cv2.putText(status_img, stats_text, (20, 70),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 1)
+
+         # Combine with status
+         final_image = np.vstack([status_img, combined])
+
+         return final_image
+
+     def create_summary_grid(self, comparison_results: List[Dict],
+                             image_pairs: List[Tuple[np.ndarray, np.ndarray]]) -> np.ndarray:
+         """
+         Create a grid visualization of all 6 position comparisons
+
+         Args:
+             comparison_results: List of comparison results for each position
+             image_pairs: List of (before, after) image pairs
+
+         Returns:
+             Grid visualization of all positions
+         """
+         grid_images = []
+
+         for i, (result, (before_img, after_img)) in enumerate(zip(comparison_results, image_pairs)):
+             # Create mini comparison for each position
+             target_size = (300, 200)  # (width, height) for the grid cells
+
+             before_small = cv2.resize(before_img, target_size)
+             after_small = cv2.resize(after_img, target_size)
+
+             # Add position label
+             position_label = f"Position {i+1}"
+             cv2.putText(before_small, position_label, (10, 20),
+                         cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
+             cv2.putText(after_small, position_label, (10, 20),
+                         cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
+
+             # Add case indicator
+             case_color = (0, 255, 0) if 'SUCCESS' in result['case'] else (0, 0, 255)
+             if 'NEW_DAMAGE' in result['case']:
+                 case_color = (0, 0, 255)
+
+             cv2.rectangle(after_small, (0, 0), (target_size[0], 30), case_color, -1)
+             cv2.putText(after_small, result['case'][:10], (10, 20),
+                         cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
+
+             pair_img = np.hstack([before_small, after_small])
+             grid_images.append(pair_img)
+
+         # Create 2x3 grid (assumes exactly 6 position pairs)
+         row1 = np.hstack(grid_images[:3])
+         row2 = np.hstack(grid_images[3:])
+         grid = np.vstack([row1, row2])
+
+         return grid
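Finally, a sketch pairing the visualizer with detector output (same `config.yaml` and `car.jpg` assumptions as the detector example above):

```python
# Minimal sketch: annotate an image with detections and save the result.
import cv2
from src.detection import YOLOv11Detector
from src.visualization import DamageVisualizer

detector = YOLOv11Detector()
visualizer = DamageVisualizer()

image = cv2.imread("car.jpg")  # BGR input
detections = detector.detect(image)

# 'new_damage' picks the [0, 0, 255] (red in BGR) color from config.yaml
annotated = visualizer.draw_detections(image, detections, "new_damage")
cv2.imwrite("car_annotated.jpg", annotated)
```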