lkk688 committed on
Commit
4bfd00b
·
verified ·
1 Parent(s): 877ee65

Upload YOLOv8l model

Browse files
Files changed (4) hide show
  1. config.json +180 -0
  2. example.py +47 -0
  3. preprocessor_config.json +21 -0
  4. yolov8_l.pt +3 -0
config.json ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "yolov8",
3
+ "scale": "l",
4
+ "num_classes": 80,
5
+ "image_size": [
6
+ 640,
7
+ 640
8
+ ],
9
+ "confidence_threshold": 0.25,
10
+ "iou_threshold": 0.45,
11
+ "max_detections": 300,
12
+ "backbone_type": "default",
13
+ "id2label": {
14
+ "0": "0",
15
+ "1": "1",
16
+ "2": "2",
17
+ "3": "3",
18
+ "4": "4",
19
+ "5": "5",
20
+ "6": "6",
21
+ "7": "7",
22
+ "8": "8",
23
+ "9": "9",
24
+ "10": "10",
25
+ "11": "11",
26
+ "12": "12",
27
+ "13": "13",
28
+ "14": "14",
29
+ "15": "15",
30
+ "16": "16",
31
+ "17": "17",
32
+ "18": "18",
33
+ "19": "19",
34
+ "20": "20",
35
+ "21": "21",
36
+ "22": "22",
37
+ "23": "23",
38
+ "24": "24",
39
+ "25": "25",
40
+ "26": "26",
41
+ "27": "27",
42
+ "28": "28",
43
+ "29": "29",
44
+ "30": "30",
45
+ "31": "31",
46
+ "32": "32",
47
+ "33": "33",
48
+ "34": "34",
49
+ "35": "35",
50
+ "36": "36",
51
+ "37": "37",
52
+ "38": "38",
53
+ "39": "39",
54
+ "40": "40",
55
+ "41": "41",
56
+ "42": "42",
57
+ "43": "43",
58
+ "44": "44",
59
+ "45": "45",
60
+ "46": "46",
61
+ "47": "47",
62
+ "48": "48",
63
+ "49": "49",
64
+ "50": "50",
65
+ "51": "51",
66
+ "52": "52",
67
+ "53": "53",
68
+ "54": "54",
69
+ "55": "55",
70
+ "56": "56",
71
+ "57": "57",
72
+ "58": "58",
73
+ "59": "59",
74
+ "60": "60",
75
+ "61": "61",
76
+ "62": "62",
77
+ "63": "63",
78
+ "64": "64",
79
+ "65": "65",
80
+ "66": "66",
81
+ "67": "67",
82
+ "68": "68",
83
+ "69": "69",
84
+ "70": "70",
85
+ "71": "71",
86
+ "72": "72",
87
+ "73": "73",
88
+ "74": "74",
89
+ "75": "75",
90
+ "76": "76",
91
+ "77": "77",
92
+ "78": "78",
93
+ "79": "79"
94
+ },
95
+ "label2id": {
96
+ "0": "0",
97
+ "1": "1",
98
+ "2": "2",
99
+ "3": "3",
100
+ "4": "4",
101
+ "5": "5",
102
+ "6": "6",
103
+ "7": "7",
104
+ "8": "8",
105
+ "9": "9",
106
+ "10": "10",
107
+ "11": "11",
108
+ "12": "12",
109
+ "13": "13",
110
+ "14": "14",
111
+ "15": "15",
112
+ "16": "16",
113
+ "17": "17",
114
+ "18": "18",
115
+ "19": "19",
116
+ "20": "20",
117
+ "21": "21",
118
+ "22": "22",
119
+ "23": "23",
120
+ "24": "24",
121
+ "25": "25",
122
+ "26": "26",
123
+ "27": "27",
124
+ "28": "28",
125
+ "29": "29",
126
+ "30": "30",
127
+ "31": "31",
128
+ "32": "32",
129
+ "33": "33",
130
+ "34": "34",
131
+ "35": "35",
132
+ "36": "36",
133
+ "37": "37",
134
+ "38": "38",
135
+ "39": "39",
136
+ "40": "40",
137
+ "41": "41",
138
+ "42": "42",
139
+ "43": "43",
140
+ "44": "44",
141
+ "45": "45",
142
+ "46": "46",
143
+ "47": "47",
144
+ "48": "48",
145
+ "49": "49",
146
+ "50": "50",
147
+ "51": "51",
148
+ "52": "52",
149
+ "53": "53",
150
+ "54": "54",
151
+ "55": "55",
152
+ "56": "56",
153
+ "57": "57",
154
+ "58": "58",
155
+ "59": "59",
156
+ "60": "60",
157
+ "61": "61",
158
+ "62": "62",
159
+ "63": "63",
160
+ "64": "64",
161
+ "65": "65",
162
+ "66": "66",
163
+ "67": "67",
164
+ "68": "68",
165
+ "69": "69",
166
+ "70": "70",
167
+ "71": "71",
168
+ "72": "72",
169
+ "73": "73",
170
+ "74": "74",
171
+ "75": "75",
172
+ "76": "76",
173
+ "77": "77",
174
+ "78": "78",
175
+ "79": "79"
176
+ },
177
+ "architectures": [
178
+ "YoloDetectionModel"
179
+ ]
180
+ }
example.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Example script for using the yolov8_l model from Hugging Face.
#
# Loads the checkpoint published at lkk688/yolov8l-model together with its
# image processor; detect_objects (defined below) uses these module-level
# objects to run detection on a single image.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForObjectDetection

# Load model and processor once at import time. Both are fetched from the
# Hugging Face Hub (cached locally after the first download).
# NOTE: cv2 and numpy were previously imported here but never used anywhere
# in this script, so those imports have been removed.
model = AutoModelForObjectDetection.from_pretrained("lkk688/yolov8l-model")
processor = AutoImageProcessor.from_pretrained("lkk688/yolov8l-model")
14
def detect_objects(image_path, confidence_threshold=0.25):
    """Run object detection on one image and print every detection found.

    Args:
        image_path: Path to the input image file.
        confidence_threshold: Minimum score for a detection to be reported.

    Returns:
        The single-image entry from post_process_object_detection: a dict
        holding "scores", "labels" and "boxes" tensors.
    """
    img = Image.open(image_path).convert("RGB")

    # Preprocess, then run the forward pass without tracking gradients.
    batch = processor(images=img, return_tensors="pt")
    with torch.no_grad():
        predictions = model(**batch)

    # PIL reports (width, height); post-processing expects (height, width).
    size_hw = torch.tensor([img.size[::-1]])
    results = processor.post_process_object_detection(
        predictions,
        threshold=confidence_threshold,
        target_sizes=size_hw,
    )[0]

    # Report each surviving detection in human-readable form.
    detections = zip(results["scores"], results["labels"], results["boxes"])
    for score, label, box in detections:
        box = [round(coord, 2) for coord in box.tolist()]
        print(
            f"Detected {model.config.id2label[label.item()]} with confidence "
            f"{round(score.item(), 3)} at location {box}"
        )

    return results
42
+
43
# Example usage — demo entry point.
if __name__ == "__main__":
    # Replace with your image path before running.
    detect_objects("path/to/your/image.jpg")
preprocessor_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "do_resize": true,
4
+ "do_rescale": true,
5
+ "image_mean": [
6
+ 0.0,
7
+ 0.0,
8
+ 0.0
9
+ ],
10
+ "image_std": [
11
+ 1.0,
12
+ 1.0,
13
+ 1.0
14
+ ],
15
+ "rescale_factor": 0.00392156862745098,
16
+ "size": {
17
+ "height": 640,
18
+ "width": 640
19
+ },
20
+ "use_letterbox": true
21
+ }
yolov8_l.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6e8bdc228932aaf97f4ad3bdd02def49d015c4d811133bfd1e92ffa82617f86
3
+ size 175159011