The labels are `['bag', 'bottom', 'dress', 'hat', 'shoes', 'outer', 'top']`.
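These can also be read straight from the checkpoint's config; a minimal check (assuming the same `yainage90/fashion-object-detection` checkpoint used below):

```python
from transformers import AutoConfig

# Read the id -> label mapping from the hub checkpoint's config.
config = AutoConfig.from_pretrained('yainage90/fashion-object-detection')
print(config.id2label)  # should print the seven labels listed above
```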
The best score, mAP 0.7542, was achieved at the 96th of 100 total epochs, so there may still be a little room for performance improvement.

```python
from PIL import Image
import torch
from transformers import AutoImageProcessor, AutoModelForObjectDetection

# Use a GPU if one is available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

ckpt = 'yainage90/fashion-object-detection'
image_processor = AutoImageProcessor.from_pretrained(ckpt)
model = AutoModelForObjectDetection.from_pretrained(ckpt).to(device)

image = Image.open('<path/to/image>').convert('RGB')

with torch.no_grad():
    inputs = image_processor(images=[image], return_tensors="pt")
    outputs = model(**inputs.to(device))
    # post_process_object_detection expects (height, width) pairs so it can
    # rescale the boxes back to the original image size.
    target_sizes = torch.tensor([[image.size[1], image.size[0]]])
    results = image_processor.post_process_object_detection(outputs, threshold=0.4, target_sizes=target_sizes)[0]

items = []
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    score = score.item()
    label = label.item()
    box = [i.item() for i in box]  # [x_min, y_min, x_max, y_max]
    print(f"{model.config.id2label[label]}: {round(score, 3)} at {box}")
    items.append((score, label, box))
```
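If you need the pixels for each detection (for example, to feed the crops into a downstream model), here is a minimal sketch building on the `items` list above; the cropping step is illustrative and not part of the model's API:

```python
# Illustrative follow-up: crop each detected item out of the original image.
crops = []
for score, label, box in items:
    left, upper, right, lower = (int(v) for v in box)  # PIL crop wants (left, upper, right, lower)
    crops.append((model.config.id2label[label], image.crop((left, upper, right, lower))))
```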
![sample_image](sample_image.png)