lkk688 committed
Commit e8e4cb7 · verified · 1 Parent(s): 7eb373a

Upload folder using huggingface_hub

Files changed (4)
  1. inference.py +19 -4
  2. model.onnx +1 -1
  3. model.safetensors +1 -1
  4. preprocessor_config.json +26 -0
inference.py CHANGED
@@ -1,19 +1,34 @@
 from PIL import Image
 import torch
 import numpy as np
-from transformers import PreTrainedModel
 import onnxruntime
+import os
 
 class FasterRCNNInference:
     def __init__(self, model_path):
         # Load ONNX model
         self.ort_session = onnxruntime.InferenceSession(f"{model_path}/model.onnx")
-        self.processor = torch.load(f"{model_path}/processor.bin")
+
+        # Try to load Hugging Face image processor
+        try:
+            from transformers import AutoImageProcessor
+            self.image_processor = AutoImageProcessor.from_pretrained(model_path)
+            self.use_hf_processor = True
+            print("Using Hugging Face image processor")
+        except Exception as e:
+            print(f"Could not load Hugging Face image processor: {e}")
+            print("Falling back to custom processor")
+            self.processor = torch.load(f"{model_path}/processor.bin")
+            self.use_hf_processor = False
 
     def predict(self, images, threshold=0.5):
         # Preprocess
-        inputs = self.processor(images)
-        pixel_values = inputs["pixel_values"].numpy()
+        if self.use_hf_processor:
+            inputs = self.image_processor(images=images, return_tensors="pt")
+            pixel_values = inputs["pixel_values"].numpy()
+        else:
+            inputs = self.processor(images)
+            pixel_values = inputs["pixel_values"].numpy()
 
         # Run inference
         outputs = self.ort_session.run(None, {"pixel_values": pixel_values})
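With this change, inference.py prefers the Hugging Face image processor shipped with the repository (see preprocessor_config.json below) and only falls back to the legacy processor.bin pickle when that fails. A minimal usage sketch, assuming the repository has been downloaded to a local folder; the path and image file name are placeholders, not part of this commit:

from PIL import Image
from inference import FasterRCNNInference

# Hypothetical local checkout of this repository; adjust the path as needed.
detector = FasterRCNNInference("./fasterrcnn-onnx")

# Any RGB image works; "street.jpg" is a placeholder file name.
image = Image.open("street.jpg").convert("RGB")

# predict() preprocesses the image, feeds pixel_values to the ONNX session,
# and presumably filters detections by the score threshold; its exact return
# format is defined further down in inference.py, outside the hunk shown above.
outputs = detector.predict([image], threshold=0.5)
print(outputs)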
model.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:456470547ad0365bb4229e2d1c81ff99015984487753ca26e694f7ba8847e37d
+oid sha256:83785c773e3567b4244528396f7f553de877cc3e04724a383b723b647f2fc670
 size 331069979
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38a17a2591648db50e1b701341cf3ef261db1cec76e5131975a82f6600769455
+oid sha256:a184954eea223d4339ca5e728ed03ef966bcbdb3fd9a499c1a1af6c60c282bd2
 size 331180436
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "do_convert_annotations": true,
+  "do_normalize": true,
+  "do_pad": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "format": "coco_detection",
+  "image_mean": [
+    0.485,
+    0.456,
+    0.406
+  ],
+  "image_processor_type": "DetrImageProcessor",
+  "image_std": [
+    0.229,
+    0.224,
+    0.225
+  ],
+  "pad_size": null,
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "longest_edge": 1333,
+    "shortest_edge": 800
+  }
+}
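This is the config that AutoImageProcessor.from_pretrained(model_path) in the updated inference.py should resolve to a DetrImageProcessor: images are resized with bilinear resampling (resample=2) so the shortest edge is 800 px and the longest edge at most 1333 px, rescaled by 1/255, and normalized with the ImageNet mean/std listed above. A small sketch of loading it directly, assuming a hypothetical local directory that contains this file:

from PIL import Image
from transformers import AutoImageProcessor

# Hypothetical local directory containing preprocessor_config.json.
processor = AutoImageProcessor.from_pretrained("./fasterrcnn-onnx")
print(type(processor).__name__)  # expected: DetrImageProcessor

# A dummy 640x480 RGB image; any PIL image or numpy array works.
image = Image.new("RGB", (640, 480))
inputs = processor(images=image, return_tensors="pt")

# pixel_values is resized and normalized per the config above,
# e.g. roughly (1, 3, 800, 1066) for this aspect ratio.
print(inputs["pixel_values"].shape)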