IlyasMoutawwakil (HF staff) committed
Commit 8cd2626
1 Parent(s): fd46e37
Files changed (3)
  1. README.md +1 -1
  2. app.py +22 -19
  3. requirements.txt +1 -1
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Optimum Benchmark UI
+title: Auto Benchmark
 emoji: 🏋️
 colorFrom: purple
 colorTo: indigo
app.py CHANGED
@@ -10,18 +10,17 @@ from config_store import (
     get_inference_config,
     get_openvino_config,
     get_pytorch_config,
-    # get_ipex_config,
+    get_ipex_config,
 )
 from optimum_benchmark.launchers.base import Launcher  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
 from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
-
-# from optimum_benchmark.backends.ipex.utils import TASKS_TO_IPEXMODEL
+from optimum_benchmark.backends.ipex.utils import TASKS_TO_IPEXMODEL
 from optimum_benchmark import (
     BenchmarkConfig,
     PyTorchConfig,
     OVConfig,
-    # IPEXConfig,
+    IPEXConfig,
     ProcessConfig,
     InferenceConfig,
     Benchmark,
@@ -39,7 +38,7 @@ MODELS = [
 ]
 TASKS = (
     set(TASKS_TO_OVMODEL.keys())
-    # & set(TASKS_TO_IPEXMODEL.keys())
+    & set(TASKS_TO_IPEXMODEL.keys())
     & set(TASKS_TO_MODEL_LOADERS.keys())
 )
 
@@ -62,7 +61,7 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
         "inference": {},
         "openvino": {},
         "pytorch": {},
-        # "ipex": {},
+        "ipex": {},
     }
 
     for key, value in kwargs.items():
@@ -98,17 +97,17 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
         device=DEVICE,
         **configs["pytorch"],
     )
-    # configs["ipex"] = IPEXConfig(
-    #     task=task,
-    #     model=model,
-    #     device=DEVICE,
-    #     **configs["ipex"],
-    # )
+    configs["ipex"] = IPEXConfig(
+        task=task,
+        model=model,
+        device=DEVICE,
+        **configs["ipex"],
+    )
 
     outputs = {
         "openvino": "Running benchmark for OpenVINO backend",
         "pytorch": "Running benchmark for PyTorch backend",
-        # "ipex": "Running benchmark for IPEX backend",
+        "ipex": "Running benchmark for IPEX backend",
     }
 
     yield tuple(outputs[b] for b in BACKENDS)
@@ -171,6 +170,10 @@ def build_demo():
         "</a>"
         "<br>"
         "</h3>"
+        "<p style='text-align: center'>"
+        "This Space uses Optimum Benchmark to automatically benchmark a model from the Hub on different backends."
+        "<br>"
+        "The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
     )
 
     model = gr.Dropdown(
@@ -206,8 +209,8 @@
             openvino_config = get_openvino_config()
         with gr.Accordion(label="PyTorch Config", open=False, visible=True):
             pytorch_config = get_pytorch_config()
-        # with gr.Accordion(label="IPEX Config", open=False, visible=True):
-        #     ipex_config = get_ipex_config()
+        with gr.Accordion(label="IPEX Config", open=False, visible=True):
+            ipex_config = get_ipex_config()
 
     backends.change(
         inputs=backends,
@@ -225,8 +228,8 @@
             openvino_output = gr.Markdown()
         with gr.Accordion(label="PyTorch Output", open=True, visible=True):
             pytorch_output = gr.Markdown()
-        # with gr.Accordion(label="IPEX Output", open=True, visible=True):
-        #     ipex_output = gr.Markdown()
+        with gr.Accordion(label="IPEX Output", open=True, visible=True):
+            ipex_output = gr.Markdown()
 
     backends.change(
         inputs=backends,
@@ -246,12 +249,12 @@
             *inference_config.values(),
             *openvino_config.values(),
             *pytorch_config.values(),
-            # *ipex_config.values(),
+            *ipex_config.values(),
         },
         outputs={
             openvino_output,
            pytorch_output,
-            # ipex_output,
+            ipex_output,
        },
        concurrency_limit=1,
    )
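
For reference, the end-to-end call that the Space now performs for the IPEX backend can be sketched as follows. This is not part of the commit: the BenchmarkConfig keyword names (launcher/scenario/backend) and the Benchmark.launch(...) entry point are assumptions about the current optimum-benchmark API based on the imports shown above, and the task/model pair is hypothetical; only the IPEXConfig arguments (task, model, device) mirror the ones added in this diff.

# Hedged sketch, not from the commit: launching a single IPEX benchmark with the
# classes imported in app.py.
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    IPEXConfig,
    InferenceConfig,
    ProcessConfig,
)

config = BenchmarkConfig(
    name="ipex-benchmark",
    launcher=ProcessConfig(),    # run the benchmark in an isolated process
    scenario=InferenceConfig(),  # measure inference latency/memory
    backend=IPEXConfig(
        task="text-classification",                               # hypothetical task
        model="distilbert-base-uncased-finetuned-sst-2-english",  # hypothetical model
        device="cpu",
    ),
)
report = Benchmark.launch(config)  # benchmark report, rendered as markdown by the Space

In the Space, the same objects are built from the Gradio form values, and the resulting config and report are pushed to the Hub under the user's namespace, per the header text added in the hunk above.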
requirements.txt CHANGED
@@ -1 +1 @@
-optimum-benchmark[openvino]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report
+optimum-benchmark[openvino,ipex]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report
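
Installing this pinned requirement locally would look something like `pip install "optimum-benchmark[openvino,ipex]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report"`, which pulls the markdown-report branch together with the OpenVINO and IPEX extras.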