Mario Faúndez Vidal
committed on
Commit · 246a904
1 Parent(s): 51864ed
fix(deps): update tensorflow-hub to 0.16.1 for TF 2.19 compatibility

Fixes an ImportError with the estimator module by upgrading tensorflow-hub
from 0.15.0 to 0.16.1. Also adds explicit version constraints for the
TensorFlow ecosystem packages and updates Gradio to 5.49.1.

- Add error handling for tensorflow_hub imports
- Pin tensorflow>=2.18.0,<2.20.0 for stability
- Update gradio to fix security vulnerabilities

Fixes compatibility issues in the Hugging Face Space deployment.
- app.py +28 -3
- pyproject.toml +5 -2
- requirements.txt +9 -3
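A quick way to confirm the fix inside the Space is to import the affected packages and print their versions; with the previous pin, importing tensorflow_hub under TF 2.19 failed with the estimator-related ImportError described above. This is a minimal sketch, not part of the commit:

import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text

# With tensorflow-hub 0.15.0 this import chain raised an ImportError referencing
# the removed estimator module; after the upgrade to 0.16.1 all three imports succeed.
print("tensorflow", tf.__version__)
print("tensorflow-hub", hub.__version__)
print("tensorflow-text", text.__version__)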
app.py CHANGED

@@ -1,11 +1,23 @@
+# Suppress TensorFlow warnings about plugin registration
+import os
 from typing import Dict
 
 import gradio as gr
 import numpy as np
 import tensorflow as tf
-import tensorflow_hub as hub
+
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # Suppress TF warnings
+
 import tensorflow_text  # noqa: F401 - Required to register TensorFlow Text ops
 
+# Import tensorflow_hub with error handling
+try:
+    import tensorflow_hub as hub
+except ImportError as e:
+    # Fallback if tensorflow_hub has issues
+    print(f"Warning: TensorFlow Hub import issue: {e}")
+    hub = None
+
 # Use tf_keras for compatibility with models saved using tf.keras
 try:
     import tf_keras
@@ -15,7 +27,13 @@ try:
 except ImportError:
     keras = tf.keras
 
-from official.nlp.optimization import AdamWeightDecay, WarmUp
+# Import optimization with error handling
+try:
+    from official.nlp.optimization import AdamWeightDecay, WarmUp
+except ImportError:
+    # Fallback if official.nlp is not available
+    AdamWeightDecay = None
+    WarmUp = None
 
 np.set_printoptions(suppress=True)
 
@@ -23,7 +41,14 @@ labels = ["hate speech", "offensive language", "neither"]
 
 
 # Load model with custom objects
-custom_objects = {
+custom_objects = {}
+if hub is not None:
+    custom_objects["KerasLayer"] = hub.KerasLayer
+if AdamWeightDecay is not None:
+    custom_objects["AdamWeightDecay"] = AdamWeightDecay
+if WarmUp is not None:
+    custom_objects["WarmUp"] = WarmUp
+
 classifier_model = keras.models.load_model("classifier_model.h5", custom_objects=custom_objects)
 
 
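For context on the custom_objects guard above: hub.KerasLayer has to be registered because it is part of the saved architecture, while AdamWeightDecay and WarmUp are only needed when the saved training configuration is deserialized. A minimal inference-only load, assuming classifier_model.h5 sits in the working directory as in app.py, can therefore pass compile=False and register only the hub layer (a sketch, not part of the commit):

import tensorflow_text  # noqa: F401 - registers the TF Text ops used by the preprocessing layer
import tensorflow_hub as hub
import tf_keras as keras

# compile=False skips the saved training configuration, so only the
# architecture-level custom object (hub.KerasLayer) has to be provided.
model = keras.models.load_model(
    "classifier_model.h5",
    custom_objects={"KerasLayer": hub.KerasLayer},
    compile=False,
)
model.summary()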
pyproject.toml CHANGED

@@ -12,8 +12,11 @@ license = {text = "MIT"}
 
 dependencies = [
     "gradio>=5.0.0",
-    "tensorflow
-    "
+    "tensorflow>=2.18.0,<2.20.0",
+    "tensorflow-text>=2.18.0,<2.20.0",
+    "tensorflow-hub>=0.16.0",
+    "tf-models-official>=2.18.0",
+    "tf-keras>=2.18.0",
 ]
 
 [dependency-groups]
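To check that an installed environment actually satisfies the new constraints, a small sketch (not part of the commit; package names are taken from the dependency list above):

from importlib.metadata import version

# Each printed version should fall inside the ranges declared in pyproject.toml.
for pkg in ("tensorflow", "tensorflow-text", "tensorflow-hub", "tf-models-official", "tf-keras"):
    print(f"{pkg}=={version(pkg)}")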
requirements.txt CHANGED

@@ -373,13 +373,16 @@ tensorboard-data-server==0.7.2
     # via tensorboard
 tensorflow==2.19.1
     # via
+    #   classify-text-with-bert-hate-speech (pyproject.toml)
     #   tensorflow-text
     #   tf-keras
     #   tf-models-official
 tensorflow-datasets==4.9.9
     # via tf-models-official
-tensorflow-hub==0.15.0
-    # via
+tensorflow-hub==0.16.1
+    # via
+    #   classify-text-with-bert-hate-speech (pyproject.toml)
+    #   tf-models-official
 tensorflow-io-gcs-filesystem==0.37.1
     # via tensorflow
 tensorflow-metadata==1.17.2
@@ -399,7 +402,10 @@ text-unidecode==1.3
     #   kaggle
     #   python-slugify
 tf-keras==2.19.0
-    # via
+    # via
+    #   classify-text-with-bert-hate-speech (pyproject.toml)
+    #   tensorflow-hub
+    #   tf-models-official
 tf-models-official==2.19.1
     # via classify-text-with-bert-hate-speech (pyproject.toml)
 tf-slim==1.1.0
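The new "# via tensorflow-hub" entry under tf-keras above reflects that tensorflow-hub 0.16.x depends on the legacy Keras 2 package, since hub.KerasLayer does not work with the Keras 3 that became the default tf.keras in TF 2.16+. app.py handles this by importing tf_keras directly; an alternative, shown here only as a hedged sketch rather than anything this commit does, is to point tf.keras back at legacy Keras 2 through the TF_USE_LEGACY_KERAS environment variable:

import os

# Must be set before TensorFlow is imported, and requires the tf-keras package.
os.environ["TF_USE_LEGACY_KERAS"] = "1"

import tensorflow as tf
import tensorflow_hub as hub  # noqa: F401

print(tf.keras.__version__)  # expected to report a 2.x Keras version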