|
from transformers import AutoModel, AutoTokenizer |
|
from pathlib import Path |
|
import torch |
|
import sys |
|
|
|
# Export a locally saved Hugging Face checkpoint (current directory) to ONNX.
# Exits with status 1 on any failure, printing a human-readable reason.
try:
    print("Loading tokenizer...")
    model_dir = "."

    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    print("\u2713 Tokenizer loaded successfully")

    print("Loading model...")
    # NOTE(review): trust_remote_code=True executes Python code shipped with
    # the checkpoint — only use with checkpoints from a trusted source.
    model = AutoModel.from_pretrained(model_dir, trust_remote_code=True)
    print("\u2713 Model loaded successfully")

    print("Setting model to evaluation mode...")
    model.eval()
    print("\u2713 Model set to evaluation mode")

    print("Tokenizing input text...")
    # Sample input used only to trace the graph during export.
    inputs = tokenizer("Export this model to ONNX!", return_tensors="pt")
    print("\u2713 Input tokenized successfully")

    print("Exporting model to ONNX format...")
    output_path = "model.onnx"

    # Export only needs a forward pass; disable autograd during tracing.
    with torch.no_grad():
        torch.onnx.export(
            model,
            (inputs["input_ids"], inputs["attention_mask"]),
            output_path,
            input_names=["input_ids", "attention_mask"],
            output_names=["last_hidden_state"],
            # Mark batch and sequence dims dynamic so the exported graph
            # accepts arbitrary batch sizes and sequence lengths at runtime.
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "attention_mask": {0: "batch", 1: "seq"},
                "last_hidden_state": {0: "batch", 1: "seq"},
            },
            opset_version=14,
        )
    print("\u2713 Model exported to ONNX successfully")
    print(f"\u2713 ONNX file saved as: {output_path}")

except FileNotFoundError as e:
    print(f"\u2717 Error: Model files not found in current directory: {e}")
    sys.exit(1)
except ImportError as e:
    # trust_remote_code checkpoints may lazily import extra modules at load time.
    print(f"\u2717 Error: Failed to import required modules: {e}")
    sys.exit(1)
except Exception as e:
    print(f"\u2717 Error during model export: {e}")
    sys.exit(1)
|
|