"""Upload a locally fine-tuned Medusa model folder to the Hugging Face Hub."""

from huggingface_hub import HfApi, HfFolder, create_repo, upload_folder
from transformers import AutoTokenizer, AutoModelForCausalLM
import os

# --- Configuration ---
repo_name = "seliny2/medusa-Llama-2-70b-chat-hf"
# Replace with the path to your training output_dir (the folder to upload).
local_model_dir = "/work1/deming/shared/medusa-Llama-2-70b-chat-hf-stage1-4k-lr0.0001"
commit_message = "Initial upload of fine-tuned Medusa model"
private_repo = False  # True -> private repo; False -> public model

# Authenticate (optional if already logged in, e.g. via `huggingface-cli login`).
api = HfApi()

# Create the repo; exist_ok=True makes this a no-op if it already exists
# (it does not fail — it simply returns the existing repo's URL).
create_repo(repo_id=repo_name, private=private_repo, exist_ok=True)

# Upload the entire folder, skipping intermediate checkpoint files.
# Fix: commit_message was previously defined but never passed through.
upload_folder(
    folder_path=local_model_dir,
    path_in_repo="",
    repo_id=repo_name,
    repo_type="model",
    commit_message=commit_message,
    ignore_patterns=["checkpoint-310/*"],
)