Improved error message for trust_remote_code models
- app.py (+5 −4)
- llm_run.py (+2 −2)
app.py
@@ -101,8 +101,8 @@ def search_bar_gr(model_names,slider=True,double_search=False,key=None):
     ret.insert(0,col2)
     return ret
 
-import spaces
-
+#import spaces
+#@spaces.GPU(duration=300)
 def _run(path,genes,N,progress_bar):
     #Load the model
     progress_bar(0.20, desc="Loading Model...",total=100)
@@ -110,7 +110,7 @@ def _run(path,genes,N,progress_bar):
         model,tokenizer = load_model(path)
     except ValueError as e:
         print(f"Error loading model '{path}': {e}")
-        gr.Warning("Model couldn't load. This space currently only works with AutoModelForCausalLM models. Please check the model architecture and try again.")
+        gr.Warning("Model couldn't load. This space currently only works with AutoModelForCausalLM models and trust_remote_code=False. Please check the model architecture and whether it requires the execution of custom code and try again.")
         return None
     except OSError as e:
         print(f"Error loading model '{path}': {e}")
@@ -175,7 +175,6 @@ def run(path,progress_bar):
 
     progress_bar(1, desc="Done!",total=100)
 
-
 def prepare_run(model_name,progress_bar=gr.Progress()):
     global MODEL_SEARCHED_X,MODEL_NAMES
     if model_name in MODEL_NAMES:
@@ -222,6 +221,8 @@ def reload_env():
 
 
 # Load environment variables
+from dotenv import load_dotenv
+load_dotenv()
 
 USERNAME = os.environ['GITHUB_USERNAME']
 TOKEN = os.environ['GITHUB_TOKEN']
llm_run.py
@@ -34,8 +34,8 @@ def download_llm_to_cache(model_name, revision="main", cache_dir=None):
     return None
 
 def load_model(path,cache_dir=None):
-    model = transformers.AutoModelForCausalLM.from_pretrained(path,cache_dir=cache_dir,device_map='auto')
-    tokenizer = transformers.AutoTokenizer.from_pretrained(path,cache_dir=cache_dir,device_map='auto')
+    model = transformers.AutoModelForCausalLM.from_pretrained(path,cache_dir=cache_dir,device_map='auto',trust_remote_code=False)
+    tokenizer = transformers.AutoTokenizer.from_pretrained(path,cache_dir=cache_dir,device_map='auto',trust_remote_code=False)
     return model,tokenizer
 
 def llm_run(model,tokenizer,genes,N):
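A usage sketch of the updated load_model (both model ids are illustrative): a plain AutoModelForCausalLM checkpoint loads exactly as before, while a repo that requires custom code now fails fast with the ValueError that app.py reports to the user.

from llm_run import load_model

# A standard checkpoint loads as before.
model, tokenizer = load_model("gpt2")

# A repo that ships custom modeling code is now rejected: with
# trust_remote_code=False, transformers raises ValueError instead of
# executing the remote code.
try:
    load_model("some-org/custom-architecture-model")  # hypothetical repo id
except ValueError as e:
    print(f"Refused to load: {e}")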