Update app.py
app.py CHANGED

@@ -1,12 +1,16 @@
 import gradio as gr
 from transformers import AutoTokenizer
-from huggingface_hub import HfApi
+from huggingface_hub import HfApi, login

 api = HfApi()

 # Define a function to calculate tokens
-def count_tokens(llm_name, input_text):
+def count_tokens(llm_name, input_text, api_token):
     try:
+        # Login using the API token if provided
+        if api_token:
+            login(api_token)
+
         # Load the tokenizer for the selected transformer-based model
         tokenizer = AutoTokenizer.from_pretrained(llm_name)
         tokens = tokenizer.encode(input_text)
@@ -14,16 +18,26 @@ def count_tokens(llm_name, input_text):
     except Exception as e:
         return f"Error: {str(e)}"

-# Fetch model details including metadata (like
+# Fetch model details including metadata (like tags)
 models = list(api.list_models(task="text-generation"))

-#
-
+# Filter models that have the 'text-generation-inference' tag and 'text-generation' pipeline_tag
+filtered_models = []
+for model in models:
+    model_info = api.model_info(model.modelId)
+    if 'text-generation-inference' in model_info.tags and model_info.pipeline_tag == 'text-generation':
+        filtered_models.append(model.modelId)

-# Define custom CSS for a bluish theme
+# Define custom CSS for a bluish theme and cursor pointer
 custom_css = """
+.gr-dropdown {
+    cursor: pointer;
+}
 """

+# Set the default model to the first filtered model, or "gpt2" if there are no filtered models
+default_model = filtered_models[0] if filtered_models else "gpt2"
+
 # Create the Gradio interface
 with gr.Blocks(css=custom_css) as demo:
     gr.HTML("<h1 style='text-align: center; color: #0078d7;'>Token Counter for Transformer-Based Models</h1>")
@@ -32,14 +46,16 @@ with gr.Blocks(css=custom_css) as demo:
         "using selected transformer-based models from Hugging Face."
     )
     with gr.Row():
-        llm_dropdown = gr.Dropdown(choices=
+        llm_dropdown = gr.Dropdown(choices=filtered_models, label="Select Transformer Model", value=default_model)
     with gr.Row():
         input_text = gr.Textbox(label="Enter your text")
         output = gr.Textbox(label="Token Count", interactive=False)
+    with gr.Row():
+        api_token_input = gr.Textbox(label="Enter Hugging Face API Token (if needed)", type="password", placeholder="Your API Token", interactive=True)
     with gr.Row():
         submit_btn = gr.Button("Calculate Tokens")

-    submit_btn.click(count_tokens, inputs=[llm_dropdown, input_text], outputs=output)
+    submit_btn.click(count_tokens, inputs=[llm_dropdown, input_text, api_token_input], outputs=output)

 # Launch the app
-demo.launch(share=True)
+demo.launch(share=True, debug=True)
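For reference, the tag filtering added above issues one model_info request per model returned by list_models, which runs at import time before the interface is built. A minimal sketch of the same selection done in a single listing call, assuming a recent huggingface_hub release where HfApi.list_models accepts filter and limit (the limit of 50 is an arbitrary example value, not part of the original change):

    # Sketch: filter by tag and task directly in list_models() instead of a per-model loop.
    # Assumes a recent huggingface_hub release; limit=50 is an arbitrary example bound.
    from huggingface_hub import HfApi

    api = HfApi()

    filtered_models = [
        m.modelId
        for m in api.list_models(
            task="text-generation",              # same task filter as the original call
            filter="text-generation-inference",  # only models carrying this tag
            limit=50,                            # keep startup time bounded
        )
    ]

    default_model = filtered_models[0] if filtered_models else "gpt2"

Either form produces the list that feeds the llm_dropdown choices and the "gpt2" fallback used as the default value.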