i-darrshan's picture
Update app.py
677744a verified
raw
history blame
1.65 kB
from functools import lru_cache

import gradio as gr
from huggingface_hub import HfApi
from transformers import AutoTokenizer
# Shared Hub client, used at module level below to enumerate models.
api = HfApi()
@lru_cache(maxsize=32)
def _get_tokenizer(llm_name):
    """Load and memoize the tokenizer for *llm_name*.

    Downloading/instantiating a tokenizer from the Hub is expensive; caching
    makes repeated clicks with the same model near-instant.
    """
    return AutoTokenizer.from_pretrained(llm_name)


# Define a function to calculate tokens
def count_tokens(llm_name, input_text):
    """Count the tokens *input_text* produces under a model's tokenizer.

    Parameters
    ----------
    llm_name : str
        Hugging Face model id whose tokenizer is applied (e.g. "gpt2").
    input_text : str
        The text to tokenize.

    Returns
    -------
    str
        "Number of tokens: N" on success, or "Error: ..." on any failure.
    """
    try:
        tokenizer = _get_tokenizer(llm_name)
        tokens = tokenizer.encode(input_text)
        return f"Number of tokens: {len(tokens)}"
    except Exception as e:
        # Broad catch is deliberate: a bad model id, gated repo, or network
        # failure should surface as a message in the UI, not crash the app.
        return f"Error: {str(e)}"
# Fetch all text-generation models from the Hub so the user can pick one.
models = list(api.list_models(task="text-generation"))
# Dropdown choices are plain model ids (e.g. "gpt2"); the list is presented
# in the order the Hub returns it.
llm_options = [model.modelId for model in models]
# Define custom CSS for a bluish theme
# NOTE(review): currently an empty placeholder — no styles were ever added,
# so the default Gradio look is used. Fill in or drop the css= argument.
custom_css = """
"""
# Assemble the Gradio UI: a heading, a short description, a model picker,
# an input/output pair, and a button wired to count_tokens.
with gr.Blocks(css=custom_css) as demo:
    gr.HTML("<h1 style='text-align: center; color: #0078d7;'>Token Counter for Transformer-Based Models</h1>")
    gr.Markdown(
        "This app allows you to count the number of tokens in the input text "
        "using selected transformer-based models from Hugging Face."
    )

    # Row 1: model selection (defaults to gpt2).
    with gr.Row():
        model_selector = gr.Dropdown(choices=llm_options, label="Select Transformer Model", value="gpt2")

    # Row 2: free-text input alongside the read-only result box.
    with gr.Row():
        text_box = gr.Textbox(label="Enter your text")
        count_box = gr.Textbox(label="Token Count", interactive=False)

    # Row 3: the trigger button, wired to the counting callback.
    with gr.Row():
        run_button = gr.Button("Calculate Tokens")
        run_button.click(count_tokens, inputs=[model_selector, text_box], outputs=count_box)

# Launch the app
demo.launch(share=True)