import gradio as gr
from transformers import AutoTokenizer
from huggingface_hub import HfApi, login

api = HfApi()

# Count the tokens a given model's tokenizer produces for the input text
def count_tokens(llm_name, input_text, api_token):
    try:
        # Log in with the API token if one was provided (needed for gated models)
        if api_token:
            login(api_token)
        # Load the tokenizer for the selected model and encode the text
        tokenizer = AutoTokenizer.from_pretrained(llm_name)
        tokens = tokenizer.encode(input_text)
        return f"Number of tokens: {len(tokens)}"
    except Exception as e:
        return f"Error: {str(e)}"

# Fetch text-generation models with their metadata; the limit keeps the
# per-model metadata requests below fast (raise or remove it to scan more)
models = list(api.list_models(task="text-generation", limit=50))

# Keep only models that carry the 'text-generation-inference' tag and the
# 'text-generation' pipeline tag; note each model_info() call is a network request
filtered_models = []
for model in models:
    model_info = api.model_info(model.id)
    if "text-generation-inference" in model_info.tags and model_info.pipeline_tag == "text-generation":
        filtered_models.append(model.id)

# Custom CSS: show a pointer cursor over the model dropdown
custom_css = """
.gr-dropdown { cursor: pointer; }
"""

# Default to the first filtered model, or "gpt2" if none matched
default_model = filtered_models[0] if filtered_models else "gpt2"

# Create the Gradio interface
with gr.Blocks(css=custom_css) as demo:
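    # NOTE: the original snippet was truncated at gr.HTML(; everything from here
    # down is a minimal reconstruction of the interface, with placeholder header
    # text and labels, not the author's original markup.
    gr.HTML("<h1>Token Counter</h1>")
    model_dropdown = gr.Dropdown(
        choices=filtered_models or [default_model],
        value=default_model,
        label="Model",
    )
    text_input = gr.Textbox(label="Input text", lines=4)
    token_input = gr.Textbox(label="Hugging Face API token (optional)", type="password")
    count_button = gr.Button("Count tokens")
    result_output = gr.Textbox(label="Result")
    # Wire the button to the count_tokens function defined above
    count_button.click(
        count_tokens,
        inputs=[model_dropdown, text_input, token_input],
        outputs=result_output,
    )

demo.launch()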