# NOTE(review): the following lines are webpage-export residue from the
# Hugging Face Hub file viewer (uploader/commit metadata), not source code.
# They are kept, commented out, so the file remains valid Python:
#   pagezyhf's picture
#   pagezyhf HF Staff
#   ts
#   cf86247
#   raw / history blame / 2.06 kB
import gradio as gr
import requests
import json
import logging
import os
# Configure the root logger once at import time so INFO-level messages
# (and above) from this module are emitted to stderr.
logging.basicConfig(level=logging.INFO)
def get_model_info(model_id="Qwen/Qwen2-7B-Instruct", hf_token=None):
    """Fetch the top text-generation models from the Hugging Face Hub API.

    NOTE(review): despite its name, this function currently IGNORES
    ``model_id`` and returns the 5 most-liked (last 30 days) text-generation
    models. The parameter is kept for interface compatibility with callers.

    Args:
        model_id: Accepted but currently unused (see note above).
        hf_token: Optional Hub API token. Defaults to the ``HF_TOKEN``
            environment variable, read at call time (the previous default
            argument froze the value at import time).

    Returns:
        A pretty-printed JSON string of the API response, or ``None`` when
        the request fails or the response is not valid JSON.
    """
    if hf_token is None:
        # Lazy lookup: pick up environment changes made after import.
        hf_token = os.getenv('HF_TOKEN')

    url = "https://huggingface.co/api/models"
    params = {
        "limit": 5,
        "pipeline_tag": "text-generation",
        "sort": "likes30d",
        "full": "True",
        "config": "True",
    }
    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}

    try:
        # timeout prevents an unbounded hang on a stalled connection.
        response = requests.get(url, params=params, headers=headers, timeout=30)
        # Surface 4xx/5xx as exceptions instead of parsing an error body.
        response.raise_for_status()
        data = response.json()
    except requests.exceptions.RequestException as e:
        # Use the configured logger (not print) for consistency with the module.
        logging.error("Error fetching data for task text-generation: %s", e)
        return None
    except ValueError as e:
        # response.json() raises ValueError on a non-JSON body.
        logging.error("Invalid JSON in response: %s", e)
        return None
    return json.dumps(data, indent=4)
# url = f"https://huggingface.co/api/integrations/aws/v1/lookup/{model_id}"
# headers = {
# "Authorization": f"Bearer {hf_token}",
# }
# logging.info(f"Requesting model info for model ID: {model_id}")
# logging.info(f"URL: {url}")
# response = requests.get(url, headers=headers)
# logging.info(f"Response Status: {response.status_code}")
# logging.info(f"Response Headers: {response.headers}")
# logging.info(f"Response Text: {response.text}")
# if response.status_code != 200:
# logging.error(f"Error: {response.status_code} - {response.text}")
# return f"Error: {response.status_code}\\nResponse: {response.text}"
# data = response.json()
# logging.info("Successfully retrieved model info.")
# return json.dumps(data, indent=4)
# Gradio UI: one textbox in (model ID), pretty-printed JSON out.
iface = gr.Interface(
    fn=lambda model_id: get_model_info(model_id, hf_token=os.getenv('HF_TOKEN')),
    inputs=gr.Textbox(label="Model ID", placeholder="HuggingFaceH4/zephyr-7b-beta"),
    outputs=gr.Textbox(label="API Response", lines=20),
    title="Hugging Face Model Lookup",
    # Fixed: the previous description claimed an AWS-integration lookup, but
    # that code path is commented out; the backing function actually lists
    # the top text-generation models from the Hub.
    description="Enter a model ID; currently returns the top trending text-generation models from the Hugging Face Hub.",
)
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()