File size: 2,059 Bytes
35429d6
 
 
ae1d265
57f97fb
35429d6
ae1d265
 
 
004a803
cf86247
 
 
 
 
 
 
9444ced
cf86247
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81593f1
cf86247
 
81593f1
cf86247
35429d6
cf86247
 
 
321d8e9
cf86247
 
 
35429d6
cf86247
 
 
35429d6
 
004a803
b16d0f6
35429d6
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import gradio as gr
import requests
import json
import logging
import os

# Set up logging: configure the root logger so logging.* calls in this module
# emit INFO and above (basicConfig is a no-op if a handler is already set).
logging.basicConfig(level=logging.INFO)

def get_model_info(model_id="Qwen/Qwen2-7B-Instruct", hf_token=None):
    """Fetch trending text-generation models from the Hugging Face Hub API.

    NOTE(review): despite its name, the active implementation ignores
    *model_id* and returns the 5 most-liked (30-day) text-generation models
    as pretty-printed JSON — confirm this is the intended behavior; the
    per-model lookup it replaced lived in the now-removed commented block.

    Args:
        model_id: Currently unused; kept for interface compatibility with
            the Gradio callback that passes the textbox value here.
        hf_token: Hugging Face API token. Defaults to the HF_TOKEN
            environment variable, read at call time (the original default
            froze the env value at import time).

    Returns:
        str | None: The API response serialized as indented JSON, or None
        if the request fails or returns an HTTP error status.
    """
    if hf_token is None:
        hf_token = os.getenv('HF_TOKEN')

    url = "https://huggingface.co/api/models"
    params = {
        "limit": 5,
        "pipeline_tag": "text-generation",
        "sort": "likes30d",
        "full": "True",
        "config": "True",
    }

    # Anonymous requests work too; only attach the header when a token exists.
    headers = {}
    if hf_token:
        headers["Authorization"] = f"Bearer {hf_token}"

    try:
        # Timeout keeps the UI from hanging forever on a stalled connection.
        response = requests.get(url, params=params, headers=headers, timeout=30)
        # Treat HTTP 4xx/5xx as failures instead of JSON-dumping error bodies.
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        # Use the configured logger rather than print() for consistency.
        logging.error("Error fetching data for task text-generation: %s", e)
        return None

    return json.dumps(response.json(), indent=4)

def _lookup_model(model_id):
    """Gradio callback: forward the textbox value to get_model_info."""
    return get_model_info(model_id, hf_token=os.getenv('HF_TOKEN'))


# Single-textbox-in / single-textbox-out UI around the Hub lookup.
iface = gr.Interface(
    fn=_lookup_model,
    inputs=gr.Textbox(label="Model ID", placeholder="HuggingFaceH4/zephyr-7b-beta"),
    outputs=gr.Textbox(label="API Response", lines=20),
    title="Hugging Face Model Lookup",
    description="Enter a model ID to retrieve its AWS integration details from Hugging Face.",
)

if __name__ == "__main__":
    iface.launch()