Rename text_generator.py to most_downloaeded_model.py
most_downloaeded_model.py +36 -0
text_generator.py +0 -68
most_downloaeded_model.py
ADDED
@@ -0,0 +1,36 @@
+from transformers import Tool
+from huggingface_hub import list_models
+
+class HFModelDownloadsTool(Tool):
+    name = "model_download_counter"
+    description = (
+        "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. "
+        "It takes the name of the category (such as text-classification, depth-estimation, etc), and "
+        "returns the name of the checkpoint."
+    )
+
+    inputs = ["text"]
+    outputs = ["text"]
+
+    def __call__(self, task: str):
+        model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
+        return model.id
+
+# Push the tool to the Hub
+tool = HFModelDownloadsTool()
+tool.push_to_hub("hf-model-downloads")
+
+# Load the tool from the Hub
+loaded_tool = Tool.from_hub("hf-model-downloads")
+
+# Instantiate the HfAgent with the additional tool
+from transformers import HfAgent
+
+agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=[loaded_tool])
+
+# Run the agent with the new tool
+result = agent.run(
+    "Can you read out loud the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?"
+)
+
+print(result)
text_generator.py
DELETED
@@ -1,68 +0,0 @@
-import requests
-import os
-from transformers import pipeline
-
-
-from transformers import Tool
-# Import other necessary libraries if needed
-
-class TextGenerationTool(Tool):
-    name = "text_generator"
-    description = (
-        "This is a tool for text generation. It takes a prompt as input and returns the generated text."
-    )
-
-    inputs = ["text"]
-    outputs = ["text"]
-
-    def __call__(self, prompt: str):
-        #API_URL = "https://api-inference.huggingface.co/models/openchat/openchat_3.5"
-        #headers = {"Authorization": "Bearer " + os.environ['hf']}
-        token = os.environ['hf']
-        #payload = {
-        #    "inputs": prompt # Adjust this based on your model's input format
-        #}
-
-        #payload = {
-        #    "inputs": "Can you please let us know more details about your ",
-        #}
-
-        #def query(payload):
-        #    generated_text = requests.post(API_URL, headers=headers, json=payload).json()
-        #    print(generated_text)
-        #    return generated_text["text"]
-
-        # Replace the following line with your text generation logic
-        #generated_text = f"Generated text based on the prompt: '{prompt}'"
-
-        # Initialize the text generation pipeline
-        #text_generator = pipeline(model="lgaalves/gpt2-dolly", token=token)
-        text_generator = pipeline(model="microsoft/Orca-2-13b", token=token)
-
-        # Generate text based on a prompt
-        generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
-
-        # Print the generated text
-        print(generated_text)
-
-
-
-        return generated_text
-
-        # Define the payload for the request
-        #payload = {
-        #    "inputs": prompt # Adjust this based on your model's input format
-        #}
-
-        # Make the request to the API
-        #generated_text = requests.post(API_URL, headers=headers, json=payload).json()
-
-        # Extract and return the generated text
-        #return generated_text["generated_text"]
-
-        # Uncomment and customize the following lines based on your text generation needs
-        # text_generator = pipeline(model="gpt2")
-        # generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
-
-        # Print the generated text if needed
-        # print(generated_text)