#!/usr/bin/env python3
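"""Generate crossword-style clues for a word and a context sentence using the
Hugging Face Inference Providers router (OpenAI-compatible chat completions API).

Assumes the HF_TOKEN environment variable holds a valid Hugging Face access token.
"""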

import json
import logging
import os

import requests


# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)

logger = logging.getLogger(__name__)

# --- Setup your API access token ---
# Get your Hugging Face token from an environment variable for security
HF_TOKEN = os.getenv("HF_TOKEN")

if not HF_TOKEN:
    raise ValueError("Hugging Face API token not found. Please set the HF_TOKEN environment variable.")

# --- Define API parameters ---
# Use the OpenAI-compatible router endpoint for Inference Providers
API_URL = "https://router.huggingface.co/v1"
MODEL_ID = "deepseek-ai/DeepSeek-V3-0324:fireworks-ai" # Model and provider

def generate_crossword_clue(word: str, context: str) -> str:
    """
    Generates a crossword-style clue for a given word using the Hugging Face API router.

    Args:
        word (str): The word for which to generate a clue.
        context (str): The context sentence or phrase to help define the word.

    Returns:
        str: The generated crossword clue.
    """
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json"
    }

    # Construct the messages payload in an OpenAI-compatible format
    messages = [
        {
            "role": "system",
            "content": f"You are a crossword puzzle clue generator. Given a word and a context, your task is to generate a single, concise, creative, and fair crossword puzzle clue. The clue should be for the word '{word}'."
        },
        {
            "role": "user",
            "content": f"The word is '{word}'. The context is: '{context}'."
        }
    ]

    # Build the request payload. The OpenAI-compatible endpoint rejects
    # text-generation parameters such as 'do_sample'; use 'max_tokens'
    # (not 'max_new_tokens') to cap the response length if needed.
    payload = {
        "model": MODEL_ID,
        "messages": messages,
        # "max_tokens": 50,
        "temperature": 0.7
    }

    try:
        response = requests.post(f"{API_URL}/chat/completions", headers=headers, data=json.dumps(payload))
        response.raise_for_status()  # Raise an exception for HTTP errors
        
        result = response.json()
        
        # Extract the content from the API response
        if "choices" in result and len(result["choices"]) > 0:
            final_clue = result["choices"][0]["message"]["content"].strip()
            return final_clue
        else:
            return "No clue generated."

    except requests.exceptions.RequestException as e:
        logger.error("Request to the inference API failed: %s", e)
        # Log the response body, if one was received, to aid debugging
        if e.response is not None:
            logger.error("API response: %s", e.response.text)
        return "Error generating clue."

# --- Main program execution ---
if __name__ == "__main__":
    target_word = "cricket"
    target_context = "yesterday india won test series against england"

    clue = generate_crossword_clue(target_word, target_context)
    
    print(f"Word: {target_word}")
    print(f"Context: {target_context}")
    print(f"Generated Clue: {clue}")

    # Another example
    print("\n--- Another example ---")
    target_word_2 = "Shuttle"
    target_context_2 = "Man landed on the moon"
    clue_2 = generate_crossword_clue(target_word_2, target_context_2)
    print(f"Word: {target_word_2}")
    print(f"Context: {target_context_2}")
    print(f"Generated Clue: {clue_2}")