Save history
- logging/2025-02-20.jsonl +244 -0
- logging/2025-02-21.jsonl +0 -0
- logging/2025-02-22.jsonl +0 -0
- logging/2025-02-23.jsonl +0 -0
- logging/2025-02-24.jsonl +359 -0
- save.py +23 -0
- usage/2025-02-20.jsonl +13 -0
- usage/2025-02-21.jsonl +382 -0
- usage/2025-02-22.jsonl +464 -0
- usage/2025-02-23.jsonl +313 -0
- usage/2025-02-24.jsonl +82 -0
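Each line in the logging/*.jsonl files is a single JSON object describing a failed provider call: a "type" ("message" or "error"), the exception class in "error", a human-readable "message", and a "provider" object with "name", "url", "label", and "model". The repository's save.py is not reproduced in this view, so the snippet below is only a minimal sketch, assuming the dated files are read back to tally failures per provider and error class; the helper names are illustrative and not part of the repository.

import json
from collections import Counter
from pathlib import Path

LOG_DIR = Path("logging")  # dated files such as logging/2025-02-20.jsonl

def iter_records(day: str):
    """Yield one parsed record per non-empty line of that day's log (assumed layout)."""
    with (LOG_DIR / f"{day}.jsonl").open(encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

def summarize(day: str) -> Counter:
    """Count failures by (provider name, error class) for a single day."""
    counts = Counter()
    for rec in iter_records(day):
        provider = (rec.get("provider") or {}).get("name", "unknown")
        counts[(provider, rec.get("error", "unknown"))] += 1
    return counts

if __name__ == "__main__":
    # e.g. the ten most frequent provider/error pairs for 2025-02-20
    for (provider, error), n in summarize("2025-02-20").most_common(10):
        print(f"{n:4d}  {provider:<20} {error}")

In a summary like this, the repeated 429 rate limits and the recurring NameError: name 'CurlMime' is not defined entries in the log below stand out immediately.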
logging/2025-02-20.jsonl
ADDED
@@ -0,0 +1,244 @@
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4o"}}
{"type": "message", "message": "NameError: name 'items' is not defined", "error": "NameError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "ResponseStatusError: Response 500: ", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "flux"}}
{"type": "message", "message": "NotImplementedError: ", "error": "NotImplementedError", "provider": {"name": "OpenaiAccount", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "midjourney"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 401: {\"error\":\"Invalid username or password.\"}", "error": "ResponseStatusError", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "flux-schnell"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "NotImplementedError: ", "error": "NotImplementedError", "provider": {"name": "HuggingChat", "url": "https://huggingface.co/chat", "label": null, "model": "flux-schnell"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "error", "error": "CloudflareError", "message": "CloudflareError: Response 403: Cloudflare detected", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host www.blackbox.ai:443 ssl:default [The semaphore timeout period has expired]", "error": "ClientConnectorError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-r1"}}
{"type": "message", "message": "MissingRequirementsError: Install \"nodriver\" and \"platformdirs\" package | pip install -U nodriver platformdirs", "error": "MissingRequirementsError", "provider": {"name": "HuggingChat", "url": "https://huggingface.co/chat", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "NotImplementedError: ", "error": "NotImplementedError", "provider": {"name": "HuggingChat", "url": "https://huggingface.co/chat", "label": null, "model": "flux-schnell"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host glider.so:443 ssl:default [The semaphore timeout period has expired]", "error": "ClientConnectorError", "provider": {"name": "Glider", "url": "https://glider.so", "label": "Glider", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "NotImplementedError: ", "error": "NotImplementedError", "provider": {"name": "CopilotAccount", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "dall-e-3"}}
{"type": "message", "message": "MissingAuthError: Missing \"_U\" cookie", "error": "MissingAuthError", "provider": {"name": "BingCreateImages", "url": "https://www.bing.com/images/create", "label": "Microsoft Designer in Bing", "model": "dall-e-3"}}
{"type": "error", "error": "CloudflareError", "message": "CloudflareError: Response 403: Cloudflare detected", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (28) Failed to connect to jmuz.me port 443 after 21101 ms: Couldn't connect to server. See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nJmuz: RequestException: Failed to perform, curl: (28) Failed to connect to jmuz.me port 443 after 21101 ms: Couldn't connect to server. See https://curl.se/libcurl/c/libcurl-errors.html first for more details.\nLiaobots: ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "CloudflareError", "message": "CloudflareError: Response 403: Cloudflare detected", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (28) Failed to connect to jmuz.me port 443 after 21013 ms: Couldn't connect to server. See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "AttributeError", "message": "AttributeError: 'AsyncWebSocket' object has no attribute 'aclose'", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "ConnectionRefusedError", "message": "ConnectionRefusedError: [Errno 111] Connect call failed ('127.0.0.1', 38843)", "provider": {"name": "HailuoAI", "url": "https://www.hailuo.ai", "label": "Hailuo AI", "model": "MiniMax"}}
{"type": "error", "error": "AttributeError", "message": "AttributeError: 'AsyncWebSocket' object has no attribute 'asend'", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "RuntimeError", "message": "RuntimeError: Unknown error", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "RuntimeError", "message": "RuntimeError: Unknown error", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "RuntimeError", "message": "RuntimeError: Unknown error", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gemini-2.0-flash"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "o3-mini"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nBlackbox: ResponseStatusError: Response 429: You have reached your request limit for the day.\nDDG: ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "o3-mini"}}
{"type": "error", "error": "RuntimeError", "message": "RuntimeError: Unknown error", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "CloudflareError", "message": "CloudflareError: Response 403: Cloudflare detected", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "o3-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nDDG: ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}\nBlackbox: ResponseStatusError: Response 429: You have reached your request limit for the day.", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "o3-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nDDG: ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}\nBlackbox: ResponseStatusError: Response 429: You have reached your request limit for the day.", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "error", "error": "RuntimeError", "message": "RuntimeError: Unknown error", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "o3-mini-high"}}
{"type": "error", "error": "MissingAuthError", "message": "MissingAuthError: Add a \"api_key\"", "provider": {"name": "xAI", "url": "https://console.x.ai", "label": null, "model": "o3-mini"}}
{"type": "error", "error": "MissingAuthError", "message": "MissingAuthError: Add a \"api_key\"", "provider": {"name": "xAI", "url": "https://console.x.ai", "label": null, "model": "grok3"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "AttributeError", "message": "AttributeError: 'coroutine' object has no attribute 'startswith'", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "Flux Pro Ultra Raw"}}
{"type": "error", "error": "AssertionError", "message": "AssertionError: ", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "ConnectionRefusedError", "message": "ConnectionRefusedError: [Errno 111] Connect call failed ('127.0.0.1', 52835)", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "stabilityai/stable-diffusion-xl-base-1.0"}}
{"type": "error", "error": "ConnectionRefusedError", "message": "ConnectionRefusedError: [Errno 111] Connect call failed ('127.0.0.1', 35217)", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "stabilityai/stable-diffusion-xl-base-1.0"}}
{"type": "error", "error": "AssertionError", "message": "AssertionError: ", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "CloudflareError", "message": "CloudflareError: Response 403: Cloudflare detected", "provider": {"name": "RubiksAI", "url": "https://rubiks.ai", "label": "Rubiks AI", "model": "grok-beta"}}
{"type": "error", "error": "ConnectionRefusedError", "message": "ConnectionRefusedError: [Errno 111] Connect call failed ('127.0.0.1', 47071)", "provider": {"name": "You", "url": "https://you.com", "label": "You.com", "model": "agent"}}
{"type": "error", "error": "CloudflareError", "message": "CloudflareError: Response 403: Cloudflare detected", "provider": {"name": "RubiksAI", "url": "https://rubiks.ai", "label": "Rubiks AI", "model": "grok-beta"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 400: \"Invalid transport\"", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Error generating image: b'null\\n\\n'", "provider": {"name": "VoodoohopFlux1Schnell", "url": "https://voodoohop-flux-1-schnell.hf.space", "label": null, "model": "voodoohop-flux-1-schnell"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "sonar-pro"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "Flux Pro Ultra Raw"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "Flux Pro Ultra"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "Flux Schnell"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "o3-mini-high"}}
{"type": "error", "error": "ClientResponseError", "message": "ClientResponseError: 404, message='Not Found', url='https://api.prodia.com/generate?new=true&prompt=generate+photo+of+rain+of+buns&model=absolutereality_v181.safetensors+%5B3d9d4d2b%5D&negative_prompt=&steps=20&cfg=7&seed=5227&sampler=DPM%2B%2B+2M+Karras&aspect_ratio=square'", "provider": {"name": "Prodia", "url": "https://app.prodia.com", "label": null, "model": "absolutereality_v181.safetensors [3d9d4d2b]"}}
{"type": "error", "error": "MissingRequirementsError", "message": "MissingRequirementsError: Install \"gpt4all\" package | pip install -U g4f[local]", "provider": {"name": "Local", "url": null, "label": "GPT4All", "model": "Llama-3"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "You", "url": "https://you.com", "label": "You.com", "model": "gpt-4o-mini"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Message: 42[\"r1-1776_query_progress\",{\"output\":\"Okay, the user\",\"citations\":[],\"chunks\":[],\"final\":false,\"elapsed_time\":4.0209852159023285e-07,\"tokens_streamed\":4}]", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "r1-1776"}}
{"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "ResponseError: Invalid response: {\"detail\": {\"error\": \"Not authenticated\"}}", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "flux-pro"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Message: 42[\"sonar-reasoning-pro_query_progress\",{\"output\":\"<think>\\nOkay, I need to answer the query \\\"hello\\\" by providing various ways to say hello in English, both formal and informal, based on the given search results. Let me start by looking through the sources to gather all the greetings mentioned.\\n\\nFrom Source 1, there's a long list of informal greetings like \\\"Howdy, partner!\\\", \\\"What\\u2019s cookin\\u2019, good lookin\\u2019?\\\", and some cultural greetings like \\\"Aloha\\\" and \\\"Hola\\\". It also mentions classy ways such as \\\"Pleasure to see you\\\" and formal settings examples.\\n\\nSource 2 discusses informal greetings like \\\"How's it going?\\\" and responses. It includes phrases like \\\"What's up?\\\" and \\\"Howdy\\\", which is regional. There are also greetings for when you haven't seen someone in a while, like \\\"It\\u2019s been ages!\\\".\\n\\nSource 3 lists both informal and formal greetings. Informal ones include \\\"Hi!\\\", \\\"Hey!\\\", \\\"Morning!\\\", while formal ones are \\\"Good morning\\\", \\\"Good afternoon\\\", etc. It also mentions responses to these greetings.\\n\\nSource 4 focuses on American English greetings, emphasizing pronunciation and common phrases like \\\"Hey! How are ya?\\\" with example dialogues. It provides different responses based on how someone is feeling.\\n\\nSource 5 is about email greetings, both formal and informal. It suggests starting with \\\"Hi [Name]\\\" or \\\"Dear [Name]\\\" for formal contexts and gives examples like \\\"Good morning\\\" and \\\"Greetings\\\".\\n\\nSource 6 adds more regional variations like \\\"Oi!\\\" in Australia and other colloquial terms.\\n\\nSource 7 reinforces email salutations similar to Source 5.\\n\\nSource 8 is a video about greetings but since it's a video link, maybe the transcript isn't available, but from the title, it might cover both informal and professional greetings.\\n\\nNow, I need to organize these into sections: informal, formal, email greetings, cultural/regional variations, and responses. Make sure to cite sources properly after each example. Avoid duplications and ensure examples are categorized correctly. Also, include some responses where relevant, especially from Sources 2 and 4.\\n\\nCheck for any unique greetings from each source. For example, Source 1 has creative ones like \\\"Hello Queen!\\\" while Source 6 mentions Australian terms like \\\"How ya goin?\\\". Source 5's email greetings should be a separate section. Cultural greetings from Source 1 include nose bumping in Yemen or Tibet's tongue greeting\\u2014maybe mention those under cultural variations.\\n\\nEnsure that each example has the correct source citation. For instance, if a greeting appears in multiple sources, cite all relevant ones but prioritize the most relevant source. Also, note that some sources have overlapping examples; need to present them cohesively without repetition.\\n\\nFinally, structure the answer with headers for each section (Informal, Formal, Email Greetings, Cultural/Regional Variations) using markdown level 2 headers. Use bullet points for lists and bold where necessary. 
Make sure the introduction summarizes the answer and the conclusion wraps it up neatly.\\n</think>\\n\\nHere are diverse ways to greet someone in English across casual, formal, digital, and cultural contexts:\\n\\n## Informal Greetings \\n- **Casual phrases**: Hi!, Hey!, Yo!, What\\u2019s up?, How\\u2019s it going?, Howdy (Southern U.S.), G\\u2019day (Australia), Sup? [1][2][3][4][6] \\n- **Playful/affectionate terms**: Hello sunshine!, Howdy partner!, What\\u2019s cookin\\u2019, good lookin\\u2019?, Hello Queen!, Hey munchkin! [1] \\n- **Reconnecting**: Long time no see!, Where have you been hiding?, It\\u2019s been ages! [2][3] \\n- **Responses**: Not much., Good, thanks., Can\\u2019t complain., Hangin\\u2019 in there. [2][4] \\n\\n## Formal Greetings \\n- **Polite/respectful**: Good morning/afternoon/evening, Pleasure to see you, Warmest regards, How is the world treating you? [1][3][5] \\n- **First-time meetings**: It\\u2019s nice to meet you, The pleasure is mine [3][5] \\n\\n## Email Greetings \\n- **Formal**: Dear [Name], Good morning [Team], To Whom It May Concern [5][7] \\n- **Semi-formal**: Hello [Name], Greetings [5][7] \\n- **Casual**: Hi [Name], Hi there!, Hey everyone [5][7] \\n\\n## Cultural/Regional Variations \\n- **Gestures**: Bumping noses (Yemen/UAE), Sticking out tongues (Tibet), Pressing palms together (India/Cambodia) [1] \\n- **Language-specific**: Aloha (Hawaiian), Hola (Spanish), Bonjour (French), Konnichiwa (Japanese) [1][6] \\n- **Local slang**: Wagwan? (Jamaican), How ya goin? (Australia), You right? (New Zealand) [1][6] \\n\\n## Key Tips \\n- Match formality to context: Use playful greetings only with close friends/peers [1][4]. \\n- In emails, default to \\u201cHi [Name]\\u201d unless addressing superiors (\\u201cDear\\u201d) or groups (\\u201cGreetings\\u201d) [5][7]. \\n- Adjust tone based on regional norms (e.g., \\u201cHowdy\\u201d in Texas vs. \\u201cG\\u2019day\\u201d in Australia) [3][6]. \\n\\nWhether saying \\u201cHey!\\u201d to a friend or \\u201cGood afternoon\\u201d in a meeting, tailoring your greeting enhances connection and professionalism.\",\"citations\":[\"https://www.stylecraze.com/articles/ways-to-say-hello/\",\"https://www.speakconfidentenglish.com/greetings-for-every-situation/\",\"https://tandem.net/blog/20-greetings-in-english\",\"https://www.clearenglishcorner.com/blog/64\",\"https://www.indeed.com/career-advice/career-development/greeting-from-email\",\"https://www.berlitz.com/blog/hello-in-english\",\"https://www.mail.com/blog/posts/email-greetings/118/\",\"https://www.youtube.com/watch?v=Z4p2mL7m8Lc\"],\"chunks\":[\"<think>\\nOkay, I need to answer the query \\\"hello\\\" by providing various ways to say hello in English, both formal and informal, based on the\",\" given search\",\" results. Let\",\" me start by looking through\",\" the sources to\",\" gather all the\",\" greetings mentioned.\",\"\\n\\nFrom Source \",\"1, there's a long list of\",\" informal greetings\",\" like \\\"Howdy, partner!\\\", \\\"\",\"What\\u2019s cookin\\u2019,\",\" good lookin\\u2019?\\\", and\",\" some cultural\",\" greetings like \\\"\",\"Aloha\\\" and \\\"Hola\",\"\\\". It also mentions class\",\"y ways such as\",\" \\\"Pleasure to see\",\" you\\\" and formal settings\",\" examples.\\n\\nSource 2 discusses\",\" informal greetings like \\\"\",\"How's it going?\\\"\",\" and responses.\",\" It includes phrases\",\" like \\\"What's\",\" up?\\\" and \\\"Howdy\",\"\\\", which is regional\",\". 
There are also\",\" greetings for when\",\" you haven't seen someone in\",\" a while, like \\\"It\",\"\\u2019s been ages!\\\".\\n\\nSource 3 lists both\",\" informal and formal\",\" greetings. Informal\",\" ones include\",\" \\\"Hi!\\\", \\\"Hey!\\\",\",\" \\\"Morning!\\\", while formal\",\" ones are \\\"Good morning\",\"\\\", \\\"Good afternoon\",\"\\\", etc. It also mentions\",\" responses to\",\" these greetings.\\n\\nSource 4 focuses\",\" on American English\",\" greetings, emphasizing\",\" pronunciation and\",\" common phrases\",\" like \\\"Hey! How are ya\",\"?\\\" with example\",\" dialogues. It provides different\",\" responses based on\",\" how someone is\",\" feeling.\\n\\nSource 5 is about\",\" email greetings,\",\" both formal and\",\" informal. It suggests starting\",\" with \\\"Hi [Name]\\\" or \\\"Dear [Name]\\\" for formal contexts and\",\" gives examples\",\" like \\\"Good morning\",\"\\\" and \\\"Greetings\\\".\\n\\nSource 6 adds more\",\" regional variations\",\" like \\\"Oi!\\\" in Australia and\",\" other colloqu\",\"ial terms.\\n\\nSource 7 reinforces email\",\" salutations similar to\",\" Source 5.\\n\\nSource 8 is a video about\",\" greetings but\",\" since it's a video link\",\", maybe the transcript isn\",\"'t available,\",\" but from the\",\" title, it might cover\",\" both informal and\",\" professional greetings.\\n\\nNow, I\",\" need to organize these\",\" into sections:\",\" informal, formal\",\", email greetings\",\", cultural/regional variations, and\",\" responses. Make\",\" sure to cite\",\" sources properly\",\" after each example.\",\" Avoid duplications and ensure\",\" examples are\",\" categorized correctly\",\". Also, include some responses\",\" where relevant\",\", especially from\",\" Sources 2 and 4.\\n\\nCheck for any unique\",\" greetings from each\",\" source. For example, Source\",\" 1 has creative ones\",\" like \\\"Hello Queen\",\"!\\\" while Source\",\" 6 mentions Australian terms\",\" like \\\"How ya go\",\"in?\\\". Source 5's email\",\" greetings should be\",\" a separate section. Cultural\",\" greetings from Source\",\" 1 include nose bump\",\"ing in Yemen or\",\" Tibet's tongue\",\" greeting\\u2014maybe\",\" mention those under\",\" cultural variations\",\".\\n\\nEnsure that\",\" each example\",\" has the correct\",\" source citation\",\". For instance\",\", if a greeting\",\" appears in multiple sources,\",\" cite all relevant\",\" ones but prioritize\",\" the most relevant\",\" source. Also,\",\" note that some sources have\",\" overlapping examples\",\"; need to present them\",\" cohesively without repetition\",\".\\n\\nFinally, structure the\",\" answer with headers for\",\" each section\",\" (Informal, Formal\",\", Email Greetings,\",\" Cultural/Regional Variations) using markdown level 2 headers. Use bullet points for lists and bold where necessary. Make sure the introduction\",\" summarizes the\",\" answer and the conclusion\",\" wraps it up neatly.\\n</think>\",\"\\n\\nHere are diverse ways\",\" to greet someone\",\" in English across casual\",\", formal, digital\",\", and cultural contexts\",\":\\n\\n## Informal\",\" Greetings \\n- **Casual phrases**:\",\" Hi!, Hey!, Yo\",\"!, What\\u2019s up?, How\",\"\\u2019s it going?, How\",\"dy (Southern U\",\".S.), G\\u2019day (Australia\",\"), Sup? [1][2][3][4][6] \\n- **Playful\",\"/affectionate terms\",\"**: Hello sunshine\",\"!, Howdy partner!,\",\" What\\u2019s cookin\\u2019, good\",\" lookin\\u2019?, Hello\",\" Queen!, Hey munch\",\"kin! 
[1] \\n- **Reconnecting**:\",\" Long time no see\",\"!, Where have\",\" you been hiding\",\"?, It\\u2019s been ages!\",\" [2][3] \\n- **Responses\",\"**: Not much., Good\",\", thanks., Can\",\"\\u2019t complain.,\",\" Hangin\\u2019 in there\",\". [2][4] \\n\\n## Formal Gre\",\"etings \\n- **Polite\",\"/respectful**: Good morning\",\"/afternoon/evening, Ple\",\"asure to see you\",\", Warmest regards,\",\" How is the world treating you\",\"? [1][3][5] \\n- **First-time meetings**:\",\" It\\u2019s nice to\",\" meet you, The pleasure\",\" is mine [3][5] \\n\\n## Email Gre\",\"etings \\n- **Formal\",\"**: Dear [Name], Good morning\",\" [Team], To Wh\",\"om It May Concern [5][7] \\n- **Semi\",\"-formal**: Hello [Name], Greetings [5][7] \\n- **Casual\",\"**: Hi [Name], Hi there!, Hey\",\" everyone [5][7] \\n\\n## Cultural/\",\"Regional Variations \\n- **Gestures**: B\",\"umping noses (Yemen/UAE), Sticking\",\" out tongues (Tibet), Pressing palms together\",\" (India/Cambodia) [1] \\n- **Language\",\"-specific**: Aloha\",\" (Hawaiian), Hola (Spanish), Bonjour\",\" (French), Konn\",\"ichiwa (Japanese) [1][6] \\n- **Local slang\",\"**: Wagwan? (Jamaican), How ya go\",\"in? (Australia), You right?\",\" (New Zealand\",\") [1][6] \\n\\n## Key Tips\",\" \\n- Match form\",\"ality to context:\",\" Use playful greetings\",\" only with close friends\",\"/peers [1][4]. \\n- In emails\",\", default to \\u201cHi\",\" [Name]\\u201d unless addressing\",\" superiors (\\u201cDear\\u201d) or groups\",\" (\\u201cGreetings\\u201d) [5][7]. \\n- Adjust tone\",\" based on regional norms\",\" (e.g., \\u201cHowdy\\u201d in Texas vs\",\". \\u201cG\\u2019day\\u201d in Australia) [3][6]. \\n\\nWhether saying\",\" \\u201cHey!\\u201d to a friend or\",\" \\u201cGood afternoon\",\"\\u201d in a meeting,\",\" tailoring your\",\" greeting enhances connection\",\" and professionalism\",\".\"],\"final\":true,\"elapsed_time\":39.65950366621837,\"tokens_streamed\":1154}]", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "sonar-reasoning-pro"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: NameError: name 'CurlMime' is not defined", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nJmuz: NameError: name 'CurlMime' is not defined\nLiaobots: ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Message: 42[\"sonar-reasoning-pro_query_progress\",{\"output\":\"<think>\\nAlright, the user just said \\\"hello\\\". I need to provide a comprehensive answer on different ways to say hello, using the search results given. Let me start by looking at the sources.\\n\\nFrom [1], there's a long list of informal and creative greetings like \\\"Howdy, partner!\\\" and \\\"What\\u2019s cookin\\u2019, good lookin\\u2019?\\\". There's also some cultural greetings like \\\"Aloha\\\" and \\\"Bonjour\\\". [2] mentions informal phrases like \\\"How's it going?\\\" and responses. [3] lists both informal and formal greetings, including \\\"Hi\\\" and \\\"Good morning\\\". [4] focuses on American English greetings and their pronunciation. [5] and [7] talk about email greetings, both formal and informal. [6] adds more international variations. [8] is a video but seems to cover professional vs informal greetings.\\n\\nThe user probably wants a variety of greetings for different contexts. I should categorize them into sections like informal, formal, cultural, email, etc. Make sure to cite sources properly. Also, check if there are any unique greetings or tips on usage from each source. For example, [1] has creative ones, [5] gives email examples. Need to avoid repetition and present the info clearly with markdown formatting as per the rules.\\n</think>\\n\\nGreetings vary widely depending on context, formality, and cultural norms. Here\\u2019s a breakdown of common and creative ways to say \\u201chello\\u201d in English:\\n\\n---\\n\\n## **Informal Greetings** \\nUsed with friends, family, or in casual settings: \\n- **Hi! / Hey! / Yo!** \\u2013 Simple and universal[2][3][4]. \\n- **How\\u2019s it going? / What\\u2019s up?** \\u2013 Often paired with short responses like *\\u201cGood, thanks!\\u201d* or *\\u201cNot much!\\u201d*[2][4]. \\n- **Morning! / Howdy!** \\u2013 Regional variations (e.g., *Howdy* in the southern U.S.)[3][4]. \\n- **What\\u2019s cracking? / What\\u2019s good in the hood?** \\u2013 Slang for friendly check-ins[1]. \\n- **Long time no see! / Where have you been hiding?** \\u2013 For reconnecting after a while[2]. \\n\\n**Creative & Playful Examples**: \\n- *\\u201cWhat\\u2019s cookin\\u2019, good lookin\\u2019?\\u201d*[1] \\n- *\\u201cHey there, sunshine!\\u201d*[1] \\n- *\\u201cTough day? Need a hug?\\u201d*[1] \\n\\n---\\n\\n## **Formal Greetings** \\nSuitable for professional or respectful interactions: \\n- **Hello! / Good morning/afternoon/evening.** \\u2013 Classic and versatile[3][5][7]. \\n- **Dear [Name],** \\u2013 Common in emails (e.g., cover letters)[5][7]. \\n- **It\\u2019s a pleasure to meet you.** \\u2013 For first introductions[3]. \\n- **Greetings.** \\u2013 Neutral and polite for groups or unknown recipients[5]. \\n\\n**Email-Specific Examples**: \\n- *\\u201cHi [Name], I hope this email finds you well.\\u201d*[5] \\n- *\\u201cGood morning, team.\\u201d*[7] \\n\\n---\\n\\n## **Cultural & International Variations** \\nExamples from other languages or traditions: \\n- **Aloha! (Hawaiian)** \\u2013 Also means \\u201clove\\u201d or \\u201cpeace\\u201d[1][6]. \\n- **Bonjour! (French) / Hola! (Spanish)** \\u2013 Common in multilingual settings[1][3]. \\n- **Konnichiwa! (Japanese)** \\u2013 Used during daytime[1]. \\n- **Namaste! (Hindi)** \\u2013 Accompanied by a slight bow with palms together[1]. \\n\\n**Unique Physical Gestures**: \\n- **Bumping noses** (Yemen, UAE)[1]. 
\\n- **Sticking out tongues** (Tibet)[1]. \\n\\n---\\n\\n## **Situational Tips** \\n- **In emails**: Use *\\u201cHi [Name],\\u201d* for most cases; *\\u201cDear [Name],\\u201d* for formal contexts[5][7]. Avoid overly casual greetings like *\\u201cHey!\\u201d* in professional emails. \\n- **Responses**: Keep replies brief unless deeper conversation is expected (e.g., *\\u201cI\\u2019ve been better\\u201d* signals a need to talk)[4]. \\n\\n---\\n\\nWhether you\\u2019re greeting a friend, colleague, or someone from another culture, tailoring your hello to the context ensures warmth and respect. For more nuanced scenarios, observe local customs or default to polite simplicity.\",\"citations\":[\"https://www.stylecraze.com/articles/ways-to-say-hello/\",\"https://www.speakconfidentenglish.com/greetings-for-every-situation/\",\"https://tandem.net/blog/20-greetings-in-english\",\"https://www.clearenglishcorner.com/blog/64\",\"https://www.indeed.com/career-advice/career-development/greeting-from-email\",\"https://www.berlitz.com/blog/hello-in-english\",\"https://www.mail.com/blog/posts/email-greetings/118/\",\"https://www.youtube.com/watch?v=Z4p2mL7m8Lc\"],\"chunks\":[\"<think>\\nAlright, the user\",\" just said \\\"hello\\\". I\",\" need to provide\",\" a comprehensive answer\",\" on different\",\" ways to say hello\",\", using the search\",\" results given.\",\" Let me start\",\" by looking at\",\" the sources.\\n\\nFrom [1], there's a long\",\" list of informal\",\" and creative\",\" greetings like\",\" \\\"Howdy, partner!\\\"\",\" and \\\"What\\u2019s cookin\\u2019,\",\" good lookin\\u2019?\\\". There\",\"'s also some cultural\",\" greetings like \\\"\",\"Aloha\\\" and \\\"Bonjour\\\". [2] mentions informal\",\" phrases like\",\" \\\"How's it going?\\\"\",\" and responses.\",\" [3] lists both\",\" informal and formal\",\" greetings, including\",\" \\\"Hi\\\" and \\\"Good\",\" morning\\\". [4] focuses\",\" on American English\",\" greetings and their\",\" pronunciation.\",\" [5] and [7] talk about\",\" email greetings,\",\" both formal and informal.\",\" [6] adds more\",\" international variations\",\". [8] is a video but\",\" seems to cover\",\" professional vs\",\" informal greetings\",\".\\n\\nThe user probably wants\",\" a variety of\",\" greetings for\",\" different contexts. I\",\" should categorize\",\" them into sections like\",\" informal, formal\",\", cultural, email,\",\" etc. Make sure to cite\",\" sources properly.\",\" Also, check if there\",\" are any unique greetings\",\" or tips on usage\",\" from each source\",\". For example\",\", [1] has creative\",\" ones, [5] gives email\",\" examples. Need\",\" to avoid repetition\",\" and present the\",\" info clearly\",\" with markdown formatting as\",\" per the rules.\\n</think>\\n\\nGreetings vary widely\",\" depending on\",\" context, formality\",\", and cultural\",\" norms. Here\\u2019s\",\" a breakdown of common and\",\" creative ways to\",\" say \\u201chello\\u201d in English\",\":\\n\\n---\\n\\n## **Inform\",\"al Greetings** \\nUsed with\",\" friends, family,\",\" or in casual\",\" settings: \\n- **Hi! / Hey\",\"! / Yo!** \\u2013 Simple\",\" and universal[2][3][4]. \\n- **How\",\"\\u2019s it going? /\",\" What\\u2019s up?**\",\" \\u2013 Often paired with short\",\" responses like *\\u201c\",\"Good, thanks!\\u201d*\",\" or *\\u201cNot much\",\"!\\u201d*[2][4]. \\n- **Morning!\",\" / Howdy!** \\u2013 Regional variations\",\" (e.g., *Howdy* in the\",\" southern U.S\",\".)[3][4]. \\n- **What\",\"\\u2019s cracking? 
/ What\",\"\\u2019s good in the\",\" hood?** \\u2013 Slang\",\" for friendly\",\" check-ins[1]. \\n- **Long time\",\" no see! / Where\",\" have you been\",\" hiding?** \\u2013 For re\",\"connecting after a\",\" while[2]. \\n\\n**Creative & Play\",\"ful Examples**:\",\" \\n- *\\u201cWhat\\u2019s cook\",\"in\\u2019, good lookin\\u2019\",\"?\\u201d*[1] \\n- *\\u201cHey there,\",\" sunshine!\\u201d*[1] \\n- *\\u201cTough\",\" day? Need a hug?\\u201d*[1] \\n\\n---\\n\\n## **Formal Greetings** \\nSuitable for professional or respectful interactions: \\n- **Hello! / Good morning/\",\"afternoon/evening.** \\u2013 Classic and\",\" versatile[3][5][7]. \\n- **Dear [Name],** \\u2013 Common in\",\" emails (e.g., cover\",\" letters)[5][7]. \\n- **It\\u2019\",\"s a pleasure to meet you\",\".** \\u2013 For first introductions\",\"[3]. \\n- **Greetings\",\".** \\u2013 Neutral and\",\" polite for groups\",\" or unknown recipients[5]. \\n\\n**Email\",\"-Specific Examples**:\",\" \\n- *\\u201cHi [Name], I hope\",\" this email finds you well\",\".\\u201d*[5] \\n- *\\u201cGood morning\",\", team.\\u201d*[7] \\n\\n---\\n\\n## **Cultural &\",\" International Variations\",\"** \\nExamples from\",\" other languages\",\" or traditions\",\": \\n- **Aloha! (Hawaiian)** \\u2013 Also\",\" means \\u201clove\\u201d or \\u201c\",\"peace\\u201d[1][6]. \\n- **Bonjour! (French) / Hola!\",\" (Spanish)** \\u2013 Common in\",\" multilingual settings\",\"[1][3]. \\n- **Konn\",\"ichiwa! (Japanese)** \\u2013 Used during\",\" daytime[1]. \\n- **Namaste\",\"! (Hindi)** \\u2013 Accompan\",\"ied by a slight bow\",\" with palms together[1]. \\n\\n**Unique\",\" Physical Gestures\",\"**: \\n- **Bumping\",\" noses** (Yemen, UAE)[1]. \\n- **Sticking\",\" out tongues** (Tibet)[1]. \\n\\n---\\n\\n## **\",\"Situational Tips**\",\" \\n- **In emails**:\",\" Use *\\u201cHi [Name],\\u201d* for most cases\",\"; *\\u201cDear [Name],\\u201d* for formal\",\" contexts[5][7]. Avoid overly\",\" casual greetings like *\",\"\\u201cHey!\\u201d* in professional\",\" emails. \\n- **Respons\",\"es**: Keep replies brief\",\" unless deeper\",\" conversation\",\" is expected (e.g., *\\u201cI\",\"\\u2019ve been better\\u201d\",\"* signals a need to talk\",\")[4]. \\n\\n---\\n\\nWhether you\",\"\\u2019re greeting a friend,\",\" colleague, or someone from\",\" another culture\",\", tailoring your\",\" hello to the context\",\" ensures warmth and\",\" respect. For\",\" more nuanced scenarios\",\", observe local\",\" customs or default to\",\" polite simplicity\",\".\"],\"final\":true,\"elapsed_time\":36.30259839305654,\"tokens_streamed\":944}]", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "sonar-reasoning-pro"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: GPU token limit exceeded: data: null\n", "provider": {"name": "StableDiffusion35Large", "url": "https://stabilityai-stable-diffusion-3-5-large.hf.space", "label": null, "model": "stabilityai-stable-diffusion-3-5-large"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: GPU token limit exceeded: data: null\n", "provider": {"name": "StableDiffusion35Large", "url": "https://stabilityai-stable-diffusion-3-5-large.hf.space", "label": null, "model": "stabilityai-stable-diffusion-3-5-large"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-chat"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-chat"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-chat"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-chat"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3-haiku"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "error", "error": "MissingAuthError", "message": "MissingAuthError: ('Missing or invalid \"__Secure-1PSID\" cookie', RuntimeError('coroutine raised StopIteration'))", "provider": {"name": "Gemini", "url": "https://gemini.google.com", "label": "Google Gemini", "model": "gemini"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Custom", "url": null, "label": "Custom Provider", "model": "blackboxai"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nCustom: NameError: name 'CurlMime' is not defined", "provider": {"name": "Custom", "url": null, "label": "Custom Provider", "model": "blackboxai"}}
{"type": "error", "error": "MissingAuthError", "message": "MissingAuthError: Add a \"api_key\"", "provider": {"name": "Groq", "url": "https://console.groq.com/playground", "label": null, "model": "mixtral-8x7b-32768"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "sonar-pro"}}
{"type": "error", "error": "MissingAuthError", "message": "MissingAuthError: Add a \"api_key\"", "provider": {"name": "PerplexityApi", "url": "https://www.perplexity.ai", "label": "Perplexity API", "model": "llama-3-sonar-large-32k-online"}}
{"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "message", "message": "ResponseError: Invalid response: {\"detail\": {\"error\": \"Not authenticated\"}}", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ClientPayloadError: Response payload is not completed: <ContentLengthError: 400, message='Not enough data for satisfy content length header.'>", "error": "ClientPayloadError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "ClientPayloadError: Response payload is not completed: <ContentLengthError: 400, message='Not enough data for satisfy content length header.'>", "error": "ClientPayloadError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseError: Invalid response: {\"detail\": {\"error\": \"Not authenticated\"}}", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "ClientPayloadError: Response payload is not completed: <ContentLengthError: 400, message='Not enough data for satisfy content length header.'>", "error": "ClientPayloadError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-reasoner"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "sonar-reasoning-pro"}}
{"type": "error", "error": "TimeoutError", "message": "TimeoutError: ", "provider": {"name": "Qwen_QVQ_72B", "url": "https://qwen-qvq-72b-preview.hf.space", "label": null, "model": "qwen-qvq-72b-preview"}}
{"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseError: Invalid response: {\"detail\": {\"error\": \"Not authenticated\"}}", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "ClientPayloadError: Response payload is not completed: <ContentLengthError: 400, message='Not enough data for satisfy content length header.'>", "error": "ClientPayloadError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ClientPayloadError: Response payload is not completed: <ContentLengthError: 400, message='Not enough data for satisfy content length header.'>", "error": "ClientPayloadError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-r1"}}
{"type": "message", "message": "CloudflareError: Response 403: Cloudflare detected", "error": "CloudflareError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-2"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "sonar-reasoning-pro"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: NameError: name 'CurlMime' is not defined", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "o3-mini"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o-mini"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "o1-preview"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "llama-3.1-405b"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"400 status code (no body)\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "mistral-nemo"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "hermes-2-dpo"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gemini-2.0-flash-thinking"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gemini-2.0-flash"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-2"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "error", "error": "ConnectionRefusedError", "message": "ConnectionRefusedError: [WinError 1225] O computador remoto recusou a conex\u00e3o de rede", "provider": {"name": "HailuoAI", "url": "https://www.hailuo.ai", "label": "Hailuo AI", "model": "MiniMax"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/blackbox.json'", "error": "PermissionError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "blackboxai"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"400 Invalid type for 'messages[1].content[1].text': expected a string, but got an array instead.\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai"}}
{"type": "message", "message": "MissingAuthError: Add a \"api_key\"", "error": "MissingAuthError", "provider": {"name": "GeminiPro", "url": "https://ai.google.dev", "label": "Google Gemini API", "model": "gemini-1.5-pro"}}
{"type": "message", "message": "ResponseError: GPU token limit exceeded: data: null\n", "error": "ResponseError", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "qwen-qwen2-72b-instruct"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "HuggingFaceAPI", "url": "https://api-inference.huggingface.com", "label": "HuggingFace (Inference API)", "model": "meta-llama/Llama-3.2-11B-Vision-Instruct"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_Copilot.json'", "error": "PermissionError", "provider": {"name": "CopilotAccount", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_OpenaiChat.json'", "error": "PermissionError", "provider": {"name": "OpenaiAccount", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "MissingAuthError: ('Missing or invalid \"__Secure-1PSID\" cookie', PermissionError(13, 'Permission denied'))", "error": "MissingAuthError", "provider": {"name": "Gemini", "url": "https://gemini.google.com", "label": "Google Gemini", "model": "gemini"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nBlackbox: PermissionError: [Errno 13] Permission denied: 'har_and_cookies/blackbox.json'\nOIVSCode: NameError: name 'CurlMime' is not defined\nDeepInfraChat: NameError: name 'CurlMime' is not defined\nPollinationsAI: ResponseStatusError: Response 500: {\"error\":\"400 Invalid type for 'messages[1].content[1].text': expected a string, but got an array instead.\",\"status\":500}\nHuggingSpace: ResponseError: GPU token limit exceeded: data: null\n\nGeminiPro: MissingAuthError: Add a \"api_key\"\nHuggingFaceAPI: NameError: name 'CurlMime' is not defined\nCopilotAccount: PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_Copilot.json'\nOpenaiAccount: PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_OpenaiChat.json'\nGemini: MissingAuthError: ('Missing or invalid \"__Secure-1PSID\" cookie', PermissionError(13, 'Permission denied'))", "provider": {"name": "Gemini", "url": "https://gemini.google.com", "label": "Google Gemini", "model": "gemini"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/blackbox.json'", "error": "PermissionError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "blackboxai"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"400 Invalid type for 'messages[2].content[1].text': expected a string, but got an array instead.\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai"}}
{"type": "message", "message": "ResponseError: GPU token limit exceeded: data: null\n", "error": "ResponseError", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "qwen-qwen2-72b-instruct"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "HuggingFaceAPI", "url": "https://api-inference.huggingface.com", "label": "HuggingFace (Inference API)", "model": "meta-llama/Llama-3.2-11B-Vision-Instruct"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_OpenaiChat.json'", "error": "PermissionError", "provider": {"name": "OpenaiAccount", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "MissingAuthError: Add a \"api_key\"", "error": "MissingAuthError", "provider": {"name": "GeminiPro", "url": "https://ai.google.dev", "label": "Google Gemini API", "model": "gemini-1.5-pro"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_Copilot.json'", "error": "PermissionError", "provider": {"name": "CopilotAccount", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "message", "message": "MissingAuthError: ('Missing or invalid \"__Secure-1PSID\" cookie', PermissionError(13, 'Permission denied'))", "error": "MissingAuthError", "provider": {"name": "Gemini", "url": "https://gemini.google.com", "label": "Google Gemini", "model": "gemini"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nBlackbox: PermissionError: [Errno 13] Permission denied: 'har_and_cookies/blackbox.json'\nOIVSCode: NameError: name 'CurlMime' is not defined\nDeepInfraChat: NameError: name 'CurlMime' is not defined\nPollinationsAI: ResponseStatusError: Response 500: {\"error\":\"400 Invalid type for 'messages[2].content[1].text': expected a string, but got an array instead.\",\"status\":500}\nHuggingSpace: ResponseError: GPU token limit exceeded: data: null\n\nGeminiPro: MissingAuthError: Add a \"api_key\"\nHuggingFaceAPI: NameError: name 'CurlMime' is not defined\nCopilotAccount: PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_Copilot.json'\nOpenaiAccount: PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_OpenaiChat.json'\nGemini: MissingAuthError: ('Missing or invalid \"__Secure-1PSID\" cookie', PermissionError(13, 'Permission denied'))", "provider": {"name": "Gemini", "url": "https://gemini.google.com", "label": "Google Gemini", "model": "gemini"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (28) Failed to connect to oi-vscode-server.onrender.com port 443 after 42309 ms: Couldn't connect to server. See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
{"type": "error", "error": "PermissionError", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "0x-lite"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 503: HTML content", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-ai/DeepSeek-V3"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "chatgpt-4o-latest"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "NameError", "message": "NameError: name 'CurlMime' is not defined", "provider": {"name": "CablyAI", "url": "https://cablyai.com/chat", "label": null, "model": "FLUX.1 [dev]"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "DeepSeek-V3"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nJmuz: ResponseStatusError: Response 429: Rate limit\nLiaobots: ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "CloudflareError: Response 403: Cloudflare detected", "error": "CloudflareError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "ClientConnectorCertificateError: Cannot connect to host duckduckgo.com:443 ssl:True [SSLCertVerificationError: (1, \"[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: Hostname mismatch, certificate is not valid for 'duckduckgo.com'. (_ssl.c:1011)\")]", "error": "ClientConnectorCertificateError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "FileNotFoundError: could not find a valid chrome browser binary. please make sure chrome is installed.or use the keyword argument 'browser_executable_path=/path/to/your/browser' ", "error": "FileNotFoundError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "FileNotFoundError: could not find a valid chrome browser binary. please make sure chrome is installed.or use the keyword argument 'browser_executable_path=/path/to/your/browser' ", "error": "FileNotFoundError", "provider": {"name": "CopilotAccount", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"400 status code (no body)\",\"status\":500}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "evil"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "FileNotFoundError: could not find a valid chrome browser binary. please make sure chrome is installed.or use the keyword argument 'browser_executable_path=/path/to/your/browser' ", "error": "FileNotFoundError", "provider": {"name": "MicrosoftDesigner", "url": "https://designer.microsoft.com", "label": "Microsoft Designer", "model": "dall-e-3"}}
{"type": "message", "message": "MissingAuthError: Missing \"_U\" cookie", "error": "MissingAuthError", "provider": {"name": "BingCreateImages", "url": "https://www.bing.com/images/create", "label": "Microsoft Designer in Bing", "model": "dall-e-3"}}
{"type": "error", "error": "KeyError", "message": "KeyError: 'event_id'", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "qwen-2.5-1m-demo"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
logging/2025-02-21.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
logging/2025-02-22.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
logging/2025-02-23.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
logging/2025-02-24.jsonl
ADDED
@@ -0,0 +1,359 @@
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: \u6587\u5b57\u8fc7\u957f\uff0c\u8bf7\u5220\u51cf\u540e\u91cd\u8bd5\u3002", "error": "ResponseStatusError", "provider": {"name": "Yqcloud", "url": "https://chat9.yqcloud.top", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: \u6587\u5b57\u8fc7\u957f\uff0c\u8bf7\u5220\u51cf\u540e\u91cd\u8bd5\u3002", "error": "ResponseStatusError", "provider": {"name": "Yqcloud", "url": "https://chat9.yqcloud.top", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 522: HTML content", "error": "ResponseStatusError", "provider": {"name": "Mhystical", "url": "https://mhystical.cc", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 522: HTML content", "error": "ResponseStatusError", "provider": {"name": "Mhystical", "url": "https://mhystical.cc", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: \u6587\u5b57\u8fc7\u957f\uff0c\u8bf7\u5220\u51cf\u540e\u91cd\u8bd5\u3002", "error": "ResponseStatusError", "provider": {"name": "Yqcloud", "url": "https://chat9.yqcloud.top", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "Timeout: Failed to perform, curl: (28) Failed to connect to copilot.microsoft.com port 443 after 42092 ms: Couldn't connect to server. See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "Timeout", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4"}}
{"type": "message", "message": "RuntimeError: coroutine raised StopIteration", "error": "RuntimeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "ConnectionRefusedError: [WinError 1225] \u0423\u0434\u0430\u043b\u0435\u043d\u043d\u044b\u0439 \u043a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440 \u043e\u0442\u043a\u043b\u043e\u043d\u0438\u043b \u044d\u0442\u043e \u0441\u0435\u0442\u0435\u0432\u043e\u0435 \u043f\u043e\u0434\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u0435", "error": "ConnectionRefusedError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: \u6587\u5b57\u8fc7\u957f\uff0c\u8bf7\u5220\u51cf\u540e\u91cd\u8bd5\u3002", "error": "ResponseStatusError", "provider": {"name": "Yqcloud", "url": "https://chat9.yqcloud.top", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "error", "error": "UnicodeDecodeError", "message": "UnicodeDecodeError: 'utf-8' codec can't decode bytes in position 15-16: unexpected end of data", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseError: Model busy, retry later", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "sdxl-turbo"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "sdxl-turbo"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: GPU token limit exceeded: data: null\n", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "sd-3.5"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "dall-e-3"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "sdxl-turbo"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_CONVERSATION_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "midjourney"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "sdxl-turbo"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: GPU token limit exceeded: data: null\n", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "sd-3.5"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "flux"}}
{"type": "message", "message": "MissingAuthError: Missing \"_U\" cookie", "error": "MissingAuthError", "provider": {"name": "BingCreateImages", "url": "https://www.bing.com/images/create", "label": "Microsoft Designer in Bing", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "midjourney"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "midjourney"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "midjourney"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "TimeoutError", "message": "TimeoutError: ", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "sdxl-turbo"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "sdxl-turbo"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "flux-schnell"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "midjourney"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_CONVERSATION_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "error", "error": "error", "message": "error: bad character range \\w-= at position 17", "provider": {"name": "Gemini", "url": "https://gemini.google.com", "label": "Google Gemini", "model": "gemini-2.0-flash-thinking-with-apps"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: You have exceeded your GPU quota (75s requested vs. 64s left).", "provider": {"name": "G4F", "url": "https://huggingface.co/spaces/roxky/g4f-space", "label": "G4F framework", "model": "flux"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Your auth method doesn't allow you to make inference requests", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "llama-3.2-11b"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Input validation error: `inputs` tokens + `max_new_tokens` must be <= 4096. Given: 2548 `inputs` tokens and 2048 `max_new_tokens`", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "llama-3.2-11b"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-2"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 429: {\"detail\":{\"message\":\"You have sent too many messages to the model. Please try again later.\",\"code\":\"model_cap_exceeded\",\"clears_in\":0}}", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: \u6587\u5b57\u8fc7\u957f\uff0c\u8bf7\u5220\u51cf\u540e\u91cd\u8bd5\u3002", "error": "ResponseStatusError", "provider": {"name": "Yqcloud", "url": "https://chat9.yqcloud.top", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 522: HTML content", "error": "ResponseStatusError", "provider": {"name": "Mhystical", "url": "https://mhystical.cc", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nJmuz: ResponseStatusError: Response 429: Rate limit\nLiaobots: ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o-mini"}}
{"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host chatgpt.es:443 ssl:default [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o-mini"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host jmuz.me:443 ssl:default [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o-mini"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o-mini"}}
{"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
{"type": "message", "message": "TimeoutError: Request timed out: ", "error": "TimeoutError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "RuntimeError: Error: {'event': 'error', 'id': '0', 'errorCode': 'empty-text'}", "error": "RuntimeError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 403: Query blocked", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "blackboxai"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "IndexError", "message": "IndexError: list index out of range", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "o3-mini-high"}}
{"type": "error", "error": "IndexError", "message": "IndexError: list index out of range", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o-mini"}}
{"type": "error", "error": "IndexError", "message": "IndexError: list index out of range", "provider": {"name": "OpenaiAccount", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "TimeoutError: Request timed out: ", "error": "TimeoutError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "SSLError: Failed to perform, curl: (35) Recv failure: Connection reset by peer. See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "SSLError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "error", "error": "RuntimeError", "message": "RuntimeError: coroutine raised StopIteration", "provider": {"name": "You", "url": "https://you.com", "label": "You.com", "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "TimeoutError: Request timed out: ", "error": "TimeoutError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: GPU token limit exceeded: data: null\n", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "sd-3.5"}}
{"type": "message", "message": "MissingAuthError: Missing \"_U\" cookie", "error": "MissingAuthError", "provider": {"name": "BingCreateImages", "url": "https://www.bing.com/images/create", "label": "Microsoft Designer in Bing", "model": "dall-e-3"}}
{"type": "message", "message": "RuntimeError: coroutine raised StopIteration", "error": "RuntimeError", "provider": {"name": "MicrosoftDesigner", "url": "https://designer.microsoft.com", "label": "Microsoft Designer", "model": "dall-e-3"}}
{"type": "message", "message": "MissingAuthError: Missing \"_U\" cookie", "error": "MissingAuthError", "provider": {"name": "BingCreateImages", "url": "https://www.bing.com/images/create", "label": "Microsoft Designer in Bing", "model": "dall-e-3"}}
{"type": "message", "message": "RuntimeError: coroutine raised StopIteration", "error": "RuntimeError", "provider": {"name": "CopilotAccount", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "dall-e-3"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiAccount", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 503: HTML content", "provider": {"name": "HuggingFaceAPI", "url": "https://api-inference.huggingface.com", "label": "HuggingFace (Inference API)", "model": "qwen-2-vl-7b"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-r1"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "NameError: name 'CurlMime' is not defined", "error": "NameError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-r1"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Model Qwen/Qwen2-VL-7B-Instruct is currently loading", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "Qwen/Qwen2-VL-7B-Instruct"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Model Qwen/Qwen2-VL-7B-Instruct is currently loading", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "Qwen/Qwen2-VL-7B-Instruct"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "error", "error": "MissingAuthError", "message": "MissingAuthError: Add a \"api_key\"", "provider": {"name": "DeepSeek", "url": "https://platform.deepseek.com", "label": "DeepSeek", "model": "deepseek-chat"}}
{"type": "error", "error": "MissingAuthError", "message": "MissingAuthError: Add a \"api_key\"", "provider": {"name": "DeepSeek", "url": "https://platform.deepseek.com", "label": "DeepSeek", "model": "deepseek-chat"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "ClientPayloadError: Response payload is not completed: <ContentLengthError: 400, message='Not enough data for satisfy content length header.'>", "error": "ClientPayloadError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
{"type": "message", "message": "TimeoutError: Request timed out: ", "error": "TimeoutError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: You have exceeded your GPU quota (75s requested vs. 48s left).", "provider": {"name": "G4F", "url": "https://huggingface.co/spaces/roxky/g4f-space", "label": "G4F framework", "model": "flux-dev"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-r1"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: You have exceeded your GPU quota (75s requested vs. 50s left).", "provider": {"name": "G4F", "url": "https://huggingface.co/spaces/roxky/g4f-space", "label": "G4F framework", "model": "flux-dev"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "midjourney"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "flux-pro"}}
{"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiAccount", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "sdxl-turbo"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: You have exceeded your GPU quota (75s requested vs. 52s left).", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "black-forest-labs-flux-1-dev"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI (Image)", "model": "sdxl-turbo"}}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o-mini"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o-mini"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gemini-2.0-flash-thinking"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gemini-2.0-flash-thinking"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-2"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-2"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ConnectionRefusedError: [WinError 1225] \u0423\u0434\u0430\u043b\u0435\u043d\u043d\u044b\u0439 \u043a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440 \u043e\u0442\u043a\u043b\u043e\u043d\u0438\u043b \u044d\u0442\u043e \u0441\u0435\u0442\u0435\u0432\u043e\u0435 \u043f\u043e\u0434\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u0435", "error": "ConnectionRefusedError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ConnectionRefusedError: [WinError 1225] \u0423\u0434\u0430\u043b\u0435\u043d\u043d\u044b\u0439 \u043a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440 \u043e\u0442\u043a\u043b\u043e\u043d\u0438\u043b \u044d\u0442\u043e \u0441\u0435\u0442\u0435\u0432\u043e\u0435 \u043f\u043e\u0434\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u0435", "error": "ConnectionRefusedError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 500: {\"error\":\"DeepSeek API error: 402 Payment Required - {\\\"error\\\":{\\\"message\\\":\\\"Insufficient Balance\\\",\\\"type\\\":\\\"unknown_error\\\",\\\"param\\\":null,\\\"code\\\":\\\"invalid_request_error\\\"}}\",\"status\":500}", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 401: {\"error\":\"Invalid username or password.\"}", "error": "ResponseStatusError", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nJmuz: ResponseStatusError: Response 429: Rate limit\nLiaobots: ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gemini-2.0-flash-thinking"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gemini-2.0-flash-thinking"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gemini-2.0-flash-thinking"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nJmuz: ResponseStatusError: Response 429: Rate limit\nLiaobots: ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nJmuz: ResponseStatusError: Response 429: Rate limit\nLiaobots: ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-r1"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ClientConnectorError: Cannot connect to host liaobots.work:443 ssl:False [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 429: {\"detail\":{\"message\":\"You have sent too many messages to the model. Please try again later.\",\"code\":\"model_cap_exceeded\",\"clears_in\":0}}", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 429: {\"action\":\"error\",\"status\":429,\"type\":\"ERR_INPUT_LIMIT\"}", "error": "ResponseStatusError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ClientOSError: [Errno 104] Connection reset by peer", "error": "ClientOSError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-v3"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "BlackboxAPI", "url": "https://api.blackbox.ai", "label": "Blackbox AI API", "model": "deepseek-chat"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "deepseek-chat"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: GPU token limit exceeded: data: null\n", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "sd-3.5"}}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "error", "error": "RetryNoProviderError", "message": "RetryNoProviderError: No provider found"}
{"type": "message", "message": "ResponseStatusError: Response 500: ", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "flux"}}
{"type": "message", "message": "ResponseError: Error generating image: null\n\n", "error": "ResponseError", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "flux"}}
{"type": "message", "message": "InvalidStatus: server rejected WebSocket connection: HTTP 500", "error": "InvalidStatus", "provider": {"name": "OpenaiAccount", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "dall-e-3"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 431: ", "provider": {"name": "PollinationsImage", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "dall-e-3"}}
{"type": "message", "message": "ConnectionError: HTTPSConnectionPool(host='huggingface.co', port=443): Max retries exceeded with url: /api/models?inference=warm&pipeline_tag=text-generation (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f0c397d3390>: Failed to establish a new connection: [Errno 101] Network is unreachable'))", "error": "ConnectionError", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "deepseek-r1"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-2"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-2"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-v3"}}
{"type": "error", "error": "MissingAuthError", "message": "MissingAuthError: ('Missing or invalid \"__Secure-1PSID\" cookie', RuntimeError('coroutine raised StopIteration'))", "provider": {"name": "Gemini", "url": "https://gemini.google.com", "label": "Google Gemini", "model": "gemini-2.0"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: GPU token limit exceeded: data: null\n", "provider": {"name": "G4F", "url": "https://huggingface.co/spaces/roxky/g4f-space", "label": "G4F framework", "model": "flux"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 503: HTML content", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 503: HTML content", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 400: {\"error\":\"Model requires a Pro subscription; check out hf.co/pricing to learn more. Make sure to include your HF token in your query.\"}", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "CohereForAI/c4ai-command-r-plus-08-2024"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 400: {\"error\":\"Model requires a Pro subscription; check out hf.co/pricing to learn more. Make sure to include your HF token in your query.\"}", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "meta-llama/Llama-3.3-70B-Instruct"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nLiaobots: ResponseStatusError: Response 402: Error\nJmuz: ResponseStatusError: Response 429: Rate limit", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 429: Rate limit", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "error", "error": "RetryProviderError", "message": "RetryProviderError: RetryProvider failed:\nJmuz: ResponseStatusError: Response 429: Rate limit\nLiaobots: ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3.5-sonnet"}}
{"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4o"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 400: {\"error\":\"Model requires a Pro subscription; check out hf.co/pricing to learn more. Make sure to include your HF token in your query.\"}", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "meta-llama/Llama-2-13b-chat-hf"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"HfApiJson(Deserialize(Error(\\\"unknown variant `gguf`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`\\\", line: 1, column: 132)))\"}", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "city96/FLUX.1-dev-gguf"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptt", "url": "https://chatgptt.me", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ConnectionRefusedError: [WinError 1225] \u0423\u0434\u0430\u043b\u0435\u043d\u043d\u044b\u0439 \u043a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440 \u043e\u0442\u043a\u043b\u043e\u043d\u0438\u043b \u044d\u0442\u043e \u0441\u0435\u0442\u0435\u0432\u043e\u0435 \u043f\u043e\u0434\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u0435", "error": "ConnectionRefusedError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
{"type": "message", "message": "ResponseStatusError: Response 500: HTML content", "error": "ResponseStatusError", "provider": {"name": "CablyAI", "url": "https://cablyai.com", "label": "CablyAI", "model": "gpt-4o-mini"}}
{"type": "message", "message": "RequestException: Failed to perform, curl: (92) HTTP/2 stream 1 was not closed cleanly: INTERNAL_ERROR (err 2). See https://curl.se/libcurl/c/libcurl-errors.html first for more details.", "error": "RequestException", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
{"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 400: {\"error\":\"Authorization header is correct, but the token seems invalid\"}", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "sd-3.5"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Authorization header is correct, but the token seems invalid", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "llama-3.2-11b"}}
{"type": "error", "error": "ResponseError", "message": "ResponseError: Your auth method doesn't allow you to make inference requests", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "llama-3.2-11b"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
{"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
{"type": "message", "message": "RuntimeError: Error: {'event': 'error', 'id': '0', 'errorCode': 'text-too-long'}", "error": "RuntimeError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
{"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
save.py
ADDED
@@ -0,0 +1,23 @@
import os
import json
from g4f.cookies import get_cookies_dir

def get_logs(log_dir):
    # List the files in a log directory; an empty list keeps the copy loop
    # from failing when the directory does not exist yet.
    try:
        return [f for f in os.listdir(log_dir) if os.path.isfile(os.path.join(log_dir, f))]
    except OSError:
        return []

for part in (".logging", ".usage"):
    log_dir = os.path.join(get_cookies_dir(), part)
    save_dir = os.path.join(".", part[1:])
    for filename in get_logs(log_dir):
        with open(os.path.join(log_dir, filename), "rb") as file:
            with open(os.path.join(save_dir, filename), "w") as save:
                for line in file:
                    # Drop identifying fields before re-serializing each record.
                    line = json.loads(line)
                    if "origin" in line:
                        line.pop("origin")
                    if "user" in line:
                        line.pop("user")
                    save.write(json.dumps(line) + "\n")
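The copied records keep only the model, provider, and token-count fields, so the sanitized files below can be aggregated directly. As a minimal sketch (not part of the committed files, assuming the sanitized logs sit in ./usage as written by save.py), the snippet sums total_tokens per provider across the usage JSONL files:

import json
from collections import Counter
from pathlib import Path

# Sum total_tokens per provider across the sanitized usage files in ./usage.
totals = Counter()
for path in sorted(Path("usage").glob("*.jsonl")):
    with path.open() as file:
        for line in file:
            record = json.loads(line)
            totals[record.get("provider", "unknown")] += record.get("total_tokens", 0)

for provider, tokens in totals.most_common():
    print(f"{provider}: {tokens} tokens")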
usage/2025-02-20.jsonl
ADDED
@@ -0,0 +1,13 @@
{"model": "r1-1776", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 113, "total_tokens": 139}
{"model": "r1-1776", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 119, "total_tokens": 145}
{"model": "sonar-reasoning", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 220, "total_tokens": 246}
{"model": "r1-1776", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 121, "total_tokens": 147}
{"model": "r1-1776", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 27, "total_tokens": 53}
{"model": "sonar-reasoning-pro", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 569, "total_tokens": 595}
{"model": "sonar-reasoning-pro", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 587, "total_tokens": 613}
{"model": "sonar-reasoning-pro", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 804, "total_tokens": 830}
{"model": "r1-1776", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 143, "total_tokens": 169}
{"model": "r1-1776", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 13, "total_tokens": 39}
{"model": "sonar-reasoning-pro", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 537, "total_tokens": 563}
{"model": "sonar-reasoning-pro", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 678, "total_tokens": 704}
{"model": "sonar-reasoning-pro", "provider": "PerplexityLabs", "prompt_tokens": 26, "completion_tokens": 810, "total_tokens": 836}
usage/2025-02-21.jsonl
ADDED
@@ -0,0 +1,382 @@
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 24, "completion_tokens": 532, "total_tokens": 556}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 572, "completion_tokens": 746, "total_tokens": 1318}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 556, "completion_tokens": 556, "total_tokens": 1112}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 556, "completion_tokens": 1196, "total_tokens": 1752}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 2329, "completion_tokens": 697, "total_tokens": 3026}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 2378, "completion_tokens": 946, "total_tokens": 3324}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 120, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 162, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 282}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 18, "completion_tokens": 345, "total_tokens": 363}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 379, "completion_tokens": 505, "total_tokens": 884}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 210, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 309, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 519}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 11, "completion_tokens": 608, "total_tokens": 619}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 634, "completion_tokens": 922, "total_tokens": 1556}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 19, "completion_tokens": 90, "total_tokens": 109}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 128, "completion_tokens": 124, "total_tokens": 252}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 1569, "completion_tokens": 922, "total_tokens": 2491}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 12, "completion_tokens": 378, "total_tokens": 390}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 406, "completion_tokens": 519, "total_tokens": 925}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 315, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 539, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 854}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 19, "completion_tokens": 89, "total_tokens": 108}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 17, "completion_tokens": 98, "total_tokens": 115}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 300, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 884, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1184}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 131, "completion_tokens": 455, "total_tokens": 586}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 32, "completion_tokens": 599, "total_tokens": 631}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 277, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 1197, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1474}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 32, "completion_tokens": 141, "total_tokens": 173}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 32, "completion_tokens": 182, "total_tokens": 214}
{"model": "deepseek-r1", "provider": "Blackbox", "prompt_tokens": 32, "completion_tokens": 575, "total_tokens": 607}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 32, "completion_tokens": 141, "total_tokens": 173}
{"model": "deepseek-r1", "provider": "DeepInfraChat", "prompt_tokens": 32, "completion_tokens": 368, "total_tokens": 400}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 1081, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 170, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1251}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 863, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 1271, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 2134}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 14, "completion_tokens": 829, "total_tokens": 843}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 10, "completion_tokens": 1059, "total_tokens": 1069}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 1081, "completion_tokens": 380, "total_tokens": 1461}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 1473, "completion_tokens": 287, "total_tokens": 1760}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 1778, "completion_tokens": 798, "total_tokens": 2576}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 17, "completion_tokens": 1364, "total_tokens": 1381}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 18, "completion_tokens": 1015, "total_tokens": 1033}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 1065, "completion_tokens": 728, "total_tokens": 1793}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 1807, "completion_tokens": 892, "total_tokens": 2699}
{"model": "command-r-plus-08-2024", "provider": "CohereForAI", "prompt_tokens": 6394, "completion_tokens": 215, "total_tokens": 6609}
{"model": "general", "provider": "ImageLabs", "prompt_tokens": 9965, "completion_tokens": 207, "total_tokens": 10172}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 33, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 164, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 197}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 14, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 240, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 254}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 28, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 240, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 268}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 26, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 240, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 266}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 27, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 240, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 267}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 35, "completion_tokens": 89, "total_tokens": 124}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 157, "completion_tokens": 125, "total_tokens": 282}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 15, "completion_tokens": 1232, "total_tokens": 1247}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 15, "completion_tokens": 1134, "total_tokens": 1149}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 321, "completion_tokens": 135, "total_tokens": 456}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 1166, "completion_tokens": 984, "total_tokens": 2150}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 38, "completion_tokens": 124, "total_tokens": 162}
{"model": "deepseek-r1", "provider": "Blackbox", "prompt_tokens": 1166, "completion_tokens": 924, "total_tokens": 2090}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 2165, "completion_tokens": 1325, "total_tokens": 3490}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 36, "completion_tokens": 122, "total_tokens": 158}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 187, "completion_tokens": 161, "total_tokens": 348}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 364, "completion_tokens": 115, "total_tokens": 479}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 492, "completion_tokens": 193, "total_tokens": 685}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 708, "completion_tokens": 217, "total_tokens": 925}
{"model": "blackboxai", "provider": "Blackbox", "prompt_tokens": 62, "completion_tokens": 16, "total_tokens": 78}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 99, "completion_tokens": 16, "total_tokens": 115}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 132, "completion_tokens": 157, "total_tokens": 289}
{"model": "Copilot", "provider": "Copilot", "prompt_tokens": 16, "completion_tokens": 67, "total_tokens": 83}
{"model": "Copilot", "provider": "Copilot", "prompt_tokens": 99, "completion_tokens": 102, "total_tokens": 201}
{"model": "blackboxai", "provider": "Blackbox", "prompt_tokens": 218, "completion_tokens": 200, "total_tokens": 418}
{"model": "deepseek-r1", "provider": "Blackbox", "prompt_tokens": 435, "completion_tokens": 1418, "total_tokens": 1853}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 22, "completion_tokens": 225, "total_tokens": 247}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 435, "completion_tokens": 392, "total_tokens": 827}
{"model": "deepseek-r1", "provider": "Blackbox", "prompt_tokens": 22, "completion_tokens": 289, "total_tokens": 311}
{"model": "deepseek-r1", "provider": "Blackbox", "prompt_tokens": 17, "completion_tokens": 1296, "total_tokens": 1313}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 21, "completion_tokens": 590, "total_tokens": 611}
{"model": "janus-pro-7b-image", "provider": "G4F", "prompt_tokens": 9, "completion_tokens": 711, "total_tokens": 720}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 11, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 152, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 163}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 24, "completion_tokens": 167, "total_tokens": 191}
{"model": "deepseek-r1", "provider": "Blackbox", "prompt_tokens": 18, "completion_tokens": 356, "total_tokens": 374}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 9, "completion_tokens": 41, "total_tokens": 50}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 61, "completion_tokens": 160, "total_tokens": 221}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 237, "completion_tokens": 208, "total_tokens": 445}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 18, "completion_tokens": 1362, "total_tokens": 1380}
{"model": "deepseek-r1", "provider": "Glider", "prompt_tokens": 24, "completion_tokens": 743, "total_tokens": 767}
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 12, "total_tokens": 20}
{"model": "command-r", "provider": "HuggingSpace", "prompt_tokens": 8, "completion_tokens": 10, "total_tokens": 18}
{"model": "llama-3.3-70b", "provider": "HuggingFace", "prompt_tokens": 13, "completion_tokens": 13, "total_tokens": 26}
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 13, "completion_tokens": 177, "total_tokens": 190}
{"model": "flux", "provider": "G4F", "prompt_tokens": 8, "completion_tokens": 171, "total_tokens": 179}
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 8, "completion_tokens": 167, "total_tokens": 175}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 46, "completion_tokens": 133, "total_tokens": 179}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 221, "completion_tokens": 157, "total_tokens": 378}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 398, "completion_tokens": 189, "total_tokens": 587}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 398, "completion_tokens": 163, "total_tokens": 561}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 784, "completion_tokens": 202, "total_tokens": 986}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 1027, "completion_tokens": 16, "total_tokens": 1043}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 1057, "completion_tokens": 930, "total_tokens": 1987}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 8, "total_tokens": 19}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 38, "completion_tokens": 52, "total_tokens": 90}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 129, "completion_tokens": 89, "total_tokens": 218}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 277, "completion_tokens": 891, "total_tokens": 1168}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 1034, "completion_tokens": 174, "total_tokens": 1208}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 60, "completion_tokens": 275, "total_tokens": 335}
{"model": "command-r", "provider": "HuggingSpace", "prompt_tokens": 282, "completion_tokens": 44, "total_tokens": 326}
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 282, "completion_tokens": 202, "total_tokens": 484}
{"model": "flux", "provider": "HuggingFace", "prompt_tokens": 282, "completion_tokens": 90, "total_tokens": 372}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 352, "completion_tokens": 238, "total_tokens": 590}
{"model": "flux", "provider": "HuggingFace", "prompt_tokens": 282, "completion_tokens": 84, "total_tokens": 366}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 27, "completion_tokens": 1082, "total_tokens": 1109}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 621, "completion_tokens": 211, "total_tokens": 832}
{"model": "bigcode/starcoder2-15b", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 249, "total_tokens": 260}
{"model": "codellama/CodeLlama-34b-Instruct-hf", "provider": "HuggingFace", "prompt_tokens": 10208, "completion_tokens": 1025, "total_tokens": 11233}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 9, "total_tokens": 17}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 34, "completion_tokens": 1023, "total_tokens": 1057}
{"model": "flux", "provider": "G4F", "prompt_tokens": 91, "completion_tokens": 372, "total_tokens": 463}
{"model": "flux", "provider": "G4F", "prompt_tokens": 13, "completion_tokens": 210, "total_tokens": 223}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 16, "total_tokens": 24}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 559, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 2173, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 2732}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 24, "total_tokens": 32}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 39, "completion_tokens": 10, "total_tokens": 49}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 63, "completion_tokens": 25, "total_tokens": 88}
{"model": "black-forest-labs-flux-1-dev", "provider": "HuggingSpace", "prompt_tokens": 8, "completion_tokens": 185, "total_tokens": 193}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 1277, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 2754, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 4031}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 1331, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 4056, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 5387}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 12, "total_tokens": 20}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 33, "completion_tokens": 32, "total_tokens": 65}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 111, "completion_tokens": 34, "total_tokens": 145}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 229, "completion_tokens": 40, "total_tokens": 269}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 13, "completion_tokens": 39, "total_tokens": 52}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 89, "completion_tokens": 43, "total_tokens": 132}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 35, "total_tokens": 119}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 35, "total_tokens": 119}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 35, "total_tokens": 119}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 27, "total_tokens": 111}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 35, "total_tokens": 119}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 39, "total_tokens": 123}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 35, "total_tokens": 119}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 35, "total_tokens": 119}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 27, "total_tokens": 111}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace", "prompt_tokens": 84, "completion_tokens": 27, "total_tokens": 111}
{"model": "tiiuae/falcon-7b-instruct", "provider": "HuggingFace", "prompt_tokens": 13, "completion_tokens": 100, "total_tokens": 113}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 13, "completion_tokens": 47, "total_tokens": 60}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 124, "completion_tokens": 20, "total_tokens": 144}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 187, "completion_tokens": 155, "total_tokens": 342}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 366, "completion_tokens": 256, "total_tokens": 622}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 26, "completion_tokens": 19, "total_tokens": 45}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 25, "completion_tokens": 10, "total_tokens": 35}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 988, "completion_tokens": 34, "total_tokens": 1022}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1038, "completion_tokens": 287, "total_tokens": 1325}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1390, "completion_tokens": 282, "total_tokens": 1672}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1697, "completion_tokens": 431, "total_tokens": 2128}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1053, "completion_tokens": 208, "total_tokens": 1261}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1295, "completion_tokens": 370, "total_tokens": 1665}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 428, "completion_tokens": 66, "total_tokens": 494}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 555, "completion_tokens": 70, "total_tokens": 625}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 646, "completion_tokens": 92, "total_tokens": 738}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 763, "completion_tokens": 225, "total_tokens": 988}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1019, "completion_tokens": 121, "total_tokens": 1140}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1171, "completion_tokens": 76, "total_tokens": 1247}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1304, "completion_tokens": 126, "total_tokens": 1430}
{"model": "black-forest-labs-flux-1-dev", "provider": "HuggingSpace", "prompt_tokens": 15, "completion_tokens": 210, "total_tokens": 225}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1454, "completion_tokens": 149, "total_tokens": 1603}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1628, "completion_tokens": 83, "total_tokens": 1711}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1739, "completion_tokens": 89, "total_tokens": 1828}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1854, "completion_tokens": 101, "total_tokens": 1955}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 1973, "completion_tokens": 90, "total_tokens": 2063}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 2110, "completion_tokens": 90, "total_tokens": 2200}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 2234, "completion_tokens": 100, "total_tokens": 2334}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 2360, "completion_tokens": 106, "total_tokens": 2466}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 2483, "completion_tokens": 80, "total_tokens": 2563}
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace", "prompt_tokens": 192, "completion_tokens": 88, "total_tokens": 280}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 2588, "completion_tokens": 105, "total_tokens": 2693}
{"model": "DeepSeek-V3", "provider": "Blackbox", "prompt_tokens": 883, "completion_tokens": 204, "total_tokens": 1087}
{"model": "DeepSeek-V3", "provider": "Blackbox", "prompt_tokens": 1110, "completion_tokens": 182, "total_tokens": 1292}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 2725, "completion_tokens": 75, "total_tokens": 2800}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 2817, "completion_tokens": 79, "total_tokens": 2896}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 2924, "completion_tokens": 92, "total_tokens": 3016}
{"model": "01-ai/Yi-1.5-34B-Chat", "provider": "HuggingFace", "prompt_tokens": 3034, "completion_tokens": 103, "total_tokens": 3137}
{"model": "DeepSeek-V3", "provider": "Blackbox", "prompt_tokens": 1328, "completion_tokens": 185, "total_tokens": 1513}
{"model": "DeepSeek-V3", "provider": "Blackbox", "prompt_tokens": 1328, "completion_tokens": 196, "total_tokens": 1524}
{"model": "DeepSeek-V3", "provider": "Blackbox", "prompt_tokens": 1758, "completion_tokens": 194, "total_tokens": 1952}
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 1758, "completion_tokens": 265, "total_tokens": 2023}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 423, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 165, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 588}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 463, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 605, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1068}
{"model": "DeepSeek-V3", "provider": "Blackbox", "prompt_tokens": 8, "completion_tokens": 461, "total_tokens": 469}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 192, "completion_tokens": 88, "total_tokens": 280}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 12, "completion_tokens": 40, "total_tokens": 52}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 81, "completion_tokens": 328, "total_tokens": 409}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 425, "completion_tokens": 192, "total_tokens": 617}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 621, "completion_tokens": 548, "total_tokens": 1169}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1196, "completion_tokens": 319, "total_tokens": 1515}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 127, "completion_tokens": 120, "total_tokens": 247}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 262, "completion_tokens": 880, "total_tokens": 1142}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1146, "completion_tokens": 76, "total_tokens": 1222}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 13, "completion_tokens": 25, "total_tokens": 38}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 30, "completion_tokens": 627, "total_tokens": 657}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 661, "completion_tokens": 59, "total_tokens": 720}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 724, "completion_tokens": 966, "total_tokens": 1690}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1707, "completion_tokens": 284, "total_tokens": 1991}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 2009, "completion_tokens": 703, "total_tokens": 2712}
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 44, "completion_tokens": 259, "total_tokens": 303}
{"model": "qwen-qvq-72b-preview", "provider": "HuggingSpace", "prompt_tokens": 8, "completion_tokens": 37, "total_tokens": 45}
{"model": "qvq-72b", "provider": "HuggingSpace", "prompt_tokens": 8, "completion_tokens": 37, "total_tokens": 45}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 192, "completion_tokens": 268, "total_tokens": 460}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 192, "completion_tokens": 92, "total_tokens": 284}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 192, "completion_tokens": 266, "total_tokens": 458}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 208, "completion_tokens": 219, "total_tokens": 427}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 208, "completion_tokens": 75, "total_tokens": 283}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 223, "completion_tokens": 76, "total_tokens": 299}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 223, "completion_tokens": 226, "total_tokens": 449}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 223, "completion_tokens": 76, "total_tokens": 299}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 223, "completion_tokens": 72, "total_tokens": 295}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 244, "completion_tokens": 272, "total_tokens": 516}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 244, "completion_tokens": 268, "total_tokens": 512}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 244, "completion_tokens": 90, "total_tokens": 334}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 244, "completion_tokens": 270, "total_tokens": 514}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 267, "completion_tokens": 290, "total_tokens": 557}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 267, "completion_tokens": 292, "total_tokens": 559}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 267, "completion_tokens": 96, "total_tokens": 363}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 267, "completion_tokens": 96, "total_tokens": 363}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 33, "total_tokens": 44}
{"model": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "provider": "HuggingFace", "prompt_tokens": 12, "completion_tokens": 91, "total_tokens": 103}
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 43, "completion_tokens": 27, "total_tokens": 70}
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 43, "completion_tokens": 27, "total_tokens": 70}
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 43, "completion_tokens": 27, "total_tokens": 70}
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 85, "completion_tokens": 18, "total_tokens": 103}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 267, "completion_tokens": 294, "total_tokens": 561}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 267, "completion_tokens": 300, "total_tokens": 567}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 267, "completion_tokens": 292, "total_tokens": 559}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 267, "completion_tokens": 298, "total_tokens": 565}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 25, "completion_tokens": 631, "total_tokens": 656}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 310, "total_tokens": 602}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 292, "completion_tokens": 102, "total_tokens": 394}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 316, "total_tokens": 608}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 312, "total_tokens": 604}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 314, "total_tokens": 606}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 674, "completion_tokens": 122, "total_tokens": 796}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 809, "completion_tokens": 522, "total_tokens": 1331}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 314, "total_tokens": 606}
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 9, "completion_tokens": 15, "total_tokens": 24}
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 28, "completion_tokens": 75, "total_tokens": 103}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 292, "completion_tokens": 100, "total_tokens": 392}
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 107, "completion_tokens": 132, "total_tokens": 239}
{"model": "gemini-2.0-flash-thinking", "provider": "Liaobots", "prompt_tokens": 243, "completion_tokens": 532, "total_tokens": 775}
{"model": "deepseek-r1", "provider": "Blackbox", "prompt_tokens": 9, "completion_tokens": 55, "total_tokens": 64}
{"model": "gpt-4o", "provider": "Blackbox", "prompt_tokens": 858, "completion_tokens": 162, "total_tokens": 1020}
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 132, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 843, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 975}
{"model": "gpt-4o", "provider": "Blackbox", "prompt_tokens": 1342, "completion_tokens": 117, "total_tokens": 1459}
{"model": "gpt-4o", "provider": "Blackbox", "prompt_tokens": 1530, "completion_tokens": 129, "total_tokens": 1659}
{"model": "gemini-2.0-flash", "provider": "PollinationsAI", "completion_tokens": 452, "prompt_tokens": 10, "total_tokens": 462}
{"model": "gemini-2.0-flash", "provider": "Blackbox", "prompt_tokens": 724, "completion_tokens": 1544, "total_tokens": 2268}
{"model": "flux", "provider": "G4F", "prompt_tokens": 28, "completion_tokens": 193, "total_tokens": 221}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 18, "completion_tokens": 9, "total_tokens": 27}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 316, "total_tokens": 608}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 292, "completion_tokens": 102, "total_tokens": 394}
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 292, "completion_tokens": 238, "total_tokens": 530}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 292, "completion_tokens": 98, "total_tokens": 390}
{"model": "deepseek-chat", "provider": "PollinationsAI", "completion_tokens": 9, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 147, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 156}
{"model": "gpt-4o-mini", "provider": "OpenaiChat", "prompt_tokens": 56, "completion_tokens": 6, "total_tokens": 62}
{"model": "gpt-4", "provider": "OpenaiChat", "prompt_tokens": 76, "completion_tokens": 8, "total_tokens": 84}
{"model": "meta-llama/Llama-3.2-3B", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
{"model": "Qwen/QwQ-32B-Preview", "provider": "HuggingFace", "prompt_tokens": 14, "completion_tokens": 0, "total_tokens": 14}
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat", "prompt_tokens": 14, "completion_tokens": 0, "total_tokens": 14}
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat", "prompt_tokens": 12, "completion_tokens": 0, "total_tokens": 12}
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
{"model": "meta-llama/Llama-3.1-8B-Instruct", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
|
268 |
+
{"model": "meta-llama/Llama-3.3-70B-Instruct", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
|
269 |
+
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
|
270 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 11, "completion_tokens": 928, "total_tokens": 939}
|
271 |
+
{"model": "meta-llama/Llama-3.3-70B-Instruct", "provider": "HuggingChat", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
|
272 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 319, "completion_tokens": 315, "total_tokens": 634}
|
273 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 319, "completion_tokens": 317, "total_tokens": 636}
|
274 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 319, "completion_tokens": 222, "total_tokens": 541}
|
275 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 319, "completion_tokens": 319, "total_tokens": 638}
|
276 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 319, "completion_tokens": 311, "total_tokens": 630}
|
277 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 319, "completion_tokens": 237, "total_tokens": 556}
|
278 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 332, "completion_tokens": 314, "total_tokens": 646}
|
279 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 332, "completion_tokens": 104, "total_tokens": 436}
|
280 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 292, "completion_tokens": 96, "total_tokens": 388}
|
281 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 292, "completion_tokens": 240, "total_tokens": 532}
|
282 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 292, "completion_tokens": 230, "total_tokens": 522}
|
283 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 320, "total_tokens": 612}
|
284 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 316, "total_tokens": 608}
|
285 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 292, "completion_tokens": 98, "total_tokens": 390}
|
286 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 292, "completion_tokens": 318, "total_tokens": 610}
|
287 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 292, "completion_tokens": 102, "total_tokens": 394}
|
288 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 267, "completion_tokens": 316, "total_tokens": 583}
|
289 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 267, "completion_tokens": 238, "total_tokens": 505}
|
290 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 267, "completion_tokens": 102, "total_tokens": 369}
|
291 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 267, "completion_tokens": 100, "total_tokens": 367}
|
292 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 267, "completion_tokens": 104, "total_tokens": 371}
|
293 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 192, "completion_tokens": 106, "total_tokens": 298}
|
294 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 192, "completion_tokens": 272, "total_tokens": 464}
|
295 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 297, "completion_tokens": 100, "total_tokens": 397}
|
296 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 302, "completion_tokens": 314, "total_tokens": 616}
|
297 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 32, "completion_tokens": 106, "total_tokens": 138}
|
298 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 90, "completion_tokens": 298, "total_tokens": 388}
|
299 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 95, "completion_tokens": 100, "total_tokens": 195}
|
300 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 69, "completion_tokens": 100, "total_tokens": 169}
|
301 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 69, "completion_tokens": 314, "total_tokens": 383}
|
302 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 69, "completion_tokens": 335, "total_tokens": 404}
|
303 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 69, "completion_tokens": 90, "total_tokens": 159}
|
304 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 467, "completion_tokens": 19, "total_tokens": 486}
|
305 |
+
{"model": "qvq-72b", "provider": "HuggingSpace", "prompt_tokens": 467, "completion_tokens": 1078, "total_tokens": 1545}
|
306 |
+
{"model": "qvq-72b", "provider": "HuggingSpace", "prompt_tokens": 1590, "completion_tokens": 745, "total_tokens": 2335}
|
307 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 2396, "completion_tokens": 673, "total_tokens": 3069}
|
308 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 2396, "completion_tokens": 671, "total_tokens": 3067}
|
309 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 2396, "completion_tokens": 673, "total_tokens": 3069}
|
310 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 2412, "completion_tokens": 759, "total_tokens": 3171}
|
311 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 2412, "completion_tokens": 757, "total_tokens": 3169}
|
312 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 332, "completion_tokens": 312, "total_tokens": 644}
|
313 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 332, "completion_tokens": 102, "total_tokens": 434}
|
314 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 332, "completion_tokens": 314, "total_tokens": 646}
|
315 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 332, "completion_tokens": 108, "total_tokens": 440}
|
316 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 332, "completion_tokens": 318, "total_tokens": 650}
|
317 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 332, "completion_tokens": 314, "total_tokens": 646}
|
318 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 504, "completion_tokens": 342, "total_tokens": 846}
|
319 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 504, "completion_tokens": 338, "total_tokens": 842}
|
320 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 529, "completion_tokens": 320, "total_tokens": 849}
|
321 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 529, "completion_tokens": 106, "total_tokens": 635}
|
322 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 529, "completion_tokens": 102, "total_tokens": 631}
|
323 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 529, "completion_tokens": 104, "total_tokens": 633}
|
324 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 529, "completion_tokens": 322, "total_tokens": 851}
|
325 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 529, "completion_tokens": 104, "total_tokens": 633}
|
326 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 529, "completion_tokens": 318, "total_tokens": 847}
|
327 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 48, "completion_tokens": 4011, "total_tokens": 4059}
|
328 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 529, "completion_tokens": 104, "total_tokens": 633}
|
329 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 529, "completion_tokens": 318, "total_tokens": 847}
|
330 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 529, "completion_tokens": 318, "total_tokens": 847}
|
331 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 529, "completion_tokens": 320, "total_tokens": 849}
|
332 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 529, "completion_tokens": 106, "total_tokens": 635}
|
333 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 529, "completion_tokens": 104, "total_tokens": 633}
|
334 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 529, "completion_tokens": 102, "total_tokens": 631}
|
335 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 570, "completion_tokens": 122, "total_tokens": 692}
|
336 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 595, "completion_tokens": 308, "total_tokens": 903}
|
337 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 595, "completion_tokens": 120, "total_tokens": 715}
|
338 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 595, "completion_tokens": 104, "total_tokens": 699}
|
339 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 565, "completion_tokens": 104, "total_tokens": 669}
|
340 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 565, "completion_tokens": 362, "total_tokens": 927}
|
341 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 332, "completion_tokens": 314, "total_tokens": 646}
|
342 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 332, "completion_tokens": 312, "total_tokens": 644}
|
343 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 332, "completion_tokens": 102, "total_tokens": 434}
|
344 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 332, "completion_tokens": 102, "total_tokens": 434}
|
345 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 105, "completion_tokens": 104, "total_tokens": 209}
|
346 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 105, "completion_tokens": 98, "total_tokens": 203}
|
347 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 105, "completion_tokens": 96, "total_tokens": 201}
|
348 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 105, "completion_tokens": 316, "total_tokens": 421}
|
349 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 105, "completion_tokens": 104, "total_tokens": 209}
|
350 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 191, "completion_tokens": 102, "total_tokens": 293}
|
351 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 191, "completion_tokens": 102, "total_tokens": 293}
|
352 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 191, "completion_tokens": 351, "total_tokens": 542}
|
353 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 191, "completion_tokens": 111, "total_tokens": 302}
|
354 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 191, "completion_tokens": 111, "total_tokens": 302}
|
355 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 579, "completion_tokens": 108, "total_tokens": 687}
|
356 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 579, "completion_tokens": 308, "total_tokens": 887}
|
357 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 579, "completion_tokens": 113, "total_tokens": 692}
|
358 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 579, "completion_tokens": 316, "total_tokens": 895}
|
359 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 579, "completion_tokens": 108, "total_tokens": 687}
|
360 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 681, "completion_tokens": 280, "total_tokens": 961}
|
361 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 681, "completion_tokens": 276, "total_tokens": 957}
|
362 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 700, "completion_tokens": 250, "total_tokens": 950}
|
363 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 719, "completion_tokens": 246, "total_tokens": 965}
|
364 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 719, "completion_tokens": 248, "total_tokens": 967}
|
365 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 719, "completion_tokens": 244, "total_tokens": 963}
|
366 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 719, "completion_tokens": 248, "total_tokens": 967}
|
367 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 650, "completion_tokens": 308, "total_tokens": 958}
|
368 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 650, "completion_tokens": 314, "total_tokens": 964}
|
369 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 709, "completion_tokens": 414, "total_tokens": 1123}
|
370 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 709, "completion_tokens": 102, "total_tokens": 811}
|
371 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 709, "completion_tokens": 136, "total_tokens": 845}
|
372 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 709, "completion_tokens": 108, "total_tokens": 817}
|
373 |
+
{"model": "deepseek-chat", "provider": "Blackbox", "prompt_tokens": 26107, "completion_tokens": 31, "total_tokens": 26138}
|
374 |
+
{"model": "deepseek-chat", "provider": "Blackbox", "prompt_tokens": 27805, "completion_tokens": 1118, "total_tokens": 28923}
|
375 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 1666, "completion_tokens": 1796, "total_tokens": 3462}
|
376 |
+
{"model": "deepseek-v3", "provider": "Feature", "prompt_tokens": 9, "completion_tokens": 11, "total_tokens": 20}
|
377 |
+
{"model": "deepseek-r1", "provider": "Feature", "prompt_tokens": 9, "completion_tokens": 11, "total_tokens": 20}
|
378 |
+
{"model": "qwen-2.5-coder-32b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
379 |
+
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
380 |
+
{"model": "qwen-qvq-72b-preview", "provider": "HuggingSpace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
381 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
382 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 10, "total_tokens": 18}
|
usage/2025-02-22.jsonl
ADDED
@@ -0,0 +1,464 @@
1 |
+
{"model": "sonar-reasoning-pro", "provider": "PerplexityLabs", "prompt_tokens": 12803, "completion_tokens": 1840, "total_tokens": 14643}
|
2 |
+
{"model": "r1-1776", "provider": "PerplexityLabs", "prompt_tokens": 12803, "completion_tokens": 1169, "total_tokens": 13972}
|
3 |
+
{"model": "sonar-pro", "provider": "PerplexityLabs", "prompt_tokens": 12803, "completion_tokens": 2149, "total_tokens": 14952}
|
4 |
+
{"model": "o3-mini", "provider": "Blackbox", "prompt_tokens": 12780, "completion_tokens": 4185, "total_tokens": 16965}
|
5 |
+
{"model": "flux-dev", "provider": "G4F"}
|
6 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
7 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
8 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
9 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
10 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 78, "completion_tokens": 65, "total_tokens": 143}
|
11 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 561, "total_tokens": 771, "completion_tokens": 210, "prompt_tokens_details": null}
|
12 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 884, "total_tokens": 1230, "completion_tokens": 346, "prompt_tokens_details": null}
|
13 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 1260, "total_tokens": 1661, "completion_tokens": 401, "prompt_tokens_details": null}
|
14 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 1691, "total_tokens": 2055, "completion_tokens": 364, "prompt_tokens_details": null}
|
15 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 2061, "total_tokens": 2430, "completion_tokens": 369, "prompt_tokens_details": null}
|
16 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 2448, "total_tokens": 2784, "completion_tokens": 336, "prompt_tokens_details": null}
|
17 |
+
{"model": "Copilot", "provider": "Copilot", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
18 |
+
{"model": "auto", "provider": "OpenaiChat", "prompt_tokens": 14, "completion_tokens": 0, "total_tokens": 14}
|
19 |
+
{"model": "deepseek-r1", "provider": "DeepInfraChat", "prompt_tokens": 706, "total_tokens": 1507, "completion_tokens": 801, "estimated_cost": 0.0024519}
|
20 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 4056, "total_tokens": 4451, "completion_tokens": 395, "prompt_tokens_details": null}
|
21 |
+
{"model": "claude-3-haiku-20240307", "provider": "DDG", "prompt_tokens": 27, "completion_tokens": 140, "total_tokens": 167}
|
22 |
+
{"model": "meta-llama/Llama-3.3-70B-Instruct-Turbo", "provider": "DDG", "prompt_tokens": 19, "completion_tokens": 157, "total_tokens": 176}
|
23 |
+
{"model": "meta-llama/Llama-3.3-70B-Instruct", "provider": "DeepInfraChat", "prompt_tokens": 20, "total_tokens": 283, "completion_tokens": 263, "estimated_cost": 0.00010980000000000001}
|
24 |
+
{"model": "all-tools-230b", "provider": "ChatGLM", "prompt_tokens": 67, "completion_tokens": 132, "total_tokens": 199}
|
25 |
+
{"model": "gpt-4", "provider": "Yqcloud", "prompt_tokens": 15, "completion_tokens": 176, "total_tokens": 191}
|
26 |
+
{"model": "gpt-4", "provider": "Yqcloud", "prompt_tokens": 15, "completion_tokens": 424, "total_tokens": 439}
|
27 |
+
{"model": "gpt-4", "provider": "Yqcloud", "prompt_tokens": 16, "completion_tokens": 201, "total_tokens": 217}
|
28 |
+
{"model": "gpt-4", "provider": "Yqcloud", "prompt_tokens": 16, "completion_tokens": 140, "total_tokens": 156}
|
29 |
+
{"model": "qwen-qvq-72b-preview", "provider": "Qwen_QVQ_72B", "prompt_tokens": 11615, "completion_tokens": 2031, "total_tokens": 13646}
|
30 |
+
{"model": "qwen-2.5-1m-demo", "provider": "Qwen_Qwen_2_5M_Demo", "prompt_tokens": 11615, "completion_tokens": 1772, "total_tokens": 13387}
|
31 |
+
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 11615, "completion_tokens": 1227, "total_tokens": 12842}
|
32 |
+
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 14881, "completion_tokens": 1444, "total_tokens": 16325}
|
33 |
+
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 15098, "completion_tokens": 1191, "total_tokens": 16289}
|
34 |
+
{"model": "blackboxai-pro", "provider": "Blackbox", "prompt_tokens": 16324, "completion_tokens": 2044, "total_tokens": 18368}
|
35 |
+
{"model": "blackboxai-pro", "provider": "Blackbox", "prompt_tokens": 16324, "completion_tokens": 1930, "total_tokens": 18254}
|
36 |
+
{"model": "o3-mini", "provider": "Blackbox", "prompt_tokens": 16324, "completion_tokens": 1557, "total_tokens": 17881}
|
37 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 16324, "completion_tokens": 1459, "total_tokens": 17783}
|
38 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 20330, "completion_tokens": 1852, "total_tokens": 22182}
|
39 |
+
{"model": "voodoohop-flux-1-schnell", "provider": "VoodoohopFlux1Schnell", "prompt_tokens": 33, "completion_tokens": 228, "total_tokens": 261}
|
40 |
+
{"model": "voodoohop-flux-1-schnell", "provider": "VoodoohopFlux1Schnell", "prompt_tokens": 21, "completion_tokens": 200, "total_tokens": 221}
|
41 |
+
{"model": "voodoohop-flux-1-schnell", "provider": "VoodoohopFlux1Schnell", "prompt_tokens": 18, "completion_tokens": 193, "total_tokens": 211}
|
42 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 22186, "completion_tokens": 1572, "total_tokens": 23758}
|
43 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 23791, "completion_tokens": 1685, "total_tokens": 25476}
|
44 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 719, "completion_tokens": 270, "total_tokens": 989}
|
45 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 719, "completion_tokens": 258, "total_tokens": 977}
|
46 |
+
{"model": "stabilityai-stable-diffusion-3-5-large", "provider": "StableDiffusion35Large", "prompt_tokens": 18, "completion_tokens": 223, "total_tokens": 241}
|
47 |
+
{"model": "midijourney", "provider": "PollinationsAI", "completion_tokens": 250, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 1176, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1426}
|
48 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 18, "completion_tokens": 273, "total_tokens": 291}
|
49 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 18, "completion_tokens": 273, "total_tokens": 291}
|
50 |
+
{"model": "black-forest-labs-flux-1-dev", "provider": "BlackForestLabsFlux1Dev", "prompt_tokens": 12, "completion_tokens": 144, "total_tokens": 156}
|
51 |
+
{"model": "dall-e-3", "provider": "PollinationsAI", "prompt_tokens": 18, "completion_tokens": 275, "total_tokens": 293}
|
52 |
+
{"model": "dall-e-3", "provider": "PollinationsAI", "prompt_tokens": 18, "completion_tokens": 275, "total_tokens": 293}
|
53 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 18, "completion_tokens": 269, "total_tokens": 287}
|
54 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 18, "completion_tokens": 267, "total_tokens": 285}
|
55 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 18, "completion_tokens": 211, "total_tokens": 229}
|
56 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 645, "completion_tokens": 142, "total_tokens": 787}
|
57 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 645, "completion_tokens": 102, "total_tokens": 747}
|
58 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 704, "completion_tokens": 246, "total_tokens": 950}
|
59 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 760, "completion_tokens": 108, "total_tokens": 868}
|
60 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 765, "completion_tokens": 84, "total_tokens": 849}
|
61 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 765, "completion_tokens": 82, "total_tokens": 847}
|
62 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 765, "completion_tokens": 315, "total_tokens": 1080}
|
63 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 765, "completion_tokens": 106, "total_tokens": 871}
|
64 |
+
{"model": "qwen-qvq-72b-preview", "provider": "Qwen_QVQ_72B", "prompt_tokens": 27, "completion_tokens": 1517, "total_tokens": 1544}
|
65 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 96, "completion_tokens": 311, "total_tokens": 407}
|
66 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 96, "completion_tokens": 799, "total_tokens": 895}
|
67 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 96, "completion_tokens": 795, "total_tokens": 891}
|
68 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 96, "completion_tokens": 793, "total_tokens": 889}
|
69 |
+
{"model": "ImageGeneration", "provider": "Blackbox", "prompt_tokens": 96, "completion_tokens": 315, "total_tokens": 411}
|
70 |
+
{"model": "flux", "provider": "PollinationsImage", "prompt_tokens": 96, "completion_tokens": 261, "total_tokens": 357}
|
71 |
+
{"model": "flux", "provider": "Blackbox", "prompt_tokens": 96, "completion_tokens": 340, "total_tokens": 436}
|
72 |
+
{"model": "stabilityai-stable-diffusion-3-5-large", "provider": "StableDiffusion35Large", "prompt_tokens": 41, "completion_tokens": 184, "total_tokens": 225}
|
73 |
+
{"model": "qwen-2.5-1m-demo", "provider": "Qwen_Qwen_2_5M_Demo", "prompt_tokens": 77, "completion_tokens": 122, "total_tokens": 199}
|
74 |
+
{"model": "janus-pro-7b-image", "provider": "Janus_Pro_7B", "prompt_tokens": 39, "completion_tokens": 655, "total_tokens": 694}
|
75 |
+
{"model": "qwen-qvq-72b-preview", "provider": "Qwen_QVQ_72B", "prompt_tokens": 15, "completion_tokens": 27, "total_tokens": 42}
|
76 |
+
{"model": "flux-pro", "provider": "PollinationsImage", "prompt_tokens": 45, "completion_tokens": 724, "total_tokens": 769}
|
77 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 59, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 145, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 204}
|
78 |
+
{"model": "claude", "provider": "PollinationsAI", "completion_tokens": 56, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 162, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 218}
|
79 |
+
{"model": "openai-large", "provider": "PollinationsAI", "completion_tokens": 217, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 146, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 363}
|
80 |
+
{"model": "o3-mini", "provider": "Pi", "prompt_tokens": 15, "completion_tokens": 53, "total_tokens": 68}
|
81 |
+
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 303, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 3591, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 3894}
|
82 |
+
{"model": "command-r-plus-08-2024", "provider": "HuggingSpace", "prompt_tokens": 351, "completion_tokens": 1038, "total_tokens": 1389}
|
83 |
+
{"model": "dall-e-3", "provider": "PollinationsImage", "prompt_tokens": 96, "completion_tokens": 799, "total_tokens": 895}
|
84 |
+
{"model": "dall-e-3", "provider": "PollinationsImage", "prompt_tokens": 96, "completion_tokens": 803, "total_tokens": 899}
|
85 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 806, "completion_tokens": 317, "total_tokens": 1123}
|
86 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 806, "completion_tokens": 105, "total_tokens": 911}
|
87 |
+
{"model": "general", "provider": "ImageLabs", "prompt_tokens": 39, "completion_tokens": 166, "total_tokens": 205}
|
88 |
+
{"model": "dall-e-3", "provider": "PollinationsImage", "prompt_tokens": 96, "completion_tokens": 807, "total_tokens": 903}
|
89 |
+
{"model": "dall-e-3", "provider": "PollinationsImage", "prompt_tokens": 96, "completion_tokens": 801, "total_tokens": 897}
|
90 |
+
{"model": "dall-e-3", "provider": "PollinationsImage", "prompt_tokens": 96, "completion_tokens": 803, "total_tokens": 899}
|
91 |
+
{"model": "dall-e-3", "provider": "PollinationsImage", "prompt_tokens": 96, "completion_tokens": 807, "total_tokens": 903}
|
92 |
+
{"model": "dall-e-3", "provider": "PollinationsImage", "prompt_tokens": 96, "completion_tokens": 484, "total_tokens": 580}
|
93 |
+
{"model": "mistralai/Mistral-Nemo-Instruct-2407", "provider": "HuggingChat", "prompt_tokens": 63, "completion_tokens": 1051, "total_tokens": 1114}
|
94 |
+
{"model": "mistralai/Mistral-Nemo-Instruct-2407", "provider": "HuggingChat", "prompt_tokens": 24, "completion_tokens": 206, "total_tokens": 230}
|
95 |
+
{"model": "chat-llama-3-1-70b", "provider": "Glider", "prompt_tokens": 27, "completion_tokens": 244, "total_tokens": 271}
|
96 |
+
{"model": "chat-gemini-flash", "provider": "GizAI", "prompt_tokens": 27, "completion_tokens": 339, "total_tokens": 366}
|
97 |
+
{"model": "janus-pro-7b-image", "provider": "G4F", "prompt_tokens": 69, "completion_tokens": 1215, "total_tokens": 1284}
|
98 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 18, "completion_tokens": 209, "total_tokens": 227}
|
99 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 18, "completion_tokens": 205, "total_tokens": 223}
|
100 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 18, "completion_tokens": 211, "total_tokens": 229}
|
101 |
+
{"model": "gemini-1.5-pro", "provider": "FreeGpt", "prompt_tokens": 15, "completion_tokens": 76, "total_tokens": 91}
|
102 |
+
{"model": "mistral-7b", "provider": "Free2GPT", "prompt_tokens": 15, "completion_tokens": 416, "total_tokens": 431}
|
103 |
+
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 21, "completion_tokens": 190, "total_tokens": 211}
|
104 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 21, "completion_tokens": 226, "total_tokens": 247}
|
105 |
+
{"model": "meta-llama/Llama-3.3-70B-Instruct-Turbo", "provider": "DDG", "prompt_tokens": 17, "completion_tokens": 115, "total_tokens": 132}
|
106 |
+
{"model": "01-ai/Yi-34B-Chat", "provider": "DeepInfraChat", "prompt_tokens": 14, "total_tokens": 93, "completion_tokens": 79, "estimated_cost": 3.342e-05}
|
107 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "provider": "DeepInfraChat", "prompt_tokens": 7, "total_tokens": 99, "completion_tokens": 92, "estimated_cost": 6.508999999999999e-05}
|
108 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "provider": "DeepInfraChat", "prompt_tokens": 14, "total_tokens": 42, "completion_tokens": 28, "estimated_cost": 2.2539999999999998e-05}
|
109 |
+
{"model": "deepseek-ai/DeepSeek-V3", "provider": "DeepInfraChat", "prompt_tokens": 22, "completion_tokens": 154, "total_tokens": 176}
|
110 |
+
{"model": "deepseek-ai/DeepSeek-V3", "provider": "DeepInfraChat", "prompt_tokens": 11, "total_tokens": 182, "completion_tokens": 171, "estimated_cost": 0.00015758000000000002}
|
111 |
+
{"model": "black-forest-labs/FLUX-1.1-pro", "provider": "DeepInfra", "prompt_tokens": 41, "completion_tokens": 22, "total_tokens": 63}
|
112 |
+
{"model": "command-r-plus-08-2024", "provider": "CohereForAI", "prompt_tokens": 34, "completion_tokens": 672, "total_tokens": 706}
|
113 |
+
{"model": "command-r-plus-08-2024", "provider": "CohereForAI", "prompt_tokens": 21, "completion_tokens": 367, "total_tokens": 388}
|
114 |
+
{"model": "command-r7b-12-2024", "provider": "CohereForAI", "prompt_tokens": 22, "completion_tokens": 697, "total_tokens": 719}
|
115 |
+
{"model": "command-r-plus", "provider": "CohereForAI", "prompt_tokens": 22, "completion_tokens": 662, "total_tokens": 684}
|
116 |
+
{"model": "command-r-plus", "provider": "CohereForAI", "prompt_tokens": 20, "completion_tokens": 241, "total_tokens": 261}
|
117 |
+
{"model": "all-tools-230b", "provider": "ChatGLM", "prompt_tokens": 39, "completion_tokens": 132, "total_tokens": 171}
|
118 |
+
{"model": "black-forest-labs-flux-1-schnell", "provider": "BlackForestLabsFlux1Schnell", "prompt_tokens": 21, "completion_tokens": 188, "total_tokens": 209}
|
119 |
+
{"model": "blackboxai", "provider": "Blackbox", "prompt_tokens": 52, "completion_tokens": 93, "total_tokens": 145}
|
120 |
+
{"model": "GPT-4o", "provider": "Blackbox", "prompt_tokens": 12, "completion_tokens": 49, "total_tokens": 61}
|
121 |
+
{"model": "GPT-4o", "provider": "Blackbox", "prompt_tokens": 21, "completion_tokens": 77, "total_tokens": 98}
|
122 |
+
{"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 39, "completion_tokens": 308, "total_tokens": 347}
|
123 |
+
{"model": "DeepSeek-R1", "provider": "Blackbox", "prompt_tokens": 20, "completion_tokens": 779, "total_tokens": 799}
|
124 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 13, "completion_tokens": 15, "total_tokens": 28}
|
125 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 35, "completion_tokens": 17, "total_tokens": 52}
|
126 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 62, "completion_tokens": 17, "total_tokens": 79}
|
127 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 15, "completion_tokens": 10, "total_tokens": 25}
|
128 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 645, "completion_tokens": 109, "total_tokens": 754}
|
129 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 645, "completion_tokens": 240, "total_tokens": 885}
|
130 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 40, "completion_tokens": 11, "total_tokens": 51}
|
131 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 60, "completion_tokens": 27, "total_tokens": 87}
|
132 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 95, "completion_tokens": 42, "total_tokens": 137}
|
133 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 144, "completion_tokens": 58, "total_tokens": 202}
|
134 |
+
{"model": "sd-3.5", "provider": "HuggingSpace", "prompt_tokens": 208, "completion_tokens": 203, "total_tokens": 411}
|
135 |
+
{"model": "sd-3.5", "provider": "HuggingFace", "prompt_tokens": 224, "completion_tokens": 77, "total_tokens": 301}
|
136 |
+
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 251, "completion_tokens": 52, "total_tokens": 303}
|
137 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 811, "completion_tokens": 107, "total_tokens": 918}
|
138 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 811, "completion_tokens": 321, "total_tokens": 1132}
|
139 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 811, "completion_tokens": 321, "total_tokens": 1132}
|
140 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 318, "completion_tokens": 199, "total_tokens": 517}
|
141 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 863, "completion_tokens": 332, "total_tokens": 1195}
|
142 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 1938, "total_tokens": 2033, "completion_tokens": 95, "prompt_tokens_details": null}
|
143 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 3539, "total_tokens": 3893, "completion_tokens": 354, "prompt_tokens_details": null}
|
144 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 885, "completion_tokens": 334, "total_tokens": 1219}
|
145 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 910, "completion_tokens": 98, "total_tokens": 1008}
|
146 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 910, "completion_tokens": 104, "total_tokens": 1014}
|
147 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 910, "completion_tokens": 100, "total_tokens": 1010}
|
148 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 910, "completion_tokens": 104, "total_tokens": 1014}
|
149 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 23, "completion_tokens": 100, "total_tokens": 123}
|
150 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 23, "completion_tokens": 106, "total_tokens": 129}
|
151 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 23, "completion_tokens": 244, "total_tokens": 267}
|
152 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 50, "completion_tokens": 340, "total_tokens": 390}
|
153 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 50, "completion_tokens": 248, "total_tokens": 298}
|
154 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 50, "completion_tokens": 98, "total_tokens": 148}
|
155 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 50, "completion_tokens": 336, "total_tokens": 386}
|
156 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 50, "completion_tokens": 106, "total_tokens": 156}
|
157 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 50, "completion_tokens": 336, "total_tokens": 386}
|
158 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 50, "completion_tokens": 114, "total_tokens": 164}
|
159 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 23, "completion_tokens": 112, "total_tokens": 135}
|
160 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 50, "completion_tokens": 108, "total_tokens": 158}
|
161 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 150, "completion_tokens": 39, "total_tokens": 189}
|
162 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 204, "completion_tokens": 113, "total_tokens": 317}
|
163 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 521, "completion_tokens": 49, "total_tokens": 570}
|
164 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 574, "completion_tokens": 111, "total_tokens": 685}
|
165 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 755, "completion_tokens": 48, "total_tokens": 803}
|
166 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 929, "completion_tokens": 100, "total_tokens": 1029}
|
167 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 1275, "completion_tokens": 132, "total_tokens": 1407}
|
168 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 1596, "completion_tokens": 57, "total_tokens": 1653}
|
169 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 71, "completion_tokens": 73, "total_tokens": 144}
|
170 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 60, "completion_tokens": 120, "total_tokens": 180}
|
171 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 286, "completion_tokens": 61, "total_tokens": 347}
|
172 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 48, "completion_tokens": 52, "total_tokens": 100}
|
173 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 1833, "completion_tokens": 107, "total_tokens": 1940}
|
174 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 2008, "completion_tokens": 93, "total_tokens": 2101}
|
175 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 937, "completion_tokens": 120, "total_tokens": 1057}
|
176 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 937, "completion_tokens": 116, "total_tokens": 1053}
|
177 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 937, "completion_tokens": 248, "total_tokens": 1185}
|
178 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 937, "completion_tokens": 116, "total_tokens": 1053}
|
179 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 937, "completion_tokens": 112, "total_tokens": 1049}
|
180 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 937, "completion_tokens": 114, "total_tokens": 1051}
|
181 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 937, "completion_tokens": 250, "total_tokens": 1187}
|
182 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 937, "completion_tokens": 118, "total_tokens": 1055}
|
183 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 937, "completion_tokens": 344, "total_tokens": 1281}
|
184 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace", "prompt_tokens": 937, "completion_tokens": 118, "total_tokens": 1055}
|
185 |
+
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 937, "completion_tokens": 236, "total_tokens": 1173}
|
186 |
+
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 937, "completion_tokens": 244, "total_tokens": 1181}
|
187 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 33, "completion_tokens": 201, "total_tokens": 234}
|
188 |
+
{"model": "o3-mini", "provider": "Blackbox", "prompt_tokens": 17111, "completion_tokens": 3559, "total_tokens": 20670}
|
189 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
190 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
191 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
192 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
193 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
194 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
195 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
196 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
197 |
+
{"model": "flux-dev", "provider": "G4F"}
|
198 |
+
{"model": "flux-pro", "provider": "PollinationsAI"}
|
199 |
+
{"model": "flux-pro", "provider": "PollinationsAI"}
|
200 |
+
{"model": "flux-pro", "provider": "PollinationsAI"}
|
201 |
+
{"model": "flux-pro", "provider": "PollinationsAI"}
|
202 |
+
{"model": "flux-pro", "provider": "PollinationsAI"}
|
203 |
+
{"model": "flux-pro", "provider": "PollinationsAI"}
|
204 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat"}
|
205 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat"}
|
206 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
207 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
208 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
209 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
210 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
211 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
212 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
213 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
214 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
215 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 937, "completion_tokens": 0, "total_tokens": 937}
|
216 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 937, "completion_tokens": 0, "total_tokens": 937}
|
217 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 937, "completion_tokens": 0, "total_tokens": 937}
|
218 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 937, "completion_tokens": 0, "total_tokens": 937}
|
219 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 734, "completion_tokens": 0, "total_tokens": 734}
|
220 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 734, "completion_tokens": 0, "total_tokens": 734}
|
221 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 645, "completion_tokens": 0, "total_tokens": 645}
|
222 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 645, "completion_tokens": 0, "total_tokens": 645}
|
223 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 620, "completion_tokens": 0, "total_tokens": 620}
|
224 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 620, "completion_tokens": 0, "total_tokens": 620}
|
225 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 620, "completion_tokens": 0, "total_tokens": 620}
|
226 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 564, "completion_tokens": 0, "total_tokens": 564}
|
227 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 564, "completion_tokens": 0, "total_tokens": 564}
|
228 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 564, "completion_tokens": 0, "total_tokens": 564}
|
229 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 564, "completion_tokens": 0, "total_tokens": 564}
|
230 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 902, "completion_tokens": 0, "total_tokens": 902}
|
231 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 902, "completion_tokens": 0, "total_tokens": 902}
|
232 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 902, "completion_tokens": 0, "total_tokens": 902}
|
233 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 902, "completion_tokens": 0, "total_tokens": 902}
|
234 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 85, "completion_tokens": 0, "total_tokens": 85}
|
235 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 85, "completion_tokens": 0, "total_tokens": 85}
|
236 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 85, "completion_tokens": 0, "total_tokens": 85}
|
237 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 176, "completion_tokens": 0, "total_tokens": 176}
|
238 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
239 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
240 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
241 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
242 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
243 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
244 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
245 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
246 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
247 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
248 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
249 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
250 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
251 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
252 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
253 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
|
254 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 9, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 1140, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1149}
|
255 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1007, "completion_tokens": 0, "total_tokens": 1007}
|
256 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1007, "completion_tokens": 0, "total_tokens": 1007}
|
257 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1007, "completion_tokens": 0, "total_tokens": 1007}
|
258 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
259 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
260 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
261 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
262 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
263 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
264 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
265 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
266 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1062, "completion_tokens": 0, "total_tokens": 1062}
|
267 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1062, "completion_tokens": 0, "total_tokens": 1062}
|
268 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1062, "completion_tokens": 0, "total_tokens": 1062}
|
269 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1062, "completion_tokens": 0, "total_tokens": 1062}
|
270 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
271 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
272 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
273 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
274 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
275 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
276 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
277 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
278 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
279 |
+
{"model": "gpt-4o", "provider": "Blackbox", "prompt_tokens": 1529, "completion_tokens": 286, "total_tokens": 1815}
|
280 |
+
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 31, "completion_tokens": 3580, "total_tokens": 3611}
|
281 |
+
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
282 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
283 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
284 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
285 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
286 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 1013, "completion_tokens": 0, "total_tokens": 1013}
|
287 |
+
{"model": "qwen-qvq-72b-preview", "provider": "Qwen_QVQ_72B", "prompt_tokens": 1867, "completion_tokens": 1648, "total_tokens": 3515}
|
288 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 967, "completion_tokens": 0, "total_tokens": 967}
|
289 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 967, "completion_tokens": 0, "total_tokens": 967}
|
290 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 967, "completion_tokens": 0, "total_tokens": 967}
|
291 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 967, "completion_tokens": 0, "total_tokens": 967}
|
292 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 940, "completion_tokens": 0, "total_tokens": 940}
|
293 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 967, "completion_tokens": 0, "total_tokens": 967}
|
294 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 967, "completion_tokens": 0, "total_tokens": 967}
|
295 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 972, "completion_tokens": 0, "total_tokens": 972}
|
296 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 972, "completion_tokens": 0, "total_tokens": 972}
|
297 |
+
{"model": "qwen-2.5-1m-demo", "provider": "Qwen_Qwen_2_5M_Demo", "prompt_tokens": 3565, "completion_tokens": 982, "total_tokens": 4547}
|
298 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
299 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
300 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
301 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
302 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
303 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
304 |
+
{"model": "flux-pro", "provider": "PollinationsAI", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
305 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
306 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
307 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
308 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
309 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
310 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
311 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
312 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
313 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
314 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
315 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
316 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
|
317 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 982, "completion_tokens": 0, "total_tokens": 982}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "openai-reasoning", "provider": "PollinationsAI", "completion_tokens": 276, "completion_tokens_details": {"reasoning_tokens": 256}, "prompt_tokens": 155, "prompt_tokens_details": {"cached_tokens": 0}, "total_tokens": 431}
|
327 |
+
{"model": "qwq-32b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
328 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
329 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
330 |
+
{"model": "command-r-plus", "provider": "HuggingSpace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
331 |
+
{"model": "command-r-plus", "provider": "HuggingSpace", "prompt_tokens": 27, "completion_tokens": 0, "total_tokens": 27}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 1148, "completion_tokens": 0, "total_tokens": 1148}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 935, "completion_tokens": 0, "total_tokens": 935}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1178, "completion_tokens": 0, "total_tokens": 1178}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1178, "completion_tokens": 0, "total_tokens": 1178}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 1178, "completion_tokens": 0, "total_tokens": 1178}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 277, "completion_tokens": 0, "total_tokens": 277}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 277, "completion_tokens": 0, "total_tokens": 277}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 277, "completion_tokens": 0, "total_tokens": 277}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 277, "completion_tokens": 0, "total_tokens": 277}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 231, "completion_tokens": 0, "total_tokens": 231}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 231, "completion_tokens": 0, "total_tokens": 231}
{"model": "flux-dev", "provider": "PollinationsAI", "prompt_tokens": 231, "completion_tokens": 0, "total_tokens": 231}
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 2132, "completion_tokens": 271, "total_tokens": 2403}
|
364 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
|
365 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 245, "completion_tokens": 0, "total_tokens": 245}
|
366 |
+
{"model": "flux-dev", "provider": "G4F", "prompt_tokens": 535, "completion_tokens": 0, "total_tokens": 535}
|
367 |
+
{"model": "flux-dev", "provider": "HuggingFace", "prompt_tokens": 547, "completion_tokens": 0, "total_tokens": 547}
|
368 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 558, "completion_tokens": 0, "total_tokens": 558}
|
369 |
+
{"model": "command-r-plus", "provider": "HuggingSpace", "prompt_tokens": 2397, "completion_tokens": 0, "total_tokens": 2397}
|
370 |
+
{"model": "command-r-plus", "provider": "HuggingSpace", "prompt_tokens": 2592, "completion_tokens": 0, "total_tokens": 2592}
|
371 |
+
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 73, "completion_tokens": 0, "total_tokens": 73}
|
372 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
373 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 45, "completion_tokens": 0, "total_tokens": 45}
|
374 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 45, "completion_tokens": 0, "total_tokens": 45}
|
375 |
+
{"model": "command-r-plus", "provider": "HuggingSpace"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "G4F"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "G4F"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-dev", "provider": "PollinationsAI"}
{"model": "flux-schnell", "provider": "HuggingFace", "prompt_tokens": 26, "completion_tokens": 0, "total_tokens": 26}
|
408 |
+
{"model": "flux-schnell", "provider": "HuggingFace", "prompt_tokens": 62, "completion_tokens": 0, "total_tokens": 62}
|
409 |
+
{"model": "flux-schnell", "provider": "PollinationsImage", "prompt_tokens": 106, "completion_tokens": 0, "total_tokens": 106}
|
410 |
+
{"model": "deepseek-v3", "provider": "DeepInfraChat", "prompt_tokens": 13, "completion_tokens": 419, "total_tokens": 432}
|
411 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
412 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
413 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 106, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 2874, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 2980}
|
414 |
+
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 96, "completion_tokens": 0, "total_tokens": 96}
|
415 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 263, "completion_tokens": 0, "total_tokens": 263}
|
416 |
+
{"model": "meta-llama/Llama-3.2-3B-Instruct", "provider": "HuggingFace", "prompt_tokens": 179, "completion_tokens": 0, "total_tokens": 179}
|
417 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
418 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
419 |
+
{"model": "flux-dev", "provider": "G4F"}
|
420 |
+
{"model": "flux-dev", "provider": "G4F"}
|
421 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
422 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
423 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
424 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
425 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
426 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
427 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
428 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
429 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
430 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
431 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
432 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
433 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
434 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
435 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
436 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
437 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
438 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
439 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
440 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
441 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
442 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
443 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
444 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
445 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
446 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
447 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
448 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
449 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
450 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
451 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
452 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
453 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
454 |
+
{"model": "flux-dev", "provider": "PollinationsAI"}
|
455 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
456 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
457 |
+
{"model": "flux-dev", "provider": "HuggingFace"}
|
458 |
+
{"model": "flux-dev", "provider": "HuggingFace"}
|
459 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
460 |
+
{"model": "flux-dev", "provider": "HuggingFace"}
|
461 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace"}
|
462 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
463 |
+
{"model": "flux-dev", "provider": "HuggingFace"}
|
464 |
+
{"model": "flux-dev", "provider": "HuggingFace"}
usage/2025-02-23.jsonl
ADDED
@@ -0,0 +1,313 @@
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
2 |
+
{"model": "sd-3.5", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
3 |
+
{"model": "flux-schnell", "provider": "HuggingSpace", "prompt_tokens": 29, "completion_tokens": 0, "total_tokens": 29}
|
4 |
+
{"model": "sd-3.5", "provider": "HuggingSpace", "prompt_tokens": 122, "completion_tokens": 0, "total_tokens": 122}
|
5 |
+
{"model": "flux-schnell", "provider": "HuggingFace", "prompt_tokens": 122, "completion_tokens": 0, "total_tokens": 122}
|
6 |
+
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 122, "completion_tokens": 0, "total_tokens": 122}
|
7 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 139, "completion_tokens": 0, "total_tokens": 139}
|
8 |
+
{"model": "deepseek-v3", "provider": "Feature"}
|
9 |
+
{"model": "deepseek-r1", "provider": "Feature"}
|
10 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 15, "completion_tokens": 0, "total_tokens": 15}
|
11 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 2427, "completion_tokens": 0, "total_tokens": 2427}
|
12 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 2489, "completion_tokens": 0, "total_tokens": 2489}
|
13 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 2642, "completion_tokens": 0, "total_tokens": 2642}
|
14 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
15 |
+
{"model": "deepseek-v3", "provider": "Feature"}
|
16 |
+
{"model": "gpt-4o-mini", "provider": "Feature"}
|
17 |
+
{"model": "o3-mini", "provider": "Feature"}
|
18 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
|
19 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 44, "completion_tokens": 0, "total_tokens": 44}
|
20 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 163, "completion_tokens": 269, "total_tokens": 432}
|
21 |
+
{"model": "qwen-2.5-coder-32b", "provider": "HuggingFace", "prompt_tokens": 233, "completion_tokens": 0, "total_tokens": 233}
|
22 |
+
{"model": "command-r-plus", "provider": "HuggingSpace", "prompt_tokens": 51, "completion_tokens": 0, "total_tokens": 51}
|
23 |
+
{"model": "command-r-plus", "provider": "HuggingSpace", "prompt_tokens": 51, "completion_tokens": 0, "total_tokens": 51}
|
24 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 27, "completion_tokens": 1031, "total_tokens": 1058}
|
25 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 66, "completion_tokens": 643, "total_tokens": 709}
|
26 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 713, "completion_tokens": 682, "total_tokens": 1395}
|
27 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1435, "completion_tokens": 473, "total_tokens": 1908}
|
28 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1912, "completion_tokens": 1024, "total_tokens": 2936}
|
29 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1435, "completion_tokens": 473, "total_tokens": 1908}
|
30 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1435, "completion_tokens": 473, "total_tokens": 1908}
|
31 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1435, "completion_tokens": 473, "total_tokens": 1908}
|
32 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 5152, "completion_tokens": 463, "total_tokens": 5615}
|
33 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 5619, "completion_tokens": 993, "total_tokens": 6612}
|
34 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 31, "completion_tokens": 0, "total_tokens": 31}
|
35 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 6685, "completion_tokens": 439, "total_tokens": 7124}
|
36 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 7128, "completion_tokens": 806, "total_tokens": 7934}
|
37 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 31, "completion_tokens": 75, "total_tokens": 106}
|
38 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 206, "completion_tokens": 439, "total_tokens": 645}
|
39 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 696, "completion_tokens": 336, "total_tokens": 1032}
|
40 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 48, "completion_tokens": 111, "total_tokens": 159}
|
41 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 172, "completion_tokens": 151, "total_tokens": 323}
|
42 |
+
{"model": "flux-dev", "provider": "PollinationsImage"}
|
43 |
+
{"model": "flux-dev", "provider": "G4F"}
|
44 |
+
{"model": "flux-dev", "provider": "G4F"}
|
45 |
+
{"model": "flux-dev", "provider": "G4F"}
|
46 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 21, "completion_tokens": 108, "total_tokens": 129}
|
47 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 153, "completion_tokens": 230, "total_tokens": 383}
|
48 |
+
{"model": "gpt-4", "provider": "Blackbox", "prompt_tokens": 15, "completion_tokens": 585, "total_tokens": 600}
|
49 |
+
{"model": "gpt-4", "provider": "PollinationsAI", "completion_tokens": 919, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 8598, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 9517}
|
50 |
+
{"model": "blackboxai", "provider": "Blackbox", "prompt_tokens": 15, "completion_tokens": 528, "total_tokens": 543}
|
51 |
+
{"model": "o3-mini", "provider": "Blackbox", "prompt_tokens": 15, "completion_tokens": 437, "total_tokens": 452}
|
52 |
+
{"model": "all-tools-230b", "provider": "ChatGLM", "prompt_tokens": 15, "completion_tokens": 147, "total_tokens": 162}
|
53 |
+
{"model": "command-r-plus", "provider": "CohereForAI", "prompt_tokens": 15, "completion_tokens": 297, "total_tokens": 312}
|
54 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 15, "completion_tokens": 1089, "total_tokens": 1104}
|
55 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 15, "completion_tokens": 956, "total_tokens": 971}
|
56 |
+
{"model": "deepseek-ai/DeepSeek-R1", "provider": "Glider", "prompt_tokens": 4634, "completion_tokens": 967, "total_tokens": 5601}
|
57 |
+
{"model": "GPT-4o", "provider": "Blackbox", "prompt_tokens": 15, "completion_tokens": 548, "total_tokens": 563}
|
58 |
+
{"model": "all-tools-230b", "provider": "ChatGLM", "prompt_tokens": 15, "completion_tokens": 147, "total_tokens": 162}
|
59 |
+
{"model": "all-tools-230b", "provider": "ChatGLM", "prompt_tokens": 10, "completion_tokens": 518, "total_tokens": 528}
|
60 |
+
{"model": "mistral-7b", "provider": "Free2GPT", "prompt_tokens": 17, "completion_tokens": 2769, "total_tokens": 2786}
|
61 |
+
{"model": "mistral-7b", "provider": "Free2GPT", "prompt_tokens": 12, "completion_tokens": 784, "total_tokens": 796}
|
62 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 32, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 154, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 186}
|
63 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 117, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 201, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 318}
|
64 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 67, "completion_tokens": 0, "total_tokens": 67}
|
65 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 354, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 337, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 691}
|
66 |
+
{"model": "mistral-7b", "provider": "Free2GPT", "prompt_tokens": 130, "completion_tokens": 760, "total_tokens": 890}
|
67 |
+
{"model": "all-tools-230b", "provider": "ChatGLM", "prompt_tokens": 121, "completion_tokens": 680, "total_tokens": 801}
|
68 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 14, "completion_tokens": 0, "total_tokens": 14}
|
69 |
+
{"model": "claude", "provider": "PollinationsAI", "completion_tokens": 43, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 144, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 187}
|
70 |
+
{"model": "claude", "provider": "PollinationsAI", "completion_tokens": 34, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 198, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 232}
|
71 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat"}
|
72 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat"}
|
73 |
+
{"model": "openai-reasoning", "provider": "PollinationsAI", "completion_tokens": 534, "completion_tokens_details": {"reasoning_tokens": 512}, "prompt_tokens": 189, "prompt_tokens_details": {"cached_tokens": 0}, "total_tokens": 723}
|
74 |
+
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 11, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 171, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 182}
|
75 |
+
{"model": "o3-mini", "provider": "Blackbox", "prompt_tokens": 25, "completion_tokens": 731, "total_tokens": 756}
|
76 |
+
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingChat", "prompt_tokens": 34, "completion_tokens": 0, "total_tokens": 34}
|
77 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 34, "completion_tokens": 0, "total_tokens": 34}
|
78 |
+
{"model": "meta-llama/Llama-3.3-70B-Instruct", "provider": "HuggingChat", "prompt_tokens": 66, "completion_tokens": 0, "total_tokens": 66}
|
79 |
+
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingChat", "prompt_tokens": 19, "completion_tokens": 0, "total_tokens": 19}
|
80 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat", "prompt_tokens": 76, "completion_tokens": 0, "total_tokens": 76}
|
81 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat", "prompt_tokens": 130, "completion_tokens": 0, "total_tokens": 130}
|
82 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat", "prompt_tokens": 276, "completion_tokens": 0, "total_tokens": 276}
|
83 |
+
{"model": "yi-34b", "provider": "DeepInfraChat", "prompt_tokens": 24245, "total_tokens": 24352, "completion_tokens": 107, "estimated_cost": 0.00319465}
|
84 |
+
{"model": "o3-mini-high", "provider": "Feature", "prompt_tokens": 11, "completion_tokens": 133, "total_tokens": 144}
|
85 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
86 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
87 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
88 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 18, "completion_tokens": 0, "total_tokens": 18}
|
89 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 18, "completion_tokens": 0, "total_tokens": 18}
|
90 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
91 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
|
92 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 32, "completion_tokens": 0, "total_tokens": 32}
|
93 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 32, "completion_tokens": 0, "total_tokens": 32}
|
94 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 99, "completion_tokens": 0, "total_tokens": 99}
|
95 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 37, "completion_tokens": 0, "total_tokens": 37}
|
96 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 420, "completion_tokens": 0, "total_tokens": 420}
|
97 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 139, "completion_tokens": 0, "total_tokens": 139}
|
98 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 384, "completion_tokens": 0, "total_tokens": 384}
|
99 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 577, "completion_tokens": 0, "total_tokens": 577}
|
100 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 876, "completion_tokens": 0, "total_tokens": 876}
|
101 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 15, "completion_tokens": 0, "total_tokens": 15}
|
102 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 58, "completion_tokens": 0, "total_tokens": 58}
|
103 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
104 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
105 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 44, "completion_tokens": 0, "total_tokens": 44}
|
106 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 81, "completion_tokens": 0, "total_tokens": 81}
|
107 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 22, "completion_tokens": 0, "total_tokens": 22}
|
108 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 82, "completion_tokens": 78, "total_tokens": 160}
|
109 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 1746, "completion_tokens": 0, "total_tokens": 1746}
|
110 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 1746, "completion_tokens": 0, "total_tokens": 1746}
|
111 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 2371, "completion_tokens": 0, "total_tokens": 2371}
|
112 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 2540, "completion_tokens": 38, "total_tokens": 2578}
|
113 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
114 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 74, "completion_tokens": 0, "total_tokens": 74}
|
115 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 200, "completion_tokens": 150, "total_tokens": 350}
|
116 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 1578, "completion_tokens": 239, "total_tokens": 1817}
|
117 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
118 |
+
{"model": "dall-e-3", "provider": "Feature", "prompt_tokens": 18, "completion_tokens": 0, "total_tokens": 18}
|
119 |
+
{"model": "dall-e-3", "provider": "Feature", "prompt_tokens": 84, "completion_tokens": 0, "total_tokens": 84}
|
120 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
121 |
+
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 22, "completion_tokens": 102, "total_tokens": 124}
|
122 |
+
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 128, "completion_tokens": 240, "total_tokens": 368}
|
123 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 298, "completion_tokens": 47, "total_tokens": 345}
|
124 |
+
{"model": "o3-mini", "provider": "Blackbox", "prompt_tokens": 372, "completion_tokens": 599, "total_tokens": 971}
|
125 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 5893, "completion_tokens": 1118, "total_tokens": 7011}
|
126 |
+
{"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 8735, "completion_tokens": 0, "total_tokens": 8735}
|
127 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
128 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 196, "completion_tokens": 9, "total_tokens": 205}
|
129 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 683, "completion_tokens": 0, "total_tokens": 683}
|
130 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 723, "completion_tokens": 40, "total_tokens": 763}
|
131 |
+
{"model": "o3-mini", "provider": "Feature", "prompt_tokens": 20, "completion_tokens": 75, "total_tokens": 95}
|
132 |
+
{"model": "o3-mini", "provider": "Feature", "prompt_tokens": 127, "completion_tokens": 125, "total_tokens": 252}
|
133 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
134 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 46, "completion_tokens": 93, "total_tokens": 139}
|
135 |
+
{"model": "command-r-plus", "provider": "HuggingSpace", "prompt_tokens": 18, "completion_tokens": 0, "total_tokens": 18}
|
136 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 27, "completion_tokens": 0, "total_tokens": 27}
|
137 |
+
{"model": "flux", "provider": "G4F", "prompt_tokens": 38, "completion_tokens": 0, "total_tokens": 38}
|
138 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 329, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 1892, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 2221}
|
139 |
+
{"model": "llama", "provider": "PollinationsAI", "prompt_tokens": 873, "completion_tokens": 256, "total_tokens": 1129}
|
140 |
+
{"model": "midijourney", "provider": "PollinationsAI", "completion_tokens": 661, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 6346, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 7007}
|
141 |
+
{"model": "midijourney", "provider": "PollinationsAI", "completion_tokens": 144, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 6555, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 6699}
|
142 |
+
{"model": "midijourney", "provider": "PollinationsAI", "completion_tokens": 497, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 7102, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 2688}, "total_tokens": 7599}
|
143 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
144 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 95, "completion_tokens": 0, "total_tokens": 95}
|
145 |
+
{"model": "openai-large", "provider": "PollinationsAI", "completion_tokens": 895, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 2967, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 3862}
|
146 |
+
{"model": "openai-reasoning", "provider": "PollinationsAI", "completion_tokens": 757, "completion_tokens_details": {"reasoning_tokens": 704}, "prompt_tokens": 162, "prompt_tokens_details": {"cached_tokens": 0}, "total_tokens": 919}
|
147 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 39, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 150, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 189}
|
148 |
+
{"model": "openai-large", "provider": "PollinationsAI", "completion_tokens": 72, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 150, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 222}
|
149 |
+
{"model": "dall-e-3", "provider": "PollinationsAI", "prompt_tokens": 314, "completion_tokens": 0, "total_tokens": 314}
|
150 |
+
{"model": "o3-mini", "provider": "Feature", "prompt_tokens": 326, "completion_tokens": 0, "total_tokens": 326}
|
151 |
+
{"model": "o1-preview", "provider": "Feature", "prompt_tokens": 327, "completion_tokens": 0, "total_tokens": 327}
|
152 |
+
{"model": "dall-e-3", "provider": "Feature", "prompt_tokens": 355, "completion_tokens": 0, "total_tokens": 355}
|
153 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 127, "completion_tokens": 55, "total_tokens": 182}
|
154 |
+
{"model": "o1-mini", "provider": "Feature", "prompt_tokens": 403, "completion_tokens": 0, "total_tokens": 403}
|
155 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 433, "completion_tokens": 0, "total_tokens": 433}
|
156 |
+
{"model": "openai-reasoning", "provider": "PollinationsAI", "completion_tokens": 1451, "completion_tokens_details": {"reasoning_tokens": 320}, "prompt_tokens": 5457, "prompt_tokens_details": {"cached_tokens": 0}, "total_tokens": 6908}
|
157 |
+
{"model": "evil", "provider": "PollinationsAI", "prompt_tokens": 34823, "total_tokens": 34996, "completion_tokens": 173, "prompt_tokens_details": null}
|
158 |
+
{"model": "openai-reasoning", "provider": "PollinationsAI", "completion_tokens": 1804, "completion_tokens_details": {"reasoning_tokens": 1280}, "prompt_tokens": 10306, "prompt_tokens_details": {"cached_tokens": 3968}, "total_tokens": 12110}
|
159 |
+
{"model": "openai-reasoning", "provider": "PollinationsAI", "completion_tokens": 551, "completion_tokens_details": {"reasoning_tokens": 512}, "prompt_tokens": 162, "prompt_tokens_details": {"cached_tokens": 0}, "total_tokens": 713}
|
160 |
+
{"model": "openai-reasoning", "provider": "PollinationsAI", "completion_tokens": 1693, "completion_tokens_details": {"reasoning_tokens": 320}, "prompt_tokens": 10894, "prompt_tokens_details": {"cached_tokens": 0}, "total_tokens": 12587}
|
161 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 1430, "completion_tokens": 0, "total_tokens": 1430}
|
162 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 506, "completion_tokens": 0, "total_tokens": 506}
|
163 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 529, "completion_tokens": 0, "total_tokens": 529}
|
164 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 543, "completion_tokens": 0, "total_tokens": 543}
|
165 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 555, "completion_tokens": 0, "total_tokens": 555}
|
166 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 555, "completion_tokens": 0, "total_tokens": 555}
|
167 |
+
{"model": "dall-e-3", "provider": "PollinationsAI", "prompt_tokens": 555, "completion_tokens": 0, "total_tokens": 555}
|
168 |
+
{"model": "dall-e-3", "provider": "PollinationsAI", "prompt_tokens": 543, "completion_tokens": 0, "total_tokens": 543}
|
169 |
+
{"model": "dall-e-3", "provider": "PollinationsAI", "prompt_tokens": 529, "completion_tokens": 0, "total_tokens": 529}
|
170 |
+
{"model": "auto", "provider": "Feature"}
|
171 |
+
{"model": "gemini-thinking", "provider": "PollinationsAI", "completion_tokens": 751, "prompt_tokens": 44, "total_tokens": 795}
|
172 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 1507, "completion_tokens": 0, "total_tokens": 1507}
|
173 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 10, "completion_tokens": 1, "total_tokens": 11}
|
174 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
175 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 56, "completion_tokens": 187, "total_tokens": 243}
|
176 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 1442, "completion_tokens": 294, "total_tokens": 1736}
|
177 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 2970, "completion_tokens": 97, "total_tokens": 3067}
|
178 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 5109, "completion_tokens": 82, "total_tokens": 5191}
|
179 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 5707, "completion_tokens": 19, "total_tokens": 5726}
|
180 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 5925, "completion_tokens": 172, "total_tokens": 6097}
|
181 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 8623, "completion_tokens": 35, "total_tokens": 8658}
|
182 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 9956, "completion_tokens": 72, "total_tokens": 10028}
|
183 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 90, "completion_tokens": 140, "total_tokens": 230}
|
184 |
+
{"model": "o1", "provider": "Feature", "prompt_tokens": 10827, "completion_tokens": 72, "total_tokens": 10899}
|
185 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 20, "completion_tokens": 0, "total_tokens": 20}
|
186 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 1303, "completion_tokens": 0, "total_tokens": 1303}
|
187 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 1328, "completion_tokens": 0, "total_tokens": 1328}
|
188 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 20, "completion_tokens": 0, "total_tokens": 20}
|
189 |
+
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 411, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 2245, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 2656}
|
190 |
+
{"model": "deepseek-r1", "provider": "Feature", "prompt_tokens": 2535, "completion_tokens": 1147, "total_tokens": 3682}
|
191 |
+
{"model": "deepseek-r1", "provider": "Feature", "prompt_tokens": 2681, "completion_tokens": 765, "total_tokens": 3446}
|
192 |
+
{"model": "deepseek-r1", "provider": "Feature", "prompt_tokens": 3205, "completion_tokens": 518, "total_tokens": 3723}
|
193 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 3766, "completion_tokens": 0, "total_tokens": 3766}
|
194 |
+
{"model": "Jovie/Midjourney", "provider": "HuggingFace", "prompt_tokens": 590, "completion_tokens": 0, "total_tokens": 590}
|
195 |
+
{"model": "strangerzonehf/Flux-Midjourney-Mix2-LoRA", "provider": "HuggingFace", "prompt_tokens": 645, "completion_tokens": 0, "total_tokens": 645}
|
196 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 660, "completion_tokens": 0, "total_tokens": 660}
|
197 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 675, "completion_tokens": 0, "total_tokens": 675}
|
198 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 675, "completion_tokens": 0, "total_tokens": 675}
|
199 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 702, "completion_tokens": 0, "total_tokens": 702}
|
200 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 702, "completion_tokens": 0, "total_tokens": 702}
|
201 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 714, "completion_tokens": 0, "total_tokens": 714}
|
202 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 714, "completion_tokens": 0, "total_tokens": 714}
|
203 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 714, "completion_tokens": 0, "total_tokens": 714}
|
204 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 727, "completion_tokens": 0, "total_tokens": 727}
|
205 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 727, "completion_tokens": 0, "total_tokens": 727}
|
206 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 747, "completion_tokens": 0, "total_tokens": 747}
|
207 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 747, "completion_tokens": 0, "total_tokens": 747}
|
208 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 766, "completion_tokens": 0, "total_tokens": 766}
|
209 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 780, "completion_tokens": 0, "total_tokens": 780}
|
210 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 780, "completion_tokens": 0, "total_tokens": 780}
|
211 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
|
212 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 54, "completion_tokens": 0, "total_tokens": 54}
|
213 |
+
{"model": "flux-dev", "provider": "HuggingSpace", "prompt_tokens": 791, "completion_tokens": 0, "total_tokens": 791}
|
214 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingChat", "prompt_tokens": 802, "completion_tokens": 0, "total_tokens": 802}
|
215 |
+
{"model": "turbo", "provider": "PollinationsAI", "prompt_tokens": 802, "completion_tokens": 0, "total_tokens": 802}
|
216 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 802, "completion_tokens": 0, "total_tokens": 802}
|
217 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
218 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 956, "completion_tokens": 0, "total_tokens": 956}
|
219 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
220 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 956, "completion_tokens": 0, "total_tokens": 956}
|
221 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 28, "completion_tokens": 0, "total_tokens": 28}
|
222 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 972, "completion_tokens": 0, "total_tokens": 972}
|
223 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 972, "completion_tokens": 0, "total_tokens": 972}
|
224 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 79, "completion_tokens": 0, "total_tokens": 79}
|
225 |
+
{"model": "flux", "provider": "PollinationsAI", "prompt_tokens": 972, "completion_tokens": 0, "total_tokens": 972}
|
226 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 125, "completion_tokens": 0, "total_tokens": 125}
|
227 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
228 |
+
{"model": "cagliostrolab/animagine-xl-4.0", "provider": "HuggingFace", "prompt_tokens": 972, "completion_tokens": 0, "total_tokens": 972}
|
229 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 35, "completion_tokens": 0, "total_tokens": 35}
|
230 |
+
{"model": "strangerzonehf/Flux-Midjourney-Mix2-LoRA", "provider": "HuggingFace", "prompt_tokens": 972, "completion_tokens": 0, "total_tokens": 972}
|
231 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 52, "completion_tokens": 0, "total_tokens": 52}
|
232 |
+
{"model": "strangerzonehf/Flux-Midjourney-Mix2-LoRA", "provider": "HuggingFace", "prompt_tokens": 972, "completion_tokens": 0, "total_tokens": 972}
|
233 |
+
{"model": "strangerzonehf/Flux-Midjourney-Mix2-LoRA", "provider": "HuggingFace", "prompt_tokens": 1030, "completion_tokens": 0, "total_tokens": 1030}
|
234 |
+
{"model": "black-forest-labs/FLUX.1-schnell", "provider": "HuggingFace", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
235 |
+
{"model": "black-forest-labs/FLUX.1-dev", "provider": "HuggingFace", "prompt_tokens": 1041, "completion_tokens": 0, "total_tokens": 1041}
|
236 |
+
{"model": "o3-mini", "provider": "DDG", "prompt_tokens": 1665, "completion_tokens": 174, "total_tokens": 1839}
|
237 |
+
{"model": "gpt-4o", "provider": "Blackbox", "prompt_tokens": 1665, "completion_tokens": 875, "total_tokens": 2540}
|
238 |
+
{"model": "gpt-4o", "provider": "Liaobots", "prompt_tokens": 1665, "completion_tokens": 164, "total_tokens": 1829}
|
239 |
+
{"model": "gpt-4o", "provider": "PollinationsAI", "completion_tokens": 69, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 1766, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1835}
|
240 |
+
{"model": "gpt-4o", "provider": "Blackbox", "prompt_tokens": 1659, "completion_tokens": 448, "total_tokens": 2107}
|
241 |
+
{"model": "gpt-4o", "provider": "Blackbox", "prompt_tokens": 3697, "completion_tokens": 254, "total_tokens": 3951}
|
242 |
+
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingChat", "prompt_tokens": 127, "completion_tokens": 0, "total_tokens": 127}
|
243 |
+
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingChat", "prompt_tokens": 1246, "completion_tokens": 0, "total_tokens": 1246}
|
244 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 2730, "completion_tokens": 466, "total_tokens": 3196}
|
245 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 3375, "completion_tokens": 294, "total_tokens": 3669}
|
246 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 5764, "completion_tokens": 156, "total_tokens": 5920}
|
247 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 6287, "completion_tokens": 299, "total_tokens": 6586}
|
248 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 7188, "completion_tokens": 269, "total_tokens": 7457}
|
249 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 10138, "completion_tokens": 414, "total_tokens": 10552}
|
250 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 64, "completion_tokens": 541, "total_tokens": 605}
|
251 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 1226, "completion_tokens": 350, "total_tokens": 1576}
|
252 |
+
{"model": "flux-schnell", "provider": "PollinationsImage", "prompt_tokens": 51, "completion_tokens": 0, "total_tokens": 51}
|
253 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 2641, "completion_tokens": 427, "total_tokens": 3068}
|
254 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 15, "completion_tokens": 291, "total_tokens": 306}
|
255 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
256 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 37, "completion_tokens": 0, "total_tokens": 37}
|
257 |
+
{"model": "deepseek-r1", "provider": "HuggingFace", "prompt_tokens": 96, "completion_tokens": 215, "total_tokens": 311}
|
258 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
259 |
+
{"model": "gpt-4o", "provider": "Feature", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
260 |
+
{"model": "dall-e-3", "provider": "Feature", "prompt_tokens": 101, "completion_tokens": 0, "total_tokens": 101}
|
261 |
+
{"model": "dall-e-3", "provider": "Feature", "prompt_tokens": 240, "completion_tokens": 0, "total_tokens": 240}
|
262 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 240, "completion_tokens": 0, "total_tokens": 240}
|
263 |
+
{"model": "flux-schnell", "provider": "HuggingFace", "prompt_tokens": 240, "completion_tokens": 0, "total_tokens": 240}
|
264 |
+
{"model": "flux-schnell", "provider": "G4F", "prompt_tokens": 461, "completion_tokens": 0, "total_tokens": 461}
|
265 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
266 |
+
{"model": "sd-3.5", "provider": "HuggingSpace", "prompt_tokens": 633, "completion_tokens": 0, "total_tokens": 633}
|
267 |
+
{"model": "sd-3.5", "provider": "HuggingSpace", "prompt_tokens": 805, "completion_tokens": 0, "total_tokens": 805}
|
268 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 28, "completion_tokens": 0, "total_tokens": 28}
|
269 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 805, "completion_tokens": 0, "total_tokens": 805}
|
270 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 103, "completion_tokens": 0, "total_tokens": 103}
|
271 |
+
{"model": "midjourney", "provider": "PollinationsAI", "prompt_tokens": 899, "completion_tokens": 0, "total_tokens": 899}
|
272 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 1153, "completion_tokens": 0, "total_tokens": 1153}
|
273 |
+
{"model": "auto", "provider": "Feature", "prompt_tokens": 1428, "completion_tokens": 371, "total_tokens": 1799}
|
274 |
+
{"model": "auto", "provider": "Feature", "prompt_tokens": 1428, "completion_tokens": 314, "total_tokens": 1742}
|
275 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat"}
|
276 |
+
{"model": "Qwen/QwQ-32B-Preview", "provider": "HuggingChat"}
|
277 |
+
{"model": "CohereForAI/c4ai-command-r-plus-08-2024", "provider": "HuggingChat"}
|
278 |
+
{"model": "deepseek-r1", "provider": "HuggingFace"}
|
279 |
+
{"model": "deepseek-r1", "provider": "HuggingFace"}
|
280 |
+
{"model": "command-r", "provider": "HuggingSpace"}
|
281 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace"}
|
282 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace"}
|
283 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gpt-4o-mini", "provider": "DDG", "prompt_tokens": 33, "completion_tokens": 0, "total_tokens": 33}
|
288 |
+
{"model": "meta-llama/Llama-3.3-70B-Instruct-Turbo", "provider": "DeepInfraChat", "prompt_tokens": 20, "total_tokens": 630, "completion_tokens": 610, "estimated_cost": 0.0001854}
|
289 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 238, "completion_tokens": 286, "total_tokens": 524}
|
290 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 1031, "completion_tokens": 531, "total_tokens": 1562}
|
291 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 1470, "completion_tokens": 265, "total_tokens": 1735}
|
292 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 1858, "completion_tokens": 1082, "total_tokens": 2940}
|
293 |
+
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat", "prompt_tokens": 32, "completion_tokens": 169, "total_tokens": 201}
|
294 |
+
{"model": "auto", "provider": "OpenaiChat", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
295 |
+
{"model": "auto", "provider": "OpenaiChat", "prompt_tokens": 39, "completion_tokens": 0, "total_tokens": 39}
|
296 |
+
{"model": "gpt-4o-mini", "provider": "OpenaiChat", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
|
297 |
+
{"model": "gpt-4o-mini", "provider": "DDG", "prompt_tokens": 30, "completion_tokens": 0, "total_tokens": 30}
|
298 |
+
{"model": "gpt-4o", "provider": "Blackbox", "prompt_tokens": 30, "completion_tokens": 0, "total_tokens": 30}
|
299 |
+
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
|
300 |
+
{"model": "openai-community/gpt2", "provider": "HuggingFace", "prompt_tokens": 38, "completion_tokens": 0, "total_tokens": 38}
|
301 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
302 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
303 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
304 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
305 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
306 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
307 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
308 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
309 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
310 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
311 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
312 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
313 |
+
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
|
usage/2025-02-24.jsonl
ADDED
@@ -0,0 +1,82 @@
{"model": "gemini-2.0-flash-thinking", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 13, "completion_tokens": 29, "total_tokens": 42}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 16, "completion_tokens": 9, "total_tokens": 25}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 16, "completion_tokens": 436, "total_tokens": 452}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 24, "completion_tokens": 582, "total_tokens": 606}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 622, "completion_tokens": 808, "total_tokens": 1430}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "flux", "provider": "G4F", "prompt_tokens": 21, "completion_tokens": 0, "total_tokens": 21}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 21, "completion_tokens": 0, "total_tokens": 21}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 12, "completion_tokens": 0, "total_tokens": 12}
{"model": "o3-mini", "provider": "Blackbox", "prompt_tokens": 4022, "completion_tokens": 0, "total_tokens": 4022}
{"model": "o3-mini", "provider": "Blackbox"}
{"model": "o3-mini", "provider": "Blackbox"}
{"model": "command-r7b-12-2024", "provider": "CohereForAI"}
{"model": "deepseek-ai/DeepSeek-R1", "provider": "DeepInfraChat", "prompt_tokens": 3945, "total_tokens": 5862, "completion_tokens": 1917, "estimated_cost": 0.007559549999999999}
{"model": "deepseek-ai/DeepSeek-V3", "provider": "DeepInfraChat"}
{"model": "deepseek-v3", "provider": "DeepInfraChat", "prompt_tokens": 3168, "total_tokens": 3181, "completion_tokens": 13, "estimated_cost": 0.0015638899999999996}
{"model": "deepseek-v3", "provider": "Blackbox", "prompt_tokens": 125, "completion_tokens": 0, "total_tokens": 125}
{"model": "deepseek-v3", "provider": "DeepInfraChat", "prompt_tokens": 3201, "total_tokens": 3214, "completion_tokens": 13, "estimated_cost": 0.0015800599999999999}
{"model": "deepseek-ai/DeepSeek-V3", "provider": "DeepInfraChat", "prompt_tokens": 4549, "total_tokens": 5810, "completion_tokens": 1261, "estimated_cost": 0.0033512999999999998}
{"model": "gpt-4o-mini-free", "provider": "Liaobots"}
{"model": "gpt-4o-mini-free", "provider": "Liaobots"}
{"model": "gpt-4o-mini-free", "provider": "Liaobots"}
{"model": "gpt-4o-mini-free", "provider": "Liaobots"}
{"model": "gpt-4o-mini-free", "provider": "Liaobots"}
{"model": "gemini-2.0-flash-thinking-exp", "provider": "Liaobots"}
{"model": "Copilot", "provider": "Copilot"}
{"model": "Copilot", "provider": "Copilot"}
{"model": "Copilot", "provider": "Copilot"}
{"model": "Copilot", "provider": "Copilot"}
{"model": "Copilot", "provider": "Copilot"}
{"model": "command-r-plus-08-2024", "provider": "CohereForAI"}
{"model": "command-r-plus-08-2024", "provider": "CohereForAI"}
{"model": "command-r", "provider": "HuggingSpace"}
{"model": "llama-3.2-11b", "provider": "HuggingFace"}
{"model": "llama-3.2-11b", "provider": "HuggingFace"}
{"model": "openai", "provider": "PollinationsAI", "completion_tokens": 143, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 1465, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1608}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace"}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace"}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 98, "completion_tokens": 0, "total_tokens": 98}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace"}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 181, "completion_tokens": 0, "total_tokens": 181}
{"model": "llama-3.2-11b", "provider": "HuggingFace", "prompt_tokens": 16, "completion_tokens": 0, "total_tokens": 16}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace"}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace"}
{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "provider": "HuggingFace"}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingChat"}
{"model": "gpt-4", "provider": "Feature"}
{"model": "o3-mini", "provider": "Feature"}
{"model": "black-forest-labs-flux-1-dev", "provider": "HuggingSpace"}
{"model": "gpt-4", "provider": "Feature"}
{"model": "DeepSeek-V3", "provider": "Blackbox", "prompt_tokens": 1213, "completion_tokens": 0, "total_tokens": 1213}
{"model": "DeepSeek-V3", "provider": "Blackbox", "prompt_tokens": 2125, "completion_tokens": 0, "total_tokens": 2125}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 8, "completion_tokens": 16, "total_tokens": 24}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 123, "completion_tokens": 467, "total_tokens": 590}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 21, "completion_tokens": 89, "total_tokens": 110}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 14, "completion_tokens": 49, "total_tokens": 63}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 14, "completion_tokens": 23, "total_tokens": 37}
{"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 12, "completion_tokens": 28, "total_tokens": 40}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 17, "completion_tokens": 399, "total_tokens": 416}
{"model": "Qwen/Qwen2.5-72B-Instruct", "provider": "HuggingFace", "prompt_tokens": 436, "completion_tokens": 873, "total_tokens": 1309}
{"model": "cagliostrolab/animagine-xl-4.0", "provider": "HuggingFace", "prompt_tokens": 22, "completion_tokens": 81, "total_tokens": 103}
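Each usage record above is one JSON object per line with at least a "model" and a "provider" field; most text completions also carry "prompt_tokens", "completion_tokens", and "total_tokens", and some DeepInfraChat entries add an "estimated_cost". As a minimal sketch of how these daily files could be summarized, the following Python snippet reads every usage/*.jsonl file and totals requests and tokens per provider/model pair; the aggregate_usage helper and the printed report are illustrative assumptions, not part of the Space's own code.

import json
from collections import defaultdict
from pathlib import Path

def aggregate_usage(usage_dir: str = "usage"):
    # Sum request counts and token totals per (provider, model) pair.
    # Records without token counts still count as one request.
    totals = defaultdict(lambda: {"requests": 0, "prompt_tokens": 0,
                                  "completion_tokens": 0, "total_tokens": 0})
    for path in sorted(Path(usage_dir).glob("*.jsonl")):
        for line in path.read_text(encoding="utf-8").splitlines():
            if not line.strip():
                continue  # skip blank lines in the JSONL file
            record = json.loads(line)
            key = (record.get("provider", "unknown"), record.get("model", "unknown"))
            entry = totals[key]
            entry["requests"] += 1
            entry["prompt_tokens"] += record.get("prompt_tokens", 0)
            entry["completion_tokens"] += record.get("completion_tokens", 0)
            entry["total_tokens"] += record.get("total_tokens", 0)
    return dict(totals)

if __name__ == "__main__":
    for (provider, model), stats in sorted(aggregate_usage().items()):
        print(f"{provider:<16} {model:<50} requests={stats['requests']:<4} total_tokens={stats['total_tokens']}")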